Skip to content
Snippets Groups Projects
Commit fab62a19 authored by felix kleinert's avatar felix kleinert
Browse files

Merge branch 'vincent_issue310-intellio3-implementation-into-mlair' into 'develop'

Resolve "IntelliO3 implementation into MLAir"

Closes #310

See merge request !303
parents 74e6e53c 6ee12817
No related branches found
No related tags found
5 merge requests!319add all changes of dev into release v1.4.0 branch,!318Resolve "release v1.4.0",!317enabled window_lead_time=1,!303Resolve "IntelliO3 implementation into MLAir",!259Draft: Resolve "WRF-Datahandler should inherit from SingleStationDatahandler"
Pipeline #71799 passed
......@@ -126,6 +126,7 @@ from mlair.model_modules import AbstractModelClass
from mlair.model_modules.inception_model import InceptionModelBase
from mlair.model_modules.flatten import flatten_tail
from mlair.model_modules.advanced_paddings import PadUtils, Padding2D, SymmetricPadding2D
from mlair.model_modules.loss import l_p_loss
class MyLittleModelHourly(AbstractModelClass):
......@@ -349,7 +350,7 @@ class MyTowerModel(AbstractModelClass):
self.compile_options = {"loss": [keras.losses.mean_squared_error], "metrics": ["mse"]}
class MyPaperModel(AbstractModelClass):
class IntelliO3_ts_architecture(AbstractModelClass):
def __init__(self, input_shape: list, output_shape: list):
"""
......@@ -366,9 +367,9 @@ class MyPaperModel(AbstractModelClass):
from mlair.model_modules.keras_extensions import LearningRateDecay
# settings
self.dropout_rate = .3
self.regularizer = keras.regularizers.l2(0.001)
self.initial_lr = 1e-3
self.dropout_rate = .35
self.regularizer = keras.regularizers.l2(0.01)
self.initial_lr = 1e-4
self.lr_decay = LearningRateDecay(base_lr=self.initial_lr, drop=.94, epochs_drop=10)
self.activation = keras.layers.ELU
self.padding = "SymPad2D"
......@@ -398,35 +399,22 @@ class MyPaperModel(AbstractModelClass):
conv_settings_dict1 = {
'tower_1': {'reduction_filter': 8, 'tower_filter': 16 * 2, 'tower_kernel': (3, 1),
'activation': activation},
# 'tower_2': {'reduction_filter': 8, 'tower_filter': 16 * 2, 'tower_kernel': (5, 1),
# 'activation': activation},
# 'tower_3': {'reduction_filter': 8, 'tower_filter': 16 * 2, 'tower_kernel': (1, 1),
# 'activation': activation},
# 'tower_4':{'reduction_filter':8, 'tower_filter':8*2, 'tower_kernel':(7,1), 'activation':activation},
'tower_2': {'reduction_filter': 8, 'tower_filter': 16 * 2, 'tower_kernel': (5, 1),
'activation': activation},
'tower_3': {'reduction_filter': 8, 'tower_filter': 16 * 2, 'tower_kernel': (1, 1),
'activation': activation}
}
pool_settings_dict1 = {'pool_kernel': (3, 1), 'tower_filter': 16, 'activation': activation}
conv_settings_dict2 = {
'tower_1': {'reduction_filter': 64, 'tower_filter': 32 * 2, 'tower_kernel': (3, 1),
'activation': activation},
# 'tower_2': {'reduction_filter': 64, 'tower_filter': 32 * 2, 'tower_kernel': (5, 1),
# 'activation': activation},
# 'tower_3': {'reduction_filter': 64, 'tower_filter': 32 * 2, 'tower_kernel': (1, 1),
# 'activation': activation},
# 'tower_4':{'reduction_filter':8*2, 'tower_filter':16*2, 'tower_kernel':(7,1), 'activation':activation},
}
pool_settings_dict2 = {'pool_kernel': (3, 1), 'tower_filter': 32, 'activation': activation}
conv_settings_dict3 = {
'tower_1': {'reduction_filter': 64 * 2, 'tower_filter': 32 * 4, 'tower_kernel': (3, 1),
'activation': activation},
'tower_2': {'reduction_filter': 64 * 2, 'tower_filter': 32 * 4, 'tower_kernel': (5, 1),
'tower_2': {'reduction_filter': 64, 'tower_filter': 32 * 2, 'tower_kernel': (5, 1),
'activation': activation},
'tower_3': {'reduction_filter': 64 * 2, 'tower_filter': 32 * 4, 'tower_kernel': (1, 1),
'activation': activation},
# 'tower_4':{'reduction_filter':16*4, 'tower_filter':32, 'tower_kernel':(7,1), 'activation':activation},
'tower_3': {'reduction_filter': 64, 'tower_filter': 32 * 2, 'tower_kernel': (1, 1),
'activation': activation}
}
pool_settings_dict3 = {'pool_kernel': (3, 1), 'tower_filter': 32, 'activation': activation}
pool_settings_dict2 = {'pool_kernel': (3, 1), 'tower_filter': 32, 'activation': activation}
##########################################
inception_model = InceptionModelBase()
......@@ -445,10 +433,9 @@ class MyPaperModel(AbstractModelClass):
regularizer=self.regularizer,
batch_normalisation=True,
padding=self.padding)
# out_minor1 = flatten_tail(X_in, 'minor_1', False, self.dropout_rate, self.window_lead_time,
# self.activation, 32, 64)
out_minor1 = flatten_tail(X_in, inner_neurons=64, activation=activation, output_neurons=self._output_shape,
output_activation='linear', reduction_filter=32,
output_activation='linear', reduction_filter=32 * 2,
name='minor_1', bound_weight=False, dropout_rate=self.dropout_rate,
kernel_regularizer=self.regularizer
)
......@@ -459,10 +446,6 @@ class MyPaperModel(AbstractModelClass):
regularizer=self.regularizer,
batch_normalisation=True, padding=self.padding)
# X_in = keras.layers.Dropout(self.dropout_rate)(X_in)
#
# X_in = inception_model.inception_block(X_in, conv_settings_dict3, pool_settings_dict3, regularizer=self.regularizer,
# batch_normalisation=True)
#############################################
out_main = flatten_tail(X_in, inner_neurons=64 * 2, activation=activation, output_neurons=self._output_shape,
......@@ -474,6 +457,8 @@ class MyPaperModel(AbstractModelClass):
self.model = keras.Model(inputs=X_input, outputs=[out_minor1, out_main])
def set_compile_options(self):
self.optimizer = keras.optimizers.SGD(lr=self.initial_lr, momentum=0.9)
self.compile_options = {"loss": [keras.losses.mean_squared_error, keras.losses.mean_squared_error],
"metrics": ['mse', 'mae']}
self.compile_options = {"optimizer": keras.optimizers.adam(lr=self.initial_lr, amsgrad=True),
"loss": [l_p_loss(4), keras.losses.mean_squared_error],
"metrics": ['mse'],
"loss_weights": [.01, .99]
}
\ No newline at end of file
import keras
import pytest
from mlair.model_modules.model_class import MyPaperModel
from mlair.model_modules.model_class import IntelliO3_ts_architecture
class TestMyPaperModel:
class TestIntelliO3_ts_architecture:
@pytest.fixture
def mpm(self):
return MyPaperModel(input_shape=[(7, 1, 9)], output_shape=[(4,)])
return IntelliO3_ts_architecture(input_shape=[(7, 1, 9)], output_shape=[(4,)])
def test_init(self, mpm):
# check if loss number of loss functions fit to model outputs
......
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment.