Skip to content
Snippets Groups Projects

Merge request (Vincent, issue #310): IntelliO3 time-series architecture implementation into MLAir

1 file changed
+20 additions
−35 deletions
Compare changes
  • Side-by-side
  • Inline
@@ -126,6 +126,7 @@ from mlair.model_modules import AbstractModelClass
from mlair.model_modules.inception_model import InceptionModelBase
from mlair.model_modules.flatten import flatten_tail
from mlair.model_modules.advanced_paddings import PadUtils, Padding2D, SymmetricPadding2D
from mlair.model_modules.loss import l_p_loss
class MyLittleModelHourly(AbstractModelClass):
@@ -349,7 +350,7 @@ class MyTowerModel(AbstractModelClass):
self.compile_options = {"loss": [keras.losses.mean_squared_error], "metrics": ["mse"]}
class MyPaperModel(AbstractModelClass):
class IntelliO3_ts_architecture(AbstractModelClass):
def __init__(self, input_shape: list, output_shape: list):
"""
@@ -366,9 +367,9 @@ class MyPaperModel(AbstractModelClass):
from mlair.model_modules.keras_extensions import LearningRateDecay
# settings
self.dropout_rate = .3
self.regularizer = keras.regularizers.l2(0.001)
self.initial_lr = 1e-3
self.dropout_rate = .35
self.regularizer = keras.regularizers.l2(0.01)
self.initial_lr = 1e-4
self.lr_decay = LearningRateDecay(base_lr=self.initial_lr, drop=.94, epochs_drop=10)
self.activation = keras.layers.ELU
self.padding = "SymPad2D"
@@ -398,35 +399,22 @@ class MyPaperModel(AbstractModelClass):
conv_settings_dict1 = {
'tower_1': {'reduction_filter': 8, 'tower_filter': 16 * 2, 'tower_kernel': (3, 1),
'activation': activation},
# 'tower_2': {'reduction_filter': 8, 'tower_filter': 16 * 2, 'tower_kernel': (5, 1),
# 'activation': activation},
# 'tower_3': {'reduction_filter': 8, 'tower_filter': 16 * 2, 'tower_kernel': (1, 1),
# 'activation': activation},
# 'tower_4':{'reduction_filter':8, 'tower_filter':8*2, 'tower_kernel':(7,1), 'activation':activation},
'tower_2': {'reduction_filter': 8, 'tower_filter': 16 * 2, 'tower_kernel': (5, 1),
'activation': activation},
'tower_3': {'reduction_filter': 8, 'tower_filter': 16 * 2, 'tower_kernel': (1, 1),
'activation': activation}
}
pool_settings_dict1 = {'pool_kernel': (3, 1), 'tower_filter': 16, 'activation': activation}
conv_settings_dict2 = {
'tower_1': {'reduction_filter': 64, 'tower_filter': 32 * 2, 'tower_kernel': (3, 1),
'activation': activation},
# 'tower_2': {'reduction_filter': 64, 'tower_filter': 32 * 2, 'tower_kernel': (5, 1),
# 'activation': activation},
# 'tower_3': {'reduction_filter': 64, 'tower_filter': 32 * 2, 'tower_kernel': (1, 1),
# 'activation': activation},
# 'tower_4':{'reduction_filter':8*2, 'tower_filter':16*2, 'tower_kernel':(7,1), 'activation':activation},
}
pool_settings_dict2 = {'pool_kernel': (3, 1), 'tower_filter': 32, 'activation': activation}
conv_settings_dict3 = {
'tower_1': {'reduction_filter': 64 * 2, 'tower_filter': 32 * 4, 'tower_kernel': (3, 1),
'activation': activation},
'tower_2': {'reduction_filter': 64 * 2, 'tower_filter': 32 * 4, 'tower_kernel': (5, 1),
'tower_2': {'reduction_filter': 64, 'tower_filter': 32 * 2, 'tower_kernel': (5, 1),
'activation': activation},
'tower_3': {'reduction_filter': 64 * 2, 'tower_filter': 32 * 4, 'tower_kernel': (1, 1),
'activation': activation},
# 'tower_4':{'reduction_filter':16*4, 'tower_filter':32, 'tower_kernel':(7,1), 'activation':activation},
'tower_3': {'reduction_filter': 64, 'tower_filter': 32 * 2, 'tower_kernel': (1, 1),
'activation': activation}
}
pool_settings_dict3 = {'pool_kernel': (3, 1), 'tower_filter': 32, 'activation': activation}
pool_settings_dict2 = {'pool_kernel': (3, 1), 'tower_filter': 32, 'activation': activation}
##########################################
inception_model = InceptionModelBase()
@@ -445,10 +433,9 @@ class MyPaperModel(AbstractModelClass):
regularizer=self.regularizer,
batch_normalisation=True,
padding=self.padding)
# out_minor1 = flatten_tail(X_in, 'minor_1', False, self.dropout_rate, self.window_lead_time,
# self.activation, 32, 64)
out_minor1 = flatten_tail(X_in, inner_neurons=64, activation=activation, output_neurons=self._output_shape,
output_activation='linear', reduction_filter=32,
output_activation='linear', reduction_filter=32 * 2,
name='minor_1', bound_weight=False, dropout_rate=self.dropout_rate,
kernel_regularizer=self.regularizer
)
@@ -459,10 +446,6 @@ class MyPaperModel(AbstractModelClass):
regularizer=self.regularizer,
batch_normalisation=True, padding=self.padding)
# X_in = keras.layers.Dropout(self.dropout_rate)(X_in)
#
# X_in = inception_model.inception_block(X_in, conv_settings_dict3, pool_settings_dict3, regularizer=self.regularizer,
# batch_normalisation=True)
#############################################
out_main = flatten_tail(X_in, inner_neurons=64 * 2, activation=activation, output_neurons=self._output_shape,
@@ -474,6 +457,8 @@ class MyPaperModel(AbstractModelClass):
self.model = keras.Model(inputs=X_input, outputs=[out_minor1, out_main])
def set_compile_options(self):
    """
    Set the compile options for the IntelliO3 architecture.

    Uses an Adam optimizer (with AMSGrad) and a two-headed loss:
    an L4 (``l_p_loss(4)``) loss on the minor/intermediate output and
    mean squared error on the main output, weighted 1:99 in favour of
    the main branch.

    Note: the pasted diff span contained both the removed lines (a dead
    ``SGD`` optimizer assignment and a ``compile_options`` dict that was
    immediately overwritten) and the added lines; only the final, added
    version is kept here.
    """
    # NOTE(review): lowercase `adam` and the `lr=` keyword are legacy
    # Keras 2 spellings (modern Keras uses `Adam` / `learning_rate=`) —
    # confirm against the pinned keras version before changing.
    self.compile_options = {"optimizer": keras.optimizers.adam(lr=self.initial_lr, amsgrad=True),
                            "loss": [l_p_loss(4), keras.losses.mean_squared_error],
                            "metrics": ['mse'],
                            "loss_weights": [.01, .99]
                            }
\ No newline at end of file
Loading