__author__ = "Lukas Leufen, Felix Kleinert"
__date__ = '2019-12-02'

import logging
import os

import keras
import tensorflow as tf
from keras import losses
from keras.callbacks import ModelCheckpoint
from keras.optimizers import SGD
from keras.regularizers import l2

from src.helpers import l_p_loss, LearningRateDecay
from src.model_modules.flatten import flatten_tail
from src.model_modules.inception_model import InceptionModelBase
from src.run_modules.run_environment import RunEnvironment


class ModelSetup(RunEnvironment):
    """Sets up, builds and compiles the Keras model and stores it in the data store."""

    def __init__(self):
        # create run framework
        super().__init__()
        self.model = None
        path = self.data_store.get("experiment_path", "general")
        exp_name = self.data_store.get("experiment_name", "general")
        self.scope = "general.model"
        self.checkpoint_name = os.path.join(path, f"{exp_name}_model-best.h5")
        self._run()

    def _run(self):
        # create checkpoint
        self._set_checkpoint()
        # set all model settings
        self.my_model_settings()
        # build model graph using settings from my_model_settings()
        self.build_model()
        # plot model structure
        self.plot_model()
        # load weights if no training shall be performed
        if self.data_store.get("trainable", self.scope) is False:
            self.load_weights()
        # compile model
        self.compile_model()

    def compile_model(self):
        optimizer = self.data_store.get("optimizer", self.scope)
        loss = self.data_store.get("loss", self.scope)
        self.model.compile(optimizer=optimizer, loss=loss, metrics=["mse", "mae"])
        self.data_store.put("model", self.model, self.scope)

    def _set_checkpoint(self):
        checkpoint = ModelCheckpoint(self.checkpoint_name, verbose=1, monitor='val_loss', save_best_only=True,
                                     mode='auto')
        self.data_store.put("checkpoint", checkpoint, self.scope)

    def load_weights(self):
        try:
            self.model.load_weights(self.checkpoint_name)
            logging.info('reload weights...')
        except OSError:
            logging.info('no weights to reload...')
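
    # On a first run no checkpoint file exists yet; load_weights then raises
    # OSError and the model keeps its fresh initialisation.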

    def build_model(self):
        args_list = ["activation", "window_history_size", "channels", "regularizer", "dropout_rate",
                     "window_lead_time"]
        args = self.data_store.create_args_dict(args_list, self.scope)
        # my_little_model is wired in here; my_model (below) is the
        # inception-based alternative with two outputs
        self.model: keras.Model = my_little_model(**args)

    def plot_model(self):  # pragma: no cover
        with tf.device("/cpu:0"):
            path = self.data_store.get("experiment_path", "general")
            name = self.data_store.get("experiment_name", "general") + "_model.pdf"
            file_name = os.path.join(path, name)
            keras.utils.plot_model(self.model, to_file=file_name, show_shapes=True, show_layer_names=True)
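
    # Note: keras.utils.plot_model requires the optional pydot package plus a
    # graphviz installation and raises an ImportError without them.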

    def my_model_settings(self):
        # channels: derive the number of input variables from a sample batch
        X, _ = self.data_store.get("generator", "general.train")[0]
        channels = X.shape[-1]  # input variables
        self.data_store.put("channels", channels, self.scope)
        # dropout
        self.data_store.put("dropout_rate", 0.1, self.scope)
        # regularizer
        self.data_store.put("regularizer", l2(0.1), self.scope)
        # learning rate
        initial_lr = 1e-2
        self.data_store.put("initial_lr", initial_lr, self.scope)
        optimizer = SGD(lr=initial_lr, momentum=0.9)
        # optimizer = Adam(lr=initial_lr, amsgrad=True)  # alternative (needs keras.optimizers.Adam import)
        self.data_store.put("optimizer", optimizer, self.scope)
        self.data_store.put("lr_decay", LearningRateDecay(base_lr=initial_lr, drop=.94, epochs_drop=10), self.scope)
        # learning settings
        self.data_store.put("epochs", 2, self.scope)
        self.data_store.put("batch_size", 256, self.scope)
        # activation (alternatives: keras.layers.ELU, keras.layers.LeakyReLU, keras.activations.tanh)
        activation = keras.layers.PReLU
        self.data_store.put("activation", activation, self.scope)
        # set loss
        loss_all = my_little_loss()
        self.data_store.put("loss", loss_all, self.scope)


def my_loss():
    # one loss per model output: a custom L4 loss for the minor tail and
    # plain MSE for the main tail of my_model (see below)
    loss = l_p_loss(4)
    keras_loss = losses.mean_squared_error
    loss_all = [loss] + [keras_loss]
    return loss_all


def my_little_loss():
    return losses.mean_squared_error
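
# l_p_loss(4) presumably builds a mean L^4 error along these lines (sketch
# only; the actual definition lives in src.helpers):
#
#     def l_p_loss(power):
#         def loss(y_true, y_pred):
#             return keras.backend.mean(
#                 keras.backend.pow(keras.backend.abs(y_pred - y_true), power),
#                 axis=-1)
#         return loss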


def my_little_model(activation, window_history_size, channels, regularizer, dropout_rate, window_lead_time):
    # regularizer is accepted for signature parity with my_model but not used here
    X_input = keras.layers.Input(
        shape=(window_history_size + 1, 1, channels))  # add 1 to window_size to include current time step t0
    X_in = keras.layers.Conv2D(32, (1, 1), padding='same', name='{}_Conv_1x1'.format("major"))(X_input)
    X_in = activation(name='{}_conv_act'.format("major"))(X_in)
    X_in = keras.layers.Flatten(name='{}'.format("major"))(X_in)
    X_in = keras.layers.Dropout(dropout_rate, name='{}_Dropout_1'.format("major"))(X_in)
    X_in = keras.layers.Dense(64, name='{}_Dense_64'.format("major"))(X_in)
    X_in = activation()(X_in)
    X_in = keras.layers.Dense(32, name='{}_Dense_32'.format("major"))(X_in)
    X_in = activation()(X_in)
    X_in = keras.layers.Dense(16, name='{}_Dense_16'.format("major"))(X_in)
    X_in = activation()(X_in)
    X_in = keras.layers.Dense(window_lead_time, name='{}_Dense'.format("major"))(X_in)
    out_main = activation()(X_in)
    return keras.Model(inputs=X_input, outputs=[out_main])
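
# Standalone usage sketch for my_little_model (the sizes 13/9/3 are
# hypothetical stand-ins for window_history_size, channels and
# window_lead_time; in the pipeline they come from the data store):
#
#     model = my_little_model(activation=keras.layers.PReLU,
#                             window_history_size=13, channels=9,
#                             regularizer=l2(0.1), dropout_rate=0.1,
#                             window_lead_time=3)
#     model.compile(optimizer=SGD(lr=1e-2, momentum=0.9), loss=my_little_loss())
#     model.summary()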


def my_model(activation, window_history_size, channels, regularizer, dropout_rate, window_lead_time):
    conv_settings_dict1 = {
        'tower_1': {'reduction_filter': 8, 'tower_filter': 8 * 2, 'tower_kernel': (3, 1), 'activation': activation},
        'tower_2': {'reduction_filter': 8, 'tower_filter': 8 * 2, 'tower_kernel': (5, 1), 'activation': activation},
        'tower_3': {'reduction_filter': 8, 'tower_filter': 8 * 2, 'tower_kernel': (1, 1), 'activation': activation},
    }
    pool_settings_dict1 = {'pool_kernel': (3, 1), 'tower_filter': 8 * 2, 'activation': activation}

    conv_settings_dict2 = {
        'tower_1': {'reduction_filter': 8 * 2, 'tower_filter': 16 * 2 * 2, 'tower_kernel': (3, 1),
                    'activation': activation},
        'tower_2': {'reduction_filter': 8 * 2, 'tower_filter': 16 * 2 * 2, 'tower_kernel': (5, 1),
                    'activation': activation},
        'tower_3': {'reduction_filter': 8 * 2, 'tower_filter': 16 * 2 * 2, 'tower_kernel': (1, 1),
                    'activation': activation},
    }
    pool_settings_dict2 = {'pool_kernel': (3, 1), 'tower_filter': 16, 'activation': activation}

    conv_settings_dict3 = {
        'tower_1': {'reduction_filter': 16 * 4, 'tower_filter': 32 * 2, 'tower_kernel': (3, 1),
                    'activation': activation},
        'tower_2': {'reduction_filter': 16 * 4, 'tower_filter': 32 * 2, 'tower_kernel': (5, 1),
                    'activation': activation},
        'tower_3': {'reduction_filter': 16 * 4, 'tower_filter': 32 * 2, 'tower_kernel': (1, 1),
                    'activation': activation},
    }
    pool_settings_dict3 = {'pool_kernel': (3, 1), 'tower_filter': 32, 'activation': activation}

    ##########################################
    inception_model = InceptionModelBase()

    X_input = keras.layers.Input(
        shape=(window_history_size + 1, 1, channels))  # add 1 to window_size to include current time step t0

    X_in = inception_model.inception_block(X_input, conv_settings_dict1, pool_settings_dict1, regularizer=regularizer,
                                           batch_normalisation=True)
    out_minor = flatten_tail(X_in, 'Minor_1', bound_weight=True, activation=activation, dropout_rate=dropout_rate,
                             reduction_filter=4, first_dense=32, window_lead_time=window_lead_time)

    X_in = keras.layers.Dropout(dropout_rate)(X_in)
    X_in = inception_model.inception_block(X_in, conv_settings_dict2, pool_settings_dict2, regularizer=regularizer,
                                           batch_normalisation=True)

    X_in = keras.layers.Dropout(dropout_rate)(X_in)
    X_in = inception_model.inception_block(X_in, conv_settings_dict3, pool_settings_dict3, regularizer=regularizer,
                                           batch_normalisation=True)

    #############################################
    out_main = flatten_tail(X_in, 'Main', activation=activation, bound_weight=True, dropout_rate=dropout_rate,
                            reduction_filter=64, first_dense=64, window_lead_time=window_lead_time)

    return keras.Model(inputs=X_input, outputs=[out_minor, out_main])
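
# Usage sketch for the two-output variant (same hypothetical sizes as above):
# my_model returns [out_minor, out_main], so compiling it needs one loss per
# output, which is exactly what my_loss() returns:
#
#     model = my_model(activation=keras.layers.PReLU, window_history_size=13,
#                      channels=9, regularizer=l2(0.1), dropout_rate=0.1,
#                      window_lead_time=3)
#     model.compile(optimizer=SGD(lr=1e-2, momentum=0.9), loss=my_loss())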