From 33940965f7812decca45b5e23ebafcaaff243d10 Mon Sep 17 00:00:00 2001
From: leufen1 <l.leufen@fz-juelich.de>
Date: Fri, 12 Mar 2021 12:02:20 +0100
Subject: [PATCH] first CNN class try

---
 mlair/model_modules/convolutional_networks.py | 113 ++++++++++++++++++
 1 file changed, 113 insertions(+)
 create mode 100644 mlair/model_modules/convolutional_networks.py

diff --git a/mlair/model_modules/convolutional_networks.py b/mlair/model_modules/convolutional_networks.py
new file mode 100644
index 00000000..f9acdb72
--- /dev/null
+++ b/mlair/model_modules/convolutional_networks.py
@@ -0,0 +1,113 @@
+__author__ = "Lukas Leufen"
+__date__ = '2021-02-'
+
+from functools import partial
+
+from mlair.model_modules import AbstractModelClass
+from mlair.helpers import select_from_dict
+from mlair.model_modules.loss import var_loss, custom_loss
+from mlair.model_modules.advanced_paddings import PadUtils, Padding2D, SymmetricPadding2D
+
+import keras
+
+
+class CNN(AbstractModelClass):
+    _activation = {"relu": keras.layers.ReLU, "tanh": partial(keras.layers.Activation, "tanh"),
+                   "sigmoid": partial(keras.layers.Activation, "sigmoid"),
+                   "linear": partial(keras.layers.Activation, "linear"),
+                   "selu": partial(keras.layers.Activation, "selu")}
+    _initializer = {"selu": keras.initializers.lecun_normal()}
+    _optimizer = {"adam": keras.optimizers.adam}
+    _regularizer = {"l1": keras.regularizers.l1, "l2": keras.regularizers.l2, "l1_l2": keras.regularizers.l1_l2}
+    _requirements = ["lr", "beta_1", "beta_2", "epsilon", "decay", "amsgrad"]
+
+    def __init__(self, input_shape: list, output_shape: list, activation="relu", activation_output="linear",
+                 optimizer="adam", regularizer=None, **kwargs):
+
+        assert len(input_shape) == 1
+        assert len(output_shape) == 1
+        super().__init__(input_shape[0], output_shape[0])
+
+        # settings
+        self.activation = self._set_activation(activation)
+        self.activation_name = activation
+        self.activation_output = self._set_activation(activation_output)
+        self.activation_output_name = activation_output
+        self.kernel_initializer = self._initializer.get(activation, "glorot_uniform")
+        self.kernel_regularizer = self._set_regularizer(regularizer, **kwargs)
+        self.optimizer = self._set_optimizer(optimizer, **kwargs)
+
+        # apply to model
+        self.set_model()
+        self.set_compile_options()
+        self.set_custom_objects(loss=custom_loss([keras.losses.mean_squared_error, var_loss]), var_loss=var_loss)
+
+    def _set_activation(self, activation):
+        try:
+            return self._activation[activation.lower()]
+        except KeyError:
+            raise AttributeError(f"Given activation {activation} is not supported in this model class.")
+
+    def _set_optimizer(self, optimizer, **kwargs):
+        try:
+            opt_name = optimizer.lower()
+            opt = self._optimizer[opt_name]
+            opt_kwargs = {}
+            if opt_name == "adam":
+                opt_kwargs = select_from_dict(kwargs, self._requirements)
+            return opt(**opt_kwargs)
+        except KeyError:
+            raise AttributeError(f"Given optimizer {optimizer} is not supported in this model class.")
+
+    def _set_regularizer(self, regularizer, **kwargs):
+        if regularizer is None or (isinstance(regularizer, str) and regularizer.lower() == "none"):
+            return None
+        try:
+            reg_name = regularizer.lower()
+            reg = self._regularizer[reg_name]
+            reg_kwargs = {}
+            if reg_name in ["l1", "l2"]:
+                reg_kwargs = select_from_dict(kwargs, reg_name, remove_none=True)
+                if reg_name in reg_kwargs:
+                    reg_kwargs["l"] = reg_kwargs.pop(reg_name)
+            elif reg_name == "l1_l2":
+                reg_kwargs = select_from_dict(kwargs, ["l1", "l2"], remove_none=True)
+            return reg(**reg_kwargs)
+        except KeyError:
+            raise AttributeError(f"Given regularizer {regularizer} is not supported in this model class.")
+
+    def set_model(self):
+        """
+        Build the model.
+        """
+        x_input = keras.layers.Input(shape=self._input_shape)
+        kernel = (1, 1)
+        pad_size = PadUtils.get_padding_for_same(kernel)
+        x_in = Padding2D("SymPad2D")(padding=pad_size, name="SymPad1")(x_input)
+        x_in = keras.layers.Conv2D(filters=16, kernel_size=kernel,
+                                   kernel_initializer=self.kernel_initializer,
+                                   kernel_regularizer=self.kernel_regularizer)(x_in)
+        x_in = self.activation()(x_in)
+        x_in = keras.layers.Conv2D(filters=32, kernel_size=kernel,
+                                   kernel_initializer=self.kernel_initializer,
+                                   kernel_regularizer=self.kernel_regularizer)(x_in)
+        x_in = self.activation()(x_in)
+        x_in = Padding2D("SymPad2D")(padding=pad_size, name="SymPad2")(x_in)
+        x_in = keras.layers.Conv2D(filters=64, kernel_size=kernel,
+                                   kernel_initializer=self.kernel_initializer,
+                                   kernel_regularizer=self.kernel_regularizer)(x_in)
+        x_in = self.activation()(x_in)
+        x_in = keras.layers.Flatten()(x_in)
+        x_in = keras.layers.Dense(64, kernel_initializer=self.kernel_initializer,
+                                  kernel_regularizer=self.kernel_regularizer)(x_in)
+        x_in = self.activation()(x_in)
+        x_in = keras.layers.Dense(16, kernel_initializer=self.kernel_initializer,
+                                  kernel_regularizer=self.kernel_regularizer)(x_in)
+        x_in = self.activation()(x_in)
+        x_in = keras.layers.Dense(self._output_shape)(x_in)
+        out = self.activation_output(name=f"{self.activation_output_name}_output")(x_in)
+        self.model = keras.Model(inputs=x_input, outputs=[out])
+
+    def set_compile_options(self):
+        self.compile_options = {"loss": [custom_loss([keras.losses.mean_squared_error, var_loss])],
+                                "metrics": ["mse", "mae", var_loss]}
-- 
GitLab