diff --git a/src/helpers/statistics.py b/src/helpers/statistics.py
index dd102cf3ad376cf145e3c447ff9ef902815776bb..99ae80d5d0622f4bce923b6914335ca8423e9308 100644
--- a/src/helpers/statistics.py
+++ b/src/helpers/statistics.py
@@ -11,7 +11,17 @@ from typing import Union, Tuple
 Data = Union[xr.DataArray, pd.DataFrame]
 
 
-def apply_inverse_transformation(data, mean, std=None, method="standardise"):
+def apply_inverse_transformation(data: Data, mean: Data, std: Data = None, method: str = "standardise") -> Data:
+    """
+    Apply inverse transformation for given statistics.
+
+    :param data: transform this data back
+    :param mean: mean of transformation
+    :param std: standard deviation of transformation (optional)
+    :param method: transformation method
+
+    :return: inverse transformed data
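+
+    A round-trip sketch, assuming `values` is a xarray.DataArray with a "datetime" dimension:
+
+    .. code-block:: python
+
+        mean, std, values_std = standardise(values, dim="datetime")
+        values_orig = apply_inverse_transformation(values_std, mean, std, method="standardise")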
+    """
     if method == 'standardise':  # pragma: no branch
         return standardise_inverse(data, mean, std)
     elif method == 'centre':  # pragma: no branch
@@ -25,84 +35,79 @@ def apply_inverse_transformation(data, mean, std=None, method="standardise"):
 
 def standardise(data: Data, dim: Union[str, int]) -> Tuple[Data, Data, Data]:
     """
-    This function standardises a xarray.dataarray (along dim) or pandas.DataFrame (along axis) with mean=0 and std=1
-
-    :param data:
-    :param string/int dim:
-            | for xarray.DataArray as string: name of dimension which should be standardised
-            | for pandas.DataFrame as int: axis of dimension which should be standardised
-    :return: xarray.DataArrays or pandas.DataFrames:
-            #. mean: Mean of data
-            #. std: Standard deviation of data
-            #. data: Standardised data
+    Standardise a xarray.DataArray (along dim) or pandas.DataFrame (along axis) to mean=0 and std=1.
+
+    :param data: data to standardise
+    :param dim: name (xarray) or axis (pandas) of dimension which should be standardised
+    :return: mean, standard deviation and standardised data
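+
+    A minimal sketch with pandas (column names are assumptions for illustration):
+
+    .. code-block:: python
+
+        import pandas as pd
+
+        df = pd.DataFrame({"o3": [1., 2., 3.], "no2": [10., 20., 30.]})
+        mean, std, df_std = standardise(df, dim=0)  # each column of df_std has mean 0 and std 1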
     """
     return data.mean(dim), data.std(dim), (data - data.mean(dim)) / data.std(dim)
 
 
 def standardise_inverse(data: Data, mean: Data, std: Data) -> Data:
     """
-    This is the inverse function of `standardise` and therefore vanishes the standardising.
+    Apply the inverse of `standardise` to the data and thereby revert the standardisation.
+
+    :param data: standardised data
+    :param mean: mean of the standardisation
+    :param std: standard deviation of the standardisation
 
-    :param data:
-    :param mean:
-    :param std:
-    :return:
+    :return: data with the standardisation reverted
     """
     return data * std + mean
 
 
 def standardise_apply(data: Data, mean: Data, std: Data) -> Data:
     """
-    This applies `standardise` on data using given mean and std.
+    Apply `standardise` on data using given mean and std.
 
-    :param data:
-    :param mean:
-    :param std:
-    :return:
+    :param data: data to transform
+    :param mean: mean to use for transformation
+    :param std: standard deviation for transformation
+
+    :return: transformed data
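+
+    A typical use is to transform unseen data with statistics gained on training data (a sketch; `train` and
+    `test` are assumed data sets):
+
+    .. code-block:: python
+
+        mean, std, train_std = standardise(train, dim=0)
+        test_std = standardise_apply(test, mean, std)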
     """
     return (data - mean) / std
 
 
 def centre(data: Data, dim: Union[str, int]) -> Tuple[Data, None, Data]:
     """
-    This function centres a xarray.dataarray (along dim) or pandas.DataFrame (along axis) to mean=0
-
-    :param data:
-    :param string/int dim:
-            | for xarray.DataArray as string: name of dimension which should be standardised
-            | for pandas.DataFrame as int: axis of dimension which should be standardised
-    :return: xarray.DataArrays or pandas.DataFrames:
-            #. mean: Mean of data
-            #. std: Standard deviation of data
-            #. data: Standardised data
+    Centre a xarray.DataArray (along dim) or pandas.DataFrame (along axis) to mean=0.
+
+    :param data: data to centre
+    :param dim: name (xarray) or axis (pandas) of dimension which should be centred
+
+    :return: mean, None placeholder and centred data
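+
+    A one-line sketch (assuming `df` is a pandas.DataFrame, centred along axis 0):
+
+    .. code-block:: python
+
+        mean, _, df_centred = centre(df, dim=0)  # df_centred has column-wise mean 0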
     """
     return data.mean(dim), None, data - data.mean(dim)
 
 
 def centre_inverse(data: Data, mean: Data) -> Data:
     """
-    This function is the inverse function of `centre` and therefore adds the given values of mean to the data.
+    Apply the inverse of `centre` and thereby add the given mean back to the data.
 
-    :param data:
-    :param mean:
-    :return:
+    :param data: data to revert the centering on
+    :param mean: mean to use for the inverse transformation
+
+    :return: data with the centering reverted
     """
     return data + mean
 
 
 def centre_apply(data: Data, mean: Data) -> Data:
     """
-    This applies `centre` on data using given mean and std.
+    Apply `centre` on data using given mean.
+
+    :param data: data to transform
+    :param mean: mean to use for transformation
 
-    :param data:
-    :param mean:
-    :param std:
-    :return:
+    :return: transformed data
     """
     return data - mean
 
 
 def mean_squared_error(a, b):
+    """Calculate mean squared error."""
     return np.square(a - b).mean()
 
 
diff --git a/src/model_modules/model_class.py b/src/model_modules/model_class.py
index d6dcea179bcfa8a6ec41518db34b186e30d908fc..4de0bf5a6cd8f13898e9684f29e966ecd349c5be 100644
--- a/src/model_modules/model_class.py
+++ b/src/model_modules/model_class.py
@@ -1,9 +1,120 @@
+"""
+Module for neural models to use during experiment.
+
+To work properly, each customised model needs to inherit from AbstractModelClass and needs an implementation of the
+set_model and set_loss method.
+
+In this module, you can find some exemplary model classes that have been build and were running in a experiment.
+
+* `MyLittleModel`: small model implementation with a single 1x1 Conv, and 4 Dense layers (64, 32, 16, window_lead_time).
+* `MyBranchedModel`: a model with single  1x1 Conv, and 4 Dense layers (64, 32, 16, window_lead_time), it has three
+    output branches from different layers of the model.
+* `MyTowerModel`: a more complex model with inception blocks (called towers)
+* `MyPaperModel`: A model used for the publication: <Add Publication Title / Citation>
+
+In addition, a short introduction how to create your own model is given hereinafter.
+
+How to create a customised model?
+#################################
+
+* Create a new class:
+
+    .. code-block:: python
+
+        class MyCustomisedModel(AbstractModelClass):
+
+            def __init__(self, window_history_size, window_lead_time, channels):
+                super().__init__()
+                # settings
+                self.window_history_size = window_history_size
+                self.window_lead_time = window_lead_time
+                self.channels = channels
+                self.dropout_rate = 0.1
+
+                # apply to model
+                self.set_model()
+                self.set_loss()
+                self.set_custom_objects(loss=self.loss)
+
+* Make sure to call `super().__init__()` and at least `set_model()` and `set_loss()` in your custom init method.
+* If your model contains custom objects that are not part of keras, you need to add them to the custom objects. To do
+    this, call `set_custom_objects` with arbitrary kwargs. In the example shown above, the loss has been added because
+    it is not a standard keras loss. Apart from this, we always encourage you to add the loss as custom object, to
+    prevent potential errors when loading an already created model instead of training a new one.
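+
+    For instance, `MyPaperModel` registers its loss together with the `Padding2D` helper:
+
+    .. code-block:: python
+
+        self.set_custom_objects(loss=self.loss, Padding2D=Padding2D)
+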
+* Build your model inside `set_model()`, e.g.
+
+    .. code-block:: python
+
+        class MyCustomisedModel(AbstractModelClass):
+
+            def set_model(self):
+                x_input = keras.layers.Input(shape=(self.window_history_size + 1, 1, self.channels))
+                x_in = keras.layers.Conv2D(32, (1, 1), padding='same', name='{}_Conv_1x1'.format("major"))(x_input)
+                x_in = self.activation(name='{}_conv_act'.format("major"))(x_in)
+                x_in = keras.layers.Flatten(name='{}'.format("major"))(x_in)
+                x_in = keras.layers.Dropout(self.dropout_rate, name='{}_Dropout_1'.format("major"))(x_in)
+                x_in = keras.layers.Dense(16, name='{}_Dense_16'.format("major"))(x_in)
+                x_in = self.activation()(x_in)
+                x_in = keras.layers.Dense(self.window_lead_time, name='{}_Dense'.format("major"))(x_in)
+                out_main = self.activation()(x_in)
+                self.model = keras.Model(inputs=x_input, outputs=[out_main])
+
+* You are free in how you design your model. Just make sure to save it in the class attribute `model`.
+* Finally, set your custom loss.
+
+    .. code-block:: python
+
+        class MyCustomisedModel(AbstractModelClass):
+
+            def set_loss(self):
+                self.loss = keras.losses.mean_squared_error
+
+* If you have a branched model with multiple outputs, you need to consider the right ordering of the losses, e.g.
+
+    .. code-block:: python
+
+        class MyCustomisedModel(AbstractModelClass):
+
+            def set_model(self):
+                ...
+                self.model = keras.Model(inputs=x_input, outputs=[out_minor_1, out_minor_2, out_main])
+
+            def set_loss(self):
+                self.loss = [keras.losses.mean_absolute_error,  # for out_minor_1
+                             keras.losses.mean_squared_error,   # for out_minor_2
+                             keras.losses.mean_squared_error]   # for out_main
+
+
+How to access my customised model?
+##################################
+
+Once the customised model has been created, you can easily access the model with
+
+>>> MyCustomisedModel().model
+<your custom model>
+
+The loss is accessible via
+
+>>> MyCustomisedModel().loss
+<your custom loss>
+
+You can treat the instance of your model both as the class instance and as the model itself. If you call a method that
+belongs to the model rather than to the model class, you can apply the call directly on the instance without going
+through the model attribute.
+
+>>> MyCustomisedModel().model.compile()
+
+is therefore equal to the command
+
+>>> MyCustomisedModel().compile()
+
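+When loading an already compiled model from disk, hand keras the stored custom objects (a sketch; the file name
+"my_model.h5" is only an assumption):
+
+>>> keras.models.load_model("my_model.h5", custom_objects=MyCustomisedModel().custom_objects)
+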
+"""
+
 import src.model_modules.keras_extensions
 
 __author__ = "Lukas Leufen"
 __date__ = '2019-12-12'
 
-
 from abc import ABC
 from typing import Any, Callable, Dict
 
@@ -14,47 +125,44 @@ from src.model_modules.advanced_paddings import PadUtils, Padding2D
 
 
 class AbstractModelClass(ABC):
-
     """
-    The AbstractModelClass provides a unified skeleton for any model provided to the machine learning workflow. The
-    model can always be accessed by calling ModelClass.model or directly by an model method without parsing the model
-    attribute name (e.g. ModelClass.model.compile -> ModelClass.compile). Beside the model, this class provides the
-    corresponding loss function.
+    The AbstractModelClass provides a unified skeleton for any model provided to the machine learning workflow.
+
+    The model can always be accessed by calling ModelClass.model or directly by a model method without naming the
+    model attribute (e.g. ModelClass.model.compile -> ModelClass.compile). Besides the model, this class provides
+    the corresponding loss function.
     """
 
     def __init__(self) -> None:
-
-        """
-        Predefine internal attributes for model and loss.
-        """
-
+        """Predefine internal attributes for model and loss."""
         self.__model = None
         self.__loss = None
         self.model_name = self.__class__.__name__
         self.__custom_objects = {}
 
     def __getattr__(self, name: str) -> Any:
-
         """
-        Is called if __getattribute__ is not able to find requested attribute. Normally, the model class is saved into
-        a variable like `model = ModelClass()`. To bypass a call like `model.model` to access the _model attribute,
-        this method tries to search for the named attribute in the self.model namespace and returns this attribute if
-        available. Therefore, following expression is true: `ModelClass().compile == ModelClass().model.compile` as long
-        the called attribute/method is not part if the ModelClass itself.
+        Is called if __getattribute__ is not able to find the requested attribute.
+
+        Normally, the model class is saved into a variable like `model = ModelClass()`. To bypass a call like
+        `model.model` to access the _model attribute, this method tries to search for the named attribute in the
+        self.model namespace and returns this attribute if available. Therefore, the following expression is true:
+        `ModelClass().compile == ModelClass().model.compile` as long as the called attribute/method is not part of
+        the ModelClass itself.
+
         :param name: name of the attribute or method to call
+
         :return: attribute or method from self.model namespace
         """
-
         return self.model.__getattribute__(name)
 
     @property
     def model(self) -> keras.Model:
-
         """
         The model property containing a keras.Model instance.
+
         :return: the keras model
         """
-
         return self.__model
 
     @model.setter
@@ -63,17 +171,21 @@ class AbstractModelClass(ABC):
 
     @property
     def loss(self) -> Callable:
-
         """
-        The loss property containing a callable loss function. The loss function can be any keras loss or a customised
-        function. If the loss is a customised function, it must contain the internal loss(y_true, y_pred) function:
+        The loss property containing a callable loss function.
+
+        The loss function can be any keras loss or a customised function. If the loss is a customised function, it must
+        contain the internal loss(y_true, y_pred) function:
+
+        .. code-block:: python
+
             def customised_loss(args):
                 def loss(y_true, y_pred):
                     return actual_function(y_true, y_pred, args)
                 return loss
-        :return: the loss function
-        """
 
+        :return: loss function
+        """
         return self.__loss
 
     @loss.setter
@@ -83,9 +195,11 @@ class AbstractModelClass(ABC):
     @property
     def custom_objects(self) -> Dict:
         """
-        The custom objects property collects all non-keras utilities that are used in the model class. To load such a
-        customised and already compiled model (e.g. from local disk), this information is required.
-        :return: the custom objects in a dictionary
+        The custom objects property collects all non-keras utilities that are used in the model class.
+
+        To load such a customised and already compiled model (e.g. from local disk), this information is required.
+
+        :return: custom objects in a dictionary
         """
         return self.__custom_objects
 
@@ -96,22 +210,27 @@ class AbstractModelClass(ABC):
     def get_settings(self) -> Dict:
         """
         Get all class attributes that are not protected in the AbstractModelClass as dictionary.
+
         :return: all class attributes
         """
         return dict((k, v) for (k, v) in self.__dict__.items() if not k.startswith("_AbstractModelClass__"))
 
     def set_model(self):
-        pass
+        """Abstract method to set model."""
+        raise NotImplementedError
 
     def set_loss(self):
-        pass
+        """Abstract method to set loss."""
+        raise NotImplementedError
 
     def set_custom_objects(self, **kwargs) -> None:
         """
-        Set custom objects that are not part of keras framework. These custom objects are needed if an already compiled
-        model is loaded from disk. There is a special treatment for the Padding2D class, which is a base class for
-        different padding types. For a correct behaviour, all supported subclasses are added as custom objects in
-        addition to the given ones.
+        Set custom objects that are not part of keras framework.
+
+        These custom objects are needed if an already compiled model is loaded from disk. There is a special treatment
+        for the Padding2D class, which is a base class for different padding types. For a correct behaviour, all
+        supported subclasses are added as custom objects in addition to the given ones.
+
         :param kwargs: all custom objects, that should be saved
         """
         if "Padding2D" in kwargs.keys():
@@ -120,7 +239,6 @@ class AbstractModelClass(ABC):
 
 
 class MyLittleModel(AbstractModelClass):
-
     """
     A customised model with a 1x1 Conv, and 4 Dense layers (64, 32, 16, window_lead_time), where the last layer is the
     output layer depending on the window_lead_time parameter. Dropout is used between the Convolution and the first
@@ -128,9 +246,9 @@ class MyLittleModel(AbstractModelClass):
     """
 
     def __init__(self, window_history_size, window_lead_time, channels):
-
         """
         Sets model and loss depending on the given arguments.
+
         :param activation: activation function
         :param window_history_size: number of historical time steps included in the input data
         :param channels: number of variables used in input data
@@ -149,7 +267,8 @@ class MyLittleModel(AbstractModelClass):
         self.regularizer = keras.regularizers.l2(0.1)
         self.initial_lr = 1e-2
         self.optimizer = keras.optimizers.SGD(lr=self.initial_lr, momentum=0.9)
-        self.lr_decay = src.model_modules.keras_extensions.LearningRateDecay(base_lr=self.initial_lr, drop=.94, epochs_drop=10)
+        self.lr_decay = src.model_modules.keras_extensions.LearningRateDecay(base_lr=self.initial_lr, drop=.94,
+                                                                             epochs_drop=10)
         self.epochs = 20
         self.batch_size = int(256)
         self.activation = keras.layers.PReLU
@@ -160,9 +279,9 @@ class MyLittleModel(AbstractModelClass):
         self.set_custom_objects(loss=self.loss)
 
     def set_model(self):
-
         """
         Build the model.
+
         :param activation: activation function
         :param window_history_size: number of historical time steps included in the input data
         :param channels: number of variables used in input data
@@ -188,9 +307,9 @@ class MyLittleModel(AbstractModelClass):
         self.model = keras.Model(inputs=x_input, outputs=[out_main])
 
     def set_loss(self):
-
         """
         Set the loss
+
         :return: loss function
         """
 
@@ -198,20 +317,18 @@ class MyLittleModel(AbstractModelClass):
 
 
 class MyBranchedModel(AbstractModelClass):
-
     """
     A customised model
 
-
     with a 1x1 Conv, and 4 Dense layers (64, 32, 16, window_lead_time), where the last layer is the
     output layer depending on the window_lead_time parameter. Dropout is used between the Convolution and the first
     Dense layer.
     """
 
     def __init__(self, window_history_size, window_lead_time, channels):
-
         """
         Sets model and loss depending on the given arguments.
+
         :param activation: activation function
         :param window_history_size: number of historical time steps included in the input data
         :param channels: number of variables used in input data
@@ -230,7 +347,8 @@ class MyBranchedModel(AbstractModelClass):
         self.regularizer = keras.regularizers.l2(0.1)
         self.initial_lr = 1e-2
         self.optimizer = keras.optimizers.SGD(lr=self.initial_lr, momentum=0.9)
-        self.lr_decay = src.model_modules.keras_extensions.LearningRateDecay(base_lr=self.initial_lr, drop=.94, epochs_drop=10)
+        self.lr_decay = src.model_modules.keras_extensions.LearningRateDecay(base_lr=self.initial_lr, drop=.94,
+                                                                             epochs_drop=10)
         self.epochs = 20
         self.batch_size = int(256)
         self.activation = keras.layers.PReLU
@@ -241,9 +359,9 @@ class MyBranchedModel(AbstractModelClass):
         self.set_custom_objects(loss=self.loss)
 
     def set_model(self):
-
         """
         Build the model.
+
         :param activation: activation function
         :param window_history_size: number of historical time steps included in the input data
         :param channels: number of variables used in input data
@@ -273,9 +391,9 @@ class MyBranchedModel(AbstractModelClass):
         self.model = keras.Model(inputs=x_input, outputs=[out_minor_1, out_minor_2, out_main])
 
     def set_loss(self):
-
         """
         Set the loss
+
         :return: loss function
         """
 
@@ -286,9 +404,9 @@ class MyBranchedModel(AbstractModelClass):
 class MyTowerModel(AbstractModelClass):
 
     def __init__(self, window_history_size, window_lead_time, channels):
-
         """
         Sets model and loss depending on the given arguments.
+
         :param activation: activation function
         :param window_history_size: number of historical time steps included in the input data
         :param channels: number of variables used in input data
@@ -307,9 +425,10 @@ class MyTowerModel(AbstractModelClass):
         self.regularizer = keras.regularizers.l2(0.1)
         self.initial_lr = 1e-2
         self.optimizer = keras.optimizers.adam(lr=self.initial_lr)
-        self.lr_decay = src.model_modules.keras_extensions.LearningRateDecay(base_lr=self.initial_lr, drop=.94, epochs_drop=10)
+        self.lr_decay = src.model_modules.keras_extensions.LearningRateDecay(base_lr=self.initial_lr, drop=.94,
+                                                                             epochs_drop=10)
         self.epochs = 20
-        self.batch_size = int(256*4)
+        self.batch_size = int(256 * 4)
         self.activation = keras.layers.PReLU
 
         # apply to model
@@ -318,9 +437,9 @@ class MyTowerModel(AbstractModelClass):
         self.set_custom_objects(loss=self.loss)
 
     def set_model(self):
-
         """
         Build the model.
+
         :param activation: activation function
         :param window_history_size: number of historical time steps included in the input data
         :param channels: number of variables used in input data
@@ -344,7 +463,7 @@ class MyTowerModel(AbstractModelClass):
                         'activation': activation},
             'tower_3': {'reduction_filter': 8 * 2, 'tower_filter': 16 * 2 * 2, 'tower_kernel': (1, 1),
                         'activation': activation},
-            }
+        }
         pool_settings_dict2 = {'pool_kernel': (3, 1), 'tower_filter': 16, 'activation': activation}
 
         conv_settings_dict3 = {'tower_1': {'reduction_filter': 16 * 4, 'tower_filter': 32 * 2, 'tower_kernel': (3, 1),
@@ -361,7 +480,8 @@ class MyTowerModel(AbstractModelClass):
         inception_model = InceptionModelBase()
 
         X_input = keras.layers.Input(
-            shape=(self.window_history_size + 1, 1, self.channels))  # add 1 to window_size to include current time step t0
+            shape=(self.window_history_size + 1, 1, self.channels)
+        )  # add 1 to window_size to include current time step t0
 
         X_in = inception_model.inception_block(X_input, conv_settings_dict1, pool_settings_dict1,
                                                regularizer=self.regularizer,
@@ -369,12 +489,14 @@ class MyTowerModel(AbstractModelClass):
 
         X_in = keras.layers.Dropout(self.dropout_rate)(X_in)
 
-        X_in = inception_model.inception_block(X_in, conv_settings_dict2, pool_settings_dict2, regularizer=self.regularizer,
+        X_in = inception_model.inception_block(X_in, conv_settings_dict2, pool_settings_dict2,
+                                               regularizer=self.regularizer,
                                                batch_normalisation=True)
 
         X_in = keras.layers.Dropout(self.dropout_rate)(X_in)
 
-        X_in = inception_model.inception_block(X_in, conv_settings_dict3, pool_settings_dict3, regularizer=self.regularizer,
+        X_in = inception_model.inception_block(X_in, conv_settings_dict3, pool_settings_dict3,
+                                               regularizer=self.regularizer,
                                                batch_normalisation=True)
         #############################################
 
@@ -384,9 +506,9 @@ class MyTowerModel(AbstractModelClass):
         self.model = keras.Model(inputs=X_input, outputs=[out_main])
 
     def set_loss(self):
-
         """
         Set the loss
+
         :return: loss function
         """
 
@@ -396,9 +518,9 @@ class MyTowerModel(AbstractModelClass):
 class MyPaperModel(AbstractModelClass):
 
     def __init__(self, window_history_size, window_lead_time, channels):
-
         """
         Sets model and loss depending on the given arguments.
+
         :param activation: activation function
         :param window_history_size: number of historical time steps included in the input data
         :param channels: number of variables used in input data
@@ -418,7 +540,8 @@ class MyPaperModel(AbstractModelClass):
         self.initial_lr = 1e-3
         # self.optimizer = keras.optimizers.adam(lr=self.initial_lr, amsgrad=True)
         self.optimizer = keras.optimizers.SGD(lr=self.initial_lr, momentum=0.9)
-        self.lr_decay = src.model_modules.keras_extensions.LearningRateDecay(base_lr=self.initial_lr, drop=.94, epochs_drop=10)
+        self.lr_decay = src.model_modules.keras_extensions.LearningRateDecay(base_lr=self.initial_lr, drop=.94,
+                                                                             epochs_drop=10)
         self.epochs = 150
         self.batch_size = int(256 * 2)
         self.activation = keras.layers.ELU
@@ -430,9 +553,9 @@ class MyPaperModel(AbstractModelClass):
         self.set_custom_objects(loss=self.loss, Padding2D=Padding2D)
 
     def set_model(self):
-
         """
         Build the model.
+
         :param activation: activation function
         :param window_history_size: number of historical time steps included in the input data
         :param channels: number of variables used in input data
@@ -441,7 +564,7 @@ class MyPaperModel(AbstractModelClass):
         :return: built keras model
         """
         activation = self.activation
-        first_kernel = (3,1)
+        first_kernel = (3, 1)
         first_filters = 16
 
         conv_settings_dict1 = {
@@ -481,7 +604,8 @@ class MyPaperModel(AbstractModelClass):
         inception_model = InceptionModelBase()
 
         X_input = keras.layers.Input(
-            shape=(self.window_history_size + 1, 1, self.channels))  # add 1 to window_size to include current time step t0
+            shape=(self.window_history_size + 1, 1, self.channels)
+        )  # add 1 to window_size to include current time step t0
 
         pad_size = PadUtils.get_padding_for_same(first_kernel)
         # X_in = adv_pad.SymmetricPadding2D(padding=pad_size)(X_input)
@@ -493,7 +617,6 @@ class MyPaperModel(AbstractModelClass):
                                    name="First_conv_{}x{}".format(first_kernel[0], first_kernel[1]))(X_in)
         X_in = self.activation(name='FirstAct')(X_in)
 
-
         X_in = inception_model.inception_block(X_in, conv_settings_dict1, pool_settings_dict1,
                                                regularizer=self.regularizer,
                                                batch_normalisation=True,
@@ -503,7 +626,8 @@ class MyPaperModel(AbstractModelClass):
 
         X_in = keras.layers.Dropout(self.dropout_rate)(X_in)
 
-        X_in = inception_model.inception_block(X_in, conv_settings_dict2, pool_settings_dict2, regularizer=self.regularizer,
+        X_in = inception_model.inception_block(X_in, conv_settings_dict2, pool_settings_dict2,
+                                               regularizer=self.regularizer,
                                                batch_normalisation=True, padding=self.padding)
 
         # X_in = keras.layers.Dropout(self.dropout_rate)(X_in)
@@ -518,9 +642,9 @@ class MyPaperModel(AbstractModelClass):
         self.model = keras.Model(inputs=X_input, outputs=[out_minor1, out_main])
 
     def set_loss(self):
-
         """
         Set the loss
+
         :return: loss function
         """