diff --git a/src/model_modules/model_class.py b/src/model_modules/model_class.py
index ebbd7a25cef9031436d932a6502c9726bfe3e318..84f98690754a5d21a579270f4357636db692747d 100644
--- a/src/model_modules/model_class.py
+++ b/src/model_modules/model_class.py
@@ -10,6 +10,8 @@ from typing import Any, Callable
 import keras
 from src.model_modules.inception_model import InceptionModelBase
 from src.model_modules.flatten import flatten_tail
+import src.model_modules.advanced_paddings as adv_pad
+from src.model_modules.keras_extensions import LearningRateDecay
 
 
 class AbstractModelClass(ABC):
@@ -351,3 +352,133 @@ class MyTowerModel(AbstractModelClass):
         """
 
         self.loss = [keras.losses.mean_squared_error]
+
+
+class MyPaperModel(AbstractModelClass):
+
+    def __init__(self, window_history_size, window_lead_time, channels):
+
+        """
+        Sets model and loss depending on the given arguments.
+        :param activation: activation function
+        :param window_history_size: number of historical time steps included in the input data
+        :param channels: number of variables used in input data
+        :param regularizer: <not used here>
+        :param dropout_rate: dropout rate used in the model [0, 1)
+        :param window_lead_time: number of time steps to forecast in the output layer
+        """
+
+        super().__init__()
+
+        # settings
+        self.window_history_size = window_history_size
+        self.window_lead_time = window_lead_time
+        self.channels = channels
+        self.dropout_rate = .3
+        self.regularizer = keras.regularizers.l2(0.001)
+        self.initial_lr = 1e-3
+        # self.optimizer = keras.optimizers.adam(lr=self.initial_lr, amsgrad=True)
+        self.optimizer = keras.optimizers.SGD(lr=self.initial_lr, momentum=0.9)
+        self.lr_decay = LearningRateDecay(base_lr=self.initial_lr, drop=.94, epochs_drop=10)
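+        # step-decay schedule (as the argument names suggest): scale the learning rate by 0.94 every 10 epochs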
+        self.epochs = 150
+        self.batch_size = 256 * 2
+        self.activation = keras.layers.ELU
+        self.padding = "SymPad2D"
+
+        # apply to model
+        self.set_model()
+        self.set_loss()
+
+    def set_model(self):
+
+        """
+        Build the model.
+        :param activation: activation function
+        :param window_history_size: number of historical time steps included in the input data
+        :param channels: number of variables used in input data
+        :param dropout_rate: dropout rate used in the model [0, 1)
+        :param window_lead_time: number of time steps to forecast in the output layer
+        :return: built keras model
+        """
+        activation = self.activation
+        first_kernel = (3, 1)
+        first_filters = 16
+
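+        # Tower settings for the stacked inception blocks; all kernels act along the
+        # time axis only, since the second input dimension has size 1.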
+        conv_settings_dict1 = {
+            'tower_1': {'reduction_filter': 8, 'tower_filter': 16 * 2, 'tower_kernel': (3, 1),
+                        'activation': activation},
+            'tower_2': {'reduction_filter': 8, 'tower_filter': 16 * 2, 'tower_kernel': (5, 1),
+                        'activation': activation},
+            'tower_3': {'reduction_filter': 8, 'tower_filter': 16 * 2, 'tower_kernel': (1, 1),
+                        'activation': activation},
+            # 'tower_4':{'reduction_filter':8, 'tower_filter':8*2, 'tower_kernel':(7,1), 'activation':activation},
+        }
+        pool_settings_dict1 = {'pool_kernel': (3, 1), 'tower_filter': 16, 'activation': activation}
+
+        conv_settings_dict2 = {
+            'tower_1': {'reduction_filter': 64, 'tower_filter': 32 * 2, 'tower_kernel': (3, 1),
+                        'activation': activation},
+            'tower_2': {'reduction_filter': 64, 'tower_filter': 32 * 2, 'tower_kernel': (5, 1),
+                        'activation': activation},
+            'tower_3': {'reduction_filter': 64, 'tower_filter': 32 * 2, 'tower_kernel': (1, 1),
+                        'activation': activation},
+            # 'tower_4':{'reduction_filter':8*2, 'tower_filter':16*2, 'tower_kernel':(7,1), 'activation':activation},
+        }
+        pool_settings_dict2 = {'pool_kernel': (3, 1), 'tower_filter': 32, 'activation': activation}
+
+        conv_settings_dict3 = {
+            'tower_1': {'reduction_filter': 64 * 2, 'tower_filter': 32 * 4, 'tower_kernel': (3, 1),
+                        'activation': activation},
+            'tower_2': {'reduction_filter': 64 * 2, 'tower_filter': 32 * 4, 'tower_kernel': (5, 1),
+                        'activation': activation},
+            'tower_3': {'reduction_filter': 64 * 2, 'tower_filter': 32 * 4, 'tower_kernel': (1, 1),
+                        'activation': activation},
+            # 'tower_4':{'reduction_filter':16*4, 'tower_filter':32, 'tower_kernel':(7,1), 'activation':activation},
+        }
+        pool_settings_dict3 = {'pool_kernel': (3, 1), 'tower_filter': 32, 'activation': activation}
+
+        ##########################################
+        inception_model = InceptionModelBase()
+
+        X_input = keras.layers.Input(
+            shape=(self.window_history_size + 1, 1, self.channels))  # add 1 to window_size to include current time step t0
+
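+        # compute the padding needed so the first convolution keeps the temporal
+        # dimension unchanged ("same"), applied below as symmetric padding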
+        pad_size = adv_pad.PadUtils.get_padding_for_same(first_kernel)
+        X_in = inception_model.padding_layer(self.padding)(padding=pad_size, name="SymPad")(X_input)
+        X_in = keras.layers.Conv2D(filters=first_filters,
+                                   kernel_size=first_kernel,
+                                   kernel_regularizer=self.regularizer,
+                                   name="First_conv_{}x{}".format(first_kernel[0], first_kernel[1]))(X_in)
+        X_in = self.activation(name='FirstAct')(X_in)
+
+        X_in = inception_model.inception_block(X_in, conv_settings_dict1, pool_settings_dict1,
+                                               regularizer=self.regularizer,
+                                               batch_normalisation=True,
+                                               padding=self.padding)
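+        # auxiliary output head after the first inception block (own MSE loss, see set_loss)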
+        out_minor1 = flatten_tail(X_in, 'minor_1', False, self.dropout_rate, self.window_lead_time,
+                                  self.activation, 32, 64)
+
+        X_in = keras.layers.Dropout(self.dropout_rate)(X_in)
+
+        X_in = inception_model.inception_block(X_in, conv_settings_dict2, pool_settings_dict2, regularizer=self.regularizer,
+                                               batch_normalisation=True, padding=self.padding)
+
+        # X_in = keras.layers.Dropout(self.dropout_rate)(X_in)
+        #
+        # X_in = inception_model.inception_block(X_in, conv_settings_dict3, pool_settings_dict3, regularizer=self.regularizer,
+        #                                        batch_normalisation=True)
+        #############################################
+
+        out_main = flatten_tail(X_in, 'Main', activation=activation, bound_weight=False, dropout_rate=self.dropout_rate,
+                                reduction_filter=64 * 2, first_dense=64 * 2, window_lead_time=self.window_lead_time)
+
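+        # two-headed model: the auxiliary (minor_1) and main outputs are trained jointly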
+        self.model = keras.Model(inputs=X_input, outputs=[out_minor1, out_main])
+
+    def set_loss(self):
+
+        """
+        Set the loss
+        :return: loss function
+        """
+
+        self.loss = [keras.losses.mean_squared_error, keras.losses.mean_squared_error]
diff --git a/src/run_modules/model_setup.py b/src/run_modules/model_setup.py
index 32ca0d2e82af32d8164d80ac42731e10f431a458..307fd63018df1e4825fa8fbee1fb07f6c8fef67e 100644
--- a/src/run_modules/model_setup.py
+++ b/src/run_modules/model_setup.py
@@ -12,6 +12,7 @@ from src.model_modules.keras_extensions import HistoryAdvanced, CallbackHandler
 # from src.model_modules.model_class import MyBranchedModel as MyModel
 from src.model_modules.model_class import MyLittleModel as MyModel
 # from src.model_modules.model_class import MyTowerModel as MyModel
+# from src.model_modules.model_class import MyPaperModel as MyModel
 from src.run_modules.run_environment import RunEnvironment