diff --git a/src/model_modules/model_class.py b/src/model_modules/model_class.py
new file mode 100644
index 0000000000000000000000000000000000000000..c16c2ecd2a104679b35edb21d43435822baedc85
--- /dev/null
+++ b/src/model_modules/model_class.py
@@ -0,0 +1,114 @@
+__author__ = "Lukas Leufen"
+__date__ = '2019-12-12'
+
+
+from abc import ABC
+from typing import Any, Callable
+
+import keras
+
+
+class AbstractModelClass(ABC):
+
+    """
+    The AbstractModelClass provides a unified skeleton for any model used in the machine learning workflow. The
+    model can always be accessed via ModelClass.model or by calling a model method directly, without spelling out the
+    model attribute name (e.g. ModelClass.model.compile -> ModelClass.compile). Besides the model, this class provides
+    the corresponding loss function.
+    """
+
+    def __init__(self) -> None:
+        """
+        Predefine internal attributes for model and loss.
+        """
+        self._model = None
+        self._loss = None
+
+    def __getattr__(self, name: str) -> Any:
+        """
+        Is called if __getattribute__ is not able to find the requested attribute. Normally, the model class is stored
+        in a variable like `model = ModelClass()`. To avoid a call like `model.model` when accessing the _model
+        attribute, this method searches for the named attribute in the self.model namespace and returns it if
+        available. Therefore, the following expression is true: `ModelClass().compile == ModelClass().model.compile`
+        as long as the called attribute/method is not part of the ModelClass itself.
+        :param name: name of the attribute or method to call
+        :return: attribute or method from self.model namespace
+        """
+        return self.model.__getattribute__(name)
+
+    @property
+    def model(self) -> keras.Model:
+        """
+        The model property containing a keras.Model instance.
+        :return: the keras model
+        """
+        return self._model
+
+    @property
+    def loss(self) -> Callable:
+        """
+        The loss property containing a callable loss function. The loss can be any keras loss or a customised
+        function. A customised function must wrap an internal loss(y_true, y_pred) function and return it:
+            def customised_loss(args):
+                def loss(y_true, y_pred):
+                    return actual_function(y_true, y_pred, args)
+                return loss
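+        For instance, a weighted MSE following this pattern (an illustrative sketch; `weighted_mse` and
+        `weight` are example names, not part of this module):
+            def weighted_mse(weight):
+                def loss(y_true, y_pred):
+                    return weight * keras.losses.mean_squared_error(y_true, y_pred)
+                return loss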
+        :return: the loss function
+        """
+        return self._loss
+
+
+class MyLittleModel(AbstractModelClass):
+
+    """
+    A customised model with a 1x1 convolution and four dense layers (64, 32, 16, window_lead_time), where the last
+    layer is the output layer and its width is given by the window_lead_time parameter. Dropout is applied between the
+    convolution and the first dense layer.
+    """
+
+    def __init__(self, activation, window_history_size, channels, regularizer, dropout_rate, window_lead_time):
+        """
+        Sets model and loss depending on the given arguments.
+        :param activation: activation function
+        :param window_history_size: number of historical time steps included in the input data
+        :param channels: number of variables used in input data
+        :param regularizer: <not used here>
+        :param dropout_rate: dropout rate used in the model [0, 1)
+        :param window_lead_time: number of time steps to forecast in the output layer
+        """
+        super().__init__()
+        self.set_model(activation, window_history_size, channels, dropout_rate, window_lead_time)
+        self.set_loss()
+
+    def set_model(self, activation, window_history_size, channels, dropout_rate, window_lead_time):
+        """
+        Build the model.
+        :param activation: activation function
+        :param window_history_size: number of historical time steps included in the input data
+        :param channels: number of variables used in input data
+        :param dropout_rate: dropout rate used in the model [0, 1)
+        :param window_lead_time: number of time steps to forecast in the output layer
+        :return: None (the built keras model is stored in the internal _model attribute)
+        """
+        X_input = keras.layers.Input(shape=(window_history_size + 1, 1, channels))  # add 1 to window_history_size to include the current time step t0
+        X_in = keras.layers.Conv2D(32, (1, 1), padding='same', name='{}_Conv_1x1'.format("major"))(X_input)
+        X_in = activation(name='{}_conv_act'.format("major"))(X_in)
+        X_in = keras.layers.Flatten(name='{}'.format("major"))(X_in)
+        X_in = keras.layers.Dropout(dropout_rate, name='{}_Dropout_1'.format("major"))(X_in)
+        X_in = keras.layers.Dense(64, name='{}_Dense_64'.format("major"))(X_in)
+        X_in = activation()(X_in)
+        X_in = keras.layers.Dense(32, name='{}_Dense_32'.format("major"))(X_in)
+        X_in = activation()(X_in)
+        X_in = keras.layers.Dense(16, name='{}_Dense_16'.format("major"))(X_in)
+        X_in = activation()(X_in)
+        X_in = keras.layers.Dense(window_lead_time, name='{}_Dense'.format("major"))(X_in)
+        out_main = activation()(X_in)
+        self._model = keras.Model(inputs=X_input, outputs=[out_main])
+
+    def set_loss(self):
+        """
+        Set the loss to mean squared error.
+        :return: None (the loss function is stored in the internal _loss attribute)
+        """
+        self._loss = keras.losses.mean_squared_error
+
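+
+# Usage sketch (illustrative only; the argument values and the SGD optimizer are
+# assumptions, while keras.layers.PReLU mirrors the activation set in ModelSetup):
+#     model_class = MyLittleModel(activation=keras.layers.PReLU, window_history_size=13,
+#                                 channels=2, regularizer=None, dropout_rate=0.1,
+#                                 window_lead_time=3)
+#     model_class.compile(optimizer=keras.optimizers.SGD(), loss=model_class.loss)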
diff --git a/src/modules/model_setup.py b/src/modules/model_setup.py
index f6c25aff51372f84ede5cda0884fd7c603ffaa6b..947b9fa3cb54413ba24bae31791f0b2057a64fe7 100644
--- a/src/modules/model_setup.py
+++ b/src/modules/model_setup.py
@@ -15,6 +15,7 @@ from src.modules.run_environment import RunEnvironment
 from src.helpers import l_p_loss, LearningRateDecay
 from src.inception_model import InceptionModelBase
 from src.flatten import flatten_tail
+from src.model_modules.model_class import MyLittleModel
 
 
 class ModelSetup(RunEnvironment):
@@ -53,7 +54,7 @@ class ModelSetup(RunEnvironment):
 
     def compile_model(self):
         optimizer = self.data_store.get("optimizer", self.scope)
-        loss = self.data_store.get("loss", self.scope)
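+        # the loss is now provided by the model class itself rather than the data store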
+        loss = self.model.loss
         self.model.compile(optimizer=optimizer, loss=loss, metrics=["mse", "mae"])
         self.data_store.put("model", self.model, self.scope)
 
@@ -71,7 +72,7 @@ class ModelSetup(RunEnvironment):
     def build_model(self):
         args_list = ["activation", "window_history_size", "channels", "regularizer", "dropout_rate", "window_lead_time"]
         args = self.data_store.create_args_dict(args_list, self.scope)
-        self.model = my_little_model(**args)
+        self.model = MyLittleModel(**args)
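+        # self.model is an AbstractModelClass instance; attribute access such as
+        # self.model.compile is forwarded to the wrapped keras model via __getattr__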
 
     def plot_model(self):  # pragma: no cover
         with tf.device("/cpu:0"):
@@ -109,10 +110,6 @@ class ModelSetup(RunEnvironment):
         activation = keras.layers.PReLU  # ELU #LeakyReLU  keras.activations.tanh #
         self.data_store.put("activation", activation, self.scope)
 
-        # set los
-        loss_all = my_little_loss()
-        self.data_store.put("loss", loss_all, self.scope)
-
 
 def my_loss():
     loss = l_p_loss(4)