diff --git a/ACKNOWLEDGMENTS .md b/ACKNOWLEDGMENTS.md
similarity index 100%
rename from ACKNOWLEDGMENTS .md
rename to ACKNOWLEDGMENTS.md
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4f59375d8ee3c245e7d8008e7e8c6d6ff13b3d96..d3989b65d2206d26aff9778582ef99014e45ce2f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,16 @@
 # Changelog
 All notable changes to this project will be documented in this file.
 
+## v1.2.1 -  2021-02-08  - bug fix for recursive import error
+
+### general:
+
+* applied bug fix for recursive import error
+
+### technical:
+
+* bug fix for recursive import error, #269
+
 ## v1.2.0 -  2020-12-18  - parallel preprocessing and improved data handlers
 
 ### general:
diff --git a/HPC_setup/create_runscripts_HPC.sh b/HPC_setup/create_runscripts_HPC.sh
index 7bf08a34c6285e31895d817735f319ddde5bfb04..5e37d820ae1241c09c1c87c141bdcf005044a3b7 100755
--- a/HPC_setup/create_runscripts_HPC.sh
+++ b/HPC_setup/create_runscripts_HPC.sh
@@ -87,7 +87,7 @@ timestamp=\`date +"%Y-%m-%d_%H%M-%S"\`
 
 export PYTHONPATH=\${PWD}/venv_${hpcsys}/lib/python3.6/site-packages:\${PYTHONPATH}
 
-srun python run.py --experiment_date=\$timestamp
+srun --cpu-bind=none python run.py --experiment_date=\$timestamp
 EOT
 
   echo "Created runscript: run_${hpcsys}_$1.bash"
@@ -112,7 +112,7 @@ timestamp=\`date +"%Y-%m-%d_%H%M-%S"\`
 
 export PYTHONPATH=\${PWD}/venv_${hpcsys}/lib/python3.6/site-packages:\${PYTHONPATH}
 
-srun python run_HPC.py --experiment_date=\$timestamp
+srun --cpu-bind=none python run_HPC.py --experiment_date=\$timestamp
 EOT
 
 fi
diff --git a/README.md b/README.md
index bfb5996a29b1a12ad8b9e8b280ea84750805d951..3733882832181c721188005050f775e40ec23878 100644
--- a/README.md
+++ b/README.md
@@ -31,7 +31,7 @@ HPC systems, see [here](#special-instructions-for-installation-on-jülich-hpc-sy
 * Installation of **MLAir**:
     * Either clone MLAir from the [gitlab repository](https://gitlab.version.fz-juelich.de/toar/mlair.git) 
       and use it without installation (beside the requirements) 
-    * or download the distribution file ([current version](https://gitlab.version.fz-juelich.de/toar/mlair/-/blob/master/dist/mlair-1.2.0-py3-none-any.whl)) 
+    * or download the distribution file ([current version](https://gitlab.version.fz-juelich.de/toar/mlair/-/blob/master/dist/mlair-1.2.1-py3-none-any.whl)) 
       and install it via `pip install <dist_file>.whl`. In this case, you can simply import MLAir in any python script 
       inside your virtual environment using `import mlair`.
 * (tf) Currently, TensorFlow-1.13 is mentioned in the requirements. We already tested the TensorFlow-1.15 version and couldn't
diff --git a/conftest.py b/conftest.py
index 08641ff36543dbfba7109f84616ead8d2b472891..abb0c0f52757e4b2228d7d48e3dc07e08b302841 100644
--- a/conftest.py
+++ b/conftest.py
@@ -58,10 +58,13 @@ def default_session_fixture(request):
     :type request: _pytest.python.SubRequest
     :return:
     """
-    patched = mock.patch("multiprocessing.cpu_count", return_value=1)
-    patched.__enter__()
+    # patched = mock.patch("multiprocessing.cpu_count", return_value=1)
+    # patched.__enter__()
 
-    def unpatch():
-        patched.__exit__()
+    # def unpatch():
+    #    patched.__exit__()
 
-    request.addfinalizer(unpatch)
+    # request.addfinalizer(unpatch)
+
+    with mock.patch("multiprocessing.cpu_count", return_value=1):
+        yield
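
The rewritten fixture relies on pytest's generator-fixture protocol: everything before `yield` is setup, everything after (here, the implicit exit of the `with` block) is teardown, so the patch is reverted even when a test fails. A minimal self-contained sketch of the same pattern (fixture and test names are hypothetical):

```python
# Minimal sketch of a session-wide patch via a generator fixture (names hypothetical).
import multiprocessing
from unittest import mock

import pytest


@pytest.fixture(autouse=True)
def single_cpu():
    # active for the duration of each test; reverted on teardown, even on failure
    with mock.patch("multiprocessing.cpu_count", return_value=1):
        yield


def test_cpu_count_is_patched():
    assert multiprocessing.cpu_count() == 1
```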
diff --git a/dist/mlair-1.2.1-py3-none-any.whl b/dist/mlair-1.2.1-py3-none-any.whl
new file mode 100644
index 0000000000000000000000000000000000000000..686330f087aed80c9d9e86d77332a8c8ae1f96d3
Binary files /dev/null and b/dist/mlair-1.2.1-py3-none-any.whl differ
diff --git a/mlair/__init__.py b/mlair/__init__.py
index e9a157ca5bba11b22e80df0f3f18092fb0f32db6..e8ad9509e3fb90826c862d1a17641df071168b18 100644
--- a/mlair/__init__.py
+++ b/mlair/__init__.py
@@ -1,7 +1,7 @@
 __version_info__ = {
     'major': 1,
     'minor': 2,
-    'micro': 0,
+    'micro': 1,
 }
 
 from mlair.run_modules import RunEnvironment, ExperimentSetup, PreProcessing, ModelSetup, Training, PostProcessing
diff --git a/mlair/data_handler/default_data_handler.py b/mlair/data_handler/default_data_handler.py
index 5a62731de44cdfa24a72cdd0d200ddb561be29c0..ddf276cf2d88c108d8622c507471f989c4f99e8b 100644
--- a/mlair/data_handler/default_data_handler.py
+++ b/mlair/data_handler/default_data_handler.py
@@ -12,6 +12,8 @@ import shutil
 from functools import reduce
 from typing import Tuple, Union, List
 import multiprocessing
+import psutil
+import dask
 
 import numpy as np
 import xarray as xr
@@ -82,11 +84,20 @@ class DefaultDataHandler(AbstractDataHandler):
         if store_processed_data is True:
             self._cleanup() if fresh_store is True else None
             data = {"X": self._X, "Y": self._Y, "X_extreme": self._X_extreme, "Y_extreme": self._Y_extreme}
+            data = self._force_dask_computation(data)
             with open(self._save_file, "wb") as f:
                 pickle.dump(data, f)
             logging.debug(f"save pickle data to {self._save_file}")
             self._reset_data()
 
+    @staticmethod
+    def _force_dask_computation(data):
+        try:
+            data = dask.compute(data)[0]
+        except Exception:  # let non-dask data pass through unchanged
+            pass
+        return data
+
     def _load(self):
         try:
             with open(self._save_file, "rb") as f:
@@ -270,7 +281,8 @@ class DefaultDataHandler(AbstractDataHandler):
 
         if multiprocessing.cpu_count() > 1:  # parallel solution
             logging.info("use parallel transformation approach")
-            pool = multiprocessing.Pool()
+            pool = multiprocessing.Pool(
+                min([psutil.cpu_count(logical=False), len(set_stations), 16]))  # use only physical cpus
             logging.info(f"running {getattr(pool, '_processes')} processes in parallel")
             output = [
                 pool.apply_async(f_proc, args=(cls.data_handler_transformation, station), kwds=sp_keys)
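
Two ideas are combined here: `_force_dask_computation` materialises any lazy dask objects before pickling so the cache file holds concrete values, and the pool is capped at min(physical cores, number of stations, 16) to avoid oversubscribing hyperthreaded HPC nodes. A toy sketch of the dask part (values are illustrative):

```python
# Toy sketch: force lazy dask values into memory before serialisation.
import dask
import dask.array as da

data = {"X": da.ones((3, 3)), "Y": 42}  # mixture of lazy and eager values
data = dask.compute(data)[0]            # traverses the dict; lazy arrays become numpy arrays
print(type(data["X"]))                  # <class 'numpy.ndarray'>
```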
diff --git a/mlair/helpers/statistics.py b/mlair/helpers/statistics.py
index ea5a9f05c8ff91a5cd6be678ad03d12b923a4bec..3631597aedb90b3411163a42490e9c023bad706a 100644
--- a/mlair/helpers/statistics.py
+++ b/mlair/helpers/statistics.py
@@ -196,9 +196,23 @@ def log_apply(data: Data, mean: Data, std: Data) -> Data:
     return standardise_apply(np.log1p(data), mean, std)
 
 
-def mean_squared_error(a, b):
+def mean_squared_error(a, b, dim=None):
     """Calculate mean squared error."""
-    return np.square(a - b).mean()
+    return np.square(a - b).mean(dim)
+
+
+def mean_absolute_error(a, b, dim=None):
+    """Calculate mean absolute error."""
+    return np.abs(a - b).mean(dim)
+
+
+def calculate_error_metrics(a, b, dim):
+    """Calculate MSE, RMSE, and MAE. Additionally return number of used values for calculation."""
+    mse = mean_squared_error(a, b, dim)
+    rmse = np.sqrt(mse)
+    mae = mean_absolute_error(a, b, dim)
+    n = (a - b).notnull().sum(dim)
+    return {"mse": mse, "rmse": rmse, "mae": mae, "n": n}
 
 
 class SkillScores:
@@ -311,7 +325,6 @@ class SkillScores:
 
         for iahead in ahead_names:
             data = internal_data.sel(ahead=iahead)
-            external_data = self.external_data.sel(ahead=iahead, type=[self.observation_name])
 
             skill_score.loc[["CASE I", "AI", "BI", "CI"], iahead] = np.stack(self._climatological_skill_score(
                 data, mu_type=1, forecast_name=forecast_name, observation_name=self.observation_name).values.flatten())
@@ -319,14 +332,15 @@ class SkillScores:
             skill_score.loc[["CASE II", "AII", "BII"], iahead] = np.stack(self._climatological_skill_score(
                 data, mu_type=2, forecast_name=forecast_name, observation_name=self.observation_name).values.flatten())
 
-            # if external_data is not None:
-            skill_score.loc[["CASE III", "AIII"], iahead] = np.stack(self._climatological_skill_score(
-                data, mu_type=3, forecast_name=forecast_name, observation_name=self.observation_name,
-                external_data=external_data).values.flatten())
+            if self.external_data is not None:
+                external_data = self.external_data.sel(ahead=iahead, type=[self.observation_name])
+                skill_score.loc[["CASE III", "AIII"], iahead] = np.stack(self._climatological_skill_score(
+                    data, mu_type=3, forecast_name=forecast_name, observation_name=self.observation_name,
+                    external_data=external_data).values.flatten())
 
-            skill_score.loc[["CASE IV", "AIV", "BIV", "CIV"], iahead] = np.stack(self._climatological_skill_score(
-                data, mu_type=4, forecast_name=forecast_name, observation_name=self.observation_name,
-                external_data=external_data).values.flatten())
+                skill_score.loc[["CASE IV", "AIV", "BIV", "CIV"], iahead] = np.stack(self._climatological_skill_score(
+                    data, mu_type=4, forecast_name=forecast_name, observation_name=self.observation_name,
+                    external_data=external_data).values.flatten())
 
         return skill_score
 
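Because `mean` and `notnull().sum()` are called with a named dimension, `calculate_error_metrics` is geared towards xarray objects: NaN pairs are skipped by xarray's mean, and `n` reports how many values actually entered the calculation. A hedged usage sketch with toy data:

```python
# Usage sketch for calculate_error_metrics with toy xarray data (values illustrative).
import numpy as np
import xarray as xr

from mlair.helpers.statistics import calculate_error_metrics

coords = {"index": np.arange(5)}
a = xr.DataArray([1.0, 2.0, 3.0, 4.0, np.nan], dims="index", coords=coords)
b = xr.DataArray([1.5, 2.0, 2.0, 4.0, 5.0], dims="index", coords=coords)
metrics = calculate_error_metrics(a, b, dim="index")
print(float(metrics["rmse"]), int(metrics["n"]))  # n counts non-NaN pairs only
```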
diff --git a/mlair/helpers/tables.py b/mlair/helpers/tables.py
new file mode 100644
index 0000000000000000000000000000000000000000..e7628ba4f88f56a80eb321a3210d4699148fc485
--- /dev/null
+++ b/mlair/helpers/tables.py
@@ -0,0 +1,24 @@
+import pandas as pd
+import numpy as np
+import os
+
+
+def create_column_format_for_tex(df: pd.DataFrame) -> str:
+    """
+    Create a column format string for a LaTeX table based on the shape of a given DataFrame.
+
+    Uses 'c' as the position for each column; the first element is set to 'l', the last to 'r'.
+    """
+    column_format = np.repeat('c', df.shape[1] + 1)
+    column_format[0] = 'l'
+    column_format[-1] = 'r'
+    column_format = ''.join(column_format.tolist())
+    return column_format
+
+
+def save_to_tex(path, filename, column_format, df, na_rep='---'):
+    df.to_latex(os.path.join(path, filename), na_rep=na_rep, column_format=column_format)
+
+
+def save_to_md(path, filename, df, mode="w", encoding='utf-8', tablefmt="github"):
+    df.to_markdown(open(os.path.join(path, filename), mode=mode, encoding=encoding), tablefmt=tablefmt)
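
These helpers mirror what `PreProcessing` used to do inline (see the pre_processing.py hunks below), now reusable by `PostProcessing.report_error_metrics`. A short usage sketch (paths and data are hypothetical; markdown export needs the optional tabulate dependency):

```python
# Usage sketch for the new tables helpers (paths and data are hypothetical).
import pandas as pd

from mlair.helpers import tables

df = pd.DataFrame({"train": [10, 20], "val": [3, 5]}, index=["DEBW107", "DEBW013"])
column_format = tables.create_column_format_for_tex(df)  # "lcr": index left, last column right
tables.save_to_tex(".", "sample.tex", column_format=column_format, df=df)
tables.save_to_md(".", "sample.md", df=df)  # requires tabulate for DataFrame.to_markdown
```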
diff --git a/mlair/model_modules/__init__.py b/mlair/model_modules/__init__.py
index ea2067bdfdaacb6290157be681786212b0422812..96c92108ca4cbe63460722db12e61d9343593b06 100644
--- a/mlair/model_modules/__init__.py
+++ b/mlair/model_modules/__init__.py
@@ -1,3 +1,3 @@
 """Collection of all modules that are related to a model."""
 
-from .model_class import AbstractModelClass
+from .abstract_model_class import AbstractModelClass
diff --git a/mlair/model_modules/abstract_model_class.py b/mlair/model_modules/abstract_model_class.py
new file mode 100644
index 0000000000000000000000000000000000000000..894ff7ac4e787a8b31f75ff932f60bec8c561094
--- /dev/null
+++ b/mlair/model_modules/abstract_model_class.py
@@ -0,0 +1,241 @@
+import inspect
+from abc import ABC
+from typing import Any, Dict, Callable
+
+import keras
+import tensorflow as tf
+
+from mlair.helpers import remove_items
+
+
+class AbstractModelClass(ABC):
+    """
+    The AbstractModelClass provides a unified skeleton for any model provided to the machine learning workflow.
+
+    The model can always be accessed by calling ModelClass.model or directly by a model method without naming the
+    model attribute explicitly (e.g. ModelClass.model.compile -> ModelClass.compile). Besides the model, this class
+    provides the corresponding loss function.
+    """
+
+    _requirements = []
+
+    def __init__(self, input_shape, output_shape) -> None:
+        """Predefine internal attributes for model and loss."""
+        self.__model = None
+        self.model_name = self.__class__.__name__
+        self.__custom_objects = {}
+        self.__allowed_compile_options = {'optimizer': None,
+                                          'loss': None,
+                                          'metrics': None,
+                                          'loss_weights': None,
+                                          'sample_weight_mode': None,
+                                          'weighted_metrics': None,
+                                          'target_tensors': None
+                                          }
+        self.__compile_options = self.__allowed_compile_options
+        self.__compile_options_is_set = False
+        self._input_shape = input_shape
+        self._output_shape = self.__extract_from_tuple(output_shape)
+
+    def __getattr__(self, name: str) -> Any:
+        """
+        Is called if __getattribute__ is not able to find requested attribute.
+
+        Normally, the model class is saved into a variable like `model = ModelClass()`. To bypass a call like
+        `model.model` to access the _model attribute, this method searches for the named attribute in the
+        self.model namespace and returns it if available. Therefore, the following expression is true:
+        `ModelClass().compile == ModelClass().model.compile` as long as the called attribute/method is not part of the
+        ModelClass itself.
+
+        :param name: name of the attribute or method to call
+
+        :return: attribute or method from self.model namespace
+        """
+        return self.model.__getattribute__(name)
+
+    @property
+    def model(self) -> keras.Model:
+        """
+        The model property containing a keras.Model instance.
+
+        :return: the keras model
+        """
+        return self.__model
+
+    @model.setter
+    def model(self, value):
+        self.__model = value
+
+    @property
+    def custom_objects(self) -> Dict:
+        """
+        The custom objects property collects all non-keras utilities that are used in the model class.
+
+        To load such a customised and already compiled model (e.g. from local disk), this information is required.
+
+        :return: custom objects in a dictionary
+        """
+        return self.__custom_objects
+
+    @custom_objects.setter
+    def custom_objects(self, value) -> None:
+        self.__custom_objects = value
+
+    @property
+    def compile_options(self) -> Callable:
+        """
+        The compile options property allows the user to use all keras.compile() arguments. They can either be passed
+        as a dictionary (1), as attributes without setting compile_options (2), or as a mixture of both (partly
+        defined as instance attributes and partly passed as a dictionary) (3).
+        The setter will raise an error when the same parameter is set to different values.
+
+        Example (1) Recommended (includes check for valid keywords which are used as args in keras.compile)
+        .. code-block:: python
+            def set_compile_options(self):
+                self.compile_options = {"optimizer": keras.optimizers.SGD(),
+                                        "loss": keras.losses.mean_squared_error,
+                                        "metrics": ["mse", "mae"]}
+
+        Example (2)
+        .. code-block:: python
+            def set_compile_options(self):
+                self.optimizer = keras.optimizers.SGD()
+                self.loss = keras.losses.mean_squared_error
+                self.metrics = ["mse", "mae"]
+
+        Example (3)
+        Correct:
+        .. code-block:: python
+            def set_compile_options(self):
+                self.optimizer = keras.optimizers.SGD()
+                self.loss = keras.losses.mean_squared_error
+                self.compile_options = {"metrics": ["mse", "mae"]}
+
+        Incorrect: (Will raise an error)
+        .. code-block:: python
+            def set_compile_options(self):
+                self.optimizer = keras.optimizers.SGD()
+                self.loss = keras.losses.mean_squared_error
+                self.compile_options = {"optimizer": keras.optimizers.Adam(), "metrics": ["mse", "mae"]}
+
+        Note:
+        * As long as the attribute and the dict value have exactly the same values, the setter method will not raise
+        an error
+        * For example (2), no check is implemented whether the attributes are valid compile options
+
+
+        :return:
+        """
+        if self.__compile_options_is_set is False:
+            self.compile_options = None
+        return self.__compile_options
+
+    @compile_options.setter
+    def compile_options(self, value: Dict) -> None:
+        if isinstance(value, dict):
+            if not (set(value.keys()) <= set(self.__allowed_compile_options.keys())):
+                raise ValueError(f"Got invalid key for compile_options. {value.keys()}")
+
+        for allow_k in self.__allowed_compile_options.keys():
+            if hasattr(self, allow_k):
+                new_v_attr = getattr(self, allow_k)
+            else:
+                new_v_attr = None
+            if isinstance(value, dict):
+                new_v_dic = value.pop(allow_k, None)
+            elif value is None:
+                new_v_dic = None
+            else:
+                raise TypeError(f"`compile_options' must be `dict' or `None', but is {type(value)}.")
+            if (new_v_attr == new_v_dic or self.__compare_keras_optimizers(new_v_attr, new_v_dic)) or (
+                    (new_v_attr is None) ^ (new_v_dic is None)):
+                if new_v_attr is not None:
+                    self.__compile_options[allow_k] = new_v_attr
+                else:
+                    self.__compile_options[allow_k] = new_v_dic
+
+            else:
+                raise ValueError(
+                    f"Got different values or arguments for same argument: self.{allow_k}={new_v_attr.__class__} and '{allow_k}': {new_v_dic.__class__}")
+        self.__compile_options_is_set = True
+
+    @staticmethod
+    def __extract_from_tuple(tup):
+        """Return element of tuple if it contains only a single element."""
+        return tup[0] if isinstance(tup, tuple) and len(tup) == 1 else tup
+
+    @staticmethod
+    def __compare_keras_optimizers(first, second):
+        """
+        Compares if optimiser and all settings of the optimisers are exactly equal.
+
+        :return: True if optimisers are interchangeable, or False if optimisers are distinguishable.
+        """
+        if first.__class__ == second.__class__ and first.__module__ == 'keras.optimizers':
+            res = True
+            init = tf.global_variables_initializer()
+            with tf.Session() as sess:
+                sess.run(init)
+                for k, v in first.__dict__.items():
+                    try:
+                        res *= sess.run(v) == sess.run(second.__dict__[k])
+                    except TypeError:
+                        res *= v == second.__dict__[k]
+        else:
+            res = False
+        return bool(res)
+
+    def get_settings(self) -> Dict:
+        """
+        Get all class attributes that are not protected in the AbstractModelClass as dictionary.
+
+        :return: all class attributes
+        """
+        return dict((k, v) for (k, v) in self.__dict__.items() if not k.startswith("_AbstractModelClass__"))
+
+    def set_model(self):
+        """Abstract method to set model."""
+        raise NotImplementedError
+
+    def set_compile_options(self):
+        """
+        This method only has to be defined in the child class when additional compile options should be used
+        (other options than optimizer and loss).
+        Has to be set as dictionary: {'optimizer': None,
+                                      'loss': None,
+                                      'metrics': None,
+                                      'loss_weights': None,
+                                      'sample_weight_mode': None,
+                                      'weighted_metrics': None,
+                                      'target_tensors': None
+                                      }
+
+        :return:
+        """
+        raise NotImplementedError
+
+    def set_custom_objects(self, **kwargs) -> None:
+        """
+        Set custom objects that are not part of keras framework.
+
+        These custom objects are needed if an already compiled model is loaded from disk. There is a special treatment
+        for the Padding2D class, which is a base class for different padding types. For a correct behaviour, all
+        supported subclasses are added as custom objects in addition to the given ones.
+
+        :param kwargs: all custom objects, that should be saved
+        """
+        if "Padding2D" in kwargs.keys():
+            kwargs.update(kwargs["Padding2D"].allowed_paddings)
+        self.custom_objects = kwargs
+
+    @classmethod
+    def requirements(cls):
+        """Return requirements and own arguments without duplicates."""
+        return list(set(cls._requirements + cls.own_args()))
+
+    @classmethod
+    def own_args(cls, *args):
+        """Return all arguments (including kwonlyargs)."""
+        arg_spec = inspect.getfullargspec(cls)
+        list_of_args = arg_spec.args + arg_spec.kwonlyargs
+        return remove_items(list_of_args, ["self"] + list(args))
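
New compared to the copy removed from model_class.py are the `_requirements` attribute and the `requirements()`/`own_args()` class methods, which let `ModelSetup` query a model class for the constructor arguments it needs (see the model_setup.py hunk below). A sketch of what they collect, assuming the project environment with keras installed (the subclass is hypothetical):

```python
# Sketch of requirements() resolution on a hypothetical subclass.
from mlair.model_modules import AbstractModelClass


class TinyModel(AbstractModelClass):
    _requirements = ["lr"]

    def __init__(self, input_shape, output_shape, n_hidden=10):
        super().__init__(input_shape[0], output_shape[0])

    def set_model(self):
        pass

    def set_compile_options(self):
        pass


# own_args() inspects __init__ and drops "self"; _requirements are merged in
print(sorted(TinyModel.requirements()))
# ['input_shape', 'lr', 'n_hidden', 'output_shape']
```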
diff --git a/mlair/model_modules/fully_connected_networks.py b/mlair/model_modules/fully_connected_networks.py
new file mode 100644
index 0000000000000000000000000000000000000000..dbcd3a9f41ca1b9a7435be95b93eb40c2b37c5a0
--- /dev/null
+++ b/mlair/model_modules/fully_connected_networks.py
@@ -0,0 +1,136 @@
+__author__ = "Lukas Leufen"
+__date__ = '2021-02-'
+
+from functools import reduce, partial
+
+from mlair.model_modules import AbstractModelClass
+from mlair.helpers import select_from_dict
+
+import keras
+
+
+class FCN_64_32_16(AbstractModelClass):
+    """
+    A customised model with 4 Dense layers (64, 32, 16, window_lead_time), where the last layer is the output layer
+    and its size depends on the window_lead_time parameter.
+    """
+
+    def __init__(self, input_shape: list, output_shape: list):
+        """
+        Sets model and loss depending on the given arguments.
+
+        :param input_shape: list of input shapes (expect len=1 with shape=(window_hist, station, variables))
+        :param output_shape: list of output shapes (expect len=1 with shape=(window_forecast))
+        """
+
+        assert len(input_shape) == 1
+        assert len(output_shape) == 1
+        super().__init__(input_shape[0], output_shape[0])
+
+        # settings
+        self.activation = keras.layers.PReLU
+
+        # apply to model
+        self.set_model()
+        self.set_compile_options()
+        self.set_custom_objects(loss=self.compile_options['loss'])
+
+    def set_model(self):
+        """
+        Build the model.
+        """
+        x_input = keras.layers.Input(shape=self._input_shape)
+        x_in = keras.layers.Flatten()(x_input)
+        x_in = keras.layers.Dense(64, name="Dense_64")(x_in)
+        x_in = self.activation()(x_in)
+        x_in = keras.layers.Dense(32, name="Dense_32")(x_in)
+        x_in = self.activation()(x_in)
+        x_in = keras.layers.Dense(16, name="Dense_16")(x_in)
+        x_in = self.activation()(x_in)
+        x_in = keras.layers.Dense(self._output_shape, name="Dense_output")(x_in)
+        out_main = self.activation()(x_in)
+        self.model = keras.Model(inputs=x_input, outputs=[out_main])
+
+    def set_compile_options(self):
+        self.optimizer = keras.optimizers.adam(lr=1e-2)
+        self.compile_options = {"loss": [keras.losses.mean_squared_error], "metrics": ["mse", "mae"]}
+
+
+class FCN(AbstractModelClass):
+    """
+    A customisable fully connected network, where the number and width of the hidden layers is set via n_layer and
+    n_hidden, and the last layer is the output layer sized by the window_lead_time parameter.
+    """
+
+    _activation = {"relu": keras.layers.ReLU, "tanh": partial(keras.layers.Activation, "tanh"),
+                   "sigmoid": partial(keras.layers.Activation, "sigmoid"),
+                   "linear": partial(keras.layers.Activation, "linear")}
+    _optimizer = {"adam": keras.optimizers.adam, "sgd": keras.optimizers.SGD}
+    _requirements = ["lr", "beta_1", "beta_2", "epsilon", "decay", "amsgrad", "momentum", "nesterov"]
+
+    def __init__(self, input_shape: list, output_shape: list, activation="relu", activation_output="linear",
+                 optimizer="adam", n_layer=1, n_hidden=10, **kwargs):
+        """
+        Sets model and loss depending on the given arguments.
+
+        :param input_shape: list of input shapes (expect len=1 with shape=(window_hist, station, variables))
+        :param output_shape: list of output shapes (expect len=1 with shape=(window_forecast))
+        """
+
+        assert len(input_shape) == 1
+        assert len(output_shape) == 1
+        super().__init__(input_shape[0], output_shape[0])
+
+        # settings
+        self.activation = self._set_activation(activation)
+        self.activation_output = self._set_activation(activation_output)
+        self.optimizer = self._set_optimizer(optimizer, **kwargs)
+        self.layer_configuration = (n_layer, n_hidden)
+        self._update_model_name()
+
+        # apply to model
+        self.set_model()
+        self.set_compile_options()
+        # self.set_custom_objects(loss=self.compile_options['loss'])
+
+    def _set_activation(self, activation):
+        try:
+            return self._activation[activation.lower()]  # subscript (not .get) so unknown keys raise KeyError
+        except KeyError:
+            raise AttributeError(f"Given activation {activation} is not supported in this model class.")
+
+    def _set_optimizer(self, optimizer, **kwargs):
+        try:
+            opt_name = optimizer.lower()
+            opt = self._optimizer[opt_name]  # subscript (not .get) so unknown optimizers raise KeyError
+            opt_kwargs = {}
+            if opt_name == "adam":
+                opt_kwargs = select_from_dict(kwargs, ["lr", "beta_1", "beta_2", "epsilon", "decay", "amsgrad"])
+            elif opt_name == "sgd":
+                opt_kwargs = select_from_dict(kwargs, ["lr", "momentum", "decay", "nesterov"])
+            return opt(**opt_kwargs)
+        except KeyError:
+            raise AttributeError(f"Given optimizer {optimizer} is not supported in this model class.")
+
+    def _update_model_name(self):
+        n_layer, n_hidden = self.layer_configuration
+        n_input = str(reduce(lambda x, y: x * y, self._input_shape))
+        n_output = str(self._output_shape)
+        self.model_name += "_".join(["", n_input, *[f"{n_hidden}" for _ in range(n_layer)], n_output])
+
+    def set_model(self):
+        """
+        Build the model.
+        """
+        x_input = keras.layers.Input(shape=self._input_shape)
+        x_in = keras.layers.Flatten()(x_input)
+        n_layer, n_hidden = self.layer_configuration
+        for layer in range(n_layer):
+            x_in = keras.layers.Dense(n_hidden)(x_in)
+            x_in = self.activation()(x_in)
+        x_in = keras.layers.Dense(self._output_shape)(x_in)
+        out = self.activation_output()(x_in)
+        self.model = keras.Model(inputs=x_input, outputs=[out])
+
+    def set_compile_options(self):
+        self.compile_options = {"loss": [keras.losses.mean_squared_error], "metrics": ["mse", "mae"]}
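
The new `FCN` class exposes its tunables (activation, optimizer, layer count/width, plus the optimizer kwargs listed in `_requirements`) through the constructor, so they can be resolved from the data store by `ModelSetup.build_model`. A hedged construction sketch; shapes and hyperparameters are illustrative, and all sgd kwargs are passed explicitly since `select_from_dict`'s handling of missing keys is not shown in this diff:

```python
# Construction sketch for the configurable FCN (shapes/hyperparameters illustrative).
from mlair.model_modules.fully_connected_networks import FCN

model = FCN(input_shape=[(7, 1, 9)], output_shape=[(4,)],
            activation="tanh", optimizer="sgd", n_layer=2, n_hidden=32,
            lr=1e-3, momentum=0.9, decay=0.0, nesterov=False)
print(model.model_name)   # FCN_63_32_32_4: n_input, hidden widths, n_output
model.model.summary()     # the underlying keras.Model
```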
diff --git a/mlair/model_modules/model_class.py b/mlair/model_modules/model_class.py
index a2eda6e8287af2ce489bf75b02d7b205549ff144..f8e3a21a81351ac614e2275749bb85fa82a96e02 100644
--- a/mlair/model_modules/model_class.py
+++ b/mlair/model_modules/model_class.py
@@ -120,287 +120,14 @@ import mlair.model_modules.keras_extensions
 __author__ = "Lukas Leufen, Felix Kleinert"
 __date__ = '2020-05-12'
 
-from abc import ABC
-from typing import Any, Callable, Dict
-
 import keras
-import tensorflow as tf
+
+from mlair.model_modules import AbstractModelClass
 from mlair.model_modules.inception_model import InceptionModelBase
 from mlair.model_modules.flatten import flatten_tail
 from mlair.model_modules.advanced_paddings import PadUtils, Padding2D, SymmetricPadding2D
 
 
-class AbstractModelClass(ABC):
-    """
-    The AbstractModelClass provides a unified skeleton for any model provided to the machine learning workflow.
-
-    The model can always be accessed by calling ModelClass.model or directly by an model method without parsing the
-    model attribute name (e.g. ModelClass.model.compile -> ModelClass.compile). Beside the model, this class provides
-    the corresponding loss function.
-    """
-
-    def __init__(self, input_shape, output_shape) -> None:
-        """Predefine internal attributes for model and loss."""
-        self.__model = None
-        self.model_name = self.__class__.__name__
-        self.__custom_objects = {}
-        self.__allowed_compile_options = {'optimizer': None,
-                                          'loss': None,
-                                          'metrics': None,
-                                          'loss_weights': None,
-                                          'sample_weight_mode': None,
-                                          'weighted_metrics': None,
-                                          'target_tensors': None
-                                          }
-        self.__compile_options = self.__allowed_compile_options
-        self.__compile_options_is_set = False
-        self._input_shape = input_shape
-        self._output_shape = self.__extract_from_tuple(output_shape)
-
-    def __getattr__(self, name: str) -> Any:
-        """
-        Is called if __getattribute__ is not able to find requested attribute.
-
-        Normally, the model class is saved into a variable like `model = ModelClass()`. To bypass a call like
-        `model.model` to access the _model attribute, this method tries to search for the named attribute in the
-        self.model namespace and returns this attribute if available. Therefore, following expression is true:
-        `ModelClass().compile == ModelClass().model.compile` as long the called attribute/method is not part if the
-        ModelClass itself.
-
-        :param name: name of the attribute or method to call
-
-        :return: attribute or method from self.model namespace
-        """
-        return self.model.__getattribute__(name)
-
-    @property
-    def model(self) -> keras.Model:
-        """
-        The model property containing a keras.Model instance.
-
-        :return: the keras model
-        """
-        return self.__model
-
-    @model.setter
-    def model(self, value):
-        self.__model = value
-
-    @property
-    def custom_objects(self) -> Dict:
-        """
-        The custom objects property collects all non-keras utilities that are used in the model class.
-
-        To load such a customised and already compiled model (e.g. from local disk), this information is required.
-
-        :return: custom objects in a dictionary
-        """
-        return self.__custom_objects
-
-    @custom_objects.setter
-    def custom_objects(self, value) -> None:
-        self.__custom_objects = value
-
-    @property
-    def compile_options(self) -> Callable:
-        """
-        The compile options property allows the user to use all keras.compile() arguments. They can ether be passed as
-        dictionary (1), as attribute, without setting compile_options (2) or as mixture (partly defined as instance
-        attributes and partly parsing a dictionary) of both of them (3).
-        The method will raise an Error when the same parameter is set differently.
-
-        Example (1) Recommended (includes check for valid keywords which are used as args in keras.compile)
-        .. code-block:: python
-            def set_compile_options(self):
-                self.compile_options = {"optimizer": keras.optimizers.SGD(),
-                                        "loss": keras.losses.mean_squared_error,
-                                        "metrics": ["mse", "mae"]}
-
-        Example (2)
-        .. code-block:: python
-            def set_compile_options(self):
-                self.optimizer = keras.optimizers.SGD()
-                self.loss = keras.losses.mean_squared_error
-                self.metrics = ["mse", "mae"]
-
-        Example (3)
-        Correct:
-        .. code-block:: python
-            def set_compile_options(self):
-                self.optimizer = keras.optimizers.SGD()
-                self.loss = keras.losses.mean_squared_error
-                self.compile_options = {"metrics": ["mse", "mae"]}
-
-        Incorrect: (Will raise an error)
-        .. code-block:: python
-            def set_compile_options(self):
-                self.optimizer = keras.optimizers.SGD()
-                self.loss = keras.losses.mean_squared_error
-                self.compile_options = {"optimizer" = keras.optimizers.Adam(), "metrics": ["mse", "mae"]}
-
-        Note:
-        * As long as the attribute and the dict value have exactly the same values, the setter method will not raise
-        an error
-        * For example (2) there is no check implemented, if the attributes are valid compile options
-
-
-        :return:
-        """
-        if self.__compile_options_is_set is False:
-            self.compile_options = None
-        return self.__compile_options
-
-    @compile_options.setter
-    def compile_options(self, value: Dict) -> None:
-        if isinstance(value, dict):
-            if not (set(value.keys()) <= set(self.__allowed_compile_options.keys())):
-                raise ValueError(f"Got invalid key for compile_options. {value.keys()}")
-
-        for allow_k in self.__allowed_compile_options.keys():
-            if hasattr(self, allow_k):
-                new_v_attr = getattr(self, allow_k)
-            else:
-                new_v_attr = None
-            if isinstance(value, dict):
-                new_v_dic = value.pop(allow_k, None)
-            elif value is None:
-                new_v_dic = None
-            else:
-                raise TypeError(f"`compile_options' must be `dict' or `None', but is {type(value)}.")
-            if (new_v_attr == new_v_dic or self.__compare_keras_optimizers(new_v_attr, new_v_dic)) or (
-                    (new_v_attr is None) ^ (new_v_dic is None)):
-                if new_v_attr is not None:
-                    self.__compile_options[allow_k] = new_v_attr
-                else:
-                    self.__compile_options[allow_k] = new_v_dic
-
-            else:
-                raise ValueError(
-                    f"Got different values or arguments for same argument: self.{allow_k}={new_v_attr.__class__} and '{allow_k}': {new_v_dic.__class__}")
-        self.__compile_options_is_set = True
-
-    @staticmethod
-    def __extract_from_tuple(tup):
-        """Return element of tuple if it contains only a single element."""
-        return tup[0] if isinstance(tup, tuple) and len(tup) == 1 else tup
-
-    @staticmethod
-    def __compare_keras_optimizers(first, second):
-        """
-        Compares if optimiser and all settings of the optimisers are exactly equal.
-
-        :return True if optimisers are interchangeable, or False if optimisers are distinguishable.
-        """
-        if first.__class__ == second.__class__ and first.__module__ == 'keras.optimizers':
-            res = True
-            init = tf.global_variables_initializer()
-            with tf.Session() as sess:
-                sess.run(init)
-                for k, v in first.__dict__.items():
-                    try:
-                        res *= sess.run(v) == sess.run(second.__dict__[k])
-                    except TypeError:
-                        res *= v == second.__dict__[k]
-        else:
-            res = False
-        return bool(res)
-
-    def get_settings(self) -> Dict:
-        """
-        Get all class attributes that are not protected in the AbstractModelClass as dictionary.
-
-        :return: all class attributes
-        """
-        return dict((k, v) for (k, v) in self.__dict__.items() if not k.startswith("_AbstractModelClass__"))
-
-    def set_model(self):
-        """Abstract method to set model."""
-        raise NotImplementedError
-
-    def set_compile_options(self):
-        """
-        This method only has to be defined in child class, when additional compile options should be used ()
-        (other options than optimizer and loss)
-        Has to be set as dictionary: {'optimizer': None,
-                                      'loss': None,
-                                      'metrics': None,
-                                      'loss_weights': None,
-                                      'sample_weight_mode': None,
-                                      'weighted_metrics': None,
-                                      'target_tensors': None
-                                      }
-
-        :return:
-        """
-        raise NotImplementedError
-
-    def set_custom_objects(self, **kwargs) -> None:
-        """
-        Set custom objects that are not part of keras framework.
-
-        These custom objects are needed if an already compiled model is loaded from disk. There is a special treatment
-        for the Padding2D class, which is a base class for different padding types. For a correct behaviour, all
-        supported subclasses are added as custom objects in addition to the given ones.
-
-        :param kwargs: all custom objects, that should be saved
-        """
-        if "Padding2D" in kwargs.keys():
-            kwargs.update(kwargs["Padding2D"].allowed_paddings)
-        self.custom_objects = kwargs
-
-
-class MyLittleModel(AbstractModelClass):
-    """
-    A customised model 4 Dense layers (64, 32, 16, window_lead_time), where the last layer is the output layer depending
-    on the window_lead_time parameter.
-    """
-
-    def __init__(self, input_shape: list, output_shape: list):
-        """
-        Sets model and loss depending on the given arguments.
-
-        :param input_shape: list of input shapes (expect len=1 with shape=(window_hist, station, variables))
-        :param output_shape: list of output shapes (expect len=1 with shape=(window_forecast))
-        """
-
-        assert len(input_shape) == 1
-        assert len(output_shape) == 1
-        super().__init__(input_shape[0], output_shape[0])
-
-        # settings
-        self.dropout_rate = 0.1
-        self.regularizer = keras.regularizers.l2(0.1)
-        self.activation = keras.layers.PReLU
-
-        # apply to model
-        self.set_model()
-        self.set_compile_options()
-        self.set_custom_objects(loss=self.compile_options['loss'])
-
-    def set_model(self):
-        """
-        Build the model.
-        """
-        x_input = keras.layers.Input(shape=self._input_shape)
-        x_in = keras.layers.Flatten(name='{}'.format("major"))(x_input)
-        x_in = keras.layers.Dense(64, name='{}_Dense_64'.format("major"))(x_in)
-        x_in = self.activation()(x_in)
-        x_in = keras.layers.Dense(32, name='{}_Dense_32'.format("major"))(x_in)
-        x_in = self.activation()(x_in)
-        x_in = keras.layers.Dense(16, name='{}_Dense_16'.format("major"))(x_in)
-        x_in = self.activation()(x_in)
-        x_in = keras.layers.Dense(self._output_shape, name='{}_Dense'.format("major"))(x_in)
-        out_main = self.activation()(x_in)
-        self.model = keras.Model(inputs=x_input, outputs=[out_main])
-
-    def set_compile_options(self):
-        self.initial_lr = 1e-2
-        self.optimizer = keras.optimizers.adam(lr=self.initial_lr)
-        # self.lr_decay = mlair.model_modules.keras_extensions.LearningRateDecay(base_lr=self.initial_lr, drop=.94,
-        #                                                                        epochs_drop=10)
-        self.compile_options = {"loss": [keras.losses.mean_squared_error], "metrics": ["mse", "mae"]}
-
-
 class MyLittleModelHourly(AbstractModelClass):
     """
     A customised model with a 1x1 Conv, and 4 Dense layers (64, 32, 16, window_lead_time), where the last layer is the
@@ -750,8 +477,3 @@ class MyPaperModel(AbstractModelClass):
         self.optimizer = keras.optimizers.SGD(lr=self.initial_lr, momentum=0.9)
         self.compile_options = {"loss": [keras.losses.mean_squared_error, keras.losses.mean_squared_error],
                                 "metrics": ['mse', 'mae']}
-
-
-if __name__ == "__main__":
-    model = MyLittleModel([(1, 3, 10)], [2])
-    print(model.compile_options)
diff --git a/mlair/plotting/training_monitoring.py b/mlair/plotting/training_monitoring.py
index 09f49c848675eb21bd172e40b09b265e47c443fb..9cad9fd0ee2b9f3d81bd91810abcd4f6eeefb05f 100644
--- a/mlair/plotting/training_monitoring.py
+++ b/mlair/plotting/training_monitoring.py
@@ -85,8 +85,9 @@ class PlotModelHistory:
         :param filename: name (including total path) of the plot to save.
         """
         ax = self._data[[self._plot_metric, f"val_{self._plot_metric}"]].plot(linewidth=0.7)
+        ax.set_yscale('log')
         if len(self._additional_columns) > 0:
-            self._data[self._additional_columns].plot(linewidth=0.7, secondary_y=True, ax=ax)
+            self._data[self._additional_columns].plot(linewidth=0.7, secondary_y=True, ax=ax, logy=True)
         title = f"Model {self._plot_metric}: best = {self._data[[f'val_{self._plot_metric}']].min().values}"
         ax.set(xlabel="epoch", ylabel=self._plot_metric, title=title)
         ax.axhline(y=0, color="gray", linewidth=0.5)
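
With a logarithmic y-axis, the large early-epoch losses no longer flatten out the late-epoch detail; the pandas `secondary_y` axis gets the same treatment via `logy=True`. A minimal sketch of the effect (history values are made up; requires a matplotlib backend):

```python
# Minimal sketch of the log-scale history plot (values are made up).
import pandas as pd

history = pd.DataFrame({"loss": [1.0, 0.3, 0.1, 0.05],
                        "val_loss": [1.2, 0.5, 0.2, 0.15]})
ax = history.plot(linewidth=0.7)
ax.set_yscale("log")  # spreads out the small late-epoch differences
```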
diff --git a/mlair/run_modules/experiment_setup.py b/mlair/run_modules/experiment_setup.py
index af540fc296f1d4b707b5373fdbcbb14dac1afc7f..30672ecc9206319896205d886157b2f2f8977f39 100644
--- a/mlair/run_modules/experiment_setup.py
+++ b/mlair/run_modules/experiment_setup.py
@@ -20,7 +20,7 @@ from mlair.configuration.defaults import DEFAULT_STATIONS, DEFAULT_VAR_ALL_DICT,
     DEFAULT_NUMBER_OF_BOOTSTRAPS, DEFAULT_PLOT_LIST, DEFAULT_SAMPLING, DEFAULT_DATA_ORIGIN, DEFAULT_ITER_DIM
 from mlair.data_handler import DefaultDataHandler
 from mlair.run_modules.run_environment import RunEnvironment
-from mlair.model_modules.model_class import MyLittleModel as VanillaModel
+from mlair.model_modules.fully_connected_networks import FCN_64_32_16 as VanillaModel
 
 
 class ExperimentSetup(RunEnvironment):
diff --git a/mlair/run_modules/model_setup.py b/mlair/run_modules/model_setup.py
index dda18fac5d8546c6e399334f3d89415d246a1975..5dd73d50f711387a65a9bc7e4daa7c1d430bfb26 100644
--- a/mlair/run_modules/model_setup.py
+++ b/mlair/run_modules/model_setup.py
@@ -56,7 +56,6 @@ class ModelSetup(RunEnvironment):
         """Initialise and run model setup."""
         super().__init__()
         self.model = None
-        # path = self.data_store.get("experiment_path")
         exp_name = self.data_store.get("experiment_name")
         path = self.data_store.get("model_path")
         self.scope = "model"
@@ -138,9 +137,9 @@ class ModelSetup(RunEnvironment):
 
     def build_model(self):
         """Build model using input and output shapes from data store."""
-        args_list = ["input_shape", "output_shape"]
-        args = self.data_store.create_args_dict(args_list, self.scope)
         model = self.data_store.get("model_class")
+        args_list = model.requirements()
+        args = self.data_store.create_args_dict(args_list, self.scope)
         self.model = model(**args)
         self.get_model_settings()
 
@@ -170,6 +169,7 @@ class ModelSetup(RunEnvironment):
     def report_model(self):
         model_settings = self.model.get_settings()
         model_settings.update(self.model.compile_options)
+        model_settings.update(self.model.optimizer.get_config())
         df = pd.DataFrame(columns=["model setting"])
         for k, v in model_settings.items():
             if v is None:
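
`build_model` now derives the constructor arguments from the model class itself via `requirements()` instead of the hard-coded `["input_shape", "output_shape"]`, which is what allows classes like `FCN` to receive `n_layer`, `n_hidden`, or optimizer kwargs from the data store. A sketch of the idea with a mocked store (the helper mimics `DataStore.create_args_dict`; its skip-on-missing behaviour is an assumption here):

```python
# Sketch of requirements-driven construction with a mocked data store (hypothetical helper).
store = {"input_shape": [(7, 1, 9)], "output_shape": [(4,)],
         "n_layer": 2, "n_hidden": 32, "lr": 1e-3}

def create_args_dict(arg_list, store):
    # resolve only the arguments the store can provide (assumed behaviour)
    return {arg: store[arg] for arg in arg_list if arg in store}

args = create_args_dict(["input_shape", "output_shape", "n_layer", "n_hidden",
                         "lr", "activation"], store)
print(sorted(args))  # "activation" is skipped because the store cannot resolve it
```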
diff --git a/mlair/run_modules/post_processing.py b/mlair/run_modules/post_processing.py
index c810d3c5517643612f773bdbffc3e0e029d9150d..3b9b563426a80816f7cf1ea9e114a8395d9fbba0 100644
--- a/mlair/run_modules/post_processing.py
+++ b/mlair/run_modules/post_processing.py
@@ -13,13 +13,14 @@ import numpy as np
 import pandas as pd
 import xarray as xr
 
+from mlair.configuration import path_config
 from mlair.data_handler import BootStraps, KerasIterator
 from mlair.helpers.datastore import NameNotFoundInDataStore
-from mlair.helpers import TimeTracking, statistics, extract_value, remove_items, to_list
+from mlair.helpers import TimeTracking, statistics, extract_value, remove_items, to_list, tables
 from mlair.model_modules.linear_model import OrdinaryLeastSquaredModel
-from mlair.model_modules.model_class import AbstractModelClass
+from mlair.model_modules import AbstractModelClass
 from mlair.plotting.postprocessing_plotting import PlotMonthlySummary, PlotStationMap, PlotClimatologicalSkillScore, \
-    PlotCompetitiveSkillScore, PlotTimeSeries, PlotBootstrapSkillScore, PlotAvailability, PlotAvailabilityHistogram,  \
+    PlotCompetitiveSkillScore, PlotTimeSeries, PlotBootstrapSkillScore, PlotAvailability, PlotAvailabilityHistogram, \
     PlotConditionalQuantiles, PlotSeparationOfScales
 from mlair.run_modules.run_environment import RunEnvironment
 
@@ -102,9 +103,12 @@ class PostProcessing(RunEnvironment):
                 create_new_bootstraps = self.data_store.get("create_new_bootstraps", "postprocessing")
                 self.bootstrap_postprocessing(create_new_bootstraps)
 
-        # skill scores
+        # skill scores and error metrics
         with TimeTracking(name="calculate skill scores"):
-            self.skill_scores = self.calculate_skill_scores()
+            skill_score_competitive, skill_score_climatological, errors = self.calculate_error_metrics()
+            self.skill_scores = (skill_score_competitive, skill_score_climatological)
+        self.report_error_metrics(errors)
+        self.report_error_metrics(skill_score_climatological)
 
         # plotting
         self.plot()
@@ -386,12 +390,14 @@ class PostProcessing(RunEnvironment):
 
     def calculate_test_score(self):
         """Evaluate test score of model and save locally."""
+
+        # test scores on transformed data
         test_score = self.model.evaluate_generator(generator=self.test_data_distributed,
-                                                   use_multiprocessing=True, verbose=0, steps=1)
+                                                   use_multiprocessing=True, verbose=0)
         path = self.data_store.get("model_path")
         with open(os.path.join(path, "test_scores.txt"), "a") as f:
-            for index, item in enumerate(test_score):
-                logging.info(f"{self.model.metrics_names[index]}, {item}")
+            for index, item in enumerate(to_list(test_score)):
+                logging.info(f"{self.model.metrics_names[index]} (test), {item}")
                 f.write(f"{self.model.metrics_names[index]}, {item}\n")
 
     def train_ols_model(self):
@@ -407,7 +413,7 @@ class PostProcessing(RunEnvironment):
         be found inside `forecast_path`.
         """
         subset_type = subset.name
-        logging.debug(f"start make_prediction for {subset_type}")
+        logging.info(f"start make_prediction for {subset_type}")
         time_dimension = self.data_store.get("time_dim")
         window_dim = self.data_store.get("window_dim")
         subset_type = subset.name
@@ -627,7 +633,7 @@ class PostProcessing(RunEnvironment):
         try:
             file = os.path.join(path, f"forecasts_{str(station)}_train_val.nc")
             return xr.open_dataarray(file)
-        except (IndexError, KeyError):
+        except (IndexError, KeyError, FileNotFoundError):
             return None
 
     def _get_external_data(self, station: str, path: str) -> Union[xr.DataArray, None]:
@@ -642,30 +648,92 @@ class PostProcessing(RunEnvironment):
         try:
             file = os.path.join(path, f"forecasts_{str(station)}_test.nc")
             return xr.open_dataarray(file)
-        except (IndexError, KeyError):
+        except (IndexError, KeyError, FileNotFoundError):
             return None
 
-    def calculate_skill_scores(self) -> Tuple[Dict, Dict]:
+    @staticmethod
+    def _combine_forecasts(forecast, competitor, dim="type"):
+        """
+        Combine forecast and competitor if both are xarray objects. If competitor is None, this returns the forecast
+        and vice versa.
+        """
+        try:
+            return xr.concat([forecast, competitor], dim=dim)
+        except (TypeError, AttributeError):
+            return forecast if competitor is None else competitor
+
+    def calculate_error_metrics(self) -> Tuple[Dict, Dict, Dict]:
         """
-        Calculate skill scores of NN forecast.
+        Calculate error metrics and skill scores of NN forecast.
 
         The competitive skill score compares the NN prediction with persistence and ordinary least squares forecasts.
         Whereas, the climatological skill scores evaluates the NN prediction in terms of meaningfulness in comparison
         to different climatological references.
 
-        :return: competitive and climatological skill scores
+        :return: competitive and climatological skill scores, error metrics
         """
         path = self.data_store.get("forecast_path")
+        all_stations = self.data_store.get("stations")
         skill_score_competitive = {}
         skill_score_climatological = {}
-        for station in self.test_data:
-            external_data = self._get_external_data(station, path)
-            competitor = self.load_competitors(str(station))
-            combined = xr.concat([external_data, competitor], dim="type") if competitor is not None else external_data
-            skill_score = statistics.SkillScores(combined, models=remove_items(list(combined.type.values), "obs"))
-            skill_score_competitive[station] = skill_score.skill_scores(self.window_lead_time)
-
-            internal_data = self._get_internal_data(station, path)  # ToDo: check if external is still right?
-            skill_score_climatological[station] = skill_score.climatological_skill_scores(
-                internal_data, self.window_lead_time, forecast_name=self.forecast_indicator)
-        return skill_score_competitive, skill_score_climatological
+        errors = {}
+        for station in all_stations:
+            external_data = self._get_external_data(station, path)  # test data
+
+            # test errors
+            if external_data is not None:
+                errors[station] = statistics.calculate_error_metrics(*map(lambda x: external_data.sel(type=x),
+                                                                          [self.forecast_indicator, "obs"]),
+                                                                     dim="index")
+            # skill score
+            competitor = self.load_competitors(station)
+            combined = self._combine_forecasts(external_data, competitor, dim="type")
+            model_list = remove_items(list(combined.type.values), "obs") if combined is not None else None
+            skill_score = statistics.SkillScores(combined, models=model_list)
+            if external_data is not None:
+                skill_score_competitive[station] = skill_score.skill_scores(self.window_lead_time)
+
+            internal_data = self._get_internal_data(station, path)
+            if internal_data is not None:
+                skill_score_climatological[station] = skill_score.climatological_skill_scores(
+                    internal_data, self.window_lead_time, forecast_name=self.forecast_indicator)
+
+        errors.update({"total": self.calculate_average_errors(errors)})
+        return skill_score_competitive, skill_score_climatological, errors
+
+    @staticmethod
+    def calculate_average_errors(errors):
+        avg_error = {}
+        n_total = sum([x.get("n", 0) for _, x in errors.items()])
+        for station, station_errors in errors.items():
+            n_station = station_errors.get("n")
+            for error_metric, val in station_errors.items():
+                new_val = avg_error.get(error_metric, 0) + val * n_station / n_total
+                avg_error[error_metric] = new_val
+        return avg_error
+
+    def report_error_metrics(self, errors):
+        report_path = os.path.join(self.data_store.get("experiment_path"), "latex_report")
+        path_config.check_path_and_create(report_path)
+        metric_collection = {}
+        for station, station_errors in errors.items():
+            if isinstance(station_errors, xr.DataArray):
+                dim = station_errors.dims[0]
+                sel_index = [sel for sel in station_errors.coords[dim] if "CASE" in str(sel)]
+                station_errors = {str(i.values): station_errors.sel(**{dim: i}) for i in sel_index}
+            for metric, vals in station_errors.items():
+                if metric == "n":
+                    continue
+                pd_vals = pd.DataFrame.from_dict({station: vals}).T
+                pd_vals.columns = [f"{metric}(t+{x})" for x in vals.coords["ahead"].values]
+                mc = metric_collection.get(metric, pd.DataFrame())
+                mc = mc.append(pd_vals)
+                metric_collection[metric] = mc
+        for metric, error_df in metric_collection.items():
+            df = error_df.sort_index()
+            if "total" in df.index:
+                df = df.reindex(df.index.drop(["total"]).to_list() + ["total"])  # keep "total" as the last row
+            column_format = tables.create_column_format_for_tex(df)
+            file_name = f"error_report_{metric}.%s".replace(' ', '_')
+            tables.save_to_tex(report_path, file_name % "tex", column_format=column_format, df=df)
+            tables.save_to_md(report_path, file_name % "md", df=df)
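
`calculate_average_errors` weights each station's metrics by its sample count `n`, so stations with more test samples dominate the "total" row of the error report. A worked toy example of the weighting (station names and values are made up):

```python
# Worked toy example of the n-weighted averaging in calculate_average_errors.
errors = {
    "stationA": {"mse": 4.0, "n": 100},
    "stationB": {"mse": 1.0, "n": 300},
}
n_total = sum(x.get("n", 0) for x in errors.values())  # 400
avg_error = {}
for station_errors in errors.values():
    n_station = station_errors.get("n")
    for metric, val in station_errors.items():
        avg_error[metric] = avg_error.get(metric, 0) + val * n_station / n_total
print(avg_error["mse"])  # 1.75 = (4.0 * 100 + 1.0 * 300) / 400
```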
diff --git a/mlair/run_modules/pre_processing.py b/mlair/run_modules/pre_processing.py
index f696b0065b1db2692110488bd41513cd74aca233..813873b8181fcb78917c5ef4e697da63b2941845 100644
--- a/mlair/run_modules/pre_processing.py
+++ b/mlair/run_modules/pre_processing.py
@@ -8,12 +8,13 @@ import os
 from typing import Tuple
 import multiprocessing
 import requests
+import psutil
 
 import numpy as np
 import pandas as pd
 
 from mlair.data_handler import DataCollection, AbstractDataHandler
-from mlair.helpers import TimeTracking, to_list
+from mlair.helpers import TimeTracking, to_list, tables
 from mlair.configuration import path_config
 from mlair.helpers.join import EmptyQueryResult
 from mlair.run_modules.run_environment import RunEnvironment
@@ -118,19 +119,20 @@ class PreProcessing(RunEnvironment):
         path_config.check_path_and_create(path)
         names_of_set = ["train", "val", "test"]
         df = self.create_info_df(meta_data, meta_round, names_of_set, precision)
-        column_format = self.create_column_format_for_tex(df)
-        self.save_to_tex(path=path, filename="station_sample_size.tex", column_format=column_format, df=df)
-        self.save_to_md(path=path, filename="station_sample_size.md", df=df)
+        column_format = tables.create_column_format_for_tex(df)
+        tables.save_to_tex(path=path, filename="station_sample_size.tex", column_format=column_format, df=df)
+        tables.save_to_md(path=path, filename="station_sample_size.md", df=df)
         df_nometa = df.drop(meta_data, axis=1)
-        column_format = self.create_column_format_for_tex(df)
-        self.save_to_tex(path=path, filename="station_sample_size_short.tex", column_format=column_format, df=df_nometa)
-        self.save_to_md(path=path, filename="station_sample_size_short.md", df=df_nometa)
+        column_format = tables.create_column_format_for_tex(df)
+        tables.save_to_tex(path=path, filename="station_sample_size_short.tex", column_format=column_format,
+                           df=df_nometa)
+        tables.save_to_md(path=path, filename="station_sample_size_short.md", df=df_nometa)
         # df_nometa.to_latex(os.path.join(path, "station_sample_size_short.tex"), na_rep='---',
         #                    column_format=column_format)
         df_descr = self.create_describe_df(df_nometa)
-        column_format = self.create_column_format_for_tex(df_descr)
-        self.save_to_tex(path=path, filename="station_describe_short.tex", column_format=column_format, df=df_descr)
-        self.save_to_md(path=path, filename="station_describe_short.md", df=df_descr)
+        column_format = tables.create_column_format_for_tex(df_descr)
+        tables.save_to_tex(path=path, filename="station_describe_short.tex", column_format=column_format, df=df_descr)
+        tables.save_to_md(path=path, filename="station_describe_short.md", df=df_descr)
         # df_descr.to_latex(os.path.join(path, "station_describe_short.tex"), na_rep='---', column_format=column_format)
 
     @staticmethod
@@ -146,15 +148,6 @@ class PreProcessing(RunEnvironment):
         df_descr = df_descr[df_descr_colnames]
         return df_descr
 
-    @staticmethod
-    def save_to_tex(path, filename, column_format, df, na_rep='---'):
-        df.to_latex(os.path.join(path, filename), na_rep=na_rep, column_format=column_format)
-
-    @staticmethod
-    def save_to_md(path, filename, df, mode="w", encoding='utf-8', tablefmt="github"):
-        df.to_markdown(open(os.path.join(path, filename), mode=mode, encoding=encoding),
-                       tablefmt=tablefmt)
-
     def create_info_df(self, meta_data, meta_round, names_of_set, precision):
         df = pd.DataFrame(columns=meta_data + names_of_set)
         for set_name in names_of_set:
@@ -173,19 +166,6 @@ class PreProcessing(RunEnvironment):
         df.index.name = 'stat. ID'
         return df
 
-    @staticmethod
-    def create_column_format_for_tex(df: pd.DataFrame) -> str:
-        """
-        Creates column format for latex table based on the shape of a given DataFrame.
-
-        Calculates number of columns and uses 'c' as column position. First element is set to 'l', last to 'r'
-        """
-        column_format = np.repeat('c', df.shape[1] + 1)
-        column_format[0] = 'l'
-        column_format[-1] = 'r'
-        column_format = ''.join(column_format.tolist())
-        return column_format
-
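+    # note: save_to_tex, save_to_md and create_column_format_for_tex moved to mlair.helpers.tables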
     def split_train_val_test(self) -> None:
         """
         Split data into subsets.
@@ -264,13 +244,15 @@ class PreProcessing(RunEnvironment):
 
         if multiprocessing.cpu_count() > 1:  # parallel solution
             logging.info("use parallel validate station approach")
-            pool = multiprocessing.Pool()
+            pool = multiprocessing.Pool(
+                min(psutil.cpu_count(logical=False), len(set_stations), 16))  # use only physical cpus
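+            # the hard cap of 16 presumably guards against oversubscribing shared HPC nodes;
+            # psutil.cpu_count(logical=False) may return None on some platforms, in which case
+            # a fallback such as multiprocessing.cpu_count() would be needed here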
             logging.info(f"running {getattr(pool, '_processes')} processes in parallel")
             output = [
                 pool.apply_async(f_proc, args=(data_handler, station, set_name, store_processed_data), kwds=kwargs)
                 for station in set_stations]
-            for p in output:
+            for i, p in enumerate(output):
                 dh, s = p.get()
+                logging.info(f"...finished: {s} ({int((i + 1.) / len(output) * 100)}%)")
                 if dh is not None:
                     collection.add(dh)
                     valid_stations.append(s)
@@ -352,8 +334,7 @@ def f_proc(data_handler, station, name_affix, store, **kwargs):
     the station that was used. This function must be implemented globally to work together with multiprocessing.
     """
     try:
-        res = data_handler.build(station, name_affix=name_affix, store_processed_data=store,
-                                 **kwargs)
+        res = data_handler.build(station, name_affix=name_affix, store_processed_data=store, **kwargs)
     except (AttributeError, EmptyQueryResult, KeyError, requests.ConnectionError, ValueError) as e:
         logging.info(f"remove station {station} because it raised an error: {e}")
         res = None
diff --git a/mlair/run_modules/training.py b/mlair/run_modules/training.py
index 6c993d56b540cf3cf5b86d9c1920fc3a22557e46..5f895b77d53d45bedc255bc7ff051f9d6a8d20a3 100644
--- a/mlair/run_modules/training.py
+++ b/mlair/run_modules/training.py
@@ -10,12 +10,15 @@ from typing import Union
 
 import keras
 from keras.callbacks import Callback, History
+import psutil
+import pandas as pd
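+# psutil provides the physical core count used to set the number of keras fit_generator workers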
 
 from mlair.data_handler import KerasIterator
 from mlair.model_modules.keras_extensions import CallbackHandler
 from mlair.plotting.training_monitoring import PlotModelHistory, PlotModelLearningRate
 from mlair.run_modules.run_environment import RunEnvironment
 from mlair.configuration import path_config
+from mlair.helpers import to_list, tables
 
 
 class Training(RunEnvironment):
@@ -140,7 +143,8 @@ class Training(RunEnvironment):
                                                verbose=2,
                                                validation_data=self.val_set,
                                                validation_steps=len(self.val_set),
-                                               callbacks=self.callbacks.get_callbacks(as_dict=False))
+                                               callbacks=self.callbacks.get_callbacks(as_dict=False),
+                                               workers=psutil.cpu_count(logical=False))
         else:
             logging.info("Found locally stored model and checkpoints. Training is resumed from the last checkpoint.")
             self.callbacks.load_callbacks()
@@ -155,7 +159,8 @@ class Training(RunEnvironment):
                                          validation_data=self.val_set,
                                          validation_steps=len(self.val_set),
                                          callbacks=self.callbacks.get_callbacks(as_dict=False),
-                                         initial_epoch=initial_epoch)
+                                         initial_epoch=initial_epoch,
+                                         workers=psutil.cpu_count(logical=False))
             history = hist
         try:
             lr = self.callbacks.get_callback_by_name("lr")
@@ -232,18 +237,26 @@ class Training(RunEnvironment):
             PlotModelLearningRate(filename=os.path.join(path, f"{name}_history_learning_rate.pdf"), lr_sc=lr_sc)
 
     def report_training(self):
+        # create training summary
         data = {"mini batches": len(self.train_set),
                 "upsampling extremes": self.train_set.upsampling,
                 "shuffling": self.train_set.shuffle,
                 "created new model": self._create_new_model,
                 "epochs": self.epochs,
                 "batch size": self.batch_size}
-        import pandas as pd
         df = pd.DataFrame.from_dict(data, orient="index", columns=["training setting"])
         df.sort_index(inplace=True)
-        column_format = "ll"
         path = os.path.join(self.data_store.get("experiment_path"), "latex_report")
         path_config.check_path_and_create(path)
-        df.to_latex(os.path.join(path, "training_settings.tex"), na_rep='---', column_format=column_format)
-        df.to_markdown(open(os.path.join(path, "training_settings.md"), mode="w", encoding='utf-8'),
-                       tablefmt="github")
\ No newline at end of file
+
+        # store as .tex and .md
+        tables.save_to_tex(path, "training_settings.tex", column_format="ll", df=df)
+        tables.save_to_md(path, "training_settings.md", df=df)
+
+        # calculate val scores
+        val_score = self.model.evaluate_generator(generator=self.val_set, use_multiprocessing=True, verbose=0)
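+        # evaluate_generator returns a single loss value or a list [loss, *metrics], hence to_list below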
+        path = self.data_store.get("model_path")
+        with open(os.path.join(path, "val_scores.txt"), "a") as f:
+            for index, item in enumerate(to_list(val_score)):
+                logging.info(f"{self.model.metrics_names[index]} (val), {item}")
+                f.write(f"{self.model.metrics_names[index]}, {item}\n")
diff --git a/requirements.txt b/requirements.txt
index a5854fea755d20cc95afd161f587e709a29cfd19..b0a6e7f59896fd0edf08977ee553c803f6c2e960 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -34,6 +34,7 @@ patsy==0.5.1
 Pillow==8.1.0
 pluggy==0.13.1
 protobuf==3.15.0
+psutil==5.8.0
 py==1.10.0
 pydot==1.4.2
 pyparsing==2.4.7
diff --git a/requirements_gpu.txt b/requirements_gpu.txt
index 809eb0b303a745ae9d68dfb5aa059aeebcf24ac6..35fe0d5ee2a03f01737bc185d2a5bbaf26383806 100644
--- a/requirements_gpu.txt
+++ b/requirements_gpu.txt
@@ -34,6 +34,7 @@ patsy==0.5.1
 Pillow==8.1.0
 pluggy==0.13.1
 protobuf==3.15.0
+psutil==5.8.0
 py==1.10.0
 pydot==1.4.2
 pyparsing==2.4.7
diff --git a/test/test_data_handler/test_iterator.py b/test/test_data_handler/test_iterator.py
index ade5c19215e61de5e209db900920187294ac9b18..e47d725a4fd78fec98e81a6de9c18869e7b47637 100644
--- a/test/test_data_handler/test_iterator.py
+++ b/test/test_data_handler/test_iterator.py
@@ -1,7 +1,7 @@
-
 from mlair.data_handler.iterator import DataCollection, StandardIterator, KerasIterator
 from mlair.helpers.testing import PyTestAllEqual
-from mlair.model_modules.model_class import MyLittleModel, MyBranchedModel
+from mlair.model_modules.model_class import MyBranchedModel
+from mlair.model_modules.fully_connected_networks import FCN_64_32_16
 
 import numpy as np
 import pytest
@@ -275,7 +275,7 @@ class TestKerasIterator:
 
     def test_get_model_rank_single_output_branch(self):
         iterator = object.__new__(KerasIterator)
-        iterator.model = MyLittleModel(input_shape=[(14, 1, 2)], output_shape=[(3,)])
+        iterator.model = FCN_64_32_16(input_shape=[(14, 1, 2)], output_shape=[(3,)])
         assert iterator._get_model_rank() == 1
 
     def test_get_model_rank_multiple_output_branch(self):
diff --git a/test/test_helpers/test_tables.py b/test/test_helpers/test_tables.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b19b19bac65a9c44b7a46d8ec0b4bff33598a55
--- /dev/null
+++ b/test/test_helpers/test_tables.py
@@ -0,0 +1,21 @@
+import pandas as pd
+import numpy as np
+
+from mlair.helpers import tables
+
+
+class TestTables:
+
+    def test_create_column_format_for_tex(self):
+        df = pd.DataFrame(np.ones((2, 1)))
+        df_col = tables.create_column_format_for_tex(df)  # len: 1+1
+        assert df_col == 'lr'
+        assert len(df_col) == 2
+        df = pd.DataFrame(np.ones((2, 2)))
+        df_col = tables.create_column_format_for_tex(df)  # len: 2+1
+        assert df_col == 'lcr'
+        assert len(df_col) == 3
+        df = pd.DataFrame(np.ones((2, 3)))
+        df_col = tables.create_column_format_for_tex(df)  # len: 3+1
+        assert df_col == 'lccr'
+        assert len(df_col) == 4
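+        # for reference, the produced format string plugs straight into pandas, e.g.:
+        # df.to_latex("table.tex", na_rep='---', column_format=df_col)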
diff --git a/test/test_model_modules/test_abstract_model_class.py b/test/test_model_modules/test_abstract_model_class.py
new file mode 100644
index 0000000000000000000000000000000000000000..dfef68d550b07f824ed38e5c7809c00e5386d115
--- /dev/null
+++ b/test/test_model_modules/test_abstract_model_class.py
@@ -0,0 +1,199 @@
+import keras
+import pytest
+
+from mlair import AbstractModelClass
+
+
+class Paddings:
+    allowed_paddings = {"pad1": 34, "another_pad": True}
+
+
+class AbstractModelSubClass(AbstractModelClass):
+
+    def __init__(self):
+        super().__init__(input_shape=(12, 1, 2), output_shape=3)
+        self.test_attr = "testAttr"
+
+
+class TestAbstractModelClass:
+
+    @pytest.fixture
+    def amc(self):
+        return AbstractModelClass(input_shape=(14, 1, 2), output_shape=(3,))
+
+    @pytest.fixture
+    def amsc(self):
+        return AbstractModelSubClass()
+
+    def test_init(self, amc):
+        assert amc.model is None
+        # assert amc.loss is None
+        assert amc.model_name == "AbstractModelClass"
+        assert amc.custom_objects == {}
+        assert amc._input_shape == (14, 1, 2)
+        assert amc._output_shape == 3
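+        # note: a single-element output_shape such as (3,) is reduced to the scalar 3 internally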
+
+    def test_model_property(self, amc):
+        amc.model = keras.Model()
+        assert isinstance(amc.model, keras.Model) is True
+
+    # def test_loss_property(self, amc):
+    #     amc.loss = keras.losses.mean_absolute_error
+    #     assert amc.loss == keras.losses.mean_absolute_error
+
+    def test_compile_options_setter_all_empty(self, amc):
+        amc.compile_options = None
+        assert amc.compile_options == {'optimizer': None,
+                                       'loss': None,
+                                       'metrics': None,
+                                       'loss_weights': None,
+                                       'sample_weight_mode': None,
+                                       'weighted_metrics': None,
+                                       'target_tensors': None
+                                       }
+
+    def test_compile_options_setter_as_dict(self, amc):
+        amc.compile_options = {"optimizer": keras.optimizers.SGD(),
+                               "loss": keras.losses.mean_absolute_error,
+                               "metrics": ["mse", "mae"]}
+        assert isinstance(amc.compile_options["optimizer"], keras.optimizers.SGD)
+        assert amc.compile_options["loss"] == keras.losses.mean_absolute_error
+        assert amc.compile_options["metrics"] == ["mse", "mae"]
+        assert amc.compile_options["loss_weights"] is None
+        assert amc.compile_options["sample_weight_mode"] is None
+        assert amc.compile_options["target_tensors"] is None
+        assert amc.compile_options["weighted_metrics"] is None
+
+    def test_compile_options_setter_as_attr(self, amc):
+        amc.optimizer = keras.optimizers.SGD()
+        amc.loss = keras.losses.mean_absolute_error
+        amc.compile_options = None  # this assignment is required to trigger the compile_options setter
+        # optimizer check
+        assert isinstance(amc.optimizer, keras.optimizers.SGD)
+        assert isinstance(amc.compile_options["optimizer"], keras.optimizers.SGD)
+        # loss check
+        assert amc.loss == keras.losses.mean_absolute_error
+        assert amc.compile_options["loss"] == keras.losses.mean_absolute_error
+        # check rest (all None as not set)
+        assert amc.compile_options["metrics"] is None
+        assert amc.compile_options["loss_weights"] is None
+        assert amc.compile_options["sample_weight_mode"] is None
+        assert amc.compile_options["target_tensors"] is None
+        assert amc.compile_options["weighted_metrics"] is None
+
+    def test_compile_options_setter_as_mix_attr_dict_no_duplicates(self, amc):
+        amc.optimizer = keras.optimizers.SGD()
+        amc.compile_options = {"loss": keras.losses.mean_absolute_error,
+                               "loss_weights": [0.2, 0.8]}
+        # check setting by attribute
+        assert isinstance(amc.optimizer, keras.optimizers.SGD)
+        assert isinstance(amc.compile_options["optimizer"], keras.optimizers.SGD)
+        # check setting by dict
+        assert amc.compile_options["loss"] == keras.losses.mean_absolute_error
+        assert amc.compile_options["loss_weights"] == [0.2, 0.8]
+        # check rest (all None as not set)
+        assert amc.compile_options["metrics"] is None
+        assert amc.compile_options["sample_weight_mode"] is None
+        assert amc.compile_options["target_tensors"] is None
+        assert amc.compile_options["weighted_metrics"] is None
+
+    def test_compile_options_setter_as_mix_attr_dict_valid_duplicates_optimizer(self, amc):
+        amc.optimizer = keras.optimizers.SGD()
+        amc.metrics = ['mse']
+        amc.compile_options = {"optimizer": keras.optimizers.SGD(),
+                               "loss": keras.losses.mean_absolute_error}
+        # check duplicate (attr and dict)
+        assert isinstance(amc.optimizer, keras.optimizers.SGD)
+        assert isinstance(amc.compile_options["optimizer"], keras.optimizers.SGD)
+        # check setting by dict
+        assert amc.compile_options["loss"] == keras.losses.mean_absolute_error
+        # check setting by attr
+        assert amc.metrics == ['mse']
+        assert amc.compile_options["metrics"] == ['mse']
+        # check rest (all None as not set)
+        assert amc.compile_options["loss_weights"] is None
+        assert amc.compile_options["sample_weight_mode"] is None
+        assert amc.compile_options["target_tensors"] is None
+        assert amc.compile_options["weighted_metrics"] is None
+
+    def test_compile_options_setter_as_mix_attr_dict_valid_duplicates_none_optimizer(self, amc):
+        amc.optimizer = keras.optimizers.SGD()
+        amc.metrics = ['mse']
+        amc.compile_options = {"metrics": ['mse'],
+                               "loss": keras.losses.mean_absolute_error}
+        # check duplicate (attr and dict)
+        assert amc.metrics == ['mse']
+        assert amc.compile_options["metrics"] == ['mse']
+        # check setting by dict
+        assert amc.compile_options["loss"] == keras.losses.mean_absolute_error
+        # check setting by attr
+        assert isinstance(amc.optimizer, keras.optimizers.SGD)
+        assert isinstance(amc.compile_options["optimizer"], keras.optimizers.SGD)
+        # check rest (all None as not set)
+        assert amc.compile_options["loss_weights"] is None
+        assert amc.compile_options["sample_weight_mode"] is None
+        assert amc.compile_options["target_tensors"] is None
+        assert amc.compile_options["weighted_metrics"] is None
+
+    def test_compile_options_property_type_error(self, amc):
+        with pytest.raises(TypeError) as einfo:
+            amc.compile_options = 'hello world'
+        assert "`compile_options' must be `dict' or `None', but is <class 'str'>." in str(einfo.value)
+
+    def test_compile_options_setter_as_mix_attr_dict_invalid_duplicates_other_optimizer(self, amc):
+        amc.optimizer = keras.optimizers.SGD()
+        with pytest.raises(ValueError) as einfo:
+            amc.compile_options = {"optimizer": keras.optimizers.Adam()}
+        assert "Got different values or arguments for same argument: self.optimizer=<class" \
+               " 'keras.optimizers.SGD'> and 'optimizer': <class 'keras.optimizers.Adam'>" in str(einfo.value)
+
+    def test_compile_options_setter_as_mix_attr_dict_invalid_duplicates_same_optimizer_other_args(self, amc):
+        amc.optimizer = keras.optimizers.SGD(lr=0.1)
+        with pytest.raises(ValueError) as einfo:
+            amc.compile_options = {"optimizer": keras.optimizers.SGD(lr=0.001)}
+        assert "Got different values or arguments for same argument: self.optimizer=<class" \
+               " 'keras.optimizers.SGD'> and 'optimizer': <class 'keras.optimizers.SGD'>" in str(einfo.value)
+
+    def test_compile_options_setter_as_dict_invalid_keys(self, amc):
+        with pytest.raises(ValueError) as einfo:
+            amc.compile_options = {"optimizer": keras.optimizers.SGD(), "InvalidKeyword": [1, 2, 3]}
+        assert "Got invalid key for compile_options. dict_keys(['optimizer', 'InvalidKeyword'])" in str(einfo.value)
+
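+    # the tests below reach the private __compare_keras_optimizers helper via Python's name mangling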
+    def test_compare_keras_optimizers_equal(self, amc):
+        assert amc._AbstractModelClass__compare_keras_optimizers(keras.optimizers.SGD(), keras.optimizers.SGD()) is True
+
+    def test_compare_keras_optimizers_no_optimizer(self, amc):
+        assert amc._AbstractModelClass__compare_keras_optimizers('NoOptimizer', keras.optimizers.SGD()) is False
+
+    def test_compare_keras_optimizers_other_parameters_run_sess(self, amc):
+        assert amc._AbstractModelClass__compare_keras_optimizers(keras.optimizers.SGD(lr=0.1),
+                                                                 keras.optimizers.SGD(lr=0.01)) is False
+
+    def test_compare_keras_optimizers_other_parameters_none_sess(self, amc):
+        assert amc._AbstractModelClass__compare_keras_optimizers(keras.optimizers.SGD(decay=1),
+                                                                 keras.optimizers.SGD(decay=0.01)) is False
+
+    def test_getattr(self, amc):
+        amc.model = keras.Model()
+        assert hasattr(amc, "compile") is True
+        assert hasattr(amc.model, "compile") is True
+        assert amc.compile == amc.model.compile
+
+    def test_get_settings(self, amc, amsc):
+        assert amc.get_settings() == {"model_name": "AbstractModelClass", "_input_shape": (14, 1, 2),
+                                      "_output_shape": 3}
+        assert amsc.get_settings() == {"test_attr": "testAttr", "model_name": "AbstractModelSubClass",
+                                       "_input_shape": (12, 1, 2), "_output_shape": 3}
+
+    def test_custom_objects(self, amc):
+        amc.custom_objects = {"Test": 123}
+        assert amc.custom_objects == {"Test": 123}
+
+    def test_set_custom_objects(self, amc):
+        amc.set_custom_objects(Test=22, minor_param="minor")
+        assert amc.custom_objects == {"Test": 22, "minor_param": "minor"}
+        amc.set_custom_objects(Test=2, minor_param1="minor1")
+        assert amc.custom_objects == {"Test": 2, "minor_param1": "minor1"}
+        paddings = Paddings()
+        amc.set_custom_objects(Test=1, Padding2D=paddings)
+        assert amc.custom_objects == {"Test": 1, "Padding2D": paddings, "pad1": 34, "another_pad": True}
diff --git a/test/test_model_modules/test_model_class.py b/test/test_model_modules/test_model_class.py
index 28218eb60e23d6e5b0e361fc2617398aade799cc..cbff4cec6c5b002c3166954880e1008e7f4d7ae3 100644
--- a/test/test_model_modules/test_model_class.py
+++ b/test/test_model_modules/test_model_class.py
@@ -1,205 +1,9 @@
 import keras
 import pytest
 
-from mlair.model_modules.model_class import AbstractModelClass
 from mlair.model_modules.model_class import MyPaperModel
 
 
-class Paddings:
-    allowed_paddings = {"pad1": 34, "another_pad": True}
-
-
-class AbstractModelSubClass(AbstractModelClass):
-
-    def __init__(self):
-        super().__init__(input_shape=(12, 1, 2), output_shape=3)
-        self.test_attr = "testAttr"
-
-
-class TestAbstractModelClass:
-
-    @pytest.fixture
-    def amc(self):
-        return AbstractModelClass(input_shape=(14, 1, 2), output_shape=(3,))
-
-    @pytest.fixture
-    def amsc(self):
-        return AbstractModelSubClass()
-
-    def test_init(self, amc):
-        assert amc.model is None
-        # assert amc.loss is None
-        assert amc.model_name == "AbstractModelClass"
-        assert amc.custom_objects == {}
-        assert amc._input_shape == (14, 1, 2)
-        assert amc._output_shape == 3
-
-    def test_model_property(self, amc):
-        amc.model = keras.Model()
-        assert isinstance(amc.model, keras.Model) is True
-
-    # def test_loss_property(self, amc):
-    #     amc.loss = keras.losses.mean_absolute_error
-    #     assert amc.loss == keras.losses.mean_absolute_error
-
-    def test_compile_options_setter_all_empty(self, amc):
-        amc.compile_options = None
-        assert amc.compile_options == {'optimizer': None,
-                                       'loss': None,
-                                       'metrics': None,
-                                       'loss_weights': None,
-                                       'sample_weight_mode': None,
-                                       'weighted_metrics': None,
-                                       'target_tensors': None
-                                       }
-
-    def test_compile_options_setter_as_dict(self, amc):
-        amc.compile_options = {"optimizer": keras.optimizers.SGD(),
-                               "loss": keras.losses.mean_absolute_error,
-                               "metrics": ["mse", "mae"]}
-        assert isinstance(amc.compile_options["optimizer"], keras.optimizers.SGD)
-        assert amc.compile_options["loss"] == keras.losses.mean_absolute_error
-        assert amc.compile_options["metrics"] == ["mse", "mae"]
-        assert amc.compile_options["loss_weights"] is None
-        assert amc.compile_options["sample_weight_mode"] is None
-        assert amc.compile_options["target_tensors"] is None
-        assert amc.compile_options["weighted_metrics"] is None
-
-    def test_compile_options_setter_as_attr(self, amc):
-        amc.optimizer = keras.optimizers.SGD()
-        amc.loss = keras.losses.mean_absolute_error
-        amc.compile_options = None  # This line has to be called!
-        # optimizer check
-        assert isinstance(amc.optimizer, keras.optimizers.SGD)
-        assert isinstance(amc.compile_options["optimizer"], keras.optimizers.SGD)
-        # loss check
-        assert amc.loss == keras.losses.mean_absolute_error
-        assert amc.compile_options["loss"] == keras.losses.mean_absolute_error
-        # check rest (all None as not set)
-        assert amc.compile_options["metrics"] is None
-        assert amc.compile_options["loss_weights"] is None
-        assert amc.compile_options["sample_weight_mode"] is None
-        assert amc.compile_options["target_tensors"] is None
-        assert amc.compile_options["weighted_metrics"] is None
-
-    def test_compile_options_setter_as_mix_attr_dict_no_duplicates(self, amc):
-        amc.optimizer = keras.optimizers.SGD()
-        amc.compile_options = {"loss": keras.losses.mean_absolute_error,
-                               "loss_weights": [0.2, 0.8]}
-        # check setting by attribute
-        assert isinstance(amc.optimizer, keras.optimizers.SGD)
-        assert isinstance(amc.compile_options["optimizer"], keras.optimizers.SGD)
-        # check setting by dict
-        assert amc.compile_options["loss"] == keras.losses.mean_absolute_error
-        assert amc.compile_options["loss_weights"] == [0.2, 0.8]
-        # check rest (all None as not set)
-        assert amc.compile_options["metrics"] is None
-        assert amc.compile_options["sample_weight_mode"] is None
-        assert amc.compile_options["target_tensors"] is None
-        assert amc.compile_options["weighted_metrics"] is None
-
-    def test_compile_options_setter_as_mix_attr_dict_valid_duplicates_optimizer(self, amc):
-        amc.optimizer = keras.optimizers.SGD()
-        amc.metrics = ['mse']
-        amc.compile_options = {"optimizer": keras.optimizers.SGD(),
-                               "loss": keras.losses.mean_absolute_error}
-        # check duplicate (attr and dic)
-        assert isinstance(amc.optimizer, keras.optimizers.SGD)
-        assert isinstance(amc.compile_options["optimizer"], keras.optimizers.SGD)
-        # check setting by dict
-        assert amc.compile_options["loss"] == keras.losses.mean_absolute_error
-        # check setting by attr
-        assert amc.metrics == ['mse']
-        assert amc.compile_options["metrics"] == ['mse']
-        # check rest (all None as not set)
-        assert amc.compile_options["loss_weights"] is None
-        assert amc.compile_options["sample_weight_mode"] is None
-        assert amc.compile_options["target_tensors"] is None
-        assert amc.compile_options["weighted_metrics"] is None
-
-    def test_compile_options_setter_as_mix_attr_dict_valid_duplicates_none_optimizer(self, amc):
-        amc.optimizer = keras.optimizers.SGD()
-        amc.metrics = ['mse']
-        amc.compile_options = {"metrics": ['mse'],
-                               "loss": keras.losses.mean_absolute_error}
-        # check duplicate (attr and dic)
-        assert amc.metrics == ['mse']
-        assert amc.compile_options["metrics"] == ['mse']
-        # check setting by dict
-        assert amc.compile_options["loss"] == keras.losses.mean_absolute_error
-        # check setting by attr
-        assert isinstance(amc.optimizer, keras.optimizers.SGD)
-        assert isinstance(amc.compile_options["optimizer"], keras.optimizers.SGD)
-        # check rest (all None as not set)
-        assert amc.compile_options["loss_weights"] is None
-        assert amc.compile_options["sample_weight_mode"] is None
-        assert amc.compile_options["target_tensors"] is None
-        assert amc.compile_options["weighted_metrics"] is None
-
-    def test_compile_options_property_type_error(self, amc):
-        with pytest.raises(TypeError) as einfo:
-            amc.compile_options = 'hello world'
-        assert "`compile_options' must be `dict' or `None', but is <class 'str'>." in str(einfo.value)
-
-    def test_compile_options_setter_as_mix_attr_dict_invalid_duplicates_other_optimizer(self, amc):
-        amc.optimizer = keras.optimizers.SGD()
-        with pytest.raises(ValueError) as einfo:
-            amc.compile_options = {"optimizer": keras.optimizers.Adam()}
-        assert "Got different values or arguments for same argument: self.optimizer=<class" \
-               " 'keras.optimizers.SGD'> and 'optimizer': <class 'keras.optimizers.Adam'>" in str(einfo.value)
-
-    def test_compile_options_setter_as_mix_attr_dict_invalid_duplicates_same_optimizer_other_args(self, amc):
-        amc.optimizer = keras.optimizers.SGD(lr=0.1)
-        with pytest.raises(ValueError) as einfo:
-            amc.compile_options = {"optimizer": keras.optimizers.SGD(lr=0.001)}
-        assert "Got different values or arguments for same argument: self.optimizer=<class" \
-               " 'keras.optimizers.SGD'> and 'optimizer': <class 'keras.optimizers.SGD'>" in str(einfo.value)
-
-    def test_compile_options_setter_as_dict_invalid_keys(self, amc):
-        with pytest.raises(ValueError) as einfo:
-            amc.compile_options = {"optimizer": keras.optimizers.SGD(), "InvalidKeyword": [1, 2, 3]}
-        assert "Got invalid key for compile_options. dict_keys(['optimizer', 'InvalidKeyword'])" in str(einfo.value)
-
-    def test_compare_keras_optimizers_equal(self, amc):
-        assert amc._AbstractModelClass__compare_keras_optimizers(keras.optimizers.SGD(), keras.optimizers.SGD()) is True
-
-    def test_compare_keras_optimizers_no_optimizer(self, amc):
-        assert amc._AbstractModelClass__compare_keras_optimizers('NoOptimizer', keras.optimizers.SGD()) is False
-
-    def test_compare_keras_optimizers_other_parameters_run_sess(self, amc):
-        assert amc._AbstractModelClass__compare_keras_optimizers(keras.optimizers.SGD(lr=0.1),
-                                                                 keras.optimizers.SGD(lr=0.01)) is False
-
-    def test_compare_keras_optimizers_other_parameters_none_sess(self, amc):
-        assert amc._AbstractModelClass__compare_keras_optimizers(keras.optimizers.SGD(decay=1),
-                                                                 keras.optimizers.SGD(decay=0.01)) is False
-
-    def test_getattr(self, amc):
-        amc.model = keras.Model()
-        assert hasattr(amc, "compile") is True
-        assert hasattr(amc.model, "compile") is True
-        assert amc.compile == amc.model.compile
-
-    def test_get_settings(self, amc, amsc):
-        assert amc.get_settings() == {"model_name": "AbstractModelClass", "_input_shape": (14, 1, 2),
-                                      "_output_shape": 3}
-        assert amsc.get_settings() == {"test_attr": "testAttr", "model_name": "AbstractModelSubClass",
-                                       "_input_shape": (12, 1, 2), "_output_shape": 3}
-
-    def test_custom_objects(self, amc):
-        amc.custom_objects = {"Test": 123}
-        assert amc.custom_objects == {"Test": 123}
-
-    def test_set_custom_objects(self, amc):
-        amc.set_custom_objects(Test=22, minor_param="minor")
-        assert amc.custom_objects == {"Test": 22, "minor_param": "minor"}
-        amc.set_custom_objects(Test=2, minor_param1="minor1")
-        assert amc.custom_objects == {"Test": 2, "minor_param1": "minor1"}
-        paddings = Paddings()
-        amc.set_custom_objects(Test=1, Padding2D=paddings)
-        assert amc.custom_objects == {"Test": 1, "Padding2D": paddings, "pad1": 34, "another_pad": True}
-
-
 class TestMyPaperModel:
 
     @pytest.fixture
diff --git a/test/test_run_modules/test_model_setup.py b/test/test_run_modules/test_model_setup.py
index 382105344dfb9fffb37215f2706dda1f2ebd90ea..8a7572148869537b505b2bd8e7f16cfdf7af1cdd 100644
--- a/test/test_run_modules/test_model_setup.py
+++ b/test/test_run_modules/test_model_setup.py
@@ -8,7 +8,8 @@ from mlair.data_handler import KerasIterator
 from mlair.data_handler import DataCollection
 from mlair.helpers.datastore import EmptyScope
 from mlair.model_modules.keras_extensions import CallbackHandler
-from mlair.model_modules.model_class import AbstractModelClass, MyLittleModel
+from mlair.model_modules.fully_connected_networks import FCN_64_32_16
+from mlair.model_modules import AbstractModelClass
 from mlair.run_modules.model_setup import ModelSetup
 from mlair.run_modules.run_environment import RunEnvironment
 
@@ -22,7 +23,7 @@ class TestModelSetup:
         obj.scope = "general.model"
         obj.model = None
         obj.callbacks_name = "placeholder_%s_str.pickle"
-        obj.data_store.set("model_class", MyLittleModel)
+        obj.data_store.set("model_class", FCN_64_32_16)
         obj.data_store.set("lr_decay", "dummy_str", "general.model")
         obj.data_store.set("hist", "dummy_str", "general.model")
         obj.data_store.set("epochs", 2)
@@ -102,8 +103,7 @@ class TestModelSetup:
         assert setup_with_gen.model is None
         setup_with_gen.build_model()
         assert isinstance(setup_with_gen.model, AbstractModelClass)
-        expected = {"lr_decay", "model_name", "dropout_rate", "regularizer", "initial_lr", "optimizer", "activation",
-                    "input_shape", "output_shape"}
+        expected = {"lr_decay", "model_name", "optimizer", "activation", "input_shape", "output_shape"}
         assert expected <= self.current_scope_as_set(setup_with_gen)
 
     def test_set_shapes(self, setup_with_gen_tiny):
diff --git a/test/test_run_modules/test_pre_processing.py b/test/test_run_modules/test_pre_processing.py
index b5a1914e6b2aacd238f244d304184d9754326db7..5ae64bf3d535e72d9361394741ed8b8094091b1d 100644
--- a/test/test_run_modules/test_pre_processing.py
+++ b/test/test_run_modules/test_pre_processing.py
@@ -140,40 +140,28 @@ class TestPreProcessing:
         data_preparation = AbstractDataHandler
         stations = ['DEBW107', 'DEBY081']
         assert pre.transformation(data_preparation, stations) is None
+
         class data_preparation_no_trans: pass
+
         assert pre.transformation(data_preparation_no_trans, stations) is None
 
-    @pytest.fixture
-    def dummy_df(self):
-        data_dict = {'station_name': {'DEBW013': 'Stuttgart Bad Cannstatt', 'DEBW076': 'Baden-Baden',
-                                      'DEBW087': 'Schwäbische_Alb', 'DEBW107': 'Tübingen',
-                                      'DEBY081': 'Garmisch-Partenkirchen/Kreuzeckbahnstraße', '# Stations': np.nan,
-                                      '# Samples': np.nan},
-                     'station_lon': {'DEBW013': 9.2297, 'DEBW076': 8.2202, 'DEBW087': 9.2076, 'DEBW107': 9.0512,
-                                     'DEBY081': 11.0631, '# Stations': np.nan, '# Samples': np.nan},
-                     'station_lat': {'DEBW013': 48.8088, 'DEBW076': 48.7731, 'DEBW087': 48.3458, 'DEBW107': 48.5077,
-                                     'DEBY081': 47.4764, '# Stations': np.nan, '# Samples': np.nan},
-                     'station_alt': {'DEBW013': 235.0, 'DEBW076': 148.0, 'DEBW087': 798.0, 'DEBW107': 325.0,
-                                     'DEBY081': 735.0, '# Stations': np.nan, '# Samples': np.nan},
-                     'train': {'DEBW013': 1413, 'DEBW076': 3002, 'DEBW087': 3016, 'DEBW107': 1782, 'DEBY081': 2837,
-                               '# Stations': 6, '# Samples': 12050},
-                     'val': {'DEBW013': 698, 'DEBW076': 715, 'DEBW087': 700, 'DEBW107': 701, 'DEBY081': 456,
-                             '# Stations': 6, '# Samples': 3270},
-                     'test': {'DEBW013': 1066, 'DEBW076': 696, 'DEBW087': 1080, 'DEBW107': 1080, 'DEBY081': 700,
-                              '# Stations': 6, '# Samples': 4622}}
-        df = pd.DataFrame.from_dict(data_dict)
-        return df
-
-    def test_create_column_format_for_tex(self):
-        df = pd.DataFrame(np.ones((2, 1)))
-        df_col = PreProcessing.create_column_format_for_tex(df)  # len: 1+1
-        assert df_col == 'lr'
-        assert len(df_col) == 2
-        df = pd.DataFrame(np.ones((2, 2)))
-        df_col = PreProcessing.create_column_format_for_tex(df)  # len: 2+1
-        assert df_col == 'lcr'
-        assert len(df_col) == 3
-        df = pd.DataFrame(np.ones((2, 3)))
-        df_col = PreProcessing.create_column_format_for_tex(df) # len: 3+1
-        assert df_col == 'lccr'
-        assert len(df_col) == 4
+    # @pytest.fixture
+    # def dummy_df(self):
+    #     data_dict = {'station_name': {'DEBW013': 'Stuttgart Bad Cannstatt', 'DEBW076': 'Baden-Baden',
+    #                                   'DEBW087': 'Schwäbische_Alb', 'DEBW107': 'Tübingen',
+    #                                   'DEBY081': 'Garmisch-Partenkirchen/Kreuzeckbahnstraße', '# Stations': np.nan,
+    #                                   '# Samples': np.nan},
+    #                  'station_lon': {'DEBW013': 9.2297, 'DEBW076': 8.2202, 'DEBW087': 9.2076, 'DEBW107': 9.0512,
+    #                                  'DEBY081': 11.0631, '# Stations': np.nan, '# Samples': np.nan},
+    #                  'station_lat': {'DEBW013': 48.8088, 'DEBW076': 48.7731, 'DEBW087': 48.3458, 'DEBW107': 48.5077,
+    #                                  'DEBY081': 47.4764, '# Stations': np.nan, '# Samples': np.nan},
+    #                  'station_alt': {'DEBW013': 235.0, 'DEBW076': 148.0, 'DEBW087': 798.0, 'DEBW107': 325.0,
+    #                                  'DEBY081': 735.0, '# Stations': np.nan, '# Samples': np.nan},
+    #                  'train': {'DEBW013': 1413, 'DEBW076': 3002, 'DEBW087': 3016, 'DEBW107': 1782, 'DEBY081': 2837,
+    #                            '# Stations': 6, '# Samples': 12050},
+    #                  'val': {'DEBW013': 698, 'DEBW076': 715, 'DEBW087': 700, 'DEBW107': 701, 'DEBY081': 456,
+    #                          '# Stations': 6, '# Samples': 3270},
+    #                  'test': {'DEBW013': 1066, 'DEBW076': 696, 'DEBW087': 1080, 'DEBW107': 1080, 'DEBY081': 700,
+    #                           '# Stations': 6, '# Samples': 4622}}
+    #     df = pd.DataFrame.from_dict(data_dict)
+    #     return df