diff --git a/mlair/helpers/join.py b/mlair/helpers/join.py
index 8a8ca0b8c964268aa6043312cd1cc88bc0d50544..93cb0e7b1b34d1ebc13b914ac9626fb4466a7201 100644
--- a/mlair/helpers/join.py
+++ b/mlair/helpers/join.py
@@ -8,6 +8,8 @@ from typing import Iterator, Union, List, Dict
 
 import pandas as pd
 import requests
+from requests.adapters import HTTPAdapter
+from urllib3.util.retry import Retry
 
 from mlair import helpers
 from mlair.configuration.join_settings import join_settings
@@ -129,13 +131,25 @@ def get_data(opts: Dict, headers: Dict) -> Union[Dict, List]:
     :return: requested data (either as list or dictionary)
     """
     url = create_url(**opts)
-    response = requests.get(url, headers=headers)
+    response = retries_session().get(url, headers=headers, timeout=(5, None))  # timeout=(connect, read); no read limit
     if response.status_code == 200:
         return response.json()
     else:
         raise EmptyQueryResult(f"There was an error (STATUS {response.status_code}) for request {url}")
 
 
+def retries_session(max_retries=3):
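+    """Return a `requests.Session` that retries transient failures.
+
+    Responses with status 429/500/502/503/504 on idempotent methods are retried up to
+    `max_retries` times with exponential backoff. Note: `method_whitelist` was renamed
+    to `allowed_methods` in urllib3 1.26.
+    """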
+    retry_strategy = Retry(total=max_retries,
+                           backoff_factor=0.1,
+                           status_forcelist=[429, 500, 502, 503, 504],
+                           method_whitelist=["HEAD", "GET", "OPTIONS"])
+    adapter = HTTPAdapter(max_retries=retry_strategy)
+    http = requests.Session()
+    http.mount("https://", adapter)
+    http.mount("http://", adapter)
+    return http
+
+
 def load_series_information(station_name: List[str], station_type: str_or_none, network_name: str_or_none,
                             join_url_base: str, headers: Dict, data_origin: Dict = None) -> [Dict, Dict]:
     """
diff --git a/mlair/model_modules/convolutional_networks.py b/mlair/model_modules/convolutional_networks.py
new file mode 100644
index 0000000000000000000000000000000000000000..624cfa097a2ce562e9e2d2ae698a1e84bdef7309
--- /dev/null
+++ b/mlair/model_modules/convolutional_networks.py
@@ -0,0 +1,129 @@
+__author__ = "Lukas Leufen"
+__date__ = '2021-02-'
+
+from functools import reduce, partial
+
+from mlair.model_modules import AbstractModelClass
+from mlair.helpers import select_from_dict
+from mlair.model_modules.loss import var_loss, custom_loss
+from mlair.model_modules.advanced_paddings import PadUtils, Padding2D, SymmetricPadding2D
+
+import keras
+
+
+class CNN(AbstractModelClass):
+
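+    # lookup tables mapping configuration strings to the corresponding keras classes / factories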
+    _activation = {"relu": keras.layers.ReLU, "tanh": partial(keras.layers.Activation, "tanh"),
+                   "sigmoid": partial(keras.layers.Activation, "sigmoid"),
+                   "linear": partial(keras.layers.Activation, "linear"),
+                   "selu": partial(keras.layers.Activation, "selu"),
+                   "prelu": partial(keras.layers.PReLU, alpha_initializer=keras.initializers.constant(value=0.25))}
+    _initializer = {"tanh": "glorot_uniform", "sigmoid": "glorot_uniform", "linear": "glorot_uniform",
+                    "relu": keras.initializers.he_normal(), "selu": keras.initializers.lecun_normal(),
+                    "prelu": keras.initializers.he_normal()}
+    _optimizer = {"adam": keras.optimizers.adam, "sgd": keras.optimizers.SGD}
+    _regularizer = {"l1": keras.regularizers.l1, "l2": keras.regularizers.l2, "l1_l2": keras.regularizers.l1_l2}
+    _requirements = ["lr", "beta_1", "beta_2", "epsilon", "decay", "amsgrad", "momentum", "nesterov", "l1", "l2"]
+    _dropout = {"selu": keras.layers.AlphaDropout}
+
+    def __init__(self, input_shape: list, output_shape: list, activation="relu", activation_output="linear",
+                 optimizer="adam", regularizer=None, kernel_size=1, dropout=None, **kwargs):
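+        """Resolve activation, kernel initializer, optimizer, regularizer and dropout from the
+        class-level lookup tables, then build the model and set its compile options. Note that
+        `set_model` currently uses fixed kernel sizes, so `kernel_size` is stored but unused."""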
+
+        assert len(input_shape) == 1
+        assert len(output_shape) == 1
+        super().__init__(input_shape[0], output_shape[0])
+
+        # settings
+        self.activation = self._set_activation(activation)
+        self.activation_name = activation
+        self.activation_output = self._set_activation(activation_output)
+        self.activation_output_name = activation_output
+        self.kernel_initializer = self._initializer.get(activation, "glorot_uniform")
+        self.kernel_regularizer = self._set_regularizer(regularizer, **kwargs)
+        self.kernel_size = kernel_size
+        self.optimizer = self._set_optimizer(optimizer, **kwargs)
+        self.dropout, self.dropout_rate = self._set_dropout(activation, dropout)
+
+        # apply to model
+        self.set_model()
+        self.set_compile_options()
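+        # register the custom loss components so that stored models can be reloaded later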
+        self.set_custom_objects(loss=custom_loss([keras.losses.mean_squared_error, var_loss]), var_loss=var_loss)
+
+    def _set_activation(self, activation):
+        try:
+            return self._activation[activation.lower()]
+        except KeyError:
+            raise AttributeError(f"Given activation {activation} is not supported in this model class.")
+
+    def _set_optimizer(self, optimizer, **kwargs):
+        try:
+            opt_name = optimizer.lower()
+            opt = self._optimizer[opt_name]
+            opt_kwargs = {}
+            if opt_name == "adam":
+                opt_kwargs = select_from_dict(kwargs, ["lr", "beta_1", "beta_2", "epsilon", "decay", "amsgrad"])
+            elif opt_name == "sgd":
+                opt_kwargs = select_from_dict(kwargs, ["lr", "momentum", "decay", "nesterov"])
+            return opt(**opt_kwargs)
+        except KeyError:
+            raise AttributeError(f"Given optimizer {optimizer} is not supported in this model class.")
+
+    def _set_regularizer(self, regularizer, **kwargs):
+        if regularizer is None or (isinstance(regularizer, str) and regularizer.lower() == "none"):
+            return None
+        try:
+            reg_name = regularizer.lower()
+            reg = self._regularizer[reg_name]
+            reg_kwargs = {}
+            if reg_name in ["l1", "l2"]:
+                reg_kwargs = select_from_dict(kwargs, reg_name, remove_none=True)
+                if reg_name in reg_kwargs:
+                    reg_kwargs["l"] = reg_kwargs.pop(reg_name)
+            elif reg_name == "l1_l2":
+                reg_kwargs = select_from_dict(kwargs, ["l1", "l2"], remove_none=True)
+            return reg(**reg_kwargs)
+        except KeyError:
+            raise AttributeError(f"Given regularizer {regularizer} is not supported in this model class.")
+
+    def _set_dropout(self, activation, dropout_rate):
+        if dropout_rate is None:
+            return None, None
+        assert 0 <= dropout_rate < 1
+        return self._dropout.get(activation, keras.layers.Dropout), dropout_rate
+
+    def set_model(self):
+        """
+        Build the model: three Conv2D layers (with optional dropout), max pooling, and a three-layer dense head.
+        """
+        x_input = keras.layers.Input(shape=self._input_shape)
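+        # convolution tower: kernels of shape (k, 1) slide along the first input axis only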
+        x_in = keras.layers.Conv2D(filters=16, kernel_size=(73, 1),
+                                   kernel_initializer=self.kernel_initializer,
+                                   kernel_regularizer=self.kernel_regularizer)(x_input)
+        x_in = self.activation()(x_in)
+        x_in = keras.layers.Conv2D(filters=32, kernel_size=(49, 1),
+                                   kernel_initializer=self.kernel_initializer,
+                                   kernel_regularizer=self.kernel_regularizer)(x_in)
+        x_in = self.activation()(x_in)
+        if self.dropout is not None:
+            x_in = self.dropout(self.dropout_rate)(x_in)
+        x_in = keras.layers.MaxPooling2D((25, 1), strides=(1, 1), padding='valid')(x_in)
+        x_in = keras.layers.Conv2D(filters=64, kernel_size=(13, 1),
+                                   kernel_initializer=self.kernel_initializer,
+                                   kernel_regularizer=self.kernel_regularizer)(x_in)
+        x_in = self.activation()(x_in)
+        if self.dropout is not None:
+            x_in = self.dropout(self.dropout_rate)(x_in)
+        x_in = keras.layers.Flatten()(x_in)
+        x_in = keras.layers.Dense(128, kernel_initializer=self.kernel_initializer,
+                                  kernel_regularizer=self.kernel_regularizer)(x_in)
+        x_in = self.activation()(x_in)
+        x_in = keras.layers.Dense(32, kernel_initializer=self.kernel_initializer,
+                                  kernel_regularizer=self.kernel_regularizer)(x_in)
+        x_in = self.activation()(x_in)
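+        # linear projection to the output dimension, followed by the configured output activation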
+        x_in = keras.layers.Dense(self._output_shape)(x_in)
+        out = self.activation_output(name=f"{self.activation_output_name}_output")(x_in)
+        self.model = keras.Model(inputs=x_input, outputs=[out])
+
+    def set_compile_options(self):
+        self.compile_options = {"loss": [custom_loss([keras.losses.mean_squared_error, var_loss])],
+                                "metrics": ["mse", "mae", var_loss]}
diff --git a/mlair/run_modules/model_setup.py b/mlair/run_modules/model_setup.py
index 5dd73d50f711387a65a9bc7e4daa7c1d430bfb26..8fae430fb48a28bdd8b21f8bfcfc7c569eb24f6c 100644
--- a/mlair/run_modules/model_setup.py
+++ b/mlair/run_modules/model_setup.py
@@ -6,6 +6,7 @@ __date__ = '2019-12-02'
 import logging
 import os
 import re
+from dill.source import getsource
 
 import keras
 import pandas as pd
@@ -57,12 +58,12 @@ class ModelSetup(RunEnvironment):
         super().__init__()
         self.model = None
         exp_name = self.data_store.get("experiment_name")
-        path = self.data_store.get("model_path")
+        self.path = self.data_store.get("model_path")
         self.scope = "model"
-        self.path = os.path.join(path, f"{exp_name}_%s")
-        self.model_name = self.path % "%s.h5"
-        self.checkpoint_name = self.path % "model-best.h5"
-        self.callbacks_name = self.path % "model-best-callbacks-%s.pickle"
+        path = os.path.join(self.path, f"{exp_name}_%s")
+        self.model_name = path % "%s.h5"
+        self.checkpoint_name = path % "model-best.h5"
+        self.callbacks_name = path % "model-best-callbacks-%s.pickle"
         self._train_model = self.data_store.get("train_model")
         self._create_new_model = self.data_store.get("create_new_model")
         self._run()
@@ -167,6 +168,7 @@ class ModelSetup(RunEnvironment):
             keras.utils.plot_model(self.model, to_file=file_name, show_shapes=True, show_layer_names=True)
 
     def report_model(self):
+        # report model settings
         model_settings = self.model.get_settings()
         model_settings.update(self.model.compile_options)
         model_settings.update(self.model.optimizer.get_config())
@@ -179,17 +181,23 @@ class ModelSetup(RunEnvironment):
             if "<" in str(v):
                 v = self._clean_name(str(v))
             df.loc[k] = str(v)
+        df.loc["count params"] = str(self.model.count_params())
         df.sort_index(inplace=True)
         column_format = "ll"
         path = os.path.join(self.data_store.get("experiment_path"), "latex_report")
         path_config.check_path_and_create(path)
-        df.to_latex(os.path.join(path, "model_settings.tex"), na_rep='---', column_format=column_format)
-        df.to_markdown(open(os.path.join(path, "model_settings.md"), mode="w", encoding='utf-8'),
-                       tablefmt="github")
+        for p in [path, self.path]:  # write the report to both the latex_report and the model directory
+            df.to_latex(os.path.join(p, "model_settings.tex"), na_rep='---', column_format=column_format)
+            with open(os.path.join(p, "model_settings.md"), mode="w", encoding='utf-8') as fh:
+                df.to_markdown(fh, tablefmt="github")
+        # report model summary to file
+        with open(os.path.join(self.path, "model_summary.txt"), "w") as fh:
+            self.model.summary(print_fn=lambda x: fh.write(x + "\n"))
+        # print model code to file
+        with open(os.path.join(self.path, "model_code.txt"), "w") as fh:
+            fh.write(getsource(self.data_store.get("model_class")))
 
     @staticmethod
     def _clean_name(orig_name: str):
         mod_name = re.sub(r'^{0}'.format(re.escape("<")), '', orig_name).replace("'", "").split(" ")
         mod_name = mod_name[1] if any(map(lambda x: x in mod_name[0], ["class", "function", "method"])) else mod_name[0]
         return mod_name[:-1] if mod_name[-1] == ">" else mod_name
-