diff --git a/run.py b/run.py
index 1d17b4c613850464f52745811292cba58ce5de30..6d9f1018f6eae671f36c3bac7443c902b35a3270 100644
--- a/run.py
+++ b/run.py
@@ -3,7 +3,6 @@ __date__ = '2019-11-14'
 
 
 import argparse
-import logging
 
 from src.run_modules.experiment_setup import ExperimentSetup
 from src.run_modules.model_setup import ModelSetup
@@ -29,10 +28,6 @@ def main(parser_args):
 
 if __name__ == "__main__":
 
-    formatter = '%(asctime)s - %(levelname)s: %(message)s  [%(filename)s:%(funcName)s:%(lineno)s]'
-    logging.basicConfig(format=formatter, level=logging.INFO)
-    # logging.basicConfig(format=formatter, level=logging.DEBUG)
-
     parser = argparse.ArgumentParser()
     parser.add_argument('--experiment_date', metavar='--exp_date', type=str, default=None,
                         help="set experiment date as string")
diff --git a/run_hourly.py b/run_hourly.py
index af531aedbd275b133a087777334dba0ae24bd9c8..3c3135c46df9875633499bd17b237a23cdf6be55 100644
--- a/run_hourly.py
+++ b/run_hourly.py
@@ -29,10 +29,6 @@ def main(parser_args):
 
 if __name__ == "__main__":
 
-    formatter = '%(asctime)s - %(levelname)s: %(message)s  [%(filename)s:%(funcName)s:%(lineno)s]'
-    logging.basicConfig(format=formatter, level=logging.INFO)
-    # logging.basicConfig(format=formatter, level=logging.DEBUG)
-
     parser = argparse.ArgumentParser()
     parser.add_argument('--experiment_date', metavar='--exp_date', type=str, default=None,
                         help="set experiment date as string")
diff --git a/src/data_handling/data_distributor.py b/src/data_handling/data_distributor.py
index b1624410e746ab779b20a60d6a7d19b4ae3b1267..e8c6044280799ded080ab4bff3627aeb9ffde2db 100644
--- a/src/data_handling/data_distributor.py
+++ b/src/data_handling/data_distributor.py
@@ -8,15 +8,18 @@ import math
 import keras
 import numpy as np
 
+from src.data_handling.data_generator import DataGenerator
+
 
 class Distributor(keras.utils.Sequence):
 
-    def __init__(self, generator: keras.utils.Sequence, model: keras.models, batch_size: int = 256,
-                 permute_data: bool = False):
+    def __init__(self, generator: DataGenerator, model: keras.models.Model, batch_size: int = 256,
+                 permute_data: bool = False, upsampling: bool = False):
         self.generator = generator
         self.model = model
         self.batch_size = batch_size
         self.do_data_permutation = permute_data
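+        # if enabled, the extreme-value copies provided by the generator (see
+        # DataPrep.multiply_extremes) are appended to the data before batching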
+        self.upsampling = upsampling
 
     def _get_model_rank(self):
         mod_out = self.model.output_shape
@@ -31,7 +34,7 @@ class Distributor(keras.utils.Sequence):
         return mod_rank
 
     def _get_number_of_mini_batches(self, values):
-        return math.ceil(values[0].shape[0] / self.batch_size)
+        return math.ceil(values.shape[0] / self.batch_size)
 
     def _permute_data(self, x, y):
         """
@@ -48,10 +51,18 @@ class Distributor(keras.utils.Sequence):
             for k, v in enumerate(self.generator):
                 # get rank of output
                 mod_rank = self._get_model_rank()
-                # get number of mini batches
-                num_mini_batches = self._get_number_of_mini_batches(v)
+                # get data
                 x_total = np.copy(v[0])
                 y_total = np.copy(v[1])
+                if self.upsampling:
+                    try:
+                        s = self.generator.get_data_generator(k)
+                        x_total = np.concatenate([x_total, np.copy(s.get_extremes_history())], axis=0)
+                        y_total = np.concatenate([y_total, np.copy(s.get_extremes_label())], axis=0)
+                    except AttributeError:  # no extremes history / labels available, copy will fail
+                        pass
+                # get number of mini batches
+                num_mini_batches = self._get_number_of_mini_batches(x_total)
                 # permute order for mini-batches
                 x_total, y_total = self._permute_data(x_total, y_total)
                 for prev, curr in enumerate(range(1, num_mini_batches+1)):
diff --git a/src/data_handling/data_generator.py b/src/data_handling/data_generator.py
index 24c9ada65b4bfd71de12785b2714cc5de94dc21f..0bf0bc35344ecf0f040f5563ddbdbe291b64404d 100644
--- a/src/data_handling/data_generator.py
+++ b/src/data_handling/data_generator.py
@@ -14,6 +14,9 @@ from src import helpers
 from src.data_handling.data_preparation import DataPrep
 from src.join import EmptyQueryResult
 
+number = Union[float, int]
+num_or_list = Union[number, List[number]]
+
 
 class DataGenerator(keras.utils.Sequence):
     """
@@ -27,7 +30,7 @@ class DataGenerator(keras.utils.Sequence):
     def __init__(self, data_path: str, network: str, stations: Union[str, List[str]], variables: List[str],
                  interpolate_dim: str, target_dim: str, target_var: str, station_type: str = None,
                  interpolate_method: str = "linear", limit_nan_fill: int = 1, window_history_size: int = 7,
-                 window_lead_time: int = 4, transformation: Dict = None, **kwargs):
+                 window_lead_time: int = 4, transformation: Dict = None, extreme_values: num_or_list = None, **kwargs):
         self.data_path = os.path.abspath(data_path)
         self.data_path_tmp = os.path.join(os.path.abspath(data_path), "tmp")
         if not os.path.exists(self.data_path_tmp):
@@ -43,6 +46,7 @@ class DataGenerator(keras.utils.Sequence):
         self.limit_nan_fill = limit_nan_fill
         self.window_history_size = window_history_size
         self.window_lead_time = window_lead_time
+        self.extreme_values = extreme_values
         self.kwargs = kwargs
         self.transformation = self.setup_transformation(transformation)
 
@@ -188,6 +192,9 @@ class DataGenerator(keras.utils.Sequence):
             data.make_labels(self.target_dim, self.target_var, self.interpolate_dim, self.window_lead_time)
             data.make_observation(self.target_dim, self.target_var, self.interpolate_dim)
             data.remove_nan(self.interpolate_dim)
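+            # upsampling support: extract extremes and store them in data.extremes_history / data.extremes_label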
+            if self.extreme_values:
+                kwargs = {"extremes_on_right_tail_only": self.kwargs.get("extremes_on_right_tail_only", False)}
+                data.multiply_extremes(self.extreme_values, **kwargs)
             if save_local_tmp_storage:
                 self._save_pickle_data(data)
         return data
diff --git a/src/data_handling/data_preparation.py b/src/data_handling/data_preparation.py
index 3fae09306ab65d18f19d770b525cdc2296215bcd..490d661195aa017113f705da7b2e1e896e55fdc1 100644
--- a/src/data_handling/data_preparation.py
+++ b/src/data_handling/data_preparation.py
@@ -5,7 +5,7 @@ import datetime as dt
 from functools import reduce
 import logging
 import os
-from typing import Union, List, Iterable
+from typing import Union, List, Iterable, Tuple
 
 import numpy as np
 import pandas as pd
@@ -17,6 +17,8 @@ from src import statistics
 # define a more general date type for type hinting
 date = Union[dt.date, dt.datetime]
 str_or_list = Union[str, List[str]]
+number = Union[float, int]
+num_or_list = Union[number, List[number]]
 
 
 class DataPrep(object):
@@ -58,6 +60,8 @@ class DataPrep(object):
         self.history = None
         self.label = None
         self.observation = None
+        self.extremes_history = None
+        self.extremes_label = None
         self.kwargs = kwargs
         self.data = None
         self.meta = None
@@ -353,7 +357,8 @@ class DataPrep(object):
             non_nan_observation = self.observation.dropna(dim=dim)
             intersect = reduce(np.intersect1d, (non_nan_history.coords[dim].values, non_nan_label.coords[dim].values, non_nan_observation.coords[dim].values))
 
-        if len(intersect) == 0:
+        min_length = self.kwargs.get("min_length", 0)
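+        # discard history, label and observation entirely if the remaining overlap is shorter than min_length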
+        if len(intersect) < max(min_length, 1):
             self.history = None
             self.label = None
             self.observation = None
@@ -419,6 +424,67 @@ class DataPrep(object):
     def get_transposed_label(self):
         return self.label.squeeze("Stations").transpose("datetime", "window").copy()
 
+    def get_extremes_history(self):
+        return self.extremes_history.transpose("datetime", "window", "Stations", "variables").copy()
+
+    def get_extremes_label(self):
+        return self.extremes_label.squeeze("Stations").transpose("datetime", "window").copy()
+
+    def multiply_extremes(self, extreme_values: num_or_list = 1., extremes_on_right_tail_only: bool = False,
+                          timedelta: Tuple[int, str] = (1, 'm')):
+        """
+        This method extracts extreme values from self.labels which are defined in the argument extreme_values. One can
+        also decide only to extract extremes on the right tail of the distribution. When extreme_values is a list of
+        floats/ints all values larger (and smaller than negative extreme_values; extraction is performed in standardised
+        space) than are extracted iteratively. If for example extreme_values = [1.,2.] then a value of 1.5 would be
+        extracted once (for 0th entry in list), while a 2.5 would be extracted twice (once for each entry). Timedelta is
+        used to mark those extracted values by adding one min to each timestamp. As TOAR Data are hourly one can
+        identify those "artificial" data points later easily. Extreme inputs and labels are stored in
+        self.extremes_history and self.extreme_labels, respectively.
+
+        :param extreme_values: user definition of extreme
+        :param extremes_on_right_tail_only: if False also multiply values which are smaller then -extreme_values,
+            if True only extract values larger than extreme_values
+        :param timedelta: used as arguments for np.timedelta in order to mark extreme values on datetime
+        """
+        # check type of inputs
+        extreme_values = helpers.to_list(extreme_values)
+        for i in extreme_values:
+            if not isinstance(i, number.__args__):
+                raise TypeError(f"Elements of list extreme_values have to be {number.__args__}, but at least element "
+                                f"{i} is type {type(i)}")
+
+        for extr_val in sorted(extreme_values):
+            # check if some extreme values are already extracted
+            if (self.extremes_label is None) or (self.extremes_history is None):
+                # extract extremes based on occurrence in labels
+                if extremes_on_right_tail_only:
+                    extreme_label_idx = (self.label > extr_val).any(axis=0).values.reshape(-1,)
+                else:
+                    extreme_label_idx = np.concatenate(((self.label < -extr_val).any(axis=0).values.reshape(-1, 1),
+                                                        (self.label > extr_val).any(axis=0).values.reshape(-1, 1)),
+                                                       axis=1).any(axis=1)
+                extremes_label = self.label[..., extreme_label_idx]
+                extremes_history = self.history[..., extreme_label_idx, :]
+                extremes_label.datetime.values += np.timedelta64(*timedelta)
+                extremes_history.datetime.values += np.timedelta64(*timedelta)
+                self.extremes_label = extremes_label
+                self.extremes_history = extremes_history
+            else:  # at least one extreme-value iteration is already done: self.extremes_label is not None
+                if extremes_on_right_tail_only:
+                    extreme_label_idx = (self.extremes_label > extr_val).any(axis=0).values.reshape(-1, )
+                else:
+                    extreme_label_idx = np.concatenate(((self.extremes_label < -extr_val).any(axis=0).values.reshape(-1, 1),
+                                                        (self.extremes_label > extr_val).any(axis=0).values.reshape(-1, 1)
+                                                        ), axis=1).any(axis=1)
+                # compare against the already extracted extremes to minimise computational costs
+                extremes_label = self.extremes_label[..., extreme_label_idx]
+                extremes_history = self.extremes_history[..., extreme_label_idx, :]
+                extremes_label.datetime.values += np.timedelta64(*timedelta)
+                extremes_history.datetime.values += np.timedelta64(*timedelta)
+                self.extremes_label = xr.concat([self.extremes_label, extremes_label], dim='datetime')
+                self.extremes_history = xr.concat([self.extremes_history, extremes_history], dim='datetime')
+
 
 if __name__ == "__main__":
     dp = DataPrep('data/', 'dummy', 'DEBW107', ['o3', 'temp'], statistics_per_var={'o3': 'dma8eu', 'temp': 'maximum'})
diff --git a/src/helpers.py b/src/helpers.py
index 073a7bbf9ae3b7041591d48e4e5b7f3ef0efae42..6e9d47d1040aa803358cb60439197fd48641e9e1 100644
--- a/src/helpers.py
+++ b/src/helpers.py
@@ -4,12 +4,13 @@ __author__ = 'Lukas Leufen, Felix Kleinert'
 __date__ = '2019-10-21'
 
 
+import datetime as dt
 import logging
 import math
 import os
-import time
 import socket
-import datetime as dt
+import sys
+import time
 
 import keras.backend as K
 import xarray as xr
@@ -218,3 +219,59 @@ def list_pop(list_full: list, pop_items):
 def dict_pop(dict_orig: Dict, pop_keys):
     pop_keys = to_list(pop_keys)
     return {k: v for k, v in dict_orig.items() if k not in pop_keys}
+
+
+class Logger:
+    """
+    Basic logger class to unify all logging outputs. Logs are written to a local file and streamed to stdout. By
+    default, the file logger uses level DEBUG and the stream logger uses level INFO. The class must be imported and
+    initialised in the starting script; all other modules simply log via logging.info(), logging.debug(), etc.
+    """
+
+    def __init__(self, log_path=None, level_file=logging.DEBUG, level_stream=logging.INFO):
+
+        # define shared logger format
+        self.formatter = '%(asctime)s - %(levelname)s: %(message)s  [%(filename)s:%(funcName)s:%(lineno)s]'
+
+        # set log path
+        self.log_file = self.setup_logging_path(log_path)
+        # set root logger as file handler
+        logging.basicConfig(level=level_file,
+                            format=self.formatter,
+                            filename=self.log_file,
+                            filemode='a')
+        # add stream handler to the root logger
+        logging.getLogger('').addHandler(self.logger_console(level_stream))
+
+    @staticmethod
+    def setup_logging_path(path: str = None):
+        """
+        Check if the given path exists and create it if not. If path is None, use the directory of the main script.
+        The logging file is named `logging_<runtime>.log`, where runtime is the `%Y-%m-%d_%H-%M-%S` timestamp of the
+        current run.
+        :param path: path to the logfile
+        :return: path of the logfile
+        """
+        if not path:  # set default path
+            path = os.path.dirname(sys.modules["__main__"].__file__)
+            path = os.path.join(path, "logging")
+        if not os.path.exists(path):
+            os.makedirs(path)
+        runtime = time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime())
+        log_file = os.path.join(path, f'logging_{runtime}.log')
+        return log_file
+
+    def logger_console(self, level: int):
+        """
+        Define a stream handler that writes messages of the given level or higher to stdout.
+        :param level: logging level as integer, e.g. logging.DEBUG or 10
+        :return: the configured stream handler
+        """
+        # define Handler
+        console = logging.StreamHandler()
+        # set level of Handler
+        console.setLevel(level)
+        # set a format which is simpler for console use
+        formatter = logging.Formatter(self.formatter)
+        # tell the handler to use this format
+        console.setFormatter(formatter)
+        return console
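+
+
+# minimal usage sketch (assuming a script executed as __main__): initialise the Logger once in the
+# entry point, then log from any module via the standard logging API, e.g.
+#     from src.helpers import Logger
+#     import logging
+#     if __name__ == "__main__":
+#         Logger()  # file handler at DEBUG, console handler at INFO
+#         logging.info("experiment started")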
diff --git a/src/model_modules/advanced_paddings.py b/src/model_modules/advanced_paddings.py
index 1d48dfc0a87c05183fc5b8b7755f48efaf7b5428..d9e55c78fb6c78bbe219c820078c46a235627897 100644
--- a/src/model_modules/advanced_paddings.py
+++ b/src/model_modules/advanced_paddings.py
@@ -6,6 +6,7 @@ import numpy as np
 import keras.backend as K
 
 from keras.layers.convolutional import _ZeroPadding
+from keras.layers import ZeroPadding2D
 from keras.legacy import interfaces
 from keras.utils import conv_utils
 from keras.utils.generic_utils import transpose_shape
@@ -252,13 +253,50 @@ class SymmetricPadding2D(_ZeroPadding):
         return tf.pad(inputs, pattern, 'SYMMETRIC')
 
 
+class Padding2D:
+    """
+    This class unifies all implemented padding layers. Instantiate it with a specific padding type (an alias string
+    or one of the padding classes); calling the instance returns the corresponding padding layer.
+    """
+
+    allowed_paddings = {
+        **dict.fromkeys(("RefPad2D", "ReflectionPadding2D"), ReflectionPadding2D),
+        **dict.fromkeys(("SymPad2D", "SymmetricPadding2D"), SymmetricPadding2D),
+        **dict.fromkeys(("ZeroPad2D", "ZeroPadding2D"), ZeroPadding2D)
+    }
+
+    def __init__(self, padding_type):
+        self.padding_type = padding_type
+
+    def _check_and_get_padding(self):
+        if isinstance(self.padding_type, str):
+            try:
+                pad2d = self.allowed_paddings[self.padding_type]
+            except KeyError as einfo:
+                raise NotImplementedError(
+                    f"`{einfo}' is not implemented as padding. "
+                    "Use one of those: i) `RefPad2D', ii) `SymPad2D', iii) `ZeroPad2D'")
+        else:
+            if self.padding_type in self.allowed_paddings.values():
+                pad2d = self.padding_type
+            else:
+                raise TypeError(f"`{self.padding_type.__name__}' is not a valid padding layer type. "
+                                "Use one of those: "
+                                "i) ReflectionPadding2D, ii) SymmetricPadding2D, iii) ZeroPadding2D")
+        return pad2d
+
+    def __call__(self, *args, **kwargs):
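+        # resolve the alias or class to a concrete padding layer and instantiate it with the given
+        # arguments, e.g. Padding2D("SymPad2D")(padding=pad_size, name="SymPad")(x)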
+        return self._check_and_get_padding()(*args, **kwargs)
+
+
 if __name__ == '__main__':
     from keras.models import Model
     from keras.layers import Conv2D, Flatten, Dense, Input
 
     kernel_1 = (3, 3)
     kernel_2 = (5, 5)
-    x = np.array(range(2000)).reshape(-1, 10, 10, 1)
+    kernel_3 = (3, 3)
+    x = np.array(range(2000)).reshape((-1, 10, 10, 1))
     y = x.mean(axis=(1, 2))
 
     x_input = Input(shape=x.shape[1:])
@@ -269,6 +307,10 @@ if __name__ == '__main__':
     pad2 = PadUtils.get_padding_for_same(kernel_size=kernel_2)
     x_out = SymmetricPadding2D(padding=pad2, name="SymPAD")(x_out)
     x_out = Conv2D(2, kernel_size=kernel_2, activation='relu')(x_out)
+
+    pad3 = PadUtils.get_padding_for_same(kernel_size=kernel_3)
+    x_out = Padding2D('RefPad2D')(padding=pad3, name="Padding2D_RefPad")(x_out)
+    x_out = Conv2D(2, kernel_size=kernel_3, activation='relu')(x_out)
     x_out = Flatten()(x_out)
     x_out = Dense(1, activation='linear')(x_out)
 
diff --git a/src/model_modules/inception_model.py b/src/model_modules/inception_model.py
index 1cb7656335495f0261abb434e4a203cb4e63887e..6467b3245ad097af6ef17e596f85264eef383d7a 100644
--- a/src/model_modules/inception_model.py
+++ b/src/model_modules/inception_model.py
@@ -5,7 +5,7 @@ import logging
 
 import keras
 import keras.layers as layers
-from src.model_modules.advanced_paddings import PadUtils, ReflectionPadding2D, SymmetricPadding2D
+from src.model_modules.advanced_paddings import PadUtils, ReflectionPadding2D, SymmetricPadding2D, Padding2D
 
 
 class InceptionModelBase:
@@ -75,7 +75,10 @@ class InceptionModelBase:
                                   name=f'Block_{self.number_of_blocks}{self.block_part_name()}_1x1')(input_x)
             tower = self.act(tower, activation, **act_settings)
 
-            tower = self.padding_layer(padding)(padding=padding_size,
+            tower = Padding2D(padding)(padding=padding_size,
                                                 name=f'Block_{self.number_of_blocks}{self.block_part_name()}_Pad'
                                                 )(tower)
 
@@ -108,28 +111,28 @@ class InceptionModelBase:
         else:
             return act_name.__name__
 
-    @staticmethod
-    def padding_layer(padding):
-        allowed_paddings = {
-            'RefPad2D': ReflectionPadding2D, 'ReflectionPadding2D': ReflectionPadding2D,
-            'SymPad2D': SymmetricPadding2D, 'SymmetricPadding2D': SymmetricPadding2D,
-            'ZeroPad2D': keras.layers.ZeroPadding2D, 'ZeroPadding2D': keras.layers.ZeroPadding2D
-        }
-        if isinstance(padding, str):
-            try:
-                pad2d = allowed_paddings[padding]
-            except KeyError as einfo:
-                raise NotImplementedError(
-                    f"`{einfo}' is not implemented as padding. " 
-                    "Use one of those: i) `RefPad2D', ii) `SymPad2D', iii) `ZeroPad2D'")
-        else:
-            if padding in allowed_paddings.values():
-                pad2d = padding
-            else:
-                raise TypeError(f"`{padding.__name__}' is not a valid padding layer type. "
-                                "Use one of those: "
-                                "i) ReflectionPadding2D, ii) SymmetricPadding2D, iii) ZeroPadding2D")
-        return pad2d
 
     def create_pool_tower(self, input_x, pool_kernel, tower_filter, activation='relu', max_pooling=True, **kwargs):
         """
@@ -156,7 +159,8 @@ class InceptionModelBase:
             block_type = "AvgPool"
             pooling = layers.AveragePooling2D
 
-        tower = self.padding_layer(padding)(padding=padding_size, name=block_name+'Pad')(input_x)
+        tower = Padding2D(padding)(padding=padding_size, name=block_name+'Pad')(input_x)
         tower = pooling(pool_kernel, strides=(1, 1), padding='valid', name=block_name+block_type)(tower)
 
         # convolution block
@@ -262,7 +266,7 @@ if __name__ == '__main__':
                                       'padding': 'SymPad2D'},
                           'tower_3': {'reduction_filter': 64,
                                       'tower_filter': 64,
-                                      'tower_kernel': (1, 1),
+                                      'tower_kernel': (7, 7),
                                       'activation': ELU,
                                       'padding': ReflectionPadding2D}
                           }
diff --git a/src/model_modules/model_class.py b/src/model_modules/model_class.py
index ebbd7a25cef9031436d932a6502c9726bfe3e318..d6dcea179bcfa8a6ec41518db34b186e30d908fc 100644
--- a/src/model_modules/model_class.py
+++ b/src/model_modules/model_class.py
@@ -5,11 +5,12 @@ __date__ = '2019-12-12'
 
 
 from abc import ABC
-from typing import Any, Callable
+from typing import Any, Callable, Dict
 
 import keras
 from src.model_modules.inception_model import InceptionModelBase
 from src.model_modules.flatten import flatten_tail
+from src.model_modules.advanced_paddings import PadUtils, Padding2D
+from src.model_modules.keras_extensions import LearningRateDecay
 
 
 class AbstractModelClass(ABC):
@@ -30,6 +31,7 @@ class AbstractModelClass(ABC):
         self.__model = None
         self.__loss = None
         self.model_name = self.__class__.__name__
+        self.__custom_objects = {}
 
     def __getattr__(self, name: str) -> Any:
 
@@ -78,9 +80,44 @@ class AbstractModelClass(ABC):
     def loss(self, value) -> None:
         self.__loss = value
 
-    def get_settings(self):
+    @property
+    def custom_objects(self) -> Dict:
+        """
+        The custom objects property collects all non-keras utilities that are used in the model class. This
+        information is required to load a customised and already compiled model (e.g. from local disk).
+        :return: the custom objects in a dictionary
+        """
+        return self.__custom_objects
+
+    @custom_objects.setter
+    def custom_objects(self, value) -> None:
+        self.__custom_objects = value
+
+    def get_settings(self) -> Dict:
+        """
+        Get all class attributes that are not protected in the AbstractModelClass as a dictionary.
+        :return: all class attributes
+        """
         return dict((k, v) for (k, v) in self.__dict__.items() if not k.startswith("_AbstractModelClass__"))
 
+    def set_model(self):
+        pass
+
+    def set_loss(self):
+        pass
+
+    def set_custom_objects(self, **kwargs) -> None:
+        """
+        Set custom objects that are not part of the keras framework. These custom objects are needed to load an
+        already compiled model from disk. The Padding2D class gets a special treatment: since it only wraps the
+        actual padding layers, all supported padding classes are added as custom objects in addition to the given
+        ones.
+        :param kwargs: all custom objects that should be saved
+        """
+        if "Padding2D" in kwargs.keys():
+            kwargs.update(kwargs["Padding2D"].allowed_paddings)
+        self.custom_objects = kwargs
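+        # a model compiled with such custom components can later be restored via
+        #     keras.models.load_model(path, custom_objects=self.custom_objects)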
+
 
 class MyLittleModel(AbstractModelClass):
 
@@ -120,6 +157,7 @@ class MyLittleModel(AbstractModelClass):
         # apply to model
         self.set_model()
         self.set_loss()
+        self.set_custom_objects(loss=self.loss)
 
     def set_model(self):
 
@@ -200,6 +238,7 @@ class MyBranchedModel(AbstractModelClass):
         # apply to model
         self.set_model()
         self.set_loss()
+        self.set_custom_objects(loss=self.loss)
 
     def set_model(self):
 
@@ -276,6 +315,7 @@ class MyTowerModel(AbstractModelClass):
         # apply to model
         self.set_model()
         self.set_loss()
+        self.set_custom_objects(loss=self.loss)
 
     def set_model(self):
 
@@ -351,3 +391,137 @@ class MyTowerModel(AbstractModelClass):
         """
 
         self.loss = [keras.losses.mean_squared_error]
+
+
+class MyPaperModel(AbstractModelClass):
+
+    def __init__(self, window_history_size, window_lead_time, channels):
+
+        """
+        Sets model and loss depending on the given arguments. All other hyperparameters (dropout rate, regularizer,
+        optimizer, learning rate) are fixed as instance attributes below.
+        :param window_history_size: number of historical time steps included in the input data
+        :param window_lead_time: number of time steps to forecast in the output layer
+        :param channels: number of variables used in input data
+        """
+
+        super().__init__()
+
+        # settings
+        self.window_history_size = window_history_size
+        self.window_lead_time = window_lead_time
+        self.channels = channels
+        self.dropout_rate = .3
+        self.regularizer = keras.regularizers.l2(0.001)
+        self.initial_lr = 1e-3
+        # self.optimizer = keras.optimizers.adam(lr=self.initial_lr, amsgrad=True)
+        self.optimizer = keras.optimizers.SGD(lr=self.initial_lr, momentum=0.9)
+        self.lr_decay = LearningRateDecay(base_lr=self.initial_lr, drop=.94, epochs_drop=10)
+        self.epochs = 150
+        self.batch_size = int(256 * 2)
+        self.activation = keras.layers.ELU
+        self.padding = "SymPad2D"
+
+        # apply to model
+        self.set_model()
+        self.set_loss()
+        self.set_custom_objects(loss=self.loss, Padding2D=Padding2D)
+
+    def set_model(self):
+
+        """
+        Build the model and assign it to self.model. All settings (activation, kernel sizes, filters, dropout rate
+        and window sizes) are taken from the instance attributes defined in __init__.
+        """
+        activation = self.activation
+        first_kernel = (3, 1)
+        first_filters = 16
+
+        conv_settings_dict1 = {
+            'tower_1': {'reduction_filter': 8, 'tower_filter': 16 * 2, 'tower_kernel': (3, 1),
+                        'activation': activation},
+            'tower_2': {'reduction_filter': 8, 'tower_filter': 16 * 2, 'tower_kernel': (5, 1),
+                        'activation': activation},
+            'tower_3': {'reduction_filter': 8, 'tower_filter': 16 * 2, 'tower_kernel': (1, 1),
+                        'activation': activation},
+            # 'tower_4':{'reduction_filter':8, 'tower_filter':8*2, 'tower_kernel':(7,1), 'activation':activation},
+        }
+        pool_settings_dict1 = {'pool_kernel': (3, 1), 'tower_filter': 16, 'activation': activation}
+
+        conv_settings_dict2 = {
+            'tower_1': {'reduction_filter': 64, 'tower_filter': 32 * 2, 'tower_kernel': (3, 1),
+                        'activation': activation},
+            'tower_2': {'reduction_filter': 64, 'tower_filter': 32 * 2, 'tower_kernel': (5, 1),
+                        'activation': activation},
+            'tower_3': {'reduction_filter': 64, 'tower_filter': 32 * 2, 'tower_kernel': (1, 1),
+                        'activation': activation},
+            # 'tower_4':{'reduction_filter':8*2, 'tower_filter':16*2, 'tower_kernel':(7,1), 'activation':activation},
+        }
+        pool_settings_dict2 = {'pool_kernel': (3, 1), 'tower_filter': 32, 'activation': activation}
+
+        conv_settings_dict3 = {
+            'tower_1': {'reduction_filter': 64 * 2, 'tower_filter': 32 * 4, 'tower_kernel': (3, 1),
+                        'activation': activation},
+            'tower_2': {'reduction_filter': 64 * 2, 'tower_filter': 32 * 4, 'tower_kernel': (5, 1),
+                        'activation': activation},
+            'tower_3': {'reduction_filter': 64 * 2, 'tower_filter': 32 * 4, 'tower_kernel': (1, 1),
+                        'activation': activation},
+            # 'tower_4':{'reduction_filter':16*4, 'tower_filter':32, 'tower_kernel':(7,1), 'activation':activation},
+        }
+        pool_settings_dict3 = {'pool_kernel': (3, 1), 'tower_filter': 32, 'activation': activation}
+
+        ##########################################
+        inception_model = InceptionModelBase()
+
+        X_input = keras.layers.Input(
+            shape=(self.window_history_size + 1, 1, self.channels))  # add 1 to window_size to include current time step t0
+
+        pad_size = PadUtils.get_padding_for_same(first_kernel)
+        X_in = Padding2D("SymPad2D")(padding=pad_size, name="SymPad")(X_input)
+        X_in = keras.layers.Conv2D(filters=first_filters,
+                                   kernel_size=first_kernel,
+                                   kernel_regularizer=self.regularizer,
+                                   name="First_conv_{}x{}".format(first_kernel[0], first_kernel[1]))(X_in)
+        X_in = self.activation(name='FirstAct')(X_in)
+
+        X_in = inception_model.inception_block(X_in, conv_settings_dict1, pool_settings_dict1,
+                                               regularizer=self.regularizer,
+                                               batch_normalisation=True,
+                                               padding=self.padding)
+        out_minor1 = flatten_tail(X_in, 'minor_1', False, self.dropout_rate, self.window_lead_time,
+                                  self.activation, 32, 64)
+
+        X_in = keras.layers.Dropout(self.dropout_rate)(X_in)
+
+        X_in = inception_model.inception_block(X_in, conv_settings_dict2, pool_settings_dict2, regularizer=self.regularizer,
+                                               batch_normalisation=True, padding=self.padding)
+
+        # X_in = keras.layers.Dropout(self.dropout_rate)(X_in)
+        #
+        # X_in = inception_model.inception_block(X_in, conv_settings_dict3, pool_settings_dict3, regularizer=self.regularizer,
+        #                                        batch_normalisation=True)
+        #############################################
+
+        out_main = flatten_tail(X_in, 'Main', activation=activation, bound_weight=False, dropout_rate=self.dropout_rate,
+                                reduction_filter=64 * 2, first_dense=64 * 2, window_lead_time=self.window_lead_time)
+
+        self.model = keras.Model(inputs=X_input, outputs=[out_minor1, out_main])
+
+    def set_loss(self):
+
+        """
+        Set the loss: one mean squared error per output branch (minor_1 and Main).
+        """
+
+        self.loss = [keras.losses.mean_squared_error, keras.losses.mean_squared_error]
diff --git a/src/run_modules/experiment_setup.py b/src/run_modules/experiment_setup.py
index a420e2871e25812fb7053fefce74fb0ab5c039a1..dd0ec3619d855de80336fcb0b33d4a656760768d 100644
--- a/src/run_modules/experiment_setup.py
+++ b/src/run_modules/experiment_setup.py
@@ -36,8 +36,9 @@ class ExperimentSetup(RunEnvironment):
                  limit_nan_fill=None, train_start=None, train_end=None, val_start=None, val_end=None, test_start=None,
                  test_end=None, use_all_stations_on_all_data_sets=True, trainable=None, fraction_of_train=None,
                  experiment_path=None, plot_path=None, forecast_path=None, overwrite_local_data=None, sampling="daily",
-                 create_new_model=None, bootstrap_path=None, permute_data_on_training=None, transformation=None,
-                 evaluate_bootstraps=True, plot_list=None, number_of_bootstraps=None):
+                 create_new_model=None, bootstrap_path=None, permute_data_on_training=False, transformation=None,
+                 train_min_length=None, val_min_length=None, test_min_length=None, extreme_values=None,
+                 extremes_on_right_tail_only=None, evaluate_bootstraps=True, plot_list=None, number_of_bootstraps=None):
 
         # create run framework
         super().__init__()
@@ -52,7 +53,11 @@ class ExperimentSetup(RunEnvironment):
         self._set_param("bootstrap_path", bootstrap_path)
         self._set_param("trainable", trainable, default=True)
         self._set_param("fraction_of_training", fraction_of_train, default=0.8)
-        self._set_param("permute_data", permute_data_on_training, default=False, scope="general.train")
+        self._set_param("extreme_values", extreme_values, default=None, scope="general.train")
+        self._set_param("extremes_on_right_tail_only", extremes_on_right_tail_only, default=False, scope="general.train")
+        self._set_param("upsampling", extreme_values is not None, scope="general.train")
+        upsampling = self.data_store.get("upsampling", "general.train")
+        self._set_param("permute_data", max([permute_data_on_training, upsampling]), scope="general.train")
 
         # set experiment name
         exp_date = self._get_parser_args(parser_args).get("experiment_date")
@@ -102,18 +107,23 @@ class ExperimentSetup(RunEnvironment):
         # train set parameters
         self._set_param("start", train_start, default="1997-01-01", scope="general.train")
         self._set_param("end", train_end, default="2007-12-31", scope="general.train")
+        self._set_param("min_length", train_min_length, default=90, scope="general.train")
 
         # validation set parameters
         self._set_param("start", val_start, default="2008-01-01", scope="general.val")
         self._set_param("end", val_end, default="2009-12-31", scope="general.val")
+        self._set_param("min_length", val_min_length, default=90, scope="general.val")
 
         # test set parameters
         self._set_param("start", test_start, default="2010-01-01", scope="general.test")
         self._set_param("end", test_end, default="2017-12-31", scope="general.test")
+        self._set_param("min_length", test_min_length, default=90, scope="general.test")
 
         # train_val set parameters
         self._set_param("start", self.data_store.get("start", "general.train"), scope="general.train_val")
         self._set_param("end", self.data_store.get("end", "general.val"), scope="general.train_val")
+        train_val_min_length = sum([self.data_store.get("min_length", f"general.{s}") for s in ["train", "val"]])
+        self._set_param("min_length", train_val_min_length, default=180, scope="general.train_val")
 
         # use all stations on all data sets (train, val, test)
         self._set_param("use_all_stations_on_all_data_sets", use_all_stations_on_all_data_sets, default=True)
diff --git a/src/run_modules/model_setup.py b/src/run_modules/model_setup.py
index fe22e37f1f1cfdb48d8136fe53a6ce5ee7f4be97..135f9aff26becc70b6f07e5938732eae27dc538c 100644
--- a/src/run_modules/model_setup.py
+++ b/src/run_modules/model_setup.py
@@ -12,6 +12,7 @@ from src.model_modules.keras_extensions import HistoryAdvanced, CallbackHandler
 # from src.model_modules.model_class import MyBranchedModel as MyModel
 # from src.model_modules.model_class import MyLittleModel as MyModel
 from src.model_modules.model_class import MyTowerModel as MyModel
+# from src.model_modules.model_class import MyPaperModel as MyModel
 from src.run_modules.run_environment import RunEnvironment
 
 
@@ -69,11 +70,12 @@ class ModelSetup(RunEnvironment):
         Set all callbacks for the training phase. Add all callbacks with the .add_callback statement. Finally, the
         advanced model checkpoint is added.
         """
-        lr = self.data_store.get("lr_decay", scope="general.model")
+        lr = self.data_store.get_default("lr_decay", scope="general.model", default=None)
         hist = HistoryAdvanced()
         self.data_store.set("hist", hist, scope="general.model")
         callbacks = CallbackHandler()
-        callbacks.add_callback(lr, self.callbacks_name % "lr", "lr")
+        if lr:
+            callbacks.add_callback(lr, self.callbacks_name % "lr", "lr")
         callbacks.add_callback(hist, self.callbacks_name % "hist", "hist")
         callbacks.create_model_checkpoint(filepath=self.checkpoint_name, verbose=1, monitor='val_loss',
                                           save_best_only=True, mode='auto')
diff --git a/src/run_modules/post_processing.py b/src/run_modules/post_processing.py
index 53e2ca7a2c9d201caa048c5050f69110052d20fb..0073f74293765402b7f5fcc7c778fa220e671784 100644
--- a/src/run_modules/post_processing.py
+++ b/src/run_modules/post_processing.py
@@ -17,6 +17,7 @@ from src.data_handling.bootstraps import BootStraps
 from src.datastore import NameNotFoundInDataStore
 from src.helpers import TimeTracking
 from src.model_modules.linear_model import OrdinaryLeastSquaredModel
+from src.model_modules.model_class import AbstractModelClass
 from src.plotting.postprocessing_plotting import PlotMonthlySummary, PlotStationMap, PlotClimatologicalSkillScore, \
     PlotCompetitiveSkillScore, PlotTimeSeries, PlotBootstrapSkillScore
 from src.plotting.postprocessing_plotting import plot_conditional_quantiles
@@ -245,7 +246,8 @@ class PostProcessing(RunEnvironment):
         except NameNotFoundInDataStore:
             logging.info("no model saved in data store. trying to load model from experiment path")
             model_name = self.data_store.get("model_name", "general.model")
-            model = keras.models.load_model(model_name)
+            model_class: AbstractModelClass = self.data_store.get("model", "general.model")
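+            # the stored custom objects (e.g. a custom loss or the padding classes registered via
+            # set_custom_objects) are required to deserialise a compiled model with non-keras components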
+            model = keras.models.load_model(model_name, custom_objects=model_class.custom_objects)
         return model
 
     def plot(self):
diff --git a/src/run_modules/pre_processing.py b/src/run_modules/pre_processing.py
index 1f84db143130357e8b3b044bcb5aacc345a9c0f6..b53469ecc9d0ca9c7b816d5362b9d0a225b78124 100644
--- a/src/run_modules/pre_processing.py
+++ b/src/run_modules/pre_processing.py
@@ -11,8 +11,9 @@ from src.join import EmptyQueryResult
 from src.run_modules.run_environment import RunEnvironment
 
 DEFAULT_ARGS_LIST = ["data_path", "network", "stations", "variables", "interpolate_dim", "target_dim", "target_var"]
-DEFAULT_KWARGS_LIST = ["limit_nan_fill", "window_history_size", "window_lead_time", "statistics_per_var",
-                       "station_type", "overwrite_local_data", "start", "end", "sampling", "transformation"]
+DEFAULT_KWARGS_LIST = ["limit_nan_fill", "window_history_size", "window_lead_time", "statistics_per_var", "min_length",
+                       "station_type", "overwrite_local_data", "start", "end", "sampling", "transformation",
+                       "extreme_values", "extremes_on_right_tail_only"]
 
 
 class PreProcessing(RunEnvironment):
diff --git a/src/run_modules/run_environment.py b/src/run_modules/run_environment.py
index 56c017290eea4d11881b9b131378d8c5995f0b29..1c44786dfd4830c8053ae1673eac1473fbd19338 100644
--- a/src/run_modules/run_environment.py
+++ b/src/run_modules/run_environment.py
@@ -2,9 +2,13 @@ __author__ = "Lukas Leufen"
 __date__ = '2019-11-25'
 
 import logging
+import os
+import shutil
 import time
 
+from src.helpers import Logger
 from src.datastore import DataStoreByScope as DataStoreObject
+from src.datastore import NameNotFoundInDataStore
 from src.helpers import TimeTracking
 
 
@@ -16,6 +20,7 @@ class RunEnvironment(object):
 
     del_by_exit = False
     data_store = DataStoreObject()
+    logger = Logger()
 
     def __init__(self):
         """
@@ -34,6 +39,11 @@ class RunEnvironment(object):
             logging.info(f"{self.__class__.__name__} finished after {self.time}")
             self.del_by_exit = True
         if self.__class__.__name__ == "RunEnvironment":
+            try:
+                new_file = os.path.join(self.data_store.get("experiment_path", "general"), "logging.log")
+                shutil.copyfile(self.logger.log_file, new_file)
+            except (NameNotFoundInDataStore, FileNotFoundError):
+                pass
             self.data_store.clear_data_store()
 
     def __enter__(self):
diff --git a/src/run_modules/training.py b/src/run_modules/training.py
index df60c4f2f8dff4a9acb82920ad3c1d203813033d..0d6279b132b64f287f541088c2675012a2d1e933 100644
--- a/src/run_modules/training.py
+++ b/src/run_modules/training.py
@@ -9,19 +9,21 @@ import pickle
 import keras
 
 from src.data_handling.data_distributor import Distributor
-from src.model_modules.keras_extensions import LearningRateDecay, ModelCheckpointAdvanced, CallbackHandler
+from src.model_modules.keras_extensions import LearningRateDecay, CallbackHandler
 from src.plotting.training_monitoring import PlotModelHistory, PlotModelLearningRate
 from src.run_modules.run_environment import RunEnvironment
 
+from typing import Union
+
 
 class Training(RunEnvironment):
 
     def __init__(self):
         super().__init__()
         self.model: keras.Model = self.data_store.get("model", "general.model")
-        self.train_set = None
-        self.val_set = None
-        self.test_set = None
+        self.train_set: Union[Distributor, None] = None
+        self.val_set: Union[Distributor, None] = None
+        self.test_set: Union[Distributor, None] = None
         self.batch_size = self.data_store.get("batch_size", "general.model")
         self.epochs = self.data_store.get("epochs", "general.model")
         self.callbacks: CallbackHandler = self.data_store.get("callbacks", "general.model")
@@ -65,8 +67,9 @@ class Training(RunEnvironment):
         :param mode: name of set, should be from ["train", "val", "test"]
         """
         gen = self.data_store.get("generator", f"general.{mode}")
-        permute_data = self.data_store.get_default("permute_data", f"general.{mode}", default=False)
-        setattr(self, f"{mode}_set", Distributor(gen, self.model, self.batch_size, permute_data=permute_data))
+        kwargs = self.data_store.create_args_dict(["permute_data", "upsampling"], scope=f"general.{mode}")
+        setattr(self, f"{mode}_set", Distributor(gen, self.model, self.batch_size, **kwargs))
 
     def set_generators(self) -> None:
         """
@@ -86,6 +89,9 @@ class Training(RunEnvironment):
         locally stored information and the corresponding model and proceed with the already started training.
         """
         logging.info(f"Train with {len(self.train_set)} mini batches.")
+        logging.info(f"Train with option upsampling={self.train_set.upsampling}.")
+        logging.info(f"Train with option data_permutation={self.train_set.do_data_permutation}.")
+
         checkpoint = self.callbacks.get_checkpoint()
         if not os.path.exists(checkpoint.filepath) or self._create_new_model:
             history = self.model.fit_generator(generator=self.train_set.distribute_on_batches(),
@@ -111,7 +117,10 @@ class Training(RunEnvironment):
                                          callbacks=self.callbacks.get_callbacks(as_dict=False),
                                          initial_epoch=initial_epoch)
             history = hist
-        lr = self.callbacks.get_callback_by_name("lr")
+        try:
+            lr = self.callbacks.get_callback_by_name("lr")
+        except IndexError:
+            lr = None
         self.save_callbacks_as_json(history, lr)
         self.load_best_model(checkpoint.filepath)
         self.create_monitoring_plots(history, lr)
@@ -148,8 +157,9 @@ class Training(RunEnvironment):
         path = self.data_store.get("experiment_path", "general")
         with open(os.path.join(path, "history.json"), "w") as f:
             json.dump(history.history, f)
-        with open(os.path.join(path, "history_lr.json"), "w") as f:
-            json.dump(lr_sc.lr, f)
+        if lr_sc:
+            with open(os.path.join(path, "history_lr.json"), "w") as f:
+                json.dump(lr_sc.lr, f)
 
     def create_monitoring_plots(self, history: keras.callbacks.History, lr_sc: LearningRateDecay) -> None:
         """
@@ -174,4 +184,5 @@ class Training(RunEnvironment):
             PlotModelHistory(filename=filename, history=history, plot_metric="mse", main_branch=multiple_branches_used)
 
         # plot learning rate
-        PlotModelLearningRate(filename=os.path.join(path, f"{name}_history_learning_rate.pdf"), lr_sc=lr_sc)
+        if lr_sc:
+            PlotModelLearningRate(filename=os.path.join(path, f"{name}_history_learning_rate.pdf"), lr_sc=lr_sc)
diff --git a/test/test_data_handling/test_bootstraps.py b/test/test_data_handling/test_bootstraps.py
index e66c13e43e7e311e0eef36b4c7561ca01b8a5d86..00530bf4d025126c527085454ee6f7a3550ad997 100644
--- a/test/test_data_handling/test_bootstraps.py
+++ b/test/test_data_handling/test_bootstraps.py
@@ -54,12 +54,12 @@ class TestBootstraps:
         boot_no_init.number_bootstraps = 50
         assert boot_no_init.valid_bootstrap_file(station, variables, 20) == (False, 60)
 
-    def test_shuffle_single_variale(self, boot_no_init):
+    def test_shuffle_single_variable(self, boot_no_init):
         data = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]])
         res = boot_no_init.shuffle_single_variable(data, chunks=(2, 3)).compute()
         assert res.shape == data.shape
-        assert res.max() == data.max()
-        assert res.min() == data.min()
+        assert res.max() <= data.max()
+        assert res.min() >= data.min()
         assert set(np.unique(res)).issubset({1, 2, 3})
 
     def test_create_shuffled_data(self):
diff --git a/test/test_data_handling/test_data_distributor.py b/test/test_data_handling/test_data_distributor.py
index a26e76a0e7f3ef0f5cdbedc07d73a690116966c9..15344fd808a4aa9ee5774ad8ba647bf5ce06d015 100644
--- a/test/test_data_handling/test_data_distributor.py
+++ b/test/test_data_handling/test_data_distributor.py
@@ -46,7 +46,7 @@ class TestDistributor:
         distributor.model = 1
 
     def test_get_number_of_mini_batches(self, distributor):
-        values = np.zeros((2, 2311, 19))
+        values = np.zeros((2311, 19))
         assert distributor._get_number_of_mini_batches(values) == math.ceil(2311 / distributor.batch_size)
 
     def test_distribute_on_batches_single_loop(self,  generator_two_stations, model):
@@ -98,3 +98,21 @@ class TestDistributor:
         assert np.testing.assert_equal(x, x_perm) is None
         assert np.testing.assert_equal(y, y_perm) is None
 
+    def test_distribute_on_batches_upsampling_no_extremes_given(self,  generator, model):
+        d = Distributor(generator, model, upsampling=True)
+        gen_len = d.generator.get_data_generator(0, load_local_tmp_storage=False).get_transposed_label().shape[0]
+        num_mini_batches = math.ceil(gen_len / d.batch_size)
+        i = 0
+        for i, e in enumerate(d.distribute_on_batches(fit_call=False)):
+            assert e[0].shape[0] <= d.batch_size
+        assert i + 1 == num_mini_batches
+
+    def test_distribute_on_batches_upsampling(self, generator, model):
+        generator.extreme_values = [1]
+        d = Distributor(generator, model, upsampling=True)
+        gen_len = d.generator.get_data_generator(0, load_local_tmp_storage=False).get_transposed_label().shape[0]
+        extr_len = d.generator.get_data_generator(0, load_local_tmp_storage=False).get_extremes_label().shape[0]
+        i = 0
+        for i, e in enumerate(d.distribute_on_batches(fit_call=False)):
+            assert e[0].shape[0] <= d.batch_size
+        assert i + 1 == math.ceil((gen_len + extr_len) / d.batch_size)
diff --git a/test/test_data_handling/test_data_generator.py b/test/test_data_handling/test_data_generator.py
index 9bf11154609afa9ada2b488455f7a341a41d21ae..939f93cc9ee01c76a282e755aca14b39c6fc4ac9 100644
--- a/test/test_data_handling/test_data_generator.py
+++ b/test/test_data_handling/test_data_generator.py
@@ -238,6 +238,20 @@ class TestDataGenerator:
         assert data._transform_method == "standardise"
         assert data.mean is not None
 
+    def test_get_data_generator_extremes(self, gen_with_transformation):
+        gen = gen_with_transformation
+        gen.kwargs = {"statistics_per_var": {'o3': 'dma8eu', 'temp': 'maximum'}}
+        gen.extreme_values = [1.]
+        data = gen.get_data_generator("DEBW107", load_local_tmp_storage=False, save_local_tmp_storage=False)
+        assert data.extremes_label is not None
+        assert data.extremes_history is not None
+        assert data.extremes_label.shape[:2] == data.label.shape[:2]
+        assert data.extremes_label.shape[2] <= data.label.shape[2]
+        len_both_tails = data.extremes_label.shape[2]
+        gen.kwargs["extremes_on_right_tail_only"] = True
+        data = gen.get_data_generator("DEBW107", load_local_tmp_storage=False, save_local_tmp_storage=False)
+        assert data.extremes_label.shape[2] <= len_both_tails
+
     def test_save_pickle_data(self, gen):
         file = os.path.join(gen.data_path_tmp, f"DEBW107_{'_'.join(sorted(gen.variables))}_2010_2014_.pickle")
         if os.path.exists(file):
diff --git a/test/test_data_handling/test_data_preparation.py b/test/test_data_handling/test_data_preparation.py
index 91719f3dd16326ee6281c4db8ef3aa87e238d70f..71f3a1d6a0a675a155b517901aef1f3c359b104b 100644
--- a/test/test_data_handling/test_data_preparation.py
+++ b/test/test_data_handling/test_data_preparation.py
@@ -1,6 +1,6 @@
 import datetime as dt
 import os
-from operator import itemgetter
+from operator import itemgetter, lt, gt
 import logging
 
 import numpy as np
@@ -287,6 +287,14 @@ class TestDataPrep:
         assert remaining_len == data.label.datetime.shape
         assert remaining_len == data.observation.datetime.shape
 
+    def test_remove_nan_too_short(self, data):
+        data.kwargs["min_length"] = 4000  # actual length of series is 3940
+        data.make_history_window('variables', -12, 'datetime')
+        data.make_labels('variables', 'o3', 'datetime', 3)
+        data.make_observation('variables', 'o3', 'datetime')
+        data.remove_nan('datetime')
+        assert not any([data.history, data.label, data.observation])
+
     def test_create_index_array(self, data):
         index_array = data.create_index_array('window', range(1, 4))
         assert np.testing.assert_array_equal(index_array.data, [1, 2, 3]) is None
@@ -395,3 +403,64 @@ class TestDataPrep:
         data.make_labels("variables", "o3", "datetime", 2)
         transposed = data.get_transposed_label()
         assert transposed.coords.dims == ("datetime", "window")
+
+    def test_multiply_extremes(self, data):
+        data.transform("datetime")
+        data.make_history_window("variables", 3, "datetime")
+        data.make_labels("variables", "o3", "datetime", 2)
+        orig = data.label
+        data.multiply_extremes(1)
+        upsampled = data.extremes_label
+        assert (upsampled > 1).sum() == (orig > 1).sum()
+        assert (upsampled < -1).sum() == (orig < -1).sum()
+
+    def test_multiply_extremes_from_list(self, data):
+        data.transform("datetime")
+        data.make_history_window("variables", 3, "datetime")
+        data.make_labels("variables", "o3", "datetime", 2)
+        orig = data.label
+        data.multiply_extremes([1, 1.5, 2, 3])
+        upsampled = data.extremes_label
+        def f(d, op, n):
+            return op(d, n).any(dim="window").sum()
+        assert f(upsampled, gt, 1) == sum([f(orig, gt, 1), f(orig, gt, 1.5), f(orig, gt, 2) * 2, f(orig, gt, 3) * 4])
+        assert f(upsampled, lt, -1) == sum([f(orig, lt, -1), f(orig, lt, -1.5), f(orig, lt, -2) * 2, f(orig, lt, -3) * 4])
+
+    def test_multiply_extremes_wrong_extremes(self, data):
+        with pytest.raises(TypeError) as e:
+            data.multiply_extremes([1, "1.5", 2])
+        assert "Elements of list extreme_values have to be (<class 'float'>, <class 'int'>), but at least element 1.5" \
+               " is type <class 'str'>" in e.value.args[0]
+
+    def test_multiply_extremes_right_tail(self, data):
+        data.transform("datetime")
+        data.make_history_window("variables", 3, "datetime")
+        data.make_labels("variables", "o3", "datetime", 2)
+        orig = data.label
+        data.multiply_extremes([1, 2], extremes_on_right_tail_only=True)
+        upsampled = data.extremes_label
+        def f(d, op, n):
+            return op(d, n).any(dim="window").sum()
+        assert f(upsampled, gt, 1) == sum([f(orig, gt, 1), f(orig, gt, 2)])
+        assert upsampled.shape[2] == sum([f(orig, gt, 1), f(orig, gt, 2)])
+        assert f(upsampled, lt, -1) == 0
+
+    def test_get_extremes_history(self, data):
+        data.transform("datetime")
+        data.make_history_window("variables", 3, "datetime")
+        data.make_labels("variables", "o3", "datetime", 2)
+        data.make_observation("variables", "o3", "datetime")
+        data.remove_nan("datetime")
+        data.multiply_extremes([1, 2], extremes_on_right_tail_only=True)
+        assert (data.get_extremes_history() ==
+                data.extremes_history.transpose("datetime", "window", "Stations", "variables")).all()
+
+    def test_get_extremes_label(self, data):
+        data.transform("datetime")
+        data.make_history_window("variables", 3, "datetime")
+        data.make_labels("variables", "o3", "datetime", 2)
+        data.make_observation("variables", "o3", "datetime")
+        data.remove_nan("datetime")
+        data.multiply_extremes([1, 2], extremes_on_right_tail_only=True)
+        assert (data.get_extremes_label() ==
+                data.extremes_label.squeeze("Stations").transpose("datetime", "window")).all()
diff --git a/test/test_model_modules/test_advanced_paddings.py b/test/test_model_modules/test_advanced_paddings.py
index 5282eb6df34d4d395dbbdd1fd76fd71a95e9c8df..bbeaf1c745a63b3607062b0c4052088c9af06b92 100644
--- a/test/test_model_modules/test_advanced_paddings.py
+++ b/test/test_model_modules/test_advanced_paddings.py
@@ -417,3 +417,61 @@ class TestSymmerticPadding2D:
         sym_pad = SymmetricPadding2D(padding=pad, name=layer_name)(input_x)
         assert sym_pad.get_shape().as_list() == [None, 12, 10, 3]
         assert sym_pad.name == 'SymPad_3x1/MirrorPad:0'
+
+
+class TestPadding2D:
+
+    @pytest.fixture
+    def input_x(self):
+        return keras.Input(shape=(32, 32, 3))
+
+    def test_init(self):
+        padding_layer = Padding2D('SymPad2D')
+        assert padding_layer.padding_type == 'SymPad2D'
+        assert padding_layer.allowed_paddings == {
+            'RefPad2D': ReflectionPadding2D, 'ReflectionPadding2D': ReflectionPadding2D,
+            'SymPad2D': SymmetricPadding2D, 'SymmetricPadding2D': SymmetricPadding2D,
+            'ZeroPad2D': ZeroPadding2D, 'ZeroPadding2D': ZeroPadding2D
+        }
+
+    def test_check_and_get_padding_zero_padding(self):
+        assert Padding2D('ZeroPad2D')._check_and_get_padding() == ZeroPadding2D
+        assert Padding2D('ZeroPadding2D')._check_and_get_padding() == ZeroPadding2D
+        assert Padding2D(keras.layers.ZeroPadding2D)._check_and_get_padding() == ZeroPadding2D
+
+    def test_check_and_get_padding_sym_padding(self):
+        assert Padding2D('SymPad2D')._check_and_get_padding() == SymmetricPadding2D
+        assert Padding2D('SymmetricPadding2D')._check_and_get_padding() == SymmetricPadding2D
+        assert Padding2D(SymmetricPadding2D)._check_and_get_padding() == SymmetricPadding2D
+
+    def test_check_and_get_padding_ref_padding(self):
+        assert Padding2D('RefPad2D')._check_and_get_padding() == ReflectionPadding2D
+        assert Padding2D('ReflectionPadding2D')._check_and_get_padding() == ReflectionPadding2D
+        assert Padding2D(ReflectionPadding2D)._check_and_get_padding() == ReflectionPadding2D
+
+    def test_check_and_get_padding_raises(self):
+        with pytest.raises(NotImplementedError) as einfo:
+            Padding2D('FalsePadding2D')._check_and_get_padding()
+        assert "`'FalsePadding2D'' is not implemented as padding. " \
+               "Use one of those: i) `RefPad2D', ii) `SymPad2D', iii) `ZeroPad2D'" in str(einfo.value)
+        with pytest.raises(TypeError) as einfo:
+            Padding2D(keras.layers.Conv2D)._check_and_get_padding()
+        assert "`Conv2D' is not a valid padding layer type. Use one of those: "\
+               "i) ReflectionPadding2D, ii) SymmetricPadding2D, iii) ZeroPadding2D" in str(einfo.value)
+
+    @pytest.mark.parametrize("pad_type", ["SymPad2D", "SymmetricPadding2D", SymmetricPadding2D,
+                                          "RefPad2D", "ReflectionPadding2D", ReflectionPadding2D,
+                                          "ZeroPad2D", "ZeroPadding2D", ZeroPadding2D])
+    def test_call(self, pad_type, input_x):
+        pd = Padding2D(pad_type)
+        if hasattr(pad_type, "__name__"):
+            layer_name = pad_type.__name__
+        else:
+            layer_name = pad_type
+        pd_ap = pd(padding=(1, 2), name=f"{layer_name}_layer")(input_x)
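+        # padding=(1, 2) adds one row top/bottom and two columns left/right: (32, 32) -> (34, 36)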
+        assert pd_ap._keras_history[0].input_shape == (None, 32, 32, 3)
+        assert pd_ap._keras_history[0].output_shape == (None, 34, 36, 3)
+        assert pd_ap._keras_history[0].padding == ((1, 1), (2, 2))
+        assert pd_ap._keras_history[0].name == f"{layer_name}_layer"
diff --git a/test/test_model_modules/test_inception_model.py b/test/test_model_modules/test_inception_model.py
index 9dee30788c34cd8d1a7572947ea2e568ac2006b7..e5e92158425a73c5af1c6d1623d970e1037bbd80 100644
--- a/test/test_model_modules/test_inception_model.py
+++ b/test/test_model_modules/test_inception_model.py
@@ -277,48 +277,3 @@ class TestInceptionModelBase:
         bn = base.batch_normalisation(input_x)._keras_history[0]
         assert isinstance(bn, keras.layers.normalization.BatchNormalization)
         assert bn.name == "Block_0a_BN"
-
-    def test_padding_layer_zero_padding(self, base, input_x):
-        padding_size = ((1, 1), (0, 0))
-        zp = base.padding_layer('ZeroPad2D')
-        assert zp == keras.layers.convolutional.ZeroPadding2D
-        assert base.padding_layer('ZeroPadding2D') == keras.layers.convolutional.ZeroPadding2D
-        assert base.padding_layer(keras.layers.ZeroPadding2D) == keras.layers.convolutional.ZeroPadding2D
-        assert zp.__name__ == 'ZeroPadding2D'
-        zp_ap = zp(padding=padding_size)(input_x)
-        assert zp_ap._keras_history[0].padding == ((1, 1), (0, 0))
-
-    def test_padding_layer_sym_padding(self, base, input_x):
-        padding_size = ((1, 1), (0, 0))
-        zp = base.padding_layer('SymPad2D')
-        assert zp == SymmetricPadding2D
-        assert base.padding_layer('SymmetricPadding2D') == SymmetricPadding2D
-        assert base.padding_layer(SymmetricPadding2D) == SymmetricPadding2D
-        assert zp.__name__ == 'SymmetricPadding2D'
-        zp_ap = zp(padding=padding_size)(input_x)
-        assert zp_ap._keras_history[0].padding == ((1, 1), (0, 0))
-
-    def test_padding_layer_ref_padding(self, base, input_x):
-        padding_size = ((1, 1), (0, 0))
-        zp = base.padding_layer('RefPad2D')
-        assert zp == ReflectionPadding2D
-        assert base.padding_layer('ReflectionPadding2D') == ReflectionPadding2D
-        assert base.padding_layer(ReflectionPadding2D) == ReflectionPadding2D
-        assert zp.__name__ == 'ReflectionPadding2D'
-        zp_ap = zp(padding=padding_size)(input_x)
-        assert zp_ap._keras_history[0].padding == ((1, 1), (0, 0))
-
-    def test_padding_layer_raises(self, base, input_x):
-        with pytest.raises(NotImplementedError) as einfo:
-            base.padding_layer('FalsePadding2D')
-        assert "`'FalsePadding2D'' is not implemented as padding. " \
-               "Use one of those: i) `RefPad2D', ii) `SymPad2D', iii) `ZeroPad2D'" in str(einfo.value)
-        with pytest.raises(TypeError) as einfo:
-            base.padding_layer(keras.layers.Conv2D)
-        assert "`Conv2D' is not a valid padding layer type. Use one of those: "\
-               "i) ReflectionPadding2D, ii) SymmetricPadding2D, iii) ZeroPadding2D" in str(einfo.value)
-
-
-
-
-
diff --git a/test/test_model_modules/test_model_class.py b/test/test_model_modules/test_model_class.py
index 0dbd2d9b67a0748bf09eb4f59e1888aae1ea405d..cee031749b193b91bd1cf16c02acfb3050eaed61 100644
--- a/test/test_model_modules/test_model_class.py
+++ b/test/test_model_modules/test_model_class.py
@@ -2,6 +2,18 @@ import keras
 import pytest
 
 from src.model_modules.model_class import AbstractModelClass
+from src.model_modules.model_class import MyPaperModel, MyTowerModel, MyLittleModel, MyBranchedModel
+
+
+class Paddings:
+    allowed_paddings = {"pad1": 34, "another_pad": True}
+
+
+class AbstractModelSubClass(AbstractModelClass):
+
+    def __init__(self):
+        super().__init__()
+        self.test_attr = "testAttr"
 
 
 class TestAbstractModelClass:
@@ -10,9 +22,15 @@ class TestAbstractModelClass:
     def amc(self):
         return AbstractModelClass()
 
+    @pytest.fixture
+    def amsc(self):
+        return AbstractModelSubClass()
+
     def test_init(self, amc):
         assert amc.model is None
         assert amc.loss is None
+        assert amc.model_name == "AbstractModelClass"
+        assert amc.custom_objects == {}
 
     def test_model_property(self, amc):
         amc.model = keras.Model()
@@ -27,3 +45,52 @@ class TestAbstractModelClass:
         assert hasattr(amc, "compile") is True
         assert hasattr(amc.model, "compile") is True
         assert amc.compile == amc.model.compile
+
+    def test_get_settings(self, amc, amsc):
+        assert amc.get_settings() == {"model_name": "AbstractModelClass"}
+        assert amsc.get_settings() == {"test_attr": "testAttr", "model_name": "AbstractModelSubClass"}
+
+    def test_custom_objects(self, amc):
+        amc.custom_objects = {"Test": 123}
+        assert amc.custom_objects == {"Test": 123}
+
+    def test_set_custom_objects(self, amc):
+        amc.set_custom_objects(Test=22, minor_param="minor")
+        assert amc.custom_objects == {"Test": 22, "minor_param": "minor"}
+        amc.set_custom_objects(Test=2, minor_param1="minor1")
+        assert amc.custom_objects == {"Test": 2, "minor_param1": "minor1"}
+        paddings = Paddings()
+        amc.set_custom_objects(Test=1, Padding2D=paddings)
+        assert amc.custom_objects == {"Test": 1, "Padding2D": paddings, "pad1": 34, "another_pad": True}
+
+
+class TestMyPaperModel:
+
+    @pytest.fixture
+    def mpm(self):
+        return MyPaperModel(window_history_size=6, window_lead_time=4, channels=9)
+
+    def test_init(self, mpm):
+        # check that the number of loss functions fits the model outputs:
+        # either the same loss function for all tails or a different function per tail
+        if isinstance(mpm.model.output_shape, list):
+            assert (callable(mpm.loss) or (len(mpm.loss) == 1)) or (len(mpm.loss) == len(mpm.model.output_shape))
+        elif isinstance(mpm.model.output_shape, tuple):
+            assert callable(mpm.loss) or (len(mpm.loss) == 1)
+
+    def test_set_model(self, mpm):
+        assert isinstance(mpm.model, keras.Model)
+        assert mpm.model.layers[0].output_shape == (None, 7, 1, 9)
+        # check output dimensions
+        if isinstance(mpm.model.output_shape, tuple):
+            assert mpm.model.output_shape == (None, 4)
+        elif isinstance(mpm.model.output_shape, list):
+            for tail_shape in mpm.model.output_shape:
+                assert tail_shape == (None, 4)
+        else:
+            raise TypeError(f"Type of model.output_shape has to be a tuple (one tail)"
+                            f" or a list of tuples (multiple tails). Received: {type(mpm.model.output_shape)}")
+
+    def test_set_loss(self, mpm):
+        assert callable(mpm.loss) or (len(mpm.loss) > 0)
+
diff --git a/test/test_modules/test_experiment_setup.py b/test/test_modules/test_experiment_setup.py
index 894e4b552af4231ccc12fb85aaaebf5bbc23edf3..a3a83acf84e286d1f5da9b5caffa256fc0ca3327 100644
--- a/test/test_modules/test_experiment_setup.py
+++ b/test/test_modules/test_experiment_setup.py
@@ -85,12 +85,19 @@ class TestExperimentSetup:
         # train parameters
         assert data_store.get("start", "general.train") == "1997-01-01"
         assert data_store.get("end", "general.train") == "2007-12-31"
+        assert data_store.get("min_length", "general.train") == 90
         # validation parameters
         assert data_store.get("start", "general.val") == "2008-01-01"
         assert data_store.get("end", "general.val") == "2009-12-31"
+        assert data_store.get("min_length", "general.val") == 90
         # test parameters
         assert data_store.get("start", "general.test") == "2010-01-01"
         assert data_store.get("end", "general.test") == "2017-12-31"
+        assert data_store.get("min_length", "general.test") == 90
+        # train_val parameters
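+        # (train_val spans both subsets; its min_length is the sum of the train and val values: 90 + 90)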
+        assert data_store.get("start", "general.train_val") == "1997-01-01"
+        assert data_store.get("end", "general.train_val") == "2009-12-31"
+        assert data_store.get("min_length", "general.train_val") == 180
         # use all stations on all data sets (train, val, test)
         assert data_store.get("use_all_stations_on_all_data_sets", "general") is True
 
@@ -104,7 +111,7 @@ class TestExperimentSetup:
                       interpolate_dim="int_dim", interpolate_method="cubic", limit_nan_fill=5, train_start="2000-01-01",
                       train_end="2000-01-02", val_start="2000-01-03", val_end="2000-01-04", test_start="2000-01-05",
                       test_end="2000-01-06", use_all_stations_on_all_data_sets=False, trainable=False,
-                      fraction_of_train=0.5, experiment_path=experiment_path, create_new_model=True)
+                      fraction_of_train=0.5, experiment_path=experiment_path, create_new_model=True, val_min_length=20)
         exp_setup = ExperimentSetup(**kwargs)
         data_store = exp_setup.data_store
         # experiment setup
@@ -139,12 +146,19 @@ class TestExperimentSetup:
         # train parameters
         assert data_store.get("start", "general.train") == "2000-01-01"
         assert data_store.get("end", "general.train") == "2000-01-02"
+        assert data_store.get("min_length", "general.train") == 90
         # validation parameters
         assert data_store.get("start", "general.val") == "2000-01-03"
         assert data_store.get("end", "general.val") == "2000-01-04"
+        assert data_store.get("min_length", "general.val") == 20
         # test parameters
         assert data_store.get("start", "general.test") == "2000-01-05"
         assert data_store.get("end", "general.test") == "2000-01-06"
+        assert data_store.get("min_length", "general.test") == 90
+        # train_val parameters
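+        # (min_length again adds up over train and val: 90 + 20)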
+        assert data_store.get("start", "general.train_val") == "2000-01-01"
+        assert data_store.get("end", "general.train_val") == "2000-01-04"
+        assert data_store.get("min_length", "general.train_val") == 110
         # use all stations on all data sets (train, val, test)
         assert data_store.get("use_all_stations_on_all_data_sets", "general.test") is False
 
diff --git a/test/test_modules/test_model_setup.py b/test/test_modules/test_model_setup.py
index ade35a244601d138d22af6305e67b5aeae964680..9ff7494ff0540c9c96c1343b4f44fece08bfe4ce 100644
--- a/test/test_modules/test_model_setup.py
+++ b/test/test_modules/test_model_setup.py
@@ -4,6 +4,7 @@ import pytest
 
 from src.data_handling.data_generator import DataGenerator
 from src.datastore import EmptyScope
+from src.model_modules.keras_extensions import CallbackHandler
 from src.model_modules.model_class import AbstractModelClass
 from src.run_modules.model_setup import ModelSetup
 from src.run_modules.run_environment import RunEnvironment
@@ -61,6 +62,18 @@ class TestModelSetup:
         setup.checkpoint_name = "TestName"
         setup._set_callbacks()
         assert "general.modeltest" in setup.data_store.search_name("callbacks")
+        callbacks = setup.data_store.get("callbacks", "general.modeltest")
+        assert len(callbacks.get_callbacks()) == 3
+
+    def test_set_callbacks_no_lr_decay(self, setup):
+        setup.data_store.set("lr_decay", None, "general.model")
+        assert "general.modeltest" not in setup.data_store.search_name("callbacks")
+        setup.checkpoint_name = "TestName"
+        setup._set_callbacks()
+        callbacks: CallbackHandler = setup.data_store.get("callbacks", "general.modeltest")
+        assert len(callbacks.get_callbacks()) == 2
+        with pytest.raises(IndexError):
+            callbacks.get_callback_by_name("lr_decay")
 
     def test_get_model_settings(self, setup_with_model):
         with pytest.raises(EmptyScope):
@@ -73,7 +86,7 @@ class TestModelSetup:
         setup_with_gen.build_model()
         assert isinstance(setup_with_gen.model, AbstractModelClass)
         expected = {"window_history_size", "window_lead_time", "channels", "dropout_rate", "regularizer", "initial_lr",
-                    "optimizer", "lr_decay", "epochs", "batch_size", "activation"}
+                    "optimizer", "epochs", "batch_size", "activation"}
         assert expected <= self.current_scope_as_set(setup_with_gen)
 
     def test_set_channels(self, setup_with_gen_tiny):
diff --git a/test/test_modules/test_pre_processing.py b/test/test_modules/test_pre_processing.py
index d9c7ba6ffb75ca09a74fec6016e90a649a73f66f..b29ed1e21480a869e4c118332c18b6edd8ac23a5 100644
--- a/test/test_modules/test_pre_processing.py
+++ b/test/test_modules/test_pre_processing.py
@@ -36,6 +36,7 @@ class TestPreProcessing:
     def test_init(self, caplog):
         ExperimentSetup(parser_args={}, stations=['DEBW107', 'DEBY081', 'DEBW013', 'DEBW076', 'DEBW087'],
                         statistics_per_var={'o3': 'dma8eu', 'temp': 'maximum'})
+        caplog.clear()
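+        # drop the records logged by ExperimentSetup so the first tuple belongs to PreProcessing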
         caplog.set_level(logging.INFO)
         with PreProcessing():
             assert caplog.record_tuples[0] == ('root', 20, 'PreProcessing started')
@@ -54,7 +55,8 @@ class TestPreProcessing:
         assert obj_with_exp_setup.data_store.search_name("generator") == []
         obj_with_exp_setup.split_train_val_test()
         data_store = obj_with_exp_setup.data_store
-        expected_params = ["generator", "start", "end", "stations", "permute_data"]
+        expected_params = ["generator", "start", "end", "stations", "permute_data", "min_length", "extreme_values",
+                           "extremes_on_right_tail_only", "upsampling"]
         assert data_store.search_scope("general.train") == sorted(expected_params)
         assert data_store.search_name("generator") == sorted(["general.train", "general.val", "general.test",
                                                               "general.train_val"])