From 16d8ee512b8fcee76d7d8e38acbd258fd4d32f6e Mon Sep 17 00:00:00 2001
From: lukas leufen <l.leufen@fz-juelich.de>
Date: Thu, 23 Apr 2020 16:53:18 +0200
Subject: [PATCH] moved helpers into new location, this is a commit only on the
 already finished doc strings, tests potentially fail with this part-wise
 commit

---
 src/configuration/__init__.py                 |   2 +
 src/configuration/path_config.py              |  70 ++++
 src/data_handling/bootstraps.py               |   2 +-
 src/data_handling/data_generator.py           |   2 +-
 src/data_handling/data_preparation.py         |   3 +-
 src/helpers.py                                | 306 --------------
 src/helpers/__init__.py                       |   6 +
 src/helpers/helpers.py                        |  92 ++++
 src/helpers/logger.py                         |  70 ++++
 src/helpers/testing.py                        |  79 ++++
 src/helpers/time_tracking.py                  | 126 ++++++
 src/join.py                                   |  12 +-
 src/join_settings.py                          |  11 -
 src/plotting/postprocessing_plotting.py       |   2 +-
 src/run_modules/experiment_setup.py           |  22 +-
 src/run_modules/model_setup.py                |   7 +-
 src/run_modules/pre_processing.py             |  24 +-
 src/run_modules/run_environment.py            |   2 +-
 test/test_configuration/test_init.py          |   0
 test/test_configuration/test_path_config.py   | 115 +++++
 test/test_data_handling/test_bootstraps.py    |  17 +-
 .../test_data_distributor.py                  |   6 +-
 .../test_data_handling/test_data_generator.py |  15 +-
 .../test_data_preparation.py                  |  21 +-
 test/test_datastore.py                        |  23 +-
 test/test_helpers.py                          | 393 ------------------
 test/test_helpers/test_helpers.py             | 265 ++++++++++++
 test/test_join.py                             |   6 +-
 .../test_advanced_paddings.py                 |  21 +-
 .../test_inception_model.py                   |   4 +-
 .../test_keras_extensions.py                  |  14 +-
 test/test_model_modules/test_linear_model.py  |   4 -
 test/test_model_modules/test_loss.py          |  17 +
 test/test_model_modules/test_model_class.py   |   3 +-
 test/test_modules/test_experiment_setup.py    |   3 +-
 test/test_modules/test_model_setup.py         |   1 -
 test/test_modules/test_training.py            |   8 +-
 .../test_plotting/test_training_monitoring.py |   1 -
 test/test_statistics.py                       |   2 +-
 39 files changed, 960 insertions(+), 817 deletions(-)
 create mode 100644 src/configuration/__init__.py
 create mode 100644 src/configuration/path_config.py
 delete mode 100644 src/helpers.py
 create mode 100644 src/helpers/__init__.py
 create mode 100644 src/helpers/helpers.py
 create mode 100644 src/helpers/logger.py
 create mode 100644 src/helpers/testing.py
 create mode 100644 src/helpers/time_tracking.py
 delete mode 100644 src/join_settings.py
 create mode 100644 test/test_configuration/test_init.py
 create mode 100644 test/test_configuration/test_path_config.py
 delete mode 100644 test/test_helpers.py
 create mode 100644 test/test_helpers/test_helpers.py
 create mode 100644 test/test_model_modules/test_loss.py

diff --git a/src/configuration/__init__.py b/src/configuration/__init__.py
new file mode 100644
index 00000000..d48bf3ac
--- /dev/null
+++ b/src/configuration/__init__.py
@@ -0,0 +1,2 @@
+
+from .path_config import ROOT_PATH, prepare_host, set_experiment_name, set_bootstrap_path, check_path_and_create
\ No newline at end of file
diff --git a/src/configuration/path_config.py b/src/configuration/path_config.py
new file mode 100644
index 00000000..6b838472
--- /dev/null
+++ b/src/configuration/path_config.py
@@ -0,0 +1,70 @@
+import logging
+import os
+import re
+import socket
+
+ROOT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
+
+
+def prepare_host(create_new=True, sampling="daily"):
+    hostname = socket.gethostname()
+    runner_regex = re.compile(r"runner-.*-project-2411-concurrent-\d+")
+    try:
+        user = os.getlogin()
+    except OSError:
+        user = "default"
+    if hostname == "ZAM144":
+        path = f"/home/{user}/Data/toar_{sampling}/"
+    elif hostname == "zam347":
+        path = f"/home/{user}/Data/toar_{sampling}/"
+    elif hostname == "linux-aa9b":
+        path = f"/home/{user}/machinelearningtools/data/toar_{sampling}/"
+    elif (len(hostname) > 2) and (hostname[:2] == "jr"):
+        path = f"/p/project/cjjsc42/{user}/DATA/toar_{sampling}/"
+    elif (len(hostname) > 2) and (hostname[:2] == "jw"):
+        path = f"/p/home/jusers/{user}/juwels/intelliaq/DATA/toar_{sampling}/"
+    elif runner_regex.match(hostname) is not None:
+        path = f"/home/{user}/machinelearningtools/data/toar_{sampling}/"
+    else:
+        raise OSError(f"unknown host '{hostname}'")
+    if not os.path.exists(path):
+        try:
+            if create_new:
+                check_path_and_create(path)
+                return path
+            else:
+                raise PermissionError
+        except PermissionError:
+            raise NotADirectoryError(f"path '{path}' does not exist for host '{hostname}'.")
+    else:
+        logging.debug(f"set path to: {path}")
+        return path
+
+
+def set_experiment_name(experiment_date=None, experiment_path=None, sampling=None):
+    if experiment_date is None:
+        experiment_name = "TestExperiment"
+    else:
+        experiment_name = f"{experiment_date}_network"
+    if sampling == "hourly":
+        experiment_name += f"_{sampling}"
+    if experiment_path is None:
+        experiment_path = os.path.abspath(os.path.join(ROOT_PATH, experiment_name))
+    else:
+        experiment_path = os.path.join(os.path.abspath(experiment_path), experiment_name)
+    return experiment_name, experiment_path
+
+
+def set_bootstrap_path(bootstrap_path, data_path, sampling):
+    if bootstrap_path is None:
+        bootstrap_path = os.path.join(data_path, "..", f"bootstrap_{sampling}")
+    check_path_and_create(bootstrap_path)
+    return bootstrap_path
+
+
+def check_path_and_create(path):
+    try:
+        os.makedirs(path)
+        logging.debug(f"Created path: {path}")
+    except FileExistsError:
+        logging.debug(f"Path already exists: {path}")
\ No newline at end of file
diff --git a/src/data_handling/bootstraps.py b/src/data_handling/bootstraps.py
index bf125d87..f5077590 100644
--- a/src/data_handling/bootstraps.py
+++ b/src/data_handling/bootstraps.py
@@ -51,7 +51,7 @@ class BootStrapGenerator(keras.utils.Sequence):
         self.number_of_boots = number_of_boots
         self.variables = variables
         self.history_orig = history
-        self.history = history.sel(variables=helpers.list_pop(self.variables, shuffled_variable))
+        self.history = history.sel(variables=helpers.remove_items(self.variables, shuffled_variable))
         self.shuffled = shuffled.sel(variables=shuffled_variable)
 
     def __len__(self) -> int:
diff --git a/src/data_handling/data_generator.py b/src/data_handling/data_generator.py
index b2c2549b..de2bb39f 100644
--- a/src/data_handling/data_generator.py
+++ b/src/data_handling/data_generator.py
@@ -290,7 +290,7 @@ class DataGenerator(keras.utils.Sequence):
             data = DataPrep(self.data_path, self.network, station, self.variables, station_type=self.station_type,
                             **self.kwargs)
             if self.transformation is not None:
-                data.transform("datetime", **helpers.dict_pop(self.transformation, "scope"))
+                data.transform("datetime", **helpers.remove_items(self.transformation, "scope"))
             data.interpolate(self.interpolate_dim, method=self.interpolate_method, limit=self.limit_nan_fill)
             data.make_history_window(self.target_dim, self.window_history_size, self.interpolate_dim)
             data.make_labels(self.target_dim, self.target_var, self.interpolate_dim, self.window_lead_time)
diff --git a/src/data_handling/data_preparation.py b/src/data_handling/data_preparation.py
index eccb66b3..2c83316b 100644
--- a/src/data_handling/data_preparation.py
+++ b/src/data_handling/data_preparation.py
@@ -13,6 +13,7 @@ import numpy as np
 import pandas as pd
 import xarray as xr
 
+from src.configuration import check_path_and_create
 from src import join, helpers
 from src import statistics
 
@@ -88,7 +89,7 @@ class DataPrep(object):
         cases, downloaded data is only stored locally if store_data_locally is not disabled. If this parameter is not
         set, it is assumed, that data should be saved locally.
         """
-        helpers.check_path_and_create(self.path)
+        check_path_and_create(self.path)
         file_name = self._set_file_name()
         meta_file = self._set_meta_file_name()
         if self.kwargs.get('overwrite_local_data', False):
diff --git a/src/helpers.py b/src/helpers.py
deleted file mode 100644
index be736143..00000000
--- a/src/helpers.py
+++ /dev/null
@@ -1,306 +0,0 @@
-import re
-
-__author__ = 'Lukas Leufen, Felix Kleinert'
-__date__ = '2019-10-21'
-
-
-import datetime as dt
-from functools import wraps
-import logging
-import math
-import os
-import socket
-import time
-
-import keras.backend as K
-import xarray as xr
-
-from typing import Dict, Callable, Pattern, Union
-
-
-def to_list(arg):
-    if not isinstance(arg, list):
-        arg = [arg]
-    return arg
-
-
-def check_path_and_create(path):
-    try:
-        os.makedirs(path)
-        logging.debug(f"Created path: {path}")
-    except FileExistsError:
-        logging.debug(f"Path already exists: {path}")
-
-
-def l_p_loss(power: int):
-    """
-    Calculate the L<p> loss for given power p. L1 (p=1) is equal to mean absolute error (MAE), L2 (p=2) is to mean
-    squared error (MSE), ...
-    :param power: set the power of the error calculus
-    :return: loss for given power
-    """
-    def loss(y_true, y_pred):
-        return K.mean(K.pow(K.abs(y_pred - y_true), power), axis=-1)
-    return loss
-
-
-class TimeTrackingWrapper:
-
-    def __init__(self, func):
-        wraps(func)(self)
-
-    def __call__(self, *args, **kwargs):
-        with TimeTracking(name=self.__wrapped__.__name__):
-            return self.__wrapped__(*args, **kwargs)
-
-
-class TimeTracking(object):
-    """
-    Track time to measure execution time. Time tracking automatically starts on initialisation and ends by calling stop
-    method. Duration can always be shown by printing the time tracking object or calling get_current_duration.
-    """
-
-    def __init__(self, start=True, name="undefined job"):
-        self.start = None
-        self.end = None
-        self._name = name
-        if start:
-            self._start()
-
-    def _start(self):
-        self.start = time.time()
-        self.end = None
-
-    def _end(self):
-        self.end = time.time()
-
-    def _duration(self):
-        if self.end:
-            return self.end - self.start
-        else:
-            return time.time() - self.start
-
-    def __repr__(self):
-        # return f"{round(self._duration(), 2)}s"
-        return f"{dt.timedelta(seconds=math.ceil(self._duration()))} (hh:mm:ss)"
-
-    def run(self):
-        self._start()
-
-    def stop(self, get_duration=False):
-        if self.end is None:
-            self._end()
-        else:
-            msg = f"Time was already stopped {time.time() - self.end}s ago."
-            raise AssertionError(msg)
-        if get_duration:
-            return self.duration()
-
-    def duration(self):
-        return self._duration()
-
-    def __enter__(self):
-        return self
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        self.stop()
-        logging.info(f"{self._name} finished after {self}")
-
-
-def prepare_host(create_new=True, sampling="daily"):
-    hostname = socket.gethostname()
-    runner_regex = re.compile(r"runner-.*-project-2411-concurrent-\d+")
-    try:
-        user = os.getlogin()
-    except OSError:
-        user = "default"
-    if hostname == "ZAM144":
-        path = f"/home/{user}/Data/toar_{sampling}/"
-    elif hostname == "zam347":
-        path = f"/home/{user}/Data/toar_{sampling}/"
-    elif hostname == "linux-aa9b":
-        path = f"/home/{user}/machinelearningtools/data/toar_{sampling}/"
-    elif (len(hostname) > 2) and (hostname[:2] == "jr"):
-        path = f"/p/project/cjjsc42/{user}/DATA/toar_{sampling}/"
-    elif (len(hostname) > 2) and (hostname[:2] == "jw"):
-        path = f"/p/home/jusers/{user}/juwels/intelliaq/DATA/toar_{sampling}/"
-    elif runner_regex.match(hostname) is not None:
-        path = f"/home/{user}/machinelearningtools/data/toar_{sampling}/"
-    else:
-        raise OSError(f"unknown host '{hostname}'")
-    if not os.path.exists(path):
-        try:
-            if create_new:
-                check_path_and_create(path)
-                return path
-            else:
-                raise PermissionError
-        except PermissionError:
-            raise NotADirectoryError(f"path '{path}' does not exist for host '{hostname}'.")
-    else:
-        logging.debug(f"set path to: {path}")
-        return path
-
-
-def set_experiment_name(experiment_date=None, experiment_path=None, sampling=None):
-
-    if experiment_date is None:
-        experiment_name = "TestExperiment"
-    else:
-        experiment_name = f"{experiment_date}_network"
-    if sampling == "hourly":
-        experiment_name += f"_{sampling}"
-    if experiment_path is None:
-        experiment_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", experiment_name))
-    else:
-        experiment_path = os.path.join(os.path.abspath(experiment_path), experiment_name)
-    return experiment_name, experiment_path
-
-
-def set_bootstrap_path(bootstrap_path, data_path, sampling):
-    if bootstrap_path is None:
-        bootstrap_path = os.path.join(data_path, "..", f"bootstrap_{sampling}")
-    check_path_and_create(bootstrap_path)
-    return bootstrap_path
-
-
-class PyTestRegex:
-    """Assert that a given string meets some expectations."""
-
-    def __init__(self, pattern: Union[str, Pattern], flags: int = 0):
-        self._regex = re.compile(pattern, flags)
-
-    def __eq__(self, actual: str) -> bool:
-        return bool(self._regex.match(actual))
-
-    def __repr__(self) -> str:
-        return self._regex.pattern
-
-
-class PyTestAllEqual:
-
-    def __init__(self, check_list):
-        self._list = check_list
-
-    def _check_all_equal(self):
-        equal = True
-        for b in self._list:
-            equal *= xr.testing.assert_equal(self._list[0], b) is None
-        return equal == 1
-
-    def is_true(self):
-        return self._check_all_equal()
-
-
-def xr_all_equal(check_list):
-    equal = True
-    for b in check_list:
-        equal *= xr.testing.assert_equal(check_list[0], b) is None
-    return equal == 1
-
-
-def dict_to_xarray(d: Dict, coordinate_name: str) -> xr.DataArray:
-    """
-    Convert a dictionary of 2D-xarrays to single 3D-xarray. The name of new coordinate axis follows <coordinate_name>.
-    :param d: dictionary with 2D-xarrays
-    :param coordinate_name: name of the new created axis (2D -> 3D)
-    :return: combined xarray
-    """
-    xarray = None
-    for k, v in d.items():
-        if xarray is None:
-            xarray = v
-            xarray.coords[coordinate_name] = k
-        else:
-            tmp_xarray = v
-            tmp_xarray.coords[coordinate_name] = k
-            xarray = xr.concat([xarray, tmp_xarray], coordinate_name)
-    return xarray
-
-
-def float_round(number: float, decimals: int = 0, round_type: Callable = math.ceil) -> float:
-    """
-    Perform given rounding operation on number with the precision of decimals.
-    :param number: the number to round
-    :param decimals: numbers of decimals of the rounding operations (default 0 -> round to next integer value)
-    :param round_type: the actual rounding operation. Can be any callable function like math.ceil, math.floor or python
-        built-in round operation.
-    :return: rounded number with desired precision
-    """
-    multiplier = 10. ** decimals
-    return round_type(number * multiplier) / multiplier
-
-
-def list_pop(list_full: list, pop_items):
-    pop_items = to_list(pop_items)
-    if len(pop_items) > 1:
-        return [e for e in list_full if e not in pop_items]
-    else:
-        l_pop = list_full.copy()
-        try:
-            l_pop.remove(pop_items[0])
-        except ValueError:
-            pass
-        return l_pop
-
-
-def dict_pop(dict_orig: Dict, pop_keys):
-    pop_keys = to_list(pop_keys)
-    return {k: v for k, v in dict_orig.items() if k not in pop_keys}
-
-
-class Logger:
-    """
-    Basic logger class to unify all logging outputs. Logs are saved in local file and returned to std output. In default
-    settings, logging level of file logger is DEBUG, logging level of stream logger is INFO. Class must be imported
-    and initialised in starting script, all subscripts should log with logging.info(), debug, ...
-    """
-
-    def __init__(self, log_path=None, level_file=logging.DEBUG, level_stream=logging.INFO):
-
-        # define shared logger format
-        self.formatter = '%(asctime)s - %(levelname)s: %(message)s  [%(filename)s:%(funcName)s:%(lineno)s]'
-
-        # set log path
-        self.log_file = self.setup_logging_path(log_path)
-        # set root logger as file handler
-        logging.basicConfig(level=level_file,
-                            format=self.formatter,
-                            filename=self.log_file,
-                            filemode='a')
-        # add stream handler to the root logger
-        logging.getLogger('').addHandler(self.logger_console(level_stream))
-        # print logger path
-        logging.info(f"File logger: {self.log_file}")
-
-    @staticmethod
-    def setup_logging_path(path: str = None):
-        """
-        Check if given path exists and creates if not. If path is None, use path from main. The logging file is named
-        like `logging_<runtime>.log` where runtime=`%Y-%m-%d_%H-%M-%S` of current run.
-        :param path: path to logfile
-        :return: path of logfile
-        """
-        if not path:  # set default path
-            path = os.path.join(os.path.dirname(__file__), "..", "logging")
-        if not os.path.exists(path):
-            os.makedirs(path)
-        runtime = time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime())
-        log_file = os.path.join(path, f'logging_{runtime}.log')
-        return log_file
-
-    def logger_console(self, level: int):
-        """
-        Defines a stream handler which writes messages of given level or higher to std out
-        :param level: logging level as integer, e.g. logging.DEBUG or 10
-        :return: defines stream handler
-        """
-        # define Handler
-        console = logging.StreamHandler()
-        # set level of Handler
-        console.setLevel(level)
-        # set a format which is simpler for console use
-        formatter = logging.Formatter(self.formatter)
-        # tell the handler to use this format
-        console.setFormatter(formatter)
-        return console
diff --git a/src/helpers/__init__.py b/src/helpers/__init__.py
new file mode 100644
index 00000000..4a428fd2
--- /dev/null
+++ b/src/helpers/__init__.py
@@ -0,0 +1,6 @@
+"""Collection of different supporting functions and classes."""
+
+from .testing import PyTestRegex, PyTestAllEqual, xr_all_equal
+from .time_tracking import TimeTracking, TimeTrackingWrapper
+from .logger import Logger
+from .helpers import remove_items, float_round, dict_to_xarray, to_list
diff --git a/src/helpers/helpers.py b/src/helpers/helpers.py
new file mode 100644
index 00000000..a6023bd8
--- /dev/null
+++ b/src/helpers/helpers.py
@@ -0,0 +1,92 @@
+"""Collection of different help functions."""
+__author__ = 'Lukas Leufen, Felix Kleinert'
+__date__ = '2019-10-21'
+
+import inspect
+import math
+
+import xarray as xr
+
+from typing import Dict, Callable, Union, List, Any
+
+
+def to_list(obj: Any) -> List:
+    """
+    Transform given object to list if obj is not already a list.
+
+    :param obj: object to transform to list
+
+    :return: list containing obj, or obj itself (if obj was already a list)
+    """
+    if not isinstance(obj, list):
+        obj = [obj]
+    return obj
+
+
+def dict_to_xarray(d: Dict, coordinate_name: str) -> xr.DataArray:
+    """
+    Convert a dictionary of 2D-xarrays to single 3D-xarray. The name of new coordinate axis follows <coordinate_name>.
+
+    :param d: dictionary with 2D-xarrays
+    :param coordinate_name: name of the new created axis (2D -> 3D)
+    :return: combined xarray
+    """
+    xarray = None
+    for k, v in d.items():
+        if xarray is None:
+            xarray = v
+            xarray.coords[coordinate_name] = k
+        else:
+            tmp_xarray = v
+            tmp_xarray.coords[coordinate_name] = k
+            xarray = xr.concat([xarray, tmp_xarray], coordinate_name)
+    return xarray
+
+
+def float_round(number: float, decimals: int = 0, round_type: Callable = math.ceil) -> float:
+    """
+    Perform given rounding operation on number with the precision of decimals.
+
+    :param number: the number to round
+    :param decimals: numbers of decimals of the rounding operations (default 0 -> round to next integer value)
+    :param round_type: the actual rounding operation. Can be any callable function like math.ceil, math.floor or python
+        built-in round operation.
+    :return: rounded number with desired precision
+    """
+    multiplier = 10. ** decimals
+    return round_type(number * multiplier) / multiplier
+
+
+def remove_items(obj: Union[List, Dict], items: Any):
+    """
+    Remove item(s) from either list or dictionary.
+
+    :param obj: object to remove items from (either dictionary or list)
+    :param items: elements to remove from obj. Can either be a list or single entry / key
+
+    :return: object without items
+    """
+
+    def remove_from_list(list_obj, item_list):
+        """Remove implementation for lists."""
+        if len(items) > 1:
+            return [e for e in list_obj if e not in item_list]
+        else:
+            list_obj = list_obj.copy()
+            try:
+                list_obj.remove(item_list[0])
+            except ValueError:
+                pass
+            return list_obj
+
+    def remove_from_dict(dict_obj, key_list):
+        """Remove implementation for dictionaries."""
+        return {k: v for k, v in dict_obj.items() if k not in key_list}
+
+    items = to_list(items)
+    if isinstance(obj, list):
+        return remove_from_list(obj, items)
+    elif isinstance(obj, dict):
+        return remove_from_dict(obj, items)
+    else:
+        raise TypeError(f"{inspect.stack()[0][3]} does not support type {type(obj)}.")
diff --git a/src/helpers/logger.py b/src/helpers/logger.py
new file mode 100644
index 00000000..51ecde41
--- /dev/null
+++ b/src/helpers/logger.py
@@ -0,0 +1,70 @@
+"""Logger class."""
+import logging
+import os
+import time
+from ..configuration import ROOT_PATH
+
+
+class Logger:
+    """
+    Basic logger class to unify all logging outputs.
+
+    Logs are saved in local file and returned to std output. In default settings, logging level of file logger is DEBUG,
+    logging level of stream logger is INFO. Class must be imported and initialised in starting script, all subscripts
+    should log with logging.info(), debug, ...
+    """
+
+    def __init__(self, log_path=None, level_file=logging.DEBUG, level_stream=logging.INFO):
+        """Construct logger."""
+        # define shared logger format
+        self.formatter = '%(asctime)s - %(levelname)s: %(message)s  [%(filename)s:%(funcName)s:%(lineno)s]'
+
+        # set log path
+        self.log_file = self.setup_logging_path(log_path)
+        # set root logger as file handler
+        logging.basicConfig(level=level_file,
+                            format=self.formatter,
+                            filename=self.log_file,
+                            filemode='a')
+        # add stream handler to the root logger
+        logging.getLogger('').addHandler(self.logger_console(level_stream))
+        # print logger path
+        logging.info(f"File logger: {self.log_file}")
+
+    @staticmethod
+    def setup_logging_path(path: str = None):
+        """
+        Check if given path exists and creates if not.
+
+        If path is None, use path from main. The logging file is named like `logging_<runtime>.log` where
+        runtime=`%Y-%m-%d_%H-%M-%S` of current run.
+
+        :param path: path to logfile
+
+        :return: path of logfile
+        """
+        if not path:  # set default path
+            path = os.path.join(ROOT_PATH, "logging")
+        if not os.path.exists(path):
+            os.makedirs(path)
+        runtime = time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime())
+        log_file = os.path.join(path, f'logging_{runtime}.log')
+        return log_file
+
+    def logger_console(self, level: int):
+        """
+        Define a stream handler which writes messages of given level or higher to std out.
+
+        :param level: logging level as integer, e.g. logging.DEBUG or 10
+
+        :return: defines stream handler
+        """
+        # define Handler
+        console = logging.StreamHandler()
+        # set level of Handler
+        console.setLevel(level)
+        # set a format which is simpler for console use
+        formatter = logging.Formatter(self.formatter)
+        # tell the handler to use this format
+        console.setFormatter(formatter)
+        return console
\ No newline at end of file
diff --git a/src/helpers/testing.py b/src/helpers/testing.py
new file mode 100644
index 00000000..3eea56bd
--- /dev/null
+++ b/src/helpers/testing.py
@@ -0,0 +1,79 @@
+"""Helper functions that are used to simplify testing."""
+import re
+from typing import Union, Pattern, List
+
+import xarray as xr
+
+
+class PyTestRegex:
+    r"""
+    Assert that a given string meets some expectations.
+
+    Use like
+
+        >>> PyTestRegex(r"TestString\d+") == "TestString"
+        False
+        >>> PyTestRegex(r"TestString\d+") == "TestString2"
+        True
+
+
+    :param pattern: pattern or string to use for regular expression
+    :param flags: python re flags
+    """
+
+    def __init__(self, pattern: Union[str, Pattern], flags: int = 0):
+        """Construct PyTestRegex."""
+        self._regex = re.compile(pattern, flags)
+
+    def __eq__(self, actual: str) -> bool:
+        """Return whether regex matches given string actual or not."""
+        return bool(self._regex.match(actual))
+
+    def __repr__(self) -> str:
+        """Show regex pattern."""
+        return self._regex.pattern
+
+
+class PyTestAllEqual:
+    """
+    Check if all elements in list are the same.
+
+    :param check_list: list with elements to check
+    """
+
+    def __init__(self, check_list: List):
+        """Construct class."""
+        self._list = check_list
+
+    def _check_all_equal(self) -> bool:
+        """
+        Check if all elements are equal.
+
+        :return: boolean if elements are equal
+        """
+        equal = True
+        for b in self._list:
+            equal *= xr.testing.assert_equal(self._list[0], b) is None
+        return bool(equal == 1)
+
+    def is_true(self) -> bool:
+        """
+        Start equality check.
+
+        :return: true if equality test is passed, false otherwise
+        """
+        return self._check_all_equal()
+
+
+def xr_all_equal(check_list: List) -> bool:
+    """
+    Check if all given elements (preferably xarray's) in list are equal.
+
+    :param check_list: list with elements to check
+
+    :return: boolean if all elements are the same or not
+    """
+    equal = True
+    for b in check_list:
+        equal *= xr.testing.assert_equal(check_list[0], b) is None
+    return equal == 1
\ No newline at end of file
diff --git a/src/helpers/time_tracking.py b/src/helpers/time_tracking.py
new file mode 100644
index 00000000..3a4dad59
--- /dev/null
+++ b/src/helpers/time_tracking.py
@@ -0,0 +1,126 @@
+"""Track time either as decorator or explicit."""
+import datetime as dt
+import logging
+import math
+import time
+from functools import wraps
+from typing import Optional
+
+
+class TimeTrackingWrapper:
+    r"""
+    Wrapper implementation of TimeTracking class.
+
+    Use this implementation easily as decorator for functions, classes and class methods. Implement a custom function
+    and decorate it for automatic time measure.
+
+    .. code-block:: python
+
+        @TimeTrackingWrapper
+        def sleeper():
+            print("start")
+            time.sleep(1)
+            print("end")
+
+        >>> sleeper()
+        start
+        end
+        INFO: sleeper finished after 00:00:01 (hh:mm:ss)
+
+    """
+
+    def __init__(self, func):
+        """Construct."""
+        wraps(func)(self)
+
+    def __call__(self, *args, **kwargs):
+        """Start time tracking."""
+        with TimeTracking(name=self.__wrapped__.__name__):
+            return self.__wrapped__(*args, **kwargs)
+
+
+class TimeTracking(object):
+    """
+    Track time to measure execution time.
+
+    Time tracking automatically starts on initialisation and ends by calling stop method. Duration can always be shown
+    by printing the time tracking object or calling get_current_duration. It is possible to start and stop time tracking
+    by hand like
+
+    .. code-block:: python
+
+        time = TimeTracking(start=True)  # start=True is default and not required to set
+        do_something()
+        time.stop(get_duration=True)
+
+    A more comfortable way is to use TimeTracking in a with statement like:
+
+    .. code-block:: python
+
+        with TimeTracking():
+            do_something()
+
+    The only disadvantage of the latter implementation is, that the duration is logged but not returned.
+    """
+
+    def __init__(self, start=True, name="undefined job"):
+        """Construct time tracking and start if enabled."""
+        self.start = None
+        self.end = None
+        self._name = name
+        if start:
+            self._start()
+
+    def _start(self) -> None:
+        """Start time tracking."""
+        self.start = time.time()
+        self.end = None
+
+    def _end(self) -> None:
+        """Stop time tracking."""
+        self.end = time.time()
+
+    def _duration(self) -> float:
+        """Get duration in seconds."""
+        if self.end:
+            return self.end - self.start
+        else:
+            return time.time() - self.start
+
+    def __repr__(self) -> str:
+        """Display current passed time."""
+        return f"{dt.timedelta(seconds=math.ceil(self._duration()))} (hh:mm:ss)"
+
+    def run(self) -> None:
+        """Start time tracking."""
+        self._start()
+
+    def stop(self, get_duration=False) -> Optional[float]:
+        """
+        Stop time tracking.
+
+        Will raise an error if time tracking was already stopped.
+        :param get_duration: return passed time if enabled.
+
+        :return: duration if enabled or None
+        """
+        if self.end is None:
+            self._end()
+        else:
+            msg = f"Time was already stopped {time.time() - self.end}s ago."
+            raise AssertionError(msg)
+        if get_duration:
+            return self.duration()
+
+    def duration(self) -> float:
+        """Return duration in seconds."""
+        return self._duration()
+
+    def __enter__(self):
+        """Context manager."""
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
+        """Stop time tracking on exit and log info about passed time."""
+        self.stop()
+        logging.info(f"{self._name} finished after {self}")
\ No newline at end of file
diff --git a/src/join.py b/src/join.py
index 351060f7..90b3bfc4 100644
--- a/src/join.py
+++ b/src/join.py
@@ -1,7 +1,6 @@
 __author__ = 'Felix Kleinert, Lukas Leufen'
 __date__ = '2019-10-16'
 
-
 import datetime as dt
 import logging
 from typing import Iterator, Union, List, Dict
@@ -10,7 +9,7 @@ import pandas as pd
 import requests
 
 from src import helpers
-from src.join_settings import join_settings
+from src.configuration.join_settings import join_settings
 
 # join_url_base = 'https://join.fz-juelich.de/services/rest/surfacedata/'
 str_or_none = Union[str, None]
@@ -25,9 +24,9 @@ class EmptyQueryResult(Exception):
 
 def download_join(station_name: Union[str, List[str]], stat_var: dict, station_type: str = None,
                   network_name: str = None, sampling: str = "daily") -> [pd.DataFrame, pd.DataFrame]:
-
     """
     read data from JOIN/TOAR
+
     :param station_name: Station name e.g. DEBY122
     :param stat_var: key as variable like 'O3', values as statistics on keys like 'mean'
     :param station_type: set the station type like "traffic" or "background", can be none
@@ -92,6 +91,7 @@ def correct_data_format(data):
     Transform to the standard data format. For some cases (e.g. hourly data), the data is returned as list instead of
     a dictionary with keys datetime, values and metadata. This functions addresses this issue and transforms the data
     into the dictionary version.
+
     :param data: data in hourly format
     :return: the same data but formatted to fit with aggregated format
     """
@@ -108,6 +108,7 @@ def get_data(opts: Dict, headers: Dict) -> Union[Dict, List]:
     """
     Download join data using requests framework. Data is returned as json like structure. Depending on the response
     structure, this can lead to a list or dictionary.
+
     :param opts: options to create the request url
     :param headers: additional headers information like authorization, can be empty
     :return: requested data (either as list or dictionary)
@@ -121,6 +122,7 @@ def load_series_information(station_name: List[str], station_type: str_or_none,
                             join_url_base: str, headers: Dict) -> Dict:
     """
     List all series ids that are available for given station id and network name.
+
     :param station_name: Station name e.g. DEBW107
     :param station_type: station type like "traffic" or "background"
     :param network_name: measurement network of the station like "UBA" or "AIRBASE"
@@ -139,6 +141,7 @@ def load_series_information(station_name: List[str], station_type: str_or_none,
 def _save_to_pandas(df: Union[pd.DataFrame, None], data: dict, stat: str, var: str) -> pd.DataFrame:
     """
     Save given data in data frame. If given data frame is not empty, the data is appened as new column.
+
     :param df: data frame to append the new data, can be none
     :param data: new data to append or format as data frame containing the keys 'datetime' and '<stat>'
     :param stat: extracted statistic to get values from data (e.g. 'mean', 'dma8eu')
@@ -161,6 +164,7 @@ def _correct_stat_name(stat: str) -> str:
     """
     Map given statistic name to new namespace defined by mapping dict. Return given name stat if not element of mapping
     namespace.
+
     :param stat: namespace from JOIN server
     :return: stat mapped to local namespace
     """
@@ -171,6 +175,7 @@ def _correct_stat_name(stat: str) -> str:
 def _lower_list(args: List[str]) -> Iterator[str]:
     """
     lower all elements of given list
+
     :param args: list with string entries to lower
     :return: iterator that lowers all list entries
     """
@@ -181,6 +186,7 @@ def _lower_list(args: List[str]) -> Iterator[str]:
 def create_url(base: str, service: str, **kwargs: Union[str, int, float, None]) -> str:
     """
     create a request url with given base url, service type and arbitrarily many additional keyword arguments
+
     :param base: basic url of the rest service
     :param service: service type, e.g. series, stats
     :param kwargs: keyword pairs for optional request specifications, e.g. 'statistics=maximum'
diff --git a/src/join_settings.py b/src/join_settings.py
deleted file mode 100644
index 365e8f39..00000000
--- a/src/join_settings.py
+++ /dev/null
@@ -1,11 +0,0 @@
-
-def join_settings(sampling="daily"):
-    if sampling == "daily":  # pragma: no branch
-        TOAR_SERVICE_URL = 'https://join.fz-juelich.de/services/rest/surfacedata/'
-        headers = {}
-    elif sampling == "hourly":
-        TOAR_SERVICE_URL = 'https://join.fz-juelich.de/services/rest/surfacedata/'
-        headers = {}
-    else:
-        raise NameError(f"Given sampling {sampling} is not supported, choose from either daily or hourly sampling.")
-    return TOAR_SERVICE_URL, headers
diff --git a/src/plotting/postprocessing_plotting.py b/src/plotting/postprocessing_plotting.py
index 14e3074a..92ff9f2b 100644
--- a/src/plotting/postprocessing_plotting.py
+++ b/src/plotting/postprocessing_plotting.py
@@ -19,8 +19,8 @@ from matplotlib.backends.backend_pdf import PdfPages
 import matplotlib.patches as mpatches
 
 from src import helpers
-from src.helpers import TimeTracking, TimeTrackingWrapper
 from src.data_handling.data_generator import DataGenerator
+from src.helpers import TimeTrackingWrapper
 
 logging.getLogger('matplotlib').setLevel(logging.WARNING)
 
diff --git a/src/run_modules/experiment_setup.py b/src/run_modules/experiment_setup.py
index 150399cb..02cc9cd0 100644
--- a/src/run_modules/experiment_setup.py
+++ b/src/run_modules/experiment_setup.py
@@ -1,12 +1,12 @@
 __author__ = "Lukas Leufen, Felix Kleinert"
 __date__ = '2019-11-15'
 
-
 import argparse
 import logging
 import os
 from typing import Union, Dict, Any
 
+import src.configuration.path_config
 from src import helpers
 from src.run_modules.run_environment import RunEnvironment
 
@@ -32,7 +32,8 @@ class ExperimentSetup(RunEnvironment):
     """
 
     def __init__(self, parser_args=None, stations=None, network=None, station_type=None, variables=None,
-                 statistics_per_var=None, start=None, end=None, window_history_size=None, target_var="o3", target_dim=None,
+                 statistics_per_var=None, start=None, end=None, window_history_size=None, target_var="o3",
+                 target_dim=None,
                  window_lead_time=None, dimensions=None, interpolate_dim=None, interpolate_method=None,
                  limit_nan_fill=None, train_start=None, train_end=None, val_start=None, val_end=None, test_start=None,
                  test_end=None, use_all_stations_on_all_data_sets=True, trainable=None, fraction_of_train=None,
@@ -46,12 +47,12 @@ class ExperimentSetup(RunEnvironment):
         super().__init__()
 
         # experiment setup
-        self._set_param("data_path", helpers.prepare_host(sampling=sampling))
+        self._set_param("data_path", src.configuration.path_config.prepare_host(sampling=sampling))
         self._set_param("create_new_model", create_new_model, default=True)
         if self.data_store.get("create_new_model"):
             trainable = True
         data_path = self.data_store.get("data_path")
-        bootstrap_path = helpers.set_bootstrap_path(bootstrap_path, data_path, sampling)
+        bootstrap_path = src.configuration.path_config.set_bootstrap_path(bootstrap_path, data_path, sampling)
         self._set_param("bootstrap_path", bootstrap_path)
         self._set_param("trainable", trainable, default=True)
         self._set_param("fraction_of_training", fraction_of_train, default=0.8)
@@ -63,21 +64,21 @@ class ExperimentSetup(RunEnvironment):
 
         # set experiment name
         exp_date = self._get_parser_args(parser_args).get("experiment_date")
-        exp_name, exp_path = helpers.set_experiment_name(experiment_date=exp_date, experiment_path=experiment_path,
-                                                         sampling=sampling)
+        exp_name, exp_path = src.configuration.path_config.set_experiment_name(experiment_date=exp_date, experiment_path=experiment_path,
+                                                                               sampling=sampling)
         self._set_param("experiment_name", exp_name)
         self._set_param("experiment_path", exp_path)
-        helpers.check_path_and_create(self.data_store.get("experiment_path"))
+        src.configuration.path_config.check_path_and_create(self.data_store.get("experiment_path"))
 
         # set plot path
         default_plot_path = os.path.join(exp_path, "plots")
         self._set_param("plot_path", plot_path, default=default_plot_path)
-        helpers.check_path_and_create(self.data_store.get("plot_path"))
+        src.configuration.path_config.check_path_and_create(self.data_store.get("plot_path"))
 
         # set results path
         default_forecast_path = os.path.join(exp_path, "forecasts")
         self._set_param("forecast_path", forecast_path, default_forecast_path)
-        helpers.check_path_and_create(self.data_store.get("forecast_path"))
+        src.configuration.path_config.check_path_and_create(self.data_store.get("forecast_path"))
 
         # setup for data
         self._set_param("stations", stations, default=DEFAULT_STATIONS)
@@ -176,12 +177,11 @@ class ExperimentSetup(RunEnvironment):
         unused_vars = set(stat.keys()).difference(set(var).union(target_var))
         if len(unused_vars) > 0:
             logging.info(f"There are unused keys in statistics_per_var. Therefore remove keys: {unused_vars}")
-            stat_new = helpers.dict_pop(stat, list(unused_vars))
+            stat_new = helpers.remove_items(stat, list(unused_vars))
             self._set_param("statistics_per_var", stat_new)
 
 
 if __name__ == "__main__":
-
     formatter = '%(asctime)s - %(levelname)s: %(message)s  [%(filename)s:%(funcName)s:%(lineno)s]'
     logging.basicConfig(format=formatter, level=logging.DEBUG)
 
diff --git a/src/run_modules/model_setup.py b/src/run_modules/model_setup.py
index c558b5fc..92357ab9 100644
--- a/src/run_modules/model_setup.py
+++ b/src/run_modules/model_setup.py
@@ -1,7 +1,6 @@
 __author__ = "Lukas Leufen, Felix Kleinert"
 __date__ = '2019-12-02'
 
-
 import logging
 import os
 
@@ -10,8 +9,8 @@ import tensorflow as tf
 
 from src.model_modules.keras_extensions import HistoryAdvanced, CallbackHandler
 # from src.model_modules.model_class import MyBranchedModel as MyModel
-# from src.model_modules.model_class import MyLittleModel as MyModel
-from src.model_modules.model_class import MyTowerModel as MyModel
+from src.model_modules.model_class import MyLittleModel as MyModel
+# from src.model_modules.model_class import MyTowerModel as MyModel
 # from src.model_modules.model_class import MyPaperModel as MyModel
 from src.run_modules.run_environment import RunEnvironment
 
@@ -96,7 +95,7 @@ class ModelSetup(RunEnvironment):
 
     def get_model_settings(self):
         model_settings = self.model.get_settings()
-        self.data_store.set_args_from_dict(model_settings, self.scope)
+        self.data_store.set_from_dict(model_settings, self.scope)
         self.model_name = self.model_name % self.data_store.get_default("model_name", self.scope, "my_model")
         self.data_store.set("model_name", self.model_name, self.scope)
 
diff --git a/src/run_modules/pre_processing.py b/src/run_modules/pre_processing.py
index 551ea599..ce9b8699 100644
--- a/src/run_modules/pre_processing.py
+++ b/src/run_modules/pre_processing.py
@@ -1,7 +1,6 @@
 __author__ = "Lukas Leufen, Felix Kleinert"
 __date__ = '2019-11-25'
 
-
 import logging
 import os
 from typing import Tuple, Dict, List
@@ -10,7 +9,8 @@ import numpy as np
 import pandas as pd
 
 from src.data_handling.data_generator import DataGenerator
-from src.helpers import TimeTracking, check_path_and_create
+from src.helpers import TimeTracking
+from src.configuration.path_config import check_path_and_create
 from src.join import EmptyQueryResult
 from src.run_modules.run_environment import RunEnvironment
 
@@ -21,7 +21,6 @@ DEFAULT_KWARGS_LIST = ["limit_nan_fill", "window_history_size", "window_lead_tim
 
 
 class PreProcessing(RunEnvironment):
-
     """
     Pre-process your data by using this class. It includes time tracking and uses the experiment setup to look for data
     and stores it if not already in local disk. Further, it provides this data as a generator and checks for valid
@@ -89,7 +88,7 @@ class PreProcessing(RunEnvironment):
         path = os.path.join(self.data_store.get("experiment_path"), "latex_report")
         check_path_and_create(path)
         set_names = ["train", "val", "test"]
-        df = pd.DataFrame(columns=meta_data+set_names)
+        df = pd.DataFrame(columns=meta_data + set_names)
         for set_name in set_names:
             data: DataGenerator = self.data_store.get("generator", set_name)
             for station in data.stations:
@@ -102,12 +101,13 @@ class PreProcessing(RunEnvironment):
         df.sort_index(inplace=True)
         df = df.reindex(df.index.drop(["# Stations", "# Samples"]).to_list() + ["# Stations", "# Samples"], )
         df.index.name = 'stat. ID'
-        column_format = np.repeat('c', df.shape[1]+1)
+        column_format = np.repeat('c', df.shape[1] + 1)
         column_format[0] = 'l'
         column_format[-1] = 'r'
         column_format = ''.join(column_format.tolist())
         df.to_latex(os.path.join(path, "station_sample_size.tex"), na_rep='---', column_format=column_format)
-        df.to_markdown(open(os.path.join(path, "station_sample_size.md"), mode="w", encoding='utf-8'), tablefmt="github")
+        df.to_markdown(open(os.path.join(path, "station_sample_size.md"), mode="w", encoding='utf-8'),
+                       tablefmt="github")
         df.drop(meta_data, axis=1).to_latex(os.path.join(path, "station_sample_size_short.tex"), na_rep='---',
                                             column_format=column_format)
 
@@ -119,7 +119,8 @@ class PreProcessing(RunEnvironment):
         """
         fraction_of_training = self.data_store.get("fraction_of_training")
         stations = self.data_store.get("stations")
-        train_index, val_index, test_index, train_val_index = self.split_set_indices(len(stations), fraction_of_training)
+        train_index, val_index, test_index, train_val_index = self.split_set_indices(len(stations),
+                                                                                     fraction_of_training)
         subset_names = ["train", "val", "test", "train_val"]
         if subset_names[0] != "train":  # pragma: no cover
             raise AssertionError(f"Make sure, that the train subset is always at first execution position! Given subset"
@@ -134,6 +135,7 @@ class PreProcessing(RunEnvironment):
         (1-fraction) of total_length (fraction*len:end). Train and validation data therefore are made from fraction of
         total_length (0:fraction*len). Train and validation data is split by the factor 0.8 for train and 0.2 for
         validation. In addition, split_set_indices returns also the combination of training and validation subset.
+
         :param total_length: list with all objects to split
         :param fraction: ratio between test and union of train/val data
         :return: slices for each subset in the order: train, val, test, train_val
@@ -151,6 +153,7 @@ class PreProcessing(RunEnvironment):
         `generator`. Checks for all valid stations using the default (kw)args for given scope and creates the
         DataGenerator for all valid stations. Also sets all transformation information, if subset is training set. Make
         sure, that the train set is executed first, and all other subsets afterwards.
+
         :param index_list: list of all stations to use for the set. If attribute use_all_stations_on_all_data_sets=True,
             this list is ignored.
         :param set_name: name to load/save all information from/to data store.
@@ -172,10 +175,12 @@ class PreProcessing(RunEnvironment):
             self.data_store.set("transformation", data_set.transformation)
 
     @staticmethod
-    def check_valid_stations(args: Dict, kwargs: Dict, all_stations: List[str], load_tmp=True, save_tmp=True, name=None):
+    def check_valid_stations(args: Dict, kwargs: Dict, all_stations: List[str], load_tmp=True, save_tmp=True,
+                             name=None):
         """
         Check if all given stations in `all_stations` are valid. Valid means, that there is data available for the given
         time range (is included in `kwargs`). The shape and the loading time are logged in debug mode.
+
         :param args: Dictionary with required parameters for DataGenerator class (`data_path`, `network`, `stations`,
             `variables`, `interpolate_dim`, `target_dim`, `target_var`).
         :param kwargs: positional parameters for the DataGenerator class (e.g. `start`, `interpolate_method`,
@@ -200,7 +205,8 @@ class PreProcessing(RunEnvironment):
                 if data.history is None:
                     raise AttributeError
                 valid_stations.append(station)
-                logging.debug(f'{station}: history_shape = {data.history.transpose("datetime", "window", "Stations", "variables").shape}')
+                logging.debug(
+                    f'{station}: history_shape = {data.history.transpose("datetime", "window", "Stations", "variables").shape}')
                 logging.debug(f"{station}: loading time = {t_inner}")
             except (AttributeError, EmptyQueryResult):
                 continue
diff --git a/src/run_modules/run_environment.py b/src/run_modules/run_environment.py
index 7bd50277..63a3cd3a 100644
--- a/src/run_modules/run_environment.py
+++ b/src/run_modules/run_environment.py
@@ -6,9 +6,9 @@ import os
 import shutil
 import time
 
-from src.helpers import Logger
 from src.datastore import DataStoreByScope as DataStoreObject
 from src.datastore import NameNotFoundInDataStore
+from src.helpers import Logger
 from src.helpers import TimeTracking
 
 
diff --git a/test/test_configuration/test_init.py b/test/test_configuration/test_init.py
new file mode 100644
index 00000000..e69de29b
diff --git a/test/test_configuration/test_path_config.py b/test/test_configuration/test_path_config.py
new file mode 100644
index 00000000..55ec6edf
--- /dev/null
+++ b/test/test_configuration/test_path_config.py
@@ -0,0 +1,115 @@
+import logging
+import os
+
+import mock
+import pytest
+
+from src.configuration import prepare_host, set_experiment_name, set_bootstrap_path, check_path_and_create
+from src.helpers import PyTestRegex
+
+
+class TestPrepareHost:
+
+    @mock.patch("socket.gethostname", side_effect=["linux-aa9b", "ZAM144", "zam347", "jrtest", "jwtest",
+                                                   "runner-6HmDp9Qd-project-2411-concurrent-01"])
+    @mock.patch("os.getlogin", return_value="testUser")
+    @mock.patch("os.path.exists", return_value=True)
+    def test_prepare_host(self, mock_path, mock_user, mock_host):
+        assert prepare_host() == "/home/testUser/machinelearningtools/data/toar_daily/"
+        assert prepare_host() == "/home/testUser/Data/toar_daily/"
+        assert prepare_host() == "/home/testUser/Data/toar_daily/"
+        assert prepare_host() == "/p/project/cjjsc42/testUser/DATA/toar_daily/"
+        assert prepare_host() == "/p/home/jusers/testUser/juwels/intelliaq/DATA/toar_daily/"
+        assert prepare_host() == '/home/testUser/machinelearningtools/data/toar_daily/'
+
+    @mock.patch("socket.gethostname", return_value="NotExistingHostName")
+    @mock.patch("os.getlogin", return_value="zombie21")
+    def test_error_handling_unknown_host(self, mock_user, mock_host):
+        with pytest.raises(OSError) as e:
+            prepare_host()
+        assert "unknown host 'NotExistingHostName'" in e.value.args[0]
+
+    @mock.patch("os.getlogin", return_value="zombie21")
+    @mock.patch("src.configuration.check_path_and_create", side_effect=PermissionError)
+    def test_error_handling(self, mock_cpath, mock_user):
+        # if "runner-6HmDp9Qd-project-2411-concurrent" not in platform.node():
+        # mock_host.return_value = "linux-aa9b"
+        with pytest.raises(NotADirectoryError) as e:
+            prepare_host()
+        assert PyTestRegex(r"path '.*' does not exist for host '.*'\.") == e.value.args[0]
+        with pytest.raises(NotADirectoryError) as e:
+            prepare_host(False)
+        # assert "does not exist for host 'linux-aa9b'" in e.value.args[0]
+        assert PyTestRegex(r"path '.*' does not exist for host '.*'\.") == e.value.args[0]
+
+    @mock.patch("socket.gethostname", side_effect=["linux-aa9b", "ZAM144", "zam347", "jrtest", "jwtest",
+                                                   "runner-6HmDp9Qd-project-2411-concurrent-01"])
+    @mock.patch("os.getlogin", side_effect=OSError)
+    @mock.patch("os.path.exists", return_value=True)
+    def test_os_error(self, mock_path, mock_user, mock_host):
+        path = prepare_host()
+        assert path == "/home/default/machinelearningtools/data/toar_daily/"
+        path = prepare_host()
+        assert path == "/home/default/Data/toar_daily/"
+        path = prepare_host()
+        assert path == "/home/default/Data/toar_daily/"
+        path = prepare_host()
+        assert path == "/p/project/cjjsc42/default/DATA/toar_daily/"
+        path = prepare_host()
+        assert path == "/p/home/jusers/default/juwels/intelliaq/DATA/toar_daily/"
+        path = prepare_host()
+        assert path == '/home/default/machinelearningtools/data/toar_daily/'
+
+    @mock.patch("socket.gethostname", side_effect=["linux-aa9b"])
+    @mock.patch("os.getlogin", return_value="testUser")
+    @mock.patch("os.path.exists", return_value=False)
+    @mock.patch("os.makedirs", side_effect=None)
+    def test_os_path_exists(self, mock_makedirs, mock_path, mock_user, mock_host):
+        path = prepare_host()
+        assert path == "/home/testUser/machinelearningtools/data/toar_daily/"
+
+
+class TestSetExperimentName:
+
+    def test_set_experiment(self):
+        exp_name, exp_path = set_experiment_name()
+        assert exp_name == "TestExperiment"
+        assert exp_path == os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "TestExperiment"))
+        exp_name, exp_path = set_experiment_name(experiment_date="2019-11-14", experiment_path="./test2")
+        assert exp_name == "2019-11-14_network"
+        assert exp_path == os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "test2", exp_name))
+
+    def test_set_experiment_from_sys(self):
+        exp_name, _ = set_experiment_name(experiment_date="2019-11-14")
+        assert exp_name == "2019-11-14_network"
+
+    def test_set_experiment_hourly(self):
+        exp_name, exp_path = set_experiment_name(sampling="hourly")
+        assert exp_name == "TestExperiment_hourly"
+        assert exp_path == os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "TestExperiment_hourly"))
+
+
+class TestSetBootstrapPath:
+
+    def test_bootstrap_path_is_none(self):
+        bootstrap_path = set_bootstrap_path(None, 'TestDataPath/', 'daily')
+        assert bootstrap_path == 'TestDataPath/../bootstrap_daily'
+
+    @mock.patch("os.makedirs", side_effect=None)
+    def test_bootstrap_path_is_given(self, mock_makedir):
+        bootstrap_path = set_bootstrap_path('Test/path/to/boots', None, None)
+        assert bootstrap_path == 'Test/path/to/boots'
+
+
+class TestCheckPath:
+
+    def test_check_path_and_create(self, caplog):
+        caplog.set_level(logging.DEBUG)
+        path = 'data/test'
+        assert not os.path.exists('data/test')
+        check_path_and_create(path)
+        assert os.path.exists('data/test')
+        assert caplog.messages[0] == "Created path: data/test"
+        check_path_and_create(path)
+        assert caplog.messages[1] == "Path already exists: data/test"
+        os.rmdir('data/test')
\ No newline at end of file
diff --git a/test/test_data_handling/test_bootstraps.py b/test/test_data_handling/test_bootstraps.py
index c2b814b7..650c2323 100644
--- a/test/test_data_handling/test_bootstraps.py
+++ b/test/test_data_handling/test_bootstraps.py
@@ -1,18 +1,15 @@
-
-from src.data_handling.bootstraps import BootStraps, CreateShuffledData, BootStrapGenerator
-from src.data_handling.data_generator import DataGenerator
-from src.helpers import PyTestAllEqual, xr_all_equal
-
 import logging
-import mock
 import os
-import pytest
 import shutil
-import typing
 
+import mock
 import numpy as np
+import pytest
 import xarray as xr
 
+from src.data_handling.bootstraps import BootStraps, CreateShuffledData, BootStrapGenerator
+from src.data_handling.data_generator import DataGenerator
+
 
 @pytest.fixture
 def orig_generator(data_path):
@@ -44,7 +41,8 @@ class TestBootStrapGenerator:
         assert boot_gen.variables == ["o3", "temp"]
         assert xr.testing.assert_equal(boot_gen.history_orig, hist) is None
         assert xr.testing.assert_equal(boot_gen.history, hist.sel(variables=["temp"])) is None
-        assert xr.testing.assert_allclose(boot_gen.shuffled - 1, hist.sel(variables="o3").expand_dims({"boots": [0]})) is None
+        assert xr.testing.assert_allclose(boot_gen.shuffled - 1,
+                                          hist.sel(variables="o3").expand_dims({"boots": [0]})) is None
 
     def test_len(self, boot_gen):
         assert len(boot_gen) == 20
@@ -290,4 +288,3 @@ class TestBootStraps:
         assert f(regex, test_list, 10, 10) is None
         assert f(regex, test_list, 9, 10) == "DEBW108_h2o_o3_temp_hist9_nboots20_shuffled.nc"
         assert f(regex, test_list, 9, 20) == "DEBW108_h2o_o3_temp_hist9_nboots20_shuffled.nc"
-
diff --git a/test/test_data_handling/test_data_distributor.py b/test/test_data_handling/test_data_distributor.py
index 15344fd8..9e2242fe 100644
--- a/test/test_data_handling/test_data_distributor.py
+++ b/test/test_data_handling/test_data_distributor.py
@@ -49,7 +49,7 @@ class TestDistributor:
         values = np.zeros((2311, 19))
         assert distributor._get_number_of_mini_batches(values) == math.ceil(2311 / distributor.batch_size)
 
-    def test_distribute_on_batches_single_loop(self,  generator_two_stations, model):
+    def test_distribute_on_batches_single_loop(self, generator_two_stations, model):
         d = Distributor(generator_two_stations, model)
         for e in d.distribute_on_batches(fit_call=False):
             assert e[0].shape[0] <= d.batch_size
@@ -60,7 +60,7 @@ class TestDistributor:
         for i, e in enumerate(d.distribute_on_batches()):
             if i < len(d):
                 elements.append(e[0])
-            elif i == 2*len(d):  # check if all elements are repeated
+            elif i == 2 * len(d):  # check if all elements are repeated
                 assert np.testing.assert_array_equal(e[0], elements[i - len(d)]) is None
             else:  # break when 3rd iteration starts (is called as infinite loop)
                 break
@@ -98,7 +98,7 @@ class TestDistributor:
         assert np.testing.assert_equal(x, x_perm) is None
         assert np.testing.assert_equal(y, y_perm) is None
 
-    def test_distribute_on_batches_upsampling_no_extremes_given(self,  generator, model):
+    def test_distribute_on_batches_upsampling_no_extremes_given(self, generator, model):
         d = Distributor(generator, model, upsampling=True)
         gen_len = d.generator.get_data_generator(0, load_local_tmp_storage=False).get_transposed_label().shape[0]
         num_mini_batches = math.ceil(gen_len / d.batch_size)
diff --git a/test/test_data_handling/test_data_generator.py b/test/test_data_handling/test_data_generator.py
index 939f93cc..f48978da 100644
--- a/test/test_data_handling/test_data_generator.py
+++ b/test/test_data_handling/test_data_generator.py
@@ -1,12 +1,11 @@
-import os
-
 import operator as op
-import pytest
+import os
+import pickle
 
-import shutil
 import numpy as np
+import pytest
 import xarray as xr
-import pickle
+
 from src.data_handling.data_generator import DataGenerator
 from src.data_handling.data_preparation import DataPrep
 from src.join import EmptyQueryResult
@@ -99,10 +98,10 @@ class TestDataGenerator:
 
     def test_repr(self, gen):
         path = os.path.join(os.path.dirname(__file__), 'data')
-        assert gen.__repr__().rstrip() == f"DataGenerator(path='{path}', network='AIRBASE', stations=['DEBW107'], "\
+        assert gen.__repr__().rstrip() == f"DataGenerator(path='{path}', network='AIRBASE', stations=['DEBW107'], " \
                                           f"variables=['o3', 'temp'], station_type=None, interpolate_dim='datetime', " \
-                                          f"target_dim='variables', target_var='o3', **{{'start': 2010, 'end': 2014}})"\
-                                          .rstrip()
+                                          f"target_dim='variables', target_var='o3', **{{'start': 2010, 'end': 2014}})" \
+            .rstrip()
 
     def test_len(self, gen):
         assert len(gen) == 1
diff --git a/test/test_data_handling/test_data_preparation.py b/test/test_data_handling/test_data_preparation.py
index 747b3734..edfaa74f 100644
--- a/test/test_data_handling/test_data_preparation.py
+++ b/test/test_data_handling/test_data_preparation.py
@@ -1,7 +1,7 @@
 import datetime as dt
+import logging
 import os
 from operator import itemgetter, lt, gt
-import logging
 
 import numpy as np
 import pandas as pd
@@ -190,7 +190,7 @@ class TestDataPrep:
         assert data._transform_method is None
         assert data.mean is None
         assert data.std is None
-        data_std_orig = data.data.std('datetime'). variable.values
+        data_std_orig = data.data.std('datetime').variable.values
         data.transform('datetime', 'centre')
         assert data._transform_method == 'centre'
         assert np.testing.assert_almost_equal(data.data.mean('datetime').variable.values, np.array([[0, 0]])) is None
@@ -299,11 +299,11 @@ class TestDataPrep:
         index_array = data.create_index_array('window', range(1, 4))
         assert np.testing.assert_array_equal(index_array.data, [1, 2, 3]) is None
         assert index_array.name == 'window'
-        assert index_array.coords.dims == ('window', )
+        assert index_array.coords.dims == ('window',)
         index_array = data.create_index_array('window', range(0, 1))
         assert np.testing.assert_array_equal(index_array.data, [0]) is None
         assert index_array.name == 'window'
-        assert index_array.coords.dims == ('window', )
+        assert index_array.coords.dims == ('window',)
 
     @staticmethod
     def extract_window_data(res, orig, w):
@@ -311,7 +311,7 @@ class TestDataPrep:
         window = res.sel(slice).data.flatten()
         if w <= 0:
             delta = w
-            w = abs(w)+1
+            w = abs(w) + 1
         else:
             delta = 1
         slice = {'variables': ['temp'], 'Stations': 'DEBW107',
@@ -421,10 +421,13 @@ class TestDataPrep:
         orig = data.label
         data.multiply_extremes([1, 1.5, 2, 3])
         upsampled = data.extremes_label
+
         def f(d, op, n):
             return op(d, n).any(dim="window").sum()
+
         assert f(upsampled, gt, 1) == sum([f(orig, gt, 1), f(orig, gt, 1.5), f(orig, gt, 2) * 2, f(orig, gt, 3) * 4])
-        assert f(upsampled, lt, -1) == sum([f(orig, lt, -1), f(orig, lt, -1.5), f(orig, lt, -2) * 2, f(orig, lt, -3) * 4])
+        assert f(upsampled, lt, -1) == sum(
+            [f(orig, lt, -1), f(orig, lt, -1.5), f(orig, lt, -2) * 2, f(orig, lt, -3) * 4])
 
     def test_multiply_extremes_wrong_extremes(self, data):
         data.transform("datetime")
@@ -442,8 +445,10 @@ class TestDataPrep:
         orig = data.label
         data.multiply_extremes([1, 2], extremes_on_right_tail_only=True)
         upsampled = data.extremes_label
+
         def f(d, op, n):
             return op(d, n).any(dim="window").sum()
+
         assert f(upsampled, gt, 1) == sum([f(orig, gt, 1), f(orig, gt, 2)])
         assert upsampled.shape[2] == sum([f(orig, gt, 1), f(orig, gt, 2)])
         assert f(upsampled, lt, -1) == 0
@@ -454,13 +459,13 @@ class TestDataPrep:
         data.label = None
         assert data.multiply_extremes([1], extremes_on_right_tail_only=False) is None
 
-    def test_multiply_extremes_none_history(self,data ):
+    def test_multiply_extremes_none_history(self, data):
         data.transform("datetime")
         data.history = None
         data.make_labels("variables", "o3", "datetime", 2)
         assert data.multiply_extremes([1], extremes_on_right_tail_only=False) is None
 
-    def test_multiply_extremes_none_label_history(self,data ):
+    def test_multiply_extremes_none_label_history(self, data):
         data.history = None
         data.label = None
         assert data.multiply_extremes([1], extremes_on_right_tail_only=False) is None
diff --git a/test/test_datastore.py b/test/test_datastore.py
index 5b6cd17a..76349b0b 100644
--- a/test/test_datastore.py
+++ b/test/test_datastore.py
@@ -1,7 +1,6 @@
 __author__ = 'Lukas Leufen'
 __date__ = '2019-11-22'
 
-
 import pytest
 
 from src.datastore import AbstractDataStore, DataStoreByVariable, DataStoreByScope, CorrectScope
@@ -80,7 +79,8 @@ class TestDataStoreByVariable:
         ds.set("number", 11, "general.sub")
         with pytest.raises(NameNotFoundInScope) as e:
             ds.get("number", "general.sub2")
-        assert "Couldn't find number in scope general.sub2 . number is only defined in ['general.sub']" in e.value.args[0]
+        assert "Couldn't find number in scope general.sub2 . number is only defined in ['general.sub']" in e.value.args[
+            0]
 
     def test_list_all_scopes(self, ds):
         ds.set("number", 22, "general2")
@@ -135,9 +135,9 @@ class TestDataStoreByVariable:
         ds.set("number2", 3, "general.sub.sub")
         ds.set("number", "ABC", "general.sub.sub")
         assert ds.search_scope("general.sub", current_scope_only=False, return_all=True) == \
-            [("number", "general.sub", 11), ("number1", "general.sub", 22)]
+               [("number", "general.sub", 11), ("number1", "general.sub", 22)]
         assert ds.search_scope("general.sub.sub", current_scope_only=False, return_all=True) == \
-            [("number", "general.sub.sub", "ABC"), ("number1", "general.sub", 22), ("number2", "general.sub.sub", 3)]
+               [("number", "general.sub.sub", "ABC"), ("number1", "general.sub", 22), ("number2", "general.sub.sub", 3)]
 
     def test_create_args_dict_default_scope(self, ds_with_content):
         args = ["tester1", "tester2", "tester3", "tester4"]
@@ -153,11 +153,11 @@ class TestDataStoreByVariable:
         assert ds_with_content.create_args_dict(args) == {"tester1": 1}
 
     def test_set_args_from_dict(self, ds):
-        ds.set_args_from_dict({"tester1": 1, "tester2": 10, "tester3": 21})
+        ds.set_from_dict({"tester1": 1, "tester2": 10, "tester3": 21})
         assert ds.get("tester1", "general") == 1
         assert ds.get("tester2", "general") == 10
         assert ds.get("tester3", "general") == 21
-        ds.set_args_from_dict({"tester1": 111}, "general.sub")
+        ds.set_from_dict({"tester1": 111}, "general.sub")
         assert ds.get("tester1", "general.sub") == 111
         assert ds.get("tester3", "general.sub") == 21
 
@@ -231,7 +231,8 @@ class TestDataStoreByScope:
         ds.set("number", 11, "general.sub")
         with pytest.raises(NameNotFoundInScope) as e:
             ds.get("number", "general.sub2")
-        assert "Couldn't find number in scope general.sub2 . number is only defined in ['general.sub']" in e.value.args[0]
+        assert "Couldn't find number in scope general.sub2 . number is only defined in ['general.sub']" in e.value.args[
+            0]
 
     def test_list_all_scopes(self, ds):
         ds.set("number", 22, "general2")
@@ -286,9 +287,9 @@ class TestDataStoreByScope:
         ds.set("number2", 3, "general.sub.sub")
         ds.set("number", "ABC", "general.sub.sub")
         assert ds.search_scope("general.sub", current_scope_only=False, return_all=True) == \
-            [("number", "general.sub", 11), ("number1", "general.sub", 22)]
+               [("number", "general.sub", 11), ("number1", "general.sub", 22)]
         assert ds.search_scope("general.sub.sub", current_scope_only=False, return_all=True) == \
-            [("number", "general.sub.sub", "ABC"), ("number1", "general.sub", 22), ("number2", "general.sub.sub", 3)]
+               [("number", "general.sub.sub", "ABC"), ("number1", "general.sub", 22), ("number2", "general.sub.sub", 3)]
 
     def test_create_args_dict_default_scope(self, ds_with_content):
         args = ["tester1", "tester2", "tester3", "tester4"]
@@ -304,11 +305,11 @@ class TestDataStoreByScope:
         assert ds_with_content.create_args_dict(args) == {"tester1": 1}
 
     def test_set_args_from_dict(self, ds):
-        ds.set_args_from_dict({"tester1": 1, "tester2": 10, "tester3": 21})
+        ds.set_from_dict({"tester1": 1, "tester2": 10, "tester3": 21})
         assert ds.get("tester1", "general") == 1
         assert ds.get("tester2", "general") == 10
         assert ds.get("tester3", "general") == 21
-        ds.set_args_from_dict({"tester1": 111}, "general.sub")
+        ds.set_from_dict({"tester1": 111}, "general.sub")
         assert ds.get("tester1", "general.sub") == 111
         assert ds.get("tester3", "general.sub") == 21
 
diff --git a/test/test_helpers.py b/test/test_helpers.py
deleted file mode 100644
index 9c71a533..00000000
--- a/test/test_helpers.py
+++ /dev/null
@@ -1,393 +0,0 @@
-import logging
-import os
-import platform
-
-import keras
-import mock
-import numpy as np
-import pytest
-
-import re
-
-from src.helpers import *
-
-
-class TestToList:
-
-    def test_to_list(self):
-        assert to_list('a') == ['a']
-        assert to_list('abcd') == ['abcd']
-        assert to_list([1, 2, 3]) == [1, 2, 3]
-        assert to_list([45]) == [45]
-
-
-class TestCheckPath:
-
-    def test_check_path_and_create(self, caplog):
-        caplog.set_level(logging.DEBUG)
-        path = 'data/test'
-        assert not os.path.exists('data/test')
-        check_path_and_create(path)
-        assert os.path.exists('data/test')
-        assert caplog.messages[0] == "Created path: data/test"
-        check_path_and_create(path)
-        assert caplog.messages[1] == "Path already exists: data/test"
-        os.rmdir('data/test')
-
-
-class TestLoss:
-
-    def test_l_p_loss(self):
-        model = keras.Sequential()
-        model.add(keras.layers.Lambda(lambda x: x, input_shape=(None,)))
-        model.compile(optimizer=keras.optimizers.Adam(), loss=l_p_loss(2))
-        hist = model.fit(np.array([1, 0, 2, 0.5]), np.array([1, 1, 0, 0.5]), epochs=1)
-        assert hist.history['loss'][0] == 1.25
-        model.compile(optimizer=keras.optimizers.Adam(), loss=l_p_loss(3))
-        hist = model.fit(np.array([1, 0, -2, 0.5]), np.array([1, 1, 0, 0.5]), epochs=1)
-        assert hist.history['loss'][0] == 2.25
-
-
-class TestTimeTracking:
-
-    def test_init(self):
-        t = TimeTracking()
-        assert t.start is not None
-        assert t.start < time.time()
-        assert t.end is None
-        t2 = TimeTracking(start=False)
-        assert t2.start is None
-
-    def test__start(self):
-        t = TimeTracking(start=False)
-        t._start()
-        assert t.start < time.time()
-
-    def test__end(self):
-        t = TimeTracking()
-        t._end()
-        assert t.end > t.start
-
-    def test__duration(self):
-        t = TimeTracking()
-        d1 = t._duration()
-        assert d1 > 0
-        d2 = t._duration()
-        assert d2 > d1
-        t._end()
-        d3 = t._duration()
-        assert d3 > d2
-        assert d3 == t._duration()
-
-    def test_repr(self):
-        t = TimeTracking()
-        t._end()
-        duration = t._duration()
-        assert t.__repr__().rstrip() == f"{dt.timedelta(seconds=math.ceil(duration))} (hh:mm:ss)".rstrip()
-
-    def test_run(self):
-        t = TimeTracking(start=False)
-        assert t.start is None
-        t.run()
-        assert t.start is not None
-
-    def test_stop(self):
-        t = TimeTracking()
-        assert t.end is None
-        duration = t.stop(get_duration=True)
-        assert duration == t._duration()
-        with pytest.raises(AssertionError) as e:
-            t.stop()
-        assert "Time was already stopped" in e.value.args[0]
-        t.run()
-        assert t.end is None
-        assert t.stop() is None
-        assert t.end is not None
-
-    def test_duration(self):
-        t = TimeTracking()
-        duration = t
-        assert duration is not None
-        duration = t.stop(get_duration=True)
-        assert duration == t.duration()
-
-    def test_enter_exit(self, caplog):
-        caplog.set_level(logging.INFO)
-        with TimeTracking() as t:
-            assert t.start is not None
-            assert t.end is None
-        expression = PyTestRegex(r"undefined job finished after \d+:\d+:\d+ \(hh:mm:ss\)")
-        assert caplog.record_tuples[-1] == ('root', 20, expression)
-
-    def test_name_enter_exit(self, caplog):
-        caplog.set_level(logging.INFO)
-        with TimeTracking(name="my job") as t:
-            assert t.start is not None
-            assert t.end is None
-        expression = PyTestRegex(r"my job finished after \d+:\d+:\d+ \(hh:mm:ss\)")
-        assert caplog.record_tuples[-1] == ('root', 20, expression)
-
-
-class TestPrepareHost:
-
-    @mock.patch("socket.gethostname", side_effect=["linux-aa9b", "ZAM144", "zam347", "jrtest", "jwtest",
-                                                   "runner-6HmDp9Qd-project-2411-concurrent-01"])
-    @mock.patch("os.getlogin", return_value="testUser")
-    @mock.patch("os.path.exists", return_value=True)
-    def test_prepare_host(self, mock_host, mock_user, mock_path):
-        assert prepare_host() == "/home/testUser/machinelearningtools/data/toar_daily/"
-        assert prepare_host() == "/home/testUser/Data/toar_daily/"
-        assert prepare_host() == "/home/testUser/Data/toar_daily/"
-        assert prepare_host() == "/p/project/cjjsc42/testUser/DATA/toar_daily/"
-        assert prepare_host() == "/p/home/jusers/testUser/juwels/intelliaq/DATA/toar_daily/"
-        assert prepare_host() == '/home/testUser/machinelearningtools/data/toar_daily/'
-
-    @mock.patch("socket.gethostname", return_value="NotExistingHostName")
-    @mock.patch("os.getlogin", return_value="zombie21")
-    def test_error_handling_unknown_host(self, mock_user, mock_host):
-        with pytest.raises(OSError) as e:
-            prepare_host()
-        assert "unknown host 'NotExistingHostName'" in e.value.args[0]
-
-    @mock.patch("os.getlogin", return_value="zombie21")
-    @mock.patch("src.helpers.check_path_and_create", side_effect=PermissionError)
-    def test_error_handling(self, mock_cpath, mock_user):
-        # if "runner-6HmDp9Qd-project-2411-concurrent" not in platform.node():
-        # mock_host.return_value = "linux-aa9b"
-        with pytest.raises(NotADirectoryError) as e:
-            prepare_host()
-        assert PyTestRegex(r"path '.*' does not exist for host '.*'\.") == e.value.args[0]
-        with pytest.raises(NotADirectoryError) as e:
-            prepare_host(False)
-        # assert "does not exist for host 'linux-aa9b'" in e.value.args[0]
-        assert PyTestRegex(r"path '.*' does not exist for host '.*'\.") == e.value.args[0]
-
-    @mock.patch("socket.gethostname", side_effect=["linux-aa9b", "ZAM144", "zam347", "jrtest", "jwtest",
-                                                   "runner-6HmDp9Qd-project-2411-concurrent-01"])
-    @mock.patch("os.getlogin", side_effect=OSError)
-    @mock.patch("os.path.exists", return_value=True)
-    def test_os_error(self, mock_path, mock_user, mock_host):
-        path = prepare_host()
-        assert path == "/home/default/machinelearningtools/data/toar_daily/"
-        path = prepare_host()
-        assert path == "/home/default/Data/toar_daily/"
-        path = prepare_host()
-        assert path == "/home/default/Data/toar_daily/"
-        path = prepare_host()
-        assert path == "/p/project/cjjsc42/default/DATA/toar_daily/"
-        path = prepare_host()
-        assert path == "/p/home/jusers/default/juwels/intelliaq/DATA/toar_daily/"
-        path = prepare_host()
-        assert path == '/home/default/machinelearningtools/data/toar_daily/'
-
-    @mock.patch("socket.gethostname", side_effect=["linux-aa9b"])
-    @mock.patch("os.getlogin", return_value="testUser")
-    @mock.patch("os.path.exists", return_value=False)
-    @mock.patch("os.makedirs", side_effect=None)
-    def test_os_path_exists(self, mock_host, mock_user, mock_path, mock_check):
-        path = prepare_host()
-        assert path == "/home/testUser/machinelearningtools/data/toar_daily/"
-
-
-class TestSetExperimentName:
-
-    def test_set_experiment(self):
-        exp_name, exp_path = set_experiment_name()
-        assert exp_name == "TestExperiment"
-        assert exp_path == os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "TestExperiment"))
-        exp_name, exp_path = set_experiment_name(experiment_date="2019-11-14", experiment_path="./test2")
-        assert exp_name == "2019-11-14_network"
-        assert exp_path == os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "test2", exp_name))
-
-    def test_set_experiment_from_sys(self):
-        exp_name, _ = set_experiment_name(experiment_date="2019-11-14")
-        assert exp_name == "2019-11-14_network"
-
-    def test_set_expperiment_hourly(self):
-        exp_name, exp_path = set_experiment_name(sampling="hourly")
-        assert exp_name == "TestExperiment_hourly"
-        assert exp_path == os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "TestExperiment_hourly"))
-
-
-class TestSetBootstrapPath:
-
-    def test_bootstrap_path_is_none(self):
-        bootstrap_path = set_bootstrap_path(None, 'TestDataPath/', 'daily')
-        assert bootstrap_path == 'TestDataPath/../bootstrap_daily'
-
-    @mock.patch("os.makedirs", side_effect=None)
-    def test_bootstap_path_is_given(self, mock_makedir):
-        bootstrap_path = set_bootstrap_path('Test/path/to/boots', None, None)
-        assert bootstrap_path == 'Test/path/to/boots'
-
-
-class TestPytestRegex:
-
-    @pytest.fixture
-    def regex(self):
-        return PyTestRegex("teststring")
-
-    def test_pytest_regex_init(self, regex):
-        assert regex._regex.pattern == "teststring"
-
-    def test_pytest_regex_eq(self, regex):
-        assert regex == "teststringabcd"
-        assert regex != "teststgabcd"
-
-    def test_pytest_regex_repr(self, regex):
-        assert regex.__repr__() == "teststring"
-
-
-class TestDictToXarray:
-
-    def test_dict_to_xarray(self):
-        array1 = xr.DataArray(np.random.randn(2, 3), dims=('x', 'y'), coords={'x': [10, 20]})
-        array2 = xr.DataArray(np.random.randn(2, 3), dims=('x', 'y'), coords={'x': [10, 20]})
-        d = {"number1": array1, "number2": array2}
-        res = dict_to_xarray(d, "merge_dim")
-        assert type(res) == xr.DataArray
-        assert sorted(list(res.coords)) == ["merge_dim", "x"]
-        assert res.shape == (2, 2, 3)
-
-
-class TestFloatRound:
-
-    def test_float_round_ceil(self):
-        assert float_round(4.6) == 5
-        assert float_round(239.3992) == 240
-
-    def test_float_round_decimals(self):
-        assert float_round(23.0091, 2) == 23.01
-        assert float_round(23.1091, 3) == 23.11
-
-    def test_float_round_type(self):
-        assert float_round(34.9221, 2, math.floor) == 34.92
-        assert float_round(34.9221, 0, math.floor) == 34.
-        assert float_round(34.9221, 2, round) == 34.92
-        assert float_round(34.9221, 0, round) == 35.
-
-    def test_float_round_negative(self):
-        assert float_round(-34.9221, 2, math.floor) == -34.93
-        assert float_round(-34.9221, 0, math.floor) == -35.
-        assert float_round(-34.9221, 2) == -34.92
-        assert float_round(-34.9221, 0) == -34.
-
-
-class TestDictPop:
-
-    @pytest.fixture
-    def custom_dict(self):
-        return {'a': 1, 'b': 2, 2: 'ab'}
-
-    def test_dict_pop_single(self, custom_dict):
-        # one out as list
-        d_pop = dict_pop(custom_dict, [4])
-        assert d_pop == custom_dict
-        # one out as str
-        d_pop = dict_pop(custom_dict, '4')
-        assert d_pop == custom_dict
-        # one in as str
-        d_pop = dict_pop(custom_dict, 'b')
-        assert d_pop == {'a': 1, 2: 'ab'}
-        # one in as list
-        d_pop = dict_pop(custom_dict, ['b'])
-        assert d_pop == {'a': 1, 2: 'ab'}
-
-    def test_dict_pop_multiple(self, custom_dict):
-        # all out (list)
-        d_pop = dict_pop(custom_dict, [4, 'mykey'])
-        assert d_pop == custom_dict
-        # all in (list)
-        d_pop = dict_pop(custom_dict, ['a', 2])
-        assert d_pop == {'b': 2}
-        # one in one out (list)
-        d_pop = dict_pop(custom_dict, [2, '10'])
-        assert d_pop == {'a': 1, 'b': 2}
-
-    def test_dict_pop_missing_argument(self, custom_dict):
-        with pytest.raises(TypeError) as e:
-            dict_pop()
-        assert "dict_pop() missing 2 required positional arguments: 'dict_orig' and 'pop_keys'" in e.value.args[0]
-        with pytest.raises(TypeError) as e:
-            dict_pop(custom_dict)
-        assert "dict_pop() missing 1 required positional argument: 'pop_keys'" in e.value.args[0]
-
-
-class TestListPop:
-
-    @pytest.fixture
-    def custom_list(self):
-        return [1, 2, 3, 'a', 'bc']
-
-    def test_list_pop_single(self, custom_list):
-        l_pop = list_pop(custom_list, 1)
-        assert l_pop == [2, 3, 'a', 'bc']
-        l_pop = list_pop(custom_list, 'bc')
-        assert l_pop == [1, 2, 3, 'a']
-        l_pop = list_pop(custom_list, 5)
-        assert l_pop == custom_list
-
-    def test_list_pop_multiple(self, custom_list):
-        # all in list
-        l_pop = list_pop(custom_list, [2, 'a'])
-        assert l_pop == [1, 3, 'bc']
-        # one in one out
-        l_pop = list_pop(custom_list, ['bc', 10])
-        assert l_pop == [1, 2, 3, 'a']
-        # all out
-        l_pop = list_pop(custom_list, [10, 'aa'])
-        assert l_pop == custom_list
-
-    def test_list_pop_missing_argument(self, custom_list):
-        with pytest.raises(TypeError) as e:
-            list_pop()
-        assert "list_pop() missing 2 required positional arguments: 'list_full' and 'pop_items'" in e.value.args[0]
-        with pytest.raises(TypeError) as e:
-            list_pop(custom_list)
-        assert "list_pop() missing 1 required positional argument: 'pop_items'" in e.value.args[0]
-
-
-class TestLogger:
-
-    @pytest.fixture
-    def logger(self):
-        return Logger()
-
-    def test_init_default(self):
-        log = Logger()
-        assert log.formatter == "%(asctime)s - %(levelname)s: %(message)s  [%(filename)s:%(funcName)s:%(lineno)s]"
-        assert log.log_file == Logger.setup_logging_path()
-        # assert PyTestRegex(
-        #     ".*machinelearningtools/src/\.{2}/logging/logging_\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}\.log") == log.log_file
-
-    def test_setup_logging_path_none(self):
-        log_file = Logger.setup_logging_path(None)
-        assert PyTestRegex(
-            ".*machinelearningtools/src/\.{2}/logging/logging_\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}\.log") == log_file
-
-    @mock.patch("os.makedirs", side_effect=None)
-    def test_setup_logging_path_given(self, mock_makedirs):
-        path = "my/test/path"
-        log_path = Logger.setup_logging_path(path)
-        assert PyTestRegex("my/test/path/logging_\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}\.log") == log_path
-
-    def test_logger_console_level0(self, logger):
-        consol = logger.logger_console(0)
-        assert isinstance(consol, logging.StreamHandler)
-        assert consol.level == 0
-        formatter = logging.Formatter(logger.formatter)
-        assert isinstance(formatter, logging.Formatter)
-
-    def test_logger_console_level1(self, logger):
-        consol = logger.logger_console(1)
-        assert isinstance(consol, logging.StreamHandler)
-        assert consol.level == 1
-        formatter = logging.Formatter(logger.formatter)
-        assert isinstance(formatter, logging.Formatter)
-
-    def test_logger_console_level_wrong_type(self, logger):
-        with pytest.raises(TypeError) as e:
-            logger.logger_console(1.5)
-        assert "Level not an integer or a valid string: 1.5" == e.value.args[0]
-
-
diff --git a/test/test_helpers/test_helpers.py b/test/test_helpers/test_helpers.py
new file mode 100644
index 00000000..28a8bf6e
--- /dev/null
+++ b/test/test_helpers/test_helpers.py
@@ -0,0 +1,265 @@
+import numpy as np
+import xarray as xr
+
+import datetime as dt
+import logging
+import math
+import time
+
+import mock
+import pytest
+
+from src.helpers import to_list, dict_to_xarray, float_round, remove_items
+from src.helpers import PyTestRegex
+from src.helpers import Logger, TimeTracking
+
+
+class TestToList:
+
+    def test_to_list(self):
+        assert to_list('a') == ['a']
+        assert to_list('abcd') == ['abcd']
+        assert to_list([1, 2, 3]) == [1, 2, 3]
+        assert to_list([45]) == [45]
+
+
+class TestTimeTracking:
+
+    def test_init(self):
+        t = TimeTracking()
+        assert t.start is not None
+        assert t.start < time.time()
+        assert t.end is None
+        t2 = TimeTracking(start=False)
+        assert t2.start is None
+
+    def test__start(self):
+        t = TimeTracking(start=False)
+        t._start()
+        assert t.start < time.time()
+
+    def test__end(self):
+        t = TimeTracking()
+        t._end()
+        assert t.end > t.start
+
+    def test__duration(self):
+        t = TimeTracking()
+        d1 = t._duration()
+        assert d1 > 0
+        d2 = t._duration()
+        assert d2 > d1
+        t._end()
+        d3 = t._duration()
+        assert d3 > d2
+        assert d3 == t._duration()
+
+    def test_repr(self):
+        t = TimeTracking()
+        t._end()
+        duration = t._duration()
+        assert t.__repr__().rstrip() == f"{dt.timedelta(seconds=math.ceil(duration))} (hh:mm:ss)".rstrip()
+
+    def test_run(self):
+        t = TimeTracking(start=False)
+        assert t.start is None
+        t.run()
+        assert t.start is not None
+
+    def test_stop(self):
+        t = TimeTracking()
+        assert t.end is None
+        duration = t.stop(get_duration=True)
+        assert duration == t._duration()
+        with pytest.raises(AssertionError) as e:
+            t.stop()
+        assert "Time was already stopped" in e.value.args[0]
+        t.run()
+        assert t.end is None
+        assert t.stop() is None
+        assert t.end is not None
+
+    def test_duration(self):
+        t = TimeTracking()
+        duration = t
+        assert duration is not None
+        duration = t.stop(get_duration=True)
+        assert duration == t.duration()
+
+    def test_enter_exit(self, caplog):
+        caplog.set_level(logging.INFO)
+        with TimeTracking() as t:
+            assert t.start is not None
+            assert t.end is None
+        expression = PyTestRegex(r"undefined job finished after \d+:\d+:\d+ \(hh:mm:ss\)")
+        assert caplog.record_tuples[-1] == ('root', 20, expression)
+
+    def test_name_enter_exit(self, caplog):
+        caplog.set_level(logging.INFO)
+        with TimeTracking(name="my job") as t:
+            assert t.start is not None
+            assert t.end is None
+        expression = PyTestRegex(r"my job finished after \d+:\d+:\d+ \(hh:mm:ss\)")
+        assert caplog.record_tuples[-1] == ('root', 20, expression)
+
+
+class TestPytestRegex:
+
+    @pytest.fixture
+    def regex(self):
+        return PyTestRegex("teststring")
+
+    def test_pytest_regex_init(self, regex):
+        assert regex._regex.pattern == "teststring"
+
+    def test_pytest_regex_eq(self, regex):
+        assert regex == "teststringabcd"
+        assert regex != "teststgabcd"
+
+    def test_pytest_regex_repr(self, regex):
+        assert regex.__repr__() == "teststring"
+
+
+class TestDictToXarray:
+
+    def test_dict_to_xarray(self):
+        array1 = xr.DataArray(np.random.randn(2, 3), dims=('x', 'y'), coords={'x': [10, 20]})
+        array2 = xr.DataArray(np.random.randn(2, 3), dims=('x', 'y'), coords={'x': [10, 20]})
+        d = {"number1": array1, "number2": array2}
+        res = dict_to_xarray(d, "merge_dim")
+        assert type(res) == xr.DataArray
+        assert sorted(list(res.coords)) == ["merge_dim", "x"]
+        assert res.shape == (2, 2, 3)
+
+
+class TestFloatRound:
+
+    def test_float_round_ceil(self):
+        assert float_round(4.6) == 5
+        assert float_round(239.3992) == 240
+
+    def test_float_round_decimals(self):
+        assert float_round(23.0091, 2) == 23.01
+        assert float_round(23.1091, 3) == 23.11
+
+    def test_float_round_type(self):
+        assert float_round(34.9221, 2, math.floor) == 34.92
+        assert float_round(34.9221, 0, math.floor) == 34.
+        assert float_round(34.9221, 2, round) == 34.92
+        assert float_round(34.9221, 0, round) == 35.
+
+    def test_float_round_negative(self):
+        assert float_round(-34.9221, 2, math.floor) == -34.93
+        assert float_round(-34.9221, 0, math.floor) == -35.
+        assert float_round(-34.9221, 2) == -34.92
+        assert float_round(-34.9221, 0) == -34.
+
+
+class TestRemoveItems:
+
+    @pytest.fixture
+    def custom_list(self):
+        return [1, 2, 3, 'a', 'bc']
+
+    @pytest.fixture
+    def custom_dict(self):
+        return {'a': 1, 'b': 2, 2: 'ab'}
+
+    def test_dict_remove_single(self, custom_dict):
+        # one out as list
+        d_pop = remove_items(custom_dict, [4])
+        assert d_pop == custom_dict
+        # one out as str
+        d_pop = remove_items(custom_dict, '4')
+        assert d_pop == custom_dict
+        # one in as str
+        d_pop = remove_items(custom_dict, 'b')
+        assert d_pop == {'a': 1, 2: 'ab'}
+        # one in as list
+        d_pop = remove_items(custom_dict, ['b'])
+        assert d_pop == {'a': 1, 2: 'ab'}
+
+    def test_dict_remove_multiple(self, custom_dict):
+        # all out (list)
+        d_pop = remove_items(custom_dict, [4, 'mykey'])
+        assert d_pop == custom_dict
+        # all in (list)
+        d_pop = remove_items(custom_dict, ['a', 2])
+        assert d_pop == {'b': 2}
+        # one in one out (list)
+        d_pop = remove_items(custom_dict, [2, '10'])
+        assert d_pop == {'a': 1, 'b': 2}
+
+    def test_list_remove_single(self, custom_list):
+        l_pop = remove_items(custom_list, 1)
+        assert l_pop == [2, 3, 'a', 'bc']
+        l_pop = remove_items(custom_list, 'bc')
+        assert l_pop == [1, 2, 3, 'a']
+        l_pop = remove_items(custom_list, 5)
+        assert l_pop == custom_list
+
+    def test_list_remove_multiple(self, custom_list):
+        # all in list
+        l_pop = remove_items(custom_list, [2, 'a'])
+        assert l_pop == [1, 3, 'bc']
+        # one in one out
+        l_pop = remove_items(custom_list, ['bc', 10])
+        assert l_pop == [1, 2, 3, 'a']
+        # all out
+        l_pop = remove_items(custom_list, [10, 'aa'])
+        assert l_pop == custom_list
+
+    def test_remove_missing_argument(self, custom_dict, custom_list):
+        with pytest.raises(TypeError) as e:
+            remove_items()
+        assert "remove_items() missing 2 required positional arguments: 'obj' and 'items'" in e.value.args[0]
+        with pytest.raises(TypeError) as e:
+            remove_items(custom_dict)
+        assert "remove_items() missing 1 required positional argument: 'items'" in e.value.args[0]
+        with pytest.raises(TypeError) as e:
+            remove_items(custom_list)
+        assert "remove_items() missing 1 required positional argument: 'items'" in e.value.args[0]
+
+
+class TestLogger:
+
+    @pytest.fixture
+    def logger(self):
+        return Logger()
+
+    def test_init_default(self):
+        log = Logger()
+        assert log.formatter == "%(asctime)s - %(levelname)s: %(message)s  [%(filename)s:%(funcName)s:%(lineno)s]"
+        assert log.log_file == Logger.setup_logging_path()
+        # assert PyTestRegex(
+        #     ".*machinelearningtools/src/\.{2}/logging/logging_\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}\.log") == log.log_file
+
+    def test_setup_logging_path_none(self):
+        log_file = Logger.setup_logging_path(None)
+        assert PyTestRegex(
+            r".*machinelearningtools/logging/logging_\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}\.log") == log_file
+
+    @mock.patch("os.makedirs", side_effect=None)
+    def test_setup_logging_path_given(self, mock_makedirs):
+        path = "my/test/path"
+        log_path = Logger.setup_logging_path(path)
+        assert PyTestRegex(r"my/test/path/logging_\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}\.log") == log_path
+
+    def test_logger_console_level0(self, logger):
+        consol = logger.logger_console(0)
+        assert isinstance(consol, logging.StreamHandler)
+        assert consol.level == 0
+        formatter = logging.Formatter(logger.formatter)
+        assert isinstance(formatter, logging.Formatter)
+
+    def test_logger_console_level1(self, logger):
+        consol = logger.logger_console(1)
+        assert isinstance(consol, logging.StreamHandler)
+        assert consol.level == 1
+        formatter = logging.Formatter(logger.formatter)
+        assert isinstance(formatter, logging.Formatter)
+
+    def test_logger_console_level_wrong_type(self, logger):
+        with pytest.raises(TypeError) as e:
+            logger.logger_console(1.5)
+        assert "Level not an integer or a valid string: 1.5" == e.value.args[0]
diff --git a/test/test_join.py b/test/test_join.py
index fe3d33d6..90c244c9 100644
--- a/test/test_join.py
+++ b/test/test_join.py
@@ -4,7 +4,7 @@ import pytest
 
 from src.join import *
 from src.join import _save_to_pandas, _correct_stat_name, _lower_list
-from src.join_settings import join_settings
+from src.configuration.join_settings import join_settings
 
 
 class TestJoinUrlBase:
@@ -53,7 +53,8 @@ class TestLoadSeriesInformation:
 
     def test_standard_query(self):
         expected_subset = {'o3': 23031, 'no2': 39002, 'temp--lubw': 17059, 'wspeed': 17060}
-        assert expected_subset.items() <= load_series_information(['DEBW107'], None, None, join_settings()[0], {}).items()
+        assert expected_subset.items() <= load_series_information(['DEBW107'], None, None, join_settings()[0],
+                                                                  {}).items()
 
     def test_empty_result(self):
         assert load_series_information(['DEBW107'], "traffic", None, join_settings()[0], {}) == {}
@@ -137,4 +138,3 @@ class TestCreateUrl:
     def test_none_kwargs(self):
         url = create_url("www.base2.edu/", "testingservice", mood="sad", happiness=None, stress_factor=100)
         assert url == "www.base2.edu/testingservice/?mood=sad&stress_factor=100"
-
diff --git a/test/test_model_modules/test_advanced_paddings.py b/test/test_model_modules/test_advanced_paddings.py
index bbeaf1c7..8c7cae91 100644
--- a/test/test_model_modules/test_advanced_paddings.py
+++ b/test/test_model_modules/test_advanced_paddings.py
@@ -69,10 +69,10 @@ class TestPadUtils:
     ##################################################################################
 
     def test_check_padding_format_negative_pads(self):
-
         with pytest.raises(ValueError) as einfo:
             PadUtils.check_padding_format((-2, 1))
-        assert "The `1st entry of padding` argument must be >= 0. Received: -2 of type <class 'int'>" in str(einfo.value)
+        assert "The `1st entry of padding` argument must be >= 0. Received: -2 of type <class 'int'>" in str(
+            einfo.value)
 
         with pytest.raises(ValueError) as einfo:
             PadUtils.check_padding_format((1, -1))
@@ -198,15 +198,18 @@ class TestReflectionPadding2D:
     def test_init_tuple_of_negative_int(self):
         with pytest.raises(ValueError) as einfo:
             ReflectionPadding2D(padding=(-1, 1))
-        assert "The `1st entry of padding` argument must be >= 0. Received: -1 of type <class 'int'>" in str(einfo.value)
+        assert "The `1st entry of padding` argument must be >= 0. Received: -1 of type <class 'int'>" in str(
+            einfo.value)
 
         with pytest.raises(ValueError) as einfo:
             ReflectionPadding2D(padding=(1, -2))
-        assert "The `2nd entry of padding` argument must be >= 0. Received: -2 of type <class 'int'>" in str(einfo.value)
+        assert "The `2nd entry of padding` argument must be >= 0. Received: -2 of type <class 'int'>" in str(
+            einfo.value)
 
         with pytest.raises(ValueError) as einfo:
             ReflectionPadding2D(padding=(-1, -2))
-        assert "The `1st entry of padding` argument must be >= 0. Received: -1 of type <class 'int'>" in str(einfo.value)
+        assert "The `1st entry of padding` argument must be >= 0. Received: -1 of type <class 'int'>" in str(
+            einfo.value)
 
     def test_init_tuple_of_invalid_format_float(self):
         with pytest.raises(ValueError) as einfo:
@@ -434,7 +437,6 @@ class TestPadding2D:
             'ZeroPad2D': ZeroPadding2D, 'ZeroPadding2D': ZeroPadding2D
         }
 
-
     def test_check_and_get_padding_zero_padding(self):
         assert Padding2D('ZeroPad2D')._check_and_get_padding() == ZeroPadding2D
         assert Padding2D('ZeroPadding2D')._check_and_get_padding() == ZeroPadding2D
@@ -450,14 +452,14 @@ class TestPadding2D:
         assert Padding2D('ReflectionPadding2D')._check_and_get_padding() == ReflectionPadding2D
         assert Padding2D(ReflectionPadding2D)._check_and_get_padding() == ReflectionPadding2D
 
-    def test_check_and_get_padding_raises(self,):
+    def test_check_and_get_padding_raises(self, ):
         with pytest.raises(NotImplementedError) as einfo:
             Padding2D('FalsePadding2D')._check_and_get_padding()
         assert "`'FalsePadding2D'' is not implemented as padding. " \
                "Use one of those: i) `RefPad2D', ii) `SymPad2D', iii) `ZeroPad2D'" in str(einfo.value)
         with pytest.raises(TypeError) as einfo:
             Padding2D(keras.layers.Conv2D)._check_and_get_padding()
-        assert "`Conv2D' is not a valid padding layer type. Use one of those: "\
+        assert "`Conv2D' is not a valid padding layer type. Use one of those: " \
                "i) ReflectionPadding2D, ii) SymmetricPadding2D, iii) ZeroPadding2D" in str(einfo.value)
 
     @pytest.mark.parametrize("pad_type", ["SymPad2D", "SymmetricPadding2D", SymmetricPadding2D,
@@ -469,9 +471,8 @@ class TestPadding2D:
             layer_name = pad_type.__name__
         else:
             layer_name = pad_type
-        pd_ap = pd(padding=(1,2), name=f"{layer_name}_layer")(input_x)
+        pd_ap = pd(padding=(1, 2), name=f"{layer_name}_layer")(input_x)
         assert pd_ap._keras_history[0].input_shape == (None, 32, 32, 3)
         assert pd_ap._keras_history[0].output_shape == (None, 34, 36, 3)
         assert pd_ap._keras_history[0].padding == ((1, 1), (2, 2))
         assert pd_ap._keras_history[0].name == f"{layer_name}_layer"
-
diff --git a/test/test_model_modules/test_inception_model.py b/test/test_model_modules/test_inception_model.py
index e5e92158..ca0126a4 100644
--- a/test/test_model_modules/test_inception_model.py
+++ b/test/test_model_modules/test_inception_model.py
@@ -1,9 +1,9 @@
 import keras
 import pytest
 
-from src.model_modules.inception_model import InceptionModelBase
-from src.model_modules.advanced_paddings import ReflectionPadding2D, SymmetricPadding2D
 from src.helpers import PyTestRegex
+from src.model_modules.advanced_paddings import ReflectionPadding2D, SymmetricPadding2D
+from src.model_modules.inception_model import InceptionModelBase
 
 
 class TestInceptionModelBase:
diff --git a/test/test_model_modules/test_keras_extensions.py b/test/test_model_modules/test_keras_extensions.py
index 35188933..56c60ec4 100644
--- a/test/test_model_modules/test_keras_extensions.py
+++ b/test/test_model_modules/test_keras_extensions.py
@@ -1,10 +1,10 @@
+import os
+
 import keras
-import numpy as np
-import pytest
 import mock
-import os
+import pytest
 
-from src.helpers import l_p_loss
+from src.model_modules.loss import l_p_loss
 from src.model_modules.keras_extensions import *
 
 
@@ -70,12 +70,13 @@ class TestModelCheckpointAdvanced:
     def callbacks(self):
         callbacks_name = os.path.join(os.path.dirname(__file__), "callback_%s")
         return [{"callback": LearningRateDecay(), "path": callbacks_name % "lr"},
-                     {"callback": HistoryAdvanced(), "path": callbacks_name % "hist"}]
+                {"callback": HistoryAdvanced(), "path": callbacks_name % "hist"}]
 
     @pytest.fixture
     def ckpt(self, callbacks):
         ckpt_name = "ckpt.test"
-        return ModelCheckpointAdvanced(filepath=ckpt_name, monitor='val_loss', save_best_only=True, callbacks=callbacks, verbose=1)
+        return ModelCheckpointAdvanced(filepath=ckpt_name, monitor='val_loss', save_best_only=True, callbacks=callbacks,
+                                       verbose=1)
 
     def test_init(self, ckpt, callbacks):
         assert ckpt.callbacks == callbacks
@@ -185,7 +186,6 @@ class TestCallbackHandler:
             clbk_handler.add_callback("callback_new_instance", "this_path")
         assert 'CallbackHandler is protected and cannot be edited.' in str(einfo.value)
 
-
     def test_get_callbacks_as_dict(self, clbk_handler_with_dummies):
         clbk = clbk_handler_with_dummies
         assert clbk.get_callbacks() == [{"callback": "callback_new_instance", "path": "this_path"},
diff --git a/test/test_model_modules/test_linear_model.py b/test/test_model_modules/test_linear_model.py
index e4e10e9d..0fab7ae3 100644
--- a/test/test_model_modules/test_linear_model.py
+++ b/test/test_model_modules/test_linear_model.py
@@ -1,7 +1,3 @@
-
-from src.model_modules.linear_model import OrdinaryLeastSquaredModel
-
-
 class TestOrdinaryLeastSquareModel:
 
     def test_constant_input_variable(self):
diff --git a/test/test_model_modules/test_loss.py b/test/test_model_modules/test_loss.py
new file mode 100644
index 00000000..c47f3f18
--- /dev/null
+++ b/test/test_model_modules/test_loss.py
@@ -0,0 +1,17 @@
+import keras
+import numpy as np
+
+from src.model_modules.loss import l_p_loss
+
+
+class TestLoss:
+
+    def test_l_p_loss(self):
+        model = keras.Sequential()
+        model.add(keras.layers.Lambda(lambda x: x, input_shape=(None,)))
+        model.compile(optimizer=keras.optimizers.Adam(), loss=l_p_loss(2))
+        hist = model.fit(np.array([1, 0, 2, 0.5]), np.array([1, 1, 0, 0.5]), epochs=1)
+        assert hist.history['loss'][0] == 1.25
+        model.compile(optimizer=keras.optimizers.Adam(), loss=l_p_loss(3))
+        hist = model.fit(np.array([1, 0, -2, 0.5]), np.array([1, 1, 0, 0.5]), epochs=1)
+        assert hist.history['loss'][0] == 2.25
\ No newline at end of file
diff --git a/test/test_model_modules/test_model_class.py b/test/test_model_modules/test_model_class.py
index cee03174..eab8f8d1 100644
--- a/test/test_model_modules/test_model_class.py
+++ b/test/test_model_modules/test_model_class.py
@@ -2,7 +2,7 @@ import keras
 import pytest
 
 from src.model_modules.model_class import AbstractModelClass
-from src.model_modules.model_class import MyPaperModel, MyTowerModel, MyLittleModel, MyBranchedModel
+from src.model_modules.model_class import MyPaperModel
 
 
 class Paddings:
@@ -93,4 +93,3 @@ class TestMyPaperModel:
 
     def test_set_loss(self, mpm):
         assert callable(mpm.loss) or (len(mpm.loss) > 0)
-
diff --git a/test/test_modules/test_experiment_setup.py b/test/test_modules/test_experiment_setup.py
index a3a83acf..dd1bda1b 100644
--- a/test/test_modules/test_experiment_setup.py
+++ b/test/test_modules/test_experiment_setup.py
@@ -4,7 +4,8 @@ import os
 
 import pytest
 
-from src.helpers import TimeTracking, prepare_host
+from src.helpers import TimeTracking
+from src.configuration.path_config import prepare_host
 from src.run_modules.experiment_setup import ExperimentSetup
 
 
diff --git a/test/test_modules/test_model_setup.py b/test/test_modules/test_model_setup.py
index 9ff7494f..b91a4327 100644
--- a/test/test_modules/test_model_setup.py
+++ b/test/test_modules/test_model_setup.py
@@ -105,4 +105,3 @@ class TestModelSetup:
 
     def test_init(self):
         pass
-
diff --git a/test/test_modules/test_training.py b/test/test_modules/test_training.py
index 31c673f0..bb319fe9 100644
--- a/test/test_modules/test_training.py
+++ b/test/test_modules/test_training.py
@@ -7,7 +7,7 @@ import shutil
 import keras
 import mock
 import pytest
-from keras.callbacks import ModelCheckpoint, History
+from keras.callbacks import History
 
 from src.data_handling.data_distributor import Distributor
 from src.data_handling.data_generator import DataGenerator
@@ -178,7 +178,8 @@ class TestTraining:
         assert all([getattr(init_without_run, f"{obj}_set") is None for obj in sets])
         init_without_run.set_generators()
         assert not all([getattr(init_without_run, f"{obj}_set") is None for obj in sets])
-        assert all([getattr(init_without_run, f"{obj}_set").generator.return_value == f"mock_{obj}_gen" for obj in sets])
+        assert all(
+            [getattr(init_without_run, f"{obj}_set").generator.return_value == f"mock_{obj}_gen" for obj in sets])
 
     def test_train(self, ready_to_train, path):
         assert not hasattr(ready_to_train.model, "history")
@@ -193,7 +194,8 @@ class TestTraining:
         model_name = "test_model.h5"
         assert model_name not in os.listdir(path)
         init_without_run.save_model()
-        assert caplog.record_tuples[0] == ("root", 10, PyTestRegex(f"save best model to {os.path.join(path, model_name)}"))
+        assert caplog.record_tuples[0] == (
+            "root", 10, PyTestRegex(f"save best model to {os.path.join(path, model_name)}"))
         assert model_name in os.listdir(path)
 
     def test_load_best_model_no_weights(self, init_without_run, caplog):
diff --git a/test/test_plotting/test_training_monitoring.py b/test/test_plotting/test_training_monitoring.py
index 7e4e21c1..6e5e0abb 100644
--- a/test/test_plotting/test_training_monitoring.py
+++ b/test/test_plotting/test_training_monitoring.py
@@ -94,7 +94,6 @@ class TestPlotModelHistory:
         assert "hist_additional.pdf" in os.listdir(path)
 
 
-
 class TestPlotModelLearningRate:
 
     @pytest.fixture
diff --git a/test/test_statistics.py b/test/test_statistics.py
index cad91556..6e981faf 100644
--- a/test/test_statistics.py
+++ b/test/test_statistics.py
@@ -3,7 +3,7 @@ import pandas as pd
 import pytest
 import xarray as xr
 
-from src.statistics import standardise, standardise_inverse, standardise_apply, centre, centre_inverse, centre_apply,\
+from src.statistics import standardise, standardise_inverse, standardise_apply, centre, centre_inverse, centre_apply, \
     apply_inverse_transformation
 
 lazy = pytest.lazy_fixture
-- 
GitLab