diff --git a/docs/_source/defaults.rst b/docs/_source/defaults.rst
index 775134f5761b27cbdc6927efaf3e3d6fa1dd68cf..e95cf10eb8b53e776a2607dafba52fd1edad98ca 100644
--- a/docs/_source/defaults.rst
+++ b/docs/_source/defaults.rst
@@ -17,6 +17,7 @@ create_new_model
 data_handler
 data_origin
 data_path
+debug                             -               MLAir checks if it is running in debug mode and stores this information
 dimensions
 end
 epochs
@@ -57,6 +58,7 @@ train_start
 transformation                    :py:`{}`        implement all further transformation functionality
                                                   inside your custom data handler
 use_all_stations_on_all_data_sets
+use_multiprocessing               :py:`True`      is set to False if MLAir is running in debug mode
 upsampling
 val_end
 val_min_length
diff --git a/mlair/configuration/defaults.py b/mlair/configuration/defaults.py
index 04e441fe2ec3b421cf5f0ad1469584f5ef2aa668..8805acfc99d2064b656e3fc80c95a6de198acf29 100644
--- a/mlair/configuration/defaults.py
+++ b/mlair/configuration/defaults.py
@@ -53,6 +53,7 @@ DEFAULT_SAMPLING = "daily"
 DEFAULT_DATA_ORIGIN = {"cloudcover": "REA", "humidity": "REA", "pblheight": "REA", "press": "REA", "relhum": "REA",
                        "temp": "REA", "totprecip": "REA", "u": "REA", "v": "REA", "no": "", "no2": "", "o3": "",
                        "pm10": "", "so2": ""}
+DEFAULT_USE_MULTIPROCESSING = True
 
 
 def get_defaults():
diff --git a/mlair/data_handler/default_data_handler.py b/mlair/data_handler/default_data_handler.py
index 2eceff328bf696bd954ec6649c78db03173c9bdb..87fc83b0c5d97631b9b0e01aa490be20c107ed1f 100644
--- a/mlair/data_handler/default_data_handler.py
+++ b/mlair/data_handler/default_data_handler.py
@@ -39,7 +39,8 @@ class DefaultDataHandler(AbstractDataHandler):
 
     def __init__(self, id_class: data_handler, experiment_path: str, min_length: int = 0,
                  extreme_values: num_or_list = None, extremes_on_right_tail_only: bool = False, name_affix=None,
-                 store_processed_data=True, iter_dim=DEFAULT_ITER_DIM, time_dim=DEFAULT_TIME_DIM):
+                 store_processed_data=True, iter_dim=DEFAULT_ITER_DIM, time_dim=DEFAULT_TIME_DIM,
+                 use_multiprocessing=True):
         super().__init__()
         self.id_class = id_class
         self.time_dim = time_dim
@@ -49,6 +50,7 @@ class DefaultDataHandler(AbstractDataHandler):
         self._Y = None
         self._X_extreme = None
         self._Y_extreme = None
+        self._use_multiprocessing = use_multiprocessing
         _name_affix = str(f"{str(self.id_class)}_{name_affix}" if name_affix is not None else id(self))
         self._save_file = os.path.join(experiment_path, "data", f"{_name_affix}.pickle")
         self._collection = self._create_collection()
@@ -286,7 +288,7 @@ class DefaultDataHandler(AbstractDataHandler):
                             new = opts.get(k)
                             transformation_dict[i][var][k] = new if old is None else old.combine_first(new)
 
-        if multiprocessing.cpu_count() > 1:  # parallel solution
+        if multiprocessing.cpu_count() > 1 and kwargs.get("use_multiprocessing", True) is True:  # parallel solution
             logging.info("use parallel transformation approach")
             pool = multiprocessing.Pool(
                 min([psutil.cpu_count(logical=False), len(set_stations), 16]))  # use only physical cpus
diff --git a/mlair/run_modules/experiment_setup.py b/mlair/run_modules/experiment_setup.py
index 30672ecc9206319896205d886157b2f2f8977f39..c777bcc4d568862485d733ca42f2ea38c52799eb 100644
--- a/mlair/run_modules/experiment_setup.py
+++ b/mlair/run_modules/experiment_setup.py
@@ -4,6 +4,7 @@ __date__ = '2019-11-15'
 import argparse
 import logging
 import os
+import sys
 from typing import Union, Dict, Any, List, Callable
 
 from mlair.configuration import path_config
@@ -17,7 +18,8 @@ from mlair.configuration.defaults import DEFAULT_STATIONS, DEFAULT_VAR_ALL_DICT,
     DEFAULT_TRAIN_START, DEFAULT_TRAIN_END, DEFAULT_TRAIN_MIN_LENGTH, DEFAULT_VAL_START, DEFAULT_VAL_END, \
     DEFAULT_VAL_MIN_LENGTH, DEFAULT_TEST_START, DEFAULT_TEST_END, DEFAULT_TEST_MIN_LENGTH, DEFAULT_TRAIN_VAL_MIN_LENGTH, \
     DEFAULT_USE_ALL_STATIONS_ON_ALL_DATA_SETS, DEFAULT_EVALUATE_BOOTSTRAPS, DEFAULT_CREATE_NEW_BOOTSTRAPS, \
-    DEFAULT_NUMBER_OF_BOOTSTRAPS, DEFAULT_PLOT_LIST, DEFAULT_SAMPLING, DEFAULT_DATA_ORIGIN, DEFAULT_ITER_DIM
+    DEFAULT_NUMBER_OF_BOOTSTRAPS, DEFAULT_PLOT_LIST, DEFAULT_SAMPLING, DEFAULT_DATA_ORIGIN, DEFAULT_ITER_DIM, \
+    DEFAULT_USE_MULTIPROCESSING
 from mlair.data_handler import DefaultDataHandler
 from mlair.run_modules.run_environment import RunEnvironment
 from mlair.model_modules.fully_connected_networks import FCN_64_32_16 as VanillaModel
@@ -62,48 +64,6 @@ class ExperimentSetup(RunEnvironment):
         * `target_dim` [.]
         * `window_lead_time` [.]
 
-        # interpolation
-        self._set_param("dimensions", dimensions, default={'new_index': ['datetime', 'Stations']})
-        self._set_param("time_dim", time_dim, default='datetime')
-        self._set_param("interpolation_method", interpolation_method, default='linear')
-        self._set_param("limit_nan_fill", limit_nan_fill, default=1)
-
-        # train set parameters
-        self._set_param("start", train_start, default="1997-01-01", scope="train")
-        self._set_param("end", train_end, default="2007-12-31", scope="train")
-        self._set_param("min_length", train_min_length, default=90, scope="train")
-
-        # validation set parameters
-        self._set_param("start", val_start, default="2008-01-01", scope="val")
-        self._set_param("end", val_end, default="2009-12-31", scope="val")
-        self._set_param("min_length", val_min_length, default=90, scope="val")
-
-        # test set parameters
-        self._set_param("start", test_start, default="2010-01-01", scope="test")
-        self._set_param("end", test_end, default="2017-12-31", scope="test")
-        self._set_param("min_length", test_min_length, default=90, scope="test")
-
-        # train_val set parameters
-        self._set_param("start", self.data_store.get("start", "train"), scope="train_val")
-        self._set_param("end", self.data_store.get("end", "val"), scope="train_val")
-        train_val_min_length = sum([self.data_store.get("min_length", s) for s in ["train", "val"]])
-        self._set_param("min_length", train_val_min_length, default=180, scope="train_val")
-
-        # use all stations on all data sets (train, val, test)
-        self._set_param("use_all_stations_on_all_data_sets", use_all_stations_on_all_data_sets, default=True)
-
-        # set post-processing instructions
-        self._set_param("evaluate_bootstraps", evaluate_bootstraps, scope="general.postprocessing")
-        create_new_bootstraps = max([self.data_store.get("train_model", "general"), create_new_bootstraps or False])
-        self._set_param("create_new_bootstraps", create_new_bootstraps, scope="general.postprocessing")
-        self._set_param("number_of_bootstraps", number_of_bootstraps, default=20, scope="general.postprocessing")
-        self._set_param("plot_list", plot_list, default=DEFAULT_PLOT_LIST, scope="general.postprocessing")
-
-        # check variables, statistics and target variable
-        self._check_target_var()
-        self._compare_variables_and_statistics()
-
-
     Creates
         * plot of model architecture in `<model_name>.pdf`
 
@@ -135,8 +95,11 @@ class ExperimentSetup(RunEnvironment):
         predicted.
     :param dimensions:
     :param time_dim:
-    :param interpolation_method:
-    :param limit_nan_fill:
+    :param interpolation_method: The method to use for interpolation.
+    :param interpolation_limit: The maximum number of subsequent time steps in a gap to fill by interpolation. If the
+        gap exceeds this number, the gap is not filled by interpolation at all. The value of time steps is an arbitrary
+        number that is applied depending on the `sampling` frequency. A limit of 2 means that either 2 hours or 2 days
+        are allowed to be interpolated depending on the set sampling rate.
     :param train_start:
     :param train_end:
     :param val_start:
@@ -197,6 +160,29 @@ class ExperimentSetup(RunEnvironment):
     :param data_path: path to find and store meteorological and environmental / air quality data. Leave this parameter
         empty, if your host system is known and a suitable path was already hardcoded in the program (see
         :py:func:`prepare host <src.configuration.path_config.prepare_host>`).
+    :param experiment_date:
+    :param window_dim: "Temporal" dimension of the input and target data, that is provided for each sample. The number
+        of samples provided in this dimension can be set using `window_history_size` for inputs and `window_lead_time`
+        on target site.
+    :param iter_dim:
+    :param batch_path:
+    :param login_nodes:
+    :param hpc_hosts:
+    :param model:
+    :param batch_size:
+    :param epochs: Number of epochs used in training. If a training is resumed and the number of epochs of the already
+        (partly) trained model is lower than this parameter, training is continued. In case this number is higher than
+        the given epochs parameter, no training is resumed. Epochs is set to 20 per default, but this value is just a
+        placeholder that should be adjusted for a meaningful training.
+    :param data_handler:
+    :param data_origin:
+    :param competitors: Provide names of reference models trained by MLAir that can be found in the `competitor_path`.
+        These models will be used in the postprocessing for comparison.
+    :param competitor_path: The path where MLAir can find competing models. If not provided, this path is assumed to be
+        in the `data_path` directory as a subdirectory called `competitors` (default).
+    :param use_multiprocessing: Enable parallel preprocessing (postprocessing not implemented yet) by setting this
+        parameter to `True` (default). If set to `False` the computation is performed in a serial approach.
+        Multiprocessing is disabled when running in debug mode and cannot be switched on.
 
     """
 
@@ -228,7 +214,8 @@ class ExperimentSetup(RunEnvironment):
                  number_of_bootstraps=None,
                  create_new_bootstraps=None, data_path: str = None, batch_path: str = None, login_nodes=None,
                  hpc_hosts=None, model=None, batch_size=None, epochs=None, data_handler=None,
-                 data_origin: Dict = None, competitors: list = None, competitor_path: str = None, **kwargs):
+                 data_origin: Dict = None, competitors: list = None, competitor_path: str = None,
+                 use_multiprocessing: bool = None, **kwargs):
 
         # create run framework
         super().__init__()
@@ -265,6 +252,12 @@ class ExperimentSetup(RunEnvironment):
         logging.info(f"Experiment path is: {experiment_path}")
         path_config.check_path_and_create(self.data_store.get("experiment_path"))
 
+        # host system setup
+        debug_mode = sys.gettrace() is not None
+        self._set_param("debug_mode", debug_mode)
+        use_multiprocessing = False if debug_mode is True else use_multiprocessing
+        self._set_param("use_multiprocessing", use_multiprocessing, default=DEFAULT_USE_MULTIPROCESSING)
+
         # batch path (temporary)
         self._set_param("batch_path", batch_path, default=os.path.join(experiment_path, "batch_data"))
 
diff --git a/mlair/run_modules/pre_processing.py b/mlair/run_modules/pre_processing.py
index 813873b8181fcb78917c5ef4e697da63b2941845..f59a4e89ced738c9198619ec0d117df2edf3ee93 100644
--- a/mlair/run_modules/pre_processing.py
+++ b/mlair/run_modules/pre_processing.py
@@ -241,8 +241,9 @@ class PreProcessing(RunEnvironment):
         collection = DataCollection(name=set_name)
         valid_stations = []
         kwargs = self.data_store.create_args_dict(data_handler.requirements(), scope=set_name)
+        use_multiprocessing = self.data_store.get("use_multiprocessing")
 
-        if multiprocessing.cpu_count() > 1:  # parallel solution
+        if multiprocessing.cpu_count() > 1 and use_multiprocessing:  # parallel solution
             logging.info("use parallel validate station approach")
             pool = multiprocessing.Pool(
                 min([psutil.cpu_count(logical=False), len(set_stations), 16]))  # use only physical cpus