diff --git a/CI/run_pytest.sh b/CI/run_pytest.sh
index 5547b7ab2715e59c123056e56def989bdefdcfeb..a7d883dcc95e0c16541af00ed0891e2d31dee82c 100644
--- a/CI/run_pytest.sh
+++ b/CI/run_pytest.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-# run pytest for all modules
+# run pytest for all test modules
 python3 -m pytest --html=report.html --self-contained-html test/ | tee test_results.out
 
-IS_FAILED=$?
+IS_FAILED=${PIPESTATUS[0]}  # capture pytest's exit status, not tee's
diff --git a/requirements.txt b/requirements.txt
index 7ac250ac9417cd6bf5b58a9538df874808c77310..e8ac7eb75d866b27d72af51fae31f417c471dd05 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -10,4 +10,5 @@ pytest-cov
 pytest-html
 pydot
 mock
+statsmodels
 matplotlib
diff --git a/run.py b/run.py
index e45b2dd6dc3da47c1febe46a387a45014db2772d..03eda04280a19e5c2bb9f1743c40f07e9e3fd2cc 100644
--- a/run.py
+++ b/run.py
@@ -5,12 +5,12 @@ __date__ = '2019-11-14'
 import logging
 import argparse
 
-from src.modules.experiment_setup import ExperimentSetup
-from src.modules.run_environment import RunEnvironment
-from src.modules.pre_processing import PreProcessing
-from src.modules.model_setup import ModelSetup
-from src.modules.training import Training
-from src.modules.modules import PostProcessing
+from src.run_modules.experiment_setup import ExperimentSetup
+from src.run_modules.run_environment import RunEnvironment
+from src.run_modules.pre_processing import PreProcessing
+from src.run_modules.model_setup import ModelSetup
+from src.run_modules.training import Training
+from src.run_modules.post_processing import PostProcessing
 
 
 def main(parser_args):
diff --git a/src/data_handling/data_distributor.py b/src/data_handling/data_distributor.py
index 77f83536db5eaed3545d609e1d33a042c7ad23dd..74df5f6ac1c998e644fa7d89a688fc12dee21265 100644
--- a/src/data_handling/data_distributor.py
+++ b/src/data_handling/data_distributor.py
@@ -1,4 +1,8 @@
 from __future__ import generator_stop
+
+__author__ = "Lukas Leufen, Felix Kleinert"
+__date__ = '2019-12-05'
+
 import math
 
 import keras
diff --git a/src/data_handling/data_preparation.py b/src/data_handling/data_preparation.py
index badd75aa709e108c2516ccf24a3de915c77ca258..db800f5e50146c4f73cde643f154bcb1a5047437 100644
--- a/src/data_handling/data_preparation.py
+++ b/src/data_handling/data_preparation.py
@@ -249,6 +249,17 @@ class DataPrep(object):
         else:
             self.inverse_transform()
 
+    def get_transformation_information(self, variable):
+        """
+        Returns the mean, standard deviation and transformation method used for the given variable. Mean and std
+        are None if no transformation has been applied to the data.
+        """
+        try:
+            mean = self.mean.sel({'variables': variable}).values
+        except AttributeError:
+            mean = None
+        try:
+            std = self.std.sel({'variables': variable}).values
+        except AttributeError:
+            std = None
+        return mean, std, self._transform_method
+
     def make_history_window(self, dim: str, window: int) -> None:
         """
         This function shifts the data window+1 times and returns a xarray which has a new dimension 'window'
diff --git a/src/datastore.py b/src/datastore.py
index d14ae07d70f44b6c62987ee8c3fcbce8eee0ce46..92623baf7c01f8199f653b7220db77a931986708 100644
--- a/src/datastore.py
+++ b/src/datastore.py
@@ -30,7 +30,7 @@ class EmptyScope(Exception):
 class AbstractDataStore(ABC):
 
     """
-    Data store for all settings for the experiment workflow to save experiment parameters for the proceeding modules
+    Data store for all settings for the experiment workflow to save experiment parameters for the subsequent run_modules
     and predefine parameters loaded during the experiment setup phase. The data store is hierarchically structured, so
     that global settings can be overwritten by local adjustments.
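
+    Example of the scope fallback (sketch, using the get/set API as used elsewhere in this project):
+
+        data_store.set("batch_size", 32, "general")
+        data_store.set("batch_size", 64, "general.model")
+        data_store.get("batch_size", "general.model")  # -> 64 (local adjustment)
+        data_store.get("batch_size", "general.train")  # -> 32 (falls back to the general scope)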
     """
@@ -106,7 +106,7 @@ class AbstractDataStore(ABC):
 class DataStoreByVariable(AbstractDataStore):
 
     """
-    Data store for all settings for the experiment workflow to save experiment parameters for the proceeding modules
+    Data store for all settings for the experiment workflow to save experiment parameters for the subsequent run_modules
     and predefine parameters loaded during the experiment setup phase. The data store is hierarchically structured, so
     that global settings can be overwritten by local adjustments.
 
@@ -230,7 +230,7 @@ class DataStoreByVariable(AbstractDataStore):
 class DataStoreByScope(AbstractDataStore):
 
     """
-    Data store for all settings for the experiment workflow to save experiment parameters for the proceeding modules
+    Data store for all settings for the experiment workflow to save experiment parameters for the subsequent run_modules
     and predefine parameters loaded during the experiment setup phase. The data store is hierarchically structured, so
     that global settings can be overwritten by local adjustments.
 
diff --git a/src/helpers.py b/src/helpers.py
index 2ef776898e35a16b0bfd54b5984864c740dbf341..40a3f9762cd649651631e45d94b78c19562b9749 100644
--- a/src/helpers.py
+++ b/src/helpers.py
@@ -1,6 +1,6 @@
 import re
 
-__author__ = 'Lukas Leufen'
+__author__ = 'Lukas Leufen, Felix Kleinert'
 __date__ = '2019-10-21'
 
 
diff --git a/src/modules/__init__.py b/src/model_modules/__init__.py
similarity index 100%
rename from src/modules/__init__.py
rename to src/model_modules/__init__.py
diff --git a/src/flatten.py b/src/model_modules/flatten.py
similarity index 96%
rename from src/flatten.py
rename to src/model_modules/flatten.py
index 1166cf328ee2c8326b0628c4a93184a2dece16fe..707b0d4421fd506d16f93d7c56406abc878ab9f1 100644
--- a/src/flatten.py
+++ b/src/model_modules/flatten.py
@@ -1,4 +1,4 @@
-__author__ = "Lukas Leufen"
+__author__ = "Felix Kleinert, Lukas Leufen"
 __date__ = '2019-12-02'
 
 import keras
diff --git a/src/inception_model.py b/src/model_modules/inception_model.py
similarity index 100%
rename from src/inception_model.py
rename to src/model_modules/inception_model.py
diff --git a/src/model_modules/linear_model.py b/src/model_modules/linear_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..17a9b2326ab6ba1829ee4f65f0161de887e70778
--- /dev/null
+++ b/src/model_modules/linear_model.py
@@ -0,0 +1,46 @@
+__author__ = "Felix Kleinert, Lukas Leufen"
+__date__ = '2019-12-11'
+
+
+import statsmodels.api as sm
+import numpy as np
+
+
+class OrdinaryLeastSquaredModel:
+    """
+    Ordinary least squares baseline model. Fits a single OLS regression on all samples drawn from the given
+    data generator, using the flattened input window as features.
+    """
+
+    def __init__(self, generator):
+        self.x = []
+        self.y = []
+        self.generator = generator
+        self.model = self.train_ols_model_from_generator()
+
+    def train_ols_model_from_generator(self):
+        self.set_x_y_from_generator()
+        self.x = sm.add_constant(self.x)
+        return self.ordinary_least_squared_model(self.x, self.y)
+
+    def set_x_y_from_generator(self):
+        data_x = None
+        data_y = None
+        for item in self.generator:
+            x = self.reshape_xarray_to_numpy(item[0])
+            y = item[1].values
+            data_x = np.concatenate((data_x, x), axis=0) if data_x is not None else x
+            data_y = np.concatenate((data_y, y), axis=0) if data_y is not None else y
+        self.x = data_x
+        self.y = data_y
+
+    def predict(self, data):
+        data = sm.add_constant(self.reshape_xarray_to_numpy(data))
+        return self.model.predict(data)
+
+    @staticmethod
+    def reshape_xarray_to_numpy(data):
+        # flatten the 4-dimensional input (samples, window, station, variables) to 2 dimensions
+        # (samples, window * variables); the station axis is assumed to have length 1
+        shape = data.values.shape
+        res = data.values.reshape(shape[0], shape[1] * shape[3])
+        return res
+
+    @staticmethod
+    def ordinary_least_squared_model(x, y):
+        ols_model = sm.OLS(y, x)
+        return ols_model.fit()
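+
+
+# Minimal standalone sketch of the statsmodels workflow used above, on synthetic data
+# (illustrative only; array shapes and names here are assumptions, not part of the pipeline):
+#
+#     x = np.random.rand(100, 5)                             # 100 samples, 5 flattened features
+#     y = np.random.rand(100)                                # one target value per sample
+#     fitted = sm.OLS(y, sm.add_constant(x)).fit()
+#     y_hat = fitted.predict(sm.add_constant(np.random.rand(10, 5)))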
diff --git a/src/run_modules/__init__.py b/src/run_modules/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/modules/experiment_setup.py b/src/run_modules/experiment_setup.py
similarity index 93%
rename from src/modules/experiment_setup.py
rename to src/run_modules/experiment_setup.py
index f726c66e116d3bf978281805915f571a50f0cf2f..b0c628b8966aa7cfedbf389552a9893acca297fa 100644
--- a/src/modules/experiment_setup.py
+++ b/src/run_modules/experiment_setup.py
@@ -1,4 +1,4 @@
-__author__ = "Lukas Leufen"
+__author__ = "Lukas Leufen, Felix Kleinert"
 __date__ = '2019-11-15'
 
 
@@ -8,7 +8,7 @@ from typing import Union, Dict, Any
 import os
 
 from src import helpers
-from src.modules.run_environment import RunEnvironment
+from src.run_modules.run_environment import RunEnvironment
 
 
 DEFAULT_STATIONS = ['DEBW107', 'DEBY081', 'DEBW013', 'DEBW076', 'DEBW087', 'DEBY052', 'DEBY032', 'DEBW022', 'DEBY004',
@@ -33,7 +33,7 @@ class ExperimentSetup(RunEnvironment):
                  window_lead_time=None, dimensions=None, interpolate_dim=None, interpolate_method=None,
                  limit_nan_fill=None, train_start=None, train_end=None, val_start=None, val_end=None, test_start=None,
                  test_end=None, use_all_stations_on_all_data_sets=True, trainable=False, fraction_of_train=None,
-                 experiment_path=None, plot_path=None):
+                 experiment_path=None, plot_path=None, forecast_path=None):
 
         # create run framework
         super().__init__()
@@ -49,10 +49,17 @@ class ExperimentSetup(RunEnvironment):
         self._set_param("experiment_name", exp_name)
         self._set_param("experiment_path", exp_path)
         helpers.check_path_and_create(self.data_store.get("experiment_path", "general"))
+
+        # set plot path
         default_plot_path = os.path.join(exp_path, "plots")
         self._set_param("plot_path", plot_path, default=default_plot_path)
         helpers.check_path_and_create(self.data_store.get("plot_path", "general"))
 
+        # set forecast path
+        default_forecast_path = os.path.join(exp_path, "forecasts")
+        self._set_param("forecast_path", forecast_path, default=default_forecast_path)
+        helpers.check_path_and_create(self.data_store.get("forecast_path", "general"))
+
         # setup for data
         self._set_param("var_all_dict", var_all_dict, default=DEFAULT_VAR_ALL_DICT)
         self._set_param("stations", stations, default=DEFAULT_STATIONS)
diff --git a/src/modules/model_setup.py b/src/run_modules/model_setup.py
similarity index 96%
rename from src/modules/model_setup.py
rename to src/run_modules/model_setup.py
index a62b53b86651109c4c1dd10d4a7dfccbaf3cf9c2..812f639c8f18725e1e7c4decd03d5f2b61578fc4 100644
--- a/src/modules/model_setup.py
+++ b/src/run_modules/model_setup.py
@@ -1,4 +1,4 @@
-__author__ = "Lukas Leufen"
+__author__ = "Lukas Leufen, Felix Kleinert"
 __date__ = '2019-12-02'
 
 
@@ -6,16 +6,15 @@ import keras
 from keras import losses
 from keras.callbacks import ModelCheckpoint
 from keras.regularizers import l2
-from keras.optimizers import Adam, SGD
+from keras.optimizers import SGD
 import tensorflow as tf
 import logging
 import os
 
-from src.modules.run_environment import RunEnvironment
+from src.run_modules.run_environment import RunEnvironment
 from src.helpers import l_p_loss, LearningRateDecay
-from src.inception_model import InceptionModelBase
-from src.flatten import flatten_tail
-from src.model_modules.model_class import MyLittleModel
+from src.model_modules.inception_model import InceptionModelBase
+from src.model_modules.flatten import flatten_tail
 
 
 class ModelSetup(RunEnvironment):
diff --git a/src/modules/modules.py b/src/run_modules/modules.py
similarity index 82%
rename from src/modules/modules.py
rename to src/run_modules/modules.py
index 888c7e06f0ef34b17f6c3f2fc2da6fe0316282f4..5f0f12c19a87de7ba4ad1e0508906e75d1605563 100644
--- a/src/modules/modules.py
+++ b/src/run_modules/modules.py
@@ -1,9 +1,9 @@
 import logging
 import argparse
 
-from src.modules.run_environment import RunEnvironment
-from src.modules.experiment_setup import ExperimentSetup
-from src.modules.pre_processing import PreProcessing
+from src.run_modules.run_environment import RunEnvironment
+from src.run_modules.experiment_setup import ExperimentSetup
+from src.run_modules.pre_processing import PreProcessing
 
 
 class Training(RunEnvironment):
diff --git a/src/run_modules/post_processing.py b/src/run_modules/post_processing.py
new file mode 100644
index 0000000000000000000000000000000000000000..35d93dcbd932d1c298c0744fcd0205697576bb4c
--- /dev/null
+++ b/src/run_modules/post_processing.py
@@ -0,0 +1,165 @@
+__author__ = "Lukas Leufen, Felix Kleinert"
+__date__ = '2019-12-11'
+
+
+import logging
+import os
+
+import numpy as np
+import pandas as pd
+import xarray as xr
+
+from src.run_modules.run_environment import RunEnvironment
+from src.data_handling.data_distributor import Distributor
+from src.model_modules.linear_model import OrdinaryLeastSquaredModel
+from src import statistics
+
+
+class PostProcessing(RunEnvironment):
+    """
+    Run module that evaluates the trained model on the test data and creates CNN, persistence, OLS and
+    observational reference forecasts per station, which are stored as netCDF files in the forecast path.
+    """
+
+    def __init__(self):
+        super().__init__()
+        self.model = self.data_store.get("best_model", "general")
+        self.ols_model = None
+        self.batch_size = self.data_store.get("batch_size", "general.model")
+        self.test_data = self.data_store.get("generator", "general.test")
+        self.test_data_distributed = Distributor(self.test_data, self.model, self.batch_size)
+        self.train_data = self.data_store.get("generator", "general.train")
+        self._run()
+
+    def _run(self):
+        self.train_ols_model()
+        preds_for_all_stations = self.make_prediction()
+
+    def calculate_test_score(self):
+        test_score = self.model.evaluate_generator(generator=self.test_data_distributed.distribute_on_batches(),
+                                                   use_multiprocessing=False, verbose=0, steps=1)
+        logging.info(f"test score = {test_score}")
+        self._save_test_score(test_score)
+
+    def _save_test_score(self, score):
+        path = self.data_store.get("experiment_path", "general")
+        with open(os.path.join(path, "test_scores.txt"), "w") as f:
+            for index, item in enumerate(score):
+                f.write(f"{self.model.metrics_names[index]}, {item}\n")
+
+    def train_ols_model(self):
+        self.ols_model = OrdinaryLeastSquaredModel(self.train_data)
+
+    def make_prediction(self, freq="1D"):
+        nn_prediction_all_stations = []
+        for i, v in enumerate(self.test_data):
+            data = self.test_data.get_data_generator(i)
+
+            nn_prediction, persistence_prediction, ols_prediction = self._create_empty_prediction_arrays(data, count=3)
+            input_data = self.test_data[i][0]
+
+            # get scaling parameters
+            mean, std, transformation_method = data.get_transformation_information(variable='o3')
+
+            # nn forecast
+            nn_prediction = self._create_nn_forecast(input_data, nn_prediction, mean, std, transformation_method)
+
+            # persistence
+            persistence_prediction = self._create_persistence_forecast(input_data, persistence_prediction, mean, std,
+                                                                       transformation_method)
+
+            # ols
+            ols_prediction = self._create_ols_forecast(input_data, ols_prediction, mean, std, transformation_method)
+
+            # orig pred
+            orig_pred = self._create_orig_forecast(data, None, mean, std, transformation_method)
+
+            # merge all predictions
+            full_index = self.create_fullindex(data.data.indexes['datetime'], freq)
+            all_predictions = self.create_forecast_arrays(full_index, list(data.label.indexes['window']),
+                                                          CNN=nn_prediction,
+                                                          persi=persistence_prediction,
+                                                          orig=orig_pred,
+                                                          OLS=ols_prediction)
+
+            # save all forecasts locally
+            path = self.data_store.get("forecast_path", "general")
+            file = os.path.join(path, f"forecasts_{data.station[0]}_test.nc")
+            all_predictions.to_netcdf(file)
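+            # the stored forecasts can be reloaded later for evaluation, e.g. (sketch):
+            #     xr.open_dataarray(os.path.join(path, "forecasts_DEBW107_test.nc"))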
+
+            # save nn forecast to return variable
+            nn_prediction_all_stations.append(nn_prediction)
+        return nn_prediction_all_stations
+
+    @staticmethod
+    def _create_orig_forecast(data, placeholder, mean, std, transformation_method):
+        return statistics.apply_inverse_transformation(data.label, mean, std, transformation_method)
+
+    def _create_ols_forecast(self, input_data, ols_prediction, mean, std, transformation_method):
+        tmp_ols = self.ols_model.predict(input_data)
+        tmp_ols = statistics.apply_inverse_transformation(tmp_ols, mean, std, transformation_method)
+        ols_prediction.values = np.swapaxes(np.expand_dims(tmp_ols, axis=1), 2, 0)
+        return ols_prediction
+
+    def _create_persistence_forecast(self, input_data, persistence_prediction, mean, std, transformation_method):
+        # persistence forecast: take the last observed o3 value (window 0) and repeat it for all lead times
+        tmp_persi = input_data.sel({'window': 0, 'variables': 'o3'})
+        tmp_persi = statistics.apply_inverse_transformation(tmp_persi, mean, std, transformation_method)
+        window_lead_time = self.data_store.get("window_lead_time", "general")
+        persistence_prediction.values = np.expand_dims(np.tile(tmp_persi.squeeze('Stations'), (window_lead_time, 1)),
+                                                       axis=1)
+        return persistence_prediction
+
+    def _create_nn_forecast(self, input_data, nn_prediction, mean, std, transformation_method):
+        tmp_nn = self.model.predict(input_data)
+        tmp_nn = statistics.apply_inverse_transformation(tmp_nn, mean, std, transformation_method)
+        # add a station axis and reorder so the array matches the layout of the empty prediction array
+        nn_prediction.values = np.swapaxes(np.expand_dims(tmp_nn, axis=1), 2, 0)
+        return nn_prediction
+
+    @staticmethod
+    def _create_empty_prediction_arrays(generator, count=1):
+        # build independent copies; [generator.label.copy()] * count would create `count` references
+        # to one and the same object, so writing to one prediction array would overwrite all others
+        return [generator.label.copy() for _ in range(count)]
+
+    @staticmethod
+    def create_fullindex(df, freq):
+        """
+        Creates an empty DataFrame whose DatetimeIndex spans the first to the last date found in df, with
+        frequency freq.
+
+        :param df: pandas DataFrame, xarray DataArray or pandas DatetimeIndex
+        :param freq: frequency as string (e.g. "1D")
+        :return: empty pandas DataFrame with the full date range as index
+        """
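+        # e.g. (hypothetical input): create_fullindex(pd.DatetimeIndex(["2019-01-01", "2019-01-05"]), "1D")
+        # returns an empty DataFrame indexed 2019-01-01, ..., 2019-01-05 (five daily rows)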
+        if isinstance(df, pd.DataFrame):
+            earliest = df.index[0]
+            latest = df.index[-1]
+        elif isinstance(df, xr.DataArray):
+            earliest = df.index[0].values
+            latest = df.index[-1].values
+        elif isinstance(df, pd.core.indexes.datetimes.DatetimeIndex):
+            earliest = df[0]
+            latest = df[-1]
+        else:
+            raise AttributeError(f"unknown array type. Only pandas DataFrames, xarray DataArrays and pandas "
+                                 f"DatetimeIndexes are supported. Given type is {type(df)}.")
+        index = pd.DataFrame(index=pd.date_range(earliest, latest, freq=freq))
+        return index
+
+    @staticmethod
+    def create_forecast_arrays(index, ahead_names, **kwargs):
+        """
+        This function combines different forecast types into one xarray.
+
+        :param index: as index; index for forecasts (e.g. time)
+        :param ahead_names: as list of str/int: names of ahead values (e.g. hours or days)
+        :param kwargs: as xarrays; data of forecasts
+        :return: xarray of dimension 3: index, ahead_names, # predictions
+
+        """
+        keys = list(kwargs.keys())
+        res = xr.DataArray(np.full((len(index.index), len(ahead_names), len(keys)), np.nan),
+                           coords=[index.index, ahead_names, keys], dims=['index', 'ahead', 'type'])
+        for k, v in kwargs.items():
+            try:
+                match_index = np.stack(set(res.index.values) & set(v.index.values))
+                res.loc[match_index, :, k] = v.loc[match_index]
+            except AttributeError:  # v is xarray type and has no attribute .index
+                match_index = np.stack(set(res.index.values) & set(v.indexes['datetime'].values))
+                res.loc[match_index, :, k] = v.sel({'datetime': match_index}).squeeze('Stations').transpose()
+        return res
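+
+    # Illustrative call (hypothetical data): given forecast arrays cnn and ols sharing a datetime index
+    # with `index`, the result has dimensions (index, ahead, type) with type in ["CNN", "OLS"]:
+    #     res = PostProcessing.create_forecast_arrays(index, ahead_names=[1, 2, 3], CNN=cnn, OLS=ols)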
diff --git a/src/modules/pre_processing.py b/src/run_modules/pre_processing.py
similarity index 98%
rename from src/modules/pre_processing.py
rename to src/run_modules/pre_processing.py
index cce9ee587c7a9b70b9cce8064cb4b77aa1bf3386..9faae7943eb4cd4adb6572c524f5d1795aaa65fe 100644
--- a/src/modules/pre_processing.py
+++ b/src/run_modules/pre_processing.py
@@ -1,4 +1,4 @@
-__author__ = "Lukas Leufen"
+__author__ = "Lukas Leufen, Felix Kleinert"
 __date__ = '2019-11-25'
 
 
@@ -7,7 +7,7 @@ from typing import Tuple, Dict, List
 
 from src.data_handling.data_generator import DataGenerator
 from src.helpers import TimeTracking
-from src.modules.run_environment import RunEnvironment
+from src.run_modules.run_environment import RunEnvironment
 from src.join import EmptyQueryResult
 
 
diff --git a/src/modules/run_environment.py b/src/run_modules/run_environment.py
similarity index 100%
rename from src/modules/run_environment.py
rename to src/run_modules/run_environment.py
diff --git a/src/modules/training.py b/src/run_modules/training.py
similarity index 98%
rename from src/modules/training.py
rename to src/run_modules/training.py
index ba70e3b4296e9ba93426835baccb0fde7c728934..272609a31a3e3c91d6857ed841d5dd2783c66f35 100644
--- a/src/modules/training.py
+++ b/src/run_modules/training.py
@@ -1,4 +1,3 @@
-
 __author__ = "Lukas Leufen, Felix Kleinert"
 __date__ = '2019-12-05'
 
@@ -7,7 +6,7 @@ import os
 import json
 import keras
 
-from src.modules.run_environment import RunEnvironment
+from src.run_modules.run_environment import RunEnvironment
 from src.data_handling.data_distributor import Distributor
 from src.plotting.training_monitoring import PlotModelHistory, PlotModelLearningRate
 from src.helpers import LearningRateDecay
@@ -97,6 +96,7 @@ class Training(RunEnvironment):
         model_name = os.path.join(path, name)
         logging.debug(f"save best model to {model_name}")
         self.model.save(model_name)
+        self.data_store.set("best_model", self.model, "general")
 
     def load_best_model(self, name: str) -> None:
         """
diff --git a/src/statistics.py b/src/statistics.py
index 060081de9e21f5cbc7c560066451bbdbf14b7eb1..6f34187e32949910df5762a45d868701920b610f 100644
--- a/src/statistics.py
+++ b/src/statistics.py
@@ -9,6 +9,18 @@ from typing import Union, Tuple
 Data = Union[xr.DataArray, pd.DataFrame]
 
 
+def apply_inverse_transformation(data, mean, std=None, method="standardise"):
+    """
+    Reverses a previously applied transformation on data. Currently only 'standardise' and 'centre' are
+    implemented; 'normalise' is a placeholder.
+    """
+    if method == 'standardise':
+        return standardise_inverse(data, mean, std)
+    elif method == 'centre':
+        return centre_inverse(data, mean)
+    elif method == 'normalise':
+        # use min/max of data or given min/max
+        raise NotImplementedError
+    else:
+        raise NotImplementedError
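+
+
+# Round-trip sketch (illustrative; assumes standardise_inverse computes data * std + mean):
+#     m, s = data.mean(), data.std()
+#     transformed = (data - m) / s
+#     apply_inverse_transformation(transformed, m, s)  # ~= data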
+
+
 def standardise(data: Data, dim: Union[str, int]) -> Tuple[Data, Data, Data]:
     """
     This function standardises a xarray.dataarray (along dim) or pandas.DataFrame (along axis) with mean=0 and std=1
diff --git a/test/test_inception_model.py b/test/test_model_modules/test_inception_model.py
similarity index 99%
rename from test/test_inception_model.py
rename to test/test_model_modules/test_inception_model.py
index ad18059517aef997021e4b7a538f16e30378b8dd..82b754c2dd09d700c439a3fa9773d02e7c0df070 100644
--- a/test/test_inception_model.py
+++ b/test/test_model_modules/test_inception_model.py
@@ -1,7 +1,6 @@
 import pytest
-from src.inception_model import InceptionModelBase
+from src.model_modules.inception_model import InceptionModelBase
 import keras
-import tensorflow as tf
 
 
 class TestInceptionModelBase:
diff --git a/test/test_modules/test_experiment_setup.py b/test/test_modules/test_experiment_setup.py
index bfff606ea367f3bb19380078ccecd7db508bb9b1..4a2ba9f9e641612a7f9ffbc81b93b68fd5dbccd8 100644
--- a/test/test_modules/test_experiment_setup.py
+++ b/test/test_modules/test_experiment_setup.py
@@ -3,7 +3,7 @@ import logging
 import argparse
 import os
 
-from src.modules.experiment_setup import ExperimentSetup
+from src.run_modules.experiment_setup import ExperimentSetup
 from src.helpers import TimeTracking, prepare_host
 from src.datastore import NameNotFoundInScope, NameNotFoundInDataStore
 
diff --git a/test/test_modules/test_model_setup.py b/test/test_modules/test_model_setup.py
index ca7503040ecf8e45636fddebccdcebd7242dbaec..d604d7474af84740a4b7a1cc51e5e94f1c94533b 100644
--- a/test/test_modules/test_model_setup.py
+++ b/test/test_modules/test_model_setup.py
@@ -3,8 +3,8 @@ import os
 import keras
 import mock
 
-from src.modules.model_setup import ModelSetup
-from src.modules.run_environment import RunEnvironment
+from src.run_modules.model_setup import ModelSetup
+from src.run_modules.run_environment import RunEnvironment
 from src.data_handling.data_generator import DataGenerator
 from src.model_modules.model_class import AbstractModelClass
 from src.datastore import EmptyScope
diff --git a/test/test_modules/test_post_processing.py b/test/test_modules/test_post_processing.py
new file mode 100644
index 0000000000000000000000000000000000000000..0eb7f74148feedc988c89cd0a3193fc6380a0f0e
--- /dev/null
+++ b/test/test_modules/test_post_processing.py
@@ -0,0 +1,10 @@
+import keras
+
+from src.run_modules.post_processing import PostProcessing
+
+
+class TestPostProcessing:
+
+    def test_init(self):
+        post = PostProcessing()
+        assert isinstance(post.model, keras.Model)
diff --git a/test/test_modules/test_pre_processing.py b/test/test_modules/test_pre_processing.py
index 34c27ff1f08eaa3b223dab5d3bcc6e3cb9a09a97..a562e7b05a79f0068b10e9e36771669fe47d4ce8 100644
--- a/test/test_modules/test_pre_processing.py
+++ b/test/test_modules/test_pre_processing.py
@@ -2,11 +2,11 @@ import logging
 import pytest
 
 from src.helpers import PyTestRegex
-from src.modules.experiment_setup import ExperimentSetup
-from src.modules.pre_processing import PreProcessing, DEFAULT_ARGS_LIST, DEFAULT_KWARGS_LIST
+from src.run_modules.experiment_setup import ExperimentSetup
+from src.run_modules.pre_processing import PreProcessing, DEFAULT_ARGS_LIST, DEFAULT_KWARGS_LIST
 from src.data_handling.data_generator import DataGenerator
 from src.datastore import NameNotFoundInScope
-from src.modules.run_environment import RunEnvironment
+from src.run_modules.run_environment import RunEnvironment
 
 
 class TestPreProcessing:
diff --git a/test/test_modules/test_run_environment.py b/test/test_modules/test_run_environment.py
index 1eeaa02c530d05e1ceee7bde8811db53ad6042aa..d82675b57ea6feb4f83c99dab6f648c2846e4137 100644
--- a/test/test_modules/test_run_environment.py
+++ b/test/test_modules/test_run_environment.py
@@ -1,7 +1,7 @@
 import logging
 
 from src.helpers import TimeTracking, PyTestRegex
-from src.modules.run_environment import RunEnvironment
+from src.run_modules.run_environment import RunEnvironment
 
 
 class TestRunEnvironment:
diff --git a/test/test_modules/test_training.py b/test/test_modules/test_training.py
index e6e3571d9beb03671a8e49f7f3988501b5eaa674..3426eb1d355a6690ee57c3ee45e5088d7df9c249 100644
--- a/test/test_modules/test_training.py
+++ b/test/test_modules/test_training.py
@@ -8,10 +8,10 @@ import shutil
 import logging
 import glob
 
-from src.inception_model import InceptionModelBase
-from src.flatten import flatten_tail
-from src.modules.training import Training
-from src.modules.run_environment import RunEnvironment
+from src.model_modules.inception_model import InceptionModelBase
+from src.model_modules.flatten import flatten_tail
+from src.run_modules.training import Training
+from src.run_modules.run_environment import RunEnvironment
 from src.data_handling.data_distributor import Distributor
 from src.data_handling.data_generator import DataGenerator
 from src.helpers import LearningRateDecay, PyTestRegex