diff --git a/CHANGELOG.md b/CHANGELOG.md
index e0c57c86048251f3125b58e17aa9b999cd4581f3..7d17fa89513192bf7d8ce0fa39e6e1449dc9b4f0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,19 @@
 All notable changes to this project will be documented in this file.
 
 
+## v0.12.1 -  2020-09-28  - Examples in Notebook
+
+### general:
+- introduced notebook documentation for an easy start, #174
+- updated special installation instructions for the Juelich HPC systems, #172
+
+### new features:
+- input and output shape parameters are consistently renamed to input_shape and output_shape, #175
+
+### technical:
+- it is possible to assign a custom name to a run module (e.g. used in logging), #173
+
+
 ## v0.12.0 -  2020-09-21  - Documentation and Bugfixes
 
 ### general:
diff --git a/Examples_from_manuscript.ipynb b/Examples_from_manuscript.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..dd258554a16874a62b81e8d69d0088c1ede15deb
--- /dev/null
+++ b/Examples_from_manuscript.ipynb
@@ -0,0 +1,217 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# MLAir (v1.0) - Examples\n",
+    "\n",
+    "This notebook contains all examples as provided in Leufen et al. (2020). \n",
+    "Please follow the installation instructions provided in the [README](https://gitlab.version.fz-juelich.de/toar/mlair/-/blob/master/README.md) on gitlab. "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Example 1\n",
+    "\n",
+    "The following cell imports MLAir and executes a minimalistic toy experiment. This cell is equivalent to Figure 2 in the manuscript."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import mlair\n",
+    "\n",
+    "# just give it a dry run without any modifications\n",
+    "mlair.run()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Example 2 \n",
+    "\n",
+    "In the following cell we use other station IDs provided as a list of strings (see also [JOIN-Web interface](https://join.fz-juelich.de/services/rest/surfacedata/) of the TOAR database for more details).\n",
+    "Moreover, we expand the `window_history_size` to 14 days and run the experiment. This cell is equivalent to Figure 3 in the manuscript."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# our new stations to use\n",
+    "stations = ['DEBW030', 'DEBW037', 'DEBW031', 'DEBW015', 'DEBW107']\n",
+    "\n",
+    "# expanded temporal context to 14 (days, because of default sampling=\"daily\")\n",
+    "window_history_size = 14\n",
+    "\n",
+    "# restart the experiment with little customisation\n",
+    "mlair.run(stations=stations, \n",
+    "          window_history_size=window_history_size)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Example 3 \n",
+    "\n",
+    "The following cell loads the trained model from Example 2 and generates predictions for the two specified stations. \n",
+    "To ensure that the model is not retrained the keywords `create_new_model` and `train_model` are set to `False`. This cell is equivalent to Figure 4 in the manuscript. "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# our new stations to use\n",
+    "stations = ['DEBY002', 'DEBY079']\n",
+    "\n",
+    "# same setting for window_history_size\n",
+    "window_history_size = 14\n",
+    "\n",
+    "# run experiment without training\n",
+    "mlair.run(stations=stations, \n",
+    "          window_history_size=window_history_size, \n",
+    "          create_new_model=False, \n",
+    "          train_model=False)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Example 4\n",
+    "\n",
+    "The following cell demonstrates how a user defined model can be implemented by inheriting from `AbstractModelClass`. Within the `__init__` method `super().__init__`, `set_model` and `set_compile_options` should be called. Moreover, it is possible to set custom objects by calling `set_custom_objects`. Those custom objects are used to re-load the model (see also Keras documentation). For demonstration, the loss is added as custom object which is not required because a Keras built-in function is used as loss.\n",
+    "\n",
+    "The Keras-model itself is defined in `set_model` by using the sequential or functional Keras API. All compile options can be defined in `set_compile_options`.\n",
+    "This cell is equivalent to Figure 5 in the manuscript."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import keras\n",
+    "from keras.losses import mean_squared_error as mse\n",
+    "from keras.layers import PReLU, Input, Conv2D, Flatten, Dropout, Dense\n",
+    "\n",
+    "from mlair.model_modules import AbstractModelClass\n",
+    "from mlair.workflows import DefaultWorkflow\n",
+    "\n",
+    "class MyCustomisedModel(AbstractModelClass):\n",
+    "\n",
+    "    \"\"\"\n",
+    "    A customised model with a 1x1 Conv, and 2 Dense layers (16, \n",
+    "    output shape). Dropout is used after Conv layer.\n",
+    "    \"\"\"\n",
+    "    def __init__(self, input_shape: list, output_shape: list):\n",
+    "    \n",
+    "        # set attributes shape_inputs and shape_outputs\n",
+    "        super().__init__(input_shape[0], output_shape[0])\n",
+    "\n",
+    "        # apply to model\n",
+    "        self.set_model()\n",
+    "        self.set_compile_options()\n",
+    "        self.set_custom_objects(loss=self.compile_options['loss'])\n",
+    "\n",
+    "    def set_model(self):\n",
+    "        x_input = Input(shape=self._input_shape)\n",
+    "        x_in = Conv2D(4, (1, 1))(x_input)\n",
+    "        x_in = PReLU()(x_in)\n",
+    "        x_in = Flatten()(x_in)\n",
+    "        x_in = Dropout(0.1)(x_in)\n",
+    "        x_in = Dense(16)(x_in)\n",
+    "        x_in = PReLU()(x_in)\n",
+    "        x_in = Dense(self._output_shape)(x_in)\n",
+    "        out = PReLU()(x_in)\n",
+    "        self.model = keras.Model(inputs=x_input, outputs=[out])\n",
+    "\n",
+    "    def set_compile_options(self):\n",
+    "        self.initial_lr = 1e-2\n",
+    "        self.optimizer = keras.optimizers.SGD(lr=self.initial_lr, momentum=0.9)\n",
+    "        self.loss = mse\n",
+    "        self.compile_options = {\"metrics\": [\"mse\", \"mae\"]}\n",
+    "\n",
+    "# Make use of MyCustomisedModel within the DefaultWorkflow\n",
+    "workflow = DefaultWorkflow(model=MyCustomisedModel, epochs=2)\n",
+    "workflow.run()\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Example 5 \n",
+    "\n",
+    "Embedding of a custom Run Module in a modified MLAir workflow. In comparison to examples 1 to 4, this code example works on a single step deeper regarding the level of abstraction. Instead of calling the run method of MLAir, the user needs to add all stages individually and is responsible for all dependencies between the stages. By using the `Workflow` class as context manager, all stages are automatically connected with the result that all stages can easily be plugged in. This cell is equivalent to Figure 6 in the manuscript."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import logging\n",
+    "\n",
+    "class CustomStage(mlair.RunEnvironment):\n",
+    "    \"\"\"A custom MLAir stage for demonstration.\"\"\"\n",
+    "    def __init__(self, test_string):\n",
+    "        super().__init__() # always call super init method\n",
+    "        self._run(test_string) # call a class method\n",
+    "        \n",
+    "    def _run(self, test_string):\n",
+    "        logging.info(\"Just running a custom stage.\")\n",
+    "        logging.info(\"test_string = \" + test_string)\n",
+    "        epochs = self.data_store.get(\"epochs\")\n",
+    "        logging.info(\"epochs = \" + str(epochs))\n",
+    "    \n",
+    "    \n",
+    "# create your custom MLAir workflow\n",
+    "CustomWorkflow = mlair.Workflow()\n",
+    "# provide stages without initialisation\n",
+    "CustomWorkflow.add(mlair.ExperimentSetup, epochs=128)\n",
+    "# add also keyword arguments for a specific stage\n",
+    "CustomWorkflow.add(CustomStage, test_string=\"Hello World\")\n",
+    "# finally execute custom workflow in order of adding\n",
+    "CustomWorkflow.run()\n",
+    "    "
+   ]
+  },
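+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As a small extra example (not contained in the manuscript): since v0.12.1, a custom name can be assigned to a workflow via the `name` keyword, which is then used in the log output instead of the class name."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# assign a custom name to the workflow; the log then reports\n",
+    "# \"MyCustomWorkflow started\" instead of \"Workflow started\"\n",
+    "NamedWorkflow = mlair.Workflow(name=\"MyCustomWorkflow\")\n",
+    "NamedWorkflow.add(mlair.ExperimentSetup, epochs=128)\n",
+    "NamedWorkflow.add(CustomStage, test_string=\"Hello World\")\n",
+    "NamedWorkflow.run()"
+   ]
+  }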
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python (mlt_new)",
+   "language": "python",
+   "name": "venv"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.9"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/README.md b/README.md
index c33aab4b8643d2907b07b5ebcb254076515d03d2..dfbcc518341e4f10f642f385b1b60633f56b5ee0 100644
--- a/README.md
+++ b/README.md
@@ -17,17 +17,19 @@ install the geo packages. For special instructions to install MLAir on the Jueli
 
 * (geo) Install **proj** on your machine using the console. E.g. for opensuse / leap `zypper install proj`
 * (geo) A c++ compiler is required for the installation of the program **cartopy**
-* Install all requirements from [`requirements.txt`](https://gitlab.version.fz-juelich.de/toar/machinelearningtools/-/blob/master/requirements.txt)
+* Install all requirements from [`requirements.txt`](https://gitlab.version.fz-juelich.de/toar/mlair/-/blob/master/requirements.txt)
   preferably in a virtual environment
 * (tf) Currently, TensorFlow-1.13 is mentioned in the requirements. We already tested the TensorFlow-1.15 version and couldn't
   find any compatibility errors. Please note, that tf-1.13 and 1.15 have two distinct branches each, the default branch 
   for CPU support, and the "-gpu" branch for GPU support. If the GPU version is installed, MLAir will make use of the GPU
   device.
 * Installation of **MLAir**:
-    * Either clone MLAir from the [gitlab repository](https://gitlab.version.fz-juelich.de/toar/machinelearningtools.git) 
+    * Either clone MLAir from the [gitlab repository](https://gitlab.version.fz-juelich.de/toar/mlair.git) 
       and use it without installation (beside the requirements) 
-    * or download the distribution file (?? .whl) and install it via `pip install <??>`. In this case, you can simply
-      import MLAir in any python script inside your virtual environment using `import mlair`.
+    * or download the distribution file ([current version](https://gitlab.version.fz-juelich.de/toar/mlair/-/blob/master/dist/mlair-0.12.1-py3-none-any.whl)) 
+      and install it via `pip install <dist_file>.whl`. In this case, you can simply import MLAir in any python script 
+      inside your virtual environment using `import mlair`.
 
 # How to start with MLAir
 
@@ -47,15 +49,19 @@ mlair.run()
 The logging output will show you many informations. Additional information (including debug messages) are collected 
 inside the experiment path in the logging folder.
 ```log
-INFO: mlair started
+INFO: DefaultWorkflow started
 INFO: ExperimentSetup started
 INFO: Experiment path is: /home/<usr>/mlair/testrun_network 
 ...
-INFO: load data for DEBW001 from JOIN 
+INFO: load data for DEBW107 from JOIN
+INFO: load data for DEBY081 from JOIN
+INFO: load data for DEBW013 from JOIN
+INFO: load data for DEBW076 from JOIN
+INFO: load data for DEBW087 from JOIN
 ...
 INFO: Training started
 ...
-INFO: mlair finished after 00:00:12 (hh:mm:ss)
+INFO: DefaultWorkflow finished after 00:03:04 (hh:mm:ss)
 ```
 
 ## Example 2
@@ -77,15 +83,17 @@ mlair.run(stations=stations,
 ```
 The output looks similar, but we can see, that the new stations are loaded.
 ```log
-INFO: mlair started
+INFO: DefaultWorkflow started
 INFO: ExperimentSetup started
 ...
 INFO: load data for DEBW030 from JOIN 
 INFO: load data for DEBW037 from JOIN 
+INFO: load data for DEBW031 from JOIN 
+INFO: load data for DEBW015 from JOIN 
 ...
 INFO: Training started
 ...
-INFO: mlair finished after 00:00:24 (hh:mm:ss)
+INFO: DefaultWorkflow finished after 00:02:03 (hh:mm:ss)
 ```
 
 ## Example 3
@@ -107,15 +115,15 @@ window_history_size = 14
 mlair.run(stations=stations, 
           window_history_size=window_history_size, 
           create_new_model=False, 
-          trainable=False)
+          train_model=False)
 ```
 We can see from the terminal that no training was performed. Analysis is now made on the new stations.
 ```log
-INFO: mlair started
+INFO: DefaultWorkflow started
 ...
-INFO: No training has started, because trainable parameter was false. 
+INFO: No training has started, because train_model parameter was false. 
 ...
-INFO: mlair finished after 00:00:06 (hh:mm:ss)
+INFO: DefaultWorkflow finished after 00:01:27 (hh:mm:ss)
 ```
 
 
@@ -137,7 +145,7 @@ DefaultWorkflow.run()
 ```
 The output of running this default workflow will be structured like the following.
 ```log
-INFO: mlair started
+INFO: DefaultWorkflow started
 INFO: ExperimentSetup started
 ...
 INFO: ExperimentSetup finished after 00:00:01 (hh:mm:ss)
@@ -153,7 +161,7 @@ INFO: Training finished after 00:02:15 (hh:mm:ss)
 INFO: PostProcessing started
 ...
 INFO: PostProcessing finished after 00:01:37 (hh:mm:ss)
-INFO: mlair finished after 00:04:05 (hh:mm:ss)
+INFO: DefaultWorkflow finished after 00:04:05 (hh:mm:ss)
 ```
 
 # Customised Run Module and Workflow
@@ -199,7 +207,7 @@ CustomWorkflow.run()
 The output will look like:
 
 ```log
-INFO: mlair started
+INFO: Workflow started
 ...
 INFO: ExperimentSetup finished after 00:00:12 (hh:mm:ss)
 INFO: CustomStage started
@@ -207,7 +215,7 @@ INFO: Just running a custom stage.
 INFO: test_string = Hello World
 INFO: epochs = 128
 INFO: CustomStage finished after 00:00:01 (hh:mm:ss)
-INFO: mlair finished after 00:00:13 (hh:mm:ss)
+INFO: Workflow finished after 00:00:13 (hh:mm:ss)
 ```
 
 # Custom Model
@@ -222,17 +230,13 @@ behaviour.
 
 ```python
 from mlair import AbstractModelClass
-import keras
 
 class MyCustomisedModel(AbstractModelClass):
 
-    def __init__(self, shape_inputs: list, shape_outputs: list):
-
-        super().__init__(shape_inputs[0], shape_outputs[0])
+    def __init__(self, input_shape: list, output_shape: list):
 
-        # settings
-        self.dropout_rate = 0.1
-        self.activation = keras.layers.PReLU
+        # set attributes _input_shape and _output_shape
+        super().__init__(input_shape[0], output_shape[0])
 
         # apply to model
         self.set_model()
@@ -250,38 +254,40 @@ class MyCustomisedModel(AbstractModelClass):
   loss has been added for demonstration only, because we use a build-in loss function. Nonetheless, we always encourage
   you to add the loss as custom object, to prevent potential errors when loading an already created model instead of
   training a new one.
-* Now build your model inside `set_model()` by using the instance attributes `self.shape_inputs` and
-  `self.shape_outputs` and storing the model as `self.model`.
+* Now build your model inside `set_model()` by using the instance attributes `self._input_shape` and
+  `self._output_shape` and storing the model as `self.model`.
 
 ```python
+import keras
+from keras.layers import PReLU, Input, Conv2D, Flatten, Dropout, Dense
+
 class MyCustomisedModel(AbstractModelClass):
 
     def set_model(self):
-        x_input = keras.layers.Input(shape=self.shape_inputs)
-        x_in = keras.layers.Conv2D(32, (1, 1), padding='same', name='{}_Conv_1x1'.format("major"))(x_input)
-        x_in = self.activation(name='{}_conv_act'.format("major"))(x_in)
-        x_in = keras.layers.Flatten(name='{}'.format("major"))(x_in)
-        x_in = keras.layers.Dropout(self.dropout_rate, name='{}_Dropout_1'.format("major"))(x_in)
-        x_in = keras.layers.Dense(16, name='{}_Dense_16'.format("major"))(x_in)
-        x_in = self.activation()(x_in)
-        x_in = keras.layers.Dense(self.shape_outputs, name='{}_Dense'.format("major"))(x_in)
-        out_main = self.activation()(x_in)
-        self.model = keras.Model(inputs=x_input, outputs=[out_main])
+        x_input = Input(shape=self._input_shape)
+        x_in = Conv2D(4, (1, 1))(x_input)
+        x_in = PReLU()(x_in)
+        x_in = Flatten()(x_in)
+        x_in = Dropout(0.1)(x_in)
+        x_in = Dense(16)(x_in)
+        x_in = PReLU()(x_in)
+        x_in = Dense(self._output_shape)(x_in)
+        out = PReLU()(x_in)
+        self.model = keras.Model(inputs=x_input, outputs=[out])
 ```
 
 * Your are free how to design your model. Just make sure to save it in the class attribute model.
 * Additionally, set your custom compile options including the loss definition.
 
 ```python
+from keras.losses import mean_squared_error as mse
+
 class MyCustomisedModel(AbstractModelClass):
 
     def set_compile_options(self):
         self.initial_lr = 1e-2
         self.optimizer = keras.optimizers.SGD(lr=self.initial_lr, momentum=0.9)
-        self.lr_decay = mlair.model_modules.keras_extensions.LearningRateDecay(base_lr=self.initial_lr,
-                                                                               drop=.94,
-                                                                               epochs_drop=10)
-        self.loss = keras.losses.mean_squared_error
+        self.loss = mse
         self.compile_options = {"metrics": ["mse", "mae"]}
 ```
 
@@ -302,6 +308,15 @@ class MyCustomisedModel(AbstractModelClass):
         self.loss = keras.losses.mean_squared_error
         self.compile_options = {"optimizer" = keras.optimizers.Adam()}
     ```
+    
+## How to plug in the customised model into the workflow?
+* Make use of the `model` argument and pass `MyCustomisedModel` when instantiating a workflow.
+```python
+from mlair.workflows import DefaultWorkflow
+
+workflow = DefaultWorkflow(model=MyCustomisedModel)
+workflow.run()
+```
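+
+Note that the model class itself (not an instance) is passed: MLAir instantiates the model internally, handing over the 
+input and output shapes derived from the training data.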
 
 
 ## Specials for Branched Models
diff --git a/dist/mlair-0.12.1-py3-none-any.whl b/dist/mlair-0.12.1-py3-none-any.whl
new file mode 100644
index 0000000000000000000000000000000000000000..940e25d1973e6d66328482d6e88a9457b5b88c71
Binary files /dev/null and b/dist/mlair-0.12.1-py3-none-any.whl differ
diff --git a/docs/_source/customise.rst b/docs/_source/customise.rst
index 4c9ee5386365c74e3d85c0f81085fcd3e1971b69..cb77eb63f8bf1a53d54ca2e0f80fd9bdeb93a0b0 100644
--- a/docs/_source/customise.rst
+++ b/docs/_source/customise.rst
@@ -27,7 +27,7 @@ The output of running this default workflow will be structured like the followin
 
 .. code-block::
 
-    INFO: mlair started
+    INFO: DefaultWorkflow started
     INFO: ExperimentSetup started
     ...
     INFO: ExperimentSetup finished after 00:00:01 (hh:mm:ss)
@@ -43,7 +43,7 @@ The output of running this default workflow will be structured like the followin
     INFO: PostProcessing started
     ...
     INFO: PostProcessing finished after 00:01:37 (hh:mm:ss)
-    INFO: mlair finished after 00:04:05 (hh:mm:ss)
+    INFO: DefaultWorkflow finished after 00:04:05 (hh:mm:ss)
 
 Custom Model
 ------------
@@ -65,9 +65,9 @@ How to create a customised model?
 
     class MyCustomisedModel(AbstractModelClass):
 
-        def __init__(self, shape_inputs: list, shape_outputs: list):
+        def __init__(self, input_shape: list, output_shape: list):
 
-            super().__init__(shape_inputs[0], shape_outputs[0])
+            super().__init__(input_shape[0], output_shape[0])
 
             # settings
             self.dropout_rate = 0.1
@@ -88,22 +88,22 @@ How to create a customised model?
   loss has been added for demonstration only, because we use a build-in loss function. Nonetheless, we always encourage
   you to add the loss as custom object, to prevent potential errors when loading an already created model instead of
   training a new one.
-* Now build your model inside :py:`set_model()` by using the instance attributes :py:`self.shape_inputs` and
-  :py:`self.shape_outputs` and storing the model as :py:`self.model`.
+* Now build your model inside :py:`set_model()` by using the instance attributes :py:`self._input_shape` and
+  :py:`self._output_shape` and storing the model as :py:`self.model`.
 
 .. code-block:: python
 
     class MyCustomisedModel(AbstractModelClass):
 
         def set_model(self):
-            x_input = keras.layers.Input(shape=self.shape_inputs)
+            x_input = keras.layers.Input(shape=self._input_shape)
             x_in = keras.layers.Conv2D(32, (1, 1), padding='same', name='{}_Conv_1x1'.format("major"))(x_input)
             x_in = self.activation(name='{}_conv_act'.format("major"))(x_in)
             x_in = keras.layers.Flatten(name='{}'.format("major"))(x_in)
             x_in = keras.layers.Dropout(self.dropout_rate, name='{}_Dropout_1'.format("major"))(x_in)
             x_in = keras.layers.Dense(16, name='{}_Dense_16'.format("major"))(x_in)
             x_in = self.activation()(x_in)
-            x_in = keras.layers.Dense(self.shape_outputs, name='{}_Dense'.format("major"))(x_in)
+            x_in = keras.layers.Dense(self._output_shape, name='{}_Dense'.format("major"))(x_in)
             out_main = self.activation()(x_in)
             self.model = keras.Model(inputs=x_input, outputs=[out_main])
 
@@ -143,6 +143,20 @@ How to create a customised model?
               self.compile_options = {"optimizer" = keras.optimizers.Adam()}
 
 
+How to plug in the customised model into the workflow?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+* Make use of the :py:`model` argument and pass :py:`MyCustomisedModel` when instantiating a workflow.
+
+
+.. code-block:: python
+
+    from mlair.workflows import DefaultWorkflow
+
+    workflow = DefaultWorkflow(model=MyCustomisedModel)
+    workflow.run()
+
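+Note that the model class itself (not an instance) is passed: MLAir instantiates the model internally, handing over
+the input and output shapes derived from the training data.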
+
 Specials for Branched Models
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
@@ -342,7 +356,7 @@ The output will look like:
 
 .. code-block::
 
-    INFO: mlair started
+    INFO: Workflow started
     ...
     INFO: ExperimentSetup finished after 00:00:12 (hh:mm:ss)
     INFO: CustomStage started
@@ -350,4 +364,4 @@ The output will look like:
     INFO: test_string = Hello World
     INFO: epochs = 128
     INFO: CustomStage finished after 00:00:01 (hh:mm:ss)
-    INFO: mlair finished after 00:00:13 (hh:mm:ss)
\ No newline at end of file
+    INFO: Workflow finished after 00:00:13 (hh:mm:ss)
\ No newline at end of file
diff --git a/docs/_source/get-started.rst b/docs/_source/get-started.rst
index 2e8838fd5b1ac63a7e34e39b7f8bc24d70f9c1b7..7c909b778ca12dfd74497c90d60d62adf9801b29 100644
--- a/docs/_source/get-started.rst
+++ b/docs/_source/get-started.rst
@@ -60,7 +60,7 @@ inside the experiment path in the logging folder.
 
 .. code-block::
 
-    INFO: mlair started
+    INFO: DefaultWorkflow started
     INFO: ExperimentSetup started
     INFO: Experiment path is: /home/<usr>/mlair/testrun_network
     ...
@@ -68,7 +68,7 @@ inside the experiment path in the logging folder.
     ...
     INFO: Training started
     ...
-    INFO: mlair finished after 00:00:12 (hh:mm:ss)
+    INFO: DefaultWorkflow finished after 00:00:12 (hh:mm:ss)
 
 
 Example 2
@@ -94,7 +94,7 @@ The output looks similar, but we can see, that the new stations are loaded.
 
 .. code-block::
 
-    INFO: mlair started
+    INFO: DefaultWorkflow started
     INFO: ExperimentSetup started
     ...
     INFO: load data for DEBW030 from JOIN
@@ -102,7 +102,7 @@ The output looks similar, but we can see, that the new stations are loaded.
     ...
     INFO: Training started
     ...
-    INFO: mlair finished after 00:00:24 (hh:mm:ss)
+    INFO: DefaultWorkflow finished after 00:00:24 (hh:mm:ss)
 
 Example 3
 ~~~~~~~~~
@@ -132,9 +132,9 @@ We can see from the terminal that no training was performed. Analysis is now mad
 
 .. code-block::
 
-    INFO: mlair started
+    INFO: DefaultWorkflow started
     ...
-    INFO: No training has started, because trainable parameter was false.
+    INFO: No training has started, because train_model parameter was false.
     ...
-    INFO: mlair finished after 00:00:06 (hh:mm:ss)
+    INFO: DefaultWorkflow finished after 00:00:06 (hh:mm:ss)
 
diff --git a/mlair/__init__.py b/mlair/__init__.py
index 0cfc33bb6ca17496e68e5e6281fa37a53a66466b..c0f6183fc3c30cd91818915de12115fd9679f648 100644
--- a/mlair/__init__.py
+++ b/mlair/__init__.py
@@ -1,7 +1,7 @@
 __version_info__ = {
     'major': 0,
     'minor': 12,
-    'micro': 0,
+    'micro': 1,
 }
 
 from mlair.run_modules import RunEnvironment, ExperimentSetup, PreProcessing, ModelSetup, Training, PostProcessing
diff --git a/mlair/model_modules/model_class.py b/mlair/model_modules/model_class.py
index 0e69d22012a592b30c6ffdf9ed6082c47a291f90..c9cc13bd8108e43b5a9f03682942eacdf5a55f04 100644
--- a/mlair/model_modules/model_class.py
+++ b/mlair/model_modules/model_class.py
@@ -23,9 +23,9 @@ How to create a customised model?
 
         class MyCustomisedModel(AbstractModelClass):
 
-            def __init__(self, shape_inputs: list, shape_outputs: list):
+            def __init__(self, input_shape: list, output_shape: list):
 
-                super().__init__(shape_inputs[0], shape_outputs[0])
+                super().__init__(input_shape[0], output_shape[0])
 
                 # settings
                 self.dropout_rate = 0.1
@@ -49,14 +49,14 @@ How to create a customised model?
         class MyCustomisedModel(AbstractModelClass):
 
             def set_model(self):
-                x_input = keras.layers.Input(shape=self.shape_inputs)
+                x_input = keras.layers.Input(shape=self._input_shape)
                 x_in = keras.layers.Conv2D(32, (1, 1), padding='same', name='{}_Conv_1x1'.format("major"))(x_input)
                 x_in = self.activation(name='{}_conv_act'.format("major"))(x_in)
                 x_in = keras.layers.Flatten(name='{}'.format("major"))(x_in)
                 x_in = keras.layers.Dropout(self.dropout_rate, name='{}_Dropout_1'.format("major"))(x_in)
                 x_in = keras.layers.Dense(16, name='{}_Dense_16'.format("major"))(x_in)
                 x_in = self.activation()(x_in)
-                x_in = keras.layers.Dense(self.shape_outputs, name='{}_Dense'.format("major"))(x_in)
+                x_in = keras.layers.Dense(self._output_shape, name='{}_Dense'.format("major"))(x_in)
                 out_main = self.activation()(x_in)
                 self.model = keras.Model(inputs=x_input, outputs=[out_main])
 
@@ -139,7 +139,7 @@ class AbstractModelClass(ABC):
     the corresponding loss function.
     """
 
-    def __init__(self, shape_inputs, shape_outputs) -> None:
+    def __init__(self, input_shape, output_shape) -> None:
         """Predefine internal attributes for model and loss."""
         self.__model = None
         self.model_name = self.__class__.__name__
@@ -154,8 +154,8 @@ class AbstractModelClass(ABC):
                                           }
         self.__compile_options = self.__allowed_compile_options
         self.__compile_options_is_set = False
-        self.shape_inputs = shape_inputs
-        self.shape_outputs = self.__extract_from_tuple(shape_outputs)
+        self._input_shape = input_shape
+        self._output_shape = self.__extract_from_tuple(output_shape)
 
     def __getattr__(self, name: str) -> Any:
         """
@@ -355,17 +355,17 @@ class MyLittleModel(AbstractModelClass):
     on the window_lead_time parameter.
     """
 
-    def __init__(self, shape_inputs: list, shape_outputs: list):
+    def __init__(self, input_shape: list, output_shape: list):
         """
         Sets model and loss depending on the given arguments.
 
-        :param shape_inputs: list of input shapes (expect len=1 with shape=(window_hist, station, variables))
-        :param shape_outputs: list of output shapes (expect len=1 with shape=(window_forecast))
+        :param input_shape: list of input shapes (expect len=1 with shape=(window_hist, station, variables))
+        :param output_shape: list of output shapes (expect len=1 with shape=(window_forecast))
         """
 
-        assert len(shape_inputs) == 1
-        assert len(shape_outputs) == 1
-        super().__init__(shape_inputs[0], shape_outputs[0])
+        assert len(input_shape) == 1
+        assert len(output_shape) == 1
+        super().__init__(input_shape[0], output_shape[0])
 
         # settings
         self.dropout_rate = 0.1
@@ -381,7 +381,7 @@ class MyLittleModel(AbstractModelClass):
         """
         Build the model.
         """
-        x_input = keras.layers.Input(shape=self.shape_inputs)
+        x_input = keras.layers.Input(shape=self._input_shape)
         x_in = keras.layers.Flatten(name='{}'.format("major"))(x_input)
         x_in = keras.layers.Dense(64, name='{}_Dense_64'.format("major"))(x_in)
         x_in = self.activation()(x_in)
@@ -389,7 +389,7 @@ class MyLittleModel(AbstractModelClass):
         x_in = self.activation()(x_in)
         x_in = keras.layers.Dense(16, name='{}_Dense_16'.format("major"))(x_in)
         x_in = self.activation()(x_in)
-        x_in = keras.layers.Dense(self.shape_outputs, name='{}_Dense'.format("major"))(x_in)
+        x_in = keras.layers.Dense(self._output_shape, name='{}_Dense'.format("major"))(x_in)
         out_main = self.activation()(x_in)
         self.model = keras.Model(inputs=x_input, outputs=[out_main])
 
@@ -410,17 +410,17 @@ class MyBranchedModel(AbstractModelClass):
     Dense layer.
     """
 
-    def __init__(self, shape_inputs: list, shape_outputs: list):
+    def __init__(self, input_shape: list, output_shape: list):
         """
         Sets model and loss depending on the given arguments.
 
-        :param shape_inputs: list of input shapes (expect len=1 with shape=(window_hist, station, variables))
-        :param shape_outputs: list of output shapes (expect len=1 with shape=(window_forecast))
+        :param input_shape: list of input shapes (expect len=1 with shape=(window_hist, station, variables))
+        :param output_shape: list of output shapes (expect len=1 with shape=(window_forecast))
         """
 
-        assert len(shape_inputs) == 1
-        assert len(shape_outputs) == 1
-        super().__init__(shape_inputs[0], shape_outputs[0])
+        assert len(input_shape) == 1
+        assert len(output_shape) == 1
+        super().__init__(input_shape[0], output_shape[0])
 
         # settings
         self.dropout_rate = 0.1
@@ -438,22 +438,22 @@ class MyBranchedModel(AbstractModelClass):
         """
 
         # add 1 to window_size to include current time step t0
-        x_input = keras.layers.Input(shape=self.shape_inputs)
+        x_input = keras.layers.Input(shape=self._input_shape)
         x_in = keras.layers.Conv2D(32, (1, 1), padding='same', name='{}_Conv_1x1'.format("major"))(x_input)
         x_in = self.activation(name='{}_conv_act'.format("major"))(x_in)
         x_in = keras.layers.Flatten(name='{}'.format("major"))(x_in)
         x_in = keras.layers.Dropout(self.dropout_rate, name='{}_Dropout_1'.format("major"))(x_in)
         x_in = keras.layers.Dense(64, name='{}_Dense_64'.format("major"))(x_in)
         x_in = self.activation()(x_in)
-        out_minor_1 = keras.layers.Dense(self.shape_outputs, name='{}_Dense'.format("minor_1"))(x_in)
+        out_minor_1 = keras.layers.Dense(self._output_shape, name='{}_Dense'.format("minor_1"))(x_in)
         out_minor_1 = self.activation(name="minor_1")(out_minor_1)
         x_in = keras.layers.Dense(32, name='{}_Dense_32'.format("major"))(x_in)
         x_in = self.activation()(x_in)
-        out_minor_2 = keras.layers.Dense(self.shape_outputs, name='{}_Dense'.format("minor_2"))(x_in)
+        out_minor_2 = keras.layers.Dense(self._output_shape, name='{}_Dense'.format("minor_2"))(x_in)
         out_minor_2 = self.activation(name="minor_2")(out_minor_2)
         x_in = keras.layers.Dense(16, name='{}_Dense_16'.format("major"))(x_in)
         x_in = self.activation()(x_in)
-        x_in = keras.layers.Dense(self.shape_outputs, name='{}_Dense'.format("major"))(x_in)
+        x_in = keras.layers.Dense(self._output_shape, name='{}_Dense'.format("major"))(x_in)
         out_main = self.activation(name="main")(x_in)
         self.model = keras.Model(inputs=x_input, outputs=[out_minor_1, out_minor_2, out_main])
 
@@ -468,17 +468,17 @@ class MyBranchedModel(AbstractModelClass):
 
 class MyTowerModel(AbstractModelClass):
 
-    def __init__(self, shape_inputs: list, shape_outputs: list):
+    def __init__(self, input_shape: list, output_shape: list):
         """
         Sets model and loss depending on the given arguments.
 
-        :param shape_inputs: list of input shapes (expect len=1 with shape=(window_hist, station, variables))
-        :param shape_outputs: list of output shapes (expect len=1 with shape=(window_forecast))
+        :param input_shape: list of input shapes (expect len=1 with shape=(window_hist, station, variables))
+        :param output_shape: list of output shapes (expect len=1 with shape=(window_forecast))
         """
 
-        assert len(shape_inputs) == 1
-        assert len(shape_outputs) == 1
-        super().__init__(shape_inputs[0], shape_outputs[0])
+        assert len(input_shape) == 1
+        assert len(output_shape) == 1
+        super().__init__(input_shape[0], output_shape[0])
 
         # settings
         self.dropout_rate = 1e-2
@@ -529,7 +529,7 @@ class MyTowerModel(AbstractModelClass):
         ##########################################
         inception_model = InceptionModelBase()
 
-        X_input = keras.layers.Input(shape=self.shape_inputs)
+        X_input = keras.layers.Input(shape=self._input_shape)
 
         X_in = inception_model.inception_block(X_input, conv_settings_dict1, pool_settings_dict1,
                                                regularizer=self.regularizer,
@@ -551,7 +551,7 @@ class MyTowerModel(AbstractModelClass):
         # out_main = flatten_tail(X_in, 'Main', activation=activation, bound_weight=True, dropout_rate=self.dropout_rate,
         #                         reduction_filter=64, inner_neurons=64, output_neurons=self.window_lead_time)
 
-        out_main = flatten_tail(X_in, inner_neurons=64, activation=activation, output_neurons=self.shape_outputs,
+        out_main = flatten_tail(X_in, inner_neurons=64, activation=activation, output_neurons=self._output_shape,
                                 output_activation='linear', reduction_filter=64,
                                 name='Main', bound_weight=True, dropout_rate=self.dropout_rate,
                                 kernel_regularizer=self.regularizer
@@ -566,17 +566,17 @@ class MyTowerModel(AbstractModelClass):
 
 class MyPaperModel(AbstractModelClass):
 
-    def __init__(self, shape_inputs: list, shape_outputs: list):
+    def __init__(self, input_shape: list, output_shape: list):
         """
         Sets model and loss depending on the given arguments.
 
-        :param shape_inputs: list of input shapes (expect len=1 with shape=(window_hist, station, variables))
-        :param shape_outputs: list of output shapes (expect len=1 with shape=(window_forecast))
+        :param input_shape: list of input shapes (expect len=1 with shape=(window_hist, station, variables))
+        :param output_shape: list of output shapes (expect len=1 with shape=(window_forecast))
         """
 
-        assert len(shape_inputs) == 1
-        assert len(shape_outputs) == 1
-        super().__init__(shape_inputs[0], shape_outputs[0])
+        assert len(input_shape) == 1
+        assert len(output_shape) == 1
+        super().__init__(input_shape[0], output_shape[0])
 
         # settings
         self.dropout_rate = .3
@@ -643,7 +643,7 @@ class MyPaperModel(AbstractModelClass):
         ##########################################
         inception_model = InceptionModelBase()
 
-        X_input = keras.layers.Input(shape=self.shape_inputs)
+        X_input = keras.layers.Input(shape=self._input_shape)
 
         pad_size = PadUtils.get_padding_for_same(first_kernel)
         # X_in = adv_pad.SymmetricPadding2D(padding=pad_size)(X_input)
@@ -661,7 +661,7 @@ class MyPaperModel(AbstractModelClass):
                                                padding=self.padding)
         # out_minor1 = flatten_tail(X_in, 'minor_1', False, self.dropout_rate, self.window_lead_time,
         #                           self.activation, 32, 64)
-        out_minor1 = flatten_tail(X_in, inner_neurons=64, activation=activation, output_neurons=self.shape_outputs,
+        out_minor1 = flatten_tail(X_in, inner_neurons=64, activation=activation, output_neurons=self._output_shape,
                                   output_activation='linear', reduction_filter=32,
                                   name='minor_1', bound_weight=False, dropout_rate=self.dropout_rate,
                                   kernel_regularizer=self.regularizer
@@ -679,8 +679,8 @@ class MyPaperModel(AbstractModelClass):
         #                                        batch_normalisation=True)
         #############################################
 
-        out_main = flatten_tail(X_in, inner_neurons=64 * 2, activation=activation, output_neurons=self.shape_outputs,
-                                output_activation='linear',  reduction_filter=64 * 2,
+        out_main = flatten_tail(X_in, inner_neurons=64 * 2, activation=activation, output_neurons=self._output_shape,
+                                output_activation='linear', reduction_filter=64 * 2,
                                 name='Main', bound_weight=False, dropout_rate=self.dropout_rate,
                                 kernel_regularizer=self.regularizer
                                 )
diff --git a/mlair/run_modules/model_setup.py b/mlair/run_modules/model_setup.py
index e4bff5a621619e6d806fc7ae58ed093331463187..c6af13b02e818431578c7423d837f95e64ca3d15 100644
--- a/mlair/run_modules/model_setup.py
+++ b/mlair/run_modules/model_setup.py
@@ -95,13 +95,13 @@ class ModelSetup(RunEnvironment):
     def _set_shapes(self):
         """Set input and output shapes from train collection."""
         shape = list(map(lambda x: x.shape[1:], self.data_store.get("data_collection", "train")[0].get_X()))
-        self.data_store.set("shape_inputs", shape, self.scope)
+        self.data_store.set("input_shape", shape, self.scope)
         shape = list(map(lambda y: y.shape[1:], self.data_store.get("data_collection", "train")[0].get_Y()))
-        self.data_store.set("shape_outputs", shape, self.scope)
+        self.data_store.set("output_shape", shape, self.scope)
 
     def compile_model(self):
         """
-        Compiles the keras model. Compile options are mandetory and have to be set by implementing set_compile() method
+        Compiles the keras model. Compile options are mandatory and have to be set by implementing the set_compile_options() method
         in child class of AbstractModelClass.
         """
         compile_options = self.model.compile_options
@@ -135,7 +135,7 @@ class ModelSetup(RunEnvironment):
 
     def build_model(self):
         """Build model using input and output shapes from data store."""
-        args_list = ["shape_inputs", "shape_outputs"]
+        args_list = ["input_shape", "output_shape"]
         args = self.data_store.create_args_dict(args_list, self.scope)
         model = self.data_store.get("model_class")
         self.model = model(**args)
diff --git a/mlair/run_modules/post_processing.py b/mlair/run_modules/post_processing.py
index b4af7a754335e8da6d29870b1a0c4152d7dc9af5..de43f30d929db1de12681d92c9c585df5c07944e 100644
--- a/mlair/run_modules/post_processing.py
+++ b/mlair/run_modules/post_processing.py
@@ -74,7 +74,7 @@ class PostProcessing(RunEnvironment):
         self.plot_path: str = self.data_store.get("plot_path")
         self.target_var = self.data_store.get("target_var")
         self._sampling = self.data_store.get("sampling")
-        self.window_lead_time = extract_value(self.data_store.get("shape_outputs", "model"))
+        self.window_lead_time = extract_value(self.data_store.get("output_shape", "model"))
         self.skill_scores = None
         self.bootstrap_skill_scores = None
         self._run()
diff --git a/mlair/run_modules/run_environment.py b/mlair/run_modules/run_environment.py
index ecb55282f25c369d6f5eddd81907a7d28ec7d62b..5414b21cb0cb26674c699a02c22400959e11f1aa 100644
--- a/mlair/run_modules/run_environment.py
+++ b/mlair/run_modules/run_environment.py
@@ -92,17 +92,18 @@ class RunEnvironment(object):
     logger = None
     tracker_list = []
 
-    def __init__(self):
+    def __init__(self, name=None):
         """Start time tracking automatically and logs as info."""
         if RunEnvironment.data_store is None:
             RunEnvironment.data_store = DataStoreObject()
         if RunEnvironment.logger is None:
             RunEnvironment.logger = Logger()
-        self.time = TimeTracking()
-        logging.info(f"{self.__class__.__name__} started")
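+        # use the given custom name in logging and tracking; fall back to the class name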
+        self._name = name if name is not None else self.__class__.__name__
+        self.time = TimeTracking(name=name)
+        logging.info(f"{self._name} started")
         # atexit.register(self.__del__)
         self.data_store.tracker.append({})
-        self.tracker_list.extend([{self.__class__.__name__: self.data_store.tracker[-1]}])
+        self.tracker_list.extend([{self._name: self.data_store.tracker[-1]}])
 
     def __del__(self):
         """
@@ -114,7 +115,7 @@ class RunEnvironment(object):
         """
         if not self.del_by_exit:
             self.time.stop()
-            logging.info(f"{self.__class__.__name__} finished after {self.time}")
+            logging.info(f"{self._name} finished after {self.time}")
             self.del_by_exit = True
             # copy log file and clear data store only if called as base class and not as super class
             if self.__class__.__name__ == "RunEnvironment":
diff --git a/mlair/workflows/abstract_workflow.py b/mlair/workflows/abstract_workflow.py
index d3fe480fdfe09393fbf2051d8795735e9217a8ad..bced90bbe848cc9ebe36c583d05b62549f0ae80b 100644
--- a/mlair/workflows/abstract_workflow.py
+++ b/mlair/workflows/abstract_workflow.py
@@ -15,8 +15,9 @@ class Workflow:
     method is sufficient. It must be taken care for inter-stage dependencies, this workflow class only handles the
     execution but not the dependencies (workflow would probably fail in this case)."""
 
-    def __init__(self):
+    def __init__(self, name=None):
         self._registry = OrderedDict()
+        self._name = name if name is not None else self.__class__.__name__
 
     def add(self, stage, **kwargs):
         """Add a new stage with optional kwargs."""
@@ -24,6 +25,6 @@ class Workflow:
 
     def run(self):
         """Run workflow embedded in a run environment and according to the stage's ordering."""
-        with RunEnvironment():
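+        # run all stages inside a RunEnvironment carrying the workflow's name (shown in the log output)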
+        with RunEnvironment(name=self._name):
             for stage, kwargs in self._registry.items():
                 stage(**kwargs)
diff --git a/test/test_data_handler/test_iterator.py b/test/test_data_handler/test_iterator.py
index ff81fc7b89b2cede0f47cdf209e77e373cd0d656..ec224c06e358297972097f2cc75cea86f768784f 100644
--- a/test/test_data_handler/test_iterator.py
+++ b/test/test_data_handler/test_iterator.py
@@ -213,12 +213,12 @@ class TestKerasIterator:
 
     def test_get_model_rank_single_output_branch(self):
         iterator = object.__new__(KerasIterator)
-        iterator.model = MyLittleModel(shape_inputs=[(14, 1, 2)], shape_outputs=[(3,)])
+        iterator.model = MyLittleModel(input_shape=[(14, 1, 2)], output_shape=[(3,)])
         assert iterator._get_model_rank() == 1
 
     def test_get_model_rank_multiple_output_branch(self):
         iterator = object.__new__(KerasIterator)
-        iterator.model = MyBranchedModel(shape_inputs=[(14, 1, 2)], shape_outputs=[(3,)])
+        iterator.model = MyBranchedModel(input_shape=[(14, 1, 2)], output_shape=[(3,)])
         assert iterator._get_model_rank() == 3
 
     def test_get_model_rank_error(self):
diff --git a/test/test_model_modules/test_model_class.py b/test/test_model_modules/test_model_class.py
index 3e77fd17c4cd8151fe76816abf0bef323adb2e96..28218eb60e23d6e5b0e361fc2617398aade799cc 100644
--- a/test/test_model_modules/test_model_class.py
+++ b/test/test_model_modules/test_model_class.py
@@ -12,7 +12,7 @@ class Paddings:
 class AbstractModelSubClass(AbstractModelClass):
 
     def __init__(self):
-        super().__init__(shape_inputs=(12, 1, 2), shape_outputs=3)
+        super().__init__(input_shape=(12, 1, 2), output_shape=3)
         self.test_attr = "testAttr"
 
 
@@ -20,7 +20,7 @@ class TestAbstractModelClass:
 
     @pytest.fixture
     def amc(self):
-        return AbstractModelClass(shape_inputs=(14, 1, 2), shape_outputs=(3,))
+        return AbstractModelClass(input_shape=(14, 1, 2), output_shape=(3,))
 
     @pytest.fixture
     def amsc(self):
@@ -31,8 +31,8 @@ class TestAbstractModelClass:
         # assert amc.loss is None
         assert amc.model_name == "AbstractModelClass"
         assert amc.custom_objects == {}
-        assert amc.shape_inputs == (14, 1, 2)
-        assert amc.shape_outputs == 3
+        assert amc._input_shape == (14, 1, 2)
+        assert amc._output_shape == 3
 
     def test_model_property(self, amc):
         amc.model = keras.Model()
@@ -181,10 +181,10 @@ class TestAbstractModelClass:
         assert amc.compile == amc.model.compile
 
     def test_get_settings(self, amc, amsc):
-        assert amc.get_settings() == {"model_name": "AbstractModelClass", "shape_inputs": (14, 1, 2),
-                                      "shape_outputs": 3}
+        assert amc.get_settings() == {"model_name": "AbstractModelClass", "_input_shape": (14, 1, 2),
+                                      "_output_shape": 3}
         assert amsc.get_settings() == {"test_attr": "testAttr", "model_name": "AbstractModelSubClass",
-                                       "shape_inputs": (12, 1, 2), "shape_outputs": 3}
+                                       "_input_shape": (12, 1, 2), "_output_shape": 3}
 
     def test_custom_objects(self, amc):
         amc.custom_objects = {"Test": 123}
@@ -204,7 +204,7 @@ class TestMyPaperModel:
 
     @pytest.fixture
     def mpm(self):
-        return MyPaperModel(shape_inputs=[(7, 1, 9)], shape_outputs=[(4,)])
+        return MyPaperModel(input_shape=[(7, 1, 9)], output_shape=[(4,)])
 
     def test_init(self, mpm):
         # check if loss number of loss functions fit to model outputs
diff --git a/test/test_run_modules/test_model_setup.py b/test/test_run_modules/test_model_setup.py
index 1b3e43b2bbfda44f1a5b5463e876adc578360ff3..382105344dfb9fffb37215f2706dda1f2ebd90ea 100644
--- a/test/test_run_modules/test_model_setup.py
+++ b/test/test_run_modules/test_model_setup.py
@@ -49,10 +49,10 @@ class TestModelSetup:
     @pytest.fixture
     def setup_with_gen(self, setup, keras_iterator):
         setup.data_store.set("data_collection", keras_iterator, "train")
-        shape_inputs = [keras_iterator[0].get_X()[0].shape[1:]]
-        setup.data_store.set("shape_inputs", shape_inputs, "model")
-        shape_outputs = [keras_iterator[0].get_Y()[0].shape[1:]]
-        setup.data_store.set("shape_outputs", shape_outputs, "model")
+        input_shape = [keras_iterator[0].get_X()[0].shape[1:]]
+        setup.data_store.set("input_shape", input_shape, "model")
+        output_shape = [keras_iterator[0].get_Y()[0].shape[1:]]
+        setup.data_store.set("output_shape", output_shape, "model")
         yield setup
         RunEnvironment().__del__()
 
@@ -64,7 +64,7 @@ class TestModelSetup:
 
     @pytest.fixture
     def setup_with_model(self, setup):
-        setup.model = AbstractModelClass(shape_inputs=(12, 1), shape_outputs=2)
+        setup.model = AbstractModelClass(input_shape=(12, 1), output_shape=2)
         setup.model.test_param = "42"
         yield setup
         RunEnvironment().__del__()
@@ -103,16 +103,16 @@ class TestModelSetup:
         setup_with_gen.build_model()
         assert isinstance(setup_with_gen.model, AbstractModelClass)
         expected = {"lr_decay", "model_name", "dropout_rate", "regularizer", "initial_lr", "optimizer", "activation",
-                    "shape_inputs", "shape_outputs"}
+                    "input_shape", "output_shape"}
         assert expected <= self.current_scope_as_set(setup_with_gen)
 
     def test_set_shapes(self, setup_with_gen_tiny):
-        assert len(setup_with_gen_tiny.data_store.search_name("shape_inputs")) == 0
-        assert len(setup_with_gen_tiny.data_store.search_name("shape_outputs")) == 0
+        assert len(setup_with_gen_tiny.data_store.search_name("input_shape")) == 0
+        assert len(setup_with_gen_tiny.data_store.search_name("output_shape")) == 0
         setup_with_gen_tiny._set_shapes()
-        assert setup_with_gen_tiny.data_store.get("shape_inputs", setup_with_gen_tiny.scope) == [(14, 1, 5), (10, 1, 2),
+        assert setup_with_gen_tiny.data_store.get("input_shape", setup_with_gen_tiny.scope) == [(14, 1, 5), (10, 1, 2),
                                                                                                  (1, 1, 2)]
-        assert setup_with_gen_tiny.data_store.get("shape_outputs", setup_with_gen_tiny.scope) == [(5,), (3,)]
+        assert setup_with_gen_tiny.data_store.get("output_shape", setup_with_gen_tiny.scope) == [(5,), (3,)]
 
     def test_load_weights(self):
         pass