diff --git a/README.md b/README.md
index c33aab4b8643d2907b07b5ebcb254076515d03d2..8e424adcc04250732f534b351f7ec2dd631dd201 100644
--- a/README.md
+++ b/README.md
@@ -47,7 +47,7 @@ mlair.run()
-The logging output will show you many informations. Additional information (including debug messages) are collected 
+The logging output provides a lot of information. Additional information (including debug messages) is collected 
 inside the experiment path in the logging folder.
 ```log
-INFO: mlair started
+INFO: DefaultWorkflow started
 INFO: ExperimentSetup started
 INFO: Experiment path is: /home/<usr>/mlair/testrun_network 
 ...
@@ -55,7 +55,7 @@ INFO: load data for DEBW001 from JOIN
 ...
 INFO: Training started
 ...
-INFO: mlair finished after 00:00:12 (hh:mm:ss)
+INFO: DefaultWorkflow finished after 00:00:12 (hh:mm:ss)
 ```
 
 ## Example 2
@@ -77,7 +77,7 @@ mlair.run(stations=stations,
 ```
-The output looks similar, but we can see, that the new stations are loaded.
+The output looks similar, but we can see that the new stations are loaded.
 ```log
-INFO: mlair started
+INFO: DefaultWorkflow started
 INFO: ExperimentSetup started
 ...
 INFO: load data for DEBW030 from JOIN 
@@ -85,7 +85,7 @@ INFO: load data for DEBW037 from JOIN
 ...
 INFO: Training started
 ...
-INFO: mlair finished after 00:00:24 (hh:mm:ss)
+INFO: DefaultWorkflow finished after 00:00:24 (hh:mm:ss)
 ```
 
 ## Example 3
@@ -111,11 +111,11 @@ mlair.run(stations=stations,
 ```
-We can see from the terminal that no training was performed. Analysis is now made on the new stations.
+We can see from the terminal that no training was performed. The analysis is now carried out on the new stations.
 ```log
-INFO: mlair started
+INFO: DefaultWorkflow started
 ...
 INFO: No training has started, because trainable parameter was false. 
 ...
-INFO: mlair finished after 00:00:06 (hh:mm:ss)
+INFO: DefaultWorkflow finished after 00:00:06 (hh:mm:ss)
 ```
 
 
@@ -137,7 +137,7 @@ DefaultWorkflow.run()
 ```
-The output of running this default workflow will be structured like the following.
+The output of running this default workflow will be structured as follows.
 ```log
-INFO: mlair started
+INFO: DefaultWorkflow started
 INFO: ExperimentSetup started
 ...
 INFO: ExperimentSetup finished after 00:00:01 (hh:mm:ss)
@@ -153,7 +153,7 @@ INFO: Training finished after 00:02:15 (hh:mm:ss)
 INFO: PostProcessing started
 ...
 INFO: PostProcessing finished after 00:01:37 (hh:mm:ss)
-INFO: mlair finished after 00:04:05 (hh:mm:ss)
+INFO: DefaultWorkflow finished after 00:04:05 (hh:mm:ss)
 ```
 
 # Customised Run Module and Workflow
@@ -199,7 +199,7 @@ CustomWorkflow.run()
 The output will look like:
 
 ```log
-INFO: mlair started
+INFO: Workflow started
 ...
 INFO: ExperimentSetup finished after 00:00:12 (hh:mm:ss)
 INFO: CustomStage started
@@ -207,7 +207,7 @@ INFO: Just running a custom stage.
 INFO: test_string = Hello World
 INFO: epochs = 128
 INFO: CustomStage finished after 00:00:01 (hh:mm:ss)
-INFO: mlair finished after 00:00:13 (hh:mm:ss)
+INFO: Workflow finished after 00:00:13 (hh:mm:ss)
 ```
 
 # Custom Model
@@ -226,9 +226,9 @@ import keras
 
 class MyCustomisedModel(AbstractModelClass):
 
-    def __init__(self, shape_inputs: list, shape_outputs: list):
+    def __init__(self, input_shape: list, output_shape: list):
 
-        super().__init__(shape_inputs[0], shape_outputs[0])
+        super().__init__(input_shape[0], output_shape[0])
 
         # settings
         self.dropout_rate = 0.1
@@ -250,21 +250,21 @@ class MyCustomisedModel(AbstractModelClass):
-  loss has been added for demonstration only, because we use a build-in loss function. Nonetheless, we always encourage
-  you to add the loss as custom object, to prevent potential errors when loading an already created model instead of
+  loss has been added for demonstration only, because we use a built-in loss function. Nonetheless, we always encourage
+  you to add the loss as a custom object, to prevent potential errors when loading an already created model instead of
   training a new one.
-* Now build your model inside `set_model()` by using the instance attributes `self.shape_inputs` and
-  `self.shape_outputs` and storing the model as `self.model`.
+* Now build your model inside `set_model()` by using the instance attributes `self._input_shape` and
+  `self._output_shape` and storing the model as `self.model`.
 
 ```python
 class MyCustomisedModel(AbstractModelClass):
 
     def set_model(self):
-        x_input = keras.layers.Input(shape=self.shape_inputs)
+        x_input = keras.layers.Input(shape=self._input_shape)
         x_in = keras.layers.Conv2D(32, (1, 1), padding='same', name='{}_Conv_1x1'.format("major"))(x_input)
         x_in = self.activation(name='{}_conv_act'.format("major"))(x_in)
         x_in = keras.layers.Flatten(name='{}'.format("major"))(x_in)
         x_in = keras.layers.Dropout(self.dropout_rate, name='{}_Dropout_1'.format("major"))(x_in)
         x_in = keras.layers.Dense(16, name='{}_Dense_16'.format("major"))(x_in)
         x_in = self.activation()(x_in)
-        x_in = keras.layers.Dense(self.shape_outputs, name='{}_Dense'.format("major"))(x_in)
+        x_in = keras.layers.Dense(self._output_shape, name='{}_Dense'.format("major"))(x_in)
         out_main = self.activation()(x_in)
         self.model = keras.Model(inputs=x_input, outputs=[out_main])
 ```
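
A minimal usage sketch of the renamed keyword arguments (hedged: it mirrors the constructor calls in `test/test_data_handler/test_iterator.py` below and assumes `mlair` and `keras` are importable; the shapes are illustrative only):

```python
# Hedged sketch: instantiate a shipped model class with the new keyword names.
# One input branch of shape (window, station, variables) and one output branch
# of shape (window_forecast,); set_model() is assumed to be invoked from
# __init__, as in the shipped model classes.
from mlair.model_modules.model_class import MyLittleModel

my_model = MyLittleModel(input_shape=[(14, 1, 2)], output_shape=[(3,)])
my_model.model.summary()  # the underlying keras.Model built in set_model()
```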
diff --git a/docs/_source/customise.rst b/docs/_source/customise.rst
index 4c9ee5386365c74e3d85c0f81085fcd3e1971b69..3d3873cb6c08d0ad6bfcfeda0fe415807252bfff 100644
--- a/docs/_source/customise.rst
+++ b/docs/_source/customise.rst
@@ -27,7 +27,7 @@ The output of running this default workflow will be structured like the followin
 
 .. code-block::
 
-    INFO: mlair started
+    INFO: DefaultWorkflow started
     INFO: ExperimentSetup started
     ...
     INFO: ExperimentSetup finished after 00:00:01 (hh:mm:ss)
@@ -43,7 +43,7 @@ The output of running this default workflow will be structured like the followin
     INFO: PostProcessing started
     ...
     INFO: PostProcessing finished after 00:01:37 (hh:mm:ss)
-    INFO: mlair finished after 00:04:05 (hh:mm:ss)
+    INFO: DefaultWorkflow finished after 00:04:05 (hh:mm:ss)
 
 Custom Model
 ------------
@@ -65,9 +65,9 @@ How to create a customised model?
 
     class MyCustomisedModel(AbstractModelClass):
 
-        def __init__(self, shape_inputs: list, shape_outputs: list):
+        def __init__(self, input_shape: list, output_shape: list):
 
-            super().__init__(shape_inputs[0], shape_outputs[0])
+            super().__init__(input_shape[0], output_shape[0])
 
             # settings
             self.dropout_rate = 0.1
@@ -88,22 +88,22 @@ How to create a customised model?
-  loss has been added for demonstration only, because we use a build-in loss function. Nonetheless, we always encourage
-  you to add the loss as custom object, to prevent potential errors when loading an already created model instead of
+  loss has been added for demonstration only, because we use a built-in loss function. Nonetheless, we always encourage
+  you to add the loss as a custom object, to prevent potential errors when loading an already created model instead of
   training a new one.
-* Now build your model inside :py:`set_model()` by using the instance attributes :py:`self.shape_inputs` and
-  :py:`self.shape_outputs` and storing the model as :py:`self.model`.
+* Now build your model inside :py:`set_model()` by using the instance attributes :py:`self._input_shape` and
+  :py:`self._output_shape` and storing the model as :py:`self.model`.
 
 .. code-block:: python
 
     class MyCustomisedModel(AbstractModelClass):
 
         def set_model(self):
-            x_input = keras.layers.Input(shape=self.shape_inputs)
+            x_input = keras.layers.Input(shape=self._input_shape)
             x_in = keras.layers.Conv2D(32, (1, 1), padding='same', name='{}_Conv_1x1'.format("major"))(x_input)
             x_in = self.activation(name='{}_conv_act'.format("major"))(x_in)
             x_in = keras.layers.Flatten(name='{}'.format("major"))(x_in)
             x_in = keras.layers.Dropout(self.dropout_rate, name='{}_Dropout_1'.format("major"))(x_in)
             x_in = keras.layers.Dense(16, name='{}_Dense_16'.format("major"))(x_in)
             x_in = self.activation()(x_in)
-            x_in = keras.layers.Dense(self.shape_outputs, name='{}_Dense'.format("major"))(x_in)
+            x_in = keras.layers.Dense(self._output_shape, name='{}_Dense'.format("major"))(x_in)
             out_main = self.activation()(x_in)
             self.model = keras.Model(inputs=x_input, outputs=[out_main])
 
@@ -342,7 +342,7 @@ The output will look like:
 
 .. code-block::
 
-    INFO: mlair started
+    INFO: Workflow started
     ...
     INFO: ExperimentSetup finished after 00:00:12 (hh:mm:ss)
     INFO: CustomStage started
@@ -350,4 +350,4 @@ The output will look like:
     INFO: test_string = Hello World
     INFO: epochs = 128
     INFO: CustomStage finished after 00:00:01 (hh:mm:ss)
-    INFO: mlair finished after 00:00:13 (hh:mm:ss)
\ No newline at end of file
+    INFO: Workflow finished after 00:00:13 (hh:mm:ss)
\ No newline at end of file
diff --git a/docs/_source/get-started.rst b/docs/_source/get-started.rst
index 2e8838fd5b1ac63a7e34e39b7f8bc24d70f9c1b7..7c909b778ca12dfd74497c90d60d62adf9801b29 100644
--- a/docs/_source/get-started.rst
+++ b/docs/_source/get-started.rst
@@ -60,7 +60,7 @@ inside the experiment path in the logging folder.
 
 .. code-block::
 
-    INFO: mlair started
+    INFO: DefaultWorkflow started
     INFO: ExperimentSetup started
     INFO: Experiment path is: /home/<usr>/mlair/testrun_network
     ...
@@ -68,7 +68,7 @@ inside the experiment path in the logging folder.
     ...
     INFO: Training started
     ...
-    INFO: mlair finished after 00:00:12 (hh:mm:ss)
+    INFO: DefaultWorkflow finished after 00:00:12 (hh:mm:ss)
 
 
 Example 2
@@ -94,7 +94,7 @@ The output looks similar, but we can see, that the new stations are loaded.
 
 .. code-block::
 
-    INFO: mlair started
+    INFO: DefaultWorkflow started
     INFO: ExperimentSetup started
     ...
     INFO: load data for DEBW030 from JOIN
@@ -102,7 +102,7 @@ The output looks similar, but we can see, that the new stations are loaded.
     ...
     INFO: Training started
     ...
-    INFO: mlair finished after 00:00:24 (hh:mm:ss)
+    INFO: DefaultWorkflow finished after 00:00:24 (hh:mm:ss)
 
 Example 3
 ~~~~~~~~~
@@ -132,9 +132,9 @@ We can see from the terminal that no training was performed. Analysis is now mad
 
 .. code-block::
 
-    INFO: mlair started
+    INFO: DefaultWorkflow started
     ...
     INFO: No training has started, because trainable parameter was false.
     ...
-    INFO: mlair finished after 00:00:06 (hh:mm:ss)
+    INFO: DefaultWorkflow finished after 00:00:06 (hh:mm:ss)
 
diff --git a/mlair/model_modules/model_class.py b/mlair/model_modules/model_class.py
index 0e69d22012a592b30c6ffdf9ed6082c47a291f90..c9cc13bd8108e43b5a9f03682942eacdf5a55f04 100644
--- a/mlair/model_modules/model_class.py
+++ b/mlair/model_modules/model_class.py
@@ -23,9 +23,9 @@ How to create a customised model?
 
         class MyCustomisedModel(AbstractModelClass):
 
-            def __init__(self, shape_inputs: list, shape_outputs: list):
+            def __init__(self, input_shape: list, output_shape: list):
 
-                super().__init__(shape_inputs[0], shape_outputs[0])
+                super().__init__(input_shape[0], output_shape[0])
 
                 # settings
                 self.dropout_rate = 0.1
@@ -49,14 +49,14 @@ How to create a customised model?
         class MyCustomisedModel(AbstractModelClass):
 
             def set_model(self):
-                x_input = keras.layers.Input(shape=self.shape_inputs)
+                x_input = keras.layers.Input(shape=self._input_shape)
                 x_in = keras.layers.Conv2D(32, (1, 1), padding='same', name='{}_Conv_1x1'.format("major"))(x_input)
                 x_in = self.activation(name='{}_conv_act'.format("major"))(x_in)
                 x_in = keras.layers.Flatten(name='{}'.format("major"))(x_in)
                 x_in = keras.layers.Dropout(self.dropout_rate, name='{}_Dropout_1'.format("major"))(x_in)
                 x_in = keras.layers.Dense(16, name='{}_Dense_16'.format("major"))(x_in)
                 x_in = self.activation()(x_in)
-                x_in = keras.layers.Dense(self.shape_outputs, name='{}_Dense'.format("major"))(x_in)
+                x_in = keras.layers.Dense(self._output_shape, name='{}_Dense'.format("major"))(x_in)
                 out_main = self.activation()(x_in)
                 self.model = keras.Model(inputs=x_input, outputs=[out_main])
 
@@ -139,7 +139,7 @@ class AbstractModelClass(ABC):
     the corresponding loss function.
     """
 
-    def __init__(self, shape_inputs, shape_outputs) -> None:
+    def __init__(self, input_shape, output_shape) -> None:
         """Predefine internal attributes for model and loss."""
         self.__model = None
         self.model_name = self.__class__.__name__
@@ -154,8 +154,8 @@ class AbstractModelClass(ABC):
                                           }
         self.__compile_options = self.__allowed_compile_options
         self.__compile_options_is_set = False
-        self.shape_inputs = shape_inputs
-        self.shape_outputs = self.__extract_from_tuple(shape_outputs)
+        self._input_shape = input_shape
+        self._output_shape = self.__extract_from_tuple(output_shape)
 
     def __getattr__(self, name: str) -> Any:
         """
@@ -355,17 +355,17 @@ class MyLittleModel(AbstractModelClass):
     on the window_lead_time parameter.
     """
 
-    def __init__(self, shape_inputs: list, shape_outputs: list):
+    def __init__(self, input_shape: list, output_shape: list):
         """
         Sets model and loss depending on the given arguments.
 
-        :param shape_inputs: list of input shapes (expect len=1 with shape=(window_hist, station, variables))
-        :param shape_outputs: list of output shapes (expect len=1 with shape=(window_forecast))
+        :param input_shape: list of input shapes (expect len=1 with shape=(window_hist, station, variables))
+        :param output_shape: list of output shapes (expect len=1 with shape=(window_forecast))
         """
 
-        assert len(shape_inputs) == 1
-        assert len(shape_outputs) == 1
-        super().__init__(shape_inputs[0], shape_outputs[0])
+        assert len(input_shape) == 1
+        assert len(output_shape) == 1
+        super().__init__(input_shape[0], output_shape[0])
 
         # settings
         self.dropout_rate = 0.1
@@ -381,7 +381,7 @@ class MyLittleModel(AbstractModelClass):
         """
         Build the model.
         """
-        x_input = keras.layers.Input(shape=self.shape_inputs)
+        x_input = keras.layers.Input(shape=self._input_shape)
         x_in = keras.layers.Flatten(name='{}'.format("major"))(x_input)
         x_in = keras.layers.Dense(64, name='{}_Dense_64'.format("major"))(x_in)
         x_in = self.activation()(x_in)
@@ -389,7 +389,7 @@ class MyLittleModel(AbstractModelClass):
         x_in = self.activation()(x_in)
         x_in = keras.layers.Dense(16, name='{}_Dense_16'.format("major"))(x_in)
         x_in = self.activation()(x_in)
-        x_in = keras.layers.Dense(self.shape_outputs, name='{}_Dense'.format("major"))(x_in)
+        x_in = keras.layers.Dense(self._output_shape, name='{}_Dense'.format("major"))(x_in)
         out_main = self.activation()(x_in)
         self.model = keras.Model(inputs=x_input, outputs=[out_main])
 
@@ -410,17 +410,17 @@ class MyBranchedModel(AbstractModelClass):
     Dense layer.
     """
 
-    def __init__(self, shape_inputs: list, shape_outputs: list):
+    def __init__(self, input_shape: list, output_shape: list):
         """
         Sets model and loss depending on the given arguments.
 
-        :param shape_inputs: list of input shapes (expect len=1 with shape=(window_hist, station, variables))
-        :param shape_outputs: list of output shapes (expect len=1 with shape=(window_forecast))
+        :param input_shape: list of input shapes (expect len=1 with shape=(window_hist, station, variables))
+        :param output_shape: list of output shapes (expect len=1 with shape=(window_forecast))
         """
 
-        assert len(shape_inputs) == 1
-        assert len(shape_outputs) == 1
-        super().__init__(shape_inputs[0], shape_outputs[0])
+        assert len(input_shape) == 1
+        assert len(output_shape) == 1
+        super().__init__(input_shape[0], output_shape[0])
 
         # settings
         self.dropout_rate = 0.1
@@ -438,22 +438,22 @@ class MyBranchedModel(AbstractModelClass):
         """
 
         # add 1 to window_size to include current time step t0
-        x_input = keras.layers.Input(shape=self.shape_inputs)
+        x_input = keras.layers.Input(shape=self._input_shape)
         x_in = keras.layers.Conv2D(32, (1, 1), padding='same', name='{}_Conv_1x1'.format("major"))(x_input)
         x_in = self.activation(name='{}_conv_act'.format("major"))(x_in)
         x_in = keras.layers.Flatten(name='{}'.format("major"))(x_in)
         x_in = keras.layers.Dropout(self.dropout_rate, name='{}_Dropout_1'.format("major"))(x_in)
         x_in = keras.layers.Dense(64, name='{}_Dense_64'.format("major"))(x_in)
         x_in = self.activation()(x_in)
-        out_minor_1 = keras.layers.Dense(self.shape_outputs, name='{}_Dense'.format("minor_1"))(x_in)
+        out_minor_1 = keras.layers.Dense(self._output_shape, name='{}_Dense'.format("minor_1"))(x_in)
         out_minor_1 = self.activation(name="minor_1")(out_minor_1)
         x_in = keras.layers.Dense(32, name='{}_Dense_32'.format("major"))(x_in)
         x_in = self.activation()(x_in)
-        out_minor_2 = keras.layers.Dense(self.shape_outputs, name='{}_Dense'.format("minor_2"))(x_in)
+        out_minor_2 = keras.layers.Dense(self._output_shape, name='{}_Dense'.format("minor_2"))(x_in)
         out_minor_2 = self.activation(name="minor_2")(out_minor_2)
         x_in = keras.layers.Dense(16, name='{}_Dense_16'.format("major"))(x_in)
         x_in = self.activation()(x_in)
-        x_in = keras.layers.Dense(self.shape_outputs, name='{}_Dense'.format("major"))(x_in)
+        x_in = keras.layers.Dense(self._output_shape, name='{}_Dense'.format("major"))(x_in)
         out_main = self.activation(name="main")(x_in)
         self.model = keras.Model(inputs=x_input, outputs=[out_minor_1, out_minor_2, out_main])
 
@@ -468,17 +468,17 @@ class MyBranchedModel(AbstractModelClass):
 
 class MyTowerModel(AbstractModelClass):
 
-    def __init__(self, shape_inputs: list, shape_outputs: list):
+    def __init__(self, input_shape: list, output_shape: list):
         """
         Sets model and loss depending on the given arguments.
 
-        :param shape_inputs: list of input shapes (expect len=1 with shape=(window_hist, station, variables))
-        :param shape_outputs: list of output shapes (expect len=1 with shape=(window_forecast))
+        :param input_shape: list of input shapes (expect len=1 with shape=(window_hist, station, variables))
+        :param output_shape: list of output shapes (expect len=1 with shape=(window_forecast))
         """
 
-        assert len(shape_inputs) == 1
-        assert len(shape_outputs) == 1
-        super().__init__(shape_inputs[0], shape_outputs[0])
+        assert len(input_shape) == 1
+        assert len(output_shape) == 1
+        super().__init__(input_shape[0], output_shape[0])
 
         # settings
         self.dropout_rate = 1e-2
@@ -529,7 +529,7 @@ class MyTowerModel(AbstractModelClass):
         ##########################################
         inception_model = InceptionModelBase()
 
-        X_input = keras.layers.Input(shape=self.shape_inputs)
+        X_input = keras.layers.Input(shape=self._input_shape)
 
         X_in = inception_model.inception_block(X_input, conv_settings_dict1, pool_settings_dict1,
                                                regularizer=self.regularizer,
@@ -551,7 +551,7 @@ class MyTowerModel(AbstractModelClass):
         # out_main = flatten_tail(X_in, 'Main', activation=activation, bound_weight=True, dropout_rate=self.dropout_rate,
         #                         reduction_filter=64, inner_neurons=64, output_neurons=self.window_lead_time)
 
-        out_main = flatten_tail(X_in, inner_neurons=64, activation=activation, output_neurons=self.shape_outputs,
+        out_main = flatten_tail(X_in, inner_neurons=64, activation=activation, output_neurons=self._output_shape,
                                 output_activation='linear', reduction_filter=64,
                                 name='Main', bound_weight=True, dropout_rate=self.dropout_rate,
                                 kernel_regularizer=self.regularizer
@@ -566,17 +566,17 @@ class MyTowerModel(AbstractModelClass):
 
 class MyPaperModel(AbstractModelClass):
 
-    def __init__(self, shape_inputs: list, shape_outputs: list):
+    def __init__(self, input_shape: list, output_shape: list):
         """
         Sets model and loss depending on the given arguments.
 
-        :param shape_inputs: list of input shapes (expect len=1 with shape=(window_hist, station, variables))
-        :param shape_outputs: list of output shapes (expect len=1 with shape=(window_forecast))
+        :param input_shape: list of input shapes (expect len=1 with shape=(window_hist, station, variables))
+        :param output_shape: list of output shapes (expect len=1 with shape=(window_forecast))
         """
 
-        assert len(shape_inputs) == 1
-        assert len(shape_outputs) == 1
-        super().__init__(shape_inputs[0], shape_outputs[0])
+        assert len(input_shape) == 1
+        assert len(output_shape) == 1
+        super().__init__(input_shape[0], output_shape[0])
 
         # settings
         self.dropout_rate = .3
@@ -643,7 +643,7 @@ class MyPaperModel(AbstractModelClass):
         ##########################################
         inception_model = InceptionModelBase()
 
-        X_input = keras.layers.Input(shape=self.shape_inputs)
+        X_input = keras.layers.Input(shape=self._input_shape)
 
         pad_size = PadUtils.get_padding_for_same(first_kernel)
         # X_in = adv_pad.SymmetricPadding2D(padding=pad_size)(X_input)
@@ -661,7 +661,7 @@ class MyPaperModel(AbstractModelClass):
                                                padding=self.padding)
         # out_minor1 = flatten_tail(X_in, 'minor_1', False, self.dropout_rate, self.window_lead_time,
         #                           self.activation, 32, 64)
-        out_minor1 = flatten_tail(X_in, inner_neurons=64, activation=activation, output_neurons=self.shape_outputs,
+        out_minor1 = flatten_tail(X_in, inner_neurons=64, activation=activation, output_neurons=self._output_shape,
                                   output_activation='linear', reduction_filter=32,
                                   name='minor_1', bound_weight=False, dropout_rate=self.dropout_rate,
                                   kernel_regularizer=self.regularizer
@@ -679,8 +679,8 @@ class MyPaperModel(AbstractModelClass):
         #                                        batch_normalisation=True)
         #############################################
 
-        out_main = flatten_tail(X_in, inner_neurons=64 * 2, activation=activation, output_neurons=self.shape_outputs,
-                                output_activation='linear',  reduction_filter=64 * 2,
+        out_main = flatten_tail(X_in, inner_neurons=64 * 2, activation=activation, output_neurons=self._output_shape,
+                                output_activation='linear', reduction_filter=64 * 2,
                                 name='Main', bound_weight=False, dropout_rate=self.dropout_rate,
                                 kernel_regularizer=self.regularizer
                                 )
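
One behavioural detail worth noting alongside this rename (a hedged illustration, mirroring the assertions in `test/test_model_modules/test_model_class.py` below): the base class stores the input shape as passed but unpacks a length-one output tuple, so `keras.layers.Dense(self._output_shape, ...)` receives a plain unit count.

```python
# Hedged sketch of the tuple unpacking done by AbstractModelClass.__init__;
# the values are taken from the test suite in this patch.
from mlair.model_modules.model_class import AbstractModelClass

amc = AbstractModelClass(input_shape=(14, 1, 2), output_shape=(3,))
assert amc._input_shape == (14, 1, 2)  # stored unchanged
assert amc._output_shape == 3          # (3,) is unpacked to 3
```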
diff --git a/mlair/run_modules/model_setup.py b/mlair/run_modules/model_setup.py
index e4bff5a621619e6d806fc7ae58ed093331463187..c6af13b02e818431578c7423d837f95e64ca3d15 100644
--- a/mlair/run_modules/model_setup.py
+++ b/mlair/run_modules/model_setup.py
@@ -95,13 +95,13 @@ class ModelSetup(RunEnvironment):
     def _set_shapes(self):
         """Set input and output shapes from train collection."""
         shape = list(map(lambda x: x.shape[1:], self.data_store.get("data_collection", "train")[0].get_X()))
-        self.data_store.set("shape_inputs", shape, self.scope)
+        self.data_store.set("input_shape", shape, self.scope)
         shape = list(map(lambda y: y.shape[1:], self.data_store.get("data_collection", "train")[0].get_Y()))
-        self.data_store.set("shape_outputs", shape, self.scope)
+        self.data_store.set("output_shape", shape, self.scope)
 
     def compile_model(self):
         """
-        Compiles the keras model. Compile options are mandetory and have to be set by implementing set_compile() method
-        in child class of AbstractModelClass.
+        Compiles the keras model. Compile options are mandatory and have to be set by implementing the set_compile()
+        method in a child class of AbstractModelClass.
         """
         compile_options = self.model.compile_options
@@ -135,7 +135,7 @@ class ModelSetup(RunEnvironment):
 
     def build_model(self):
         """Build model using input and output shapes from data store."""
-        args_list = ["shape_inputs", "shape_outputs"]
+        args_list = ["input_shape", "output_shape"]
         args = self.data_store.create_args_dict(args_list, self.scope)
         model = self.data_store.get("model_class")
         self.model = model(**args)
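
For orientation, a self-contained sketch of the renamed data flow through `ModelSetup` (hedged: the dict below is a stand-in for mlair's data store, and the array shapes are illustrative, not prescribed):

```python
# Hedged sketch of _set_shapes() and build_model() after the key rename:
# shapes are read off the training data and stored under "input_shape" /
# "output_shape", then passed to the model class as keyword arguments.
import numpy as np

train_X = [np.zeros((100, 14, 1, 2))]  # one input branch: (samples, window, station, variables)
train_Y = [np.zeros((100, 3))]         # one output branch: (samples, window_forecast)

store = {}  # stand-in for the real data store
store["input_shape"] = [x.shape[1:] for x in train_X]    # -> [(14, 1, 2)]
store["output_shape"] = [y.shape[1:] for y in train_Y]   # -> [(3,)]

# build_model() then effectively does:
#   model = model_class(input_shape=store["input_shape"], output_shape=store["output_shape"])
```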
diff --git a/test/test_data_handler/test_iterator.py b/test/test_data_handler/test_iterator.py
index ff81fc7b89b2cede0f47cdf209e77e373cd0d656..ec224c06e358297972097f2cc75cea86f768784f 100644
--- a/test/test_data_handler/test_iterator.py
+++ b/test/test_data_handler/test_iterator.py
@@ -213,12 +213,12 @@ class TestKerasIterator:
 
     def test_get_model_rank_single_output_branch(self):
         iterator = object.__new__(KerasIterator)
-        iterator.model = MyLittleModel(shape_inputs=[(14, 1, 2)], shape_outputs=[(3,)])
+        iterator.model = MyLittleModel(input_shape=[(14, 1, 2)], output_shape=[(3,)])
         assert iterator._get_model_rank() == 1
 
     def test_get_model_rank_multiple_output_branch(self):
         iterator = object.__new__(KerasIterator)
-        iterator.model = MyBranchedModel(shape_inputs=[(14, 1, 2)], shape_outputs=[(3,)])
+        iterator.model = MyBranchedModel(input_shape=[(14, 1, 2)], output_shape=[(3,)])
         assert iterator._get_model_rank() == 3
 
     def test_get_model_rank_error(self):
diff --git a/test/test_model_modules/test_model_class.py b/test/test_model_modules/test_model_class.py
index 3e77fd17c4cd8151fe76816abf0bef323adb2e96..0ad5d123c25c085e5d31ff75133bc8fef7e09f57 100644
--- a/test/test_model_modules/test_model_class.py
+++ b/test/test_model_modules/test_model_class.py
@@ -12,7 +12,7 @@ class Paddings:
 class AbstractModelSubClass(AbstractModelClass):
 
     def __init__(self):
-        super().__init__(shape_inputs=(12, 1, 2), shape_outputs=3)
+        super().__init__(input_shape=(12, 1, 2), output_shape=3)
         self.test_attr = "testAttr"
 
 
@@ -20,7 +20,7 @@ class TestAbstractModelClass:
 
     @pytest.fixture
     def amc(self):
-        return AbstractModelClass(shape_inputs=(14, 1, 2), shape_outputs=(3,))
+        return AbstractModelClass(input_shape=(14, 1, 2), output_shape=(3,))
 
     @pytest.fixture
     def amsc(self):
@@ -31,8 +31,8 @@ class TestAbstractModelClass:
         # assert amc.loss is None
         assert amc.model_name == "AbstractModelClass"
         assert amc.custom_objects == {}
-        assert amc.shape_inputs == (14, 1, 2)
-        assert amc.shape_outputs == 3
+        assert amc._input_shape == (14, 1, 2)
+        assert amc._output_shape == 3
 
     def test_model_property(self, amc):
         amc.model = keras.Model()
@@ -204,7 +204,7 @@ class TestMyPaperModel:
 
     @pytest.fixture
     def mpm(self):
-        return MyPaperModel(shape_inputs=[(7, 1, 9)], shape_outputs=[(4,)])
+        return MyPaperModel(input_shape=[(7, 1, 9)], output_shape=[(4,)])
 
     def test_init(self, mpm):
-        # check if loss number of loss functions fit to model outputs
+        # check if the number of loss functions fits the model outputs
diff --git a/test/test_run_modules/test_model_setup.py b/test/test_run_modules/test_model_setup.py
index 1b3e43b2bbfda44f1a5b5463e876adc578360ff3..d2989a937383aad71670f72f39ecfa8b8fed29dd 100644
--- a/test/test_run_modules/test_model_setup.py
+++ b/test/test_run_modules/test_model_setup.py
@@ -64,7 +64,7 @@ class TestModelSetup:
 
     @pytest.fixture
     def setup_with_model(self, setup):
-        setup.model = AbstractModelClass(shape_inputs=(12, 1), shape_outputs=2)
+        setup.model = AbstractModelClass(input_shape=(12, 1), output_shape=2)
         setup.model.test_param = "42"
         yield setup
         RunEnvironment().__del__()