diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index eacbe3e26323e0a0bf1579cba53e2e12ecfd27c0..4a59b5b91edbe7a918a80884cf9e38a5d70a8826 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -21,6 +21,7 @@ version:
   artifacts:
     name: pages
     when: always
+    expire_in: 1 week
     paths:
       - badges/
 
@@ -54,6 +55,7 @@ tests (from scratch):
   artifacts:
     name: pages
     when: always
+    expire_in: 1 week
     paths:
       - badges/
       - test_results/
@@ -107,6 +109,7 @@ tests:
   artifacts:
     name: pages
     when: always
+    expire_in: 1 week
     paths:
       - badges/
       - test_results/
@@ -131,6 +134,7 @@ coverage:
   artifacts:
     name: pages
     when: always
+    expire_in: 1 week
     paths:
       - badges/
       - coverage/
@@ -155,6 +159,7 @@ sphinx docs:
   artifacts:
     name: pages
     when: always
+    expire_in: 1 week
     paths:
       - badges/
       - webpage/
@@ -189,6 +194,7 @@ pages:
   artifacts:
     name: pages
     when: always
+    expire_in: 1 week
     paths:
       - public
       - badges/
diff --git a/mlair/helpers/logger.py b/mlair/helpers/logger.py
index 51ecde41192cb3a2838e443c3c338c5ac4e29b4d..d960ee6f0b0f1f3b76662817cd1bbf5f68772084 100644
--- a/mlair/helpers/logger.py
+++ b/mlair/helpers/logger.py
@@ -19,6 +19,10 @@ class Logger:
         # define shared logger format
         self.formatter = '%(asctime)s - %(levelname)s: %(message)s  [%(filename)s:%(funcName)s:%(lineno)s]'
 
+        # ensure defaults
+        level_stream = level_stream or logging.INFO
+        level_file = level_file or logging.DEBUG
+
         # set log path
         self.log_file = self.setup_logging_path(log_path)
         # set root logger as file handler
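The Logger change above lets callers pass None and still get sensible handler levels, which is what the RunEnvironment change further down relies on. A minimal sketch of that fallback, assuming a hypothetical resolve_levels helper used purely for illustration (note the usual caveat of the "or" idiom):

```python
import logging

def resolve_levels(level_stream=None, level_file=None):
    # "x or default" falls back whenever x is falsy; this covers the intended None case
    # but would also replace an explicit logging.NOTSET (== 0) with the default.
    return level_stream or logging.INFO, level_file or logging.DEBUG

assert resolve_levels() == (logging.INFO, logging.DEBUG)
assert resolve_levels(level_stream=logging.WARNING) == (logging.WARNING, logging.DEBUG)
```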
diff --git a/mlair/model_modules/recurrent_networks.py b/mlair/model_modules/recurrent_networks.py
index 6ec920c1cde08c0d2fc6064528eea800fbdde2a7..e909ae7696bdf90d4e9a95e020b75a97e15dfd50 100644
--- a/mlair/model_modules/recurrent_networks.py
+++ b/mlair/model_modules/recurrent_networks.py
@@ -33,7 +33,7 @@ class RNN(AbstractModelClass):  # pragma: no cover
     def __init__(self, input_shape: list, output_shape: list, activation="relu", activation_output="linear",
                  activation_rnn="tanh", dropout_rnn=0,
                  optimizer="adam", n_layer=1, n_hidden=10, regularizer=None, dropout=None, layer_configuration=None,
-                 batch_normalization=False, rnn_type="lstm", add_dense_layer=False, **kwargs):
+                 batch_normalization=False, rnn_type="lstm", add_dense_layer=False, dense_layer_configuration=None, **kwargs):
         """
         Sets model and loss depending on the given arguments.
 
@@ -64,6 +64,15 @@ class RNN(AbstractModelClass):  # pragma: no cover
             is added if set to false. (Default false)
         :param rnn_type: define which kind of recurrent network should be applied. Chose from either lstm or gru. All
             units will be of this kind. (Default lstm)
+        :param add_dense_layer: set True to add additional dense layers between the last recurrent layer and the
+            output layer. If dense_layer_configuration is not specified further, a single layer is added with n
+            neurons, where n is equal to min(n_previous_layer, n_output**2). If set to False, the output layer
+            directly follows the last recurrent layer.
+        :param dense_layer_configuration: specify the number of dense layers and the number of neurons per layer as
+            a list, where each element gives the number of neurons of one layer and the length of the list gives the
+            number of layers to add. The last dense layer is followed by the output layer. If an element is smaller
+            than the number of output neurons, the addition of dense layers stops at that point and the remaining
+            elements are ignored.
         """
 
         assert len(input_shape) == 1
@@ -80,6 +89,7 @@ class RNN(AbstractModelClass):  # pragma: no cover
         self.optimizer = self._set_optimizer(optimizer.lower(), **kwargs)
         self.bn = batch_normalization
         self.add_dense_layer = add_dense_layer
+        self.dense_layer_configuration = dense_layer_configuration or []
         self.layer_configuration = (n_layer, n_hidden) if layer_configuration is None else layer_configuration
         self.RNN = self._rnn.get(rnn_type.lower())
         self._update_model_name(rnn_type)
@@ -119,9 +129,22 @@ class RNN(AbstractModelClass):  # pragma: no cover
                 x_in = self.dropout(self.dropout_rate)(x_in)
 
         if self.add_dense_layer is True:
-            x_in = keras.layers.Dense(min(self._output_shape ** 2, conf[-1]), name=f"Dense_{len(conf) + 1}",
-                                      kernel_initializer=self.kernel_initializer, )(x_in)
-            x_in = self.activation(name=f"{self.activation_name}_{len(conf) + 1}")(x_in)
+            if len(self.dense_layer_configuration) == 0:
+                x_in = keras.layers.Dense(min(self._output_shape ** 2, conf[-1]), name=f"Dense_{len(conf) + 1}",
+                                          kernel_initializer=self.kernel_initializer, )(x_in)
+                x_in = self.activation(name=f"{self.activation_name}_{len(conf) + 1}")(x_in)
+                if self.dropout is not None:
+                    x_in = self.dropout(self.dropout_rate)(x_in)
+            else:
+                for layer, n_hidden in enumerate(self.dense_layer_configuration):
+                    if n_hidden < self._output_shape:
+                        break
+                    x_in = keras.layers.Dense(n_hidden, name=f"Dense_{len(conf) + layer + 1}",
+                                              kernel_initializer=self.kernel_initializer, )(x_in)
+                    x_in = self.activation(name=f"{self.activation_name}_{len(conf) + layer + 1}")(x_in)
+                    if self.dropout is not None:
+                        x_in = self.dropout(self.dropout_rate)(x_in)
+
         x_in = keras.layers.Dense(self._output_shape)(x_in)
         out = self.activation_output(name=f"{self.activation_output_name}_output")(x_in)
         self.model = keras.Model(inputs=x_input, outputs=[out])
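A stand-alone sketch (no keras; the planned_dense_layers helper is hypothetical and only for illustration) of how the loop above interprets dense_layer_configuration, including the early stop once an entry is smaller than the output size:

```python
def planned_dense_layers(dense_layer_configuration, n_output):
    """Return the dense layer sizes that the builder above would actually add."""
    layers = []
    for n_hidden in dense_layer_configuration:
        if n_hidden < n_output:  # entry smaller than the output layer -> stop adding layers
            break
        layers.append(n_hidden)
    return layers

# with 4 output neurons, [128, 64, 2, 32] yields only the 128- and 64-neuron layers
assert planned_dense_layers([128, 64, 2, 32], n_output=4) == [128, 64]
# an empty configuration keeps the previous behaviour: a single layer of
# min(n_output**2, width of the last recurrent layer), as coded above
assert planned_dense_layers([], n_output=4) == []
```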
diff --git a/mlair/run_modules/run_environment.py b/mlair/run_modules/run_environment.py
index 5414b21cb0cb26674c699a02c22400959e11f1aa..df34345b4fb67e764f6e4d8d6570a5fafb762304 100644
--- a/mlair/run_modules/run_environment.py
+++ b/mlair/run_modules/run_environment.py
@@ -92,12 +92,12 @@ class RunEnvironment(object):
     logger = None
     tracker_list = []
 
-    def __init__(self, name=None):
+    def __init__(self, name=None, log_level_stream=None):
         """Start time tracking automatically and logs as info."""
         if RunEnvironment.data_store is None:
             RunEnvironment.data_store = DataStoreObject()
         if RunEnvironment.logger is None:
-            RunEnvironment.logger = Logger()
+            RunEnvironment.logger = Logger(level_stream=log_level_stream)
         self._name = name if name is not None else self.__class__.__name__
         self.time = TimeTracking(name=name)
         logging.info(f"{self._name} started")
diff --git a/mlair/workflows/abstract_workflow.py b/mlair/workflows/abstract_workflow.py
index c969aa35ebca60aa749a294bcaa5de727407a461..adb718b7a45dfbec60f88765b5a9f869c177b73b 100644
--- a/mlair/workflows/abstract_workflow.py
+++ b/mlair/workflows/abstract_workflow.py
@@ -13,9 +13,10 @@ class Workflow:
     method is sufficient. It must be taken care for inter-stage dependencies, this workflow class only handles the
     execution but not the dependencies (workflow would probably fail in this case)."""
 
-    def __init__(self, name=None):
+    def __init__(self, name=None, log_level_stream=None):
         self._registry_kwargs = {}
         self._registry = []
+        self._log_level_stream = log_level_stream
         self._name = name if name is not None else self.__class__.__name__
 
     def add(self, stage, **kwargs):
@@ -25,6 +26,6 @@ class Workflow:
 
     def run(self):
         """Run workflow embedded in a run environment and according to the stage's ordering."""
-        with RunEnvironment(name=self._name):
+        with RunEnvironment(name=self._name, log_level_stream=self._log_level_stream):
             for pos, stage in enumerate(self._registry):
                 stage(**self._registry_kwargs[pos])
diff --git a/mlair/workflows/default_workflow.py b/mlair/workflows/default_workflow.py
index 961979cb774e928bda96d4cd1a3a7b0f8565e968..3c75d9809f59ed8e5e970ba1b2c3245adbc0459e 100644
--- a/mlair/workflows/default_workflow.py
+++ b/mlair/workflows/default_workflow.py
@@ -36,8 +36,9 @@ class DefaultWorkflow(Workflow):
                  batch_size=None,
                  epochs=None,
                  data_handler=None,
+                 log_level_stream=None,
                  **kwargs):
-        super().__init__()
+        super().__init__(log_level_stream=log_level_stream)
 
         # extract all given kwargs arguments
         params = remove_items(inspect.getfullargspec(self.__init__).args, "self")
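Taken together, the workflow and run-environment changes thread a stream log level from the public entry point down to the Logger. A usage sketch under the assumption of an otherwise standard MLAir setup (all other DefaultWorkflow arguments omitted):

```python
import logging
from mlair.workflows.default_workflow import DefaultWorkflow

# quieter console: only WARNING and above on the stream handler,
# while the log file keeps the full DEBUG output (Logger's level_file default)
workflow = DefaultWorkflow(log_level_stream=logging.WARNING)
workflow.run()
```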