diff --git a/src/model_modules/flatten.py b/src/model_modules/flatten.py
index bbe92472ebb48e7486dede099dc098a161f51695..efb0e977d1a1500599e04b29f09c4f2d19cada4c 100644
--- a/src/model_modules/flatten.py
+++ b/src/model_modules/flatten.py
@@ -1,33 +1,82 @@
 __author__ = "Felix Kleinert, Lukas Leufen"
 __date__ = '2019-12-02'
 
-from typing import Callable
+from typing import Union, Callable
 
 import keras
 
 
-def flatten_tail(input_X: keras.layers, name: str, bound_weight: bool = False, dropout_rate: float = 0.0,
-                 window_lead_time: int = 4, activation: Callable = keras.activations.relu,
-                 reduction_filter: int = 64, first_dense: int = 64):
+def get_activation(input_to_activate: keras.layers, activation: Union[Callable, str], **kwargs):
+    """
+    Apply an activation to a given input layer.
 
-    X_in = keras.layers.Conv2D(reduction_filter, (1, 1), padding='same', name='{}_Conv_1x1'.format(name))(input_X)
+    This helper handles advanced Keras activation layers passed as callables (e.g. ``keras.layers.ELU``) as well as
+    plain strings naming standard activations (e.g. ``'relu'``).
+
+    :param input_to_activate: keras layer (tensor) the activation is applied to
+    :param activation: either a string naming a standard activation or a callable returning an advanced activation
+        layer (e.g. ``keras.layers.ELU``)
+    :param kwargs: additional keyword arguments passed on to the activation layer (e.g. ``name``)
+    :return: the activated layer (tensor)
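+
+    A minimal usage sketch (the input shape is purely illustrative):
+
+    .. code-block:: python
+
+        x_input = keras.layers.Input(shape=(8, 1, 2))
+        x_str = get_activation(x_input, 'relu', name='act_from_string')
+        x_adv = get_activation(x_input, keras.layers.ELU, name='act_from_callable')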
+    """
+    if isinstance(activation, str):
+        act = keras.layers.Activation(activation, **kwargs)(input_to_activate)
+    else:
+        act = activation(**kwargs)(input_to_activate)
+    return act
 
-    X_in = activation(name='{}_conv_act'.format(name))(X_in)
 
-    X_in = keras.layers.Flatten(name='{}'.format(name))(X_in)
+def flatten_tail(input_x: keras.layers, inner_neurons: int, activation: Union[Callable, str],
+                 output_neurons: int, output_activation: Union[Callable, str],
+                 reduction_filter: int = None,
+                 name: str = None,
+                 bound_weight: bool = False,
+                 dropout_rate: float = None,
+                 kernel_regularizer: keras.regularizers = None
+                 ):
+    """
+    Flatten the output of convolutional layers and map it via dense layers to the desired number of output neurons.
 
-    X_in = keras.layers.Dropout(dropout_rate, name='{}_Dropout_1'.format(name))(X_in)
-    X_in = keras.layers.Dense(first_dense, kernel_regularizer=keras.regularizers.l2(0.01),
-                              name='{}_Dense_1'.format(name))(X_in)
+    :param input_x: keras layer (tensor) to be flattened, typically the output of a convolutional block
+    :param inner_neurons: number of neurons of the inner dense layer
+    :param activation: activation of the reduction convolution and the inner dense layer; string or callable
+    :param output_neurons: number of neurons of the output dense layer (e.g. the forecast horizon)
+    :param output_activation: activation of the output dense layer; string or callable
+    :param reduction_filter: number of filters of the 1x1 reduction convolution; if None, no reduction is applied
+    :param name: name prefix for all layers of this tail
+    :param bound_weight: if True, bound the inner dense layer with a tanh activation instead of ``activation``
+    :param dropout_rate: dropout rate applied before the inner and the output dense layer; if None, no dropout is used
+    :param kernel_regularizer: regularizer applied to the kernels of the dense layers
+
+    :return: the output layer (tensor) of the flatten tail
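+
+    A minimal call sketch (shapes, names and parameter values are illustrative only):
+
+    .. code-block:: python
+
+        x_input = keras.layers.Input(shape=(8, 1, 2))
+        x_conv = keras.layers.Conv2D(16, (3, 1), padding='same')(x_input)
+        out = flatten_tail(x_conv, inner_neurons=64, activation='relu', output_neurons=4,
+                           output_activation='linear', reduction_filter=32, name='Tail',
+                           dropout_rate=0.2, kernel_regularizer=keras.regularizers.l2(0.01))
+        model = keras.Model(inputs=x_input, outputs=out)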
+    """
+    # compression layer
+    if reduction_filter is None:
+        x_in = input_x
+    else:
+        x_in = keras.layers.Conv2D(reduction_filter, (1, 1), name=f'{name}_Conv_1x1')(input_x)
+        x_in = get_activation(x_in, activation, name=f'{name}_conv_act')
+
+    x_in = keras.layers.Flatten(name=f'{name}')(x_in)
+
+    if dropout_rate is not None:
+        x_in = keras.layers.Dropout(dropout_rate, name=f'{name}_Dropout_1')(x_in)
+    x_in = keras.layers.Dense(inner_neurons, kernel_regularizer=kernel_regularizer,
+                              name=f'{name}_inner_Dense')(x_in)
     if bound_weight:
-        X_in = keras.layers.Activation('tanh')(X_in)
+        x_in = keras.layers.Activation('tanh')(x_in)
     else:
-        try:
-            X_in = activation(name='{}_act'.format(name))(X_in)
-        except:
-            X_in = activation()(X_in)
-
-    X_in = keras.layers.Dropout(dropout_rate, name='{}_Dropout_2'.format(name))(X_in)
-    out = keras.layers.Dense(window_lead_time, activation='linear', kernel_regularizer=keras.regularizers.l2(0.01),
-                             name='{}_Dense_2'.format(name))(X_in)
+        x_in = get_activation(x_in, activation, name=f'{name}_act')
+
+    if dropout_rate is not None:
+        x_in = keras.layers.Dropout(dropout_rate, name=f'{name}_Dropout_2')(x_in)
+    out = keras.layers.Dense(output_neurons, kernel_regularizer=kernel_regularizer,
+                             name=f'{name}_out_Dense')(x_in)
+    out = get_activation(out, output_activation, name=f'{name}_final_act')
     return out
diff --git a/src/model_modules/model_class.py b/src/model_modules/model_class.py
index d6dcea179bcfa8a6ec41518db34b186e30d908fc..d11862cfcc7ac8a2aa5e9508838018116cbc6a74 100644
--- a/src/model_modules/model_class.py
+++ b/src/model_modules/model_class.py
@@ -378,8 +378,14 @@ class MyTowerModel(AbstractModelClass):
                                                batch_normalisation=True)
         #############################################
 
-        out_main = flatten_tail(X_in, 'Main', activation=activation, bound_weight=True, dropout_rate=self.dropout_rate,
-                                reduction_filter=64, first_dense=64, window_lead_time=self.window_lead_time)
+        out_main = flatten_tail(X_in, inner_neurons=64, activation=activation, output_neurons=self.window_lead_time,
+                                output_activation='linear', reduction_filter=64,
+                                name='Main', bound_weight=True, dropout_rate=self.dropout_rate,
+                                kernel_regularizer=self.regularizer
+                                )
 
         self.model = keras.Model(inputs=X_input, outputs=[out_main])
 
@@ -498,8 +504,13 @@ class MyPaperModel(AbstractModelClass):
                                                regularizer=self.regularizer,
                                                batch_normalisation=True,
                                                padding=self.padding)
-        out_minor1 = flatten_tail(X_in, 'minor_1', False, self.dropout_rate, self.window_lead_time,
-                                  self.activation, 32, 64)
+        out_minor1 = flatten_tail(X_in, inner_neurons=64, activation=activation, output_neurons=self.window_lead_time,
+                                  output_activation='linear', reduction_filter=32,
+                                  name='minor_1', bound_weight=False, dropout_rate=self.dropout_rate,
+                                  kernel_regularizer=self.regularizer
+                                  )
 
         X_in = keras.layers.Dropout(self.dropout_rate)(X_in)
 
@@ -512,8 +523,11 @@ class MyPaperModel(AbstractModelClass):
         #                                        batch_normalisation=True)
         #############################################
 
-        out_main = flatten_tail(X_in, 'Main', activation=activation, bound_weight=False, dropout_rate=self.dropout_rate,
-                                reduction_filter=64 * 2, first_dense=64 * 2, window_lead_time=self.window_lead_time)
+        out_main = flatten_tail(X_in, inner_neurons=64 * 2, activation=activation, output_neurons=self.window_lead_time,
+                                output_activation='linear', reduction_filter=64 * 2,
+                                name='Main', bound_weight=False, dropout_rate=self.dropout_rate,
+                                kernel_regularizer=self.regularizer
+                                )
 
         self.model = keras.Model(inputs=X_input, outputs=[out_minor1, out_main])
 
diff --git a/test/test_modules/test_training.py b/test/test_modules/test_training.py
index 31c673f05d055eb7c4ee76318711de030d97d480..d3127de1afe0c1691b72dca0408e428fb5944bf4 100644
--- a/test/test_modules/test_training.py
+++ b/test/test_modules/test_training.py
@@ -28,11 +28,19 @@ def my_test_model(activation, window_history_size, channels, dropout_rate, add_m
     X_input = keras.layers.Input(shape=(window_history_size + 1, 1, channels))
     X_in = inception_model.inception_block(X_input, conv_settings_dict1, pool_settings_dict1)
     if add_minor_branch:
-        out = [flatten_tail(X_in, 'Minor_1', activation=activation)]
+        out = [flatten_tail(X_in, inner_neurons=64, activation=activation, output_neurons=4,
+                            output_activation='linear', reduction_filter=64,
+                            name='Minor_1', dropout_rate=dropout_rate,
+                            )]
     else:
         out = []
     X_in = keras.layers.Dropout(dropout_rate)(X_in)
-    out.append(flatten_tail(X_in, 'Main', activation=activation))
+    out.append(flatten_tail(X_in, inner_neurons=64, activation=activation, output_neurons=4,
+                            output_activation='linear', reduction_filter=64,
+                            name='Main', dropout_rate=dropout_rate,
+                            ))
     return keras.Model(inputs=X_input, outputs=out)