diff --git a/src/model_modules/advanced_paddings.py b/src/model_modules/advanced_paddings.py
index d9e55c78fb6c78bbe219c820078c46a235627897..ea16e5b8a7c6a01456e286a2afaab4d5a88c96cc 100644
--- a/src/model_modules/advanced_paddings.py
+++ b/src/model_modules/advanced_paddings.py
@@ -254,10 +254,24 @@ class SymmetricPadding2D(_ZeroPadding):
 
 
 class Padding2D:
-    '''
-    This class combines the implemented padding methods. You can call this method by defining a specific padding type.
-    The __call__ method will return the corresponding Padding layer.
-    '''
+    """
+    Combine all implemented padding methods.
+
+    You can use this class by specifying a padding type. The __call__ method will then return the corresponding
+    padding layer.
+
+    .. code-block:: python
+
+        input_x = ... # your input data
+        kernel_size = (5, 1)
+        padding_size = PadUtils.get_padding_for_same(kernel_size)
+
+        tower = layers.Conv2D(...)(input_x)
+        tower = layers.Activation(...)(tower)
+        tower = Padding2D('ZeroPad2D')(padding=padding_size, name='Custom_Pad')(tower)
+
+    Padding type can either be set by a string or directly by using an instance of a valid padding class.
+    """
 
     allowed_paddings = {
         **dict.fromkeys(("RefPad2D", "ReflectionPadding2D"), ReflectionPadding2D),
diff --git a/src/model_modules/flatten.py b/src/model_modules/flatten.py
index efb0e977d1a1500599e04b29f09c4f2d19cada4c..e2dde4481cb405078dffdff324f5d8157388cd83 100644
--- a/src/model_modules/flatten.py
+++ b/src/model_modules/flatten.py
@@ -47,6 +47,17 @@ def flatten_tail(input_x: keras.layers, inner_neurons: int, activation: Union[Ca
     :param kernel_regularizer:
 
     :return:
+
+    .. code-block:: python
+
+        input_x = ... # your input data
+        conv_out = Conv2D(*args)(input_x) # your convolutional stack
+        out = flatten_tail(conv_out, inner_neurons=64, activation=keras.layers.advanced_activations.ELU,
+                           output_neurons=4,
+                           output_activation='linear', reduction_filter=64,
+                           name='Main', bound_weight=False, dropout_rate=.3,
+                           kernel_regularizer=keras.regularizers.l2()
+                           )
     """
     # compression layer
     if reduction_filter is None: