diff --git a/mlair/model_modules/branched_input_networks.py b/mlair/model_modules/branched_input_networks.py
index a7841f6aab031647e0a3a6a8af6b7c648179cbc3..d078afd55aa8fcecc1f844542076befa048d415b 100644
--- a/mlair/model_modules/branched_input_networks.py
+++ b/mlair/model_modules/branched_input_networks.py
@@ -1,5 +1,6 @@
 from functools import partial, reduce
 import copy
+from typing import Union
 
 from tensorflow import keras as keras
 
@@ -40,10 +41,14 @@ class BranchedInputCNN(CNNfromConfig):  # pragma: no cover
                         assert pos == stop_pos
                     break
                 layer, layer_kwargs, follow_up_layer = self._extract_layer_conf(layer_opts)
-                x_in_b = layer(**layer_kwargs, name=f"{layer.__name__}_branch{branch + 1}_{pos + 1}")(x_in_b)
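+                # name each layer descriptively, e.g. "Conv2D_3x3_branch1_2", instead of keras' defaults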
+                layer_name = self._get_layer_name(layer, layer_kwargs, pos, branch)
+                x_in_b = layer(**layer_kwargs, name=layer_name)(x_in_b)
                 if follow_up_layer is not None:
                     for follow_up in to_list(follow_up_layer):
-                        x_in_b = follow_up(name=f"{follow_up.__name__}_branch{branch + 1}_{pos + 1}")(x_in_b)
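+                        # follow-up layers (e.g. activations) take no kwargs, hence None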
+                        layer_name = self._get_layer_name(follow_up, None, pos, branch)
+                        x_in_b = follow_up(name=layer_name)(x_in_b)
                 self._layer_save.append({"layer": layer, **layer_kwargs, "follow_up_layer": follow_up_layer,
                                          "branch": branch})
             x_in.append(x_in_b)
@@ -55,10 +60,13 @@ class BranchedInputCNN(CNNfromConfig):  # pragma: no cover
             for pos, layer_opts in enumerate(self.conf[stop_pos + 1:]):
                 print(layer_opts)
                 layer, layer_kwargs, follow_up_layer = self._extract_layer_conf(layer_opts)
-                x_concat = layer(**layer_kwargs, name=f"{layer.__name__}_{pos + stop_pos + 1}")(x_concat)
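+                # offset by stop_pos so the numbering continues behind the branch part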
+                layer_name = self._get_layer_name(layer, layer_kwargs, pos + stop_pos, None)
+                x_concat = layer(**layer_kwargs, name=layer_name)(x_concat)
                 if follow_up_layer is not None:
                     for follow_up in to_list(follow_up_layer):
-                        x_concat = follow_up(name=f"{follow_up.__name__}_{pos + stop_pos + 1}")(x_concat)
+                        layer_name = self._get_layer_name(follow_up, None, pos + stop_pos, None)
+                        x_concat = follow_up(name=layer_name)(x_concat)
                 self._layer_save.append({"layer": layer, **layer_kwargs, "follow_up_layer": follow_up_layer,
                                          "branch": "concat"})
 
@@ -67,6 +75,24 @@ class BranchedInputCNN(CNNfromConfig):  # pragma: no cover
         self.model = keras.Model(inputs=x_input, outputs=[out])
         print(self.model.summary())
 
+    @staticmethod
+    def _get_layer_name(layer, layer_kwargs: Union[dict, None], pos: int, branch: Union[int, None] = None) -> str:
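+        """Return a descriptive layer name, e.g. ``Conv2D_3x3_branch1_2``.
+
+        Kernel and pool sizes are appended for convolution and pooling layers;
+        ``to_list`` renders both int and tuple sizes. Layers behind the
+        concatenation are named without a branch suffix.
+        """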
+        name = layer.__name__
+        if "Conv" in layer.__name__ and isinstance(layer_kwargs, dict) and "kernel_size" in layer_kwargs:
+            name = name + "_" + "x".join(map(str, layer_kwargs["kernel_size"]))
+        if "Pooling" in layer.__name__ and isinstance(layer_kwargs, dict) and "pool_size" in layer_kwargs:
+            name = name + "_" + "x".join(map(str, layer_kwargs["pool_size"]))
+        if branch is not None:
+            name += f"_branch{branch + 1}"
+        name += f"_{pos + 1}"
+        return name
+
 
 class BranchedInputRNN(RNN):  # pragma: no cover
     """A recurrent neural network with multiple input branches."""
diff --git a/mlair/model_modules/convolutional_networks.py b/mlair/model_modules/convolutional_networks.py
index a388f368366ff00a975f54e927b9a2b693cd7a4a..cd0e87d54995c70408f4c9fd57a95cf4e368632f 100644
--- a/mlair/model_modules/convolutional_networks.py
+++ b/mlair/model_modules/convolutional_networks.py
@@ -2,6 +2,7 @@ __author__ = "Lukas Leufen"
 __date__ = '2021-02-'
 
 from functools import reduce, partial
+from typing import Union
 
 from mlair.model_modules import AbstractModelClass
 from mlair.helpers import select_from_dict, to_list
@@ -80,13 +81,16 @@ class CNNfromConfig(AbstractModelClass):
         x_input = keras.layers.Input(shape=self._input_shape)
         x_in = x_input
 
-        for layer_opts in self.conf:
+        for pos, layer_opts in enumerate(self.conf):
             print(layer_opts)
             layer, layer_kwargs, follow_up_layer = self._extract_layer_conf(layer_opts)
-            x_in = layer(**layer_kwargs)(x_in)
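+            # attach a descriptive name, e.g. "Conv2D_3x3_1", to each layer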
+            layer_name = self._get_layer_name(layer, layer_kwargs, pos)
+            x_in = layer(**layer_kwargs, name=layer_name)(x_in)
             if follow_up_layer is not None:
                 for follow_up in to_list(follow_up_layer):
-                    x_in = follow_up()(x_in)
+                    layer_name = self._get_layer_name(follow_up, None, pos)
+                    x_in = follow_up(name=layer_name)(x_in)
             self._layer_save.append({"layer": layer, **layer_kwargs, "follow_up_layer": follow_up_layer})
 
         x_in = keras.layers.Dense(self._output_shape)(x_in)
@@ -94,6 +98,22 @@ class CNNfromConfig(AbstractModelClass):
         self.model = keras.Model(inputs=x_input, outputs=[out])
         print(self.model.summary())
 
+    @staticmethod
+    def _get_layer_name(layer, layer_kwargs: Union[dict, None], pos: int, *args) -> str:
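+        """Return a descriptive layer name, e.g. ``Conv2D_3x3_1``.
+
+        Kernel and pool sizes are appended for convolution and pooling layers;
+        ``to_list`` renders both int and tuple sizes. Extra positional
+        arguments are accepted for subclass compatibility and ignored.
+        """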
+        name = layer.__name__
+        if "Conv" in layer.__name__ and isinstance(layer_kwargs, dict) and "kernel_size" in layer_kwargs:
+            name = name + "_" + "x".join(map(str, layer_kwargs["kernel_size"]))
+        if "Pooling" in layer.__name__ and isinstance(layer_kwargs, dict) and "pool_size" in layer_kwargs:
+            name = name + "_" + "x".join(map(str, layer_kwargs["pool_size"]))
+        name += f"_{pos + 1}"
+        return name
+
     def _set_optimizer(self, optimizer, **kwargs):
         try:
             opt_name = optimizer.lower()