diff --git a/mlair/model_modules/branched_input_networks.py b/mlair/model_modules/branched_input_networks.py
index af3a8bffa3169556d55af94192915e3a27f89cc1..acb83967b783a806e4221738c1e7144f6850c593 100644
--- a/mlair/model_modules/branched_input_networks.py
+++ b/mlair/model_modules/branched_input_networks.py
@@ -1,3 +1,4 @@
+import logging
 from functools import partial, reduce
 import copy
 from typing import Union
@@ -9,6 +10,8 @@ from mlair.helpers import select_from_dict, to_list
 from mlair.model_modules.loss import var_loss
 from mlair.model_modules.recurrent_networks import RNN
 from mlair.model_modules.convolutional_networks import CNNfromConfig
+from mlair.model_modules.residual_networks import ResNet
+from mlair.model_modules.u_networks import UNet
 
 
 class BranchedInputCNN(CNNfromConfig):  # pragma: no cover
@@ -367,3 +370,189 @@ class BranchedInputFCN(AbstractModelClass):  # pragma: no cover
                                 "metrics": ["mse", "mae", var_loss]}
         # self.compile_options = {"loss": [custom_loss([keras.losses.mean_squared_error, var_loss], loss_weights=[2, 1])],
         #                         "metrics": ["mse", "mae", var_loss]}
+
+
+class BranchedInputUNet(UNet, BranchedInputCNN):  # pragma: no cover
+    """
+    A U-net neural network with multiple input branches.
+
+    ```python
+
+    input_shape = [(72,1,9), (72,1,9)]
+    output_shape = [(4, )]
+
+    # model
+    layer_configuration=[
+
+        # 1st block (down)
+        {"type": "Conv2D", "activation": "relu", "kernel_size": (3, 1), "filters": 16, "padding": "same"},
+        {"type": "Dropout", "rate": 0.25},
+        {"type": "Conv2D", "activation": "relu", "kernel_size": (3, 1), "filters": 16, "padding": "same"},
+        {"type": "blocksave"},
+        {"type": "MaxPooling2D", "pool_size": (2, 1), "strides": (2, 1)},
+
+        # 2nd block (down)
+        {"type": "Conv2D", "activation": "relu", "kernel_size": (3, 1), "filters": 32, "padding": "same"},
+        {"type": "Dropout", "rate": 0.25},
+        {"type": "Conv2D", "activation": "relu", "kernel_size": (3, 1), "filters": 32, "padding": "same"},
+        {"type": "blocksave"},
+        {"type": "MaxPooling2D", "pool_size": (2, 1), "strides": (2, 1)},
+
+        # 3rd block (down)
+        {"type": "Conv2D", "activation": "relu", "kernel_size": (3, 1), "filters": 64, "padding": "same"},
+        {"type": "Dropout", "rate": 0.25},
+        {"type": "Conv2D", "activation": "relu", "kernel_size": (3, 1), "filters": 64, "padding": "same"},
+        {"type": "blocksave"},
+        {"type": "MaxPooling2D", "pool_size": (2, 1), "strides": (2, 1)},
+
+        # 4th block (final down)
+        {"type": "Conv2D", "activation": "relu", "kernel_size": (3, 1), "filters": 128, "padding": "same"},
+        {"type": "Dropout", "rate": 0.25},
+        {"type": "Conv2D", "activation": "relu", "kernel_size": (3, 1), "filters": 128, "padding": "same"},
+
+        # 5th block (up)
+        {"type": "Conv2DTranspose", "activation": "relu", "kernel_size": (2, 1), "filters": 64, "strides": (2, 1),
+         "padding": "same"},
+        {"type": "ConcatenateUNet"},
+        {"type": "Conv2D", "activation": "relu", "kernel_size": (3, 1), "filters": 64, "padding": "same"},
+        {"type": "Dropout", "rate": 0.25},
+        {"type": "Conv2D", "activation": "relu", "kernel_size": (3, 1), "filters": 64, "padding": "same"},
+
+        # 6th block (up)
+        {"type": "Conv2DTranspose", "activation": "relu", "kernel_size": (2, 1), "filters": 32, "strides": (2, 1),
+         "padding": "same"},
+        {"type": "ConcatenateUNet"},
+        {"type": "Conv2D", "activation": "relu", "kernel_size": (3, 1), "filters": 32, "padding": "same"},
+        {"type": "Dropout", "rate": 0.25},
+        {"type": "Conv2D", "activation": "relu", "kernel_size": (3, 1), "filters": 32, "padding": "same"},
+
+        # 7th block (up)
+        {"type": "Conv2DTranspose", "activation": "relu", "kernel_size": (2, 1), "filters": 16, "strides": (2, 1),
+         "padding": "same"},
+        {"type": "ConcatenateUNet"},
+        {"type": "Conv2D", "activation": "relu", "kernel_size": (3, 1), "filters": 16, "padding": "same"},
+        {"type": "Dropout", "rate": 0.25},
+        {"type": "Conv2D", "activation": "relu", "kernel_size": (3, 1), "filters": 16, "padding": "same"},
+
+        # Tail
+        {"type": "Concatenate"},
+        {"type": "Flatten"},
+        {"type": "Dense", "units": 128, "activation": "relu"}
+    ]
+
+    model = BranchedInputUNet(input_shape, output_shape, layer_configuration)
+    ```
+
+    """
+
+    def __init__(self, input_shape, output_shape, layer_configuration: list, optimizer="adam", **kwargs):
+
+        super().__init__(input_shape, output_shape, layer_configuration, optimizer=optimizer, **kwargs)
+
+    def set_model(self):
+
+        x_input = []
+        x_in = []
+        stop_pos = None
+        block_save = []
+
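+        # build one sub-network per input branch; the shared layer configuration is applied up to the
+        # plain "Concatenate" marker, after which all branches are merged and the remaining layers follow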
+        for branch in range(len(self._input_shape)):
+            print(branch)
+            block_save = []
+            shape_b = self._input_shape[branch]
+            x_input_b = keras.layers.Input(shape=shape_b, name=f"input_branch{branch + 1}")
+            x_input.append(x_input_b)
+            x_in_b = x_input_b
+            b_conf = copy.deepcopy(self.conf)
+
+            for pos, layer_opts in enumerate(b_conf):
+                print(layer_opts)
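+                # a plain "Concatenate" entry ends the per-branch part; it must occur at the same
+                # position in every branch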
+                if layer_opts.get("type") == "Concatenate":
+                    if stop_pos is None:
+                        stop_pos = pos
+                    else:
+                        assert pos == stop_pos
+                    break
+                layer, layer_kwargs, follow_up_layer = self._extract_layer_conf(layer_opts)
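+                # "blocksave" markers store the current tensor of this branch for a later skip connection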
+                if layer == "blocksave":
+                    block_save.append(x_in_b)
+                    continue
+                layer_name = self._get_layer_name(layer, layer_kwargs, pos, branch)
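+                # a "ConcatenateUNet" layer merges the current tensor with the most recently saved block
+                # (skip connection)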
+                if "Concatenate" in layer_name:
+                    x_in_b = layer(name=layer_name)([x_in_b, block_save.pop(-1)])
+                    self._layer_save.append({"layer": layer, "follow_up_layer": follow_up_layer})
+                    continue
+                x_in_b = layer(**layer_kwargs, name=layer_name)(x_in_b)
+                if follow_up_layer is not None:
+                    for follow_up in to_list(follow_up_layer):
+                        layer_name = self._get_layer_name(follow_up, None, pos, branch)
+                        x_in_b = follow_up(name=layer_name)(x_in_b)
+                self._layer_save.append({"layer": layer, **layer_kwargs, "follow_up_layer": follow_up_layer,
+                                         "branch": branch})
+            x_in.append(x_in_b)
+
+        print("concat")
+        x_concat = keras.layers.Concatenate()(x_in)
+        if len(block_save) > 0:
+            logging.warning("Branches of BranchedInputUNet are concatenated before the last upsampling block is applied.")
+            block_save = []
+
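+        # apply the remaining layers (everything after the plain "Concatenate" marker) to the merged branches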
+        if stop_pos is not None:
+            for pos, layer_opts in enumerate(self.conf[stop_pos + 1:]):
+                print(layer_opts)
+                layer, layer_kwargs, follow_up_layer = self._extract_layer_conf(layer_opts)
+                if layer == "blocksave":
+                    block_save.append(x_concat)
+                    continue
+                layer_name = self._get_layer_name(layer, layer_kwargs, pos + stop_pos, None)
+                if "Concatenate" in layer_name:
+                    x_concat = layer(name=layer_name)([x_concat, block_save.pop(-1)])
+                    self._layer_save.append({"layer": layer, "follow_up_layer": follow_up_layer})
+                    continue
+                x_concat = layer(**layer_kwargs, name=layer_name)(x_concat)
+                if follow_up_layer is not None:
+                    for follow_up in to_list(follow_up_layer):
+                        layer_name = self._get_layer_name(follow_up, None, pos + stop_pos, None)
+                        x_concat = follow_up(name=layer_name)(x_concat)
+                self._layer_save.append({"layer": layer, **layer_kwargs, "follow_up_layer": follow_up_layer,
+                                         "branch": "concat"})
+
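+        # map the merged features onto the output shape and apply the output activation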
+        x_concat = keras.layers.Dense(self._output_shape)(x_concat)
+        out = self.activation_output(name=f"{self.activation_output_name}_output")(x_concat)
+        self.model = keras.Model(inputs=x_input, outputs=[out])
+        print(self.model.summary())
+
+
+class BranchedInputResNet(ResNet, BranchedInputCNN):  # pragma: no cover
+    """
+    A convolutional neural network with multiple input branches and residual blocks (skip connections).
+
+    ```python
+    input_shape = [(65,1,9), (65,1,9)]
+    output_shape = [(4, )]
+
+    # model
+    layer_configuration=[
+        {"type": "Conv2D", "activation": "relu", "kernel_size": (7, 1), "filters": 32, "padding": "same"},
+        {"type": "MaxPooling2D", "pool_size": (2, 1), "strides": (2, 1)},
+        {"type": "residual_block", "activation": "relu", "kernel_size": (3, 1), "filters": 32, "strides": (1, 1), "kernel_regularizer": "l2"},
+        {"type": "residual_block", "activation": "relu", "kernel_size": (3, 1), "filters": 32, "strides": (1, 1), "kernel_regularizer": "l2"},
+        {"type": "residual_block", "activation": "relu", "kernel_size": (3, 1), "filters": 64, "strides": (1, 1), "kernel_regularizer": "l2", "use_1x1conv": True},
+        {"type": "residual_block", "activation": "relu", "kernel_size": (3, 1), "filters": 64, "strides": (1, 1), "kernel_regularizer": "l2"},
+        {"type": "residual_block", "activation": "relu", "kernel_size": (3, 1), "filters": 128, "strides": (1, 1), "kernel_regularizer": "l2", "use_1x1conv": True},
+        {"type": "residual_block", "activation": "relu", "kernel_size": (3, 1), "filters": 128, "strides": (1, 1), "kernel_regularizer": "l2"},
+        {"type": "MaxPooling2D", "pool_size": (2, 1), "strides": (2, 1)},
+        {"type": "Dropout", "rate": 0.25},
+        {"type": "Flatten"},
+        {"type": "Concatenate"},
+        {"type": "Dense", "units": 128, "activation": "relu"}
+    ]
+
+    model = BranchedInputResNet(input_shape, output_shape, layer_configuration)
+    ```
+
+    """
+
+    def __init__(self, input_shape: list, output_shape: list, layer_configuration: list, optimizer="adam", **kwargs):
+
+        super().__init__(input_shape, output_shape, layer_configuration, optimizer=optimizer, **kwargs)
diff --git a/mlair/model_modules/residual_networks.py b/mlair/model_modules/residual_networks.py
index a9b502c4ef9ba5daa2b624f678b1f951dad3b747..913ed7b89125abffcb3f91a926adc9b2cd1b22a5 100644
--- a/mlair/model_modules/residual_networks.py
+++ b/mlair/model_modules/residual_networks.py
@@ -1,16 +1,16 @@
 __author__ = "Lukas Leufen"
-__date__ = "2021-08-23"
+__date__ = "2022-08-23"
 
 from functools import partial
 
-from mlair.model_modules.branched_input_networks import BranchedInputCNN
+from mlair.model_modules.convolutional_networks import CNNfromConfig
 
 import tensorflow.keras as keras
 
 
-class BranchedInputResNet(BranchedInputCNN):
+class ResNet(CNNfromConfig):  # pragma: no cover
     """
-    A convolutional neural network with multiple input branches and residual blocks (skip connections).
+    A convolutional neural network with residual blocks (skip connections).
 
     ```python
     input_shape = [(65,1,9)]
@@ -29,11 +29,10 @@ class BranchedInputResNet(BranchedInputCNN):
         {"type": "MaxPooling2D", "pool_size": (2, 1), "strides": (2, 1)},
         {"type": "Dropout", "rate": 0.25},
         {"type": "Flatten"},
-        {"type": "Concatenate"},
         {"type": "Dense", "units": 128, "activation": "relu"}
     ]
 
-    model = BranchedInputResNet(input_shape, output_shape, layer_configuration)
+    model = ResNet(input_shape, output_shape, layer_configuration)
     ```
 
     """
diff --git a/mlair/model_modules/u_networks.py b/mlair/model_modules/u_networks.py
new file mode 100644
index 0000000000000000000000000000000000000000..2462553d30e84a23ae1f56528bd912d9e25e14b9
--- /dev/null
+++ b/mlair/model_modules/u_networks.py
@@ -0,0 +1,138 @@
+__author__ = "Lukas Leufen"
+__date__ = "2022-08-29"
+
+
+from functools import partial
+
+from mlair.helpers import select_from_dict, to_list
+from mlair.model_modules.convolutional_networks import CNNfromConfig
+import tensorflow.keras as keras
+
+
+class UNet(CNNfromConfig):  # pragma: no cover
+    """
+    A U-net neural network.
+
+    ```python
+    input_shape = [(64,1,9)]
+    output_shape = [(4, )]
+
+    # model
+    layer_configuration=[
+
+        # 1st block (down)
+        {"type": "Conv2D", "activation": "relu", "kernel_size": (3, 1), "filters": 16, "padding": "same"},
+        {"type": "Dropout", "rate": 0.25},
+        {"type": "Conv2D", "activation": "relu", "kernel_size": (3, 1), "filters": 16, "padding": "same"},
+        {"type": "MaxPooling2D", "pool_size": (2, 1), "strides": (2, 1)},
+        {"type": "blocksave"},
+
+        # 2nd block (down)
+        {"type": "Conv2D", "activation": "relu", "kernel_size": (3, 1), "filters": 32, "padding": "same"},
+        {"type": "Dropout", "rate": 0.25},
+        {"type": "Conv2D", "activation": "relu", "kernel_size": (3, 1), "filters": 32, "padding": "same"},
+        {"type": "MaxPooling2D", "pool_size": (2, 1), "strides": (2, 1)},
+        {"type": "blocksave"},
+
+        # 3rd block (down)
+        {"type": "Conv2D", "activation": "relu", "kernel_size": (3, 1), "filters": 64, "padding": "same"},
+        {"type": "Dropout", "rate": 0.25},
+        {"type": "Conv2D", "activation": "relu", "kernel_size": (3, 1), "filters": 64, "padding": "same"},
+        {"type": "MaxPooling2D", "pool_size": (2, 1), "strides": (2, 1)},
+        {"type": "blocksave"},
+
+        # 4th block (down)
+        {"type": "Conv2D", "activation": "relu", "kernel_size": (3, 1), "filters": 128, "padding": "same"},
+        {"type": "Dropout", "rate": 0.25},
+        {"type": "Conv2D", "activation": "relu", "kernel_size": (3, 1), "filters": 128, "padding": "same"},
+        {"type": "MaxPooling2D", "pool_size": (2, 1), "strides": (2, 1)},
+        {"type": "blocksave"},
+
+        # 5th block (final down)
+        {"type": "Conv2D", "activation": "relu", "kernel_size": (3, 1), "filters": 256, "padding": "same"},
+        {"type": "Dropout", "rate": 0.25},
+        {"type": "Conv2D", "activation": "relu", "kernel_size": (3, 1), "filters": 256, "padding": "same"},
+
+        # 6th block (up)
+        {"type": "Conv2DTranspose", "activation": "relu", "kernel_size": (2, 1), "filters": 128, "strides": (2, 1),
+         "padding": "same"},
+        {"type": "ConcatenateUNet"},
+        {"type": "Conv2D", "activation": "relu", "kernel_size": (3, 1), "filters": 128, "padding": "same"},
+        {"type": "Dropout", "rate": 0.25},
+        {"type": "Conv2D", "activation": "relu", "kernel_size": (3, 1), "filters": 128, "padding": "same"},
+
+        # 7th block (up)
+        {"type": "Conv2DTranspose", "activation": "relu", "kernel_size": (2, 1), "filters": 64, "strides": (2, 1),
+         "padding": "same"},
+        {"type": "ConcatenateUNet"},
+        {"type": "Conv2D", "activation": "relu", "kernel_size": (3, 1), "filters": 64, "padding": "same"},
+        {"type": "Dropout", "rate": 0.25},
+        {"type": "Conv2D", "activation": "relu", "kernel_size": (3, 1), "filters": 64, "padding": "same"},
+
+        # 8th block (up)
+        {"type": "Conv2DTranspose", "activation": "relu", "kernel_size": (2, 1), "filters": 32, "strides": (2, 1),
+         "padding": "same"},
+        {"type": "ConcatenateUNet"},
+        {"type": "Conv2D", "activation": "relu", "kernel_size": (3, 1), "filters": 32, "padding": "same"},
+        {"type": "Dropout", "rate": 0.25},
+        {"type": "Conv2D", "activation": "relu", "kernel_size": (3, 1), "filters": 32, "padding": "same"},
+
+        # 9th block (up)
+        {"type": "Conv2DTranspose", "activation": "relu", "kernel_size": (2, 1), "filters": 16, "strides": (2, 1),
+         "padding": "same"},
+        {"type": "ConcatenateUNet"},
+        {"type": "Conv2D", "activation": "relu", "kernel_size": (3, 1), "filters": 16, "padding": "same"},
+        {"type": "Dropout", "rate": 0.25},
+        {"type": "Conv2D", "activation": "relu", "kernel_size": (3, 1), "filters": 16, "padding": "same"},
+
+        # Tail
+        {"type": "Flatten"},
+        {"type": "Dense", "units": 128, "activation": "relu"}
+    ]
+
+    model = UNet(input_shape, output_shape, layer_configuration)
+    ```
+
+    """
+
+    def __init__(self, input_shape: list, output_shape: list, layer_configuration: list, optimizer="adam", **kwargs):
+
+        super().__init__(input_shape, output_shape, layer_configuration, optimizer=optimizer, **kwargs)
+
+    def _extract_layer_conf(self, layer_opts):
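+        # "ConcatenateUNet" and "blocksave" are U-Net specific markers; all other layer types are
+        # resolved by the parent class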
+        if layer_opts["type"] == "ConcatenateUNet":
+            layer = getattr(keras.layers, "Concatenate", None)
+            return layer, None, None
+        elif layer_opts["type"] == "blocksave":
+            return "blocksave", None, None
+        else:
+            return super()._extract_layer_conf(layer_opts)
+
+    def set_model(self):
+        x_input = keras.layers.Input(shape=self._input_shape)
+        x_in = x_input
+        block_save = []
+
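+        # on the way down, "blocksave" stores the current tensor; on the way up, "ConcatenateUNet"
+        # concatenates the upsampled tensor with the most recently saved one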
+        for pos, layer_opts in enumerate(self.conf):
+            print(layer_opts)
+            layer, layer_kwargs, follow_up_layer = self._extract_layer_conf(layer_opts)
+            if layer == "blocksave":
+                block_save.append(x_in)
+                continue
+            layer_name = self._get_layer_name(layer, layer_kwargs, pos)
+            if "Concatenate" in layer_name:
+                x_in = layer(name=layer_name)([x_in, block_save.pop(-1)])
+                self._layer_save.append({"layer": layer, "follow_up_layer": follow_up_layer})
+                continue
+            x_in = layer(**layer_kwargs, name=layer_name)(x_in)
+            if follow_up_layer is not None:
+                for follow_up in to_list(follow_up_layer):
+                    layer_name = self._get_layer_name(follow_up, None, pos)
+                    x_in = follow_up(name=layer_name)(x_in)
+            self._layer_save.append({"layer": layer, **layer_kwargs, "follow_up_layer": follow_up_layer})
+
+        x_in = keras.layers.Dense(self._output_shape)(x_in)
+        out = self.activation_output(name=f"{self.activation_output_name}_output")(x_in)
+        self.model = keras.Model(inputs=x_input, outputs=[out])
+        print(self.model.summary())