diff --git a/mlair/model_modules/model_class.py b/mlair/model_modules/model_class.py
index 756d1d1416a52bf180dc41dfadfb4aaf80d59bcd..a2eda6e8287af2ce489bf75b02d7b205549ff144 100644
--- a/mlair/model_modules/model_class.py
+++ b/mlair/model_modules/model_class.py
@@ -127,7 +127,7 @@ import keras
 import tensorflow as tf
 from mlair.model_modules.inception_model import InceptionModelBase
 from mlair.model_modules.flatten import flatten_tail
-from mlair.model_modules.advanced_paddings import PadUtils, Padding2D
+from mlair.model_modules.advanced_paddings import PadUtils, Padding2D, SymmetricPadding2D
 
 
 class AbstractModelClass(ABC):
@@ -636,19 +636,22 @@ class MyPaperModel(AbstractModelClass):
         assert len(output_shape) == 1
         super().__init__(input_shape[0], output_shape[0])
 
+        from mlair.model_modules.keras_extensions import LearningRateDecay
+
         # settings
         self.dropout_rate = .3
         self.regularizer = keras.regularizers.l2(0.001)
         self.initial_lr = 1e-3
-        self.lr_decay = mlair.model_modules.keras_extensions.LearningRateDecay(base_lr=self.initial_lr, drop=.94,
-                                                                               epochs_drop=10)
+        self.lr_decay = LearningRateDecay(base_lr=self.initial_lr, drop=.94, epochs_drop=10)
         self.activation = keras.layers.ELU
         self.padding = "SymPad2D"
 
         # apply to model
         self.set_model()
         self.set_compile_options()
-        self.set_custom_objects(loss=self.compile_options["loss"], Padding2D=Padding2D)
+        self.set_custom_objects(loss=self.compile_options["loss"],
+                                SymmetricPadding2D=SymmetricPadding2D,
+                                LearningRateDecay=LearningRateDecay)
 
     def set_model(self):
         """
@@ -668,10 +671,10 @@ class MyPaperModel(AbstractModelClass):
         conv_settings_dict1 = {
             'tower_1': {'reduction_filter': 8, 'tower_filter': 16 * 2, 'tower_kernel': (3, 1),
                         'activation': activation},
-            'tower_2': {'reduction_filter': 8, 'tower_filter': 16 * 2, 'tower_kernel': (5, 1),
-                        'activation': activation},
-            'tower_3': {'reduction_filter': 8, 'tower_filter': 16 * 2, 'tower_kernel': (1, 1),
-                        'activation': activation},
+            # 'tower_2': {'reduction_filter': 8, 'tower_filter': 16 * 2, 'tower_kernel': (5, 1),
+            #             'activation': activation},
+            # 'tower_3': {'reduction_filter': 8, 'tower_filter': 16 * 2, 'tower_kernel': (1, 1),
+            #             'activation': activation},
             # 'tower_4':{'reduction_filter':8, 'tower_filter':8*2, 'tower_kernel':(7,1), 'activation':activation},
         }
         pool_settings_dict1 = {'pool_kernel': (3, 1), 'tower_filter': 16, 'activation': activation}
@@ -679,10 +682,10 @@ class MyPaperModel(AbstractModelClass):
         conv_settings_dict2 = {
             'tower_1': {'reduction_filter': 64, 'tower_filter': 32 * 2, 'tower_kernel': (3, 1),
                         'activation': activation},
-            'tower_2': {'reduction_filter': 64, 'tower_filter': 32 * 2, 'tower_kernel': (5, 1),
-                        'activation': activation},
-            'tower_3': {'reduction_filter': 64, 'tower_filter': 32 * 2, 'tower_kernel': (1, 1),
-                        'activation': activation},
+            # 'tower_2': {'reduction_filter': 64, 'tower_filter': 32 * 2, 'tower_kernel': (5, 1),
+            #             'activation': activation},
+            # 'tower_3': {'reduction_filter': 64, 'tower_filter': 32 * 2, 'tower_kernel': (1, 1),
+            #             'activation': activation},
             # 'tower_4':{'reduction_filter':8*2, 'tower_filter':16*2, 'tower_kernel':(7,1), 'activation':activation},
         }
         pool_settings_dict2 = {'pool_kernel': (3, 1), 'tower_filter': 32, 'activation': activation}
@@ -704,8 +707,6 @@ class MyPaperModel(AbstractModelClass):
         X_input = keras.layers.Input(shape=self._input_shape)
 
         pad_size = PadUtils.get_padding_for_same(first_kernel)
-        # X_in = adv_pad.SymmetricPadding2D(padding=pad_size)(X_input)
-        # X_in = inception_model.padding_layer("SymPad2D")(padding=pad_size, name="SymPad")(X_input)  # adv_pad.SymmetricPadding2D(padding=pad_size)(X_input)
         X_in = Padding2D("SymPad2D")(padding=pad_size, name="SymPad")(X_input)
         X_in = keras.layers.Conv2D(filters=first_filters,
                                    kernel_size=first_kernel,
diff --git a/mlair/run_modules/model_setup.py b/mlair/run_modules/model_setup.py
index c6af13b02e818431578c7423d837f95e64ca3d15..dda18fac5d8546c6e399334f3d89415d246a1975 100644
--- a/mlair/run_modules/model_setup.py
+++ b/mlair/run_modules/model_setup.py
@@ -76,6 +76,9 @@ class ModelSetup(RunEnvironment):
         # build model graph using settings from my_model_settings()
         self.build_model()
 
+        # broadcast custom objects
+        self.broadcast_custom_objects()
+
         # plot model structure
         self.plot_model()
 
@@ -141,6 +144,16 @@ class ModelSetup(RunEnvironment):
         self.model = model(**args)
         self.get_model_settings()
 
+    def broadcast_custom_objects(self):
+        """
+        Broadcast the model's custom objects to the global Keras custom objects registry.
+
+        This step is essential: it registers the model's custom objects with
+        ``keras.utils.get_custom_objects()`` so they are resolved like standard Keras components,
+        which prevents deserialization errors when a saved model or callback is loaded.
+        """
+        keras.utils.get_custom_objects().update(self.model.custom_objects)
+
     def get_model_settings(self):
         """Load all model settings and store in data store."""
         model_settings = self.model.get_settings()