diff --git a/mlair/configuration/defaults.py b/mlair/configuration/defaults.py
index 55a18a1a62b396fd8b4510416d8b056a5088e88a..04e441fe2ec3b421cf5f0ad1469584f5ef2aa668 100644
--- a/mlair/configuration/defaults.py
+++ b/mlair/configuration/defaults.py
@@ -26,6 +26,7 @@ DEFAULT_EPOCHS = 20
 DEFAULT_TARGET_VAR = "o3"
 DEFAULT_TARGET_DIM = "variables"
 DEFAULT_WINDOW_LEAD_TIME = 3
+DEFAULT_WINDOW_DIM = "window"
 DEFAULT_TIME_DIM = "datetime"
 DEFAULT_ITER_DIM = "Stations"
 DEFAULT_DIMENSIONS = {"new_index": [DEFAULT_TIME_DIM, DEFAULT_ITER_DIM]}
diff --git a/mlair/helpers/statistics.py b/mlair/helpers/statistics.py
index e1887e62eb682b9ba1cab335b690ae4de5cd7966..65574f4c2641d811e31087a1c9bb61f5e9a2aa8f 100644
--- a/mlair/helpers/statistics.py
+++ b/mlair/helpers/statistics.py
@@ -186,21 +186,21 @@ class SkillScores:
     Skill score according to Murphy:
         Follow climatological skill score definition of Murphy (1988). External data is data from another time period
         than the internal data set on initialisation. In other terms, this should be the train and validation data
-        whereas the internal data is the test data. This sounds perhaps counter-intuitive, but if a skill score is
-        evaluated to a model to another, this must be performend test data set. Therefore, for this case the foreign
-        data is train and val data.
+        whereas the external data is the test data. This sounds perhaps counter-intuitive, but if a skill score is
+        used to compare one model to another, the comparison must be performed on the test data set. Therefore, in
+        this case the foreign data is the test data.
 
         .. code-block:: python
 
-            skill_scores_class = SkillScores(internal_data)  # must contain columns obs and CNN.
-            skill_scores_clim = skill_scores_class.climatological_skill_scores(external_data, window_lead_time=3)
+            skill_scores_class = SkillScores(external_data)  # must contain columns obs and CNN.
+            skill_scores_clim = skill_scores_class.climatological_skill_scores(internal_data, window_lead_time=3)
 
     """
     models_default = ["cnn", "persi", "ols"]
 
-    def __init__(self, internal_data: Data, models=None, observation_name="obs"):
+    def __init__(self, external_data: Data, models=None, observation_name="obs"):
         """Set internal data."""
-        self.internal_data = internal_data
+        self.external_data = external_data
         self.models = self.set_model_names(models)
         self.observation_name = observation_name
 
@@ -236,7 +236,7 @@ class SkillScores:
         combinations, combination_strings = self.get_model_name_combinations()
         skill_score = pd.DataFrame(index=combination_strings)
         for iahead in ahead_names:
-            data = self.internal_data.sel(ahead=iahead)
+            data = self.external_data.sel(ahead=iahead)
             skill_score[iahead] = [self.general_skill_score(data,
                                                             forecast_name=first,
                                                             reference_name=second,
@@ -244,7 +244,7 @@ class SkillScores:
                                    for (first, second) in combinations]
         return skill_score
 
-    def climatological_skill_scores(self, external_data: Data, window_lead_time: int,
+    def climatological_skill_scores(self, internal_data: Data, window_lead_time: int,
                                     forecast_name: str) -> xr.DataArray:
         """
         Calculate climatological skill scores according to Murphy (1988).
@@ -252,7 +252,7 @@ class SkillScores:
         Calculate all CASES I - IV and terms [ABC][I-IV]. Internal data has to be set by initialisation, external data
         is part of parameters.
 
-        :param external_data: external data
+        :param internal_data: internal data (data already known to the model, i.e. train and validation data)
         :param window_lead_time: interested time step of forecast horizon to select data
         :param forecast_name: name of the forecast to use for this calculation (must be available in `data`)
 
@@ -266,8 +266,8 @@ class SkillScores:
                                    dims=['terms', 'ahead'])
 
         for iahead in ahead_names:
-
-            data = self.internal_data.sel(ahead=iahead)
+            data = internal_data.sel(ahead=iahead)
+            external_data = self.external_data.sel(ahead=iahead, type=[self.observation_name])
 
             skill_score.loc[["CASE I", "AI", "BI", "CI"], iahead] = np.stack(self._climatological_skill_score(
                 data, mu_type=1, forecast_name=forecast_name, observation_name=self.observation_name).values.flatten())
@@ -275,20 +275,22 @@ class SkillScores:
             skill_score.loc[["CASE II", "AII", "BII"], iahead] = np.stack(self._climatological_skill_score(
                 data, mu_type=2, forecast_name=forecast_name, observation_name=self.observation_name).values.flatten())
 
-            if external_data is not None:
-                skill_score.loc[["CASE III", "AIII"], iahead] = np.stack(self._climatological_skill_score(
-                    data, mu_type=3, forecast_name=forecast_name, observation_name=self.observation_name,
-                    external_data=external_data).values.flatten())
+            # external_data is always available here (selected from self.external_data above), so no None check needed
+            skill_score.loc[["CASE III", "AIII"], iahead] = np.stack(self._climatological_skill_score(
+                data, mu_type=3, forecast_name=forecast_name, observation_name=self.observation_name,
+                external_data=external_data).values.flatten())
 
-                skill_score.loc[["CASE IV", "AIV", "BIV", "CIV"], iahead] = np.stack(self._climatological_skill_score(
-                    data, mu_type=4, forecast_name=forecast_name, observation_name=self.observation_name,
-                    external_data=external_data).values.flatten())
+            skill_score.loc[["CASE IV", "AIV", "BIV", "CIV"], iahead] = np.stack(self._climatological_skill_score(
+                data, mu_type=4, forecast_name=forecast_name, observation_name=self.observation_name,
+                external_data=external_data).values.flatten())
 
         return skill_score
 
-    def _climatological_skill_score(self, data, observation_name, forecast_name, mu_type=1, external_data=None):
+    def _climatological_skill_score(self, internal_data, observation_name, forecast_name, mu_type=1,
+                                    external_data=None):
         kwargs = {"external_data": external_data} if external_data is not None else {}
-        return self.__getattribute__(f"skill_score_mu_case_{mu_type}")(data, observation_name, forecast_name, **kwargs)
+        return self.__getattribute__(f"skill_score_mu_case_{mu_type}")(internal_data, observation_name, forecast_name,
+                                                                       **kwargs)
 
     def general_skill_score(self, data: Data, forecast_name: str, reference_name: str,
                             observation_name: str = None) -> np.ndarray:
@@ -346,15 +348,15 @@ class SkillScores:
         suffix = {"mean": mean, "sigma": sigma, "r": r, "p": p}
         return AI, BI, CI, data, suffix
 
-    def skill_score_mu_case_1(self, data, observation_name, forecast_name):
+    def skill_score_mu_case_1(self, internal_data, observation_name, forecast_name):
         """Calculate CASE I."""
-        AI, BI, CI, data, _ = self.skill_score_pre_calculations(data, observation_name, forecast_name)
+        AI, BI, CI, data, _ = self.skill_score_pre_calculations(internal_data, observation_name, forecast_name)
         skill_score = np.array(AI - BI - CI)
         return pd.DataFrame({"skill_score": [skill_score], "AI": [AI], "BI": [BI], "CI": [CI]}).to_xarray().to_array()
 
-    def skill_score_mu_case_2(self, data, observation_name, forecast_name):
+    def skill_score_mu_case_2(self, internal_data, observation_name, forecast_name):
         """Calculate CASE II."""
-        AI, BI, CI, data, suffix = self.skill_score_pre_calculations(data, observation_name, forecast_name)
+        AI, BI, CI, data, suffix = self.skill_score_pre_calculations(internal_data, observation_name, forecast_name)
         monthly_mean = self.create_monthly_mean_from_daily_data(data)
         data = xr.concat([data, monthly_mean], dim="type")
         sigma = suffix["sigma"]
@@ -365,29 +367,25 @@ class SkillScores:
         skill_score = np.array((AI - BI - CI - AII + BII) / (1 - AII + BII))
         return pd.DataFrame({"skill_score": [skill_score], "AII": [AII], "BII": [BII]}).to_xarray().to_array()
 
-    def skill_score_mu_case_3(self, data, observation_name, forecast_name, external_data=None):
+    def skill_score_mu_case_3(self, internal_data, observation_name, forecast_name, external_data=None):
         """Calculate CASE III."""
-        AI, BI, CI, data, suffix = self.skill_score_pre_calculations(data, observation_name, forecast_name)
+        AI, BI, CI, data, suffix = self.skill_score_pre_calculations(internal_data, observation_name, forecast_name)
         mean, sigma = suffix["mean"], suffix["sigma"]
         AIII = (((external_data.mean().values - mean.sel(type=observation_name, drop=True)) / sigma.sel(
             type=observation_name, drop=True)) ** 2).values
-        skill_score = np.array((AI - BI - CI + AIII) / 1 + AIII)
+        skill_score = np.array((AI - BI - CI + AIII) / (1 + AIII))
         return pd.DataFrame({"skill_score": [skill_score], "AIII": [AIII]}).to_xarray().to_array()
 
-    def skill_score_mu_case_4(self, data, observation_name, forecast_name, external_data=None):
+    def skill_score_mu_case_4(self, internal_data, observation_name, forecast_name, external_data=None):
         """Calculate CASE IV."""
-        AI, BI, CI, data, suffix = self.skill_score_pre_calculations(data, observation_name, forecast_name)
-        monthly_mean_external = self.create_monthly_mean_from_daily_data(external_data, columns=data.type.values,
-                                                                         index=data.index)
+        AI, BI, CI, data, suffix = self.skill_score_pre_calculations(internal_data, observation_name, forecast_name)
+        monthly_mean_external = self.create_monthly_mean_from_daily_data(external_data, index=data.index)
         data = xr.concat([data, monthly_mean_external], dim="type")
         mean, sigma = suffix["mean"], suffix["sigma"]
-        monthly_mean_external = self.create_monthly_mean_from_daily_data(external_data, columns=data.type.values)
         mean_external = monthly_mean_external.mean()
         sigma_external = np.sqrt(monthly_mean_external.var())
-
         r_mu, p_mu = stats.pearsonr(
             *[data.sel(type=n).values.flatten() for n in [observation_name, observation_name + "X"]])
-
         AIV = np.array(r_mu ** 2)
         BIV = ((r_mu - sigma_external / sigma.sel(type=observation_name, drop=True)) ** 2).values
         CIV = (((mean_external - mean.sel(type=observation_name, drop=True)) / sigma.sel(type=observation_name,
diff --git a/mlair/model_modules/model_class.py b/mlair/model_modules/model_class.py
index a603b466e4dab0dc30b6b6b22d10b6c27ee59767..a2eda6e8287af2ce489bf75b02d7b205549ff144 100644
--- a/mlair/model_modules/model_class.py
+++ b/mlair/model_modules/model_class.py
@@ -127,7 +127,7 @@ import keras
 import tensorflow as tf
 from mlair.model_modules.inception_model import InceptionModelBase
 from mlair.model_modules.flatten import flatten_tail
-from mlair.model_modules.advanced_paddings import PadUtils, Padding2D
+from mlair.model_modules.advanced_paddings import PadUtils, Padding2D, SymmetricPadding2D
 
 
 class AbstractModelClass(ABC):
@@ -636,19 +636,22 @@ class MyPaperModel(AbstractModelClass):
         assert len(output_shape) == 1
         super().__init__(input_shape[0], output_shape[0])
 
+        from mlair.model_modules.keras_extensions import LearningRateDecay
+
         # settings
         self.dropout_rate = .3
         self.regularizer = keras.regularizers.l2(0.001)
         self.initial_lr = 1e-3
-        self.lr_decay = mlair.model_modules.keras_extensions.LearningRateDecay(base_lr=self.initial_lr, drop=.94,
-                                                                               epochs_drop=10)
+        self.lr_decay = LearningRateDecay(base_lr=self.initial_lr, drop=.94, epochs_drop=10)
         self.activation = keras.layers.ELU
         self.padding = "SymPad2D"
 
         # apply to model
         self.set_model()
         self.set_compile_options()
-        self.set_custom_objects(loss=self.compile_options["loss"], Padding2D=Padding2D)
+        self.set_custom_objects(loss=self.compile_options["loss"],
+                                SymmetricPadding2D=SymmetricPadding2D,
+                                LearningRateDecay=LearningRateDecay)
 
     def set_model(self):
         """
@@ -668,10 +671,10 @@ class MyPaperModel(AbstractModelClass):
         conv_settings_dict1 = {
             'tower_1': {'reduction_filter': 8, 'tower_filter': 16 * 2, 'tower_kernel': (3, 1),
                         'activation': activation},
-            'tower_2': {'reduction_filter': 8, 'tower_filter': 16 * 2, 'tower_kernel': (5, 1),
-                        'activation': activation},
-            'tower_3': {'reduction_filter': 8, 'tower_filter': 16 * 2, 'tower_kernel': (1, 1),
-                        'activation': activation},
+            # 'tower_2': {'reduction_filter': 8, 'tower_filter': 16 * 2, 'tower_kernel': (5, 1),
+            #             'activation': activation},
+            # 'tower_3': {'reduction_filter': 8, 'tower_filter': 16 * 2, 'tower_kernel': (1, 1),
+            #             'activation': activation},
             # 'tower_4':{'reduction_filter':8, 'tower_filter':8*2, 'tower_kernel':(7,1), 'activation':activation},
         }
         pool_settings_dict1 = {'pool_kernel': (3, 1), 'tower_filter': 16, 'activation': activation}
@@ -679,10 +682,10 @@ class MyPaperModel(AbstractModelClass):
         conv_settings_dict2 = {
             'tower_1': {'reduction_filter': 64, 'tower_filter': 32 * 2, 'tower_kernel': (3, 1),
                         'activation': activation},
-            'tower_2': {'reduction_filter': 64, 'tower_filter': 32 * 2, 'tower_kernel': (5, 1),
-                        'activation': activation},
-            'tower_3': {'reduction_filter': 64, 'tower_filter': 32 * 2, 'tower_kernel': (1, 1),
-                        'activation': activation},
+            # 'tower_2': {'reduction_filter': 64, 'tower_filter': 32 * 2, 'tower_kernel': (5, 1),
+            #             'activation': activation},
+            # 'tower_3': {'reduction_filter': 64, 'tower_filter': 32 * 2, 'tower_kernel': (1, 1),
+            #             'activation': activation},
             # 'tower_4':{'reduction_filter':8*2, 'tower_filter':16*2, 'tower_kernel':(7,1), 'activation':activation},
         }
         pool_settings_dict2 = {'pool_kernel': (3, 1), 'tower_filter': 32, 'activation': activation}
@@ -704,8 +707,6 @@ class MyPaperModel(AbstractModelClass):
         X_input = keras.layers.Input(shape=self._input_shape)
 
         pad_size = PadUtils.get_padding_for_same(first_kernel)
-        # X_in = adv_pad.SymmetricPadding2D(padding=pad_size)(X_input)
-        # X_in = inception_model.padding_layer("SymPad2D")(padding=pad_size, name="SymPad")(X_input)  # adv_pad.SymmetricPadding2D(padding=pad_size)(X_input)
         X_in = Padding2D("SymPad2D")(padding=pad_size, name="SymPad")(X_input)
         X_in = keras.layers.Conv2D(filters=first_filters,
                                    kernel_size=first_kernel,
@@ -748,7 +749,7 @@ class MyPaperModel(AbstractModelClass):
     def set_compile_options(self):
         self.optimizer = keras.optimizers.SGD(lr=self.initial_lr, momentum=0.9)
         self.compile_options = {"loss": [keras.losses.mean_squared_error, keras.losses.mean_squared_error],
-                                "metrics": ['mse', 'mea']}
+                                "metrics": ['mse', 'mae']}
 
 
 if __name__ == "__main__":
diff --git a/mlair/run_modules/experiment_setup.py b/mlair/run_modules/experiment_setup.py
index 18ef98f81d29730d3d6fac9bb57e6dc91942df24..af540fc296f1d4b707b5373fdbcbb14dac1afc7f 100644
--- a/mlair/run_modules/experiment_setup.py
+++ b/mlair/run_modules/experiment_setup.py
@@ -13,7 +13,7 @@ from mlair.configuration.defaults import DEFAULT_STATIONS, DEFAULT_VAR_ALL_DICT,
     DEFAULT_HPC_LOGIN_LIST, DEFAULT_HPC_HOST_LIST, DEFAULT_CREATE_NEW_MODEL, DEFAULT_TRAIN_MODEL, \
     DEFAULT_FRACTION_OF_TRAINING, DEFAULT_EXTREME_VALUES, DEFAULT_EXTREMES_ON_RIGHT_TAIL_ONLY, DEFAULT_PERMUTE_DATA, \
     DEFAULT_BATCH_SIZE, DEFAULT_EPOCHS, DEFAULT_TARGET_VAR, DEFAULT_TARGET_DIM, DEFAULT_WINDOW_LEAD_TIME, \
-    DEFAULT_DIMENSIONS, DEFAULT_TIME_DIM, DEFAULT_INTERPOLATION_METHOD, DEFAULT_INTERPOLATION_LIMIT, \
+    DEFAULT_WINDOW_DIM, DEFAULT_DIMENSIONS, DEFAULT_TIME_DIM, DEFAULT_INTERPOLATION_METHOD, DEFAULT_INTERPOLATION_LIMIT, \
     DEFAULT_TRAIN_START, DEFAULT_TRAIN_END, DEFAULT_TRAIN_MIN_LENGTH, DEFAULT_VAL_START, DEFAULT_VAL_END, \
     DEFAULT_VAL_MIN_LENGTH, DEFAULT_TEST_START, DEFAULT_TEST_END, DEFAULT_TEST_MIN_LENGTH, DEFAULT_TRAIN_VAL_MIN_LENGTH, \
     DEFAULT_USE_ALL_STATIONS_ON_ALL_DATA_SETS, DEFAULT_EVALUATE_BOOTSTRAPS, DEFAULT_CREATE_NEW_BOOTSTRAPS, \
@@ -211,6 +211,7 @@ class ExperimentSetup(RunEnvironment):
                  target_var="o3",
                  target_dim=None,
                  window_lead_time: int = None,
+                 window_dim=None,
                  dimensions=None,
                  time_dim=None,
                  iter_dim=None,
@@ -299,8 +300,9 @@ class ExperimentSetup(RunEnvironment):
         self._set_param("transformation", None, scope="preprocessing")
         self._set_param("data_handler", data_handler, default=DefaultDataHandler)
 
-        # iteration
+        # iter and window dimension
         self._set_param("iter_dim", iter_dim, default=DEFAULT_ITER_DIM)
+        self._set_param("window_dim", window_dim, default=DEFAULT_WINDOW_DIM)
 
         # target
         self._set_param("target_var", target_var, default=DEFAULT_TARGET_VAR)
diff --git a/mlair/run_modules/model_setup.py b/mlair/run_modules/model_setup.py
index c6af13b02e818431578c7423d837f95e64ca3d15..dda18fac5d8546c6e399334f3d89415d246a1975 100644
--- a/mlair/run_modules/model_setup.py
+++ b/mlair/run_modules/model_setup.py
@@ -76,6 +76,9 @@ class ModelSetup(RunEnvironment):
         # build model graph using settings from my_model_settings()
         self.build_model()
 
+        # broadcast custom objects
+        self.broadcast_custom_objects()
+
         # plot model structure
         self.plot_model()
 
@@ -141,6 +144,16 @@ class ModelSetup(RunEnvironment):
         self.model = model(**args)
         self.get_model_settings()
 
+    def broadcast_custom_objects(self):
+        """
+        Broadcast custom objects to keras utils.
+
+        This method is very important, because it adds the model's custom objects to the keras utils. By doing so, all
+        custom objects can be treated as standard keras modules. Therefore, problems related to model or callback
+        loading are solved.
+        """
+        keras.utils.get_custom_objects().update(self.model.custom_objects)
+
     def get_model_settings(self):
         """Load all model settings and store in data store."""
         model_settings = self.model.get_settings()
diff --git a/mlair/run_modules/post_processing.py b/mlair/run_modules/post_processing.py
index d223858ccf056703b1e4c0975a382f11f6a150cc..c810d3c5517643612f773bdbffc3e0e029d9150d 100644
--- a/mlair/run_modules/post_processing.py
+++ b/mlair/run_modules/post_processing.py
@@ -89,8 +89,9 @@ class PostProcessing(RunEnvironment):
         # ols model
         self.train_ols_model()
 
-        # forecasts
-        self.make_prediction()
+        # forecasts on test data
+        self.make_prediction(self.test_data)
+        self.make_prediction(self.train_val_data)
 
         # calculate error metrics on test data
         self.calculate_test_score()
@@ -397,7 +398,7 @@ class PostProcessing(RunEnvironment):
         """Train ordinary least squared model on train data."""
         self.ols_model = OrdinaryLeastSquaredModel(self.train_data)
 
-    def make_prediction(self):
+    def make_prediction(self, subset):
         """
         Create predictions for NN, OLS, and persistence and add true observation as reference.
 
@@ -405,10 +406,12 @@ class PostProcessing(RunEnvironment):
         predictions for a single station are stored locally under `<forecast/forecast_norm>_<station>_test.nc` and can
         be found inside `forecast_path`.
         """
-        logging.debug("start make_prediction")
+        subset_type = subset.name
+        logging.debug(f"start make_prediction for {subset_type}")
         time_dimension = self.data_store.get("time_dim")
         window_dim = self.data_store.get("window_dim")
-        for i, data in enumerate(self.test_data):
+        subset_type = subset.name
+        for i, data in enumerate(subset):
             input_data = data.get_X()
             target_data = data.get_Y(as_numpy=False)
             observation_data = data.get_observation()
@@ -446,7 +449,7 @@ class PostProcessing(RunEnvironment):
                 # save all forecasts locally
                 path = self.data_store.get("forecast_path")
                 prefix = "forecasts_norm" if normalised is True else "forecasts"
-                file = os.path.join(path, f"{prefix}_{str(data)}_test.nc")
+                file = os.path.join(path, f"{prefix}_{str(data)}_{subset_type}.nc")
                 all_predictions.to_netcdf(file)
 
     def _get_frequency(self) -> str:
@@ -612,21 +615,33 @@ class PostProcessing(RunEnvironment):
             res.loc[match_index, :, k] = v.loc[match_index]
         return res
 
-    def _get_external_data(self, station: str) -> Union[xr.DataArray, None]:
+    def _get_internal_data(self, station: str, path: str) -> Union[xr.DataArray, None]:
         """
-        Get external data for given station.
+        Get internal data for given station.
 
-        External data is defined as data that is not part of the observed period. From an evaluation perspective, this
+        Internal data is defined as data that is already known to the model. From an evaluation perspective, this
         refers to data, that is no test data, and therefore to train and val data.
 
+        :param station: name of station to load internal data.
+        """
+        try:
+            file = os.path.join(path, f"forecasts_{str(station)}_train_val.nc")
+            return xr.open_dataarray(file)
+        except (IndexError, KeyError):
+            return None
+
+    def _get_external_data(self, station: str, path: str) -> Union[xr.DataArray, None]:
+        """
+        Get external data for given station.
+
+        External data is defined as data that is not known to the model. From an evaluation perspective, this refers to
+        data that is neither train nor val data, and therefore to the test data.
+
         :param station: name of station to load external data.
         """
         try:
-            data = self.train_val_data[station]
-            observation = data.get_observation()
-            transformation_func = data.apply_transformation
-            external_data = self._create_observation(observation, None, transformation_func, normalised=False)
-            return external_data.rename({external_data.dims[0]: 'index'})
+            file = os.path.join(path, f"forecasts_{str(station)}_test.nc")
+            return xr.open_dataarray(file)
         except (IndexError, KeyError):
             return None
 
@@ -644,13 +659,13 @@ class PostProcessing(RunEnvironment):
         skill_score_competitive = {}
         skill_score_climatological = {}
         for station in self.test_data:
-            file = os.path.join(path, f"forecasts_{str(station)}_test.nc")
-            data = xr.open_dataarray(file)
+            external_data = self._get_external_data(station, path)
             competitor = self.load_competitors(str(station))
-            combined = xr.concat([data, competitor], dim="type") if competitor is not None else data
+            combined = xr.concat([external_data, competitor], dim="type") if competitor is not None else external_data
             skill_score = statistics.SkillScores(combined, models=remove_items(list(combined.type.values), "obs"))
-            external_data = self._get_external_data(station)  # ToDo: check if external is still right?
             skill_score_competitive[station] = skill_score.skill_scores(self.window_lead_time)
+
+            internal_data = self._get_internal_data(station, path)  # ToDo: check if internal data is still right here?
             skill_score_climatological[station] = skill_score.climatological_skill_scores(
-                external_data, self.window_lead_time, forecast_name=self.forecast_indicator)
+                internal_data, self.window_lead_time, forecast_name=self.forecast_indicator)
         return skill_score_competitive, skill_score_climatological
diff --git a/mlair/run_modules/training.py b/mlair/run_modules/training.py
index 113765e0d295bb0b1d756cd1cefba85093b20089..6c993d56b540cf3cf5b86d9c1920fc3a22557e46 100644
--- a/mlair/run_modules/training.py
+++ b/mlair/run_modules/training.py
@@ -223,7 +223,7 @@ class Training(RunEnvironment):
         if multiple_branches_used:
             filename = os.path.join(path, f"{name}_history_main_loss.pdf")
             PlotModelHistory(filename=filename, history=history, main_branch=True)
-        if "mean_squared_error" in history.model.metrics_names:
+        if len([e for e in history.model.metrics_names if "mean_squared_error" in e]) > 0:
             filename = os.path.join(path, f"{name}_history_main_mse.pdf")
             PlotModelHistory(filename=filename, history=history, plot_metric="mse", main_branch=multiple_branches_used)