diff --git a/mlair/configuration/defaults.py b/mlair/configuration/defaults.py
index d0b3592905c32a4c7af875014532a5539805dd3a..b63000c2e104c81cae7ee26dc883dcedf6b8666b 100644
--- a/mlair/configuration/defaults.py
+++ b/mlair/configuration/defaults.py
@@ -53,6 +53,7 @@ DEFAULT_DO_UNCERTAINTY_ESTIMATE = True
 DEFAULT_UNCERTAINTY_ESTIMATE_BLOCK_LENGTH = "1m"
 DEFAULT_UNCERTAINTY_ESTIMATE_EVALUATE_COMPETITORS = True
 DEFAULT_UNCERTAINTY_ESTIMATE_N_BOOTS = 1000
+DEFAULT_DO_BIAS_FREE_EVALUATION = True
 DEFAULT_EVALUATE_FEATURE_IMPORTANCE = True
 DEFAULT_FEATURE_IMPORTANCE_CREATE_NEW_BOOTSTRAPS = False
 DEFAULT_FEATURE_IMPORTANCE_N_BOOTS = 20
diff --git a/mlair/helpers/statistics.py b/mlair/helpers/statistics.py
index 9415e0fcb0557f792d1cc0679868caff97e316d3..c897f875c56da820512c0de58aca34f9cf0fc4cb 100644
--- a/mlair/helpers/statistics.py
+++ b/mlair/helpers/statistics.py
@@ -637,3 +637,16 @@ def create_n_bootstrap_realizations(data: xr.DataArray, dim_name_time: str, dim_
             realizations[season][boot] = calculate_average(shuffled.sel({dim_name_time: sel}),
                                                            dim=dim_name_time, skipna=True)
     return realizations
+
+
+def calculate_bias_free_data(data, time_dim="index", window_size=30):
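+    """Remove the overall and the seasonal bias from data.
+
+    Returns a tuple of (1) data with the overall mean along time_dim removed and (2) data with a centered
+    rolling mean of window_size time steps (requiring at least window_size / 2 valid values) removed.
+    """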
+
+    # overall mean
+    overall_mean = data.mean(time_dim)
+    bias_free_data = data - overall_mean
+
+    # seasonal mean
+    seasonal_mean = data.rolling(**{time_dim: window_size}, min_periods=int(window_size/2), center=True).mean()
+    seasonal_bias_free_data = data - seasonal_mean
+
+    return bias_free_data, seasonal_bias_free_data
diff --git a/mlair/run_modules/experiment_setup.py b/mlair/run_modules/experiment_setup.py
index f89633cbe0f80f26dbb2481ca24a7fd294ee6888..8bbcfddf741c657af0d0e26f7c3937058e83f821 100644
--- a/mlair/run_modules/experiment_setup.py
+++ b/mlair/run_modules/experiment_setup.py
@@ -24,7 +24,8 @@ from mlair.configuration.defaults import DEFAULT_STATIONS, DEFAULT_VAR_ALL_DICT,
     DEFAULT_FEATURE_IMPORTANCE_BOOTSTRAP_TYPE, DEFAULT_FEATURE_IMPORTANCE_BOOTSTRAP_METHOD, DEFAULT_OVERWRITE_LAZY_DATA, \
     DEFAULT_UNCERTAINTY_ESTIMATE_BLOCK_LENGTH, DEFAULT_UNCERTAINTY_ESTIMATE_EVALUATE_COMPETITORS, \
     DEFAULT_UNCERTAINTY_ESTIMATE_N_BOOTS, DEFAULT_DO_UNCERTAINTY_ESTIMATE, DEFAULT_CREATE_SNAPSHOT, \
-    DEFAULT_EARLY_STOPPING_EPOCHS, DEFAULT_RESTORE_BEST_MODEL_WEIGHTS, DEFAULT_COMPETITORS
+    DEFAULT_EARLY_STOPPING_EPOCHS, DEFAULT_RESTORE_BEST_MODEL_WEIGHTS, DEFAULT_COMPETITORS, \
+    DEFAULT_DO_BIAS_FREE_EVALUATION
 from mlair.data_handler import DefaultDataHandler
 from mlair.run_modules.run_environment import RunEnvironment
 from mlair.model_modules.fully_connected_networks import FCN_64_32_16 as VanillaModel
@@ -240,7 +241,8 @@ class ExperimentSetup(RunEnvironment):
                  max_number_multiprocessing: int = None, start_script: Union[Callable, str] = None,
                  overwrite_lazy_data: bool = None, uncertainty_estimate_block_length: str = None,
                  uncertainty_estimate_evaluate_competitors: bool = None, uncertainty_estimate_n_boots: int = None,
-                 do_uncertainty_estimate: bool = None, model_display_name: str = None, transformation_file: str = None,
+                 do_uncertainty_estimate: bool = None, do_bias_free_evaluation: bool = None,
+                 model_display_name: str = None, transformation_file: str = None,
                  calculate_fresh_transformation: bool = None, snapshot_load_path: str = None,
                  create_snapshot: bool = None, snapshot_path: str = None, **kwargs):
 
@@ -393,6 +395,8 @@ class ExperimentSetup(RunEnvironment):
                         default=DEFAULT_UNCERTAINTY_ESTIMATE_EVALUATE_COMPETITORS, scope="uncertainty_estimate")
         self._set_param("n_boots", uncertainty_estimate_n_boots,
                         default=DEFAULT_UNCERTAINTY_ESTIMATE_N_BOOTS, scope="uncertainty_estimate")
+        self._set_param("do_bias_free_evaluation", do_bias_free_evaluation,
+                        default=DEFAULT_DO_BIAS_FREE_EVALUATION, scope="general.postprocessing")
 
         self._set_param("evaluate_feature_importance", evaluate_feature_importance,
                         default=DEFAULT_EVALUATE_FEATURE_IMPORTANCE, scope="general.postprocessing")
diff --git a/mlair/run_modules/post_processing.py b/mlair/run_modules/post_processing.py
index e4cc34f66db7426876209fece0dc902341ac6582..4a0915b9e3133925fe1e0b309b250217cbd42a7e 100644
--- a/mlair/run_modules/post_processing.py
+++ b/mlair/run_modules/post_processing.py
@@ -89,6 +89,7 @@ class PostProcessing(RunEnvironment):
         self.window_lead_time = extract_value(self.data_store.get("output_shape", "model"))
         self.skill_scores = None
         self.errors = None
+        self.bias_free_errors = None
         self.feature_importance_skill_scores = None
         self.uncertainty_estimate = None
         self.uncertainty_estimate_seasons = {}
@@ -147,7 +148,14 @@ class PostProcessing(RunEnvironment):
             self.report_error_metrics(errors)
             self.report_error_metrics({self.forecast_indicator: skill_score_climatological})
             self.report_error_metrics({"skill_score": skill_score_competitive})
-        self.store_errors(errors)
+        self.errors = self.store_errors(errors)
+
+        # bias free evaluation
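+        # (errors are recalculated on data with the overall mean and with a seasonal rolling mean removed)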
+        if self.data_store.get("do_bias_free_evaluation", "postprocessing") is True:
+            bias_free_errors = self.calculate_bias_free_error_metrics()
+            self.report_error_metrics(bias_free_errors[0], tag="bias_free")
+            self.report_error_metrics(bias_free_errors[1], tag="seasonal_bias_free")
+            self.bias_free_errors = [self.store_errors(bias_free_errors[0]), self.store_errors(bias_free_errors[1])]
 
         # plotting
         self.plot()
@@ -658,6 +666,29 @@ class PostProcessing(RunEnvironment):
             logging.error(f"Could not create plot PlotErrorMetrics due to the following error: {e}"
                           f"\n{sys.exc_info()[0]}\n{sys.exc_info()[1]}\n{sys.exc_info()[2]}")
 
+        try:
+            if "PlotErrorMetrics" in plot_list and self.bias_free_errors is not None:
+                error_metric_units = statistics.get_error_metrics_units("ppb")
+                error_metrics_name = statistics.get_error_metrics_long_name()
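+                # bias_free_errors holds overall bias free errors (index 0) and seasonal bias free errors (index 1)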
+                tag = {0: "", 1: "seasonal_"}
+                for i, errors in enumerate(self.bias_free_errors):
+                    for error_metric in errors.keys():
+                        try:
+                            PlotSampleUncertaintyFromBootstrap(
+                                data=errors[error_metric], plot_folder=self.plot_path,
+                                model_type_dim=self.model_type_dim,
+                                dim_name_boots="station", error_measure=error_metrics_name[error_metric],
+                                error_unit=error_metric_units[error_metric], model_name=self.model_display_name,
+                                model_indicator=self.model_display_name, sampling=self._sampling, apply_root=False,
+                                plot_name=f"{tag[i]}bias_free_error_plot_{error_metric}")
+                        except Exception as e:
+                            logging.error(f"Could not create plot PlotErrorMetrics for {error_metric} (bias free "
+                                          f"{tag[i]}) due to the following error: "
+                                          f"{e}\n{sys.exc_info()[0]}\n{sys.exc_info()[1]}\n{sys.exc_info()[2]}")
+        except Exception as e:
+            logging.error(f"Could not create plot PlotErrorMetrics (bias free) due to the following error: {e}"
+                          f"\n{sys.exc_info()[0]}\n{sys.exc_info()[1]}\n{sys.exc_info()[2]}")
+
         try:
             if "PlotStationMap" in plot_list:
                 gens = [(self.train_data, {"marker": 5, "ms": 9}),
@@ -746,10 +777,6 @@ class PostProcessing(RunEnvironment):
             logging.error(f"Could not create plot PlotErrorsOnMap due to the following error: {e}"
                           f"\n{sys.exc_info()[0]}\n{sys.exc_info()[1]}\n{sys.exc_info()[2]}")
 
-
-
-
-
     @TimeTrackingWrapper
     def calculate_test_score(self):
         """Evaluate test score of model and save locally."""
@@ -1067,6 +1094,46 @@ class PostProcessing(RunEnvironment):
         except (TypeError, AttributeError):
             return forecast if competitor is None else competitor
 
+    def calculate_bias_free_error_metrics(self):
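+        """Calculate error metrics of all forecasts on bias free data.
+
+        For each station, errors are computed twice: once on data with the overall mean removed and once on data
+        with a centered rolling mean (window_size=30) removed. The average over all stations is added as "total".
+        """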
+        all_stations = self.data_store.get("stations")
+        errors = [{}, {}]  # errors_total_bias_free, errors_seasonal_bias_free
+        for station in all_stations:
+            external_data = self._get_external_data(station, self.forecast_path)  # test data
+
+            # rename forecast indicator to model display name
+            if external_data is not None:
+                external_data.coords[self.model_type_dim] = [{self.forecast_indicator: self.model_display_name}.get(n, n)
+                                                              for n in external_data.coords[self.model_type_dim].values]
+
+            # load competitors
+            competitor = self.load_competitors(station)
+            combined = self._combine_forecasts(external_data, competitor, dim=self.model_type_dim)
+            if combined is None:
+                continue
+            model_list = remove_items(combined.coords[self.model_type_dim].values.tolist(),
+                                      self.observation_indicator)
+
+            # bias free data as tuple: (data - overall mean, data - rolling seasonal mean)
+            bias_free_data = statistics.calculate_bias_free_data(combined, time_dim=self.index_dim, window_size=30)
+
+            # bias free test errors of all forecasts (model and competitors)
+            for model_type in (model_list or []):
+                for data, e in zip(bias_free_data, errors):
+                    if self.observation_indicator not in data.coords[self.model_type_dim]:
+                        continue
+                    if model_type not in e.keys():
+                        e[model_type] = {}
+                    e[model_type][station] = statistics.calculate_error_metrics(
+                        *map(lambda x: data.sel(**{self.model_type_dim: x}),
+                             [model_type, self.observation_indicator]), dim=self.index_dim)
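+        # add the average over all stations as "total" entry for each forecast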
+        for e in errors:
+            for model_type in e.keys():
+                e[model_type].update({"total": self.calculate_average_errors(e[model_type])})
+        return errors
+
     def calculate_error_metrics(self) -> Tuple[Dict, Dict, Dict, Dict]:
         """
         Calculate error metrics and skill scores of NN forecast.
@@ -1177,9 +1244,10 @@ class PostProcessing(RunEnvironment):
         file_name = "feature_importance_skill_score_report_raw.csv"
         df.to_csv(os.path.join(report_path, file_name), sep=";")
 
-    def report_error_metrics(self, errors):
+    def report_error_metrics(self, errors, tag=None):
         report_path = os.path.join(self.data_store.get("experiment_path"), "latex_report")
         path_config.check_path_and_create(report_path)
+        base_file_name = "error_report" if tag is None else f"error_report_{tag}"
         for model_type in errors.keys():
             metric_collection = {}
             for station, station_errors in errors[model_type].items():
@@ -1207,9 +1275,9 @@ class PostProcessing(RunEnvironment):
                     df.reindex(df.index.drop(["total"]).to_list() + ["total"], )
                 column_format = tables.create_column_format_for_tex(df)
                 if model_type == "skill_score":
-                    file_name = f"error_report_{model_type}_{metric}.%s".replace(' ', '_').replace('/', '_')
+                    file_name = f"{base_file_name}_{model_type}_{metric}.%s".replace(' ', '_').replace('/', '_')
                 else:
-                    file_name = f"error_report_{metric}_{model_type}.%s".replace(' ', '_').replace('/', '_')
+                    file_name = f"{base_file_name}_{metric}_{model_type}.%s".replace(' ', '_').replace('/', '_')
                 tables.save_to_tex(report_path, file_name % "tex", column_format=column_format, df=df)
                 tables.save_to_md(report_path, file_name % "md", df=df)
 
@@ -1226,4 +1294,4 @@ class PostProcessing(RunEnvironment):
             metric_collection[model_type] = xr.Dataset(station_collection).to_array(station_dim)
         coll = xr.Dataset(metric_collection).to_array(self.model_type_dim)
         coll = coll.transpose(station_dim, self.ahead_dim, self.model_type_dim, error_dim)
-        self.errors = {k: coll.sel({error_dim: k}, drop=True) for k in coll.coords[error_dim].values}
+        return {k: coll.sel({error_dim: k}, drop=True) for k in coll.coords[error_dim].values}
diff --git a/mlair/run_modules/pre_processing.py b/mlair/run_modules/pre_processing.py
index fe4d90784c4c068c9bc8d1f0fb727474aad44f48..1fa2a3c58b5e6fe122cd123804708a26beed68d8 100644
--- a/mlair/run_modules/pre_processing.py
+++ b/mlair/run_modules/pre_processing.py
@@ -434,7 +434,8 @@ class PreProcessing(RunEnvironment):
                            "model_class", "model_display_name", "model_path", "n_boots", "n_hidden", "n_layer",
                            "neighbors", "plot_list", "plot_path", "regularizer", "restore_best_model_weights",
                            "snapshot_load_path", "snapshot_path", "stations", "tmp_path", "train_model",
-                           "transformation", "use_multiprocessing", "cams_data_path", "cams_interp_method"]
+                           "transformation", "use_multiprocessing", "cams_data_path", "cams_interp_method", 
+                           "do_bias_free_evaluation"]
         data_handler = self.data_store.get("data_handler")
         model_class = self.data_store.get("model_class")
         excluded_params = list(set(excluded_params + data_handler.store_attributes() + model_class.requirements()))