diff --git a/mlair/run_modules/post_processing.py b/mlair/run_modules/post_processing.py
index f71e55c8d2c5182d6c3e104758e0ffe42e148263..8be9373d54d3bbd2e8a59f43355039735a3fbccc 100644
--- a/mlair/run_modules/post_processing.py
+++ b/mlair/run_modules/post_processing.py
@@ -135,9 +135,10 @@ class PostProcessing(RunEnvironment):
         with TimeTracking(name="calculate_error_metrics", log_on_enter=True):
             skill_score_competitive, _, skill_score_climatological, errors = self.calculate_error_metrics()
             self.skill_scores = (skill_score_competitive, skill_score_climatological)
-        self.report_error_metrics(errors)
-        self.report_error_metrics({self.forecast_indicator: skill_score_climatological})
-        self.report_error_metrics({"skill_score": skill_score_competitive})
+        with TimeTracking(name="report_error_metrics", log_on_enter=True):
+            self.report_error_metrics(errors)
+            self.report_error_metrics({self.forecast_indicator: skill_score_climatological})
+            self.report_error_metrics({"skill_score": skill_score_competitive})
 
         # load upstream wind sector for test_data
         try:
@@ -695,14 +696,17 @@ class PostProcessing(RunEnvironment):
             if "PlotScoresOnMap" in plot_list:
                 report_path = os.path.join(self.data_store.get("experiment_path"), "latex_report")
                 PlotScoresOnMap(plot_folder=self.plot_path, skill_score_report_path=report_path,
-                                        model_name=self.model_display_name,
-                                        reference_models=self.competitors+["persi, ols"])
+                                model_name=self.model_display_name,
+                                reference_models=self.competitors + ["persi", "ols"])
         except Exception as e:
             logging.error(f"Could not create plot PlotScoresOnMap due to the following error: {e}"
                           f"\n{sys.exc_info()[0]}\n{sys.exc_info()[1]}\n{sys.exc_info()[2]}")
 
+
+    @TimeTrackingWrapper
     def calculate_test_score(self):
         """Evaluate test score of model and save locally."""
+        logging.info("Start calculating test scores")
 
         # test scores on transformed data
         test_score = self.model.evaluate(self.test_data_distributed,
@@ -713,8 +717,10 @@ class PostProcessing(RunEnvironment):
                 logging.info(f"{self.model.metrics_names[index]} (test), {item}")
                 f.write(f"{self.model.metrics_names[index]}, {item}\n")
 
+    @TimeTrackingWrapper
     def train_ols_model(self):
         """Train ordinary least squared model on train data."""
+        logging.info("Start training ordinary least squared model on train data")
         self.ols_model = OrdinaryLeastSquaredModel(self.train_data)
 
     def make_prediction(self, subset):
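
For context, the pattern this patch applies throughout post-processing is to time each expensive step, either by wrapping a block in the `TimeTracking` context manager (as for `report_error_metrics`) or by decorating a whole method with `@TimeTrackingWrapper` (as for `calculate_test_score` and `train_ols_model`). The snippet below is only a minimal sketch of such a context-manager/decorator pair with the same call signature seen in the diff (`name`, `log_on_enter`); MLAir ships its own implementation in its helper modules, which may differ in detail.

```python
# Hypothetical sketch, not MLAir's actual helpers: a TimeTracking context
# manager plus a TimeTrackingWrapper decorator with the interface used above.
import functools
import logging
import time


class TimeTracking:
    """Log the wall-clock duration of the enclosed block."""

    def __init__(self, name="undefined job", log_on_enter=False):
        self.name = name
        self.log_on_enter = log_on_enter
        self._start = None

    def __enter__(self):
        if self.log_on_enter:
            logging.info(f"start {self.name}")
        self._start = time.perf_counter()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        duration = time.perf_counter() - self._start
        logging.info(f"{self.name} finished after {duration:.2f} s")
        return False  # never suppress exceptions raised inside the block


def TimeTrackingWrapper(method):
    """Decorator that runs the wrapped method inside a TimeTracking block."""

    @functools.wraps(method)
    def wrapper(*args, **kwargs):
        with TimeTracking(name=method.__name__):
            return method(*args, **kwargs)

    return wrapper
```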