diff --git a/mlair/run_modules/post_processing.py b/mlair/run_modules/post_processing.py
index 192b5e1054647c9fa9904195124f1f18afb6921e..8b69034fa5d3b69b39ba74aa8a6bbab873c06cc8 100644
--- a/mlair/run_modules/post_processing.py
+++ b/mlair/run_modules/post_processing.py
@@ -128,9 +128,10 @@ class PostProcessing(RunEnvironment):
         with TimeTracking(name="calculate_error_metrics", log_on_enter=True):
             skill_score_competitive, _, skill_score_climatological, errors = self.calculate_error_metrics()
             self.skill_scores = (skill_score_competitive, skill_score_climatological)
-            self.report_error_metrics(errors)
-            self.report_error_metrics({self.forecast_indicator: skill_score_climatological})
-            self.report_error_metrics({"skill_score": skill_score_competitive})
+        with TimeTracking(name="report_error_metrics", log_on_enter=True):
+            self.report_error_metrics(errors)
+            self.report_error_metrics({self.forecast_indicator: skill_score_climatological})
+            self.report_error_metrics({"skill_score": skill_score_competitive})
 
         # plotting
         self.plot()
@@ -631,9 +632,11 @@ class PostProcessing(RunEnvironment):
         except Exception as e:
             logging.error(f"Could not create plot PlotSampleUncertaintyFromBootstrap due to the following error: {e}"
                           f"\n{sys.exc_info()[0]}\n{sys.exc_info()[1]}\n{sys.exc_info()[2]}")
-        
+
+    @TimeTrackingWrapper
     def calculate_test_score(self):
         """Evaluate test score of model and save locally."""
+        logging.info(f"start to calculate test scores")
 
         # test scores on transformed data
         test_score = self.model.evaluate(self.test_data_distributed,
@@ -644,8 +647,10 @@ class PostProcessing(RunEnvironment):
                 logging.info(f"{self.model.metrics_names[index]} (test), {item}")
                 f.write(f"{self.model.metrics_names[index]}, {item}\n")
 
+    @TimeTrackingWrapper
     def train_ols_model(self):
         """Train ordinary least squared model on train data."""
+        logging.info(f"start train_ols_model on train data")
         self.ols_model = OrdinaryLeastSquaredModel(self.train_data)
 
     def make_prediction(self, subset):