From d0735fd124c137561c66f448f95b5e8152ebad34 Mon Sep 17 00:00:00 2001
From: leufen1 <l.leufen@fz-juelich.de>
Date: Tue, 12 Oct 2021 18:03:18 +0200
Subject: [PATCH] errors of external competitors are now added too

---
 mlair/run_modules/post_processing.py | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/mlair/run_modules/post_processing.py b/mlair/run_modules/post_processing.py
index 425b26cf..0ff4afb3 100644
--- a/mlair/run_modules/post_processing.py
+++ b/mlair/run_modules/post_processing.py
@@ -771,10 +771,20 @@ class PostProcessing(RunEnvironment):
                     *map(lambda x: external_data.sel(**{self.model_type_dim: x}),
                          [model_type, "obs"]), dim="index")
 
-            # skill score
+            # load competitors
             competitor = self.load_competitors(station)
             combined = self._combine_forecasts(external_data, competitor, dim=self.model_type_dim)
             model_list = remove_items(combined.coords[self.model_type_dim].values.tolist(), "obs") if combined is not None else None
+
+            # test errors of competitors
+            for model_type in remove_items(model_list, errors.keys()):
+                if model_type not in errors.keys():
+                    errors[model_type] = {}
+                errors[model_type][station] = statistics.calculate_error_metrics(
+                    *map(lambda x: combined.sel(**{self.model_type_dim: x}),
+                         [model_type, "obs"]), dim="index")
+
+            # skill score
             skill_score = statistics.SkillScores(combined, models=model_list, ahead_dim=self.ahead_dim)
             if external_data is not None:
                 skill_score_competitive[station] = skill_score.skill_scores()
@@ -828,7 +838,7 @@ class PostProcessing(RunEnvironment):
                 station_errors = {str(i.values): station_errors.sel(**{dim: i}) for i in sel_index}
             for metric, vals in station_errors.items():
                 if metric == "n":
-                    continue
+                    metric = "count"
                 pd_vals = pd.DataFrame.from_dict({station: vals}).T
                 pd_vals.columns = [f"{metric}(t+{x})" for x in vals.coords["ahead"].values]
                 mc = metric_collection.get(metric, pd.DataFrame())
--
GitLab
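
For context, a minimal self-contained sketch of the pattern the first hunk introduces: error metrics for every competitor not yet covered are computed against the observations and stored in the nested errors dict keyed by model type and station. This is not MLAir code; remove_items and calculate_error_metrics below are simplified stand-ins, and the forecast values, model names, and station id are made up for illustration.

def remove_items(items, to_remove):
    """Simplified stand-in: drop a single item or an iterable of items from a list."""
    if isinstance(to_remove, str):
        to_remove = [to_remove]
    to_remove = set(to_remove)
    return [i for i in items if i not in to_remove]


def calculate_error_metrics(pred, obs):
    """Toy stand-in returning only the mean absolute error and the sample count."""
    n = len(pred)
    return {"mae": sum(abs(p - o) for p, o in zip(pred, obs)) / n, "count": n}


# forecasts of the own model ("nn"), two competitors, and the observations ("obs")
combined = {
    "nn": [1.0, 2.0, 3.0],
    "ols": [1.5, 2.5, 2.5],
    "persistence": [0.5, 1.5, 3.5],
    "obs": [1.2, 2.1, 2.9],
}
station = "DEBW107"  # hypothetical station id
model_list = remove_items(list(combined), "obs")  # every model type except the observations

# the own model's errors are assumed to be present already
errors = {"nn": {station: calculate_error_metrics(combined["nn"], combined["obs"])}}

# as in the patch: evaluate only those competitors whose errors are not stored yet
for model_type in remove_items(model_list, errors.keys()):
    if model_type not in errors.keys():
        errors[model_type] = {}
    errors[model_type][station] = calculate_error_metrics(combined[model_type], combined["obs"])

print(errors)
# {'nn': {'DEBW107': {'mae': ..., 'count': 3}}, 'ols': {...}, 'persistence': {...}}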