diff --git a/mlair/run_modules/post_processing.py b/mlair/run_modules/post_processing.py
index 425b26cfbde6a22b2dd61b87478f00c52ccda87e..0ff4afb37e76c0c03127bd84258ccb5fd2eb4a90 100644
--- a/mlair/run_modules/post_processing.py
+++ b/mlair/run_modules/post_processing.py
@@ -771,10 +771,20 @@ class PostProcessing(RunEnvironment):
                     *map(lambda x: external_data.sel(**{self.model_type_dim: x}), [model_type, "obs"]),
                     dim="index")
 
-            # skill score
+            # load competitors
             competitor = self.load_competitors(station)
             combined = self._combine_forecasts(external_data, competitor, dim=self.model_type_dim)
             model_list = remove_items(combined.coords[self.model_type_dim].values.tolist(), "obs") if combined is not None else None
+
+            # test errors of competitors
+            for model_type in remove_items(model_list, errors.keys()):
+                if model_type not in errors.keys():
+                    errors[model_type] = {}
+                errors[model_type][station] = statistics.calculate_error_metrics(
+                    *map(lambda x: combined.sel(**{self.model_type_dim: x}),
+                         [model_type, "obs"]), dim="index")
+
+            # skill score
             skill_score = statistics.SkillScores(combined, models=model_list, ahead_dim=self.ahead_dim)
             if external_data is not None:
                 skill_score_competitive[station] = skill_score.skill_scores()
@@ -828,7 +838,7 @@ class PostProcessing(RunEnvironment):
                 station_errors = {str(i.values): station_errors.sel(**{dim: i}) for i in sel_index}
                 for metric, vals in station_errors.items():
                     if metric == "n":
-                        continue
+                        metric = "count"
                     pd_vals = pd.DataFrame.from_dict({station: vals}).T
                     pd_vals.columns = [f"{metric}(t+{x})" for x in vals.coords["ahead"].values]
                     mc = metric_collection.get(metric, pd.DataFrame())