Skip to content
Snippets Groups Projects
Commit a997bce5 authored by leufen1's avatar leufen1
Browse files

implemented mean error (bias) as additional error metric

parent 1038a4bb
Branches
No related tags found
3 merge requests!500Develop,!499Resolve "release v2.3.0",!492Resolve "new metric bias / mean error"
Pipeline #112352 failed
This commit is part of merge request !499. Comments created here will be created in the context of that merge request.
...@@ -213,6 +213,11 @@ def mean_absolute_error(a, b, dim=None): ...@@ -213,6 +213,11 @@ def mean_absolute_error(a, b, dim=None):
return np.abs(a - b).mean(dim) return np.abs(a - b).mean(dim)
def mean_error(a, b, dim=None):
    """Calculate mean error (bias) where a is forecast and b the reference (e.g. observation).

    :param a: forecast data
    :param b: reference (e.g. observation)
    :param dim: dimension(s) to average over (``None`` averages over all)

    :returns: mean of the paired differences ``a - b`` along ``dim``
    """
    # Use the paired difference (a - b) so that, with missing values, the same
    # sample pairs are used as in mean_absolute_error and as counted by
    # n = (a - b).notnull().sum(dim) in calculate_error_metrics. For complete
    # data this equals a.mean(dim) - b.mean(dim).
    return (a - b).mean(dim)
def index_of_agreement(a, b, dim=None): def index_of_agreement(a, b, dim=None):
"""Calculate index of agreement (IOA) where a is the forecast and b the reference (e.g. observation).""" """Calculate index of agreement (IOA) where a is the forecast and b the reference (e.g. observation)."""
num = (np.square(b - a)).sum(dim) num = (np.square(b - a)).sum(dim)
...@@ -234,7 +239,7 @@ def modified_normalized_mean_bias(a, b, dim=None): ...@@ -234,7 +239,7 @@ def modified_normalized_mean_bias(a, b, dim=None):
def calculate_error_metrics(a, b, dim):
    """Calculate MSE, ME, RMSE, MAE, IOA, and MNMB. Additionally, return number of used values for calculation.

    :param a: forecast data to calculate metrics for
    :param b: reference (e.g. observation)
    :param dim: dimension to calculate metrics along

    :returns: dict with results for all metrics indicated by lowercase metric short name
    """
    # MSE is computed first because RMSE is derived from it.
    squared_error = mean_squared_error(a, b, dim)
    # Count only positions where both a and b are valid (paired samples).
    sample_count = (a - b).notnull().sum(dim)
    return {
        "mse": squared_error,
        "me": mean_error(a, b, dim),
        "rmse": np.sqrt(squared_error),
        "mae": mean_absolute_error(a, b, dim),
        "ioa": index_of_agreement(a, b, dim),
        "mnmb": modified_normalized_mean_bias(a, b, dim),
        "n": sample_count,
    }
def mann_whitney_u_test(data: pd.DataFrame, reference_col_name: str, **kwargs): def mann_whitney_u_test(data: pd.DataFrame, reference_col_name: str, **kwargs):
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment