Commit 1f9b9eb8 authored by lukas leufen

Merge branch 'lukas_issue380_feat_add-metric-mnmb' into 'develop'

Resolve "add metric MNMB"

See merge request !418
parents 8f51e9d0 59ae0b88
Included in 4 merge requests: !432 "IOA works now also with xarray and on identical data, IOA is included in...", !431 Resolve "release v2.1.0", !430 update recent developments, !418 Resolve "add metric MNMB"
Pipeline #100530 passed
@@ -225,14 +225,28 @@ def index_of_agreement(a, b, dim=None):
    return 1 - frac


def modified_normalized_mean_bias(a, b, dim=None):
    """Calculate modified normalized mean bias (MNMB) where a is the forecast and b the reference (e.g. observation)."""
    N = np.count_nonzero(a) if len(a.shape) == 1 else a.notnull().sum(dim)
    return 2 * ((a - b) / (a + b)).sum(dim) / N
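For reference, the new function computes MNMB = (2/N) * Σ_i (f_i - o_i) / (f_i + o_i) with forecast f and reference o; for strictly positive data each term lies in (-1, 1), so the metric is bounded by ±2. A minimal usage sketch follows (illustrative values only, not taken from the repository; it assumes the function is importable from mlair.helpers.statistics, as the test module further below does):

import numpy as np
import xarray as xr
from mlair.helpers.statistics import modified_normalized_mean_bias

# 1-d numpy input: N is taken as the number of non-zero forecast values
forecast = np.array([1, 2, 3, 4, 7])
observation = np.array([1, 2, 3, 4, 5])
print(modified_normalized_mean_bias(forecast, observation))  # ~0.067, slight overprediction

# xarray input: reduce along a named dimension; N counts the non-NaN forecast values
f = xr.DataArray(np.array([[1., 2., 3.], [4., 5., 6.]]), dims=("index", "value"))
o = xr.DataArray(np.array([[1., 2., 4.], [4., 5., 5.]]), dims=("index", "value"))
print(modified_normalized_mean_bias(f, o, dim="value"))  # one MNMB value per "index"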
def calculate_error_metrics(a, b, dim):
    """Calculate MSE, RMSE, MAE, IOA, and MNMB. Additionally, return number of used values for calculation.

    :param a: forecast data to calculate metrics for
    :param b: reference (e.g. observation)
    :param dim: dimension to calculate metrics along

    :returns: dict with results for all metrics indicated by lowercase metric short name
    """
    mse = mean_squared_error(a, b, dim)
    rmse = np.sqrt(mse)
    mae = mean_absolute_error(a, b, dim)
    ioa = index_of_agreement(a, b, dim)
    mnmb = modified_normalized_mean_bias(a, b, dim)
    n = (a - b).notnull().sum(dim)
    return {"mse": mse, "rmse": rmse, "mae": mae, "ioa": ioa, "mnmb": mnmb, "n": n}
def mann_whitney_u_test(data: pd.DataFrame, reference_col_name: str, **kwargs):
...
@@ -6,7 +6,7 @@ import xarray as xr
from mlair.helpers.statistics import standardise, standardise_inverse, standardise_apply, centre, centre_inverse, \
    centre_apply, apply_inverse_transformation, min_max, min_max_inverse, min_max_apply, log, log_inverse, log_apply, \
    create_single_bootstrap_realization, calculate_average, create_n_bootstrap_realizations, mean_squared_error, \
    mean_absolute_error, calculate_error_metrics, index_of_agreement, modified_normalized_mean_bias
from mlair.helpers.testing import check_nested_equality

lazy = pytest.lazy_fixture

@@ -323,6 +323,41 @@ class TestIndexOfAgreement:
        assert xr.testing.assert_allclose(res, expected, atol=10**-2) is None
class TestMNMB:

    def test_modified_normalized_mean_bias(self):
        d1 = np.array([1, 2, 3, 4, 5])
        d2 = np.array([1, 2, 3, 4, 5])
        assert modified_normalized_mean_bias(d1, d2) == 0
        d1 = np.array([1, 2, 3, 4, 7])
        assert np.testing.assert_almost_equal(modified_normalized_mean_bias(d1, d2), 0.0666, 3) is None
        d1 = np.array([3, 4, 5, 6, 7])
        assert np.testing.assert_almost_equal(modified_normalized_mean_bias(d1, d2), 0.58, 3) is None
        assert np.testing.assert_almost_equal(modified_normalized_mean_bias(d2, d1), -0.58, 3) is None

    def test_modified_normalized_mean_bias_xarray(self):
        d1 = np.array([np.array([1, 2, 3, 4, 5]), np.array([1, 2, 3, 4, 5]), np.array([1, 2, 3, 4, 5])])
        d2 = np.array([np.array([2, 4, 3, 4, 6]), np.array([2, 3, 3, 4, 5]), np.array([0, 1, 3, 4, 5])])
        shape = d1.shape
        coords = {'index': range(shape[0]), 'value': range(shape[1])}
        x_array1 = xr.DataArray(d1, coords=coords, dims=coords.keys())
        x_array2 = xr.DataArray(d2, coords=coords, dims=coords.keys())
        expected = xr.DataArray(np.array([0, 0, 0]), coords={"index": [0, 1, 2]}, dims=["index"])
        res = modified_normalized_mean_bias(x_array1, x_array1, dim="value")
        assert xr.testing.assert_equal(res, expected) is None
        expected = xr.DataArray(np.array([0, 0, 0, 0, 0]), coords={"value": [0, 1, 2, 3, 4]}, dims=["value"])
        res = modified_normalized_mean_bias(x_array1, x_array1, dim="index")
        assert xr.testing.assert_equal(res, expected) is None
        expected = xr.DataArray(np.array([-0.3030, -0.2133, 0.5333]), coords={"index": [0, 1, 2]}, dims=["index"])
        res = modified_normalized_mean_bias(x_array1, x_array2, dim="value")
        assert xr.testing.assert_allclose(res, expected, atol=10**-2) is None
        res = modified_normalized_mean_bias(x_array2, x_array1, dim="value")
        assert xr.testing.assert_allclose(res, -expected, atol=10**-2) is None
        expected = xr.DataArray(np.array([0.2222, -0.1333, 0, 0, -0.0606]), coords={"value": [0, 1, 2, 3, 4]}, dims=["value"])
        res = modified_normalized_mean_bias(x_array1, x_array2, dim="index")
        assert xr.testing.assert_allclose(res, expected, atol=10**-2) is None
class TestCalculateErrorMetrics:

    def test_calculate_error_metrics(self):
@@ -336,6 +371,7 @@ class TestCalculateErrorMetrics:
                    "rmse": np.sqrt(xr.DataArray(np.array([1, 2, 0, 0, 1./3]), coords={"value": [0, 1, 2, 3, 4]}, dims=["value"])),
                    "mae": xr.DataArray(np.array([1, 4./3, 0, 0, 1./3]), coords={"value": [0, 1, 2, 3, 4]}, dims=["value"]),
                    "ioa": xr.DataArray(np.array([0.3721, 0.4255, 1, 1, 0.4706]), coords={"value": [0, 1, 2, 3, 4]}, dims=["value"]),
                    "mnmb": xr.DataArray(np.array([0.2222, -0.1333, 0, 0, -0.0606]), coords={"value": [0, 1, 2, 3, 4]}, dims=["value"]),
                    "n": xr.DataArray(np.array([3, 3, 3, 3, 3]), coords={"value": [0, 1, 2, 3, 4]}, dims=["value"])}
        assert check_nested_equality(expected, calculate_error_metrics(x_array1, x_array2, "index"), 3) is True
@@ -343,5 +379,6 @@ class TestCalculateErrorMetrics:
                    "rmse": np.sqrt(xr.DataArray(np.array([1.2, 0.4, 0.4]), coords={"index": [0, 1, 2]}, dims=["index"])),
                    "mae": xr.DataArray(np.array([0.8, 0.4, 0.4]), coords={"index": [0, 1, 2]}, dims=["index"]),
                    "ioa": xr.DataArray(np.array([0.8478, 0.9333, 0.9629]), coords={"index": [0, 1, 2]}, dims=["index"]),
                    "mnmb": xr.DataArray(np.array([-0.3030, -0.2133, 0.5333]), coords={"index": [0, 1, 2]}, dims=["index"]),
                    "n": xr.DataArray(np.array([5, 5, 5]), coords={"index": [0, 1, 2]}, dims=["index"])}
        assert check_nested_equality(expected, calculate_error_metrics(x_array1, x_array2, "value"), 3) is True