Skip to content
GitLab
Explore
Sign in
Primary navigation
Search or go to…
Project
MLAir
Manage
Activity
Members
Labels
Plan
Issues
Issue boards
Milestones
Wiki
Code
Merge requests
Repository
Branches
Commits
Tags
Repository graph
Compare revisions
Snippets
Build
Pipelines
Jobs
Pipeline schedules
Artifacts
Deploy
Releases
Container registry
Model registry
Operate
Environments
Monitor
Incidents
Analyze
Value stream analytics
Contributor analytics
CI/CD analytics
Repository analytics
Model experiments
Help
Help
Support
GitLab documentation
Compare GitLab plans
Community forum
Contribute to GitLab
Provide feedback
Keyboard shortcuts
?
Snippets
Groups
Projects
Show more breadcrumbs
esde
machine-learning
MLAir
Commits
36905028
Commit
36905028
authored
3 years ago
by
leufen1
Browse files
Options
Downloads
Patches
Plain Diff
solved issue with zero division
parent
194fd5de
No related branches found
No related tags found
4 merge requests
!432
IOA works now also with xarray and on identical data, IOA is included in...
,
!431
Resolve "release v2.1.0"
,
!430
update recent developments
,
!415
Resolve "add metric IOA"
Pipeline
#99727
passed
3 years ago
Stage: test
Stage: docs
Stage: pages
Stage: deploy
Changes
3
Pipelines
1
Show whitespace changes
Inline
Side-by-side
Showing
3 changed files
mlair/helpers/statistics.py
+5
-1
5 additions, 1 deletion
mlair/helpers/statistics.py
mlair/run_modules/post_processing.py
+11
-4
11 additions, 4 deletions
mlair/run_modules/post_processing.py
test/test_helpers/test_statistics.py
+4
-2
4 additions, 2 deletions
test/test_helpers/test_statistics.py
with
20 additions
and
7 deletions
mlair/helpers/statistics.py
+
5
−
1
View file @
36905028
...
@@ -217,7 +217,11 @@ def index_of_agreement(a, b, dim=None):
...
@@ -217,7 +217,11 @@ def index_of_agreement(a, b, dim=None):
b_mean
=
(
b
*
np
.
ones
(
1
)).
mean
(
dim
)
b_mean
=
(
b
*
np
.
ones
(
1
)).
mean
(
dim
)
den
=
(
np
.
square
(
np
.
abs
(
b
-
b_mean
)
+
np
.
abs
(
a
-
b_mean
))).
sum
(
dim
)
den
=
(
np
.
square
(
np
.
abs
(
b
-
b_mean
)
+
np
.
abs
(
a
-
b_mean
))).
sum
(
dim
)
frac
=
num
/
den
frac
=
num
/
den
frac
[
num
==
0
]
=
0
# 0/0 division for exactly equal arrays
# issue with 0/0 division for exactly equal arrays
if
isinstance
(
frac
,
(
int
,
float
)):
frac
=
0
if
num
==
0
else
frac
else
:
frac
[
num
==
0
]
=
0
return
1
-
frac
return
1
-
frac
...
...
This diff is collapsed.
Click to expand it.
mlair/run_modules/post_processing.py
+
11
−
4
View file @
36905028
...
@@ -86,6 +86,7 @@ class PostProcessing(RunEnvironment):
...
@@ -86,6 +86,7 @@ class PostProcessing(RunEnvironment):
self
.
skill_scores
=
None
self
.
skill_scores
=
None
self
.
feature_importance_skill_scores
=
None
self
.
feature_importance_skill_scores
=
None
self
.
uncertainty_estimate
=
None
self
.
uncertainty_estimate
=
None
self
.
block_mse_per_station
=
None
self
.
competitor_path
=
self
.
data_store
.
get
(
"
competitor_path
"
)
self
.
competitor_path
=
self
.
data_store
.
get
(
"
competitor_path
"
)
self
.
competitors
=
to_list
(
self
.
data_store
.
get_default
(
"
competitors
"
,
default
=
[]))
self
.
competitors
=
to_list
(
self
.
data_store
.
get_default
(
"
competitors
"
,
default
=
[]))
self
.
forecast_indicator
=
"
nn
"
self
.
forecast_indicator
=
"
nn
"
...
@@ -148,8 +149,10 @@ class PostProcessing(RunEnvironment):
...
@@ -148,8 +149,10 @@ class PostProcessing(RunEnvironment):
block_length
=
self
.
data_store
.
get_default
(
"
block_length
"
,
default
=
"
1m
"
,
scope
=
"
uncertainty_estimate
"
)
block_length
=
self
.
data_store
.
get_default
(
"
block_length
"
,
default
=
"
1m
"
,
scope
=
"
uncertainty_estimate
"
)
evaluate_competitors
=
self
.
data_store
.
get_default
(
"
evaluate_competitors
"
,
default
=
True
,
evaluate_competitors
=
self
.
data_store
.
get_default
(
"
evaluate_competitors
"
,
default
=
True
,
scope
=
"
uncertainty_estimate
"
)
scope
=
"
uncertainty_estimate
"
)
block_mse
=
self
.
calculate_block_mse
(
evaluate_competitors
=
evaluate_competitors
,
separate_ahead
=
separate_ahead
,
block_mse
,
block_mse_per_station
=
self
.
calculate_block_mse
(
evaluate_competitors
=
evaluate_competitors
,
separate_ahead
=
separate_ahead
,
block_length
=
block_length
)
block_length
=
block_length
)
self
.
block_mse_per_station
=
block_mse_per_station
self
.
uncertainty_estimate
=
statistics
.
create_n_bootstrap_realizations
(
self
.
uncertainty_estimate
=
statistics
.
create_n_bootstrap_realizations
(
block_mse
,
dim_name_time
=
self
.
index_dim
,
dim_name_model
=
self
.
model_type_dim
,
block_mse
,
dim_name_time
=
self
.
index_dim
,
dim_name_model
=
self
.
model_type_dim
,
dim_name_boots
=
self
.
uncertainty_estimate_boot_dim
,
n_boots
=
n_boots
)
dim_name_boots
=
self
.
uncertainty_estimate_boot_dim
,
n_boots
=
n_boots
)
...
@@ -227,12 +230,16 @@ class PostProcessing(RunEnvironment):
...
@@ -227,12 +230,16 @@ class PostProcessing(RunEnvironment):
# calc mse for each block (single station)
# calc mse for each block (single station)
mse
=
errors
.
resample
(
indexer
=
{
index_dim
:
block_length
}).
mean
(
skipna
=
True
)
mse
=
errors
.
resample
(
indexer
=
{
index_dim
:
block_length
}).
mean
(
skipna
=
True
)
collector
.
append
(
mse
.
assign_coords
({
coll_dim
:
station
}))
collector
.
append
(
mse
.
assign_coords
({
coll_dim
:
station
}))
# combine all mse blocks
mse_blocks_per_station
=
xr
.
concat
(
collector
,
dim
=
coll_dim
)
# calc mse for each block (average over all stations)
# calc mse for each block (average over all stations)
mse_blocks
=
xr
.
concat
(
collector
,
dim
=
coll_dim
)
.
mean
(
dim
=
coll_dim
,
skipna
=
True
)
mse_blocks
=
mse_blocks_per_station
.
mean
(
dim
=
coll_dim
,
skipna
=
True
)
# average also on ahead steps
# average also on ahead steps
if
separate_ahead
is
False
:
if
separate_ahead
is
False
:
mse_blocks
=
mse_blocks
.
mean
(
dim
=
self
.
ahead_dim
,
skipna
=
True
)
mse_blocks
=
mse_blocks
.
mean
(
dim
=
self
.
ahead_dim
,
skipna
=
True
)
return
mse_blocks
mse_blocks_per_station
=
mse_blocks_per_station
.
mean
(
dim
=
self
.
ahead_dim
,
skipna
=
True
)
return
mse_blocks
,
mse_blocks_per_station
def
create_error_array
(
self
,
data
):
def
create_error_array
(
self
,
data
):
"""
Calculate squared error of all given time series in relation to observation.
"""
"""
Calculate squared error of all given time series in relation to observation.
"""
...
...
This diff is collapsed.
Click to expand it.
test/test_helpers/test_statistics.py
+
4
−
2
View file @
36905028
...
@@ -315,10 +315,12 @@ class TestIndexOfAgreement:
...
@@ -315,10 +315,12 @@ class TestIndexOfAgreement:
coords
=
{
'
index
'
:
range
(
shape
[
0
]),
'
value
'
:
range
(
shape
[
1
])}
coords
=
{
'
index
'
:
range
(
shape
[
0
]),
'
value
'
:
range
(
shape
[
1
])}
x_array1
=
xr
.
DataArray
(
d1
,
coords
=
coords
,
dims
=
coords
.
keys
())
x_array1
=
xr
.
DataArray
(
d1
,
coords
=
coords
,
dims
=
coords
.
keys
())
x_array2
=
xr
.
DataArray
(
d2
,
coords
=
coords
,
dims
=
coords
.
keys
())
x_array2
=
xr
.
DataArray
(
d2
,
coords
=
coords
,
dims
=
coords
.
keys
())
expected
=
xr
.
DataArray
(
np
.
array
([
1
,
1
,
1
]),
coords
=
{
"
index
"
:
[
0
,
1
,
2
]},
dims
=
[
"
index
"
])
res
=
index_of_agreement
(
x_array1
,
x_array1
,
dim
=
"
value
"
)
res
=
index_of_agreement
(
x_array1
,
x_array1
,
dim
=
"
value
"
)
assert
np
.
testing
.
assert_almost_equal
(
res
,
1
)
is
None
assert
xr
.
testing
.
assert_equal
(
res
,
expected
)
is
None
expected
=
xr
.
DataArray
(
np
.
array
([
0.8478
,
0.9333
,
0.9629
]),
coords
=
{
"
index
"
:
[
0
,
1
,
2
]},
dims
=
[
"
index
"
])
res
=
index_of_agreement
(
x_array1
,
x_array2
,
dim
=
"
value
"
)
res
=
index_of_agreement
(
x_array1
,
x_array2
,
dim
=
"
value
"
)
assert
np
.
testing
.
assert_almost_equal
(
res
,
0.919
,
2
)
is
None
assert
xr
.
testing
.
assert_allclose
(
res
,
expected
,
atol
=
10
**-
2
)
is
None
class
TestCalculateErrorMetrics
:
class
TestCalculateErrorMetrics
:
...
...
This diff is collapsed.
Click to expand it.
Preview
0%
Loading
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Save comment
Cancel
Please
register
or
sign in
to comment