Skip to content
Snippets Groups Projects
Commit aebd85ff authored by leufen1's avatar leufen1
Browse files

added more time tracking info to know more about postprocessing.

parent d8e4f926
Branches
Tags
4 merge requests: !413 update release branch, !412 Resolve "release v2.0.0", !363 update 339 by develop, !362 Resolve "free the workers"
Pipeline #84351 passed
......@@ -68,12 +68,13 @@ class TimeTracking(object):
The only disadvantage of the latter implementation is, that the duration is logged but not returned.
"""
def __init__(self, start=True, name="undefined job", logging_level=logging.INFO):
def __init__(self, start=True, name="undefined job", logging_level=logging.INFO, log_on_enter=False):
"""Construct time tracking and start if enabled."""
self.start = None
self.end = None
self._name = name
self._logging = {logging.INFO: logging.info, logging.DEBUG: logging.debug}.get(logging_level, logging.info)
self._log_on_enter = log_on_enter
if start:
self._start()
......@@ -124,6 +125,7 @@ class TimeTracking(object):
def __enter__(self):
"""Context manager."""
self._logging(f"start {self._name}") if self._log_on_enter is True else None
return self
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
......
......@@ -18,7 +18,7 @@ import xarray as xr
from mlair.configuration import path_config
from mlair.data_handler import Bootstraps, KerasIterator
from mlair.helpers.datastore import NameNotFoundInDataStore
from mlair.helpers import TimeTracking, statistics, extract_value, remove_items, to_list, tables
from mlair.helpers import TimeTracking, TimeTrackingWrapper, statistics, extract_value, remove_items, to_list, tables
from mlair.model_modules.linear_model import OrdinaryLeastSquaredModel
from mlair.model_modules import AbstractModelClass
from mlair.plotting.postprocessing_plotting import PlotMonthlySummary, PlotClimatologicalSkillScore, \
......@@ -114,7 +114,7 @@ class PostProcessing(RunEnvironment):
# feature importance bootstraps
if self.data_store.get("evaluate_feature_importance", "postprocessing"):
with TimeTracking(name="calculate feature importance using bootstraps"):
with TimeTracking(name="evaluate_feature_importance", log_on_enter=True):
create_new_bootstraps = self.data_store.get("create_new_bootstraps", "feature_importance")
bootstrap_method = self.data_store.get("bootstrap_method", "feature_importance")
bootstrap_type = self.data_store.get("bootstrap_type", "feature_importance")
......@@ -124,7 +124,7 @@ class PostProcessing(RunEnvironment):
self.report_feature_importance_results(self.feature_importance_skill_scores)
# skill scores and error metrics
with TimeTracking(name="calculate skill scores"):
with TimeTracking(name="calculate_error_metrics", log_on_enter=True):
skill_score_competitive, _, skill_score_climatological, errors = self.calculate_error_metrics()
self.skill_scores = (skill_score_competitive, skill_score_climatological)
self.report_error_metrics(errors)
......@@ -134,12 +134,14 @@ class PostProcessing(RunEnvironment):
# plotting
self.plot()
@TimeTrackingWrapper
def estimate_sample_uncertainty(self, separate_ahead=False):
"""
Estimate sample uncertainty by using a bootstrap approach. Forecasts are split into individual blocks along time
and randomly drawn with replacement. The resulting behaviour of the error indicates the robustness of each
analyzed model to quantify which model might be superior compared to others.

:param separate_ahead: presumably computes the uncertainty separately per lead time when True -- TODO confirm
"""
# NOTE(review): this explicit start message partly duplicates the timing info emitted by @TimeTrackingWrapper
logging.info("start estimate_sample_uncertainty")
# Bootstrap configuration is read from the data store ("uncertainty_estimate" scope) with fallback defaults:
# number of bootstrap draws, block length for the block bootstrap ("1m" = presumably one month -- verify),
# and whether competitor models are included in the evaluation.
n_boots = self.data_store.get_default("n_boots", default=1000, scope="uncertainty_estimate")
block_length = self.data_store.get_default("block_length", default="1m", scope="uncertainty_estimate")
evaluate_competitors = self.data_store.get_default("evaluate_competitors", default=True,
......
Loading (0%) — if the diff does not appear, reload the page.
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment.