diff --git a/mlair/configuration/defaults.py b/mlair/configuration/defaults.py
index d1563b23fd7c4eefc25fdc1debc3e940ded648c5..9bb15068ce3a5ad934f7b0251b84cb19f37702f6 100644
--- a/mlair/configuration/defaults.py
+++ b/mlair/configuration/defaults.py
@@ -69,6 +69,7 @@ DEFAULT_DATA_ORIGIN = {"cloudcover": "REA", "humidity": "REA", "pblheight": "REA
 DEFAULT_USE_MULTIPROCESSING = True
 DEFAULT_USE_MULTIPROCESSING_ON_DEBUG = False
 DEFAULT_MAX_NUMBER_MULTIPROCESSING = 16
+DEFAULT_CREATE_SNAPSHOT = False
 
 
 def get_defaults():
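For context, a minimal sketch of how the new default is expected to surface, assuming `get_defaults()` collects the module-level `DEFAULT_*` constants into a dictionary keyed by their lower-cased names (that mapping is not shown in this hunk):

    from mlair.configuration.defaults import get_defaults

    # Snapshots stay disabled unless requested explicitly.
    assert get_defaults().get("create_snapshot") is False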
"Opossum", + "Orangutan", + "Orca", + "Ostrich", + "Otter", + "Owl", + "Ox", + "Panda", + "Panther", + "Parakeet", + "Parrot", + "Parrotfish", + "Partridge", + "Peacock", + "Peafowl", + "Pelican", + "Penguin", + "Perch", + "Pheasant", + "Pig", + "Pigeon", + "Pike", + "Pinniped", + "Piranha", + "Planarian", + "Platypus", + "Pony", + "Porcupine", + "Porpoise", + "Possum", + "Prawn", + "Primate", + "Ptarmigan", + "Puffin", + "Puma", + "Python", + "Quail", + "Quelea", + "Quokka", + "Rabbit", + "Raccoon", + "Rat", + "Rattlesnake", + "Raven", + "Reindeer", + "Reptile", + "Rhinoceros", + "Roadrunner", + "Rodent", + "Rook", + "Rooster", + "Roundworm", + "Sailfish", + "Salamander", + "Salmon", + "Sawfish", + "Scallop", + "Scorpion", + "Seahorse", + "Shark", + "Sheep", + "Shrew", + "Shrimp", + "Silkworm", + "Silverfish", + "Skink", + "Skunk", + "Sloth", + "Slug", + "Smelt", + "Snail", + "Snake", + "Snipe", + "Sole", + "Sparrow", + "Spider", + "Spoonbill", + "Squid", + "Squirrel", + "Starfish", + "Stingray", + "Stoat", + "Stork", + "Sturgeon", + "Swallow", + "Swan", + "Swift", + "Swordfish", + "Swordtail", + "Tahr", + "Takin", + "Tapir", + "Tarantula", + "Tarsier", + "Termite", + "Tern", + "Thrush", + "Tick", + "Tiger", + "Tiglon", + "Toad", + "Tortoise", + "Toucan", + "Trout", + "Tuna", + "Turkey", + "Turtle", + "Tyrannosaurus", + "Urial", + "Vicuna", + "Viper", + "Vole", + "Vulture", + "Wallaby", + "Walrus", + "Warbler", + "Wasp", + "Weasel", + "Whale", + "Whippet", + "Whitefish", + "Wildcat", + "Wildebeest", + "Wildfowl", + "Wolf", + "Wolverine", + "Wombat", + "Woodpecker", + "Worm", + "Wren", + "Xerinae", + "Yak", + "Zebra", ] diff --git a/mlair/helpers/testing.py b/mlair/helpers/testing.py index eb8982ae3625cfccedf894717eebf299faffb3ee..08ac7cab21567166149d7c05f1fd6450760856a5 100644 --- a/mlair/helpers/testing.py +++ b/mlair/helpers/testing.py @@ -1,4 +1,5 @@ """Helper functions that are used to simplify testing.""" +import logging import re from typing import Union, Pattern, List import inspect @@ -105,52 +106,72 @@ def get_all_args(*args, remove=None, add=None): return res -def check_nested_equality(obj1, obj2, precision=None): +def check_nested_equality(obj1, obj2, precision=None, skip_args=None): """Check for equality in nested structures. 
diff --git a/mlair/helpers/testing.py b/mlair/helpers/testing.py
index eb8982ae3625cfccedf894717eebf299faffb3ee..08ac7cab21567166149d7c05f1fd6450760856a5 100644
--- a/mlair/helpers/testing.py
+++ b/mlair/helpers/testing.py
@@ -1,4 +1,5 @@
 """Helper functions that are used to simplify testing."""
+import logging
 import re
 from typing import Union, Pattern, List
 import inspect
@@ -105,52 +106,72 @@ def get_all_args(*args, remove=None, add=None):
     return res
 
 
-def check_nested_equality(obj1, obj2, precision=None):
+def check_nested_equality(obj1, obj2, precision=None, skip_args=None):
     """Check for equality in nested structures.
 
-    Use precision to indicate number of decimals to check for consistency"""
+    Use precision to indicate the number of decimals to check; use skip_args to exclude dict keys from comparison."""
     assert precision is None or isinstance(precision, int)
-
+    message = ""
     try:
-        print(f"check type {type(obj1)} and {type(obj2)}")
+        # print(f"check type {type(obj1)} and {type(obj2)}")
+        message = f"{type(obj1)}!={type(obj2)}\n{obj1} and {obj2} do not match"
         assert type(obj1) == type(obj2)
-
         if isinstance(obj1, (tuple, list)):
-            print(f"check length {len(obj1)} and {len(obj2)}")
+            # print(f"check length {len(obj1)} and {len(obj2)}")
+            message = f"{len(obj1)}!={len(obj2)}\nlengths of {obj1} and {obj2} do not match"
             assert len(obj1) == len(obj2)
             for pos in range(len(obj1)):
-                print(f"check pos {obj1[pos]} and {obj2[pos]}")
-                assert check_nested_equality(obj1[pos], obj2[pos], precision) is True
+                # print(f"check pos {obj1[pos]} and {obj2[pos]}")
+                message = f"{obj1[pos]}!={obj2[pos]}\nobjects on pos {pos} of {obj1} and {obj2} do not match"
+                assert check_nested_equality(obj1[pos], obj2[pos], precision=precision, skip_args=skip_args) is True
         elif isinstance(obj1, dict):
-            print(f"check keys {obj1.keys()} and {obj2.keys()}")
-            assert sorted(obj1.keys()) == sorted(obj2.keys())
-            for k in obj1.keys():
-                print(f"check pos {obj1[k]} and {obj2[k]}")
-                assert check_nested_equality(obj1[k], obj2[k], precision) is True
+            obj1_keys, obj2_keys = obj1.keys(), obj2.keys()
+            if skip_args is not None and isinstance(skip_args, (str, list)):
+                skip_args = to_list(skip_args)
+                obj1_keys = list(set(obj1_keys).difference(skip_args))
+                obj2_keys = list(set(obj2_keys).difference(skip_args))
+            # print(f"check keys {obj1.keys()} and {obj2.keys()}")
+            message = f"{sorted(obj1_keys)}!={sorted(obj2_keys)}\n{set(obj1_keys).symmetric_difference(obj2_keys)} " \
+                      f"are not in both sorted key lists"
+            assert sorted(obj1_keys) == sorted(obj2_keys)
+            for k in obj1_keys:
+                # print(f"check pos {obj1[k]} and {obj2[k]}")
+                message = f"{obj1[k]}!={obj2[k]}\nobjects for key {k} of {obj1} and {obj2} do not match"
+                assert check_nested_equality(obj1[k], obj2[k], precision=precision, skip_args=skip_args) is True
         elif isinstance(obj1, xr.DataArray):
             if precision is None:
-                print(f"check xr {obj1} and {obj2}")
+                # print(f"check xr {obj1} and {obj2}")
+                message = f"{obj1}!={obj2}\n{obj1} and {obj2} do not match"
                 assert xr.testing.assert_equal(obj1, obj2) is None
             else:
-                print(f"check xr {obj1} and {obj2} with precision {precision}")
+                # print(f"check xr {obj1} and {obj2} with precision {precision}")
+                message = f"{obj1}!={obj2} with precision {precision}\n{obj1} and {obj2} do not match"
                 assert xr.testing.assert_allclose(obj1, obj2, atol=10**(-precision)) is None
         elif isinstance(obj1, np.ndarray):
             if precision is None:
-                print(f"check np {obj1} and {obj2}")
+                # print(f"check np {obj1} and {obj2}")
+                message = f"{obj1}!={obj2}\n{obj1} and {obj2} do not match"
                 assert np.testing.assert_array_equal(obj1, obj2) is None
             else:
-                print(f"check np {obj1} and {obj2} with precision {precision}")
+                # print(f"check np {obj1} and {obj2} with precision {precision}")
+                message = f"{obj1}!={obj2} with precision {precision}\n{obj1} and {obj2} do not match"
                 assert np.testing.assert_array_almost_equal(obj1, obj2, decimal=precision) is None
         else:
             if isinstance(obj1, (int, float)) and isinstance(obj2, (int, float)):
                 if precision is None:
-                    print(f"check number equal {obj1} and {obj2}")
+                    # print(f"check number equal {obj1} and {obj2}")
+                    message = f"{obj1}!={obj2}\n{obj1} and {obj2} do not match"
                     assert np.testing.assert_equal(obj1, obj2) is None
                 else:
-                    print(f"check number equal {obj1} and {obj2} with precision {precision}")
+                    # print(f"check number equal {obj1} and {obj2} with precision {precision}")
+                    message = f"{obj1}!={obj2} with precision {precision}\n{obj1} and {obj2} do not match"
                     assert np.testing.assert_almost_equal(obj1, obj2, decimal=precision) is None
             else:
-                print(f"check equal {obj1} and {obj2}")
+                # print(f"check equal {obj1} and {obj2}")
+                message = f"{obj1}!={obj2}\n{obj1} and {obj2} do not match"
                 assert obj1 == obj2
     except AssertionError:
+        message = message.split("\n")
+        logging.info(message[0])
+        logging.debug(message[1])
         return False
     return True
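A short usage sketch of the extended helper; all values are invented for illustration:

    import numpy as np

    from mlair.helpers.testing import check_nested_equality

    a = {"stations": ["DEBW107"], "epochs": 20, "data": np.array([1.0, 2.0])}
    b = {"stations": ["DEBW013"], "epochs": 20, "data": np.array([1.0, 2.00001])}

    # "stations" is ignored via skip_args and the arrays agree to 3 decimals,
    # so both structures count as equal ...
    assert check_nested_equality(a, b, precision=3, skip_args="stations") is True

    # ... while without skip_args the differing station lists are compared and fail.
    assert check_nested_equality(a, b, precision=3) is False

Note that on a failed comparison the function no longer prints every step but logs the first mismatch at info level and its details at debug level.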
diff --git a/mlair/run_modules/experiment_setup.py b/mlair/run_modules/experiment_setup.py
index adef978498b619d744f5b06f9fdbb219c52ee5ec..f89633cbe0f80f26dbb2481ca24a7fd294ee6888 100644
--- a/mlair/run_modules/experiment_setup.py
+++ b/mlair/run_modules/experiment_setup.py
@@ -23,8 +23,8 @@ from mlair.configuration.defaults import DEFAULT_STATIONS, DEFAULT_VAR_ALL_DICT,
     DEFAULT_USE_MULTIPROCESSING, DEFAULT_USE_MULTIPROCESSING_ON_DEBUG, DEFAULT_MAX_NUMBER_MULTIPROCESSING, \
     DEFAULT_FEATURE_IMPORTANCE_BOOTSTRAP_TYPE, DEFAULT_FEATURE_IMPORTANCE_BOOTSTRAP_METHOD, DEFAULT_OVERWRITE_LAZY_DATA, \
     DEFAULT_UNCERTAINTY_ESTIMATE_BLOCK_LENGTH, DEFAULT_UNCERTAINTY_ESTIMATE_EVALUATE_COMPETITORS, \
-    DEFAULT_UNCERTAINTY_ESTIMATE_N_BOOTS, DEFAULT_DO_UNCERTAINTY_ESTIMATE, DEFAULT_EARLY_STOPPING_EPOCHS, \
-    DEFAULT_RESTORE_BEST_MODEL_WEIGHTS, DEFAULT_COMPETITORS
+    DEFAULT_UNCERTAINTY_ESTIMATE_N_BOOTS, DEFAULT_DO_UNCERTAINTY_ESTIMATE, DEFAULT_CREATE_SNAPSHOT, \
+    DEFAULT_EARLY_STOPPING_EPOCHS, DEFAULT_RESTORE_BEST_MODEL_WEIGHTS, DEFAULT_COMPETITORS
 from mlair.data_handler import DefaultDataHandler
 from mlair.run_modules.run_environment import RunEnvironment
 from mlair.model_modules.fully_connected_networks import FCN_64_32_16 as VanillaModel
@@ -196,7 +196,12 @@
     :param transformation_file: Use transformation options from this file for transformation
     :param calculate_fresh_transformation: can either be True or False, indicates if new transformation options should
         be calculated in any case (transformation_file is not used in this case!).
-
+    :param snapshot_path: path to store the snapshot of the current run (by default inside the experiment path)
+    :param create_snapshot: indicates whether a snapshot of the current run should be created (default False)
+    :param snapshot_load_path: path to load a snapshot from (default None). In contrast to `snapshot_path`, which is
+        only used to store a snapshot, `snapshot_load_path` indicates where to load a snapshot from. If this
+        parameter is not provided at all, no snapshot is loaded. Note that the workflow applies the default
+        preprocessing (without loading a snapshot) only if this parameter is None.
     """
 
     def __init__(self,
@@ -236,7 +241,8 @@ class ExperimentSetup(RunEnvironment):
                  overwrite_lazy_data: bool = None, uncertainty_estimate_block_length: str = None,
                  uncertainty_estimate_evaluate_competitors: bool = None, uncertainty_estimate_n_boots: int = None,
                  do_uncertainty_estimate: bool = None, model_display_name: str = None, transformation_file: str = None,
-                 calculate_fresh_transformation: bool = None, **kwargs):
+                 calculate_fresh_transformation: bool = None, snapshot_load_path: str = None,
+                 create_snapshot: bool = None, snapshot_path: str = None, **kwargs):
 
         # create run framework
         super().__init__()
@@ -312,6 +318,13 @@ class ExperimentSetup(RunEnvironment):
         self._set_param("tmp_path", None, os.path.join(experiment_path, "tmp"))
         path_config.check_path_and_create(self.data_store.get("tmp_path"), remove_existing=True)
 
+        # snapshot settings
+        self._set_param("snapshot_path", snapshot_path, default=os.path.join(experiment_path, "snapshot"))
+        path_config.check_path_and_create(self.data_store.get("snapshot_path"), remove_existing=False)
+        self._set_param("create_snapshot", create_snapshot, default=DEFAULT_CREATE_SNAPSHOT)
+        if snapshot_load_path is not None:
+            self._set_param("snapshot_load_path", snapshot_load_path)
+
         # setup for data
         self._set_param("stations", stations, default=DEFAULT_STATIONS, apply=helpers.to_list)
         self._set_param("statistics_per_var", statistics_per_var, default=DEFAULT_VAR_ALL_DICT)
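From the user's perspective the three parameters combine as follows. This sketch omits the rest of the workflow setup (normally `ExperimentSetup` runs as part of a workflow), and the pickle file name is hypothetical — the actual name is logged by the run that creates the snapshot:

    from mlair.run_modules.experiment_setup import ExperimentSetup

    # First run: preprocess as usual and store a snapshot afterwards.
    ExperimentSetup(stations=["DEBW107"], create_snapshot=True)

    # Later run: skip the default preprocessing and restore the stored state instead.
    ExperimentSetup(stations=["DEBW107"],
                    snapshot_load_path="<experiment_path>/snapshot/snapshot_preprocessing_otter.pickle")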
""" def __init__(self, @@ -236,7 +241,8 @@ class ExperimentSetup(RunEnvironment): overwrite_lazy_data: bool = None, uncertainty_estimate_block_length: str = None, uncertainty_estimate_evaluate_competitors: bool = None, uncertainty_estimate_n_boots: int = None, do_uncertainty_estimate: bool = None, model_display_name: str = None, transformation_file: str = None, - calculate_fresh_transformation: bool = None, **kwargs): + calculate_fresh_transformation: bool = None, snapshot_load_path: str = None, + create_snapshot: bool = None, snapshot_path: str = None, **kwargs): # create run framework super().__init__() @@ -312,6 +318,13 @@ class ExperimentSetup(RunEnvironment): self._set_param("tmp_path", None, os.path.join(experiment_path, "tmp")) path_config.check_path_and_create(self.data_store.get("tmp_path"), remove_existing=True) + # snapshot settings + self._set_param("snapshot_path", snapshot_path, default=os.path.join(experiment_path, "snapshot")) + path_config.check_path_and_create(self.data_store.get("snapshot_path"), remove_existing=False) + self._set_param("create_snapshot", create_snapshot, default=DEFAULT_CREATE_SNAPSHOT) + if snapshot_load_path is not None: + self._set_param("snapshot_load_path", snapshot_load_path) + # setup for data self._set_param("stations", stations, default=DEFAULT_STATIONS, apply=helpers.to_list) self._set_param("statistics_per_var", statistics_per_var, default=DEFAULT_VAR_ALL_DICT) diff --git a/mlair/run_modules/pre_processing.py b/mlair/run_modules/pre_processing.py index 2b3bfa123dda3a07ab572ab34e94b18f38d20fcb..41d863a1cec5b86487374e08655c0c86815f3417 100644 --- a/mlair/run_modules/pre_processing.py +++ b/mlair/run_modules/pre_processing.py @@ -16,9 +16,10 @@ import dill import pandas as pd from mlair.data_handler import DataCollection, AbstractDataHandler -from mlair.helpers import TimeTracking, to_list, tables +from mlair.helpers import TimeTracking, to_list, tables, remove_items from mlair.configuration import path_config from mlair.helpers.data_sources.toar_data import EmptyQueryResult +from mlair.helpers.testing import check_nested_equality from mlair.run_modules.run_environment import RunEnvironment @@ -59,16 +60,22 @@ class PreProcessing(RunEnvironment): self._run() def _run(self): - stations = self.data_store.get("stations") - data_handler = self.data_store.get("data_handler") - _, valid_stations = self.validate_station(data_handler, stations, - "preprocessing") # , store_processed_data=False) - if len(valid_stations) == 0: - raise ValueError("Couldn't find any valid data according to given parameters. Abort experiment run.") - self.data_store.set("stations", valid_stations) - self.split_train_val_test() + snapshot_load_path = self.data_store.get_default("snapshot_load_path", default=None) + if snapshot_load_path is None: + stations = self.data_store.get("stations") + data_handler = self.data_store.get("data_handler") + _, valid_stations = self.validate_station(data_handler, stations, + "preprocessing") # , store_processed_data=False) + if len(valid_stations) == 0: + raise ValueError("Couldn't find any valid data according to given parameters. 
Abort experiment run.") + self.data_store.set("stations", valid_stations) + self.split_train_val_test() + else: + self.load_snapshot(snapshot_load_path) self.report_pre_processing() self.prepare_competitors() + if self.data_store.get_default("create_snapshot", False) is True: + self.create_snapshot() def report_pre_processing(self): """Log some metrics on data and create latex report.""" @@ -129,13 +136,10 @@ class PreProcessing(RunEnvironment): tables.save_to_tex(path=path, filename="station_sample_size_short.tex", column_format=column_format, df=df_nometa) tables.save_to_md(path=path, filename="station_sample_size_short.md", df=df_nometa) - # df_nometa.to_latex(os.path.join(path, "station_sample_size_short.tex"), na_rep='---', - # column_format=column_format) df_descr = self.create_describe_df(df_nometa) column_format = tables.create_column_format_for_tex(df_descr) tables.save_to_tex(path=path, filename="station_describe_short.tex", column_format=column_format, df=df_descr) tables.save_to_md(path=path, filename="station_describe_short.md", df=df_descr) - # df_descr.to_latex(os.path.join(path, "station_describe_short.tex"), na_rep='---', column_format=column_format) @staticmethod def create_describe_df(df, percentiles=None, ignore_last_lines: int = 2): @@ -305,7 +309,8 @@ class PreProcessing(RunEnvironment): def store_data_handler_attributes(self, data_handler, collection): store_attributes = data_handler.store_attributes() if len(store_attributes) > 0: - logging.info("store data requested by the data handler") + logging.info(f"store following parameters ({len(store_attributes)}) requested by the data handler: " + f"{','.join(store_attributes)}") attrs = {} for dh in collection: station = str(dh) @@ -386,6 +391,47 @@ class PreProcessing(RunEnvironment): else: logging.info("No preparation required because no competitor was provided to the workflow.") + def create_snapshot(self): + logging.info("create snapshot for preprocessing") + from mlair.configuration.snapshot_names import animals + for i_try in range(10): + snapshot_name = random.choice(animals).lower() + snapshot_path = os.path.abspath(self.data_store.get("snapshot_path")) + path_config.check_path_and_create(snapshot_path, remove_existing=False) + _snapshot_file = os.path.join(snapshot_path, f"snapshot_preprocessing_{snapshot_name}.pickle") + if not os.path.exists(_snapshot_file): + logging.info(f"store snapshot at: {_snapshot_file}") + with open(_snapshot_file, "wb") as f: + dill.dump(self.data_store, f, protocol=4) + print(_snapshot_file) + return + logging.info(f"Could not create snapshot at {_snapshot_file} as file is already existing ({i_try + 1}/10)") + logging.info(f"Could not create any snapshot after 10/10 tries.") + + def load_snapshot(self, file): + logging.info(f"load snapshot for preprocessing from {file}") + with open(file, "rb") as f: + snapshot = dill.load(f) + + excluded_params = ["batch_path", "batch_size", "block_length", "bootstrap_method", "bootstrap_path", + "bootstrap_type", "competitor_path", "competitors", "create_new_bootstraps", + "create_new_model", "create_snapshot", "data_collection", "debug_mode", + "do_uncertainty_estimate", "early_stopping_epochs", "epochs", "evaluate_competitors", + "evaluate_feature_importance", "experiment_name", "experiment_path", "forecast_path", + "fraction_of_training", "hostname", "hpc_hosts", "log_level_stream", "logging_path", + "login_nodes", "max_number_multiprocessing", "model_class", "model_path", "n_boots", + "neighbors", "plot_list", "plot_path", 
"restore_best_model_weights", "snapshot_load_path", + "snapshot_path", "stations", "tmp_path", "train_model", "transformation", + "use_multiprocessing", ] + data_handler = self.data_store.get("data_handler") + excluded_params = list(set(excluded_params + data_handler.store_attributes())) + + if check_nested_equality(self.data_store._store, snapshot._store, skip_args=excluded_params) is True: + self.update_datastore(snapshot, excluded_params=remove_items(excluded_params, ["transformation", + "data_collection"])) + else: + raise ReferenceError("provided snapshot does not match with the current experiment setup. Abort this run!") + def f_proc(data_handler, station, name_affix, store, return_strategy="", tmp_path=None, **kwargs): """ @@ -398,8 +444,8 @@ def f_proc(data_handler, station, name_affix, store, return_strategy="", tmp_pat res = data_handler.build(station, name_affix=name_affix, store_processed_data=store, **kwargs) except (AttributeError, EmptyQueryResult, KeyError, requests.ConnectionError, ValueError, IndexError) as e: formatted_lines = traceback.format_exc().splitlines() - logging.info( - f"remove station {station} because it raised an error: {e} -> {' | '.join(f_inspect_error(formatted_lines))}") + logging.info(f"remove station {station} because it raised an error: {e} -> " + f"{' | '.join(f_inspect_error(formatted_lines))}") logging.debug(f"detailed information for removal of station {station}: {traceback.format_exc()}") res = None if return_strategy == "result": @@ -414,11 +460,11 @@ def f_proc(data_handler, station, name_affix, store, return_strategy="", tmp_pat def f_proc_create_info_df(data, meta_cols): - station_name = str(data.id_class) - meta = data.id_class.meta - res = {"station_name": station_name, "Y_shape": data.get_Y()[0].shape[0], - "meta": meta.reindex(meta_cols).values.flatten()} - return res + station_name = str(data.id_class) + meta = data.id_class.meta + res = {"station_name": station_name, "Y_shape": data.get_Y()[0].shape[0], + "meta": meta.reindex(meta_cols).values.flatten()} + return res def f_inspect_error(formatted): diff --git a/mlair/run_modules/run_environment.py b/mlair/run_modules/run_environment.py index 2d42b1868736a3684ffe7b507e3e83f3d8d8b3da..191ee30f0485c6abc42a8d718612b7b26feb221a 100644 --- a/mlair/run_modules/run_environment.py +++ b/mlair/run_modules/run_environment.py @@ -11,8 +11,7 @@ import time from mlair.helpers.datastore import DataStoreByScope as DataStoreObject from mlair.helpers.datastore import NameNotFoundInDataStore -from mlair.helpers import Logger -from mlair.helpers import TimeTracking +from mlair.helpers import Logger, to_list, TimeTracking from mlair.plotting.tracker_plot import TrackPlot @@ -172,6 +171,22 @@ class RunEnvironment(object): new_file = filename_pattern % counter return new_file + @classmethod + def update_datastore(cls, new_data_store: DataStoreObject, excluded_params=None, apply_full_replacement=False): + if apply_full_replacement is True: + RunEnvironment.data_store = new_data_store + else: + assert type(RunEnvironment.data_store) == type(new_data_store) + scopes = new_data_store.list_all_scopes() + excluded_params = to_list(excluded_params) + for s in scopes: + # name, scope and value + entries = new_data_store.search_scope(s, current_scope_only=True, return_all=True) + for e in entries: + if e[0] not in excluded_params: + # name, value, scope + RunEnvironment.data_store.set(e[0], e[2], e[1]) + @staticmethod def do_stuff(length=2): """Just a placeholder method for testing without any sense."""