diff --git a/test/test_run_modules/test_training.py b/test/test_run_modules/test_training.py
index b16c0c2586f87af8368ac0059edc8a3997780f69..1b83b3823519d63d5dcbc10f0e31fc3433f98f34 100644
--- a/test/test_run_modules/test_training.py
+++ b/test/test_run_modules/test_training.py
@@ -234,7 +234,7 @@ class TestTraining:
                             statistics_per_var, window_history_size, window_lead_time) -> Training:
 
         channels = len(list(statistics_per_var.keys()))
-        model = FCN([(window_history_size + 1, 1, channels)], [window_lead_time])
+        model = FCN([(window_history_size + 1, 1, channels)], [window_lead_time])
 
         obj = object.__new__(Training)
         super(Training, obj).__init__()
@@ -306,7 +306,7 @@ class TestTraining:
         assert init_without_run.train_set._collection.return_value == "mock_train_gen"
 
     def test_set_generators(self, init_without_run):
-        sets = ["train", "val", "test"]
+        sets = ["train", "val"]
         assert all([getattr(init_without_run, f"{obj}_set") is None for obj in sets])
         init_without_run.set_generators()
         assert not all([getattr(init_without_run, f"{obj}_set") is None for obj in sets])
@@ -366,10 +366,10 @@ class TestTraining:
 
     def test_resume_training1(self, path: str, model_path, batch_path, data_collection,
                               statistics_per_var, window_history_size, window_lead_time):
-        obj_1st = self.create_training_obj(2, path, data_collection, batch_path, model_path, statistics_per_var,
+        obj_1st = self.create_training_obj(4, path, data_collection, batch_path, model_path, statistics_per_var,
                                            window_history_size, window_lead_time)
         keras.utils.get_custom_objects().update(obj_1st.model.custom_objects)
         assert obj_1st._run() is None
-        obj_2nd = self.create_training_obj(4, path, data_collection, batch_path, model_path, statistics_per_var,
+        obj_2nd = self.create_training_obj(8, path, data_collection, batch_path, model_path, statistics_per_var,
                                            window_history_size, window_lead_time)
         assert obj_2nd._run() is None
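
For context: test_resume_training1 runs _run() twice, first with a target of 4 epochs and then with 8, so the second Training object has to continue from the first object's checkpoint rather than start from scratch. Below is a minimal sketch of that epoch-bookkeeping pattern in plain Keras, assuming nothing about MLAir's actual Training internals; the model architecture, array shapes, and the model.h5 path are invented for illustration only.

# resume_sketch.py -- illustrative only, not MLAir's Training class
import os
import numpy as np
from tensorflow import keras

model_file = "model.h5"  # hypothetical checkpoint path
x = np.random.rand(32, 15, 1, 2)  # shape loosely mirrors (window_history_size + 1, 1, channels)
y = np.random.rand(32, 3)

if os.path.exists(model_file):
    # Second run: reload the checkpoint and continue where the first run stopped.
    model = keras.models.load_model(model_file)
    initial_epoch = 4
else:
    # First run: build a fresh model.
    model = keras.Sequential([
        keras.layers.Flatten(input_shape=(15, 1, 2)),
        keras.layers.Dense(3),
    ])
    model.compile(optimizer="sgd", loss="mse")
    initial_epoch = 0

# First run trains epochs 0..3 (epochs=4); the second run passes epochs=8
# with initial_epoch=4, so it trains epochs 4..7 instead of restarting,
# mirroring the obj_1st(4) / obj_2nd(8) pairing in the test above.
total_epochs = 8 if initial_epoch > 0 else 4
model.fit(x, y, epochs=total_epochs, initial_epoch=initial_epoch)
model.save(model_file)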