From f601fadce07f45cb9be9b8c3d8e1d5c9539db91e Mon Sep 17 00:00:00 2001
From: Falco Weichselbaum <f.weichselbaum@fz-juelich.de>
Date: Thu, 21 Oct 2021 13:28:49 +0200
Subject: [PATCH] Replace deprecated model.evaluate_generator with
 model.evaluate(generator, ...), and update mlt_modules_juwels.sh to
 Stages/2020 modules with Python/3.8.5

---
 HPC_setup/mlt_modules_juwels.sh | 20 ++++++++++----------
 mlair/run_modules/training.py   |  4 ++--
 2 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/HPC_setup/mlt_modules_juwels.sh b/HPC_setup/mlt_modules_juwels.sh
index 01eecbab..8944b440 100755
--- a/HPC_setup/mlt_modules_juwels.sh
+++ b/HPC_setup/mlt_modules_juwels.sh
@@ -8,14 +8,14 @@
 module --force purge
 module use $OTHERSTAGES
 
-ml Stages/2019a
-ml GCCcore/.8.3.0
+ml Stages/2020
+ml GCCcore/.9.3.0
 
-ml Jupyter/2019a-Python-3.6.8
-ml Python/3.6.8
-ml TensorFlow/1.13.1-GPU-Python-3.6.8
-ml Keras/2.2.4-GPU-Python-3.6.8
-ml SciPy-Stack/2019a-Python-3.6.8
-ml dask/1.1.5-Python-3.6.8
-ml GEOS/3.7.1-Python-3.6.8
-ml Graphviz/2.40.1
+ml Jupyter/2020.3.0-Python-3.8.5
+ml Python/3.8.5
+# ml TensorFlow/1.13.1-GPU-Python-3.6.8
+ml TensorFlow/2.3.1-Python-3.8.5
+ml SciPy-Stack/2020-Python-3.8.5
+ml dask/2.22.0-Python-3.8.5
+ml GEOS/3.8.1-Python-3.8.5
+ml Graphviz/2.44.1
diff --git a/mlair/run_modules/training.py b/mlair/run_modules/training.py
index cb538abb..0696c2e7 100644
--- a/mlair/run_modules/training.py
+++ b/mlair/run_modules/training.py
@@ -123,7 +123,7 @@ class Training(RunEnvironment):
 
     def train(self) -> None:
         """
-        Perform training using keras fit_generator().
+        Perform training using keras fit().
 
         Callbacks are stored locally in the experiment directory. Best model from training is saved for class
         variable model. If the file path of checkpoint is not empty, this method assumes, that this is not a new
@@ -261,7 +261,7 @@ class Training(RunEnvironment):
         tables.save_to_md(path, "training_settings.md", df=df)
 
         # calculate val scores
-        val_score = self.model.evaluate_generator(generator=self.val_set, use_multiprocessing=True, verbose=0)
+        val_score = self.model.evaluate(self.val_set, use_multiprocessing=True, verbose=0)
         path = self.data_store.get("model_path")
         with open(os.path.join(path, "val_scores.txt"), "a") as f:
             for index, item in enumerate(to_list(val_score)):
-- 
GitLab