From adff3e91e59ca5b7dcbb374dde3756f7c098f924 Mon Sep 17 00:00:00 2001
From: leufen1 <l.leufen@fz-juelich.de>
Date: Fri, 26 Feb 2021 16:18:26 +0100
Subject: [PATCH] report val loss, use log scale in loss monitoring plot

---
 ACKNOWLEDGMENTS .md => ACKNOWLEDGMENTS.md | 0
 mlair/plotting/training_monitoring.py     | 2 +-
 mlair/run_modules/training.py             | 6 +++++-
 3 files changed, 6 insertions(+), 2 deletions(-)
 rename ACKNOWLEDGMENTS .md => ACKNOWLEDGMENTS.md (100%)

diff --git a/ACKNOWLEDGMENTS .md b/ACKNOWLEDGMENTS.md
similarity index 100%
rename from ACKNOWLEDGMENTS .md
rename to ACKNOWLEDGMENTS.md
diff --git a/mlair/plotting/training_monitoring.py b/mlair/plotting/training_monitoring.py
index 09f49c84..4b4ebbc3 100644
--- a/mlair/plotting/training_monitoring.py
+++ b/mlair/plotting/training_monitoring.py
@@ -86,7 +86,7 @@ class PlotModelHistory:
         """
         ax = self._data[[self._plot_metric, f"val_{self._plot_metric}"]].plot(linewidth=0.7)
         if len(self._additional_columns) > 0:
-            self._data[self._additional_columns].plot(linewidth=0.7, secondary_y=True, ax=ax)
+            self._data[self._additional_columns].plot(linewidth=0.7, secondary_y=True, ax=ax, logy=True)
         title = f"Model {self._plot_metric}: best = {self._data[[f'val_{self._plot_metric}']].min().values}"
         ax.set(xlabel="epoch", ylabel=self._plot_metric, title=title)
         ax.axhline(y=0, color="gray", linewidth=0.5)
diff --git a/mlair/run_modules/training.py b/mlair/run_modules/training.py
index 6c993d56..4409b643 100644
--- a/mlair/run_modules/training.py
+++ b/mlair/run_modules/training.py
@@ -246,4 +246,8 @@ class Training(RunEnvironment):
         path_config.check_path_and_create(path)
         df.to_latex(os.path.join(path, "training_settings.tex"), na_rep='---', column_format=column_format)
         df.to_markdown(open(os.path.join(path, "training_settings.md"), mode="w", encoding='utf-8'),
-                       tablefmt="github")
\ No newline at end of file
+                       tablefmt="github")
+
+        val_score = self.model.evaluate_generator(generator=self.val_set, use_multiprocessing=True, verbose=0, steps=1)
+        for index, item in enumerate(val_score):
+            logging.info(f"{self.model.metrics_names[index]}, {item}")
-- 
GitLab
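
For context, a minimal standalone sketch of the validation reporting added in training.py above: Keras returns one score per compiled metric, and model.metrics_names lines up with those scores index by index, which is what the added loop relies on. The toy model, data, and metric choice here are hypothetical and not MLAir code; the patch itself evaluates self.val_set via evaluate_generator, the generator-based counterpart of evaluate used below.

import logging
import numpy as np
import tensorflow as tf

logging.basicConfig(level=logging.INFO)

# hypothetical stand-in for the trained model and its validation data
model = tf.keras.Sequential([tf.keras.Input(shape=(4,)), tf.keras.layers.Dense(1)])
model.compile(optimizer="adam", loss="mse", metrics=["mae"])
x_val, y_val = np.random.rand(32, 4), np.random.rand(32, 1)
model.fit(x_val, y_val, epochs=1, verbose=0)

# evaluate() returns one value per metric (here [loss, mae]); metrics_names
# provides the matching labels, so enumerating the scores logs "name, value"
val_score = model.evaluate(x_val, y_val, verbose=0)
for index, item in enumerate(val_score):
    logging.info(f"{model.metrics_names[index]}, {item}")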