
Resolve "release v1.4.0"

Merged: Ghost User requested to merge release_v1.4.0 into master
1 file  +19  −11
@@ -10,13 +10,15 @@ from typing import Union
 import keras
 from keras.callbacks import Callback, History
+import psutil
+import pandas as pd
 
 from mlair.data_handler import KerasIterator
 from mlair.model_modules.keras_extensions import CallbackHandler
 from mlair.plotting.training_monitoring import PlotModelHistory, PlotModelLearningRate
 from mlair.run_modules.run_environment import RunEnvironment
 from mlair.configuration import path_config
-from mlair.helpers import to_list
+from mlair.helpers import to_list, tables
 
 
 class Training(RunEnvironment):
@@ -141,7 +143,8 @@ class Training(RunEnvironment):
                                                      verbose=2,
                                                      validation_data=self.val_set,
                                                      validation_steps=len(self.val_set),
-                                                     callbacks=self.callbacks.get_callbacks(as_dict=False))
+                                                     callbacks=self.callbacks.get_callbacks(as_dict=False),
+                                                     workers=psutil.cpu_count(logical=False))
         else:
             logging.info("Found locally stored model and checkpoints. Training is resumed from the last checkpoint.")
             self.callbacks.load_callbacks()
@@ -156,7 +159,8 @@ class Training(RunEnvironment):
                                                      validation_data=self.val_set,
                                                      validation_steps=len(self.val_set),
                                                      callbacks=self.callbacks.get_callbacks(as_dict=False),
-                                                     initial_epoch=initial_epoch)
+                                                     initial_epoch=initial_epoch,
+                                                     workers=psutil.cpu_count(logical=False))
             history = hist
         try:
             lr = self.callbacks.get_callback_by_name("lr")
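Both fit_generator calls now pass workers=psutil.cpu_count(logical=False), so Keras feeds the model from one data-loading worker per physical core instead of the single-worker default. A minimal, standalone sketch of the idea; `model` and `train_gen` below are illustrative placeholders, not names from this MR:

```python
import psutil

# Count physical cores only; logical=False excludes hyper-thread siblings,
# which keeps the number of data-loading processes from oversubscribing the CPU.
# Note: psutil.cpu_count can return None if the count is undetermined.
n_workers = psutil.cpu_count(logical=False) or 1

print(f"fit_generator would use workers={n_workers}")

# Illustrative call, assuming a compiled Keras `model` and a `train_gen`
# generator/Sequence exist (placeholders):
# model.fit_generator(train_gen, epochs=10, workers=n_workers)
```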
@@ -233,22 +237,26 @@ class Training(RunEnvironment):
         PlotModelLearningRate(filename=os.path.join(path, f"{name}_history_learning_rate.pdf"), lr_sc=lr_sc)
 
     def report_training(self):
         # create training summary
         data = {"mini batches": len(self.train_set),
                 "upsampling extremes": self.train_set.upsampling,
                 "shuffling": self.train_set.shuffle,
                 "created new model": self._create_new_model,
                 "epochs": self.epochs,
                 "batch size": self.batch_size}
-        import pandas as pd
         df = pd.DataFrame.from_dict(data, orient="index", columns=["training setting"])
         df.sort_index(inplace=True)
-        column_format = "ll"
         path = os.path.join(self.data_store.get("experiment_path"), "latex_report")
         path_config.check_path_and_create(path)
-        df.to_latex(os.path.join(path, "training_settings.tex"), na_rep='---', column_format=column_format)
-        df.to_markdown(open(os.path.join(path, "training_settings.md"), mode="w", encoding='utf-8'),
-                       tablefmt="github")
-        val_score = self.model.evaluate_generator(generator=self.val_set, use_multiprocessing=True, verbose=0, steps=1)
-        for index, item in enumerate(to_list(val_score)):
-            logging.info(f"{self.model.metrics_names[index]} (val), {item}")
+        # store as .tex and .md
+        tables.save_to_tex(path, "training_settings.tex", column_format="ll", df=df)
+        tables.save_to_md(path, "training_settings.md", df=df)
+
+        # calculate val scores
+        val_score = self.model.evaluate_generator(generator=self.val_set, use_multiprocessing=True, verbose=0)
+        path = self.data_store.get("model_path")
+        with open(os.path.join(path, "val_scores.txt"), "a") as f:
+            for index, item in enumerate(to_list(val_score)):
+                logging.info(f"{self.model.metrics_names[index]} (val), {item}")
+                f.write(f"{self.model.metrics_names[index]}, {item}\n")
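The new mlair.helpers.tables module itself is not part of this diff. Inferring only from the call sites above and the inline pandas code they replace, a plausible sketch of the two helpers might look like this (signatures are assumptions, not confirmed by the MR):

```python
import os
import pandas as pd

def save_to_tex(path: str, filename: str, column_format: str, df: pd.DataFrame,
                na_rep: str = "---") -> None:
    """Write df as a LaTeX table; mirrors the removed df.to_latex call."""
    df.to_latex(os.path.join(path, filename), na_rep=na_rep, column_format=column_format)

def save_to_md(path: str, filename: str, df: pd.DataFrame, tablefmt: str = "github") -> None:
    """Write df as a GitHub-flavoured markdown table (requires `tabulate`)."""
    with open(os.path.join(path, filename), mode="w", encoding="utf-8") as f:
        df.to_markdown(f, tablefmt=tablefmt)
```

Two behavioural changes ride along here as well: evaluate_generator loses steps=1, so the validation score is now computed over the full validation set rather than a single batch, and the per-metric scores are appended to val_scores.txt in the model path, so repeated training runs accumulate a score history.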