Commit b61cf8f3 authored by mova's avatar mova
Browse files

Don't monitor the validation in debug mode

parent 45bea717
......@@ -29,12 +29,13 @@ def validate(train_state: TrainState) -> None:
logger.info(f"Validation Loss: {mean_loss}")
train_state.state.val_losses.append(float(mean_loss))
train_state.writer.add_scalar(
"val_loss", mean_loss, train_state.state["grad_step"]
)
train_state.experiment.log_metric(
"val_loss", mean_loss, train_state.state["grad_step"]
)
if not conf.debug:
train_state.writer.add_scalar(
"val_loss", mean_loss, train_state.state["grad_step"]
)
train_state.experiment.log_metric(
"val_loss", mean_loss, train_state.state["grad_step"]
)
mean_loss = float(mean_loss)
if (
......@@ -42,17 +43,19 @@ def validate(train_state: TrainState) -> None:
or train_state.state.min_val_loss > mean_loss
):
train_state.state.min_val_loss = mean_loss
train_state.experiment.log_metric("min_val_loss", mean_loss)
train_state.state.best_grad_step = train_state.state["grad_step"]
train_state.experiment.log_metric(
"best_grad_step", train_state.state["grad_step"]
)
train_state.experiment.log_metric(
"best_grad_epoch", train_state.state["epoch"]
)
train_state.experiment.log_metric(
"best_grad_batch", train_state.state["ibatch"]
)
train_state.holder.best_model_state = deepcopy(
train_state.holder.model.state_dict()
)
if not conf.debug:
train_state.experiment.log_metric("min_val_loss", mean_loss)
train_state.experiment.log_metric(
"best_grad_step", train_state.state["grad_step"]
)
train_state.experiment.log_metric(
"best_grad_epoch", train_state.state["epoch"]
)
train_state.experiment.log_metric(
"best_grad_batch", train_state.state["ibatch"]
)
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment