Commit 7a40c660 authored by Lukas Leufen

Merge branch 'lukas_issue284_feat_create-fcn-model-class' into 'develop'

new var loss and custom loss methods

See merge request toar/mlair!265
parents 31e10e8f f033d85f
6 related merge requests:
  !319 add all changes of dev into release v1.4.0 branch
  !318 Resolve "release v1.4.0"
  !283 Merge latest develop into falcos issue
  !278 Felix issue295 transformation parameters in data handler
  !265 new var loss and custom loss methods
  !259 Draft: Resolve "WRF-Datahandler should inherit from SingleStationDatahandler"
Pipeline #62217 passed
This commit is part of merge request !318.
@@ -5,6 +5,7 @@ from functools import reduce, partial
 from mlair.model_modules import AbstractModelClass
 from mlair.helpers import select_from_dict
+from mlair.model_modules.loss import var_loss, custom_loss
 import keras
@@ -64,7 +65,9 @@ class FCN(AbstractModelClass):
     _activation = {"relu": keras.layers.ReLU, "tanh": partial(keras.layers.Activation, "tanh"),
                    "sigmoid": partial(keras.layers.Activation, "sigmoid"),
-                   "linear": partial(keras.layers.Activation, "linear")}
+                   "linear": partial(keras.layers.Activation, "linear"),
+                   "selu": partial(keras.layers.Activation, "selu")}
+    _initializer = {"selu": keras.initializers.lecun_normal()}
     _optimizer = {"adam": keras.optimizers.adam, "sgd": keras.optimizers.SGD}
     _requirements = ["lr", "beta_1", "beta_2", "epsilon", "decay", "amsgrad", "momentum", "nesterov"]
@@ -87,6 +90,7 @@ class FCN(AbstractModelClass):
         self.optimizer = self._set_optimizer(optimizer, **kwargs)
         self.layer_configuration = (n_layer, n_hidden)
         self._update_model_name()
+        self.kernel_initializer = self._initializer.get(activation, "glorot_uniform")

         # apply to model
         self.set_model()
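The new kernel_initializer attribute is filled by a plain dictionary lookup on the activation name: only "selu" maps to a dedicated initializer (lecun_normal, the combination recommended for self-normalizing networks), while every other activation falls back to the string "glorot_uniform", which Keras resolves by name when the layer is built. A minimal sketch of that lookup outside the class:

    import keras

    # same mapping as FCN._initializer: only "selu" carries its own initializer
    _initializer = {"selu": keras.initializers.lecun_normal()}

    _initializer.get("selu", "glorot_uniform")   # lecun_normal instance
    _initializer.get("relu", "glorot_uniform")   # the string "glorot_uniform"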
@@ -126,11 +130,12 @@ class FCN(AbstractModelClass):
         x_in = keras.layers.Flatten()(x_input)
         n_layer, n_hidden = self.layer_configuration
         for layer in range(n_layer):
-            x_in = keras.layers.Dense(n_hidden)(x_in)
+            x_in = keras.layers.Dense(n_hidden, kernel_initializer=self.kernel_initializer)(x_in)
             x_in = self.activation()(x_in)
         x_in = keras.layers.Dense(self._output_shape)(x_in)
         out = self.activation_output()(x_in)
         self.model = keras.Model(inputs=x_input, outputs=[out])

     def set_compile_options(self):
-        self.compile_options = {"loss": [keras.losses.mean_squared_error], "metrics": ["mse", "mae"]}
+        self.compile_options = {"loss": [custom_loss([keras.losses.mean_squared_error, var_loss])],
+                                "metrics": ["mse", "mae", var_loss]}
@@ -20,3 +20,21 @@ def l_p_loss(power: int) -> Callable:
         return K.mean(K.pow(K.abs(y_pred - y_true), power), axis=-1)
     return loss
+
+
+def var_loss(y_true, y_pred) -> Callable:
+    return K.mean(K.square(K.var(y_true) - K.var(y_pred)))
+
+
+def custom_loss(loss_list, loss_weights=None) -> Callable:
+    n = len(loss_list)
+    if loss_weights is None:
+        loss_weights = [1. / n for _ in range(n)]
+    else:
+        assert len(loss_weights) == n
+        loss_weights = [w / sum(loss_weights) for w in loss_weights]
+
+    def loss(y_true, y_pred):
+        return sum([loss_weights[i] * loss_list[i](y_true, y_pred) for i in range(n)])
+
+    return loss
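var_loss penalizes the squared difference between the variances of targets and predictions, so it pushes the network to reproduce the spread of the data rather than collapsing toward the mean; custom_loss returns a closure over a normalized weighted sum of arbitrary loss functions. A small sketch evaluating both with the Keras backend (values match the tests below):

    import numpy as np
    import keras.backend as K
    from mlair.model_modules.loss import l_p_loss, var_loss, custom_loss

    y_true = K.constant(np.array([1, 1, 0, 0.5]))
    y_pred = K.constant(np.array([1, 0, 2, 0.5]))

    # var(y_true) = 0.171875, var(y_pred) = 0.546875 -> (0.375)**2 = 0.140625
    K.eval(var_loss(y_true, y_pred))

    # default weights 1/2 each: 0.5 * 1.25 + 0.5 * 0.140625 = 0.6953125
    combined = custom_loss([l_p_loss(2), var_loss])
    K.eval(combined(y_true, y_pred))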
 import keras
 import numpy as np

-from mlair.model_modules.loss import l_p_loss
+from mlair.model_modules.loss import l_p_loss, var_loss, custom_loss
+import pytest


-class TestLoss:
+class TestLPLoss:

     def test_l_p_loss(self):
         model = keras.Sequential()
@@ -15,3 +17,41 @@ class TestLoss:
         model.compile(optimizer=keras.optimizers.Adam(), loss=l_p_loss(3))
         hist = model.fit(np.array([1, 0, -2, 0.5]), np.array([1, 1, 0, 0.5]), epochs=1)
         assert hist.history['loss'][0] == 2.25
+
+
+class TestVarLoss:
+
+    def test_var_loss(self):
+        model = keras.Sequential()
+        model.add(keras.layers.Lambda(lambda x: x, input_shape=(None,)))
+        model.compile(optimizer=keras.optimizers.Adam(), loss=var_loss)
+        hist = model.fit(np.array([1, 0, 2, 0.5]), np.array([1, 1, 0, 0.5]), epochs=1)
+        assert hist.history['loss'][0] == 0.140625
+
+
+class TestCustomLoss:
+
+    def test_custom_loss_no_weights(self):
+        cust_loss = custom_loss([l_p_loss(2), var_loss])
+        model = keras.Sequential()
+        model.add(keras.layers.Lambda(lambda x: x, input_shape=(None,)))
+        model.compile(optimizer=keras.optimizers.Adam(), loss=cust_loss)
+        hist = model.fit(np.array([1, 0, 2, 0.5]), np.array([1, 1, 0, 0.5]), epochs=1)
+        assert hist.history['loss'][0] == (0.5 * 0.140625 + 0.5 * 1.25)
+
+    @pytest.mark.parametrize("weights", [[0.3, 0.7], [0.5, 0.5], [1, 1], [4, 1]])
+    def test_custom_loss_with_weights(self, weights):
+        cust_loss = custom_loss([l_p_loss(2), var_loss], weights)
+        model = keras.Sequential()
+        model.add(keras.layers.Lambda(lambda x: x, input_shape=(None,)))
+        model.compile(optimizer=keras.optimizers.Adam(), loss=cust_loss)
+        hist = model.fit(np.array([1, 0, 2, 0.5]), np.array([1, 1, 0, 0.5]), epochs=1)
+        weights_adjusted = list(map(lambda x: x / sum(weights), weights))
+        expected = (weights_adjusted[0] * 1.25 + weights_adjusted[1] * 0.140625)
+        assert np.testing.assert_almost_equal(hist.history['loss'][0], expected, decimal=6) is None
+
+    def test_custom_loss_invalid_weights(self):
+        with pytest.raises(AssertionError):
+            custom_loss([l_p_loss(2), var_loss], [0.3])
+        with pytest.raises(AssertionError):
+            custom_loss([l_p_loss(2), var_loss], [0.4, 3, 1])
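The hard-coded expectations follow from the identity Lambda layer (y_pred equals the model input) and can be checked by hand, e.g. with numpy:

    import numpy as np

    y_true = np.array([1, 1, 0, 0.5])

    # l_p_loss(3) test: mean(|[1, 0, -2, 0.5] - y_true|**3) = mean([0, 1, 8, 0])
    assert (np.abs(np.array([1, 0, -2, 0.5]) - y_true) ** 3).mean() == 2.25

    # var_loss and l_p_loss(2) tests on the identity output [1, 0, 2, 0.5]
    y_pred = np.array([1, 0, 2, 0.5])
    assert (y_true.var() - y_pred.var()) ** 2 == 0.140625
    assert ((y_pred - y_true) ** 2).mean() == 1.25

The weighted variant compares against np.testing.assert_almost_equal rather than exact equality, presumably because weights such as 0.3 and 0.7 are not exactly representable in the float32 arithmetic of the training step.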