Commit 0a74a59c authored by leufen1

add vanilla rnn

parent d591487b
Included in 5 merge requests:
- !319 add all changes of dev into release v1.4.0 branch
- !318 Resolve "release v1.4.0"
- !317 enabled window_lead_time=1
- !295 Resolve "data handler FIR filter"
- !259 (Draft) Resolve "WRF-Datahandler should inherit from SingleStationDatahandler"
Pipeline #68468 passed with warnings
@@ -67,7 +67,8 @@ class DataHandlerMixedSamplingSingleStation(DataHandlerSingleStation):
                                  self.station_type, self.network, self.store_data_locally, self.data_origin,
                                  self.start, self.end)
-        data = self.interpolate(data, dim=self.time_dim, method=self.interpolation_method[ind],
-                                limit=self.interpolation_limit[ind])
+        data = self.interpolate(data, dim=self.time_dim, method=self.interpolation_method[ind],
+                                limit=self.interpolation_limit[ind], sampling=self.sampling[ind])
         return data

     def set_inputs_and_targets(self):
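The hunk above forwards the per-branch sampling setting into interpolate, alongside the already per-branch method and limit. A minimal sketch of the idea, with hypothetical names (this is not the MLAir API): in a mixed-sampling handler, ind selects the input (0) or target (1) branch, and each interpolation setting is a per-branch list, so the temporal resolution is now forwarded per branch as well.

def interpolate(data, method, limit, sampling):
    # stand-in for the real interpolation routine
    print(f"interpolate: method={method}, limit={limit}, sampling={sampling}")
    return data

interpolation_method = ["linear", "linear"]
interpolation_limit = [1, 1]
sampling = ["hourly", "daily"]  # e.g. hourly inputs, daily targets

def load_branch(data, ind):
    # before this commit, sampling was not passed on, so interpolate could not
    # distinguish hourly from daily data
    return interpolate(data, method=interpolation_method[ind],
                       limit=interpolation_limit[ind], sampling=sampling[ind])

load_branch([], ind=0)  # input branch
load_branch([], ind=1)  # target branch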

 __author__ = "Lukas Leufen"
-__date__ = '2021-02-'
+__date__ = '2021-02-18'

 from functools import reduce, partial

__author__ = "Lukas Leufen"
__date__ = '2021-05-25'
from functools import reduce, partial
from mlair.model_modules import AbstractModelClass
from mlair.helpers import select_from_dict
from mlair.model_modules.loss import var_loss, custom_loss
import keras
class RNN(AbstractModelClass):
    """
    A vanilla recurrent network: the input is reshaped to (time, features),
    passed through stacked LSTM layers with optional dropout, and mapped to the
    forecast window by a dense output layer.
    """

    _activation = {"relu": keras.layers.ReLU, "tanh": partial(keras.layers.Activation, "tanh"),
                   "sigmoid": partial(keras.layers.Activation, "sigmoid"),
                   "linear": partial(keras.layers.Activation, "linear"),
                   "selu": partial(keras.layers.Activation, "selu"),
                   "prelu": partial(keras.layers.PReLU, alpha_initializer=keras.initializers.constant(value=0.25))}
    _initializer = {"tanh": "glorot_uniform", "sigmoid": "glorot_uniform", "linear": "glorot_uniform",
                    "relu": keras.initializers.he_normal(), "selu": keras.initializers.lecun_normal(),
                    "prelu": keras.initializers.he_normal()}
    _optimizer = {"adam": keras.optimizers.Adam, "sgd": keras.optimizers.SGD}
    _regularizer = {"l1": keras.regularizers.l1, "l2": keras.regularizers.l2, "l1_l2": keras.regularizers.l1_l2}
    _requirements = ["lr", "beta_1", "beta_2", "epsilon", "decay", "amsgrad", "momentum", "nesterov", "l1", "l2"]
    _dropout = {"selu": keras.layers.AlphaDropout}  # SELU needs AlphaDropout to stay self-normalizing
    def __init__(self, input_shape: list, output_shape: list, activation="relu", activation_output="linear",
                 optimizer="adam", n_layer=1, n_hidden=10, regularizer=None, dropout=None, layer_configuration=None,
                 **kwargs):
        """
        Sets model and loss depending on the given arguments.

        :param input_shape: list of input shapes (expect len=1 with shape=(window_hist, station, variables))
        :param output_shape: list of output shapes (expect len=1 with shape=(window_forecast,))
        """
        assert len(input_shape) == 1
        assert len(output_shape) == 1
        super().__init__(input_shape[0], output_shape[0])

        # settings
        # self.activation = self._set_activation(activation)
        # self.activation_name = activation
        self.activation_output = self._set_activation(activation_output)
        self.activation_output_name = activation_output
        self.optimizer = self._set_optimizer(optimizer, **kwargs)
        # self.layer_configuration = (n_layer, n_hidden) if layer_configuration is None else layer_configuration
        # self._update_model_name()
        # self.kernel_initializer = self._initializer.get(activation, "glorot_uniform")
        # self.kernel_regularizer = self._set_regularizer(regularizer, **kwargs)
        self.dropout, self.dropout_rate = self._set_dropout(activation, dropout)

        # apply to model
        self.set_model()
        self.set_compile_options()
        self.set_custom_objects(loss=self.compile_options["loss"][0], var_loss=var_loss)
    def set_model(self):
        """
        Build the model.
        """
        x_input = keras.layers.Input(shape=self._input_shape)
        # flatten station and variable dimensions so the LSTMs see (time, features)
        x_in = keras.layers.Reshape((self._input_shape[0], reduce((lambda x, y: x * y), self._input_shape[1:])))(
            x_input)
        x_in = keras.layers.LSTM(32, return_sequences=True)(x_in)
        if self.dropout is not None:
            x_in = self.dropout(self.dropout_rate)(x_in)
        x_in = keras.layers.LSTM(8)(x_in)
        if self.dropout is not None:
            x_in = self.dropout(self.dropout_rate)(x_in)
        out = keras.layers.Dense(self._output_shape)(x_in)
        self.model = keras.Model(inputs=x_input, outputs=[out])
        print(self.model.summary())

        # alternative encoder-decoder variant (kept for reference):
        # x_input = keras.layers.Input(shape=self._input_shape)
        # x_in = keras.layers.Reshape((self._input_shape[0], reduce((lambda x, y: x * y), self._input_shape[1:])))(
        #     x_input)
        # x_in = keras.layers.LSTM(32)(x_in)
        # x_in = keras.layers.RepeatVector(self._output_shape)(x_in)
        # x_in = keras.layers.LSTM(32, return_sequences=True)(x_in)
        # out = keras.layers.TimeDistributed(keras.layers.Dense(1))(x_in)
        # out = keras.layers.Flatten()(out)
        # self.model = keras.Model(inputs=x_input, outputs=[out])
        # print(self.model.summary())
    def _set_dropout(self, activation, dropout_rate):
        if dropout_rate is None:
            return None, None
        assert 0 <= dropout_rate < 1
        # SELU requires AlphaDropout to preserve self-normalization; all other
        # activations fall back to standard Dropout
        return self._dropout.get(activation, keras.layers.Dropout), dropout_rate

    def _set_activation(self, activation):
        try:
            # indexing (not .get) so an unknown activation actually raises KeyError
            return self._activation[activation.lower()]
        except KeyError:
            raise AttributeError(f"Given activation {activation} is not supported in this model class.")
    def set_compile_options(self):
        self.compile_options = {"loss": [custom_loss([keras.losses.mean_squared_error, var_loss])],
                                "metrics": ["mse", "mae", var_loss]}
    def _set_optimizer(self, optimizer, **kwargs):
        try:
            opt_name = optimizer.lower()
            # indexing (not .get) so an unknown optimizer raises KeyError instead of
            # failing later with opt being None
            opt = self._optimizer[opt_name]
            opt_kwargs = {}
            if opt_name == "adam":
                opt_kwargs = select_from_dict(kwargs, ["lr", "beta_1", "beta_2", "epsilon", "decay", "amsgrad"])
            elif opt_name == "sgd":
                opt_kwargs = select_from_dict(kwargs, ["lr", "momentum", "decay", "nesterov"])
            return opt(**opt_kwargs)
        except KeyError:
            raise AttributeError(f"Given optimizer {optimizer} is not supported in this model class.")
    #
    # def _set_regularizer(self, regularizer, **kwargs):
    #     if regularizer is None or (isinstance(regularizer, str) and regularizer.lower() == "none"):
    #         return None
    #     try:
    #         reg_name = regularizer.lower()
    #         reg = self._regularizer.get(reg_name)
    #         reg_kwargs = {}
    #         if reg_name in ["l1", "l2"]:
    #             reg_kwargs = select_from_dict(kwargs, reg_name, remove_none=True)
    #             if reg_name in reg_kwargs:
    #                 reg_kwargs["l"] = reg_kwargs.pop(reg_name)
    #         elif reg_name == "l1_l2":
    #             reg_kwargs = select_from_dict(kwargs, ["l1", "l2"], remove_none=True)
    #         return reg(**reg_kwargs)
    #     except KeyError:
    #         raise AttributeError(f"Given regularizer {regularizer} is not supported in this model class.")
    #
    # def _update_model_name(self):
    #     n_input = str(reduce(lambda x, y: x * y, self._input_shape))
    #     n_output = str(self._output_shape)
    #     if isinstance(self.layer_configuration, tuple) and len(self.layer_configuration) == 2:
    #         n_layer, n_hidden = self.layer_configuration
    #         self.model_name += "_".join(["", n_input, *[f"{n_hidden}" for _ in range(n_layer)], n_output])
    #     else:
    #         self.model_name += "_".join(["", n_input, *[f"{n}" for n in self.layer_configuration], n_output])
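
For orientation, a minimal usage sketch (not part of this commit): it assumes MLAir's convention that input and output shapes are passed as lists of tuples and that AbstractModelClass unpacks the single-element output tuple into an integer; all shape and hyperparameter values below are made up.

# Hypothetical usage; shapes and hyperparameters are illustrative only.
# 14 historical time steps, 1 station, 5 variables in; a 4-step forecast out.
rnn = RNN(input_shape=[(14, 1, 5)], output_shape=[(4,)], activation="relu",
          dropout=0.25, optimizer="adam", lr=1e-3)
rnn.model.compile(optimizer=rnn.optimizer, loss=rnn.compile_options["loss"],
                  metrics=rnn.compile_options["metrics"])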