
Resolve "release v1.4.0"

Merged Ghost User requested to merge release_v1.4.0 into master
1 file  +10 −4
@@ -31,7 +31,7 @@ class RNN(AbstractModelClass):
     _rnn = {"lstm": keras.layers.LSTM, "gru": keras.layers.GRU}
 
     def __init__(self, input_shape: list, output_shape: list, activation="relu", activation_output="linear",
-                 activation_rnn="tanh",
+                 activation_rnn="tanh", dropout_rnn=0,
                  optimizer="adam", n_layer=1, n_hidden=10, regularizer=None, dropout=None, layer_configuration=None,
                  batch_normalization=False, rnn_type="lstm", add_dense_layer=False, **kwargs):
         """
@@ -42,8 +42,10 @@ class RNN(AbstractModelClass):
         Customize this RNN model via the following parameters:
 
-        :param activation: set your desired activation function. Chose from relu, tanh, sigmoid, linear, selu, prelu,
-            leakyrelu. (Default relu)
+        :param activation: set your desired activation function for appended dense layers (add_dense_layer=True). Choose
+            from relu, tanh, sigmoid, linear, selu, prelu, leakyrelu. (Default relu)
+        :param activation_rnn: set your desired activation function of the rnn output. Choose from relu, tanh, sigmoid,
+            linear, selu, prelu, leakyrelu. (Default tanh)
         :param activation_output: same as activation parameter but exclusively applied on output layer only. (Default
             linear)
         :param optimizer: set optimizer method. Can be either adam or sgd. (Default adam)
@@ -55,6 +57,8 @@ class RNN(AbstractModelClass):
             hidden layer. The number of hidden layers is equal to the total length of this list.
         :param dropout: use dropout with given rate. If no value is provided, dropout layers are not added to the
             network at all. (Default None)
+        :param dropout_rnn: use recurrent dropout with given rate. This is applied along the recursion and not after
+            an rnn layer. (Default 0)
         :param batch_normalization: use batch normalization layer in the network if enabled. These layers are inserted
             between the linear part of a layer (the nn part) and the non-linear part (activation function). No BN layer
             is added if set to false. (Default false)
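
To make the difference between the existing dropout parameter and the new dropout_rnn parameter concrete, here is a minimal standalone Keras sketch; it is not part of the merge request, and the layer sizes, input shape, and the tensorflow.keras import are illustrative assumptions:

    from tensorflow import keras

    inp = keras.layers.Input(shape=(12, 5))  # (time steps, variables) -- example values only

    # `dropout`: separate Dropout layers inserted into the network, applied to a layer's output
    x = keras.layers.LSTM(10, return_sequences=True)(inp)
    x = keras.layers.Dropout(0.2)(x)

    # `dropout_rnn`: recurrent dropout, applied to the hidden state along the recursion inside
    # the layer itself; note that a non-zero recurrent_dropout typically disables the
    # cuDNN-fused kernel in TF 2.x
    y = keras.layers.LSTM(10, return_sequences=True, recurrent_dropout=0.2)(inp)
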
@@ -82,6 +86,8 @@ class RNN(AbstractModelClass):
         self.kernel_initializer = self._initializer.get(activation, "glorot_uniform")
         # self.kernel_regularizer = self._set_regularizer(regularizer, **kwargs)
         self.dropout, self.dropout_rate = self._set_dropout(activation, dropout)
+        assert 0 <= dropout_rnn <= 1
+        self.dropout_rnn = dropout_rnn
 
         # apply to model
         self.set_model()
@@ -105,7 +111,7 @@ class RNN(AbstractModelClass):
         for layer, n_hidden in enumerate(conf):
             return_sequences = (layer < len(conf) - 1)
-            x_in = self.RNN(n_hidden, return_sequences=return_sequences)(x_in)
+            x_in = self.RNN(n_hidden, return_sequences=return_sequences, recurrent_dropout=self.dropout_rnn)(x_in)
             if self.bn is True:
                 x_in = keras.layers.BatchNormalization()(x_in)
             x_in = self.activation_rnn(name=f"{self.activation_rnn_name}_{layer + 1}")(x_in)
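
For completeness, a hypothetical call of the updated constructor: only the keyword arguments come from the changed signature above, while the exact shape format expected for input_shape and output_shape is an assumption, not taken from this merge request:

    # Hypothetical usage sketch of the RNN class changed in this merge request.
    model = RNN(input_shape=[(12, 1, 5)], output_shape=[(4,)],  # shape format assumed
                rnn_type="gru", n_layer=2, n_hidden=16,
                dropout=0.2,      # ordinary dropout layers, unchanged behaviour
                dropout_rnn=0.2)  # new: passed as recurrent_dropout to every LSTM/GRU layer

    # The new assertion in __init__ rejects rates outside [0, 1]:
    # RNN(..., dropout_rnn=1.5)  -> AssertionError
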