esde / machine-learning / MLAir / Commits
Commit 964e15f1, authored Feb 16, 2022 by lukas leufen
can now add any number of dense layers and use dropout
Parent: a2db6580
No related branches found
No related tags found
6 merge requests:
!430 update recent developments
!413 update release branch
!412 Resolve "release v2.0.0"
!390 Lukas issue362 feat branched rnn
!389 Lukas issue361 feat custom dense layers in rnn
!387 Resolve "custom dense layers in rnn"
Pipeline #92219 failed, Feb 16, 2022
Stages: test, docs, pages, deploy
Changes: 1
Pipelines: 1
Showing 1 changed file: mlair/model_modules/recurrent_networks.py (27 additions, 4 deletions)
@@ -33,7 +33,7 @@ class RNN(AbstractModelClass): # pragma: no cover
     def __init__(self, input_shape: list, output_shape: list, activation="relu", activation_output="linear",
                  activation_rnn="tanh", dropout_rnn=0,
                  optimizer="adam", n_layer=1, n_hidden=10, regularizer=None, dropout=None, layer_configuration=None,
-                 batch_normalization=False, rnn_type="lstm", add_dense_layer=False, **kwargs):
+                 batch_normalization=False, rnn_type="lstm", add_dense_layer=False, dense_layer_configuration=None, **kwargs):
         """
         Sets model and loss depending on the given arguments.
@@ -64,6 +64,15 @@ class RNN(AbstractModelClass): # pragma: no cover
             is added if set to false. (Default false)
         :param rnn_type: define which kind of recurrent network should be applied. Choose from either lstm or gru. All
             units will be of this kind. (Default lstm)
+        :param add_dense_layer: set True to use additional dense layers between the last recurrent layer and the
+            output layer. If no further specification is made via dense_layer_configuration, a single layer is added
+            with n neurons, where n is equal to min(n_previous_layer, n_output**2). If set to False, the output layer
+            directly follows the last recurrent layer.
+        :param dense_layer_configuration: specify the number of dense layers and their neurons as a list, where each
+            element corresponds to the number of neurons of one layer and the length of the list corresponds to the
+            number of layers to add. The last layer is followed by the output layer. In case a value given for the
+            number of neurons is less than the number of output neurons, the addition of dense layers is stopped
+            immediately.
         """
         assert len(input_shape) == 1
@@ -80,6 +89,7 @@ class RNN(AbstractModelClass): # pragma: no cover
         self.optimizer = self._set_optimizer(optimizer.lower(), **kwargs)
         self.bn = batch_normalization
         self.add_dense_layer = add_dense_layer
+        self.dense_layer_configuration = dense_layer_configuration or []
         self.layer_configuration = (n_layer, n_hidden) if layer_configuration is None else layer_configuration
         self.RNN = self._rnn.get(rnn_type.lower())
         self._update_model_name(rnn_type)
@@ -119,9 +129,22 @@ class RNN(AbstractModelClass): # pragma: no cover
                 x_in = self.dropout(self.dropout_rate)(x_in)
         if self.add_dense_layer is True:
-            x_in = keras.layers.Dense(min(self._output_shape ** 2, conf[-1]), name=f"Dense_{len(conf) + 1}",
-                                      kernel_initializer=self.kernel_initializer, )(x_in)
-            x_in = self.activation(name=f"{self.activation_name}_{len(conf) + 1}")(x_in)
+            if len(self.dense_layer_configuration) == 0:
+                x_in = keras.layers.Dense(min(self._output_shape ** 2, conf[-1]), name=f"Dense_{len(conf) + 1}",
+                                          kernel_initializer=self.kernel_initializer, )(x_in)
+                x_in = self.activation(name=f"{self.activation_name}_{len(conf) + 1}")(x_in)
+                if self.dropout is not None:
+                    x_in = self.dropout(self.dropout_rate)(x_in)
+            else:
+                for layer, n_hidden in enumerate(self.dense_layer_configuration):
+                    if n_hidden < self._output_shape:
+                        break
+                    x_in = keras.layers.Dense(n_hidden, name=f"Dense_{layer + 1}",
+                                              kernel_initializer=self.kernel_initializer, )(x_in)
+                    x_in = self.activation(name=f"{self.activation_name}_{layer + 1}")(x_in)
+                    if self.dropout is not None:
+                        x_in = self.dropout(self.dropout_rate)(x_in)
         x_in = keras.layers.Dense(self._output_shape)(x_in)
         out = self.activation_output(name=f"{self.activation_output_name}_output")(x_in)
         self.model = keras.Model(inputs=x_input, outputs=[out])
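The docstring and the model-building changes above define how the two new arguments interact. As orientation, here is a minimal usage sketch; it is not part of the commit. It assumes MLAir is importable, that input_shape and output_shape are passed as one-element lists of shape tuples (as the assert len(input_shape) == 1 in this class suggests), and that dropout accepts a plain rate; all shapes and values are illustrative.

# Minimal usage sketch, not part of this commit. Assumes MLAir is importable, that
# input_shape / output_shape are one-element lists of shape tuples (as the
# "assert len(input_shape) == 1" above suggests), and that dropout takes a rate.
# All shapes and values below are illustrative.
from mlair.model_modules.recurrent_networks import RNN

# Default behaviour of the new option: with add_dense_layer=True and no
# dense_layer_configuration, a single Dense layer with
# min(n_previous_layer, n_output ** 2) neurons is placed before the output layer.
model_default = RNN(input_shape=[(65, 9)], output_shape=[(4,)],
                    rnn_type="lstm", add_dense_layer=True)

# Explicit configuration: one Dense layer per list entry (64, 32 and 16 neurons),
# each followed by its activation and, since dropout is set, a dropout layer.
# An entry smaller than the number of output neurons stops the stacking early:
# with 4 output neurons, [64, 32, 2] would add only the 64- and 32-neuron layers.
model_custom = RNN(input_shape=[(65, 9)], output_shape=[(4,)],
                   rnn_type="gru", add_dense_layer=True,
                   dense_layer_configuration=[64, 32, 16], dropout=0.2)

Note that the early stop is silent: the loop simply breaks on the first entry below self._output_shape, so a too-small value shortens the dense stack without raising an error.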