Skip to content
GitLab
Explore
Sign in
Primary navigation
Search or go to…
Project
MLAir
Manage
Activity
Members
Labels
Plan
Issues
Issue boards
Milestones
Wiki
Code
Merge requests
Repository
Branches
Commits
Tags
Repository graph
Compare revisions
Snippets
Build
Pipelines
Jobs
Pipeline schedules
Artifacts
Deploy
Releases
Container registry
Model registry
Operate
Environments
Monitor
Incidents
Analyze
Value stream analytics
Contributor analytics
CI/CD analytics
Repository analytics
Model experiments
Help
Help
Support
GitLab documentation
Compare GitLab plans
Community forum
Contribute to GitLab
Provide feedback
Keyboard shortcuts
?
Snippets
Groups
Projects
Show more breadcrumbs
esde
machine-learning
MLAir
Commits
259e00b3
Commit
259e00b3
authored
3 years ago
by
leufen1
Browse files
Options
Downloads
Patches
Plain Diff
new class CNNfromConfig to be able to configure CNN in much more detail
parent
7ee698ae
No related branches found
No related tags found
5 merge requests
!430
update recent developments
,
!413
update release branch
,
!412
Resolve "release v2.0.0"
,
!406
Lukas issue368 feat prepare cnn class for filter benchmarking
,
!403
Resolve "prepare CNN class for filter benchmarking"
Pipeline
#93782
passed
3 years ago
Stage: test
Stage: docs
Stage: pages
Stage: deploy
Changes
1
Pipelines
1
Show whitespace changes
Inline
Side-by-side
Showing
1 changed file
mlair/model_modules/convolutional_networks.py
+118
-0
118 additions, 0 deletions
mlair/model_modules/convolutional_networks.py
with
118 additions
and
0 deletions
mlair/model_modules/convolutional_networks.py
+
118
−
0
View file @
259e00b3
...
@@ -11,6 +11,124 @@ from mlair.model_modules.advanced_paddings import PadUtils, Padding2D, Symmetric
...
@@ -11,6 +11,124 @@ from mlair.model_modules.advanced_paddings import PadUtils, Padding2D, Symmetric
import
tensorflow.keras
as
keras
import
tensorflow.keras
as
keras
class CNNfromConfig(AbstractModelClass):
    """
    Build a CNN entirely from a layer configuration list.

    Use this class like the following. Note that all keys must match the corresponding
    tf/keras keys of the layer:

    ```python
    input_shape = [(65, 1, 9)]
    output_shape = [(4, )]
    layer_configuration = [
        {"type": "Conv2D", "activation": "relu", "kernel_size": (1, 1), "filters": 8},
        {"type": "Dropout", "rate": 0.2},
        {"type": "Conv2D", "activation": "relu", "kernel_size": (5, 1), "filters": 16},
        {"type": "Dropout", "rate": 0.2},
        {"type": "MaxPooling2D", "pool_size": (8, 1), "strides": (1, 1)},
        {"type": "Conv2D", "activation": "relu", "kernel_size": (1, 1), "filters": 16},
        {"type": "Dropout", "rate": 0.2},
        {"type": "Conv2D", "activation": "relu", "kernel_size": (5, 1), "filters": 32},
        {"type": "Dropout", "rate": 0.2},
        {"type": "MaxPooling2D", "pool_size": (8, 1), "strides": (1, 1)},
        {"type": "Conv2D", "activation": "relu", "kernel_size": (1, 1), "filters": 32},
        {"type": "Dropout", "rate": 0.2},
        {"type": "Conv2D", "activation": "relu", "kernel_size": (5, 1), "filters": 64},
        {"type": "Dropout", "rate": 0.2},
        {"type": "MaxPooling2D", "pool_size": (8, 1), "strides": (1, 1)},
        {"type": "Conv2D", "activation": "relu", "kernel_size": (1, 1), "filters": 64},
        {"type": "Dropout", "rate": 0.2},
        {"type": "Flatten"},
        # {"type": "Dense", "units": 128, "activation": "relu"}
    ]
    model = CNNfromConfig(input_shape, output_shape, layer_configuration)
    ```
    """
    # NOTE(review): this text was previously placed *after* the class attributes, where it is a
    # plain string expression and never becomes ``__doc__``; moved here so it is a real docstring.

    # mapping from activation name to the keras layer (or a partial that builds it)
    _activation = {"relu": keras.layers.ReLU,
                   "tanh": partial(keras.layers.Activation, "tanh"),
                   "sigmoid": partial(keras.layers.Activation, "sigmoid"),
                   "linear": partial(keras.layers.Activation, "linear"),
                   "prelu": partial(keras.layers.PReLU,
                                    alpha_initializer=keras.initializers.constant(value=0.25)),
                   "leakyrelu": partial(keras.layers.LeakyReLU)}
    # recommended kernel initializer per activation (fallback is glorot_uniform)
    _initializer = {"tanh": "glorot_uniform",
                    "sigmoid": "glorot_uniform",
                    "linear": "glorot_uniform",
                    "relu": keras.initializers.he_normal(),
                    "selu": keras.initializers.lecun_normal(),
                    "prelu": keras.initializers.he_normal()}
    _optimizer = {"adam": keras.optimizers.Adam,
                  "sgd": keras.optimizers.SGD}
    _regularizer = {"l1": keras.regularizers.l1,
                    "l2": keras.regularizers.l2,
                    "l1_l2": keras.regularizers.l1_l2}
    # kwargs names this model may consume (optimizer and regularizer parameters)
    _requirements = ["lr", "beta_1", "beta_2", "epsilon", "decay", "amsgrad", "momentum",
                     "nesterov", "l1", "l2"]

    def __init__(self, input_shape: list, output_shape: list, layer_configuration: list, **kwargs):
        """
        :param input_shape: single-element list holding the input shape tuple
        :param output_shape: single-element list holding the output shape tuple
        :param layer_configuration: list of dicts, each describing one keras layer (see class doc)
        :param kwargs: optional settings, e.g. ``activation_output`` (default "linear") and
            regularizer parameters such as ``l1`` / ``l2``
        """
        # this model supports exactly one input and one output branch
        assert len(input_shape) == 1
        assert len(output_shape) == 1
        super().__init__(input_shape[0], output_shape[0])

        self.conf = layer_configuration
        activation_output = kwargs.pop("activation_output", "linear")
        self.activation_output = self._activation.get(activation_output)
        self.activation_output_name = activation_output
        self.kwargs = kwargs

        # apply to model
        self.set_model()
        self.set_compile_options()
        self.set_custom_objects(loss=custom_loss([keras.losses.mean_squared_error, var_loss]),
                                var_loss=var_loss)

    def set_model(self):
        """Build ``self.model`` by stacking the configured layers plus a final Dense output."""
        x_input = keras.layers.Input(shape=self._input_shape)
        x_in = x_input

        for layer_opts in self.conf:
            print(layer_opts)
            layer, layer_kwargs, follow_up_layer = self._extract_layer_conf(layer_opts)
            x_in = layer(**layer_kwargs)(x_in)
            # activations are applied as a separate layer after the configured layer
            if follow_up_layer is not None:
                x_in = follow_up_layer()(x_in)

        x_in = keras.layers.Dense(self._output_shape)(x_in)
        out = self.activation_output(name=f"{self.activation_output_name}_output")(x_in)
        self.model = keras.Model(inputs=x_input, outputs=[out])
        print(self.model.summary())

    def _set_regularizer(self, regularizer, **kwargs):
        """
        Resolve a regularizer name ("l1", "l2", "l1_l2" or None/"none") into a keras
        regularizer instance, pulling its parameters from ``kwargs``.

        :raises AttributeError: if the given regularizer is not supported
        """
        if regularizer is None or (isinstance(regularizer, str) and regularizer.lower() == "none"):
            return None
        try:
            reg_name = regularizer.lower()
            reg = self._regularizer.get(reg_name)
            reg_kwargs = {}
            if reg_name in ["l1", "l2"]:
                reg_kwargs = select_from_dict(kwargs, reg_name, remove_none=True)
                if reg_name in reg_kwargs:
                    # keras.regularizers.l1/l2 expect the factor as parameter "l"
                    reg_kwargs["l"] = reg_kwargs.pop(reg_name)
            elif reg_name == "l1_l2":
                reg_kwargs = select_from_dict(kwargs, ["l1", "l2"], remove_none=True)
            return reg(**reg_kwargs)
        except KeyError:
            raise AttributeError(f"Given regularizer {regularizer} is not supported in this model class.")

    def set_compile_options(self):
        """Set loss and metrics used when compiling the model."""
        # self.compile_options = {"loss": [custom_loss([keras.losses.mean_squared_error, var_loss])],
        #                         "metrics": ["mse", "mae", var_loss]}
        self.compile_options = {"loss": [keras.losses.mean_squared_error],
                                "metrics": ["mse", "mae", var_loss]}

    def _extract_layer_conf(self, layer_opts):
        """
        Translate one configuration dict into (layer class, layer kwargs, follow-up activation
        layer or None). Works on a copy so the caller's configuration stays intact and
        ``set_model`` can be called repeatedly.

        :raises AttributeError: if the configured layer type is unknown to keras.layers
        """
        # copy to avoid destructively popping keys from the user's layer_configuration
        layer_opts = dict(layer_opts)
        follow_up_layer = None
        layer_type = layer_opts.pop("type")
        layer = getattr(keras.layers, layer_type, None)
        if layer is None:
            # fail early with a clear message instead of "'NoneType' is not callable" later
            raise AttributeError(f"Given layer type {layer_type} is not part of keras.layers.")
        activation_type = layer_opts.pop("activation", None)
        if activation_type is not None:
            activation = self._activation.get(activation_type)
            kernel_initializer = self._initializer.get(activation_type, "glorot_uniform")
            layer_opts["kernel_initializer"] = kernel_initializer
            follow_up_layer = activation
        regularizer_type = layer_opts.pop("kernel_regularizer", None)
        if regularizer_type is not None:
            layer_opts["kernel_regularizer"] = self._set_regularizer(regularizer_type, **self.kwargs)
        return layer, layer_opts, follow_up_layer
class
CNN
(
AbstractModelClass
):
# pragma: no cover
class
CNN
(
AbstractModelClass
):
# pragma: no cover
_activation
=
{
"
relu
"
:
keras
.
layers
.
ReLU
,
"
tanh
"
:
partial
(
keras
.
layers
.
Activation
,
"
tanh
"
),
_activation
=
{
"
relu
"
:
keras
.
layers
.
ReLU
,
"
tanh
"
:
partial
(
keras
.
layers
.
Activation
,
"
tanh
"
),
...
...
This diff is collapsed.
Click to expand it.
Preview
0%
Loading
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Save comment
Cancel
Please
register
or
sign in
to comment