esde / machine-learning / MLAir

Commit 763ddcb0, authored 5 years ago by lukas leufen
introduced model class and used in model_setup.py
parent 535d99e9
No related branches or tags found.
2 merge requests: !24 include recent development, !22 model class
Showing 2 changed files with 117 additions and 6 deletions:
src/model_modules/model_class.py (+114, −0)
src/modules/model_setup.py (+3, −6)
src/model_modules/model_class.py (new file, 0 → 100644, +114 −0)
```python
__author__ = "Lukas Leufen"
__date__ = '2019-12-12'

from abc import ABC
from typing import Any, Callable

import keras


class AbstractModelClass(ABC):
    """
    The AbstractModelClass provides a unified skeleton for any model provided to the machine learning workflow. The
    model can always be accessed by calling ModelClass.model or directly by a model method without parsing the model
    attribute name (e.g. ModelClass.model.compile -> ModelClass.compile). Besides the model, this class provides the
    corresponding loss function.
    """

    def __init__(self) -> None:
        """
        Predefine internal attributes for model and loss.
        """
        self._model = None
        self._loss = None

    def __getattr__(self, name: str) -> Any:
        """
        Is called if __getattribute__ is not able to find the requested attribute. Normally, the model class is saved
        into a variable like `model = ModelClass()`. To bypass a call like `model.model` to access the _model
        attribute, this method searches for the named attribute in the self.model namespace and returns it if
        available. Therefore, the following expression is true: `ModelClass().compile == ModelClass().model.compile`
        as long as the called attribute/method is not part of the ModelClass itself.
        :param name: name of the attribute or method to call
        :return: attribute or method from self.model namespace
        """
        return self.model.__getattribute__(name)

    @property
    def model(self) -> keras.Model:
        """
        The model property containing a keras.Model instance.
        :return: the keras model
        """
        return self._model

    @property
    def loss(self) -> Callable:
        """
        The loss property containing a callable loss function. The loss function can be any keras loss or a customised
        function. If the loss is a customised function, it must contain the internal loss(y_true, y_pred) function:
            def customised_loss(args):
                def loss(y_true, y_pred):
                    return actual_function(y_true, y_pred, args)
                return loss
        :return: the loss function
        """
        return self._loss


class MyLittleModel(AbstractModelClass):
    """
    A customised model with a 1x1 Conv and 4 Dense layers (64, 32, 16, window_lead_time), where the last layer is the
    output layer whose size depends on the window_lead_time parameter. Dropout is used between the Convolution and the
    first Dense layer.
    """

    def __init__(self, activation, window_history_size, channels, regularizer, dropout_rate, window_lead_time):
        """
        Sets model and loss depending on the given arguments.
        :param activation: activation function
        :param window_history_size: number of historical time steps included in the input data
        :param channels: number of variables used in input data
        :param regularizer: <not used here>
        :param dropout_rate: dropout rate used in the model [0, 1)
        :param window_lead_time: number of time steps to forecast in the output layer
        """
        super().__init__()
        self.set_model(activation, window_history_size, channels, dropout_rate, window_lead_time)
        self.set_loss()

    def set_model(self, activation, window_history_size, channels, dropout_rate, window_lead_time):
        """
        Build the model.
        :param activation: activation function
        :param window_history_size: number of historical time steps included in the input data
        :param channels: number of variables used in input data
        :param dropout_rate: dropout rate used in the model [0, 1)
        :param window_lead_time: number of time steps to forecast in the output layer
        :return: built keras model
        """
        # add 1 to window_history_size to include the current time step t0
        X_input = keras.layers.Input(shape=(window_history_size + 1, 1, channels))
        X_in = keras.layers.Conv2D(32, (1, 1), padding='same', name='{}_Conv_1x1'.format("major"))(X_input)
        X_in = activation(name='{}_conv_act'.format("major"))(X_in)
        X_in = keras.layers.Flatten(name='{}'.format("major"))(X_in)
        X_in = keras.layers.Dropout(dropout_rate, name='{}_Dropout_1'.format("major"))(X_in)
        X_in = keras.layers.Dense(64, name='{}_Dense_64'.format("major"))(X_in)
        X_in = activation()(X_in)
        X_in = keras.layers.Dense(32, name='{}_Dense_32'.format("major"))(X_in)
        X_in = activation()(X_in)
        X_in = keras.layers.Dense(16, name='{}_Dense_16'.format("major"))(X_in)
        X_in = activation()(X_in)
        X_in = keras.layers.Dense(window_lead_time, name='{}_Dense'.format("major"))(X_in)
        out_main = activation()(X_in)
        self._model = keras.Model(inputs=X_input, outputs=[out_main])

    def set_loss(self):
        """
        Set the loss.
        :return: loss function
        """
        self._loss = keras.losses.mean_squared_error
```
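The attribute forwarding in `__getattr__` lets an instance of the wrapper stand in for the wrapped keras model. A minimal usage sketch (the argument values below are hypothetical, chosen only for illustration):

```python
import keras

# Hypothetical example values; any keras activation layer class and window sizes work.
model = MyLittleModel(activation=keras.layers.PReLU, window_history_size=13,
                      channels=9, regularizer=None, dropout_rate=0.1,
                      window_lead_time=3)

# `compile` and `summary` are not defined on MyLittleModel, so __getattr__
# forwards both lookups to the underlying keras.Model instance.
model.compile(optimizer=keras.optimizers.SGD(), loss=model.loss)
assert model.compile == model.model.compile  # the equivalence the docstring promises
model.summary()  # input shape (14, 1, 9), output layer of size window_lead_time=3
```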
src/modules/model_setup.py (+3 −6)
```diff
@@ -15,6 +15,7 @@ from src.modules.run_environment import RunEnvironment
 from src.helpers import l_p_loss, LearningRateDecay
 from src.inception_model import InceptionModelBase
 from src.flatten import flatten_tail
+from src.model_modules.model_class import MyLittleModel


 class ModelSetup(RunEnvironment):
@@ -53,7 +54,7 @@ class ModelSetup(RunEnvironment):

     def compile_model(self):
         optimizer = self.data_store.get("optimizer", self.scope)
-        loss = self.data_store.get("loss", self.scope)
+        loss = self.model.loss
         self.model.compile(optimizer=optimizer, loss=loss, metrics=["mse", "mae"])
         self.data_store.put("model", self.model, self.scope)

@@ -71,7 +72,7 @@ class ModelSetup(RunEnvironment):
     def build_model(self):
         args_list = ["activation", "window_history_size", "channels", "regularizer", "dropout_rate", "window_lead_time"]
         args = self.data_store.create_args_dict(args_list, self.scope)
-        self.model = my_little_model(**args)
+        self.model = MyLittleModel(**args)

     def plot_model(self):  # pragma: no cover
         with tf.device("/cpu:0"):
@@ -109,10 +110,6 @@ class ModelSetup(RunEnvironment):
         activation = keras.layers.PReLU  # ELU #LeakyReLU keras.activations.tanh #
         self.data_store.put("activation", activation, self.scope)

-        # set loss
-        loss_all = my_little_loss()
-        self.data_store.put("loss", loss_all, self.scope)
-

 def my_loss():
     loss = l_p_loss(4)
```
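The last hunk removes the loss hand-off through the data store, since the loss now lives on the model class itself. For a customised loss there, the `loss` property docstring prescribes a closure pattern; a minimal sketch under that pattern (the `power` parameter and the L_p body are illustrative assumptions, analogous to `l_p_loss` from `src.helpers`, not the project's exact implementation):

```python
import keras.backend as K

def customised_l_p_loss(power):
    # The outer function captures the extra argument; keras only ever calls
    # the inner loss(y_true, y_pred) signature.
    def loss(y_true, y_pred):
        return K.mean(K.pow(K.abs(y_pred - y_true), power), axis=-1)
    return loss

# e.g. assigned inside a model class as: self._loss = customised_l_p_loss(4)
```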