Skip to content
GitLab
Explore
Sign in
Primary navigation
Search or go to…
Project
MLAir
Manage
Activity
Members
Labels
Plan
Issues
Issue boards
Milestones
Wiki
Code
Merge requests
Repository
Branches
Commits
Tags
Repository graph
Compare revisions
Snippets
Build
Pipelines
Jobs
Pipeline schedules
Artifacts
Deploy
Releases
Container registry
Model registry
Operate
Environments
Monitor
Incidents
Analyze
Value stream analytics
Contributor analytics
CI/CD analytics
Repository analytics
Model experiments
Help
Help
Support
GitLab documentation
Compare GitLab plans
Community forum
Contribute to GitLab
Provide feedback
Keyboard shortcuts
?
Snippets
Groups
Projects
Show more breadcrumbs
esde
machine-learning
MLAir
Commits
538a42c3
Commit
538a42c3
authored
5 years ago
by
lukas leufen
Browse files
Options
Downloads
Patches
Plain Diff
refac but no docs yet
parent
7af779f2
No related branches found
No related tags found
3 merge requests
!125
Release v0.10.0
,
!124
Update Master to new version v0.10.0
,
!91
WIP: Resolve "create sphinx docu"
Pipeline
#35421
passed
5 years ago
Stage: test
Stage: docs
Stage: pages
Stage: deploy
Changes
2
Pipelines
1
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
src/model_modules/flatten.py
+14
-1
14 additions, 1 deletion
src/model_modules/flatten.py
src/model_modules/inception_model.py
+14
-11
14 additions, 11 deletions
src/model_modules/inception_model.py
with
28 additions
and
12 deletions
src/model_modules/flatten.py
+
14
−
1
View file @
538a42c3
...
...
@@ -9,7 +9,20 @@ import keras
def
flatten_tail
(
input_X
:
keras
.
layers
,
name
:
str
,
bound_weight
:
bool
=
False
,
dropout_rate
:
float
=
0.0
,
window_lead_time
:
int
=
4
,
activation
:
Callable
=
keras
.
activations
.
relu
,
reduction_filter
:
int
=
64
,
first_dense
:
int
=
64
):
"""
Flatten output of
:param input_X:
:param name:
:param bound_weight:
:param dropout_rate:
:param window_lead_time:
:param activation:
:param reduction_filter:
:param first_dense:
:return:
"""
X_in
=
keras
.
layers
.
Conv2D
(
reduction_filter
,
(
1
,
1
),
padding
=
'
same
'
,
name
=
'
{}_Conv_1x1
'
.
format
(
name
))(
input_X
)
X_in
=
activation
(
name
=
'
{}_conv_act
'
.
format
(
name
))(
X_in
)
...
...
This diff is collapsed.
Click to expand it.
src/model_modules/inception_model.py
+
14
−
11
View file @
538a42c3
...
...
@@ -5,7 +5,8 @@ import logging
import
keras
import
keras.layers
as
layers
from
src.model_modules.advanced_paddings
import
PadUtils
,
ReflectionPadding2D
,
SymmetricPadding2D
,
Padding2D
from
src.model_modules.advanced_paddings
import
PadUtils
,
ReflectionPadding2D
,
Padding2D
class
InceptionModelBase
:
...
...
@@ -22,6 +23,7 @@ class InceptionModelBase:
def
block_part_name
(
self
):
"""
Use unicode due to some issues of keras with normal strings
:return:
"""
return
chr
(
self
.
ord_base
+
self
.
part_of_block
)
...
...
@@ -41,6 +43,7 @@ class InceptionModelBase:
"""
This function creates a
"
convolution tower block
"
containing a 1x1 convolution to reduce filter size followed by
convolution with given filter and kernel size
:param input_x: Input to network part
:param reduction_filter: Number of filters used in 1x1 convolution to reduce overall filter size before conv.
:param tower_filter: Number of filters for n x m convolution
...
...
@@ -79,8 +82,8 @@ class InceptionModelBase:
# name=f'Block_{self.number_of_blocks}{self.block_part_name()}_Pad'
# )(tower)
tower
=
Padding2D
(
padding
)(
padding
=
padding_size
,
name
=
f
'
Block_
{
self
.
number_of_blocks
}{
self
.
block_part_name
()
}
_Pad
'
)(
tower
)
name
=
f
'
Block_
{
self
.
number_of_blocks
}{
self
.
block_part_name
()
}
_Pad
'
)(
tower
)
tower
=
layers
.
Conv2D
(
tower_filter
,
tower_kernel
,
...
...
@@ -137,6 +140,7 @@ class InceptionModelBase:
def
create_pool_tower
(
self
,
input_x
,
pool_kernel
,
tower_filter
,
activation
=
'
relu
'
,
max_pooling
=
True
,
**
kwargs
):
"""
This function creates a
"
MaxPooling tower block
"
:param input_x: Input to network part
:param pool_kernel: size of pooling kernel
:param tower_filter: Number of filters used in 1x1 convolution to reduce filter size
...
...
@@ -160,11 +164,11 @@ class InceptionModelBase:
pooling
=
layers
.
AveragePooling2D
# tower = self.padding_layer(padding)(padding=padding_size, name=block_name+'Pad')(input_x)
tower
=
Padding2D
(
padding
)(
padding
=
padding_size
,
name
=
block_name
+
'
Pad
'
)(
input_x
)
tower
=
pooling
(
pool_kernel
,
strides
=
(
1
,
1
),
padding
=
'
valid
'
,
name
=
block_name
+
block_type
)(
tower
)
tower
=
Padding2D
(
padding
)(
padding
=
padding_size
,
name
=
block_name
+
'
Pad
'
)(
input_x
)
tower
=
pooling
(
pool_kernel
,
strides
=
(
1
,
1
),
padding
=
'
valid
'
,
name
=
block_name
+
block_type
)(
tower
)
# convolution block
tower
=
layers
.
Conv2D
(
tower_filter
,
(
1
,
1
),
padding
=
'
valid
'
,
name
=
block_name
+
"
1x1
"
)(
tower
)
tower
=
layers
.
Conv2D
(
tower_filter
,
(
1
,
1
),
padding
=
'
valid
'
,
name
=
block_name
+
"
1x1
"
)(
tower
)
tower
=
self
.
act
(
tower
,
activation
,
**
act_settings
)
return
tower
...
...
@@ -172,6 +176,7 @@ class InceptionModelBase:
def
inception_block
(
self
,
input_x
,
tower_conv_parts
,
tower_pool_parts
,
**
kwargs
):
"""
Create an inception block
:param input_x: Input to block
:param tower_conv_parts: dict containing settings for parts of inception block; Example:
tower_conv_parts = {
'
tower_1
'
: {
'
reduction_filter
'
: 32,
...
...
@@ -211,7 +216,7 @@ class InceptionModelBase:
tower_build
[
'
avgpool
'
]
=
self
.
create_pool_tower
(
input_x
,
**
tower_pool_parts
,
**
kwargs
,
max_pooling
=
False
)
block
=
keras
.
layers
.
concatenate
(
list
(
tower_build
.
values
()),
axis
=
3
,
name
=
block_name
+
"
_Co
"
)
name
=
block_name
+
"
_Co
"
)
return
block
...
...
@@ -258,7 +263,7 @@ if __name__ == '__main__':
conv_settings_dict
=
{
'
tower_1
'
:
{
'
reduction_filter
'
:
64
,
'
tower_filter
'
:
64
,
'
tower_kernel
'
:
(
3
,
3
),
'
activation
'
:
LeakyReLU
,},
'
activation
'
:
LeakyReLU
,
},
'
tower_2
'
:
{
'
reduction_filter
'
:
64
,
'
tower_filter
'
:
64
,
'
tower_kernel
'
:
(
5
,
5
),
...
...
@@ -295,12 +300,10 @@ if __name__ == '__main__':
# compile
epochs
=
1
lrate
=
0.01
decay
=
lrate
/
epochs
decay
=
lrate
/
epochs
sgd
=
SGD
(
lr
=
lrate
,
momentum
=
0.9
,
decay
=
decay
,
nesterov
=
False
)
model
.
compile
(
loss
=
'
categorical_crossentropy
'
,
optimizer
=
sgd
,
metrics
=
[
'
accuracy
'
])
print
(
X_train
.
shape
)
keras
.
utils
.
plot_model
(
model
,
to_file
=
'
model.pdf
'
,
show_shapes
=
True
,
show_layer_names
=
True
)
# model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test))
print
(
'
test
'
)
This diff is collapsed.
Click to expand it.
Preview
0%
Loading
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Save comment
Cancel
Please
register
or
sign in
to comment