From 538a42c3bc7bcda76d08294deb258cbead47cbc4 Mon Sep 17 00:00:00 2001
From: lukas leufen <l.leufen@fz-juelich.de>
Date: Tue, 28 Apr 2020 13:16:03 +0200
Subject: [PATCH] refac but now docs yet

---
 src/model_modules/flatten.py         | 15 ++++++++++++++-
 src/model_modules/inception_model.py | 25 ++++++++++++++-----------
 2 files changed, 28 insertions(+), 12 deletions(-)

diff --git a/src/model_modules/flatten.py b/src/model_modules/flatten.py
index bbe92472..2c692041 100644
--- a/src/model_modules/flatten.py
+++ b/src/model_modules/flatten.py
@@ -9,7 +9,20 @@ import keras
 def flatten_tail(input_X: keras.layers, name: str, bound_weight: bool = False, dropout_rate: float = 0.0,
                  window_lead_time: int = 4, activation: Callable = keras.activations.relu,
                  reduction_filter: int = 64, first_dense: int = 64):
-
+    """
+    Flatten output of
+
+    :param input_X:
+    :param name:
+    :param bound_weight:
+    :param dropout_rate:
+    :param window_lead_time:
+    :param activation:
+    :param reduction_filter:
+    :param first_dense:
+
+    :return:
+    """
     X_in = keras.layers.Conv2D(reduction_filter, (1, 1), padding='same', name='{}_Conv_1x1'.format(name))(input_X)
 
     X_in = activation(name='{}_conv_act'.format(name))(X_in)
diff --git a/src/model_modules/inception_model.py b/src/model_modules/inception_model.py
index 6467b324..dbf644cf 100644
--- a/src/model_modules/inception_model.py
+++ b/src/model_modules/inception_model.py
@@ -5,7 +5,8 @@ import logging
 
 import keras
 import keras.layers as layers
-from src.model_modules.advanced_paddings import PadUtils, ReflectionPadding2D, SymmetricPadding2D, Padding2D
+
+from src.model_modules.advanced_paddings import PadUtils, ReflectionPadding2D, Padding2D
 
 
 class InceptionModelBase:
@@ -22,6 +23,7 @@ class InceptionModelBase:
     def block_part_name(self):
         """
         Use unicode due to some issues of keras with normal strings
+
         :return:
         """
         return chr(self.ord_base + self.part_of_block)
@@ -41,6 +43,7 @@ class InceptionModelBase:
         """
         This function creates a "convolution tower block" containing a 1x1 convolution to reduce filter size followed
         by convolution with given filter and kernel size
+
         :param input_x: Input to network part
         :param reduction_filter: Number of filters used in 1x1 convolution to reduce overall filter size before conv.
         :param tower_filter: Number of filters for n x m convolution
@@ -79,8 +82,8 @@ class InceptionModelBase:
             #                                  name=f'Block_{self.number_of_blocks}{self.block_part_name()}_Pad'
             #                                  )(tower)
             tower = Padding2D(padding)(padding=padding_size,
-                                      name=f'Block_{self.number_of_blocks}{self.block_part_name()}_Pad'
-                                      )(tower)
+                                       name=f'Block_{self.number_of_blocks}{self.block_part_name()}_Pad'
+                                       )(tower)
 
             tower = layers.Conv2D(tower_filter,
                                   tower_kernel,
@@ -137,6 +140,7 @@ class InceptionModelBase:
     def create_pool_tower(self, input_x, pool_kernel, tower_filter, activation='relu', max_pooling=True, **kwargs):
         """
         This function creates a "MaxPooling tower block"
+
         :param input_x: Input to network part
         :param pool_kernel: size of pooling kernel
         :param tower_filter: Number of filters used in 1x1 convolution to reduce filter size
@@ -160,11 +164,11 @@ class InceptionModelBase:
             pooling = layers.AveragePooling2D
 
         # tower = self.padding_layer(padding)(padding=padding_size, name=block_name+'Pad')(input_x)
-        tower = Padding2D(padding)(padding=padding_size, name=block_name+'Pad')(input_x)
-        tower = pooling(pool_kernel, strides=(1, 1), padding='valid', name=block_name+block_type)(tower)
+        tower = Padding2D(padding)(padding=padding_size, name=block_name + 'Pad')(input_x)
+        tower = pooling(pool_kernel, strides=(1, 1), padding='valid', name=block_name + block_type)(tower)
 
         # convolution block
-        tower = layers.Conv2D(tower_filter, (1, 1), padding='valid', name=block_name+"1x1")(tower)
+        tower = layers.Conv2D(tower_filter, (1, 1), padding='valid', name=block_name + "1x1")(tower)
         tower = self.act(tower, activation, **act_settings)
         return tower
 
@@ -172,6 +176,7 @@ class InceptionModelBase:
     def inception_block(self, input_x, tower_conv_parts, tower_pool_parts, **kwargs):
         """
         Crate a inception block
+
         :param input_x: Input to block
         :param tower_conv_parts: dict containing settings for parts of inception block; Example: tower_conv_parts =
             {'tower_1': {'reduction_filter': 32,
@@ -211,7 +216,7 @@ class InceptionModelBase:
 
             tower_build['avgpool'] = self.create_pool_tower(input_x, **tower_pool_parts, **kwargs, max_pooling=False)
 
         block = keras.layers.concatenate(list(tower_build.values()), axis=3,
-                                         name=block_name+"_Co")
+                                         name=block_name + "_Co")
         return block
 
@@ -258,7 +263,7 @@ if __name__ == '__main__':
     conv_settings_dict = {'tower_1': {'reduction_filter': 64,
                                       'tower_filter': 64,
                                       'tower_kernel': (3, 3),
-                                      'activation': LeakyReLU,},
+                                      'activation': LeakyReLU, },
                           'tower_2': {'reduction_filter': 64,
                                       'tower_filter': 64,
                                       'tower_kernel': (5, 5),
@@ -295,12 +300,10 @@ if __name__ == '__main__':
     # compile
     epochs = 1
     lrate = 0.01
-    decay = lrate/epochs
+    decay = lrate / epochs
     sgd = SGD(lr=lrate, momentum=0.9, decay=decay, nesterov=False)
     model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
     print(X_train.shape)
     keras.utils.plot_model(model, to_file='model.pdf', show_shapes=True, show_layer_names=True)
     # model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test))
     print('test')
-
-
-- 
GitLab
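
For reference, the call pattern this patch switches to (Padding2D from advanced_paddings instead of the old self.padding_layer helper) can be sketched in isolation as below. This is a minimal sketch, not part of the patch: the input shape, the block name, the 'SymPad2D' identifier, and the PadUtils.get_padding_for_same helper are illustrative assumptions taken from outside this diff.

    import keras
    import keras.layers as layers

    from src.model_modules.advanced_paddings import PadUtils, Padding2D

    kernel_size = (3, 3)
    # assumed helper: compute the padding that keeps spatial dims for this kernel
    padding_size = PadUtils.get_padding_for_same(kernel_size)

    input_x = keras.layers.Input(shape=(32, 32, 9))  # arbitrary toy input shape
    # Padding2D is first given the padding type ('SymPad2D' is an assumed identifier),
    # then called with size and name to build the layer, then applied to the tensor,
    # mirroring the two-step call visible in create_conv_tower above.
    tower = Padding2D('SymPad2D')(padding=padding_size, name='Block_1a_Pad')(input_x)
    tower = layers.Conv2D(32, kernel_size, padding='valid', name='Block_1a_3x3')(tower)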