diff --git a/mlair/model_modules/model_class.py b/mlair/model_modules/model_class.py
index 04293c8f5c0ecf73e7d51fe6ae97e8ff7db96252..96cfdccf9e29f0b4cf780432f2a14a4406a60580 100644
--- a/mlair/model_modules/model_class.py
+++ b/mlair/model_modules/model_class.py
@@ -121,8 +121,6 @@ __author__ = "Lukas Leufen, Felix Kleinert"
 __date__ = '2020-05-12'
 
 import tensorflow.keras as keras
-import tensorflow as tf
-import numpy as np
 
 from mlair.model_modules import AbstractModelClass
 from mlair.model_modules.inception_model import InceptionModelBase
@@ -598,9 +596,9 @@ class MyUnet(AbstractModelClass):
 
     def __init__(self, input_shape: list, output_shape: list):
         super().__init__(input_shape[0], output_shape[0])
-        self.filters = 32
-        self.lstm_units = 64
-        self.kernel_size = (3,1)# (3,1)
+        self.first_filter_size = self._input_shape[-1]  # 16
+        self.lstm_units = 64 * 2
+        self.kernel_size = (3, 1)  # (3,1)
         self.activation = "elu"
         self.pool_size = (2, 1)
 
@@ -612,8 +610,6 @@ class MyUnet(AbstractModelClass):
 
         self.dense_units = 32*2
         self.initial_lr = 0.001
-        print(f"tf.__version__ = {tf.__version__}")
-        print(f"np.__version__ = {np.__version__}")
 
         # apply to model
         self.set_model()
@@ -625,12 +621,12 @@ class MyUnet(AbstractModelClass):
 
         pad_size = PadUtils.get_padding_for_same(self.kernel_size)
 
         c1 = Padding2D("SymPad2D")(padding=pad_size)(input_train)
-        c1 = keras.layers.Conv2D(16, self.kernel_size, activation=self.activation,
+        c1 = keras.layers.Conv2D(self.first_filter_size, self.kernel_size, activation=self.activation,
                                  kernel_initializer=self.kernel_initializer,
                                  kernel_regularizer=self.kernel_regularizer, bias_regularizer=self.bias_regularizer)(c1)
         c1 = keras.layers.Dropout(0.1)(c1)
         c1 = Padding2D("SymPad2D")(padding=pad_size)(c1)
-        c1 = keras.layers.Conv2D(16, self.kernel_size, activation=self.activation,
+        c1 = keras.layers.Conv2D(self.first_filter_size, self.kernel_size, activation=self.activation,
                                  kernel_initializer=self.kernel_initializer, name='c1',
                                  kernel_regularizer=self.kernel_regularizer, bias_regularizer=self.bias_regularizer)(c1)
@@ -638,12 +634,12 @@ class MyUnet(AbstractModelClass):
 
         # p1 = keras.layers.MaxPooling2D(self.pool_size)(c1)
 
         c2 = Padding2D("SymPad2D")(padding=pad_size)(p1)
-        c2 = keras.layers.Conv2D(32, self.kernel_size, activation=self.activation,
+        c2 = keras.layers.Conv2D(self.first_filter_size * 2, self.kernel_size, activation=self.activation,
                                  kernel_initializer=self.kernel_initializer,
                                  kernel_regularizer=self.kernel_regularizer, bias_regularizer=self.bias_regularizer)(c2)
         c2 = keras.layers.Dropout(0.1)(c2)
         c2 = Padding2D("SymPad2D")(padding=pad_size)(c2)
-        c2 = keras.layers.Conv2D(32, self.kernel_size, activation=self.activation,
+        c2 = keras.layers.Conv2D(self.first_filter_size * 2, self.kernel_size, activation=self.activation,
                                  kernel_initializer=self.kernel_initializer, name='c2',
                                  kernel_regularizer=self.kernel_regularizer, bias_regularizer=self.bias_regularizer)(c2)
@@ -651,12 +647,12 @@ class MyUnet(AbstractModelClass):
 
         # p2 = keras.layers.MaxPooling2D(self.pool_size)(c2)
 
         c3 = Padding2D("SymPad2D")(padding=pad_size)(p2)
-        c3 = keras.layers.Conv2D(64, self.kernel_size, activation=self.activation,
+        c3 = keras.layers.Conv2D(self.first_filter_size * 4, self.kernel_size, activation=self.activation,
                                  kernel_initializer=self.kernel_initializer,
                                  kernel_regularizer=self.kernel_regularizer, bias_regularizer=self.bias_regularizer)(c3)
         c3 = keras.layers.Dropout(0.2)(c3)
         c3 = Padding2D("SymPad2D")(padding=pad_size)(c3)
-        c3 = keras.layers.Conv2D(64, self.kernel_size, activation=self.activation,
+        c3 = keras.layers.Conv2D(self.first_filter_size * 4, self.kernel_size, activation=self.activation,
                                  kernel_initializer=self.kernel_initializer, name='c3',
                                  kernel_regularizer=self.kernel_regularizer, bias_regularizer=self.bias_regularizer)(c3)
@@ -665,14 +661,14 @@ class MyUnet(AbstractModelClass):
 
         ### own LSTM Block ###
         ls1 = keras.layers.Reshape((p3.shape[1], p3.shape[-1]))(p3)
-        ls1 = keras.layers.LSTM(64*2, return_sequences=True)(ls1)
-        ls1 = keras.layers.LSTM(64*2, return_sequences=True)(ls1)
+        ls1 = keras.layers.LSTM(self.lstm_units, return_sequences=True)(ls1)
+        ls1 = keras.layers.LSTM(self.lstm_units, return_sequences=True)(ls1)
         c4 = keras.layers.Reshape((p3.shape[1], 1, -1))(ls1)
 
         ### own 2nd LSTM Block ###
         ls2 = keras.layers.Reshape((c3.shape[1], c3.shape[-1]))(c3)
-        ls2 = keras.layers.LSTM(64 * 2, return_sequences=True)(ls2)
-        ls2 = keras.layers.LSTM(64 * 2, return_sequences=True)(ls2)
+        ls2 = keras.layers.LSTM(self.lstm_units, return_sequences=True)(ls2)
+        ls2 = keras.layers.LSTM(self.lstm_units, return_sequences=True)(ls2)
         c4_2 = keras.layers.Reshape((c3.shape[1], 1, -1))(ls2)
 
         # c4 = Padding2D("SymPad2D")(padding=pad_size)(p3)
@@ -709,13 +705,13 @@ class MyUnet(AbstractModelClass):
         u7 = keras.layers.concatenate([u7, cn3], name="u7_c3")
         c7 = u7
         # c7 = Padding2D("SymPad2D")(padding=pad_size)(u7)
-        c7 = keras.layers.Conv2D(64, self.kernel_size, activation=self.activation,
+        c7 = keras.layers.Conv2D(self.first_filter_size * 4, self.kernel_size, activation=self.activation,
                                  kernel_initializer=self.kernel_initializer,
                                  kernel_regularizer=self.kernel_regularizer, bias_regularizer=self.bias_regularizer)(c7)
         c7 = keras.layers.concatenate([c7, c4_2], name="Concat_2nd_LSTM")
         c7 = keras.layers.Dropout(0.2)(c7)
         c7 = Padding2D("SymPad2D")(padding=pad_size)(c7)
-        c7 = keras.layers.Conv2D(64, self.kernel_size, activation=self.activation,
+        c7 = keras.layers.Conv2D(self.first_filter_size * 4, self.kernel_size, activation=self.activation,
                                  kernel_initializer=self.kernel_initializer, name='c7_to_u8',
                                  kernel_regularizer=self.kernel_regularizer, bias_regularizer=self.bias_regularizer)(c7)
 
@@ -726,12 +722,12 @@ class MyUnet(AbstractModelClass):
         # u8 = c3
         u8 = keras.layers.concatenate([u8, c2], name="u8_c2")
         c8 = Padding2D("SymPad2D")(padding=pad_size)(u8)
-        c8 = keras.layers.Conv2D(32, self.kernel_size, activation=self.activation,
+        c8 = keras.layers.Conv2D(self.first_filter_size * 2, self.kernel_size, activation=self.activation,
                                  kernel_initializer=self.kernel_initializer,
                                  kernel_regularizer=self.kernel_regularizer, bias_regularizer=self.bias_regularizer)(c8)
         c8 = keras.layers.Dropout(0.1)(c8)
         c8 = Padding2D("SymPad2D")(padding=pad_size)(c8)
-        c8 = keras.layers.Conv2D(32, self.kernel_size, activation=self.activation,
+        c8 = keras.layers.Conv2D(self.first_filter_size * 2, self.kernel_size, activation=self.activation,
                                  kernel_initializer=self.kernel_initializer, name='c8_to_u9',
                                  kernel_regularizer=self.kernel_regularizer, bias_regularizer=self.bias_regularizer)(c8)
 
@@ -741,12 +737,12 @@ class MyUnet(AbstractModelClass):
         u9 = c8
         u9 = keras.layers.concatenate([u9, c1], name="u9_c1")
         c9 = Padding2D("SymPad2D")(padding=pad_size)(u9)
-        c9 = keras.layers.Conv2D(16, self.kernel_size, activation=self.activation,
+        c9 = keras.layers.Conv2D(self.first_filter_size, self.kernel_size, activation=self.activation,
                                  kernel_initializer=self.kernel_initializer,
                                  kernel_regularizer=self.kernel_regularizer, bias_regularizer=self.bias_regularizer)(c9)
         c9 = keras.layers.Dropout(0.1)(c9)
         c9 = Padding2D("SymPad2D")(padding=pad_size)(c9)
-        c9 = keras.layers.Conv2D(16, self.kernel_size, activation=self.activation,
+        c9 = keras.layers.Conv2D(self.first_filter_size, self.kernel_size, activation=self.activation,
                                  kernel_initializer=self.kernel_initializer, name='c9',
                                  kernel_regularizer=self.kernel_regularizer, bias_regularizer=self.bias_regularizer)(c9)
 