diff --git a/src/model_modules/model_class.py b/src/model_modules/model_class.py
index 559a7e28bd52db1d1c9c9db72bfdf54138d1b681..1d6344259b6e6e13bc19f0a726e8015e7a72b21f 100644
--- a/src/model_modules/model_class.py
+++ b/src/model_modules/model_class.py
@@ -376,11 +376,11 @@ class MyPaperModel(AbstractModelClass):
         self.channels = channels
         self.dropout_rate = .3
         self.regularizer = keras.regularizers.l2(0.001)
-        self.initial_lr = 1e-2
+        self.initial_lr = 1e-3
         # self.optimizer = keras.optimizers.adam(lr=self.initial_lr, amsgrad=True)
         self.optimizer = keras.optimizers.SGD(lr=self.initial_lr, momentum=0.9)
         self.lr_decay = src.model_modules.keras_extensions.LearningRateDecay(base_lr=self.initial_lr, drop=.94, epochs_drop=10)
-        self.epochs = 1
+        self.epochs = 150
         self.batch_size = int(256 * 2)
         self.activation = keras.layers.ELU
         self.padding = "SymPad2D"
@@ -447,11 +447,12 @@ class MyPaperModel(AbstractModelClass):
         # X_in = adv_pad.SymmetricPadding2D(padding=pad_size)(X_input)
         # X_in = inception_model.padding_layer("SymPad2D")(padding=pad_size, name="SymPad")(X_input)  # adv_pad.SymmetricPadding2D(padding=pad_size)(X_input)
         X_in = inception_model.padding_layer("SymPad2D")(padding=pad_size, name="SymPad")(X_input)
-        X_in = self.activation(name='FirstAct')(X_in)
         X_in = keras.layers.Conv2D(filters=first_filters,
                                    kernel_size=first_kernel,
                                    kernel_regularizer=self.regularizer,
                                    name="First_conv_{}x{}".format(first_kernel[0], first_kernel[1]))(X_in)
+        X_in = self.activation(name='FirstAct')(X_in)
+
         X_in = inception_model.inception_block(X_in, conv_settings_dict1, pool_settings_dict1, regularizer=self.regularizer,
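
Note on the second hunk (illustration only, not part of the patch): a minimal sketch of the reordered first block, assuming standalone tf.keras and using ZeroPadding2D as a stand-in for the project's SymPad2D padding layer; the input shape, filter count, and kernel size are placeholder values, not taken from the repository.

    # Sketch only: the activation is applied after the first convolution, as in the patch.
    # Assumes tf.keras; ZeroPadding2D stands in for the project's SymmetricPadding2D layer.
    from tensorflow import keras

    inputs = keras.layers.Input(shape=(8, 1, 14))                      # (window, 1, channels), placeholder shape
    x = keras.layers.ZeroPadding2D(padding=((2, 2), (0, 0)))(inputs)   # "same"-style padding for a 5x1 kernel
    x = keras.layers.Conv2D(filters=16, kernel_size=(5, 1),
                            kernel_regularizer=keras.regularizers.l2(0.001),
                            name="First_conv_5x1")(x)
    x = keras.layers.ELU(name="FirstAct")(x)                           # ELU now follows the conv instead of preceding it
    model = keras.Model(inputs=inputs, outputs=x)
    model.summary()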