diff --git a/src/inception_model.py b/src/inception_model.py
index fddd3e04788031354daf08c347f6cd0f0ccc6f3c..09e032ca70a8e63d9459464e968d25fac29cd4f1 100644
--- a/src/inception_model.py
+++ b/src/inception_model.py
@@ -1,13 +1,8 @@
 __author__ = 'Felix Kleinert, Lukas Leufen'
+__date__ = '2019-10-22'
 
 import keras
-from keras.layers import Input, Dense, Conv2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D, Dropout, Flatten, \
-    Concatenate, Reshape, Activation, ReLU
 import keras.layers as layers
-from keras.models import Model
-from keras.regularizers import l2
-from keras.optimizers import SGD
-from keras.layers.advanced_activations import LeakyReLU, PReLU, ELU
 
 
 class InceptionModelBase:
@@ -19,13 +14,7 @@ class InceptionModelBase:
         self.number_of_blocks = 0
         self.part_of_block = 0
         self.act_number = 0
-        # conversion between chr and ord:
-        # >>> chr(97)
-        # 'a'
-        # >>> ord('a')
-        # 97
-        # set to 96 as always add +1 for new part of block
-        self.ord_base = 96
+        self.ord_base = 96  # start at 96 because each new block part adds +1 first (chr(97) == 'a')
 
     def block_part_name(self):
         """
@@ -59,7 +48,7 @@ class InceptionModelBase:
         """
         self.part_of_block += 1
         self.act_number = 1
-        regularizer = kwargs.get('regularizer', l2(0.01))
+        regularizer = kwargs.get('regularizer', keras.regularizers.l2(0.01))
         bn_settings = kwargs.get('bn_settings', {})
         act_settings = kwargs.get('act_settings', {})
         print(f'Inception Block with activation: {activation}')
@@ -67,25 +56,25 @@ class InceptionModelBase:
         block_name = f'Block_{self.number_of_blocks}{self.block_part_name()}_{tower_kernel[0]}x{tower_kernel[1]}'
 
         if tower_kernel == (1, 1):
-            tower = Conv2D(tower_filter,
-                           tower_kernel,
-                           padding='same',
-                           kernel_regularizer=regularizer,
-                           name=block_name)(input_x)
+            tower = layers.Conv2D(tower_filter,
+                                  tower_kernel,
+                                  padding='same',
+                                  kernel_regularizer=regularizer,
+                                  name=block_name)(input_x)
             tower = self.act(tower, activation, **act_settings)
         else:
-            tower = Conv2D(reduction_filter,
-                           (1, 1),
-                           padding='same',
-                           kernel_regularizer=regularizer,
-                           name=f'Block_{self.number_of_blocks}{self.block_part_name()}_1x1')(input_x)
+            tower = layers.Conv2D(reduction_filter,
+                                  (1, 1),
+                                  padding='same',
+                                  kernel_regularizer=regularizer,
+                                  name=f'Block_{self.number_of_blocks}{self.block_part_name()}_1x1')(input_x)
             tower = self.act(tower, activation, **act_settings)
 
-            tower = Conv2D(tower_filter,
-                           tower_kernel,
-                           padding='same',
-                           kernel_regularizer=regularizer,
-                           name=block_name)(tower)
+            tower = layers.Conv2D(tower_filter,
+                                  tower_kernel,
+                                  padding='same',
+                                  kernel_regularizer=regularizer,
+                                  name=block_name)(tower)
             if batch_normalisation:
                 tower = self.batch_normalisation(tower, **bn_settings)
             tower = self.act(tower, activation, **act_settings)
@@ -128,13 +117,11 @@ class InceptionModelBase:
         block_name = f"Block_{self.number_of_blocks}{self.block_part_name()}_"
         if max_pooling:
             block_type = "MaxPool"
-            pooling = MaxPooling2D
+            pooling = layers.MaxPooling2D
         else:
             block_type = "AvgPool"
-            pooling = AveragePooling2D
+            pooling = layers.AveragePooling2D
         tower = pooling(pool_kernel, strides=(1, 1), padding='same', name=block_name+block_type)(input_x)
-        # tower = MaxPooling2D(pool_kernel, strides=(1, 1), padding='same', name=block_name)(input_x)
-        # tower = AveragePooling2D(pool_kernel, strides=(1, 1), padding='same', name=block_name)(input_x)
 
         # convolution block
-        tower = Conv2D(tower_filter, (1, 1), padding='same', name=block_name+"1x1")(tower)
+        tower = layers.Conv2D(tower_filter, (1, 1), padding='same', name=block_name+"1x1")(tower)
@@ -179,18 +166,18 @@ class InceptionModelBase:
         block = keras.layers.concatenate(list(tower_build.values()), axis=3)
         return block
 
-    @staticmethod
-    def flatten_tail(input_x, tail_block):
-        input_x = Flatten()(input_x)
-        tail = tail_block(input_x)
-        return tail
-
 
 if __name__ == '__main__':
     print(__name__)
     from keras.datasets import cifar10
     from keras.utils import np_utils
     from keras.layers import Input
+    from keras.layers.advanced_activations import LeakyReLU
+    from keras.optimizers import SGD
+    from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D
+    from keras.models import Model
+
+    # network settings
     conv_settings_dict = {'tower_1': {'reduction_filter': 64,
                                       'tower_filter': 64,
                                       'tower_kernel': (3, 3),
@@ -203,8 +190,8 @@ if __name__ == '__main__':
     pool_settings_dict = {'pool_kernel': (3, 3),
                           'tower_filter': 64,
                           'activation': 'relu'}
-    myclass = True
 
+    # load data
     (X_train, y_train), (X_test, y_test) = cifar10.load_data()
     X_train = X_train.astype('float32')
     X_test = X_test.astype('float32')
@@ -214,36 +201,19 @@ if __name__ == '__main__':
     y_test = np_utils.to_categorical(y_test)
     input_img = Input(shape=(32, 32, 3))
 
-    if myclass:
-        googLeNet = InceptionModelBase()
-        output = googLeNet.inception_block(input_img, conv_settings_dict, pool_settings_dict)
-    else:
-        tower_1 = Conv2D(64, (1, 1), padding='same', activation='relu')(input_img)
-        tower_1 = Conv2D(64, (3, 3), padding='same', activation='relu')(tower_1)
-
-        tower_2 = Conv2D(64, (1, 1), padding='same', activation='relu')(input_img)
-        tower_2 = Conv2D(64, (5, 5), padding='same', activation='relu')(tower_2)
-
-        tower_3 = MaxPooling2D((3, 3), strides=(1, 1), padding='same')(input_img)
-        tower_3 = Conv2D(64, (1, 1), padding='same', activation='relu')(tower_3)
-
-        output = keras.layers.concatenate([tower_1, tower_2, tower_3], axis=3)
-
+    # create inception net
+    inception_net = InceptionModelBase()
+    output = inception_net.inception_block(input_img, conv_settings_dict, pool_settings_dict)
     output = Flatten()(output)
     output = Dense(10, activation='softmax')(output)
     model = Model(inputs=input_img, outputs=output)
     print(model.summary())
 
+    # compile
     epochs = 10
     lrate = 0.01
     decay = lrate/epochs
     sgd = SGD(lr=lrate, momentum=0.9, decay=decay, nesterov=False)
-
     model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
-
     print(X_train.shape)
     keras.utils.plot_model(model, to_file='model.pdf', show_shapes=True, show_layer_names=True)
-    # model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epochs, batch_size=32)
-    #
-    # scores = model.evaluate(X_test, y_test, verbose=0)
-    # print("Accuracy: %.2f%%" % (scores[1]*100))