Commit e801995b authored by Lukas Leufen

Merge branch 'lukas_issue001_feat_setup-repo' into 'develop'

setup repo

See merge request toar/machinelearningtools!1
parents cce397b2 b037ffcf
# Compiled source #
###################
*.pyc
*.com
*.class
*.dll
*.exe
*.o
*.so
# Packages #
############
# it's better to unpack these files and commit the raw source
# git has its own built in compression methods
*.7z
*.dmg
*.gz
*.iso
*.jar
*.rar
*.tar
*.zip
# Logs and databases #
######################
*.log
*.sql
*.sqlite
*.sqlite3
# OS generated files #
######################
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db
.idea/
/venv/
# check plot folder #
#####################
/plots/
# tmp folder #
##############
/tmp/
Keras==2.2.4
numpy==1.15.4
tensorflow==1.12.0
__author__ = 'Felix Kleinert, Lukas Leufen'
import keras
from keras.layers import Input, Dense, Conv2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D, Dropout, Flatten, \
Concatenate, Reshape, Activation
from keras.models import Model
from keras.regularizers import l2
from keras.optimizers import SGD
class InceptionModelBase:
    """
    This class contains all construction blocks needed to assemble inception-style network blocks.
    """

    def __init__(self):
        self.number_of_blocks = 0
        self.part_of_block = 0
        # conversion between chr and ord:
        # >>> chr(97)
        # 'a'
        # >>> ord('a')
        # 97
        # set to 96 because part_of_block is incremented by 1 before each new part of a block is named
        self.ord_base = 96

    def block_part_name(self):
        """
        Use unicode characters due to some issues of keras with normal strings
        :return: character ('a', 'b', ...) identifying the current part of the block
        """
        return chr(self.ord_base + self.part_of_block)
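    # Naming scheme example (illustrative, not part of this commit): with ord_base = 96,
    # part_of_block = 1 maps to 'a', part_of_block = 2 maps to 'b', and so on, so the
    # layers created within one block get the suffixes a, b, c, ... in creation order.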
    def create_conv_tower(self,
                          input_X,
                          reduction_filter,
                          tower_filter,
                          tower_kernel,
                          activation='relu',
                          regularizer=l2(0.01)):
        """
        This function creates a "convolution tower block" containing a 1x1 convolution to reduce the filter size,
        followed by a convolution with the given filter and kernel size. For a 1x1 tower_kernel, only a single
        1x1 convolution is applied.
        :param input_X: input to this network part
        :param reduction_filter: number of filters used in the 1x1 convolution to reduce the overall filter size before the main convolution
        :param tower_filter: number of filters for the n x m convolution
        :param tower_kernel: kernel size for the convolution (n, m)
        :param activation: activation function for the convolutions
        :param regularizer: kernel regularizer applied to all convolutions
        :return: output tensor of the tower
        """
        self.part_of_block += 1
        if tower_kernel == (1, 1):
            tower = Conv2D(tower_filter,
                           tower_kernel,
                           activation=activation,
                           padding='same',
                           kernel_regularizer=regularizer,
                           name='Block_{}{}_{}x{}'.format(self.number_of_blocks,
                                                          self.block_part_name(),
                                                          tower_kernel[0],
                                                          tower_kernel[1]))(input_X)
        else:
            tower = Conv2D(reduction_filter,
                           (1, 1),
                           activation=activation,
                           padding='same',
                           kernel_regularizer=regularizer,
                           name='Block_{}{}_1x1'.format(self.number_of_blocks, self.block_part_name()))(input_X)
            tower = Conv2D(tower_filter,
                           tower_kernel,
                           activation=activation,
                           padding='same',
                           kernel_regularizer=regularizer,
                           name='Block_{}{}_{}x{}'.format(self.number_of_blocks,
                                                          self.block_part_name(),
                                                          tower_kernel[0],
                                                          tower_kernel[1]))(tower)
        return tower
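    # Hypothetical usage sketch (names and filter sizes are illustrative, not part of this commit):
    # >>> base = InceptionModelBase()
    # >>> x = Input(shape=(32, 32, 3))
    # >>> tower = base.create_conv_tower(x, reduction_filter=32, tower_filter=64, tower_kernel=(3, 3))
    # The call above first applies a 1x1 convolution with 32 filters to reduce the channel count,
    # then a 3x3 convolution with 64 filters; 'same' padding keeps the spatial shape unchanged.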
    @staticmethod
    def create_pool_tower(input_X, pool_kernel, tower_filter):
        """
        This function creates a "MaxPooling tower block"
        :param input_X: input to this network part
        :param pool_kernel: size of the pooling kernel
        :param tower_filter: number of filters used in the 1x1 convolution to reduce the filter size
        :return: output tensor of the tower
        """
        tower = MaxPooling2D(pool_kernel, strides=(1, 1), padding='same')(input_X)
        tower = Conv2D(tower_filter, (1, 1), padding='same', activation='relu')(tower)
        return tower
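    # Hypothetical usage sketch, continuing the example above (values are illustrative):
    # >>> pool = base.create_pool_tower(x, pool_kernel=(3, 3), tower_filter=64)
    # Max pooling with strides (1, 1) and 'same' padding keeps the spatial shape, so the
    # result can later be concatenated with the convolution towers along the channel axis.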
    def inception_block(self, input_X, tower_conv_parts, tower_pool_parts):
        """
        Create an inception block
        :param input_X: input to the block
        :param tower_conv_parts: dict containing settings for the convolution parts of the inception block; Example:
                tower_conv_parts = {'tower_1': {'reduction_filter': 32,
                                                'tower_filter': 64,
                                                'tower_kernel': (3, 1)},
                                    'tower_2': {'reduction_filter': 32,
                                                'tower_filter': 64,
                                                'tower_kernel': (5, 1)},
                                    'tower_3': {'reduction_filter': 32,
                                                'tower_filter': 64,
                                                'tower_kernel': (1, 1)},
                                    }
        :param tower_pool_parts: dict containing settings for the pooling part of the inception block; Example:
                tower_pool_parts = {'pool_kernel': (3, 1), 'tower_filter': 64}
        :return: concatenated output of all towers of the block
        """
        self.number_of_blocks += 1
        self.part_of_block = 0
        tower_build = {}
        for part, part_settings in tower_conv_parts.items():
            tower_build[part] = self.create_conv_tower(input_X,
                                                       part_settings['reduction_filter'],
                                                       part_settings['tower_filter'],
                                                       part_settings['tower_kernel'])
        tower_build['pool'] = self.create_pool_tower(input_X,
                                                     tower_pool_parts['pool_kernel'],
                                                     tower_pool_parts['tower_filter'])
        block = keras.layers.concatenate(list(tower_build.values()), axis=3)
        return block
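    # Hypothetical sketch of stacking two inception blocks (settings dicts as in the
    # docstring example or the __main__ demo below; not part of this commit):
    # >>> block1 = base.inception_block(input_img, conv_settings_dict, pool_settings_dict)
    # >>> block2 = base.inception_block(block1, conv_settings_dict, pool_settings_dict)
    # Each call increments number_of_blocks and resets part_of_block, so layer names
    # such as 'Block_1a_3x3' and 'Block_2a_3x3' stay unique across stacked blocks.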
    @staticmethod
    def flatten_tail(input_X, tail_block):
        """
        Flatten the input and apply the given tail block (a callable, e.g. a keras layer) on top of it.
        :return: output of the tail block
        """
        input_X = Flatten()(input_X)
        tail = tail_block(input_X)
        return tail
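    # Hypothetical usage sketch (the Dense tail is illustrative, not part of this commit):
    # >>> tail = InceptionModelBase.flatten_tail(block1, Dense(10, activation='softmax'))
    # This flattens the block output and feeds it into a 10-way softmax classification head.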
if __name__ == '__main__':
    print(__name__)
    from keras.datasets import cifar10
    from keras.utils import np_utils
    from keras.layers import Input

    conv_settings_dict = {'tower_1': {'reduction_filter': 64,
                                      'tower_filter': 64,
                                      'tower_kernel': (3, 3)},
                          'tower_2': {'reduction_filter': 64,
                                      'tower_filter': 64,
                                      'tower_kernel': (5, 5)},
                          }
    pool_settings_dict = {'pool_kernel': (3, 3),
                          'tower_filter': 64}
    myclass = True

    (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train = X_train / 255.0
    X_test = X_test / 255.0
    y_train = np_utils.to_categorical(y_train)
    y_test = np_utils.to_categorical(y_test)

    input_img = Input(shape=(32, 32, 3))
    if myclass:
        googLeNet = InceptionModelBase()
        output = googLeNet.inception_block(input_img, conv_settings_dict, pool_settings_dict)
    else:
        tower_1 = Conv2D(64, (1, 1), padding='same', activation='relu')(input_img)
        tower_1 = Conv2D(64, (3, 3), padding='same', activation='relu')(tower_1)
        tower_2 = Conv2D(64, (1, 1), padding='same', activation='relu')(input_img)
        tower_2 = Conv2D(64, (5, 5), padding='same', activation='relu')(tower_2)
        tower_3 = MaxPooling2D((3, 3), strides=(1, 1), padding='same')(input_img)
        tower_3 = Conv2D(64, (1, 1), padding='same', activation='relu')(tower_3)
        output = keras.layers.concatenate([tower_1, tower_2, tower_3], axis=3)
    output = Flatten()(output)
    out = Dense(10, activation='softmax')(output)
    model = Model(inputs=input_img, outputs=out)
    print(model.summary())

    epochs = 10
    lrate = 0.01
    decay = lrate / epochs
    sgd = SGD(lr=lrate, momentum=0.9, decay=decay, nesterov=False)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    print(X_train.shape)
    # model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epochs, batch_size=32)
    #
    # scores = model.evaluate(X_test, y_test, verbose=0)
    # print("Accuracy: %.2f%%" % (scores[1]*100))