diff --git a/run.py b/run.py
index 4fcb6cbf0a5644ebfbf370fe88f6373284e68ed5..b1f26e162397e7bece1451a7ba1cd05d000ed95a 100644
--- a/run.py
+++ b/run.py
@@ -27,7 +27,7 @@ def main(parser_args):
                         # stations=['DEBW107', 'DEBY081', 'DEBW013', 'DEBW076', 'DEBW087', 'DEBW001'],
                         station_type='background', window_lead_time=4, window_history_size=6,
                         trainable=False, create_new_model=True, permute_data_on_training=True,
-                        extreme_values=3., train_min_length=365*4, val_min_length=365, test_min_length=365,
+                        extreme_values=3., train_min_length=365, val_min_length=365, test_min_length=365,
                         create_new_bootstraps=True, hpc_hosts=["za"])
 
         PreProcessing()
diff --git a/src/model_modules/model_class.py b/src/model_modules/model_class.py
index 865cab8842536d2ead53c42f959c51df6f2635be..255041518b488f7b564049ff3900791167a686f6 100644
--- a/src/model_modules/model_class.py
+++ b/src/model_modules/model_class.py
@@ -14,6 +14,7 @@ import logging
 from src.model_modules.inception_model import InceptionModelBase
 from src.model_modules.flatten import flatten_tail
 from src.model_modules.advanced_paddings import PadUtils, Padding2D
+from src.helpers import l_p_loss
 
 
 class AbstractModelClass(ABC):
@@ -500,20 +501,21 @@ class MyPaperModel(AbstractModelClass):
         self.window_history_size = window_history_size
         self.window_lead_time = window_lead_time
         self.channels = channels
-        self.dropout_rate = .3
-        self.regularizer = keras.regularizers.l2(0.001)
-        # self.initial_lr = 1e-4
-        self.initial_lr = 0.01
-        self.lr_decay = src.model_modules.keras_extensions.LearningRateDecay(base_lr=self.initial_lr, drop=.94, epochs_drop=10)
-        self.epochs = 3 # 350
-        self.batch_size = int(256 * 2)
+        self.dropout_rate = .35
+        self.regularizer = keras.regularizers.l2(0.01)
+        self.initial_lr = 1e-4
+        # self.initial_lr = 0.01
+        # self.lr_decay = src.model_modules.keras_extensions.LearningRateDecay(base_lr=self.initial_lr, drop=.94, epochs_drop=10)
+        self.epochs = 300
+        self.batch_size = int(256 * 2)
         self.activation = keras.layers.ELU
         self.padding = "SymPad2D"
 
         # apply to model
         self.set_model()
         self.set_compile_options()
-        self.set_custom_objects(loss=self.compile_options["loss"], Padding2D=Padding2D)
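+        # "loss" is now a list (one entry per model output); only its first, custom entry (l_p_loss)
+        # needs registering as a custom object for model reloading, the second is Keras' built-in MSE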
+        self.set_custom_objects(loss=self.compile_options["loss"][0], Padding2D=Padding2D)
 
     def set_model(self):
 
@@ -531,26 +532,26 @@ class MyPaperModel(AbstractModelClass):
         first_filters = 16
 
         conv_settings_dict1 = {
-            'tower_1': {'reduction_filter': 8, 'tower_filter': 16 * 2, 'tower_kernel': (3, 1),
+            'tower_1': {'reduction_filter': 8, 'tower_filter': 16 * 2, 'tower_kernel': (3, 1),
                         'activation': activation},
-            'tower_2': {'reduction_filter': 8, 'tower_filter': 16 * 2, 'tower_kernel': (5, 1),
+            'tower_2': {'reduction_filter': 8, 'tower_filter': 16 * 2, 'tower_kernel': (5, 1),
                         'activation': activation},
-            'tower_3': {'reduction_filter': 8, 'tower_filter': 16 * 2, 'tower_kernel': (1, 1),
+            'tower_3': {'reduction_filter': 8, 'tower_filter': 16 * 2, 'tower_kernel': (1, 1),
                         'activation': activation},
             # 'tower_4':{'reduction_filter':8, 'tower_filter':8*2, 'tower_kernel':(7,1), 'activation':activation},
         }
         pool_settings_dict1 = {'pool_kernel': (3, 1), 'tower_filter': 16, 'activation': activation}
 
         conv_settings_dict2 = {
-            'tower_1': {'reduction_filter': 64, 'tower_filter': 32 * 2 * 2, 'tower_kernel': (3, 1),
+            'tower_1': {'reduction_filter': 64, 'tower_filter': 32 * 2 * 2, 'tower_kernel': (3, 1),
                         'activation': activation},
-            'tower_2': {'reduction_filter': 64, 'tower_filter': 32 * 2 * 2, 'tower_kernel': (5, 1),
+            'tower_2': {'reduction_filter': 64, 'tower_filter': 32 * 2 * 2, 'tower_kernel': (5, 1),
                         'activation': activation},
-            'tower_3': {'reduction_filter': 64, 'tower_filter': 32 * 2 * 2, 'tower_kernel': (1, 1),
+            'tower_3': {'reduction_filter': 64, 'tower_filter': 32 * 2 * 2, 'tower_kernel': (1, 1),
                         'activation': activation},
             # 'tower_4':{'reduction_filter':8*2, 'tower_filter':16*2, 'tower_kernel':(7,1), 'activation':activation},
         }
-        pool_settings_dict2 = {'pool_kernel': (3, 1), 'tower_filter': 32 * 2, 'activation': activation}
+        pool_settings_dict2 = {'pool_kernel': (3, 1), 'tower_filter': 32 * 2, 'activation': activation}
 
         conv_settings_dict3 = {
             'tower_1': {'reduction_filter': 64 * 2, 'tower_filter': 32 * 4, 'tower_kernel': (3, 1),
@@ -584,10 +585,9 @@
                                                regularizer=self.regularizer,
                                                batch_normalisation=True,
                                                padding=self.padding)
-        # out_minor1 = flatten_tail(X_in, 'minor_1', False, self.dropout_rate, self.window_lead_time,
-        #                           self.activation, 32, 64)
+
         out_minor1 = flatten_tail(X_in, inner_neurons=64, activation=activation, output_neurons=self.window_lead_time,
-                                  output_activation='linear', reduction_filter=32,
+                                  output_activation='linear', reduction_filter=32 * 2,
                                   name='minor_1', bound_weight=False, dropout_rate=self.dropout_rate,
                                   kernel_regularizer=self.regularizer
                                   )
@@ -610,10 +611,19 @@
                                 )
 
         self.model = keras.Model(inputs=X_input, outputs=[out_minor1, out_main])
+        # self.model = keras.Model(inputs=X_input, outputs=out_main)
 
     def set_compile_options(self):
-        self.optimizer = keras.optimizers.SGD(lr=self.initial_lr, momentum=0.9)
-        self.compile_options = {"loss": [keras.losses.mean_squared_error, keras.losses.mean_squared_error],
-                                "metrics": ['mse', 'mea']}
+        # self.optimizer = keras.optimizers.SGD(lr=self.initial_lr, momentum=0.9)
+        self.compile_options = {"optimizer": keras.optimizers.Adam(lr=self.initial_lr, amsgrad=True),
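+                                # amsgrad=True enables the AMSGrad variant of Adam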
+                                "loss": [l_p_loss(4), keras.losses.mean_squared_error],
+                                # "loss": [keras.losses.mean_squared_error, keras.losses.mean_squared_error],
+                                "metrics": ['mse'],
+                                "loss_weights": [.01, .99],
+                                }