diff --git a/mlair/model_modules/fully_connected_networks.py b/mlair/model_modules/fully_connected_networks.py
index e9d577e8166ed912d7bd720a3eeca84dbf40d98a..a4c61b5b55f56e974d90a83fca32771019778154 100644
--- a/mlair/model_modules/fully_connected_networks.py
+++ b/mlair/model_modules/fully_connected_networks.py
@@ -1,6 +1,8 @@
 __author__ = "Lukas Leufen"
 __date__ = '2021-02-'
 
+from functools import reduce
+
 from mlair.model_modules import AbstractModelClass
 from mlair.helpers import select_from_dict
 
@@ -59,7 +61,7 @@ class FCN_64_32_16(AbstractModelClass):
 
 class FCN(AbstractModelClass):
     """
-    A customised model 4 Dense layers (64, 32, 16, window_lead_time), where the last layer is the output layer depending
+    A customisable fully connected network (n_layer hidden layers of n_hidden neurons each), where the last layer is the output layer depending
     on the window_lead_time parameter.
     """
 
@@ -69,7 +71,7 @@ class FCN(AbstractModelClass):
     _requirements = ["lr", "beta_1", "beta_2", "epsilon", "decay", "amsgrad", "momentum", "nesterov"]
 
     def __init__(self, input_shape: list, output_shape: list, activation="relu", optimizer="adam",
-                 layers=1, neurons=10, **kwargs):
+                 n_layer=1, n_hidden=10, **kwargs):
         """
         Sets model and loss depending on the given arguments.
 
@@ -84,7 +86,8 @@ class FCN(AbstractModelClass):
         # settings
         self.activation = self._set_activation(activation)
         self.optimizer = self._set_optimizer(optimizer, **kwargs)
-        self.layer_configuration = (layers, neurons)
+        self.layer_configuration = (n_layer, n_hidden)
+        self._update_model_name()
 
         # apply to model
         self.set_model()
@@ -110,6 +113,12 @@ class FCN(AbstractModelClass):
         except KeyError:
             raise AttributeError(f"Given optimizer {optimizer} is not supported in this model class.")
 
+    def _update_model_name(self):
+        n_layer, n_hidden = self.layer_configuration
+        n_input = str(reduce(lambda x, y: x * y, self._input_shape))
+        n_output = str(self._output_shape)
+        self.model_name += "_".join([n_input, *[f"{n_hidden}" for _ in range(n_layer)], n_output])
+
     def set_model(self):
         """
         Build the model.
diff --git a/mlair/run_modules/model_setup.py b/mlair/run_modules/model_setup.py
index feaaff9b387dc1b2ec76bdb83223023075303326..5dd73d50f711387a65a9bc7e4daa7c1d430bfb26 100644
--- a/mlair/run_modules/model_setup.py
+++ b/mlair/run_modules/model_setup.py
@@ -137,7 +137,6 @@ class ModelSetup(RunEnvironment):
 
     def build_model(self):
         """Build model using input and output shapes from data store."""
-        # args_list = ["input_shape", "output_shape"]
         model = self.data_store.get("model_class")
         args_list = model.requirements()
         args = self.data_store.create_args_dict(args_list, self.scope)
@@ -170,6 +169,7 @@ class ModelSetup(RunEnvironment):
     def report_model(self):
         model_settings = self.model.get_settings()
         model_settings.update(self.model.compile_options)
+        model_settings.update(self.model.optimizer.get_config())
         df = pd.DataFrame(columns=["model setting"])
         for k, v in model_settings.items():
             if v is None: