diff --git a/mlair/configuration/defaults.py b/mlair/configuration/defaults.py
index 31746ec889cc82ebbae8de82a05c5cff02a22ac0..b048712f8abf00bc3bab7ab7bddd3f42492ee9cf 100644
--- a/mlair/configuration/defaults.py
+++ b/mlair/configuration/defaults.py
@@ -18,7 +18,7 @@ DEFAULT_TRANSFORMATION = {"scope": "data", "method": "standardise"}
 DEFAULT_HPC_LOGIN_LIST = ["ju", "hdfmll"]  # ju[wels} #hdfmll(ogin)
 DEFAULT_HPC_HOST_LIST = ["jw", "hdfmlc"]  # first part of node names for Juwels (jw[comp], hdfmlc(ompute).
 DEFAULT_CREATE_NEW_MODEL = True
-DEFAULT_TRAINABLE = True
+DEFAULT_TRAIN_MODEL = True
 DEFAULT_FRACTION_OF_TRAINING = 0.8
 DEFAULT_EXTREME_VALUES = None
 DEFAULT_EXTREMES_ON_RIGHT_TAIL_ONLY = False
diff --git a/mlair/run_modules/experiment_setup.py b/mlair/run_modules/experiment_setup.py
index 51e710c2d08d759883080406fc988847de832aca..d66954b05e8dc40c840f6f2f0c05ee2e576a3c57 100644
--- a/mlair/run_modules/experiment_setup.py
+++ b/mlair/run_modules/experiment_setup.py
@@ -10,7 +10,7 @@ from mlair.configuration import path_config
 from mlair import helpers
 from mlair.configuration.defaults import DEFAULT_STATIONS, DEFAULT_VAR_ALL_DICT, DEFAULT_NETWORK, DEFAULT_STATION_TYPE, \
     DEFAULT_START, DEFAULT_END, DEFAULT_WINDOW_HISTORY_SIZE, DEFAULT_OVERWRITE_LOCAL_DATA, DEFAULT_TRANSFORMATION, \
-    DEFAULT_HPC_LOGIN_LIST, DEFAULT_HPC_HOST_LIST, DEFAULT_CREATE_NEW_MODEL, DEFAULT_TRAINABLE, \
+    DEFAULT_HPC_LOGIN_LIST, DEFAULT_HPC_HOST_LIST, DEFAULT_CREATE_NEW_MODEL, DEFAULT_TRAIN_MODEL, \
     DEFAULT_FRACTION_OF_TRAINING, DEFAULT_EXTREME_VALUES, DEFAULT_EXTREMES_ON_RIGHT_TAIL_ONLY, DEFAULT_PERMUTE_DATA, \
     DEFAULT_BATCH_SIZE, DEFAULT_EPOCHS, DEFAULT_TARGET_VAR, DEFAULT_TARGET_DIM, DEFAULT_WINDOW_LEAD_TIME, \
     DEFAULT_DIMENSIONS, DEFAULT_TIME_DIM, DEFAULT_INTERPOLATION_METHOD, DEFAULT_INTERPOLATION_LIMIT, \
@@ -39,7 +39,7 @@ class ExperimentSetup(RunEnvironment):
         * `data_path` [.]
         * `create_new_model` [.]
         * `bootstrap_path` [.]
-        * `trainable` [.]
+        * `train_model` [.]
         * `fraction_of_training` [.]
         * `extreme_values` [train]
         * `extremes_on_right_tail_only` [train]
@@ -94,7 +94,7 @@ class ExperimentSetup(RunEnvironment):
 
         # set post-processing instructions
         self._set_param("evaluate_bootstraps", evaluate_bootstraps, scope="general.postprocessing")
-        create_new_bootstraps = max([self.data_store.get("trainable", "general"), create_new_bootstraps or False])
+        create_new_bootstraps = max([self.data_store.get("train_model", "general"), create_new_bootstraps or False])
         self._set_param("create_new_bootstraps", create_new_bootstraps, scope="general.postprocessing")
         self._set_param("number_of_bootstraps", number_of_bootstraps, default=20, scope="general.postprocessing")
         self._set_param("plot_list", plot_list, default=DEFAULT_PLOT_LIST, scope="general.postprocessing")
@@ -144,8 +144,8 @@ class ExperimentSetup(RunEnvironment):
     :param test_start:
     :param test_end:
     :param use_all_stations_on_all_data_sets:
-    :param trainable: train a new model from scratch or resume training with existing model if `True` (default) or
-        freeze loaded model and do not perform any modification on it. ``trainable`` is set to `True` if
+    :param train_model: train a new model from scratch or resume training with an existing model if `True` (default) or
+        freeze the loaded model and do not perform any modification on it. ``train_model`` is set to `True` if
         ``create_new_model`` is `True`.
     :param fraction_of_train: given value is used to split between test data and train data (including validation data).
         The value of ``fraction_of_train`` must be in `(0, 1)` but is recommended to be in the interval `[0.6, 0.9]`.
@@ -166,7 +166,7 @@ class ExperimentSetup(RunEnvironment):
         parameter is set to `False`, make sure, that a suitable model already exists in the experiment path. This model
         must fit in terms of input and output dimensions as well as ``window_history_size`` and ``window_lead_time`` and
         must be implemented as a :py:mod:`model class <src.model_modules.model_class>` and imported in
-        :py:mod:`model setup <src.run_modules.model_setup>`. If ``create_new_model`` is `True`, parameter ``trainable``
+        :py:mod:`model setup <src.run_modules.model_setup>`. If ``create_new_model`` is `True`, parameter ``train_model``
         is automatically set to `True` too.
     :param bootstrap_path:
     :param permute_data_on_training: shuffle train data individually for each station if `True`. This is performed each
@@ -215,7 +215,7 @@ class ExperimentSetup(RunEnvironment):
                  time_dim=None,
                  interpolation_method=None,
                  interpolation_limit=None, train_start=None, train_end=None, val_start=None, val_end=None, test_start=None,
-                 test_end=None, use_all_stations_on_all_data_sets=None, trainable: bool = None, fraction_of_train: float = None,
+                 test_end=None, use_all_stations_on_all_data_sets=None, train_model: bool = None, fraction_of_train: float = None,
                  experiment_path=None, plot_path: str = None, forecast_path: str = None, overwrite_local_data = None, sampling: str = "daily",
                  create_new_model = None, bootstrap_path=None, permute_data_on_training = None, transformation=None,
                  train_min_length=None, val_min_length=None, test_min_length=None, extreme_values: list = None,
@@ -233,11 +233,11 @@ class ExperimentSetup(RunEnvironment):
         self._set_param("login_nodes", login_nodes, default=DEFAULT_HPC_LOGIN_LIST)
         self._set_param("create_new_model", create_new_model, default=DEFAULT_CREATE_NEW_MODEL)
         if self.data_store.get("create_new_model"):
-            trainable = True
+            train_model = True
         data_path = self.data_store.get("data_path")
         bootstrap_path = path_config.set_bootstrap_path(bootstrap_path, data_path, sampling)
         self._set_param("bootstrap_path", bootstrap_path)
-        self._set_param("trainable", trainable, default=DEFAULT_TRAINABLE)
+        self._set_param("train_model", train_model, default=DEFAULT_TRAIN_MODEL)
         self._set_param("fraction_of_training", fraction_of_train, default=DEFAULT_FRACTION_OF_TRAINING)
         self._set_param("extreme_values", extreme_values, default=DEFAULT_EXTREME_VALUES, scope="train")
         self._set_param("extremes_on_right_tail_only", extremes_on_right_tail_only,
@@ -331,7 +331,7 @@ class ExperimentSetup(RunEnvironment):
         # set post-processing instructions
         self._set_param("evaluate_bootstraps", evaluate_bootstraps, default=DEFAULT_EVALUATE_BOOTSTRAPS,
                         scope="general.postprocessing")
-        create_new_bootstraps = max([self.data_store.get("trainable", "general"),
+        create_new_bootstraps = max([self.data_store.get("train_model", "general"),
                                      create_new_bootstraps or DEFAULT_CREATE_NEW_BOOTSTRAPS])
         self._set_param("create_new_bootstraps", create_new_bootstraps, scope="general.postprocessing")
         self._set_param("number_of_bootstraps", number_of_bootstraps, default=DEFAULT_NUMBER_OF_BOOTSTRAPS,
diff --git a/mlair/run_modules/model_setup.py b/mlair/run_modules/model_setup.py
index 3dc56f01c4f37ce9fc53086d837386af81e5f53d..e4bff5a621619e6d806fc7ae58ed093331463187 100644
--- a/mlair/run_modules/model_setup.py
+++ b/mlair/run_modules/model_setup.py
@@ -31,7 +31,7 @@ class ModelSetup(RunEnvironment):
     Required objects [scope] from data store:
         * `experiment_path` [.]
         * `experiment_name` [.]
-        * `trainable` [.]
+        * `train_model` [.]
         * `create_new_model` [.]
         * `generator` [train]
         * `model_class` [.]
@@ -64,7 +64,7 @@ class ModelSetup(RunEnvironment):
         self.model_name = self.path % "%s.h5"
         self.checkpoint_name = self.path % "model-best.h5"
         self.callbacks_name = self.path % "model-best-callbacks-%s.pickle"
-        self._trainable = self.data_store.get("trainable")
+        self._train_model = self.data_store.get("train_model")
         self._create_new_model = self.data_store.get("create_new_model")
         self._run()
 
@@ -80,7 +80,7 @@ class ModelSetup(RunEnvironment):
         self.plot_model()
 
         # load weights if no training shall be performed
-        if not self._trainable and not self._create_new_model:
+        if not self._train_model and not self._create_new_model:
             self.load_weights()
 
         # create checkpoint
diff --git a/mlair/run_modules/training.py b/mlair/run_modules/training.py
index f8909e15341f959455b1e8da0b0cb7502bdfa81b..113765e0d295bb0b1d756cd1cefba85093b20089 100644
--- a/mlair/run_modules/training.py
+++ b/mlair/run_modules/training.py
@@ -23,7 +23,7 @@ class Training(RunEnvironment):
     Train your model with this module.
 
     This module isn't required to run, if only a fresh post-processing is preformed. Either remove training call from
-    your run script or set create_new_model and trainable both to false.
+    your run script or set ``create_new_model`` and ``train_model`` both to `False`.
 
     Schedule of training:
         #. set_generators(): set generators for training, validation and testing and distribute according to batch size
@@ -40,7 +40,7 @@ class Training(RunEnvironment):
         * `model_name` [model]
         * `experiment_name` [.]
         * `experiment_path` [.]
-        * `trainable` [.]
+        * `train_model` [.]
         * `create_new_model` [.]
         * `generator` [train, val, test]
         * `plot_path` [.]
@@ -72,7 +72,7 @@ class Training(RunEnvironment):
         self.epochs = self.data_store.get("epochs")
         self.callbacks: CallbackHandler = self.data_store.get("callbacks", "model")
         self.experiment_name = self.data_store.get("experiment_name")
-        self._trainable = self.data_store.get("trainable")
+        self._train_model = self.data_store.get("train_model")
         self._create_new_model = self.data_store.get("create_new_model")
         self._run()
 
@@ -80,12 +80,12 @@ class Training(RunEnvironment):
         """Run training. Details in class description."""
         self.set_generators()
         self.make_predict_function()
-        if self._trainable:
+        if self._train_model:
             self.train()
             self.save_model()
             self.report_training()
         else:
-            logging.info("No training has started, because trainable parameter was false.")
+            logging.info("No training has started, because train_model parameter was false.")
 
     def make_predict_function(self) -> None:
         """
diff --git a/mlair/run_script.py b/mlair/run_script.py
index 6dea98ba9c67cfd9cf0ba07d78733a7b7d75909f..10851da1e3ef77b3d9a9c325ab018fdea5f31fb7 100644
--- a/mlair/run_script.py
+++ b/mlair/run_script.py
@@ -6,7 +6,7 @@ import inspect
 
 
 def run(stations=None,
-        trainable=None, create_new_model=None,
+        train_model=None, create_new_model=None,
         window_history_size=None,
         experiment_date="testrun",
         variables=None, statistics_per_var=None,
@@ -39,5 +39,5 @@ def run(stations=None,
 
 if __name__ == "__main__":
     from mlair.model_modules.model_class import MyBranchedModel
-    run(statistics_per_var={'o3': 'dma8eu', "temp": "maximum"}, trainable=True,
+    run(statistics_per_var={'o3': 'dma8eu', "temp": "maximum"}, train_model=True,
         create_new_model=True, model=MyBranchedModel, station_type="background")
diff --git a/mlair/workflows/default_workflow.py b/mlair/workflows/default_workflow.py
index 3dba7e6c5c5773fa4d74860b2cba67a5804123b7..85d6726b70b699968933bf9af7580895490b8a6d 100644
--- a/mlair/workflows/default_workflow.py
+++ b/mlair/workflows/default_workflow.py
@@ -14,7 +14,7 @@ class DefaultWorkflow(Workflow):
     the mentioned ordering."""
 
     def __init__(self, stations=None,
-        trainable=None, create_new_model=None,
+        train_model=None, create_new_model=None,
         window_history_size=None,
         experiment_date="testrun",
         variables=None, statistics_per_var=None,
@@ -58,7 +58,7 @@ class DefaultWorkflowHPC(Workflow):
     Training and PostProcessing in exact the mentioned ordering."""
 
     def __init__(self, stations=None,
-        trainable=None, create_new_model=None,
+        train_model=None, create_new_model=None,
         window_history_size=None,
         experiment_date="testrun",
         variables=None, statistics_per_var=None,
diff --git a/test/test_run_modules/test_experiment_setup.py b/test/test_run_modules/test_experiment_setup.py
index abd265f5815d974d6edb474e5a03ed08dc5843cc..ff35508542b694eb1def0ba791d9a5f70043f19c 100644
--- a/test/test_run_modules/test_experiment_setup.py
+++ b/test/test_run_modules/test_experiment_setup.py
@@ -38,7 +38,7 @@ class TestExperimentSetup:
         data_store = exp_setup.data_store
         # experiment setup
         assert data_store.get("data_path", "general") == prepare_host()
-        assert data_store.get("trainable", "general") is True
+        assert data_store.get("train_model", "general") is True
         assert data_store.get("create_new_model", "general") is True
         assert data_store.get("fraction_of_training", "general") == 0.8
         # set experiment name
@@ -99,7 +99,7 @@ class TestExperimentSetup:
         data_store = exp_setup.data_store
         # experiment setup
         assert data_store.get("data_path", "general") == prepare_host()
-        assert data_store.get("trainable", "general") is True
+        assert data_store.get("train_model", "general") is True
         assert data_store.get("create_new_model", "general") is True
         assert data_store.get("fraction_of_training", "general") == 0.5
         # set experiment name
@@ -145,22 +145,22 @@ class TestExperimentSetup:
         # use all stations on all data sets (train, val, test)
         assert data_store.get("use_all_stations_on_all_data_sets", "general.test") is False
 
-    def test_init_trainable_behaviour(self):
-        exp_setup = ExperimentSetup(trainable=False, create_new_model=True)
+    def test_init_train_model_behaviour(self):
+        exp_setup = ExperimentSetup(train_model=False, create_new_model=True)
         data_store = exp_setup.data_store
-        assert data_store.get("trainable", "general") is True
+        assert data_store.get("train_model", "general") is True
         assert data_store.get("create_new_model", "general") is True
-        exp_setup = ExperimentSetup(trainable=False, create_new_model=False)
+        exp_setup = ExperimentSetup(train_model=False, create_new_model=False)
         data_store = exp_setup.data_store
-        assert data_store.get("trainable", "general") is False
+        assert data_store.get("train_model", "general") is False
         assert data_store.get("create_new_model", "general") is False
-        exp_setup = ExperimentSetup(trainable=True, create_new_model=True)
+        exp_setup = ExperimentSetup(train_model=True, create_new_model=True)
         data_store = exp_setup.data_store
-        assert data_store.get("trainable", "general") is True
+        assert data_store.get("train_model", "general") is True
         assert data_store.get("create_new_model", "general") is True
-        exp_setup = ExperimentSetup(trainable=True, create_new_model=False)
+        exp_setup = ExperimentSetup(train_model=True, create_new_model=False)
         data_store = exp_setup.data_store
-        assert data_store.get("trainable", "general") is True
+        assert data_store.get("train_model", "general") is True
         assert data_store.get("create_new_model", "general") is False
 
     def test_compare_variables_and_statistics(self):
diff --git a/test/test_run_modules/test_training.py b/test/test_run_modules/test_training.py
index c5f1ba9d407340974946398a37dd25234d3cbd78..c0b625ef70deeb0686b236275e6bd1182ad48d41 100644
--- a/test/test_run_modules/test_training.py
+++ b/test/test_run_modules/test_training.py
@@ -73,7 +73,7 @@ class TestTraining:
         path_plot = os.path.join(path, "plots")
         os.makedirs(path_plot)
         obj.data_store.set("plot_path", path_plot, "general")
-        obj._trainable = True
+        obj._train_model = True
         obj._create_new_model = False
         yield obj
         if os.path.exists(path):
@@ -187,7 +187,7 @@ class TestTraining:
         obj.data_store.set("hist", hist, "general.model")
         obj.data_store.set("experiment_name", "TestExperiment", "general")
         obj.data_store.set("experiment_path", path, "general")
-        obj.data_store.set("trainable", True, "general")
+        obj.data_store.set("train_model", True, "general")
         obj.data_store.set("create_new_model", True, "general")
         os.makedirs(batch_path)
         obj.data_store.set("batch_path", batch_path, "general")
@@ -203,9 +203,9 @@ class TestTraining:
 
     def test_no_training(self, ready_to_init, caplog):
         caplog.set_level(logging.INFO)
-        ready_to_init.data_store.set("trainable", False)
+        ready_to_init.data_store.set("train_model", False)
         Training()
-        message = "No training has started, because trainable parameter was false."
+        message = "No training has started, because train_model parameter was false."
         assert caplog.record_tuples[-2] == ("root", 20, message)
 
     def test_run(self, ready_to_run):