diff --git a/mlair/configuration/defaults.py b/mlair/configuration/defaults.py
index dc1ffe2e6602a4ef0c63bca641fd5ebf3428ab49..9dc252b89455a3f18c995fa7992c51fbc1476a57 100644
--- a/mlair/configuration/defaults.py
+++ b/mlair/configuration/defaults.py
@@ -46,6 +46,7 @@ DEFAULT_TRAIN_VAL_MIN_LENGTH = 180
 DEFAULT_USE_ALL_STATIONS_ON_ALL_DATA_SETS = True
 DEFAULT_UNCERTAINTY_ESTIMATE_BLOCK_LENGTH = "1m"
 DEFAULT_UNCERTAINTY_ESTIMATE_EVALUATE_COMPETITORS = True
+DEFAULT_UNCERTAINTY_ESTIMATE_N_BOOTS = 1000
 DEFAULT_EVALUATE_BOOTSTRAPS = True
 DEFAULT_CREATE_NEW_BOOTSTRAPS = False
 DEFAULT_NUMBER_OF_BOOTSTRAPS = 20
diff --git a/mlair/run_modules/experiment_setup.py b/mlair/run_modules/experiment_setup.py
index 9126a4eec21d34b8441c1e440bc3af95b8c0b0d3..aa7a2bc1559899345c5b0a2f2317ff1f8b3e1c6d 100644
--- a/mlair/run_modules/experiment_setup.py
+++ b/mlair/run_modules/experiment_setup.py
@@ -22,7 +22,8 @@ from mlair.configuration.defaults import DEFAULT_STATIONS, DEFAULT_VAR_ALL_DICT,
     DEFAULT_NUMBER_OF_BOOTSTRAPS, DEFAULT_PLOT_LIST, DEFAULT_SAMPLING, DEFAULT_DATA_ORIGIN, DEFAULT_ITER_DIM, \
     DEFAULT_USE_MULTIPROCESSING, DEFAULT_USE_MULTIPROCESSING_ON_DEBUG, DEFAULT_MAX_NUMBER_MULTIPROCESSING, \
     DEFAULT_BOOTSTRAP_TYPE, DEFAULT_BOOTSTRAP_METHOD, DEFAULT_OVERWRITE_LAZY_DATA, \
-    DEFAULT_UNCERTAINTY_ESTIMATE_BLOCK_LENGTH, DEFAULT_UNCERTAINTY_ESTIMATE_EVALUATE_COMPETITORS
+    DEFAULT_UNCERTAINTY_ESTIMATE_BLOCK_LENGTH, DEFAULT_UNCERTAINTY_ESTIMATE_EVALUATE_COMPETITORS, \
+    DEFAULT_UNCERTAINTY_ESTIMATE_N_BOOTS
 from mlair.data_handler import DefaultDataHandler
 from mlair.run_modules.run_environment import RunEnvironment
 from mlair.model_modules.fully_connected_networks import FCN_64_32_16 as VanillaModel
@@ -221,7 +222,8 @@ class ExperimentSetup(RunEnvironment):
                  use_multiprocessing: bool = None, use_multiprocessing_on_debug: bool = None,
                  max_number_multiprocessing: int = None, start_script: Union[Callable, str] = None,
                  overwrite_lazy_data: bool = None, uncertainty_estimate_block_length: str = None,
-                 uncertainty_estimate_evaluate_competitors: bool = None, **kwargs):
+                 uncertainty_estimate_evaluate_competitors: bool = None, uncertainty_estimate_n_boots: int = None,
+                 **kwargs):
 
         # create run framework
         super().__init__()
@@ -355,6 +357,8 @@ class ExperimentSetup(RunEnvironment):
                         default=DEFAULT_UNCERTAINTY_ESTIMATE_BLOCK_LENGTH)
         self._set_param("uncertainty_estimate_evaluate_competitors", uncertainty_estimate_evaluate_competitors,
                         default=DEFAULT_UNCERTAINTY_ESTIMATE_EVALUATE_COMPETITORS)
+        self._set_param("uncertainty_estimate_n_boots", uncertainty_estimate_n_boots,
+                        default=DEFAULT_UNCERTAINTY_ESTIMATE_N_BOOTS)
 
         self._set_param("evaluate_bootstraps", evaluate_bootstraps, default=DEFAULT_EVALUATE_BOOTSTRAPS,
                         scope="general.postprocessing")
diff --git a/mlair/run_modules/post_processing.py b/mlair/run_modules/post_processing.py
index e7ed04b2f8694e7e4e2c90d215cb042cb33beef8..e580a2b3acd04f4f8e82154d9e3f16000d75c84b 100644
--- a/mlair/run_modules/post_processing.py
+++ b/mlair/run_modules/post_processing.py
@@ -131,14 +131,15 @@ class PostProcessing(RunEnvironment):
         self.plot()
 
     def estimate_sample_uncertainty(self, separate_ahead=False):
-        #todo: set n_boots
         #todo: visualize
         #todo: write results on disk
+        n_boots = self.data_store.get_default("uncertainty_estimate_n_boots", default=1000)
         block_length = self.data_store.get_default("uncertainty_estimate_block_length", default="1m")
         evaluate_competitors = self.data_store.get_default("uncertainty_estimate_evaluate_competitors", default=True)
         block_mse = self.calculate_block_mse(evaluate_competitors=evaluate_competitors, separate_ahead=separate_ahead,
                                              block_length=block_length)
-        res = statistics.create_n_bootstrap_realizations(block_mse, self.index_dim, self.model_type_dim, n_boots=10)
+        res = statistics.create_n_bootstrap_realizations(block_mse, self.index_dim, self.model_type_dim,
+                                                         n_boots=n_boots)
         res
 
     def calculate_block_mse(self, evaluate_competitors=True, separate_ahead=False, block_length="1m"):