diff --git a/mlair/run_modules/post_processing.py b/mlair/run_modules/post_processing.py
index 4387f359d05b7c2d06c98bb1ad346d058afc3056..56b5c363f15aa7f40a25bd02392dd9d85bf88396 100644
--- a/mlair/run_modules/post_processing.py
+++ b/mlair/run_modules/post_processing.py
@@ -88,6 +88,7 @@ class PostProcessing(RunEnvironment):
         self.competitors = to_list(self.data_store.get_default("competitors", default=[]))
         self.forecast_indicator = "nn"
         self.ahead_dim = "ahead"
+        self.boot_var_dim = "boot_var"
         self._run()
 
     def _run(self):
@@ -109,6 +110,8 @@ class PostProcessing(RunEnvironment):
                 bootstrap_type = self.data_store.get("bootstrap_type", "postprocessing")
                 self.bootstrap_postprocessing(create_new_bootstraps, bootstrap_type=bootstrap_type,
                                               bootstrap_method=bootstrap_method)
+            if self.bootstrap_skill_scores is not None:  # only report when bootstrap skill scores were computed
+                self.report_bootstrap_results(self.bootstrap_skill_scores)
 
         # skill scores and error metrics
         with TimeTracking(name="calculate skill scores"):
@@ -155,7 +158,8 @@ class PostProcessing(RunEnvironment):
         :param _iter: internal counter to reduce unnecessary recursive calls (maximum number is 2, otherwise something
             went wrong).
         """
-        self.bootstrap_skill_scores = {}
+        if _iter == 0:  # reset the collection only on the initial call, not on recursive retries
+            self.bootstrap_skill_scores = {}
         for boot_type in to_list(bootstrap_type):
             self.bootstrap_skill_scores[boot_type] = {}
             for boot_method in to_list(bootstrap_method):
@@ -265,7 +269,7 @@ class PostProcessing(RunEnvironment):
                     skill.loc[boot_var] = np.array(boot_scores)
 
                 # collect all results in single dictionary
-                score[str(station)] = xr.DataArray(skill, dims=["boot_var", self.ahead_dim])
+                score[str(station)] = xr.DataArray(skill, dims=[self.boot_var_dim, self.ahead_dim])
             return score
 
     def get_orig_prediction(self, path, file_name, number_of_bootstraps, prediction_name=None):
@@ -788,6 +792,23 @@ class PostProcessing(RunEnvironment):
                 avg_error[error_metric] = new_val
         return avg_error
 
+    def report_bootstrap_results(self, results):
+        """Create a csv file containing all results from bootstrapping."""
+        report_path = os.path.join(self.data_store.get("experiment_path"), "latex_report")
+        path_config.check_path_and_create(report_path)
+        res = [["type", "method", "station", self.boot_var_dim, self.ahead_dim, "vals"]]
+        for boot_type, d0 in results.items():  # results: {boot_type: {boot_method: {station: DataArray}}}
+            for boot_method, d1 in d0.items():
+                for station_name, vals in d1.items():
+                    for boot_var in vals.coords[self.boot_var_dim].values.tolist():
+                        for ahead in vals.coords[self.ahead_dim].values.tolist():
+                            res.append([boot_type, boot_method, station_name, boot_var, ahead,
+                                        float(vals.sel({self.boot_var_dim: boot_var, self.ahead_dim: ahead}))])
+        col_names = res.pop(0)
+        df = pd.DataFrame(res, columns=col_names)
+        file_name = "bootstrap_skill_score_report_raw.csv"
+        df.to_csv(os.path.join(report_path, file_name), sep=";")
+
     def report_error_metrics(self, errors):
         report_path = os.path.join(self.data_store.get("experiment_path"), "latex_report")
         path_config.check_path_and_create(report_path)
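For reference, the standalone sketch below (not part of the patch) shows how the nested results dictionary introduced here is flattened into the long-format CSV written by `report_bootstrap_results`. The dictionary layout `{boot_type: {boot_method: {station: DataArray}}}` and the dimension names `boot_var`/`ahead` follow the diff above; the example keys, station name, and values are placeholders and do not come from an actual MLAir run.

```python
# Minimal sketch of the flattening performed by report_bootstrap_results.
# All concrete keys and numbers below are illustrative assumptions.
import numpy as np
import pandas as pd
import xarray as xr

boot_var_dim, ahead_dim = "boot_var", "ahead"

# toy skill scores for one station: 2 bootstrapped variables x 3 forecast steps
skill = xr.DataArray(np.random.rand(2, 3),
                     coords={boot_var_dim: ["o3", "temp"], ahead_dim: [1, 2, 3]},
                     dims=[boot_var_dim, ahead_dim])

# nested structure as built by bootstrap_postprocessing: type -> method -> station
results = {"singleinput": {"shuffle": {"DEBW107": skill}}}

rows = []
for boot_type, d0 in results.items():
    for boot_method, d1 in d0.items():
        for station_name, vals in d1.items():
            for boot_var in vals.coords[boot_var_dim].values.tolist():
                for ahead in vals.coords[ahead_dim].values.tolist():
                    rows.append([boot_type, boot_method, station_name, boot_var, ahead,
                                 float(vals.sel({boot_var_dim: boot_var, ahead_dim: ahead}))])

df = pd.DataFrame(rows, columns=["type", "method", "station", boot_var_dim, ahead_dim, "vals"])
print(df.to_csv(sep=";", index=False))  # one row per type/method/station/variable/ahead combination
```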