diff --git a/video_prediction_tools/HPC_scripts/visualize_postprocess_era5_template.sh b/video_prediction_tools/HPC_scripts/visualize_postprocess_era5_template.sh
index 6239b82ff4b18e85b045d011dce50077bd93c1f2..6d9a9cefa5d17adeb0321b1dd90b9dcb7f3a3a6a 100644
--- a/video_prediction_tools/HPC_scripts/visualize_postprocess_era5_template.sh
+++ b/video_prediction_tools/HPC_scripts/visualize_postprocess_era5_template.sh
@@ -46,6 +46,7 @@ module purge
 # Note: source_dir is only needed for retrieving the base-directory
 checkpoint_dir=/my/trained/model/dir
 results_dir=/my/results/dir
+clim_f=/my/climatology/netcdf_file
 lquick=""
 
 # run postprocessing/generation of model results including evaluation metrics
@@ -56,6 +57,7 @@ srun --mpi=pspmix --cpu-bind=none \
      python3 ../main_scripts/main_visualize_postprocess.py --checkpoint  ${checkpoint_dir} --mode test  \
                                                            --results_dir ${results_dir} --batch_size 4 \
                                                            --num_stochastic_samples 1 ${lquick} \
+                                                           -clim_f ${clim_f} \
                                                            > postprocess_era5-out_all."${SLURM_JOB_ID}"
 
 # WITHOUT container usage, comment in the follwoing lines (and uncomment the lines above)
@@ -78,4 +80,4 @@ srun --mpi=pspmix --cpu-bind=none \
 # srun python3 ../main_scripts/main_visualize_postprocess.py --checkpoint  ${checkpoint_dir} --mode test  \
 #                                                           --results_dir ${results_dir} --batch_size 4 \
 #                                                           --num_stochastic_samples 1 ${lquick} \
-#                                                           > postprocess_era5-out_all."${SLURM_JOB_ID}"
\ No newline at end of file
+#                                                           > postprocess_era5-out_all."${SLURM_JOB_ID}"
diff --git a/video_prediction_tools/main_scripts/main_meta_postprocess.py b/video_prediction_tools/main_scripts/main_meta_postprocess.py
index aa07ed65849efd7aea6b31d968124e1c1fbc5b46..18d6c8b14ae79a58f7fc92f8a79f69929a90ee28 100644
--- a/video_prediction_tools/main_scripts/main_meta_postprocess.py
+++ b/video_prediction_tools/main_scripts/main_meta_postprocess.py
@@ -31,7 +31,8 @@ def skill_score(tar_score,ref_score,best_score):
 class MetaPostprocess(object):
 
     def __init__(self, root_dir: str = "/p/project/deepacf/deeprain/video_prediction_shared_folder/",
-            analysis_config: str = None, metric: str = "mse", exp_id: str=None, enable_skill_scores:bool=False, enable_persit_plot:bool=False):
+            analysis_config: str = None, metric: str = "mse", exp_id: str = None,
+            enable_skill_scores: bool = False, enable_persit_plot: bool = False, metrics_filename: str = "evaluation_metrics.nc"):
         """
         This class is used for calculating the evaluation metric, analyize the models' results and make comparsion
         args:
@@ -42,6 +43,7 @@ class MetaPostprocess(object):
             exp_id             :str,  the given exp_id which is used as the name of postfix of the folder to store the plot
             enable_skill_scores:bool, enable the skill scores plot
             enable_persis_plot: bool, enable the persis prediction in the plot
+            metrics_filename   :str,  the .nc file that stores the evaluation metrics
         """
         self.root_dir = root_dir
         self.analysis_config = analysis_config
@@ -50,10 +52,11 @@ class MetaPostprocess(object):
         self.exp_id = exp_id
         self.persist = enable_persit_plot
         self.enable_skill_scores = enable_skill_scores
+        self.metrics_filename = metrics_filename
         self.models_type = []
         self.metric_values = []  # return the shape: [num_results, persi_values, model_values]
         self.skill_scores = []  # contain the calculated skill scores [num_results, skill_scores_values]
-
+
 
     def __call__(self):
         self.sanity_check()
@@ -62,6 +65,7 @@ class MetaPostprocess(object):
         self.load_analysis_config()
         self.get_metrics_values()
         if self.enable_skill_scores:
+            print("Enable the skill scores")
             self.calculate_skill_scores()
             self.plot_skill_scores()
         else:
@@ -80,7 +84,7 @@ class MetaPostprocess(object):
         Function to create the analysis directory if it does not exist
         """
         if not os.path.exists(self.analysis_dir): os.makedirs(self.analysis_dir)
-        print("1. Create analysis dir successfully: The result will be stored to the folder:", self.analysis_dir)
+        print("Create analysis dir successfully: The result will be stored to the folder:", self.analysis_dir)
 
     def copy_analysis_config(self):
         """
@@ -89,7 +93,7 @@ class MetaPostprocess(object):
         try:
             shutil.copy(self.analysis_config, os.path.join(self.analysis_dir, "meta_config.json"))
             self.analysis_config = os.path.join(self.analysis_dir, "meta_config.json")
-            print("2. Copy analysis config successs ")
+            print("Copy analysis config successs ")
         except Exception as e:
             print("The meta_config.json is not found in the dictory: ", self.analysis_config)
         return None
@@ -104,7 +108,7 @@ class MetaPostprocess(object):
         print("*****The following results will be compared and ploted*****")
         [print(i) for i in self.f["results"].values()]
         print("*******************************************************")
-        print("3. Loading analysis config success")
+        print("Loading analysis config success")
 
         return None
 
@@ -131,27 +135,31 @@ class MetaPostprocess(object):
         self.get_meta_info()
 
         for i, result_dir in enumerate(self.f["results"].values()):
-            vals = MetaPostprocess.get_one_metric_values(result_dir, self.metric, self.models_type[i],self.enable_skill_scores)
+            vals = MetaPostprocess.get_one_metric_values(result_dir, self.metric, self.models_type[i], self.enable_skill_scores, self.metrics_filename)
             self.metric_values.append(vals)
-        print("4. Get metrics values success")
+        print(" Get metrics values success")
         return self.metric_values
 
     @staticmethod
-    def get_one_metric_values(result_dir: str = None, metric: str = "mse", model: str = None, enable_skill_scores:bool = False):
+    def get_one_metric_values(result_dir: str = None, metric: str = "mse", model: str = None, enable_skill_scores:bool = False, metrics_filename: str = "evaluation_metrics.nc"):
 
         """
         obtain the metric values (persistence and DL model) in the "evaluation_metrics.nc" file
         return:  list contains the evaluatioin metrics of one result. [persi,model]
         """
-        filename = 'evaluation_metrics.nc'
+        filename = metrics_filename
         filepath = os.path.join(result_dir, filename)
         try:
-            with xr.open_dataset(filepath) as dfiles:
+            with xr.open_dataset(filepath, engine="netcdf4") as dfiles:
                 if enable_skill_scores:
-                   persi = np.array(dfiles['2t_persistence_{}_bootstrapped'.format(metric)][:])
+                    persi = np.array(dfiles['2t_persistence_{}_bootstrapped'.format(metric)][:])
+                    if persi.shape[0] < 30:  # some files (e.g. 20210713T143850_gong1_savp_t2opt_3vars/evaluation_metrics_72x44.nc) store the dimensions in reversed order
+                        persi = np.transpose(persi)
                 else:
                     persi = []
-                model = np.array(dfiles['2t_{}_{}_bootstrapped'.format(model, metric)][:])
+                model = np.array(dfiles['2t_{}_{}_bootstrapped'.format(model, metric)][:])
+                if model.shape[0] < 30:  # transpose as well if the dimensions are stored in reversed order
+                    model = np.transpose(model)
                 print("The values for evaluation metric '{}' values are obtained from file {}".format(metric, filepath))
                 return [persi, model]
         except Exception as e:
@@ -184,7 +192,8 @@ class MetaPostprocess(object):
             return None
 
     def get_lead_time_labels(self):
-        assert len(self.metric_values) == 2
+        assert len(self.metric_values[0]) == 2
+
         leadtimes = np.array(self.metric_values[0][1]).shape[1]
         leadtimelist = ["leadhour" + str(i + 1) for i in range(leadtimes)]
         return leadtimelist
@@ -199,7 +208,7 @@ class MetaPostprocess(object):
     @staticmethod
     def map_ylabels(metric):
         if metric == "mse":
-            ylabel = "MSE"
+            ylabel = "MSE[K$^2$]"
         elif metric == "acc":
             ylabel = "ACC"
         elif metric == "ssim":
@@ -216,9 +225,10 @@ class MetaPostprocess(object):
         fig = plt.figure(figsize = (8, 6))
         ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
         for i in range(len(self.metric_values)): #loop number of test samples
-            assert len(self.metric_values)==2
+            assert len(self.metric_values[0]) == 2
             score_plot = np.nanquantile(self.metric_values[i][1], 0.5, axis = 0)
-           
+            print("score_plot",len(score_plot))
+            print("self.n_leadtime",self.n_leadtime)
             assert len(score_plot) == self.n_leadtime
             plt.plot(np.arange(1, 1 + self.n_leadtime), list(score_plot),label = self.labels[i], color = self.colors[i],
                      marker = self.markers[i],   markeredgecolor = 'k', linewidth = 1.2)
@@ -238,11 +248,12 @@ class MetaPostprocess(object):
 
         plt.yticks(fontsize = 16)
         plt.xticks(np.arange(1, self.n_leadtime+1), np.arange(1, self.n_leadtime + 1, 1), fontsize = 16)
-        legend = ax.legend(loc = 'upper right', bbox_to_anchor = (1.46, 0.95),
-                           fontsize = 14)  # 'upper right', bbox_to_anchor=(1.38, 0.8),
+        legend = ax.legend(loc = 'upper right', bbox_to_anchor = (0.92, 0.40),
+                           fontsize = 12) # 'upper right', bbox_to_anchor=(1.38, 0.8),
         ylabel = MetaPostprocess.map_ylabels(self.metric)
         ax.set_xlabel("Lead time (hours)", fontsize = 21)
         ax.set_ylabel(ylabel, fontsize = 21)
+        plt.title("Sensitivity analysis for domain sizes", fontsize = 16)
         fig_path = os.path.join(self.analysis_dir, self.metric + "_abs_values.png")
         # fig_path = os.path.join(prefix,fig_name)
         plt.savefig(fig_path, bbox_inches = "tight")
@@ -291,10 +302,11 @@ def main():
     parser.add_argument("--exp_id", help="The experiment id which will be used as postfix of the output directory",default="exp1")
     parser.add_argument("--enable_skill_scores", help="compared by skill scores or the absolute evaluation values",default=False)
     parser.add_argument("--enable_persit_plot", help="If plot persistent foreasts",default=False)
+    parser.add_argument("--metrics_filename", help="The .nc file contain the evaluation metrics",default="evaluation_metrics.nc")
     args = parser.parse_args()
 
     meta = MetaPostprocess(root_dir=args.root_dir,analysis_config=args.analysis_config, metric=args.metric, exp_id=args.exp_id,
-                           enable_skill_scores=args.enable_skill_scores,enable_persit_plot=args.enable_persit_plot)
+                           enable_skill_scores=args.enable_skill_scores, enable_persit_plot=args.enable_persit_plot, metrics_filename=args.metrics_filename)
     meta()