diff --git a/.gitignore b/.gitignore
index 4b7022228f10be8b83d42ef87eb66b01a4e0687f..6dc2f3392135d3b43923f87460f274b5aea104fe 100644
--- a/.gitignore
+++ b/.gitignore
@@ -85,7 +85,7 @@ celerybeat-schedule
 
 # virtualenv
 .venv
-venv/
+venv*/
 ENV/
 virtual_env*/
 virt_env*/
@@ -122,8 +122,8 @@ virt_env*/
 **/era5_size_64_64_3_3t_norm
 
 # Ignore (Batch) runscripts
-HPC_scripts/*.sh
-!HPC_scripts/*_template.sh
-Zam347_scripts/*.sh
-!Zam347_scripts/*_template.sh
+video_prediction_tools/HPC_scripts/**
+!video_prediction_tools/HPC_scripts/*_template.sh
+video_prediction_tools/Zam347_scripts/**
+!video_prediction_tools/Zam347_scripts/*_template.sh
  
diff --git a/video_prediction_savp/HPC_scripts/hyperparam_setup.sh b/video_prediction_savp/HPC_scripts/hyperparam_setup.sh
deleted file mode 100644
index 34894da8c3e345955c05b69994f4f3cf431174ff..0000000000000000000000000000000000000000
--- a/video_prediction_savp/HPC_scripts/hyperparam_setup.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env bash
-
-# for choosing the model convLSTM vae mcnet savp
-export model=convLSTM
-export model_hparams=../hparams/era5/${model}/model_hparams.json
-
-# Create a subfolder named with the creation time and the user name, which serves as the hyperparameter-tuning directory. This avoids overwriting previously trained models that used different hyperparameters.
-export hyperdir="$(date +"%Y%m%dT%H%M")_"$USER""
-
-echo "model: ${model}"
-echo "hparams: ${model_hparams}"
-echo "experiment dir: ${hyperdir}"
diff --git a/video_prediction_savp/HPC_scripts/reset_dirs.sh b/video_prediction_savp/HPC_scripts/reset_dirs.sh
deleted file mode 100644
index 8de5247e044150d1c01eccfa512b9ae1c0e4cdfa..0000000000000000000000000000000000000000
--- a/video_prediction_savp/HPC_scripts/reset_dirs.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env bash
-
-sed -i "s|source_dir=.*|source_dir=${SAVE_DIR}preprocessedData/|g" DataPreprocess_to_tf.sh
-sed -i "s|destination_dir=.*|destination_dir=${SAVE_DIR}preprocessedData/|g" DataPreprocess_to_tf.sh
-
-sed -i "s|source_dir=.*|source_dir=${SAVE_DIR}preprocessedData/|g" train_era5.sh
-sed -i "s|destination_dir=.*|destination_dir=${SAVE_DIR}models/|g" train_era5.sh
-
-sed -i "s|source_dir=.*|source_dir=${SAVE_DIR}preprocessedData/|g" generate_era5.sh
-sed -i "s|checkpoint_dir=.*|checkpoint_dir=${SAVE_DIR}models/|g" generate_era5.sh
-sed -i "s|results_dir=.*|results_dir=${SAVE_DIR}results/|g" generate_era5.sh
diff --git a/video_prediction_savp/bash/download_and_preprocess_dataset.sh b/video_prediction_savp/bash/download_and_preprocess_dataset.sh
deleted file mode 100644
index 5779c2b7ff79f84ccb52e1e44cb7c0cd0d4ee154..0000000000000000000000000000000000000000
--- a/video_prediction_savp/bash/download_and_preprocess_dataset.sh
+++ /dev/null
@@ -1,88 +0,0 @@
-#!/usr/bin/env bash
-
-# exit if any command fails
-set -e
-
-if [ "$#" -eq 2 ]; then
-  if [ $1 = "bair" ]; then
-    echo "IMAGE_SIZE argument is only applicable to kth dataset" >&2
-    exit 1
-  fi
-elif [ "$#" -ne 1 ]; then
-  echo "Usage: $0 DATASET_NAME [IMAGE_SIZE]" >&2
-  exit 1
-fi
-if [ $1 = "bair" ]; then
-  TARGET_DIR=./data/bair
-  mkdir -p ${TARGET_DIR}
-  TAR_FNAME=bair_robot_pushing_dataset_v0.tar
-  URL=http://rail.eecs.berkeley.edu/datasets/${TAR_FNAME}
-  echo "Downloading '$1' dataset (this takes a while)"
-  # Bing: on macOS, use curl instead of wget (wget ${URL} -O ${TARGET_DIR}/${TAR_FNAME})
-  curl ${URL} -o ${TARGET_DIR}/${TAR_FNAME}
-  tar -xvf ${TARGET_DIR}/${TAR_FNAME} --strip-components=1 -C ${TARGET_DIR}
-  rm ${TARGET_DIR}/${TAR_FNAME}
-  mkdir -p ${TARGET_DIR}/val
-  # reserve a fraction of the training set for validation
-  mv ${TARGET_DIR}/train/traj_256_to_511.tfrecords ${TARGET_DIR}/val/
-elif [ $1 = "kth" ]; then
-  if [ "$#" -eq 2 ]; then
-    IMAGE_SIZE=$2
-    TARGET_DIR=./data/kth_${IMAGE_SIZE}
-  else
-    IMAGE_SIZE=64
-    TARGET_DIR=./data/kth
-  fi
-  echo ${TARGET_DIR} ${IMAGE_SIZE}
-  mkdir -p ${TARGET_DIR}
-  mkdir -p ${TARGET_DIR}/raw
-  echo "Downloading '$1' dataset (this takes a while)"
-  # TODO Bing: to save time, only use walking; change back if all the data are needed
-  #for ACTION in walking jogging running boxing handwaving handclapping; do
-#  for ACTION in walking; do
-#    echo "Action: '$ACTION' "
-#    ZIP_FNAME=${ACTION}.zip
-#    URL=http://www.nada.kth.se/cvap/actions/${ZIP_FNAME}
-#   # wget ${URL} -O ${TARGET_DIR}/raw/${ZIP_FNAME}
-#    echo "Start downloading action '$ACTION' ULR '$URL' "
-#    curl ${URL} -O ${TARGET_DIR}/raw/${ZIP_FNAME}
-#    unzip ${TARGET_DIR}/raw/${ZIP_FNAME} -d ${TARGET_DIR}/raw/${ACTION}
-#    echo "Action '$ACTION' data download and unzip "
-#  done
-  FRAME_RATE=25
-#  mkdir -p ${TARGET_DIR}/processed
-#  # download files with metadata specifying the subsequences
-#  TAR_FNAME=kth_meta.tar.gz
-#  URL=http://rail.eecs.berkeley.edu/models/savp/data/${TAR_FNAME}
-#  echo "Downloading '${TAR_FNAME}' ULR '$URL' "
-#  #wget ${URL} -O ${TARGET_DIR}/processed/${TAR_FNAME}
-#  curl ${URL} -O ${TARGET_DIR}/processed/${TAR_FNAME}
-#  tar -xzvf ${TARGET_DIR}/processed/${TAR_FNAME} --strip 1 -C ${TARGET_DIR}/processed
-  # convert the videos into sequences of downscaled images
-  echo "Processing '$1' dataset"
-  #TODO Bing, just use walking for test
-  #for ACTION in walking jogging running boxing handwaving handclapping; do
-  #Todo Bing: remove the comments below after testing
-  for ACTION in walking running; do
-    for VIDEO_FNAME in ${TARGET_DIR}/raw/${ACTION}/*.avi; do
-      FNAME=$(basename ${VIDEO_FNAME})
-      FNAME=${FNAME%_uncomp.avi}
-      echo "FNAME '$FNAME' "
-      # sometimes the directory is not created, so try until it is
-      while [ ! -d "${TARGET_DIR}/processed/${ACTION}/${FNAME}" ]; do
-        mkdir -p ${TARGET_DIR}/processed/${ACTION}/${FNAME}
-      done
-      ffmpeg -i ${VIDEO_FNAME} -r ${FRAME_RATE} -f image2 -s ${IMAGE_SIZE}x${IMAGE_SIZE} \
-      ${TARGET_DIR}/processed/${ACTION}/${FNAME}/image-%03d_${IMAGE_SIZE}x${IMAGE_SIZE}.png
-    done
-  done
-  python video_prediction/datasets/kth_dataset.py ${TARGET_DIR}/processed ${TARGET_DIR} ${IMAGE_SIZE}
-  rm -rf ${TARGET_DIR}/raw
-  rm -rf ${TARGET_DIR}/processed
-else
-  echo "Invalid dataset name: '$1' (choose from 'bair', 'kth')" >&2
-  exit 1
-fi
-echo "Succesfully finished downloadi\
-
-ng and preprocessing dataset '$1'"
diff --git a/video_prediction_savp/bash/download_and_preprocess_dataset_era5.sh b/video_prediction_savp/bash/download_and_preprocess_dataset_era5.sh
deleted file mode 100644
index eacc01801b5e323ea8da8d7adc97c8156172fd7b..0000000000000000000000000000000000000000
--- a/video_prediction_savp/bash/download_and_preprocess_dataset_era5.sh
+++ /dev/null
@@ -1,127 +0,0 @@
-#!/usr/bin/env bash
-
-# exit if any command fails
-set -e
-
-
-#if [ "$#" -eq 2 ]; then
-#  if [ $1 = "bair" ]; then
-#    echo "IMAGE_SIZE argument is only applicable to kth dataset" >&2
-#    exit 1
-#  fi
-#elif [ "$#" -ne 1 ]; then
-#  echo "Usage: $0 DATASET_NAME [IMAGE_SIZE]" >&2
-#  exit 1
-#fi
-#if [ $1 = "bair" ]; then
-#  TARGET_DIR=./data/bair
-#  mkdir -p ${TARGET_DIR}
-#  TAR_FNAME=bair_robot_pushing_dataset_v0.tar
-#  URL=http://rail.eecs.berkeley.edu/datasets/${TAR_FNAME}
-#  echo "Downloading '$1' dataset (this takes a while)"
-#  #wget ${URL} -O ${TARGET_DIR}/${TAR_FNAME} Bing: on MacOS system , use curl instead of wget
-#  curl ${URL} -O ${TARGET_DIR}/${TAR_FNAME}
-#  tar -xvf ${TARGET_DIR}/${TAR_FNAME} --strip-components=1 -C ${TARGET_DIR}
-#  rm ${TARGET_DIR}/${TAR_FNAME}
-#  mkdir -p ${TARGET_DIR}/val
-#  # reserve a fraction of the training set for validation
-#  mv ${TARGET_DIR}/train/traj_256_to_511.tfrecords ${TARGET_DIR}/val/
-#elif [ $1 = "kth" ]; then
-#  if [ "$#" -eq 2 ]; then
-#    IMAGE_SIZE=$2
-#    TARGET_DIR=./data/kth_${IMAGE_SIZE}
-#  else
-#    IMAGE_SIZE=64
-#  fi
-#  echo ${TARGET_DIR} ${IMAGE_SIZE}
-#  mkdir -p ${TARGET_DIR}
-#  mkdir -p ${TARGET_DIR}/raw
-#  echo "Downloading '$1' dataset (this takes a while)"
-  # TODO Bing: to save time, only use walking; change back if all the data are needed
-  #for ACTION in walking jogging running boxing handwaving handclapping; do
-#  for ACTION in walking; do
-#    echo "Action: '$ACTION' "
-#    ZIP_FNAME=${ACTION}.zip
-#    URL=http://www.nada.kth.se/cvap/actions/${ZIP_FNAME}
-#   # wget ${URL} -O ${TARGET_DIR}/raw/${ZIP_FNAME}
-#    echo "Start downloading action '$ACTION' ULR '$URL' "
-#    curl ${URL} -O ${TARGET_DIR}/raw/${ZIP_FNAME}
-#    unzip ${TARGET_DIR}/raw/${ZIP_FNAME} -d ${TARGET_DIR}/raw/${ACTION}
-#    echo "Action '$ACTION' data download and unzip "
-#  done
-#  FRAME_RATE=25
-#  mkdir -p ${TARGET_DIR}/processed
-#  # download files with metadata specifying the subsequences
-#  TAR_FNAME=kth_meta.tar.gz
-#  URL=http://rail.eecs.berkeley.edu/models/savp/data/${TAR_FNAME}
-#  echo "Downloading '${TAR_FNAME}' ULR '$URL' "
-#  #wget ${URL} -O ${TARGET_DIR}/processed/${TAR_FNAME}
-#  curl ${URL} -O ${TARGET_DIR}/processed/${TAR_FNAME}
-#  tar -xzvf ${TARGET_DIR}/processed/${TAR_FNAME} --strip 1 -C ${TARGET_DIR}/processed
-  # convert the videos into sequences of downscaled images
-#  echo "Processing '$1' dataset"
-#  #TODO Bing, just use walking for test
-#  #for ACTION in walking jogging running boxing handwaving handclapping; do
-#  #Todo Bing: remove the comments below after testing
-#  for ACTION in walking; do
-#    for VIDEO_FNAME in ${TARGET_DIR}/raw/${ACTION}/*.avi; do
-#      FNAME=$(basename ${VIDEO_FNAME})
-#      FNAME=${FNAME%_uncomp.avi}
-#      echo "FNAME '$FNAME' "
-#      # sometimes the directory is not created, so try until it is
-#      while [ ! -d "${TARGET_DIR}/processed/${ACTION}/${FNAME}" ]; do
-#        mkdir -p ${TARGET_DIR}/processed/${ACTION}/${FNAME}
-#      done
-#      ffmpeg -i ${VIDEO_FNAME} -r ${FRAME_RATE} -f image2 -s ${IMAGE_SIZE}x${IMAGE_SIZE} \
-#      ${TARGET_DIR}/processed/${ACTION}/${FNAME}/image-%03d_${IMAGE_SIZE}x${IMAGE_SIZE}.png
-#    done
-#  done
-#  python video_prediction/datasets/kth_dataset.py ${TARGET_DIR}/processed ${TARGET_DIR} ${IMAGE_SIZE}
-#  rm -rf ${TARGET_DIR}/raw
-#  rm -rf ${TARGET_DIR}/processed
-
-while [[ $# -gt 0 ]]  # loop while the number of passed arguments is greater than 0
-do
-key="$1"
-case $key in
-    -d|--data)
-    DATA="$2"
-    shift
-    shift
-    ;;
-    -i|--input_dir)
-    INPUT_DIR="$2"
-    shift
-    shift
-    ;;
-    -o|--output_dir)
-    OUTPUT_DIR="$2"
-    shift
-    shift
-    ;;
-esac
-done
-
-echo "DATA  = ${DATA} "
-
-echo "OUTPUT_DIRECTORY = ${OUTPUT_DIR}"
-
-if [ -d $INPUT_DIR ]; then
-    echo "INPUT DIRECTORY = ${INPUT_DIR}"
-
-else
-    echo "INPUT DIRECTORY '$INPUT_DIR' DOES NOT EXIST"
-    exit 1
-fi
-
-
-if [ $DATA = "era5" ]; then
-
-  mkdir -p ${OUTPUT_DIR}
-  python video_prediction/datasets/era5_dataset.py $INPUT_DIR  ${OUTPUT_DIR}
-else
-  echo "dataset name: '$DATA' (choose from 'era5')" >&2
-  exit 1
-fi
-
-echo "Succesfully finished downloading and preprocessing dataset '$DATA' "
\ No newline at end of file
diff --git a/video_prediction_savp/bash/download_and_preprocess_dataset_v1.sh b/video_prediction_savp/bash/download_and_preprocess_dataset_v1.sh
deleted file mode 100644
index 3541b4a538c089cd79ea2a39c6df0804e11cb0a6..0000000000000000000000000000000000000000
--- a/video_prediction_savp/bash/download_and_preprocess_dataset_v1.sh
+++ /dev/null
@@ -1,86 +0,0 @@
-#!/usr/bin/env bash
-
-# exit if any command fails
-set -e
-
-if [ "$#" -eq 2 ]; then
-  if [ $1 = "bair" ]; then
-    echo "IMAGE_SIZE argument is only applicable to kth dataset" >&2
-    exit 1
-  fi
-elif [ "$#" -ne 1 ]; then
-  echo "Usage: $0 DATASET_NAME [IMAGE_SIZE]" >&2
-  exit 1
-fi
-if [ $1 = "bair" ]; then
-  TARGET_DIR=./data/bair
-  mkdir -p ${TARGET_DIR}
-  TAR_FNAME=bair_robot_pushing_dataset_v0.tar
-  URL=http://rail.eecs.berkeley.edu/datasets/${TAR_FNAME}
-  echo "Downloading '$1' dataset (this takes a while)"
-  # Bing: on macOS, use curl instead of wget (wget ${URL} -O ${TARGET_DIR}/${TAR_FNAME})
-  curl ${URL} -o ${TARGET_DIR}/${TAR_FNAME}
-  tar -xvf ${TARGET_DIR}/${TAR_FNAME} --strip-components=1 -C ${TARGET_DIR}
-  rm ${TARGET_DIR}/${TAR_FNAME}
-  mkdir -p ${TARGET_DIR}/val
-  # reserve a fraction of the training set for validation
-  mv ${TARGET_DIR}/train/traj_256_to_511.tfrecords ${TARGET_DIR}/val/
-elif [ $1 = "kth" ]; then
-  if [ "$#" -eq 2 ]; then
-    IMAGE_SIZE=$2
-    TARGET_DIR=./data/kth_${IMAGE_SIZE}
-  else
-    IMAGE_SIZE=64
-    TARGET_DIR=./data/kth
-  fi
-  echo ${TARGET_DIR} ${IMAGE_SIZE}
-  mkdir -p ${TARGET_DIR}
-  mkdir -p ${TARGET_DIR}/raw
-  echo "Downloading '$1' dataset (this takes a while)"
-  # TODO Bing: to save time, only use walking; change back if all the data are needed
-  #for ACTION in walking jogging running boxing handwaving handclapping; do
-#  for ACTION in walking; do
-#    echo "Action: '$ACTION' "
-#    ZIP_FNAME=${ACTION}.zip
-#    URL=http://www.nada.kth.se/cvap/actions/${ZIP_FNAME}
-#   # wget ${URL} -O ${TARGET_DIR}/raw/${ZIP_FNAME}
-#    echo "Start downloading action '$ACTION' ULR '$URL' "
-#    curl ${URL} -O ${TARGET_DIR}/raw/${ZIP_FNAME}
-#    unzip ${TARGET_DIR}/raw/${ZIP_FNAME} -d ${TARGET_DIR}/raw/${ACTION}
-#    echo "Action '$ACTION' data download and unzip "
-#  done
-  FRAME_RATE=25
-#  mkdir -p ${TARGET_DIR}/processed
-#  # download files with metadata specifying the subsequences
-#  TAR_FNAME=kth_meta.tar.gz
-#  URL=http://rail.eecs.berkeley.edu/models/savp/data/${TAR_FNAME}
-#  echo "Downloading '${TAR_FNAME}' ULR '$URL' "
-#  #wget ${URL} -O ${TARGET_DIR}/processed/${TAR_FNAME}
-#  curl ${URL} -O ${TARGET_DIR}/processed/${TAR_FNAME}
-#  tar -xzvf ${TARGET_DIR}/processed/${TAR_FNAME} --strip 1 -C ${TARGET_DIR}/processed
-  # convert the videos into sequences of downscaled images
-  echo "Processing '$1' dataset"
-  #TODO Bing, just use walking for test
-  #for ACTION in walking jogging running boxing handwaving handclapping; do
-  #Todo Bing: remove the comments below after testing
-  for ACTION in walking; do
-    for VIDEO_FNAME in ${TARGET_DIR}/raw/${ACTION}/*.avi; do
-      FNAME=$(basename ${VIDEO_FNAME})
-      FNAME=${FNAME%_uncomp.avi}
-      echo "FNAME '$FNAME' "
-      # sometimes the directory is not created, so try until it is
-      while [ ! -d "${TARGET_DIR}/processed/${ACTION}/${FNAME}" ]; do
-        mkdir -p ${TARGET_DIR}/processed/${ACTION}/${FNAME}
-      done
-      ffmpeg -i ${VIDEO_FNAME} -r ${FRAME_RATE} -f image2 -s ${IMAGE_SIZE}x${IMAGE_SIZE} \
-      ${TARGET_DIR}/processed/${ACTION}/${FNAME}/image-%03d_${IMAGE_SIZE}x${IMAGE_SIZE}.png
-    done
-  done
-  python video_prediction/datasets/kth_dataset.py ${TARGET_DIR}/processed ${TARGET_DIR} ${IMAGE_SIZE}
-  rm -rf ${TARGET_DIR}/raw
-  rm -rf ${TARGET_DIR}/processed
-else
-  echo "Invalid dataset name: '$1' (choose from 'bair', 'kth')" >&2
-  exit 1
-fi
-echo "Succesfully finished downloading and preprocessing dataset '$1'"
diff --git a/video_prediction_savp/bash/workflow_era5.sh b/video_prediction_savp/bash/workflow_era5.sh
deleted file mode 100755
index 01d16bfdf7f38ffe00495ba31f85349d9ce68335..0000000000000000000000000000000000000000
--- a/video_prediction_savp/bash/workflow_era5.sh
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/usr/bin/env bash
-set -e
-#
-#MODEL=savp
-##train_mode: end_to_end, pre_trained
-#TRAIN_MODE=end_to_end
-#EXP_NAME=era5_size_64_64_3_3t_norm
-
-MODEL=$1
-TRAIN_MODE=$2
-EXP_NAME=$3
-RETRAIN=1 # whether to use the existing end-to-end model or continue training: 1 means use the existing model (skip training), otherwise (re)train
-DATA_ETL_DIR=/p/scratch/deepacf/${USER}/
-DATA_EXTRA_DIR=${DATA_ETL_DIR}/extractedData/${EXP_NAME}
-DATA_PREPROCESS_DIR=${DATA_ETL_DIR}/preprocessedData/${EXP_NAME}
-DATA_PREPROCESS_TF_DIR=./data/${EXP_NAME}
-RESULTS_OUTPUT_DIR=./results_test_samples/${EXP_NAME}/${TRAIN_MODE}/
-
-if [ "$MODEL" == "savp" ]; then
-    method_dir=ours_savp
-elif [ "$MODEL" == "gan" ]; then
-    method_dir=ours_gan
-elif [ "$MODEL" == "vae" ]; then
-    method_dir=ours_vae
-else
-    echo "model does not exist" >&2
-    exit 1
-fi
-
-if [ "$TRAIN_MODE" == pre_trained ]; then
-    TRAIN_OUTPUT_DIR=./pretrained_models/kth/${method_dir}
-else
-    TRAIN_OUTPUT_DIR=./logs/${EXP_NAME}/${TRAIN_MODE}
-fi
-
-CHECKPOINT_DIR=${TRAIN_OUTPUT_DIR}/${method_dir}
-
-echo "===========================WORKFLOW SETUP===================="
-echo "Model ${MODEL}"
-echo "TRAIN MODE ${TRAIN_MODE}"
-echo "Method_dir ${method_dir}"
-echo "DATA_ETL_DIR ${DATA_ETL_DIR}"
-echo "DATA_EXTRA_DIR ${DATA_EXTRA_DIR}"
-echo "DATA_PREPROCESS_DIR ${DATA_PREPROCESS_DIR}"
-echo "DATA_PREPROCESS_TF_DIR ${DATA_PREPROCESS_TF_DIR}"
-echo "TRAIN_OUTPUT_DIR ${TRAIN_OUTPUT_DIR}"
-echo "============================================================="
-
-############## Data Preprocessing ################
-#To hkl data
-if [ -d "$DATA_PREPROCESS_DIR" ]; then
-    echo "The Preprocessed Data (.hkl ) exist"
-else
-    python ../workflow_video_prediction/DataPreprocess/benchmark/mpi_stager_v2_process_netCDF.py \
-    --input_dir ${DATA_EXTRA_DIR} --destination_dir ${DATA_PREPROCESS_DIR}
-fi
-
-#Change the .hkl data to .tfrecords files
-if [ -d "$DATA_PREPROCESS_TF_DIR" ]
-then
-    echo "Step2: The Preprocessed Data (tf.records) exist"
-else
-    echo "Step2: start, hkl. files to tf.records"
-    python ./video_prediction/datasets/era5_dataset_v2.py  --source_dir ${DATA_PREPROCESS_DIR}/splits \
-    --destination_dir ${DATA_PREPROCESS_TF_DIR}
-    echo "Step2: finish"
-fi
-
-#########Train##########################
-if [ "$TRAIN_MODE" == "pre_trained" ]; then
-    echo "step3: Using kth pre_trained model"
-elif [ "$TRAIN_MODE" == "end_to_end" ]; then
-    echo "step3: End-to-end training"
-    if [ "$RETRAIN" == 1 ]; then
-        echo "Using the existing end-to-end model"
-    else
-        echo "Step3: Training Starts "
-        python ./scripts/train_v2.py --input_dir $DATA_PREPROCESS_TF_DIR --dataset era5  \
-        --model ${MODEL} --model_hparams_dict hparams/kth/${method_dir}/model_hparams.json \
-        --output_dir ${TRAIN_OUTPUT_DIR} --checkpoint ${CHECKPOINT_DIR}
-        echo "Training ends "
-    fi
-else
-    echo "TRAIN_MODE is end_to_end or pre_trained"
-    exit 1
-fi
-
-#########Generate results#################
-echo "Step4: Postprocessing start"
-python ./scripts/generate_transfer_learning_finetune.py --input_dir ${DATA_PREPROCESS_TF_DIR} \
---dataset_hparams sequence_length=20 --checkpoint ${CHECKPOINT_DIR} --mode test --results_dir ${RESULTS_OUTPUT_DIR} \
---batch_size 4 --dataset era5
\ No newline at end of file
diff --git a/video_prediction_savp/bash/workflow_era5_macOS.sh b/video_prediction_savp/bash/workflow_era5_macOS.sh
deleted file mode 100755
index 1a6ebef38df877b8ee20f628d4e375a20e7c8bd5..0000000000000000000000000000000000000000
--- a/video_prediction_savp/bash/workflow_era5_macOS.sh
+++ /dev/null
@@ -1,93 +0,0 @@
-#!/usr/bin/env bash
-set -e
-#
-#MODEL=savp
-##train_mode: end_to_end, pre_trained
-#TRAIN_MODE=end_to_end
-#EXP_NAME=era5_size_64_64_3_3t_norm
-
-MODEL=$1
-TRAIN_MODE=$2
-EXP_NAME=$3
-RETRAIN=1 # whether to use the existing end-to-end model or continue training: 1 means use the existing model (skip training), otherwise (re)train
-DATA_ETL_DIR=/p/scratch/deepacf/${USER}/
-DATA_EXTRA_DIR=${DATA_ETL_DIR}/extractedData/${EXP_NAME}
-DATA_PREPROCESS_DIR=${DATA_ETL_DIR}/preprocessedData/${EXP_NAME}
-DATA_PREPROCESS_TF_DIR=./data/${EXP_NAME}
-RESULTS_OUTPUT_DIR=./results_test_samples/${EXP_NAME}/${TRAIN_MODE}/
-
-if [ "$MODEL" == "savp" ]; then
-    method_dir=ours_savp
-elif [ "$MODEL" == "gan" ]; then
-    method_dir=ours_gan
-elif [ "$MODEL" == "vae" ]; then
-    method_dir=ours_vae
-else
-    echo "model does not exist" >&2
-    exit 1
-fi
-
-if [ "$TRAIN_MODE" == pre_trained ]; then
-    TRAIN_OUTPUT_DIR=./pretrained_models/kth/${method_dir}
-else
-    TRAIN_OUTPUT_DIR=./logs/${EXP_NAME}/${TRAIN_MODE}
-fi
-
-CHECKPOINT_DIR=${TRAIN_OUTPUT_DIR}/${method_dir}
-
-echo "===========================WORKFLOW SETUP===================="
-echo "Model ${MODEL}"
-echo "TRAIN MODE ${TRAIN_MODE}"
-echo "Method_dir ${method_dir}"
-echo "DATA_ETL_DIR ${DATA_ETL_DIR}"
-echo "DATA_EXTRA_DIR ${DATA_EXTRA_DIR}"
-echo "DATA_PREPROCESS_DIR ${DATA_PREPROCESS_DIR}"
-echo "DATA_PREPROCESS_TF_DIR ${DATA_PREPROCESS_TF_DIR}"
-echo "TRAIN_OUTPUT_DIR ${TRAIN_OUTPUT_DIR}"
-echo "============================================================="
-
-############## Data Preprocessing ################
-#To hkl data
-#if [ -d "$DATA_PREPROCESS_DIR" ]; then
-#    echo "The Preprocessed Data (.hkl ) exist"
-#else
-#    python ../workflow_video_prediction/DataPreprocess/benchmark/mpi_stager_v2_process_netCDF.py \
-#    --input_dir ${DATA_EXTRA_DIR} --destination_dir ${DATA_PREPROCESS_DIR}
-#fi
-
-####Change the .hkl data to .tfrecords files
-if [ -d "$DATA_PREPROCESS_TF_DIR" ]
-then
-    echo "Step2: The Preprocessed Data (tf.records) exist"
-else
-    echo "Step2: start, hkl. files to tf.records"
-    python ./video_prediction/datasets/era5_dataset_v2.py  --source_dir ${DATA_PREPROCESS_DIR}/splits \
-    --destination_dir ${DATA_PREPROCESS_TF_DIR}
-    echo "Step2: finish"
-fi
-
-#########Train##########################
-if [ "$TRAIN_MODE" == "pre_trained" ]; then
-    echo "step3: Using kth pre_trained model"
-elif [ "$TRAIN_MODE" == "end_to_end" ]; then
-    echo "step3: End-to-end training"
-    if [ "$RETRAIN" == 1 ]; then
-        echo "Using the existing end-to-end model"
-    else
-        echo "Training Starts "
-        python ./scripts/train_v2.py --input_dir $DATA_PREPROCESS_TF_DIR --dataset era5  \
-        --model ${MODEL} --model_hparams_dict hparams/kth/${method_dir}/model_hparams.json \
-        --output_dir ${TRAIN_OUTPUT_DIR} --checkpoint ${CHECKPOINT_DIR}
-        echo "Training ends "
-    fi
-else
-    echo "TRAIN_MODE is end_to_end or pre_trained"
-    exit 1
-fi
-
-#########Generate results#################
-echo "Step4: Postprocessing start"
-python ./scripts/generate_transfer_learning_finetune.py --input_dir ${DATA_PREPROCESS_TF_DIR} \
---dataset_hparams sequence_length=20 --checkpoint ${CHECKPOINT_DIR} --mode test --results_dir ${RESULTS_OUTPUT_DIR} \
---batch_size 4 --dataset era5
diff --git a/video_prediction_savp/bash/workflow_era5_zam347.sh b/video_prediction_savp/bash/workflow_era5_zam347.sh
deleted file mode 100755
index ffe7209b6099f4ad9f57b4e90247a7d7acaf009d..0000000000000000000000000000000000000000
--- a/video_prediction_savp/bash/workflow_era5_zam347.sh
+++ /dev/null
@@ -1,93 +0,0 @@
-#!/usr/bin/env bash
-set -e
-#
-#MODEL=savp
-##train_mode: end_to_end, pre_trained
-#TRAIN_MODE=end_to_end
-#EXP_NAME=era5_size_64_64_3_3t_norm
-
-MODEL=$1
-TRAIN_MODE=$2
-EXP_NAME=$3
-RETRAIN=1 # whether to use the existing end-to-end model or continue training: 1 means use the existing model (skip training), otherwise (re)train
-DATA_ETL_DIR=/home/${USER}/
-DATA_ETL_DIR=/p/scratch/deepacf/${USER}/
-DATA_EXTRA_DIR=${DATA_ETL_DIR}/extractedData/${EXP_NAME}
-DATA_PREPROCESS_DIR=${DATA_ETL_DIR}/preprocessedData/${EXP_NAME}
-DATA_PREPROCESS_TF_DIR=./data/${EXP_NAME}
-RESULTS_OUTPUT_DIR=./results_test_samples/${EXP_NAME}/${TRAIN_MODE}/
-
-if [ "$MODEL" == "savp" ]; then
-    method_dir=ours_savp
-elif [ "$MODEL" == "gan" ]; then
-    method_dir=ours_gan
-elif [ "$MODEL" == "vae" ]; then
-    method_dir=ours_vae
-else
-    echo "model does not exist" >&2
-    exit 1
-fi
-
-if [ "$TRAIN_MODE" == pre_trained ]; then
-    TRAIN_OUTPUT_DIR=./pretrained_models/kth/${method_dir}
-else
-    TRAIN_OUTPUT_DIR=./logs/${EXP_NAME}/${TRAIN_MODE}
-fi
-
-CHECKPOINT_DIR=${TRAIN_OUTPUT_DIR}/${method_dir}
-
-echo "===========================WORKFLOW SETUP===================="
-echo "Model ${MODEL}"
-echo "TRAIN MODE ${TRAIN_MODE}"
-echo "Method_dir ${method_dir}"
-echo "DATA_ETL_DIR ${DATA_ETL_DIR}"
-echo "DATA_EXTRA_DIR ${DATA_EXTRA_DIR}"
-echo "DATA_PREPROCESS_DIR ${DATA_PREPROCESS_DIR}"
-echo "DATA_PREPROCESS_TF_DIR ${DATA_PREPROCESS_TF_DIR}"
-echo "TRAIN_OUTPUT_DIR ${TRAIN_OUTPUT_DIR}"
-echo "============================================================="
-
-############## Data Preprocessing ################
-#To hkl data
-#if [ -d "$DATA_PREPROCESS_DIR" ]; then
-#    echo "The Preprocessed Data (.hkl ) exist"
-#else
-#    python ../workflow_video_prediction/DataPreprocess/benchmark/mpi_stager_v2_process_netCDF.py \
-#    --input_dir ${DATA_EXTRA_DIR} --destination_dir ${DATA_PREPROCESS_DIR}
-#fi
-
-####Change the .hkl data to .tfrecords files
-if [ -d "$DATA_PREPROCESS_TF_DIR" ]
-then
-    echo "Step2: The Preprocessed Data (tf.records) exist"
-else
-    echo "Step2: start, hkl. files to tf.records"
-    python ./video_prediction/datasets/era5_dataset_v2.py  --source_dir ${DATA_PREPROCESS_DIR}/splits \
-    --destination_dir ${DATA_PREPROCESS_TF_DIR}
-    echo "Step2: finish"
-fi
-
-#########Train##########################
-if [ "$TRAIN_MODE" == "pre_trained" ]; then
-    echo "step3: Using kth pre_trained model"
-elif [ "$TRAIN_MODE" == "end_to_end" ]; then
-    echo "step3: End-to-end training"
-    if [ "$RETRAIN" == 1 ]; then
-        echo "Using the existing end-to-end model"
-    else
-        echo "Training Starts "
-        python ./scripts/train_v2.py --input_dir $DATA_PREPROCESS_TF_DIR --dataset era5  \
-        --model ${MODEL} --model_hparams_dict hparams/kth/${method_dir}/model_hparams.json \
-        --output_dir ${TRAIN_OUTPUT_DIR} --checkpoint ${CHECKPOINT_DIR}
-        echo "Training ends "
-    fi
-else
-    echo "TRAIN_MODE is end_to_end or pre_trained"
-    exit 1
-fi
-
-#########Generate results#################
-echo "Step4: Postprocessing start"
-python ./scripts/generate_transfer_learning_finetune.py --input_dir ${DATA_PREPROCESS_TF_DIR} \
---dataset_hparams sequence_length=20 --checkpoint ${CHECKPOINT_DIR} --mode test --results_dir ${RESULTS_OUTPUT_DIR} \
---batch_size 4 --dataset era5
diff --git a/video_prediction_savp/docs/presentation/presentation.md b/video_prediction_savp/docs/presentation/presentation.md
deleted file mode 100644
index d49239089d5d881ef7a42e5e847ee45c3be725d4..0000000000000000000000000000000000000000
--- a/video_prediction_savp/docs/presentation/presentation.md
+++ /dev/null
@@ -1,5 +0,0 @@
-These are the presentation materials for the VP group.
-
-
-## 2020-03-01 - 2020-03-31
-https://docs.google.com/presentation/d/18EJKBJJ2LHI7uNU_l8s_Cm-aGZhw9tkoQ8BxqYZfkWk/edit#slide=id.g71f805bc32_0_80
diff --git a/video_prediction_savp/geo_info.json b/video_prediction_savp/geo_info.json
deleted file mode 100644
index 911a7c3b1333c4e815db705197cf77cb107de8a8..0000000000000000000000000000000000000000
--- a/video_prediction_savp/geo_info.json
+++ /dev/null
@@ -1 +0,0 @@
-{"lat": [58.19999694824219, 57.89999771118164, 57.599998474121094, 57.29999923706055, 57.0, 56.69999694824219, 56.39999771118164, 56.099998474121094, 55.79999923706055, 55.5, 55.19999694824219, 54.89999771118164, 54.599998474121094, 54.29999923706055, 54.0, 53.69999694824219, 53.39999771118164, 53.099998474121094, 52.79999923706055, 52.5, 52.19999694824219, 51.89999771118164, 51.599998474121094, 51.29999923706055, 51.0, 50.69999694824219, 50.39999771118164, 50.099998474121094, 49.79999923706055, 49.5, 49.19999694824219, 48.89999771118164, 48.599998474121094, 48.29999923706055, 48.0, 47.69999694824219, 47.39999771118164, 47.099998474121094, 46.79999923706055, 46.5, 46.19999694824219, 45.89999771118164, 45.599998474121094, 45.29999923706055, 45.0, 44.69999694824219, 44.39999771118164, 44.099998474121094, 43.79999923706055, 43.5, 43.19999694824219, 42.89999771118164, 42.599998474121094, 42.29999923706055, 42.0, 41.69999694824219, 41.39999771118164, 41.099998474121094, 40.79999923706055, 40.499996185302734, 40.19999694824219, 39.89999771118164, 39.599998474121094, 39.29999923706055], "lon": [-0.5999755859375, -0.29998779296875, 0.0, 0.30000001192092896, 0.6000000238418579, 0.9000000357627869, 1.2000000476837158, 1.5, 1.8000000715255737, 2.1000001430511475, 2.4000000953674316, 2.700000047683716, 3.0, 3.3000001907348633, 3.6000001430511475, 3.9000000953674316, 4.200000286102295, 4.5, 4.800000190734863, 5.100000381469727, 5.400000095367432, 5.700000286102295, 6.0, 6.300000190734863, 6.600000381469727, 6.900000095367432, 7.200000286102295, 7.500000476837158, 7.800000190734863, 8.100000381469727, 8.40000057220459, 8.700000762939453, 9.0, 9.300000190734863, 9.600000381469727, 9.90000057220459, 10.200000762939453, 10.5, 10.800000190734863, 11.100000381469727, 11.40000057220459, 11.700000762939453, 12.0, 12.300000190734863, 12.600000381469727, 12.90000057220459, 13.200000762939453, 13.500000953674316, 13.800000190734863, 14.100000381469727, 14.40000057220459, 14.700000762939453, 15.000000953674316, 15.300000190734863, 15.600000381469727, 15.90000057220459, 16.200000762939453, 16.5, 16.80000114440918, 17.100000381469727, 17.400001525878906, 17.700000762939453, 18.0, 18.30000114440918]}
\ No newline at end of file
diff --git a/video_prediction_savp/scripts/Analysis_all.py b/video_prediction_savp/scripts/Analysis_all.py
deleted file mode 100644
index 7ed4b6b666634abafbd18af8517cf5b81b75bd61..0000000000000000000000000000000000000000
--- a/video_prediction_savp/scripts/Analysis_all.py
+++ /dev/null
@@ -1,105 +0,0 @@
-import pickle
-import os
-import matplotlib.pyplot as plt
-
-# results_path = ["results_test_samples/era5_size_64_64_3_norm_dup_pretrained/ours_savp","results_test_samples/era5_size_64_64_3_norm_dup_pretrained_finetune/ours_savp",
-#                "results_test_samples/era5_size_64_64_3_norm_dup_pretrained_gan/kth_ours_gan","results_test_samples/era5_size_64_64_3_norm_dup_pretrained_vae_l1/kth_ours_vae_l1"]
-#
-# model_names = ["SAVP","SAVP_Finetune","GAN","VAE"]
-
-
-# results_path = ["results_test_samples/era5_size_64_64_3_norm_dup_pretrained/ours_savp","results_test_samples/era5_size_64_64_3_norm_msl_gph_pretrained_savp/ours_savp",
-#                "results_test_samples/era5_size_64_64_3_norm_dup_pretrained_gan/kth_ours_gan","results_test_samples/era5_size_64_64_3_norm_msl_gph_pretrained_gan/kth_ours_gan"]
-#
-# model_names = ["SAVP_3T","SAVP_T-MSL-GPH","GAN_3T","GAN_T-MSL_GPH"]
-#
-# results_path = ["results_test_samples/era5_size_64_64_3_norm_dup_pretrained/ours_savp","results_test_samples/era5_size_64_64_3_norm_dup/ours_savp",
-#                 "results_test_samples/era5_size_64_64_3_norm_dup_pretrained/kth_ours_gan","results_test_samples/era5_size_64_64_3_norm_dup/ours_gan",
-#                 "results_test_samples/era5_size_64_64_3_norm_dup_pretrained/kth_ours_vae_l1","results_test_samples/era5_size_64_64_3_norm_dup/ours_vae_l1"]
-# model_names = ["TF-SAVP(KTH)","SAVP (3T)","TF-GAN(KTH)","GAN (3T)","TF-VAE (KTH)","VAE (3T)"]
-
-##
-##results_path = ["results_test_samples/era5_size_64_64_3_norm_t_msl_gph/ours_savp", "results_test_samples/era5_size_64_64_3_norm_dup/ours_savp",
-##                "results_test_samples/era5_size_64_64_3_norm_t_msl_gph/ours_gan","results_test_samples/era5_size_64_64_3_norm_dup/ours_gan"]
-##model_names = ["SAVP(T-MSL-GPH)", "SAVP (3T)", "GAN (T-MSL-GPH)","GAN (3T)"]
-
-##results_path = ["results_test_samples/era5_size_64_64_3_norm_t_msl_gph/ours_savp", "results_test_samples/era5_size_64_64_3_norm_dup/ours_savp",
-##                "results_test_samples/era5_size_64_64_3_norm_t_msl_gph/ours_gan","results_test_samples/era5_size_64_64_3_norm_dup/ours_gan"]
-##model_names = ["SAVP(T-MSL-GPH)", "SAVP (3T)", "GAN (T-MSL-GPH)","GAN (3T)"]
-##
-##mse_all = []
-##psnr_all = []
-##ssim_all = []
-##for path in results_path:
-##    p = os.path.join(path,"results.pkl")
-##    result = pickle.load(open(p,"rb"))
-##    mse = result["mse"]
-##    psnr = result["psnr"]
-##    ssim = result["ssim"]
-##    mse_all.append(mse)
-##    psnr_all.append(psnr)
-##    ssim_all.append(ssim)
-##
-##
-##def get_metric(metric):
-##    if metric == "mse":
-##        return mse_all
-##    elif metric == "psnr":
-##        return psnr_all
-##    elif metric == "ssim":
-##        return ssim_all
-##    else:
-##        raise("Metric error")
-##
-##for metric in ["mse","psnr","ssim"]:
-##    evals = get_metric(metric)
-##    timestamp = list(range(1,11))
-##    fig = plt.figure()
-##    plt.plot(timestamp, evals[0],'-.',label=model_names[0])
-##    plt.plot(timestamp, evals[1],'--',label=model_names[1])
-##    plt.plot(timestamp, evals[2],'-',label=model_names[2])
-##    plt.plot(timestamp, evals[3],'--.',label=model_names[3])
-##    # plt.plot(timestamp, evals[4],'*-.',label=model_names[4])
-##    # plt.plot(timestamp, evals[5],'--*',label=model_names[5])
-##    if metric == "mse":
-##        plt.legend(loc="upper left")
-##    else:
-##        plt.legend(loc = "upper right")
-##    plt.xlabel("Timestamps")
-##    plt.ylabel(metric)
-##    plt.title(metric,fontsize=15)
-##    plt.savefig(metric + "2.png")
-##    plt.clf()
-
-
-
-#persistent analysis
-persistent_mse_all = []
-persistent_psnr_all = []
-persistent_ssim_all = []
-mse_all = []
-psnr_all = []
-ssim_all = []
-results_root_path = "/p/scratch/deepacf/video_prediction_shared_folder/results/era5-Y2017M01to12-64x64-50d00N11d50E-T_T_T/ours_gan"
-p1 = os.path.join(results_root_path,"results.pkl")
-result1 = pickle.load(open(p1,"rb"))
-p2 = os.path.join(results_root_path,"persistent_results.pkl")
-result2 = pickle.load(open(p2,"rb"))
-mse = result1["mse"]
-psnr = result1["psnr"]
-ssim = result1["ssim"]
-mse_all.append(mse)
-psnr_all.append(psnr)
-ssim_all.append(ssim)
-
-persistent_mse = result2["mse"]
-persistent_psnr = result2["psnr"]
-persistent_ssim = result2["ssim"]
-persistent_mse_all.append(persistent_mse)
-persistent_psnr_all.append(persistent_psnr)
-persistent_ssim_all.append(persistent_ssim)
-
-
-
-print("persistent_mse",persistent_mse_all)
-print("mse",mse_all)
diff --git a/video_prediction_savp/scripts/evaluate_svg.sh b/video_prediction_savp/scripts/evaluate_svg.sh
deleted file mode 100644
index 212c4ba239ecdbd15c70c05b9336c32175dc8c5c..0000000000000000000000000000000000000000
--- a/video_prediction_savp/scripts/evaluate_svg.sh
+++ /dev/null
@@ -1 +0,0 @@
-#!/usr/bin/env bash
\ No newline at end of file
diff --git a/video_prediction_savp/scripts/generate.py b/video_prediction_savp/scripts/generate.py
deleted file mode 100644
index e1893194fe600981350a52e9880df9f6a034701d..0000000000000000000000000000000000000000
--- a/video_prediction_savp/scripts/generate.py
+++ /dev/null
@@ -1,537 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import argparse
-import errno
-import json
-import os
-import math
-import random
-import cv2
-import numpy as np
-import tensorflow as tf
-import seaborn as sns
-import pickle
-from random import seed
-#from six.moves import cPickle
-import matplotlib
-matplotlib.use('Agg')
-import matplotlib.pyplot as plt
-import matplotlib.gridspec as gridspec
-import matplotlib.animation as animation
-import pandas as pd
-from video_prediction import datasets, models
-from matplotlib.colors import LinearSegmentedColormap
-from matplotlib.ticker import MaxNLocator
-from video_prediction.utils.ffmpeg_gif import save_gif
-
-with open("./splits_size_64_64_1/geo_info.json","r") as json_file:
-    geo = json.load(json_file)
-    lat = [round(i,2) for i in geo["lat"]]
-    lon = [round(i,2) for i in geo["lon"]]
-
-def main():
-    parser = argparse.ArgumentParser()
-    parser.add_argument("--input_dir", type=str, required=True, help="either a directory containing subdirectories "
-                                                                     "train, val, test, etc, or a directory containing "
-                                                                     "the tfrecords")
-    parser.add_argument("--results_dir", type=str, default='results', help="ignored if output_gif_dir is specified")
-    parser.add_argument("--results_gif_dir", type=str, help="default is results_dir. ignored if output_gif_dir is specified")
-    parser.add_argument("--results_png_dir", type=str, help="default is results_dir. ignored if output_png_dir is specified")
-    parser.add_argument("--output_gif_dir", help="output directory where samples are saved as gifs. default is "
-                                                 "results_gif_dir/model_fname")
-    parser.add_argument("--output_png_dir", help="output directory where samples are saved as pngs. default is "
-                                                 "results_png_dir/model_fname")
-    parser.add_argument("--checkpoint", help="directory with checkpoint or checkpoint name (e.g. checkpoint_dir/model-200000)")
-
-    parser.add_argument("--mode", type=str, choices=['val', 'test'], default='val', help='mode for dataset, val or test.')
-
-    parser.add_argument("--dataset", type=str, help="dataset class name")
-    parser.add_argument("--dataset_hparams", type=str, help="a string of comma separated list of dataset hyperparameters")
-    parser.add_argument("--model", type=str, help="model class name")
-    parser.add_argument("--model_hparams", type=str, help="a string of comma separated list of model hyperparameters")
-
-    parser.add_argument("--batch_size", type=int, default=8, help="number of samples in batch")
-    parser.add_argument("--num_samples", type=int, help="number of samples in total (all of them by default)")
-    parser.add_argument("--num_epochs", type=int, default=1)
-
-    parser.add_argument("--num_stochastic_samples", type=int, default=1) #Bing original is 5, change to 1
-    parser.add_argument("--gif_length", type=int, help="default is sequence_length")
-    parser.add_argument("--fps", type=int, default=4)
-
-    parser.add_argument("--gpu_mem_frac", type=float, default=0, help="fraction of gpu memory to use")
-    parser.add_argument("--seed", type=int, default=7)
-
-    args = parser.parse_args()
-
-    if args.seed is not None:
-        tf.set_random_seed(args.seed)
-        np.random.seed(args.seed)
-        random.seed(args.seed)
-
-    args.results_gif_dir = args.results_gif_dir or args.results_dir
-    args.results_png_dir = args.results_png_dir or args.results_dir
-    dataset_hparams_dict = {}
-    model_hparams_dict = {}
-    if args.checkpoint:
-        checkpoint_dir = os.path.normpath(args.checkpoint)
-        if not os.path.isdir(args.checkpoint):
-            checkpoint_dir, _ = os.path.split(checkpoint_dir)
-        if not os.path.exists(checkpoint_dir):
-            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), checkpoint_dir)
-        with open(os.path.join(checkpoint_dir, "options.json")) as f:
-            print("loading options from checkpoint %s" % args.checkpoint)
-            options = json.loads(f.read())
-            args.dataset = args.dataset or options['dataset']
-            args.model = args.model or options['model']
-        try:
-            with open(os.path.join(checkpoint_dir, "dataset_hparams.json")) as f:
-                dataset_hparams_dict = json.loads(f.read())
-        except FileNotFoundError:
-            print("dataset_hparams.json was not loaded because it does not exist")
-        try:
-            with open(os.path.join(checkpoint_dir, "model_hparams.json")) as f:
-                model_hparams_dict = json.loads(f.read())
-        except FileNotFoundError:
-            print("model_hparams.json was not loaded because it does not exist")
-        args.output_gif_dir = args.output_gif_dir or os.path.join(args.results_gif_dir, os.path.split(checkpoint_dir)[1])
-        args.output_png_dir = args.output_png_dir or os.path.join(args.results_png_dir, os.path.split(checkpoint_dir)[1])
-    else:
-        if not args.dataset:
-            raise ValueError('dataset is required when checkpoint is not specified')
-        if not args.model:
-            raise ValueError('model is required when checkpoint is not specified')
-        args.output_gif_dir = args.output_gif_dir or os.path.join(args.results_gif_dir, 'model.%s' % args.model)
-        args.output_png_dir = args.output_png_dir or os.path.join(args.results_png_dir, 'model.%s' % args.model)
-
-    print('----------------------------------- Options ------------------------------------')
-    for k, v in args._get_kwargs():
-        print(k, "=", v)
-    print('------------------------------------- End --------------------------------------')
-
-    VideoDataset = datasets.get_dataset_class(args.dataset)
-    dataset = VideoDataset(
-        args.input_dir,
-        mode=args.mode,
-        num_epochs=args.num_epochs,
-        seed=args.seed,
-        hparams_dict=dataset_hparams_dict,
-        hparams=args.dataset_hparams)
-    VideoPredictionModel = models.get_model_class(args.model)
-    hparams_dict = dict(model_hparams_dict)
-    hparams_dict.update({
-        'context_frames': dataset.hparams.context_frames,
-        'sequence_length': dataset.hparams.sequence_length,
-        'repeat': dataset.hparams.time_shift,
-    })
-    model = VideoPredictionModel(
-        mode=args.mode,
-        hparams_dict=hparams_dict,
-        hparams=args.model_hparams)
-
-    sequence_length = model.hparams.sequence_length
-    context_frames = model.hparams.context_frames
-    future_length = sequence_length - context_frames
-
-    if args.num_samples:
-        if args.num_samples > dataset.num_examples_per_epoch():
-            raise ValueError('num_samples cannot be larger than the dataset')
-        num_examples_per_epoch = args.num_samples
-    else:
-        # Bing: an error occurs here, so cheat a little and use a fixed number of samples
-        #num_examples_per_epoch = dataset.num_examples_per_epoch()
-        num_examples_per_epoch = args.batch_size * 8
-    if num_examples_per_epoch % args.batch_size != 0:
-        #bing
-        #raise ValueError('batch_size should evenly divide the dataset size %d' % num_examples_per_epoch)
-        pass
-    # Bing: for ERA5 data, we used dataset.make_batch_v2
-    #inputs = dataset.make_batch(args.batch_size)
-    inputs = dataset.make_batch(args.batch_size)
-    input_phs = {k: tf.placeholder(v.dtype, v.shape, '%s_ph' % k) for k, v in inputs.items()}
-    with tf.variable_scope(''):
-        model.build_graph(input_phs)
-
-    for output_dir in (args.output_gif_dir, args.output_png_dir):
-        if not os.path.exists(output_dir):
-            os.makedirs(output_dir)
-        with open(os.path.join(output_dir, "options.json"), "w") as f:
-            f.write(json.dumps(vars(args), sort_keys=True, indent=4))
-        with open(os.path.join(output_dir, "dataset_hparams.json"), "w") as f:
-            f.write(json.dumps(dataset.hparams.values(), sort_keys=True, indent=4))
-        with open(os.path.join(output_dir, "model_hparams.json"), "w") as f:
-            f.write(json.dumps(model.hparams.values(), sort_keys=True, indent=4))
-
-    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_mem_frac)
-    config = tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)
-    sess = tf.Session(config=config)
-    sess.graph.as_default()
-    model.restore(sess, args.checkpoint)
-    sample_ind = 0
-    gen_images_all = []
-    input_images_all = []
-
-    while True:
-        if args.num_samples and sample_ind >= args.num_samples:
-            break
-        try:
-            input_results = sess.run(inputs)
-        except tf.errors.OutOfRangeError:
-            break
-        print("evaluation samples from %d to %d" % (sample_ind, sample_ind + args.batch_size))
-        feed_dict = {input_ph: input_results[name] for name, input_ph in input_phs.items()}
-        for stochastic_sample_ind in range(args.num_stochastic_samples): #Todo: why use here
-            print("Stochastic sample id", stochastic_sample_ind)
-            gen_images = sess.run(model.outputs['gen_images'], feed_dict=feed_dict)
-            #input_images = sess.run(inputs["images"])
-            #Bing: Add evaluation metrics
-            # fetches = {'images': model.inputs['images']}
-            # fetches.update(model.eval_outputs.items())
-            # fetches.update(model.eval_metrics.items())
-            # results = sess.run(fetches, feed_dict = feed_dict)
-            # input_images = results["images"] #shape (batch_size,future_frames,height,width,channel)
-            # only keep the future frames
-            #gen_images = gen_images[:, -future_length:] #(8,10,64,64,1) (batch_size, sequences, height, width, channel)
-            #input_images = input_results["images"][:,-future_length:,:,:]
-            input_images = input_results["images"][:,1:,:,:,:]
-            #gen_mse_avg = results["eval_mse/avg"] #shape (batch_size,future_frames)
-            print("Finish sample ind",stochastic_sample_ind)
-            input_gen_diff_ = input_images - gen_images
-            #diff_image_range = pd.cut(input_gen_diff_.flatten(), bins = 4, labels = [-10, -5, 0, 5], right = False)
-            #diff_image_range = np.reshape(np.array(diff_image_range),input_gen_diff_.shape)
-            gen_images_all.extend(gen_images)
-            input_images_all.extend(input_images)
-
-            colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
-            cmap_name = 'my_list'
-            if sample_ind < 100:
-                for i in range(len(gen_images)):
-                    name = 'Batch_id_' + str(sample_ind) + " + Sample_" + str(i)
-                    gen_images_ = gen_images[i, :]
-                    gen_mse_avg_ = [np.mean(input_gen_diff_[i, frame, :, :, :]**2) for frame in
-                                    range(19)]  # per-frame MSE for the 19 predicted frames
-                    input_gen_diff = input_gen_diff_ [i,:,:,:,:]
-                    input_images_ = input_images[i, :]
-                    #gen_mse_avg_ = gen_mse_avg[i, :]
-
-                    # Bing: This is to check the difference between an image and the next image, for debugging the frozen-frame issue
-                    # gen_images_diff = []
-                    # for gen_idx in range(len(gen_images_) - 1):
-                    #     img_1 = gen_images_[gen_idx, :, :, :]
-                    #     img_2 = gen_images_[gen_idx + 1, :, :, :]
-                    #     img_diff = img_2 - img_1
-                    #     img_diff_nonzero = [e for img_idx, e in enumerate(img_diff.flatten()) if round(e,3) != 0.000]
-                    #     gen_images_diff.append(img_diff_nonzero)
-
-                    fig = plt.figure()
-                    gs = gridspec.GridSpec(4,6)
-                    gs.update(wspace = 0.7,hspace=0.8)
-                    ax1 = plt.subplot(gs[0:2,0:3])
-                    ax2 = plt.subplot(gs[0:2,3:],sharey=ax1)
-                    ax3 = plt.subplot(gs[2:4,0:3])
-                    ax4 = plt.subplot(gs[2:4,3:])
-                    xlables = [round(i,2) for i in list(np.linspace(np.min(lon),np.max(lon),5))]
-                    ylabels = [round(i,2) for i  in list(np.linspace(np.max(lat),np.min(lat),5))]
-                    plt.setp([ax1,ax2,ax3],xticks=list(np.linspace(0,64,5)), xticklabels=xlables ,yticks=list(np.linspace(0,64,5)),yticklabels=ylabels)
-                    ax1.title.set_text("(a) Ground Truth")
-                    ax2.title.set_text("(b) SAVP")
-                    ax3.title.set_text("(c) Diff.")
-                    ax4.title.set_text("(d) MSE")
-
-                    ax1.xaxis.set_tick_params(labelsize=7)
-                    ax1.yaxis.set_tick_params(labelsize = 7)
-                    ax2.xaxis.set_tick_params(labelsize=7)
-                    ax2.yaxis.set_tick_params(labelsize = 7)
-                    ax3.xaxis.set_tick_params(labelsize=7)
-                    ax3.yaxis.set_tick_params(labelsize = 7)
-
-                    init_images = np.zeros((input_images_.shape[1], input_images_.shape[2]))
-                    print("inti images shape", init_images.shape)
-                    xdata, ydata = [], []
-                    plot1 = ax1.imshow(init_images, cmap='jet', vmin = 270, vmax = 300)
-                    plot2 = ax2.imshow(init_images, cmap='jet', vmin = 270, vmax = 300)
-                    #x = np.linspace(0, 64, 64)
-                    #y = np.linspace(0, 64, 64)
-                    #plot1 = ax1.contourf(x,y,init_images, cmap='jet', vmin = np.min(input_images), vmax = np.max(input_images))
-                    #plot2 = ax2.contourf(x,y,init_images, cmap='jet', vmin = np.min(input_images), vmax = np.max(input_images))
-                    fig.colorbar(plot1, ax=ax1).ax.tick_params(labelsize=7)
-                    fig.colorbar(plot2, ax=ax2).ax.tick_params(labelsize=7)
-
-                    cm = LinearSegmentedColormap.from_list(
-                        cmap_name, "bwr", N = 5)
-
-                    plot3 = ax3.imshow(init_images, vmin=-10, vmax=10, cmap=cm)#cmap = 'PuBu_r',
-                    plot4, = ax4.plot([], [], color = "r")
-                    ax4.set_xlim(0, len(gen_mse_avg_)-1)
-                    ax4.set_ylim(0, 10)
-                    ax4.set_xlabel("Frames", fontsize=10)
-                    #ax4.set_ylabel("MSE", fontsize=10)
-                    ax4.xaxis.set_tick_params(labelsize=7)
-                    ax4.yaxis.set_tick_params(labelsize=7)
-
-
-                    plots = [plot1, plot2, plot3, plot4]
-
-                    #fig.colorbar(plots[1], ax = [ax1, ax2])
-
-                    fig.colorbar(plots[2], ax=ax3).ax.tick_params(labelsize=7)
-                    #fig.colorbar(plot1[0], ax=ax1).ax.tick_params(labelsize=7)
-                    #fig.colorbar(plot2[1], ax=ax2).ax.tick_params(labelsize=7)
-
-                    def animation_sample(t):
-                        input_image = input_images_[t, :, :, 0]
-                        gen_image = gen_images_[t, :, :, 0]
-                        diff_image = input_gen_diff[t,:,:,0]
-
-                        data = gen_mse_avg_[:t + 1]
-                        # x = list(range(len(gen_mse_avg_)))[:t+1]
-                        xdata.append(t)
-                        print("xdata", xdata)
-                        ydata.append(gen_mse_avg_[t])
-
-                        print("ydata", ydata)
-                        # p = sns.lineplot(x=x,y=data,color="b")
-                        # p.tick_params(labelsize=17)
-                        # plt.setp(p.lines, linewidth=6)
-                        plots[0].set_data(input_image)
-                        plots[1].set_data(gen_image)
-                        #plots[0] = ax1.contourf(x, y, input_image, cmap = 'jet', vmin = np.min(input_images),vmax = np.max(input_images))
-                        #plots[1] = ax2.contourf(x, y, gen_image, cmap = 'jet', vmin = np.min(input_images),vmax = np.max(input_images))
-                        plots[2].set_data(diff_image)
-                        plots[3].set_data(xdata, ydata)
-                        fig.suptitle("Frame " + str(t+1))
-
-                        return plots
-
-                    ani = animation.FuncAnimation(fig, animation_sample, frames=len(gen_mse_avg_), interval = 1000,
-                                                  repeat_delay = 2000)
-                    ani.save(os.path.join(args.output_png_dir, "Sample_" + str(name) + ".mp4"))
-
-            else:
-                pass
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    #         # for i, gen_mse_avg_ in enumerate(gen_mse_avg):
-    #         #     ims = []
-    #         #     fig = plt.figure()
-    #         #     plt.xlim(0,len(gen_mse_avg_))
-    #         #     plt.ylim(np.min(gen_mse_avg),np.max(gen_mse_avg))
-    #         #     plt.xlabel("Frames")
-    #         #     plt.ylabel("MSE_AVG")
-    #         #     #X = list(range(len(gen_mse_avg_)))
-    #         #     #for t, gen_mse_avg_ in enumerate(gen_mse_avg):
-    #         #     def animate_metric(j):
-    #         #         data = gen_mse_avg_[:(j+1)]
-    #         #         x = list(range(len(gen_mse_avg_)))[:(j+1)]
-    #         #         p = sns.lineplot(x=x,y=data,color="b")
-    #         #         p.tick_params(labelsize=17)
-    #         #         plt.setp(p.lines, linewidth=6)
-    #         #     ani = animation.FuncAnimation(fig, animate_metric, frames=len(gen_mse_avg_), interval = 1000, repeat_delay=2000)
-    #         #     ani.save(os.path.join(args.output_png_dir, "MSE_AVG" + str(i) + ".gif"))
-    #         #
-    #         #
-    #         # for i, input_images_ in enumerate(input_images):
-    #         #     #context_images_ = (input_results['images'][i])
-    #         #     #gen_images_fname = 'gen_image_%05d_%02d.gif' % (sample_ind + i, stochastic_sample_ind)
-    #         #     ims = []
-    #         #     fig = plt.figure()
-    #         #     for t, input_image in enumerate(input_images_):
-    #         #         im = plt.imshow(input_images[i, t, :, :, 0], interpolation = 'none')
-    #         #         ttl = plt.text(1.5, 2,"Frame_" + str(t))
-    #         #         ims.append([im,ttl])
-    #         #     ani = animation.ArtistAnimation(fig, ims, interval= 1000, blit=True,repeat_delay=2000)
-    #         #     ani.save(os.path.join(args.output_png_dir,"groud_true_images_" + str(i) + ".gif"))
-    #         #     #plt.show()
-    #         #
-    #         # for i,gen_images_ in enumerate(gen_images):
-    #         #     ims = []
-    #         #     fig = plt.figure()
-    #         #     for t, gen_image in enumerate(gen_images_):
-    #         #         im = plt.imshow(gen_images[i, t, :, :, 0], interpolation = 'none')
-    #         #         ttl = plt.text(1.5, 2, "Frame_" + str(t))
-    #         #         ims.append([im, ttl])
-    #         #     ani = animation.ArtistAnimation(fig, ims, interval = 1000, blit = True, repeat_delay = 2000)
-    #         #     ani.save(os.path.join(args.output_png_dir, "prediction_images_" + str(i) + ".gif"))
-    #
-    #
-    #             # for i, gen_images_ in enumerate(gen_images):
-    #             #     #context_images_ = (input_results['images'][i] * 255.0).astype(np.uint8)
-    #             #     #gen_images_ = (gen_images_ * 255.0).astype(np.uint8)
-    #             #     #bing
-    #             #     context_images_ = (input_results['images'][i])
-    #             #     gen_images_fname = 'gen_image_%05d_%02d.gif' % (sample_ind + i, stochastic_sample_ind)
-    #             #     context_and_gen_images = list(context_images_[:context_frames]) + list(gen_images_)
-    #             #     plt.figure(figsize = (10,2))
-    #             #     gs = gridspec.GridSpec(2,10)
-    #             #     gs.update(wspace=0.,hspace=0.)
-    #             #     for t, gen_image in enumerate(gen_images_):
-    #             #         gen_image_fname_pattern = 'gen_image_%%05d_%%02d_%%0%dd.png' % max(2,len(str(len(gen_images_) - 1)))
-    #             #         gen_image_fname = gen_image_fname_pattern % (sample_ind + i, stochastic_sample_ind, t)
-    #             #         plt.subplot(gs[t])
-    #             #         plt.imshow(input_images[i, t, :, :, 0], interpolation = 'none')  # the last index sets the channel. 0 = t2
-    #             #         # plt.pcolormesh(X_test[i,t,::-1,:,0], shading='bottom', cmap=plt.cm.jet)
-    #             #         plt.tick_params(axis = 'both', which = 'both', bottom = False, top = False, left = False,
-    #             #                         right = False, labelbottom = False, labelleft = False)
-    #             #         if t == 0: plt.ylabel('Actual', fontsize = 10)
-    #             #
-    #             #         plt.subplot(gs[t + 10])
-    #             #         plt.imshow(gen_images[i, t, :, :, 0], interpolation = 'none')
-    #             #         # plt.pcolormesh(X_hat[i,t,::-1,:,0], shading='bottom', cmap=plt.cm.jet)
-    #             #         plt.tick_params(axis = 'both', which = 'both', bottom = False, top = False, left = False,
-    #             #                         right = False, labelbottom = False, labelleft = False)
-    #             #         if t == 0: plt.ylabel('Predicted', fontsize = 10)
-    #             #     plt.savefig(os.path.join(args.output_png_dir, gen_image_fname) + 'plot_' + str(i) + '.png')
-    #             #     plt.clf()
-    #
-    #             # if args.gif_length:
-    #             #     context_and_gen_images = context_and_gen_images[:args.gif_length]
-    #             # save_gif(os.path.join(args.output_gif_dir, gen_images_fname),
-    #             #          context_and_gen_images, fps=args.fps)
-    #             #
-    #             # gen_image_fname_pattern = 'gen_image_%%05d_%%02d_%%0%dd.png' % max(2, len(str(len(gen_images_) - 1)))
-    #             # for t, gen_image in enumerate(gen_images_):
-    #             #     gen_image_fname = gen_image_fname_pattern % (sample_ind + i, stochastic_sample_ind, t)
-    #             #     if gen_image.shape[-1] == 1:
-    #             #       gen_image = np.tile(gen_image, (1, 1, 3))
-    #             #     else:
-    #             #       gen_image = cv2.cvtColor(gen_image, cv2.COLOR_RGB2BGR)
-    #             #     cv2.imwrite(os.path.join(args.output_png_dir, gen_image_fname), gen_image)
-
-        sample_ind += args.batch_size
-
-
-    with open(os.path.join(args.output_png_dir, "input_images_all"),"wb") as input_files:
-        pickle.dump(input_images_all,input_files)
-
-    with open(os.path.join(args.output_png_dir, "gen_images_all"),"wb") as gen_files:
-        pickle.dump(gen_images_all,gen_files)
-
-    with open(os.path.join(args.output_png_dir, "input_images_all"),"rb") as input_files:
-        input_images_all = pickle.load(input_files)
-
-    with open(os.path.join(args.output_png_dir, "gen_images_all"),"rb") as gen_files:
-        gen_images_all=pickle.load(gen_files)
-    ims = []
-    fig = plt.figure()
-    for frame in range(19):
-        input_gen_diff = np.mean((np.array(gen_images_all) - np.array(input_images_all))**2, axis = 0)[frame, :, :, 0]  # per-pixel squared error of this frame, averaged over the batch (arrays: (batch, seq, height, width, channel))
-        #pix_mean = np.mean(input_gen_diff, axis = 0)
-        #pix_std = np.std(input_gen_diff, axis=0)
-        im = plt.imshow(input_gen_diff, interpolation = 'none',cmap='PuBu')
-        if frame == 0:
-            fig.colorbar(im)
-        ttl = plt.text(1.5, 2, "Frame_" + str(frame +1))
-        ims.append([im, ttl])
-    ani = animation.ArtistAnimation(fig, ims, interval = 1000, blit = True, repeat_delay = 2000)
-    ani.save(os.path.join(args.output_png_dir, "Mean_Frames.mp4"))
-    plt.close("all")
-
-    ims = []
-    fig = plt.figure()
-    for frame in range(19):
-        pix_std = np.std((np.array(gen_images_all) - np.array(input_images_all))**2, axis = 0)[frame, :, :, 0]  # per-pixel standard deviation of the squared error of this frame over the batch
-        #pix_mean = np.mean(input_gen_diff, axis = 0)
-        #pix_std = np.std(input_gen_diff, axis=0)
-        im = plt.imshow(pix_std, interpolation = 'none',cmap='PuBu')
-        if frame == 0:
-            fig.colorbar(im)
-        ttl = plt.text(1.5, 2, "Frame_" + str(frame+1))
-        ims.append([im, ttl])
-    ani = animation.ArtistAnimation(fig, ims, interval = 1000, blit = True, repeat_delay = 2000)
-    ani.save(os.path.join(args.output_png_dir, "Std_Frames.mp4"))
-
-    gen_images_all = np.array(gen_images_all)
-    input_images_all = np.array(input_images_all)
-    # mse_model = np.mean((input_images_all[:, 1:,:,:,0] - gen_images_all[:, 1:,:,:,0])**2)  # look at all timesteps except the first
-    # mse_model_last = np.mean((input_images_all[:, future_length-1,:,:,0] - gen_images_all[:, future_length-1,:,:,0])**2)
-    # mse_prev = np.mean((input_images_all[:, :-1,:,:,0] - gen_images_all[:, 1:,:,:,0])**2 )
-
-    mse_model = np.mean((input_images_all[:, :10, :, :, 0] - gen_images_all[:, :10, :, :, 0])**2)  # MSE over the first 10 timesteps
-    mse_model_last = np.mean((input_images_all[:, 10, :, :, 0] - gen_images_all[:, 10, :, :, 0])**2)  # MSE of frame 11 only
-    mse_prev = np.mean((input_images_all[:, :9, :, :, 0] - input_images_all[:, 1:10, :, :, 0])**2)  # persistence baseline: previous frame used as prediction
-
-    def psnr(img1, img2):
-        mse = np.mean((img1 - img2) ** 2)
-        if mse == 0: return 100
-        PIXEL_MAX = 1
-        return 20 * math.log10(PIXEL_MAX / math.sqrt(mse))
-
-    psnr_model = psnr(input_images_all[:, :10, :, :, 0],  gen_images_all[:, :10, :, :, 0])
-    psnr_model_last = psnr(input_images_all[:, 10, :, :, 0],  gen_images_all[:,10, :, :, 0])
-    psnr_prev = psnr(input_images_all[:, :9, :, :, 0],  input_images_all[:, 1:10, :, :, 0])
-    f = open(os.path.join(args.output_png_dir,'prediction_scores_4prediction.txt'), 'w')
-    f.write("Model MSE: %f\n" % mse_model)
-    f.write("Model MSE from only last prediction in sequence: %f\n" % mse_model_last)
-    f.write("Previous Frame MSE: %f\n" % mse_prev)
-    f.write("Model PSNR: %f\n" % psnr_model)
-    f.write("Model PSNR from only last prediction in sequence: %f\n" % psnr_model_last)
-    f.write("Previous frame PSNR: %f\n" % psnr_prev)
-    f.write("Shape of X_test: " + str(input_images_all.shape))
-    f.write("")
-    f.write("Shape of X_hat: " + str(gen_images_all.shape))
-    f.close()
-
-    seed(1)
-    s = random.sample(range(len(gen_images_all)), 100)
-    print("******KDP******")
-    # kernel density plot for checking for mode collapse
-    fig = plt.figure()
-    kdp = sns.kdeplot(gen_images_all[s].flatten(), shade=True, color="r", label = "Generated Images")
-    kdp = sns.kdeplot(input_images_all[s].flatten(), shade=True, color="b", label = "Ground Truth")
-    kdp.set(xlabel = 'Temperature (K)', ylabel = 'Probability')
-    plt.savefig(os.path.join(args.output_png_dir, "kdp_gen_images.png"), dpi = 400)
-    plt.clf()
-
-    # scatter plots comparing predictions against the ground truth for selected frames
-    for i in [0,3,6,9,12,15,18]:
-        fig = plt.figure()
-        plt.scatter(gen_images_all[:,i,:,:][s].flatten(),input_images_all[:,i,:,:][s].flatten(),s=0.3)
-        #plt.scatter(gen_images_all[:,0,:,:].flatten(),input_images_all[:,0,:,:].flatten(),s=0.3)
-        plt.xlabel("Prediction")
-        plt.ylabel("Real values")
-        plt.title("Frame_{}".format(i+1))
-        plt.plot([250,300], [250,300],color="black")
-        plt.savefig(os.path.join(args.output_png_dir,"pred_real_frame_{}.png".format(str(i))))
-        plt.clf()
-
-
-    mse_model_by_frames = np.mean((input_images_all[:, :, :, :, 0][s] - gen_images_all[:, :, :, :, 0][s]) ** 2,axis=(2,3)) #return (batch, sequence)
-    x = [str(i+1) for i in list(range(19))]
-    fig,axis = plt.subplots()
-    mean_f = np.mean(mse_model_by_frames, axis = 0)
-    median = np.median(mse_model_by_frames, axis=0)
-    q_low = np.quantile(mse_model_by_frames, q=0.25, axis=0)
-    q_high = np.quantile(mse_model_by_frames, q=0.75, axis=0)
-    d_low = np.quantile(mse_model_by_frames,q=0.1, axis=0)
-    d_high = np.quantile(mse_model_by_frames, q=0.9, axis=0)
-    plt.fill_between(x, d_high, d_low, color="ghostwhite",label="interdecile range")
-    plt.fill_between(x,q_high, q_low , color = "lightgray", label="interquartile range")
-    plt.plot(x, median, color="grey", linewidth=0.6, label="Median")
-    plt.plot(x, mean_f, color="peachpuff",linewidth=1.5, label="Mean")
-    plt.title("MSE percentiles")
-    plt.xlabel("Frames")
-    plt.legend(loc=2, fontsize=8)
-    plt.savefig(os.path.join(args.output_png_dir,"mse_percentiles.png"))
-
-if __name__ == '__main__':
-    main()
diff --git a/video_prediction_savp/scripts/generate_movingmnist.py b/video_prediction_savp/scripts/generate_movingmnist.py
deleted file mode 100644
index d4fbf5eb5d8d8f4cad87ae26d15bc2787d9e6c0a..0000000000000000000000000000000000000000
--- a/video_prediction_savp/scripts/generate_movingmnist.py
+++ /dev/null
@@ -1,822 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import argparse
-import errno
-import json
-import os
-import math
-import random
-import cv2
-import numpy as np
-import tensorflow as tf
-import pickle
-from random import seed
-import matplotlib
-matplotlib.use('Agg')
-import matplotlib.pyplot as plt
-import matplotlib.gridspec as gridspec
-import matplotlib.animation as animation
-import pandas as pd
-import re
-from video_prediction import datasets, models
-from matplotlib.colors import LinearSegmentedColormap
-#from matplotlib.ticker import MaxNLocator
-#from video_prediction.utils.ffmpeg_gif import save_gif
-from skimage.metrics import structural_similarity as ssim
-import datetime
-# Scarlet 2020/05/28: access to statistical values in json file 
-from os import path
-import sys
-sys.path.append(path.abspath('../video_prediction/datasets/'))
-from era5_dataset_v2 import Norm_data
-from os.path import dirname
-from netCDF4 import Dataset,date2num
-from metadata import MetaData as MetaData
-
-def set_seed(seed):
-    if seed is not None:
-        tf.set_random_seed(seed)
-        np.random.seed(seed)
-        random.seed(seed) 
-
-def get_coordinates(metadata_fname):
-    """
-    Retrieves the latitudes and longitudes read from the metadata json file.
-    """
-    md = MetaData(json_file=metadata_fname)
-    md.get_metadata_from_file(metadata_fname)
-    
-    try:
-        print("lat:", md.lat)
-        print("lon:", md.lon)
-        return md.lat, md.lon
-    except AttributeError:
-        raise ValueError("Error when handling: '" + metadata_fname + "'")
-    
-
-def load_checkpoints_and_create_output_dirs(checkpoint, dataset, model):
-    # initialize defaults so that every returned value is defined, even when no checkpoint
-    # is given or one of the hparams json files is missing
-    options = {}
-    checkpoint_dir = None
-    dataset_hparams_dict = {}
-    model_hparams_dict = {}
-    if checkpoint:
-        checkpoint_dir = os.path.normpath(checkpoint)
-        if not os.path.isdir(checkpoint):
-            checkpoint_dir, _ = os.path.split(checkpoint_dir)
-        if not os.path.exists(checkpoint_dir):
-            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), checkpoint_dir)
-        with open(os.path.join(checkpoint_dir, "options.json")) as f:
-            print("loading options from checkpoint %s" % checkpoint)
-            options = json.loads(f.read())
-            dataset = dataset or options['dataset']
-            model = model or options['model']
-        try:
-            with open(os.path.join(checkpoint_dir, "dataset_hparams.json")) as f:
-                dataset_hparams_dict = json.loads(f.read())
-        except FileNotFoundError:
-            print("dataset_hparams.json was not loaded because it does not exist")
-        try:
-            with open(os.path.join(checkpoint_dir, "model_hparams.json")) as f:
-                model_hparams_dict = json.loads(f.read())
-        except FileNotFoundError:
-            print("model_hparams.json was not loaded because it does not exist")
-    else:
-        if not dataset:
-            raise ValueError('dataset is required when checkpoint is not specified')
-        if not model:
-            raise ValueError('model is required when checkpoint is not specified')
-
-    return options,dataset,model, checkpoint_dir,dataset_hparams_dict,model_hparams_dict
-
-
-    
-def setup_dataset(dataset,input_dir,mode,seed,num_epochs,dataset_hparams,dataset_hparams_dict):
-    VideoDataset = datasets.get_dataset_class(dataset)
-    dataset = VideoDataset(
-        input_dir,
-        mode = mode,
-        num_epochs = num_epochs,
-        seed = seed,
-        hparams_dict = dataset_hparams_dict,
-        hparams = dataset_hparams)
-    return dataset
-
-
-def setup_dirs(input_dir, results_png_dir):
-    # use the passed-in input_dir rather than the global args (undefined inside this function)
-    temporal_dir = os.path.split(input_dir)[0] + "/hickle/splits/"
-    print("temporal_dir:", temporal_dir)
-    return temporal_dir
-
-
-def update_hparams_dict(model_hparams_dict,dataset):
-    hparams_dict = dict(model_hparams_dict)
-    hparams_dict.update({
-        'context_frames': dataset.hparams.context_frames,
-        'sequence_length': dataset.hparams.sequence_length,
-        'repeat': dataset.hparams.time_shift,
-    })
-    return hparams_dict
-
-
-def psnr(img1, img2):
-    mse = np.mean((img1 - img2) ** 2)
-    if mse == 0: return 100
-    PIXEL_MAX = 1
-    return 20 * math.log10(PIXEL_MAX / math.sqrt(mse))
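-
-# Hedged sanity check (not part of the original script): with PIXEL_MAX = 1, a constant
-# pixel error of 0.1 gives MSE = 0.01 and hence PSNR = 20 * log10(1 / 0.1) = 20 dB.
-def _psnr_sanity_check():
-    img_a = np.zeros((8, 8))
-    img_b = np.full((8, 8), 0.1)  # constant absolute error of 0.1 per pixel
-    assert abs(psnr(img_a, img_b) - 20.0) < 1e-6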
-
-
-def setup_num_samples_per_epoch(num_samples, dataset):
-    if num_samples:
-        if num_samples > dataset.num_examples_per_epoch():
-            raise ValueError('num_samples cannot be larger than the dataset')
-        num_examples_per_epoch = num_samples
-    else:
-        num_examples_per_epoch = dataset.num_examples_per_epoch()
-    #if num_examples_per_epoch % args.batch_size != 0:
-    #    raise ValueError('batch_size should evenly divide the dataset size %d' % num_examples_per_epoch)
-    return num_examples_per_epoch
-
-
-def init_save_data():
-    sample_ind = 0
-    gen_images_all = []
-    #Bing:20200410
-    persistent_images_all = []
-    input_images_all = []
-    return sample_ind, gen_images_all,persistent_images_all, input_images_all
-
-
-def write_params_to_results_dir(args,output_dir,dataset,model):
-    if not os.path.exists(output_dir):
-        os.makedirs(output_dir)
-    with open(os.path.join(output_dir, "options.json"), "w") as f:
-        f.write(json.dumps(vars(args), sort_keys = True, indent = 4))
-    with open(os.path.join(output_dir, "dataset_hparams.json"), "w") as f:
-        f.write(json.dumps(dataset.hparams.values(), sort_keys = True, indent = 4))
-    with open(os.path.join(output_dir, "model_hparams.json"), "w") as f:
-        f.write(json.dumps(model.hparams.values(), sort_keys = True, indent = 4))
-    return None
-
-def get_one_seq_and_time(input_images,i):
-    assert (len(np.array(input_images).shape)==5)
-    input_images_ = input_images[i,:,:,:,:]
-    return input_images_
-
-
-def denorm_images_all_channels(input_images_):
-    # rescale normalized images back to the original value range (assumes inputs in [0, 1])
-    input_images_ = np.array(input_images_)
-    input_images_denorm = input_images_ * 255.0
-    #print("input_images_denorm shape",input_images_denorm.shape)
-    return input_images_denorm
-
-def plot_seq_imgs(imgs,output_png_dir,idx,label="Ground Truth"):
-    """
-    Plot the sequence of images
-    """
-
-    if len(np.array(imgs).shape) != 3:
-        raise ValueError("imgs dims should be three: (seq_len, lat, lon)")
-    img_len = imgs.shape[0]
-    fig = plt.figure(figsize=(18,6))
-    gs = gridspec.GridSpec(1, 10)
-    gs.update(wspace = 0., hspace = 0.)
-    for i in range(img_len):      
-        ax1 = plt.subplot(gs[i])
-        plt.imshow(imgs[i] ,cmap = 'jet')
-        plt.setp([ax1], xticks = [], xticklabels = [], yticks = [], yticklabels = [])
-    plt.savefig(os.path.join(output_png_dir, label + "_" +   str(idx) +  ".jpg"))
-    print("images_saved")
-    plt.clf()
- 
-
-    
-def get_persistence(ts):
-    pass
-
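-# Hedged sketch (added for illustration, not the intended implementation): get_persistence()
-# is still a stub. A plausible persistence baseline re-uses the observation from 24 hours
-# before each forecast time, mirroring the commented-out code further below that subtracts
-# datetime.timedelta(days=1) from the timestamps. The helper below assumes `ts` is an
-# iterable of datetime objects and `obs_by_time` is a hypothetical lookup (datetime -> 2D field).
-def persistence_forecast_sketch(ts, obs_by_time):
-    persisted = []
-    for t in ts:
-        # yesterday's observed field serves as the forecast for time t
-        persisted.append(obs_by_time[t - datetime.timedelta(days=1)])
-    return np.array(persisted)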
-
-def main():
-    parser = argparse.ArgumentParser()
-    parser.add_argument("--input_dir", type = str, required = True,
-                        help = "either a directory containing subdirectories "
-                               "train, val, test, etc, or a directory containing "
-                               "the tfrecords")
-    parser.add_argument("--results_dir", type = str, default = 'results',
-                        help = "ignored if output_gif_dir is specified")
-    parser.add_argument("--checkpoint",
-                        help = "directory with checkpoint or checkpoint name (e.g. checkpoint_dir/model-200000)")
-    parser.add_argument("--mode", type = str, choices = ['train','val', 'test'], default = 'val',
-                        help = 'mode for dataset, val or test.')
-    parser.add_argument("--dataset", type = str, help = "dataset class name")
-    parser.add_argument("--dataset_hparams", type = str,
-                        help = "a string of comma separated list of dataset hyperparameters")
-    parser.add_argument("--model", type = str, help = "model class name")
-    parser.add_argument("--model_hparams", type = str,
-                        help = "a string of comma separated list of model hyperparameters")
-    parser.add_argument("--batch_size", type = int, default = 8, help = "number of samples in batch")
-    parser.add_argument("--num_samples", type = int, help = "number of samples in total (all of them by default)")
-    parser.add_argument("--num_epochs", type = int, default = 1)
-    parser.add_argument("--num_stochastic_samples", type = int, default = 1)
-    parser.add_argument("--gif_length", type = int, help = "default is sequence_length")
-    parser.add_argument("--fps", type = int, default = 4)
-    parser.add_argument("--gpu_mem_frac", type = float, default = 0.95, help = "fraction of gpu memory to use")
-    parser.add_argument("--seed", type = int, default = 7)
-    args = parser.parse_args()
-    set_seed(args.seed)
-
-    dataset_hparams_dict = {}
-    model_hparams_dict = {}
-
-    options,dataset,model, checkpoint_dir,dataset_hparams_dict,model_hparams_dict = load_checkpoints_and_create_output_dirs(args.checkpoint,args.dataset,args.model)
-    print("Step 1 finished")
-
-    print('----------------------------------- Options ------------------------------------')
-    for k, v in args._get_kwargs():
-        print(k, "=", v)
-    print('------------------------------------- End --------------------------------------')
-
-    #setup dataset and model object
-    input_dir_tf = os.path.join(args.input_dir, "tfrecords") # where tensorflow records are stored
-    dataset = setup_dataset(dataset,input_dir_tf,args.mode,args.seed,args.num_epochs,args.dataset_hparams,dataset_hparams_dict)
-    
-    print("Step 2 finished")
-    VideoPredictionModel = models.get_model_class(model)
-    
-    hparams_dict = dict(model_hparams_dict)
-    hparams_dict.update({
-        'context_frames': dataset.hparams.context_frames,
-        'sequence_length': dataset.hparams.sequence_length,
-        'repeat': dataset.hparams.time_shift,
-    })
-    
-    model = VideoPredictionModel(
-        mode = args.mode,
-        hparams_dict = hparams_dict,
-        hparams = args.model_hparams)
-
-    sequence_length = model.hparams.sequence_length
-    context_frames = model.hparams.context_frames
-    future_length = sequence_length - context_frames  # context_frames is the number of input (context) frames
-
-    num_examples_per_epoch = setup_num_samples_per_epoch(args.num_samples,dataset)
-    
-    inputs = dataset.make_batch(args.batch_size)
-    print("inputs",inputs)
-    input_phs = {k: tf.placeholder(v.dtype, v.shape, '%s_ph' % k) for k, v in inputs.items()}
-    print("input_phs",input_phs)
-    
-    
-    # Build graph
-    with tf.variable_scope(''):
-        model.build_graph(input_phs)
-
-    #Write the update hparameters into results_dir    
-    write_params_to_results_dir(args=args,output_dir=args.results_dir,dataset=dataset,model=model)
-        
-    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction = args.gpu_mem_frac)
-    config = tf.ConfigProto(gpu_options = gpu_options, allow_soft_placement = True)
-    sess = tf.Session(config = config)
-    sess.graph.as_default()
-    sess.run(tf.global_variables_initializer())
-    sess.run(tf.local_variables_initializer())
-    model.restore(sess, args.checkpoint)
-    
-    #model.restore(sess, args.checkpoint)  # Bing: TODO 20200728: for now, only focus on true and persistent data
-    sample_ind, gen_images_all, persistent_images_all, input_images_all = init_save_data()
-    
-    is_first = True
-    # loop over samples (only the first few batches are visualised)
-    while sample_ind < 5:
-        gen_images_stochastic = []
-        if args.num_samples and sample_ind >= args.num_samples:
-            break
-        try:
-            input_results = sess.run(inputs)
-            input_images = input_results["images"]
-            # get the initial times
-            t_starts = input_results["T_start"]
-        except tf.errors.OutOfRangeError:
-            break
-            
-        #Get prediction values 
-        feed_dict = {input_ph: input_results[name] for name, input_ph in input_phs.items()}
-        gen_images = sess.run(model.outputs['gen_images'], feed_dict = feed_dict)#return [batchsize,seq_len,lat,lon,channel]
-        print("gen_images 20200822:",np.array(gen_images).shape)       
-        #Loop in batch size
-        for i in range(args.batch_size):
-            
-            #get one seq and the corresponding start time point
-            input_images_ = get_one_seq_and_time(input_images,i)
-            
-            # Denormalize the input data
-            input_images_denorm = denorm_images_all_channels(input_images_)  
-            print("input_images_denorm",input_images_denorm[0][0])
-                                                             
-            # Denormalize the predicted data
-            gen_images_ = gen_images[i]
-            gen_images_denorm = denorm_images_all_channels(gen_images_)
-            print("gen_images_denorm:", gen_images_denorm[0][0])
-            
-            # Plot the ground-truth input sequence
-            plot_seq_imgs(imgs=input_images_denorm[context_frames+1:,:,:,0],idx = sample_ind + i, label="Ground Truth",output_png_dir=args.results_dir)  
-                                                             
-            # Plot the forecast sequence
-            plot_seq_imgs(imgs=gen_images_denorm[context_frames:,:,:,0],idx = sample_ind + i,label="Forecast by Model " + args.model,output_png_dir=args.results_dir) 
-            
-            # TODO (Scarlet): plot persistence image
-            # implement the get_persistence() function
-
-            # instead of generating images for all inputs, only the first few examples are plotted for visualisation
-
-        sample_ind += args.batch_size
-
-
-        #for input_image in input_images_:
-
-#             for stochastic_sample_ind in range(args.num_stochastic_samples):
-#                 input_images_all.extend(input_images)
-#                 with open(os.path.join(args.output_png_dir, "input_images_all.pkl"), "wb") as input_files:
-#                     pickle.dump(list(input_images_all), input_files)
-
-
-#                 gen_images_stochastic.append(gen_images)
-#                 #print("Stochastic_sample,", stochastic_sample_ind)
-#                 for i in range(args.batch_size):
-#                     #bing:20200417
-#                     t_stampe = test_temporal_pkl[sample_ind+i]
-#                     print("timestamp:",type(t_stampe))
-#                     persistent_ts = np.array(t_stampe) - datetime.timedelta(days=1)
-#                     print ("persistent ts",persistent_ts)
-#                     persistent_idx = list(test_temporal_pkl).index(np.array(persistent_ts))
-#                     persistent_X = X_test[persistent_idx:persistent_idx+context_frames + future_length]
-#                     print("persistent index in test set:", persistent_idx)
-#                     print("persistent_X.shape",persistent_X.shape)
-#                     persistent_images_all.append(persistent_X)
-
-#                     cmap_name = 'my_list'
-#                     if sample_ind < 100:
-#                         #name = '_Stochastic_id_' + str(stochastic_sample_ind) + 'Batch_id_' + str(
-#                         #    sample_ind) + " + Sample_" + str(i)
-#                         name = '_Stochastic_id_' + str(stochastic_sample_ind) + "_Time_"+ t_stampe[0].strftime("%Y%m%d-%H%M%S")
-#                         print ("name",name)
-#                         gen_images_ = np.array(list(input_images[i,:context_frames]) + list(gen_images[i,-future_length:, :]))
-#                         #gen_images_ =  gen_images[i, :]
-#                         input_images_ = input_images[i, :]
-#                         #Bing:20200417
-#                         #persistent_images = ?
-#                         #+++Scarlet:20200528   
-#                         #print('Scarlet1')
-#                         input_gen_diff = norm_cls.denorm_var(input_images_[:, :, :,0], 'T2', norm) - norm_cls.denorm_var(gen_images_[:, :, :, 0],'T2',norm)
-#                         persistent_diff = norm_cls.denorm_var(input_images_[:, :, :,0], 'T2', norm) - norm_cls.denorm_var(persistent_X[:, :, :, 0], 'T2',norm)
-#                         #---Scarlet:20200528    
-#                         gen_mse_avg_ = [np.mean(input_gen_diff[frame, :, :] ** 2) for frame in
-#                                         range(sequence_length)]  # return the list with 10 (sequence) mse
-#                         persistent_mse_avg_ = [np.mean(persistent_diff[frame, :, :] ** 2) for frame in
-#                                         range(sequence_length)]  # return the list with 10 (sequence) mse
-
-#                         fig = plt.figure(figsize=(18,6))
-#                         gs = gridspec.GridSpec(1, 10)
-#                         gs.update(wspace = 0., hspace = 0.)
-#                         ts = list(range(10,20)) #[10,11,12,..]
-#                         xlables = [round(i,2) for i  in list(np.linspace(np.min(lon),np.max(lon),5))]
-#                         ylabels = [round(i,2) for i  in list(np.linspace(np.max(lat),np.min(lat),5))]
-
-#                         for t in ts:
-
-#                             #if t==0 : ax1=plt.subplot(gs[t])
-#                             ax1 = plt.subplot(gs[ts.index(t)])
-#                             #+++Scarlet:20200528
-#                             #print('Scarlet2')
-#                             input_image = norm_cls.denorm_var(input_images_[t, :, :, 0], 'T2', norm)
-#                             #---Scarlet:20200528
-#                             plt.imshow(input_image, cmap = 'jet', vmin=270, vmax=300)
-#                             ax1.title.set_text("t = " + str(t+1-10))
-#                             plt.setp([ax1], xticks = [], xticklabels = [], yticks = [], yticklabels = [])
-#                             if t == 0:
-#                                 plt.setp([ax1], xticks = list(np.linspace(0, 64, 3)), xticklabels = xlables, yticks = list(np.linspace(0, 64, 3)), yticklabels = ylabels)
-#                                 plt.ylabel("Ground Truth", fontsize=10)
-#                         plt.savefig(os.path.join(args.output_png_dir, "Ground_Truth_Sample_" + str(name) + ".jpg"))
-#                         plt.clf()
-
-#                         fig = plt.figure(figsize=(12,6))
-#                         gs = gridspec.GridSpec(1, 10)
-#                         gs.update(wspace = 0., hspace = 0.)
-
-#                         for t in ts:
-#                             #if t==0 : ax1=plt.subplot(gs[t])
-#                             ax1 = plt.subplot(gs[ts.index(t)])
-#                             #+++Scarlet:20200528
-#                             #print('Scarlet3')
-#                             gen_image = norm_cls.denorm_var(gen_images_[t, :, :, 0], 'T2', norm)
-#                             #---Scarlet:20200528
-#                             plt.imshow(gen_image, cmap = 'jet', vmin=270, vmax=300)
-#                             ax1.title.set_text("t = " + str(t+1-10))
-#                             plt.setp([ax1], xticks = [], xticklabels = [], yticks = [], yticklabels = [])
-
-#                         plt.savefig(os.path.join(args.output_png_dir, "Predicted_Sample_" + str(name) + ".jpg"))
-#                         plt.clf()
-
-
-#                         fig = plt.figure(figsize=(12,6))
-#                         gs = gridspec.GridSpec(1, 10)
-#                         gs.update(wspace = 0., hspace = 0.)
-#                         for t in ts:
-#                             #if t==0 : ax1=plt.subplot(gs[t])
-#                             ax1 = plt.subplot(gs[ts.index(t)])
-#                             #persistent_image = persistent_X[t, :, :, 0] * (321.46630859375 - 235.2141571044922) + 235.2141571044922
-#                             plt.imshow(persistent_X[t, :, :, 0], cmap = 'jet', vmin=270, vmax=300)
-#                             ax1.title.set_text("t = " + str(t+1-10))
-#                             plt.setp([ax1], xticks = [], xticklabels = [], yticks = [], yticklabels = [])
-
-#                         plt.savefig(os.path.join(args.output_png_dir, "Persistent_Sample_" + str(name) + ".jpg"))
-#                         plt.clf()
-
-                        
-#                 with open(os.path.join(args.output_png_dir, "persistent_images_all.pkl"), "wb") as input_files:
-#                     pickle.dump(list(persistent_images_all), input_files)
-#                     print ("Save persistent all")
-#                 if is_first:
-#                     gen_images_all = gen_images_stochastic
-#                     is_first = False
-#                 else:
-#                     gen_images_all = np.concatenate((np.array(gen_images_all), np.array(gen_images_stochastic)), axis=1)
-
-#                 if args.num_stochastic_samples == 1:
-#                     with open(os.path.join(args.output_png_dir, "gen_images_all.pkl"), "wb") as gen_files:
-#                         pickle.dump(list(gen_images_all[0]), gen_files)
-#                         print ("Save generate all")
-#                 else:
-#                     with open(os.path.join(args.output_png_dir, "gen_images_sample_id_" + str(sample_ind)),"wb") as gen_files:
-#                         pickle.dump(list(gen_images_stochastic), gen_files)
-#                     with open(os.path.join(args.output_png_dir, "gen_images_all_stochastic"), "wb") as gen_files:
-#                         pickle.dump(list(gen_images_all), gen_files)
-
-#         sample_ind += args.batch_size
-
-
-#     with open(os.path.join(args.output_png_dir, "input_images_all.pkl"),"rb") as input_files:
-#         input_images_all = pickle.load(input_files)
-
-#     with open(os.path.join(args.output_png_dir, "gen_images_all.pkl"),"rb") as gen_files:
-#         gen_images_all = pickle.load(gen_files)
-
-#     with open(os.path.join(args.output_png_dir, "persistent_images_all.pkl"),"rb") as gen_files:
-#         persistent_images_all = pickle.load(gen_files)
-
-#     #+++Scarlet:20200528
-#     #print('Scarlet4')
-#     input_images_all = np.array(input_images_all)
-#     input_images_all = norm_cls.denorm_var(input_images_all, 'T2', norm)
-#     #---Scarlet:20200528
-#     persistent_images_all = np.array(persistent_images_all)
-#     if len(np.array(gen_images_all).shape) == 6:
-#         for i in range(len(gen_images_all)):
-#             #+++Scarlet:20200528
-#             #print('Scarlet5')
-#             gen_images_all_stochastic = np.array(gen_images_all)[i,:,:,:,:,:]
-#             gen_images_all_stochastic = norm_cls.denorm_var(gen_images_all_stochastic, 'T2', norm)
-#             #gen_images_all_stochastic = np.array(gen_images_all_stochastic) * (321.46630859375 - 235.2141571044922) + 235.2141571044922
-#             #---Scarlet:20200528
-#             mse_all = []
-#             psnr_all = []
-#             ssim_all = []
-#             f = open(os.path.join(args.output_png_dir, 'prediction_scores_4prediction_stochastic_{}.txt'.format(i)), 'w')
-#             for i in range(future_length):
-#                 mse_model = np.mean((input_images_all[:, i + 10, :, :, 0] - gen_images_all_stochastic[:, i + 9, :, :,
-#                                                                             0]) ** 2)  # look at all timesteps except the first
-#                 psnr_model = psnr(input_images_all[:, i + 10, :, :, 0], gen_images_all_stochastic[:, i + 9, :, :, 0])
-#                 ssim_model = ssim(input_images_all[:, i + 10, :, :, 0], gen_images_all_stochastic[:, i + 9, :, :, 0],
-#                                   data_range = max(gen_images_all_stochastic[:, i + 9, :, :, 0].flatten()) - min(
-#                                       input_images_all[:, i + 10, :, :, 0].flatten()))
-#                 mse_all.extend([mse_model])
-#                 psnr_all.extend([psnr_model])
-#                 ssim_all.extend([ssim_model])
-#                 results = {"mse": mse_all, "psnr": psnr_all, "ssim": ssim_all}
-#                 f.write("##########Predicted Frame {}\n".format(str(i + 1)))
-#                 f.write("Model MSE: %f\n" % mse_model)
-#                 # f.write("Previous Frame MSE: %f\n" % mse_prev)
-#                 f.write("Model PSNR: %f\n" % psnr_model)
-#                 f.write("Model SSIM: %f\n" % ssim_model)
-
-
-#             pickle.dump(results, open(os.path.join(args.output_png_dir, "results_stochastic_{}.pkl".format(i)), "wb"))
-#             # f.write("Previous frame PSNR: %f\n" % psnr_prev)
-#             f.write("Shape of X_test: " + str(input_images_all.shape))
-#             f.write("")
-#             f.write("Shape of X_hat: " + str(gen_images_all_stochastic.shape))
-
-#     else:
-#         #+++Scarlet:20200528
-#         #print('Scarlet6')
-#         gen_images_all = np.array(gen_images_all)
-#         gen_images_all = norm_cls.denorm_var(gen_images_all, 'T2', norm)
-#         #---Scarlet:20200528
-        
-#         # mse_model = np.mean((input_images_all[:, 1:,:,:,0] - gen_images_all[:, 1:,:,:,0])**2)  # look at all timesteps except the first
-#         # mse_model_last = np.mean((input_images_all[:, future_length-1,:,:,0] - gen_images_all[:, future_length-1,:,:,0])**2)
-#         # mse_prev = np.mean((input_images_all[:, :-1,:,:,0] - gen_images_all[:, 1:,:,:,0])**2 )
-#         mse_all = []
-#         psnr_all = []
-#         ssim_all = []
-#         persistent_mse_all = []
-#         persistent_psnr_all = []
-#         persistent_ssim_all = []
-#         f = open(os.path.join(args.output_png_dir, 'prediction_scores_4prediction.txt'), 'w')
-#         for i in range(future_length):
-#             mse_model = np.mean((input_images_all[:1268, i + 10, :, :, 0] - gen_images_all[:, i + 9, :, :,
-#                                                                         0]) ** 2)  # look at all timesteps except the first
-#             persistent_mse_model = np.mean((input_images_all[:1268, i + 10, :, :, 0] - persistent_images_all[:, i + 9, :, :,
-#                                                                         0]) ** 2)  # look at all timesteps except the first
-            
-#             psnr_model = psnr(input_images_all[:1268, i + 10, :, :, 0], gen_images_all[:, i + 9, :, :, 0])
-#             ssim_model = ssim(input_images_all[:1268, i + 10, :, :, 0], gen_images_all[:, i + 9, :, :, 0],
-#                               data_range = max(gen_images_all[:, i + 9, :, :, 0].flatten()) - min(
-#                                   input_images_all[:, i + 10, :, :, 0].flatten()))
-#             persistent_psnr_model = psnr(input_images_all[:1268, i + 10, :, :, 0], persistent_images_all[:, i + 9, :, :, 0])
-#             persistent_ssim_model = ssim(input_images_all[:1268, i + 10, :, :, 0], persistent_images_all[:, i + 9, :, :, 0],
-#                               data_range = max(gen_images_all[:1268, i + 9, :, :, 0].flatten()) - min(input_images_all[:1268, i + 10, :, :, 0].flatten()))
-#             mse_all.extend([mse_model])
-#             psnr_all.extend([psnr_model])
-#             ssim_all.extend([ssim_model])
-#             persistent_mse_all.extend([persistent_mse_model])
-#             persistent_psnr_all.extend([persistent_psnr_model])
-#             persistent_ssim_all.extend([persistent_ssim_model])
-#             results = {"mse": mse_all, "psnr": psnr_all, "ssim": ssim_all}
-
-#             persistent_results = {"mse": persistent_mse_all, "psnr": persistent_psnr_all, "ssim": persistent_ssim_all}
-#             f.write("##########Predicted Frame {}\n".format(str(i + 1)))
-#             f.write("Model MSE: %f\n" % mse_model)
-#             # f.write("Previous Frame MSE: %f\n" % mse_prev)
-#             f.write("Model PSNR: %f\n" % psnr_model)
-#             f.write("Model SSIM: %f\n" % ssim_model)
-
-#         pickle.dump(results, open(os.path.join(args.output_png_dir, "results.pkl"), "wb"))
-#         pickle.dump(persistent_results, open(os.path.join(args.output_png_dir, "persistent_results.pkl"), "wb"))
-#         # f.write("Previous frame PSNR: %f\n" % psnr_prev)
-#         f.write("Shape of X_test: " + str(input_images_all.shape))
-#         f.write("")
-#         f.write("Shape of X_hat: " + str(gen_images_all.shape)      
-
-if __name__ == '__main__':
-    main()        
-
-    #psnr_model = psnr(input_images_all[:, :10, :, :, 0],  gen_images_all[:, :10, :, :, 0])
-    #psnr_model_last = psnr(input_images_all[:, 10, :, :, 0],  gen_images_all[:,10, :, :, 0])
-    #psnr_prev = psnr(input_images_all[:, :, :, :, 0],  input_images_all[:, 1:10, :, :, 0])
-
-    # ims = []
-    # fig = plt.figure()
-    # for frame in range(20):
-    #     input_gen_diff = np.mean((np.array(gen_images_all) - np.array(input_images_all))**2, axis=0)[frame, :,:,0] # Get the first prediction frame (batch,height, width, channel)
-    #     #pix_mean = np.mean(input_gen_diff, axis = 0)
-    #     #pix_std = np.std(input_gen_diff, axis=0)
-    #     im = plt.imshow(input_gen_diff, interpolation = 'none',cmap='PuBu')
-    #     if frame == 0:
-    #         fig.colorbar(im)
-    #     ttl = plt.text(1.5, 2, "Frame_" + str(frame +1))
-    #     ims.append([im, ttl])
-    # ani = animation.ArtistAnimation(fig, ims, interval=1000, blit = True, repeat_delay=2000)
-    # ani.save(os.path.join(args.output_png_dir, "Mean_Frames.mp4"))
-    # plt.close("all")
-
-    # ims = []
-    # fig = plt.figure()
-    # for frame in range(19):
-    #     pix_std= np.std((np.array(gen_images_all) - np.array(input_images_all))**2, axis = 0)[frame, :,:, 0]  # Get the first prediction frame (batch,height, width, channel)
-    #     #pix_mean = np.mean(input_gen_diff, axis = 0)
-    #     #pix_std = np.std(input_gen_diff, axis=0)
-    #     im = plt.imshow(pix_std, interpolation = 'none',cmap='PuBu')
-    #     if frame == 0:
-    #         fig.colorbar(im)
-    #     ttl = plt.text(1.5, 2, "Frame_" + str(frame+1))
-    #     ims.append([im, ttl])
-    # ani = animation.ArtistAnimation(fig, ims, interval = 1000, blit = True, repeat_delay = 2000)
-    # ani.save(os.path.join(args.output_png_dir, "Std_Frames.mp4"))
-
-    # seed(1)
-    # s = random.sample(range(len(gen_images_all)), 100)
-    # print("******KDP******")
-    # #kernel density plot for checking the model collapse
-    # fig = plt.figure()
-    # kdp = sns.kdeplot(gen_images_all[s].flatten(), shade=True, color="r", label = "Generate Images")
-    # kdp = sns.kdeplot(input_images_all[s].flatten(), shade=True, color="b", label = "Ground True")
-    # kdp.set(xlabel = 'Temperature (K)', ylabel = 'Probability')
-    # plt.savefig(os.path.join(args.output_png_dir, "kdp_gen_images.png"), dpi = 400)
-    # plt.clf()
-
-    #line plot for evaluating the prediction and groud-truth
-    # for i in [0,3,6,9,12,15,18]:
-    #     fig = plt.figure()
-    #     plt.scatter(gen_images_all[:,i,:,:][s].flatten(),input_images_all[:,i,:,:][s].flatten(),s=0.3)
-    #     #plt.scatter(gen_images_all[:,0,:,:].flatten(),input_images_all[:,0,:,:].flatten(),s=0.3)
-    #     plt.xlabel("Prediction")
-    #     plt.ylabel("Real values")
-    #     plt.title("Frame_{}".format(i+1))
-    #     plt.plot([250,300], [250,300],color="black")
-    #     plt.savefig(os.path.join(args.output_png_dir,"pred_real_frame_{}.png".format(str(i))))
-    #     plt.clf()
-    #
-    # mse_model_by_frames = np.mean((input_images_all[:, :, :, :, 0][s] - gen_images_all[:, :, :, :, 0][s]) ** 2,axis=(2,3)) #return (batch, sequence)
-    # x = [str(i+1) for i in list(range(19))]
-    # fig,axis = plt.subplots()
-    # mean_f = np.mean(mse_model_by_frames, axis = 0)
-    # median = np.median(mse_model_by_frames, axis=0)
-    # q_low = np.quantile(mse_model_by_frames, q=0.25, axis=0)
-    # q_high = np.quantile(mse_model_by_frames, q=0.75, axis=0)
-    # d_low = np.quantile(mse_model_by_frames,q=0.1, axis=0)
-    # d_high = np.quantile(mse_model_by_frames, q=0.9, axis=0)
-    # plt.fill_between(x, d_high, d_low, color="ghostwhite",label="interdecile range")
-    # plt.fill_between(x,q_high, q_low , color = "lightgray", label="interquartile range")
-    # plt.plot(x, median, color="grey", linewidth=0.6, label="Median")
-    # plt.plot(x, mean_f, color="peachpuff",linewidth=1.5, label="Mean")
-    # plt.title(f'MSE percentile')
-    # plt.xlabel("Frames")
-    # plt.legend(loc=2, fontsize=8)
-    # plt.savefig(os.path.join(args.output_png_dir,"mse_percentiles.png"))
-
-
-##                
-##
-##                    # fig = plt.figure()
-##                    # gs = gridspec.GridSpec(4,6)
-##                    # gs.update(wspace = 0.7,hspace=0.8)
-##                    # ax1 = plt.subplot(gs[0:2,0:3])
-##                    # ax2 = plt.subplot(gs[0:2,3:],sharey=ax1)
-##                    # ax3 = plt.subplot(gs[2:4,0:3])
-##                    # ax4 = plt.subplot(gs[2:4,3:])
-##                    # xlables = [round(i,2) for i in list(np.linspace(np.min(lon),np.max(lon),5))]
-##                    # ylabels = [round(i,2) for i  in list(np.linspace(np.max(lat),np.min(lat),5))]
-##                    # plt.setp([ax1,ax2,ax3],xticks=list(np.linspace(0,64,5)), xticklabels=xlables ,yticks=list(np.linspace(0,64,5)),yticklabels=ylabels)
-##                    # ax1.title.set_text("(a) Ground Truth")
-##                    # ax2.title.set_text("(b) SAVP")
-##                    # ax3.title.set_text("(c) Diff.")
-##                    # ax4.title.set_text("(d) MSE")
-##                    #
-##                    # ax1.xaxis.set_tick_params(labelsize=7)
-##                    # ax1.yaxis.set_tick_params(labelsize = 7)
-##                    # ax2.xaxis.set_tick_params(labelsize=7)
-##                    # ax2.yaxis.set_tick_params(labelsize = 7)
-##                    # ax3.xaxis.set_tick_params(labelsize=7)
-##                    # ax3.yaxis.set_tick_params(labelsize = 7)
-##                    #
-##                    # init_images = np.zeros((input_images_.shape[1], input_images_.shape[2]))
-##                    # print("inti images shape", init_images.shape)
-##                    # xdata, ydata = [], []
-##                    # #plot1 = ax1.imshow(init_images, cmap='jet', vmin =0, vmax = 1)
-##                    # #plot2 = ax2.imshow(init_images, cmap='jet', vmin =0, vmax = 1)
-##                    # plot1 = ax1.imshow(init_images, cmap='jet', vmin = 270, vmax = 300)
-##                    # plot2 = ax2.imshow(init_images, cmap='jet', vmin = 270, vmax = 300)
-##                    # #x = np.linspace(0, 64, 64)
-##                    # #y = np.linspace(0, 64, 64)
-##                    # #plot1 = ax1.contourf(x,y,init_images, cmap='jet', vmin = np.min(input_images), vmax = np.max(input_images))
-##                    # #plot2 = ax2.contourf(x,y,init_images, cmap='jet', vmin = np.min(input_images), vmax = np.max(input_images))
-##                    # fig.colorbar(plot1, ax=ax1).ax.tick_params(labelsize=7)
-##                    # fig.colorbar(plot2, ax=ax2).ax.tick_params(labelsize=7)
-##                    #
-##                    # cm = LinearSegmentedColormap.from_list(
-##                    #     cmap_name, "bwr", N = 5)
-##                    #
-##                    # plot3 = ax3.imshow(init_images, vmin=-20, vmax=20, cmap=cm)#cmap = 'PuBu_r',
-##                    # #plot3 = ax3.imshow(init_images, vmin = -1, vmax = 1, cmap = cm)  # cmap = 'PuBu_r',
-##                    # plot4, = ax4.plot([], [], color = "r")
-##                    # ax4.set_xlim(0, future_length-1)
-##                    # ax4.set_ylim(0, 20)
-##                    # #ax4.set_ylim(0, 0.5)
-##                    # ax4.set_xlabel("Frames", fontsize=10)
-##                    # #ax4.set_ylabel("MSE", fontsize=10)
-##                    # ax4.xaxis.set_tick_params(labelsize=7)
-##                    # ax4.yaxis.set_tick_params(labelsize=7)
-##                    #
-##                    #
-##                    # plots = [plot1, plot2, plot3, plot4]
-##                    #
-##                    # #fig.colorbar(plots[1], ax = [ax1, ax2])
-##                    #
-##                    # fig.colorbar(plots[2], ax=ax3).ax.tick_params(labelsize=7)
-##                    # #fig.colorbar(plot1[0], ax=ax1).ax.tick_params(labelsize=7)
-##                    # #fig.colorbar(plot2[1], ax=ax2).ax.tick_params(labelsize=7)
-##                    #
-##                    # def animation_sample(t):
-##                    #     input_image = input_images_[t, :, :, 0]* (321.46630859375-235.2141571044922) + 235.2141571044922
-##                    #     gen_image = gen_images_[t, :, :, 0]* (321.46630859375-235.2141571044922) + 235.2141571044922
-##                    #     diff_image = input_gen_diff[t,:,:]
-##                    #     # p = sns.lineplot(x=x,y=data,color="b")
-##                    #     # p.tick_params(labelsize=17)
-##                    #     # plt.setp(p.lines, linewidth=6)
-##                    #     plots[0].set_data(input_image)
-##                    #     plots[1].set_data(gen_image)
-##                    #     #plots[0] = ax1.contourf(x, y, input_image, cmap = 'jet', vmin = np.min(input_images),vmax = np.max(input_images))
-##                    #     #plots[1] = ax2.contourf(x, y, gen_image, cmap = 'jet', vmin = np.min(input_images),vmax = np.max(input_images))
-##                    #     plots[2].set_data(diff_image)
-##                    #
-##                    #     if t >= future_length:
-##                    #         #data = gen_mse_avg_[:t + 1]
-##                    #         # x = list(range(len(gen_mse_avg_)))[:t+1]
-##                    #         xdata.append(t-future_length)
-##                    #         print("xdata", xdata)
-##                    #         ydata.append(gen_mse_avg_[t])
-##                    #         print("ydata", ydata)
-##                    #         plots[3].set_data(xdata, ydata)
-##                    #         fig.suptitle("Predicted Frame " + str(t-future_length))
-##                    #     else:
-##                    #         #plots[3].set_data(xdata, ydata)
-##                    #         fig.suptitle("Context Frame " + str(t))
-##                    #     return plots
-##                    #
-##                    # ani = animation.FuncAnimation(fig, animation_sample, frames=len(gen_mse_avg_), interval = 1000,
-##                    #                               repeat_delay=2000)
-##                    # ani.save(os.path.join(args.output_png_dir, "Sample_" + str(name) + ".mp4"))
-##
-####                else:
-####                    pass
-##
-
-
-
-
-
-    #         # for i, gen_mse_avg_ in enumerate(gen_mse_avg):
-    #         #     ims = []
-    #         #     fig = plt.figure()
-    #         #     plt.xlim(0,len(gen_mse_avg_))
-    #         #     plt.ylim(np.min(gen_mse_avg),np.max(gen_mse_avg))
-    #         #     plt.xlabel("Frames")
-    #         #     plt.ylabel("MSE_AVG")
-    #         #     #X = list(range(len(gen_mse_avg_)))
-    #         #     #for t, gen_mse_avg_ in enumerate(gen_mse_avg):
-    #         #     def animate_metric(j):
-    #         #         data = gen_mse_avg_[:(j+1)]
-    #         #         x = list(range(len(gen_mse_avg_)))[:(j+1)]
-    #         #         p = sns.lineplot(x=x,y=data,color="b")
-    #         #         p.tick_params(labelsize=17)
-    #         #         plt.setp(p.lines, linewidth=6)
-    #         #     ani = animation.FuncAnimation(fig, animate_metric, frames=len(gen_mse_avg_), interval = 1000, repeat_delay=2000)
-    #         #     ani.save(os.path.join(args.output_png_dir, "MSE_AVG" + str(i) + ".gif"))
-    #         #
-    #         #
-    #         # for i, input_images_ in enumerate(input_images):
-    #         #     #context_images_ = (input_results['images'][i])
-    #         #     #gen_images_fname = 'gen_image_%05d_%02d.gif' % (sample_ind + i, stochastic_sample_ind)
-    #         #     ims = []
-    #         #     fig = plt.figure()
-    #         #     for t, input_image in enumerate(input_images_):
-    #         #         im = plt.imshow(input_images[i, t, :, :, 0], interpolation = 'none')
-    #         #         ttl = plt.text(1.5, 2,"Frame_" + str(t))
-    #         #         ims.append([im,ttl])
-    #         #     ani = animation.ArtistAnimation(fig, ims, interval= 1000, blit=True,repeat_delay=2000)
-    #         #     ani.save(os.path.join(args.output_png_dir,"groud_true_images_" + str(i) + ".gif"))
-    #         #     #plt.show()
-    #         #
-    #         # for i,gen_images_ in enumerate(gen_images):
-    #         #     ims = []
-    #         #     fig = plt.figure()
-    #         #     for t, gen_image in enumerate(gen_images_):
-    #         #         im = plt.imshow(gen_images[i, t, :, :, 0], interpolation = 'none')
-    #         #         ttl = plt.text(1.5, 2, "Frame_" + str(t))
-    #         #         ims.append([im, ttl])
-    #         #     ani = animation.ArtistAnimation(fig, ims, interval = 1000, blit = True, repeat_delay = 2000)
-    #         #     ani.save(os.path.join(args.output_png_dir, "prediction_images_" + str(i) + ".gif"))
-    #
-    #
-    #             # for i, gen_images_ in enumerate(gen_images):
-    #             #     #context_images_ = (input_results['images'][i] * 255.0).astype(np.uint8)
-    #             #     #gen_images_ = (gen_images_ * 255.0).astype(np.uint8)
-    #             #     #bing
-    #             #     context_images_ = (input_results['images'][i])
-    #             #     gen_images_fname = 'gen_image_%05d_%02d.gif' % (sample_ind + i, stochastic_sample_ind)
-    #             #     context_and_gen_images = list(context_images_[:context_frames]) + list(gen_images_)
-    #             #     plt.figure(figsize = (10,2))
-    #             #     gs = gridspec.GridSpec(2,10)
-    #             #     gs.update(wspace=0.,hspace=0.)
-    #             #     for t, gen_image in enumerate(gen_images_):
-    #             #         gen_image_fname_pattern = 'gen_image_%%05d_%%02d_%%0%dd.png' % max(2,len(str(len(gen_images_) - 1)))
-    #             #         gen_image_fname = gen_image_fname_pattern % (sample_ind + i, stochastic_sample_ind, t)
-    #             #         plt.subplot(gs[t])
-    #             #         plt.imshow(input_images[i, t, :, :, 0], interpolation = 'none')  # the last index sets the channel. 0 = t2
-    #             #         # plt.pcolormesh(X_test[i,t,::-1,:,0], shading='bottom', cmap=plt.cm.jet)
-    #             #         plt.tick_params(axis = 'both', which = 'both', bottom = False, top = False, left = False,
-    #             #                         right = False, labelbottom = False, labelleft = False)
-    #             #         if t == 0: plt.ylabel('Actual', fontsize = 10)
-    #             #
-    #             #         plt.subplot(gs[t + 10])
-    #             #         plt.imshow(gen_images[i, t, :, :, 0], interpolation = 'none')
-    #             #         # plt.pcolormesh(X_hat[i,t,::-1,:,0], shading='bottom', cmap=plt.cm.jet)
-    #             #         plt.tick_params(axis = 'both', which = 'both', bottom = False, top = False, left = False,
-    #             #                         right = False, labelbottom = False, labelleft = False)
-    #             #         if t == 0: plt.ylabel('Predicted', fontsize = 10)
-    #             #     plt.savefig(os.path.join(args.output_png_dir, gen_image_fname) + 'plot_' + str(i) + '.png')
-    #             #     plt.clf()
-    #
-    #             # if args.gif_length:
-    #             #     context_and_gen_images = context_and_gen_images[:args.gif_length]
-    #             # save_gif(os.path.join(args.output_gif_dir, gen_images_fname),
-    #             #          context_and_gen_images, fps=args.fps)
-    #             #
-    #             # gen_image_fname_pattern = 'gen_image_%%05d_%%02d_%%0%dd.png' % max(2, len(str(len(gen_images_) - 1)))
-    #             # for t, gen_image in enumerate(gen_images_):
-    #             #     gen_image_fname = gen_image_fname_pattern % (sample_ind + i, stochastic_sample_ind, t)
-    #             #     if gen_image.shape[-1] == 1:
-    #             #       gen_image = np.tile(gen_image, (1, 1, 3))
-    #             #     else:
-    #             #       gen_image = cv2.cvtColor(gen_image, cv2.COLOR_RGB2BGR)
-    #             #     cv2.imwrite(os.path.join(args.output_png_dir, gen_image_fname), gen_image)
diff --git a/video_prediction_savp/scripts/train_v2.py b/video_prediction_savp/scripts/train_v2.py
deleted file mode 100644
index f83192d6af7d3953c666f40cc9e6d3766a78e92e..0000000000000000000000000000000000000000
--- a/video_prediction_savp/scripts/train_v2.py
+++ /dev/null
@@ -1,362 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import argparse
-import errno
-import json
-import os
-import random
-import time
-
-import numpy as np
-import tensorflow as tf
-
-from video_prediction import datasets, models
-
-
-def add_tag_suffix(summary, tag_suffix):
-    summary_proto = tf.Summary()
-    summary_proto.ParseFromString(summary)
-    summary = summary_proto
-
-    for value in summary.value:
-        tag_split = value.tag.split('/')
-        value.tag = '/'.join([tag_split[0] + tag_suffix] + tag_split[1:])
-    return summary.SerializeToString()
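-
-# Usage note (added for clarity, hedged): add_tag_suffix() only rewrites the first path
-# component of each summary tag. For example, a serialized summary whose tag is
-# "loss/total" becomes "loss_val/total" after add_tag_suffix(summary_str, "_val").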
-
-
-def main():
-    parser = argparse.ArgumentParser()
-    parser.add_argument("--input_dir", type=str, required=True, help="either a directory containing subdirectories "
-                                                                     "train, val, test, etc, or a directory containing "
-                                                                     "the tfrecords")
-    parser.add_argument("--val_input_dir", type=str, help="directories containing the tfrecords. default: input_dir")
-    parser.add_argument("--logs_dir", default='logs', help="ignored if output_dir is specified")
-    parser.add_argument("--output_dir", help="output directory where json files, summary, model, gifs, etc are saved. "
-                                             "default is logs_dir/model_fname, where model_fname consists of "
-                                             "information from model and model_hparams")
-    parser.add_argument("--output_dir_postfix", default="")
-    parser.add_argument("--checkpoint", help="directory with checkpoint or checkpoint name (e.g. checkpoint_dir/model-200000)")
-    parser.add_argument("--resume", action='store_true', help='resume from lastest checkpoint in output_dir.')
-
-    parser.add_argument("--dataset", type=str, help="dataset class name")
-    parser.add_argument("--dataset_hparams", type=str, help="a string of comma separated list of dataset hyperparameters")
-    parser.add_argument("--dataset_hparams_dict", type=str, help="a json file of dataset hyperparameters")
-    parser.add_argument("--model", type=str, help="model class name")
-    parser.add_argument("--model_hparams", type=str, help="a string of comma separated list of model hyperparameters")
-    parser.add_argument("--model_hparams_dict", type=str, help="a json file of model hyperparameters")
-
-    parser.add_argument("--summary_freq", type=int, default=1000, help="save frequency of summaries (except for image and eval summaries) for train/validation set")
-    parser.add_argument("--image_summary_freq", type=int, default=5000, help="save frequency of image summaries for train/validation set")
-    parser.add_argument("--eval_summary_freq", type=int, default=25000, help="save frequency of eval summaries for train/validation set")
-    parser.add_argument("--accum_eval_summary_freq", type=int, default=100000, help="save frequency of accumulated eval summaries for validation set only")
-    parser.add_argument("--progress_freq", type=int, default=100, help="display progress every progress_freq steps")
-    parser.add_argument("--save_freq", type=int, default=5000, help="save frequence of model, 0 to disable")
-
-    parser.add_argument("--aggregate_nccl", type=int, default=0, help="whether to use nccl or cpu for gradient aggregation in multi-gpu training")
-    parser.add_argument("--gpu_mem_frac", type=float, default=0, help="fraction of gpu memory to use")
-    parser.add_argument("--seed", type=int)
-
-    args = parser.parse_args()
-
-    if args.seed is not None:
-        tf.set_random_seed(args.seed)
-        np.random.seed(args.seed)
-        random.seed(args.seed)
-
-    if args.output_dir is None:
-        list_depth = 0
-        model_fname = ''
-        for t in ('model=%s,%s' % (args.model, args.model_hparams)):
-            if t == '[':
-                list_depth += 1
-            if t == ']':
-                list_depth -= 1
-            if list_depth and t == ',':
-                t = '..'
-            if t in '=,':
-                t = '.'
-            if t in '[]':
-                t = ''
-            model_fname += t
-        args.output_dir = os.path.join(args.logs_dir, model_fname) + args.output_dir_postfix
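-        # Example of the mapping above (illustrative, assuming these hparams): the string
-        # "model=savp,lr=0.001" becomes the directory name "model.savp.lr.0.001", and commas
-        # inside brackets, e.g. "sizes=[8,16]", are turned into "sizes.8..16".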
-
-    if args.resume:
-        if args.checkpoint:
-            raise ValueError('resume and checkpoint cannot both be specified')
-        args.checkpoint = args.output_dir
-
-    dataset_hparams_dict = {}
-    model_hparams_dict = {}
-    if args.dataset_hparams_dict:
-        with open(args.dataset_hparams_dict) as f:
-            dataset_hparams_dict.update(json.loads(f.read()))
-    if args.model_hparams_dict:
-        with open(args.model_hparams_dict) as f:
-            model_hparams_dict.update(json.loads(f.read()))
-    if args.checkpoint:
-        checkpoint_dir = os.path.normpath(args.checkpoint)
-        if not os.path.isdir(args.checkpoint):
-            checkpoint_dir, _ = os.path.split(checkpoint_dir)
-        if not os.path.exists(checkpoint_dir):
-            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), checkpoint_dir)
-        with open(os.path.join(checkpoint_dir, "options.json")) as f:
-            print("loading options from checkpoint %s" % args.checkpoint)
-            options = json.loads(f.read())
-            args.dataset = args.dataset or options['dataset']
-            args.model = args.model or options['model']
-        try:
-            with open(os.path.join(checkpoint_dir, "dataset_hparams.json")) as f:
-                dataset_hparams_dict.update(json.loads(f.read()))
-        except FileNotFoundError:
-            print("dataset_hparams.json was not loaded because it does not exist")
-        try:
-            with open(os.path.join(checkpoint_dir, "model_hparams.json")) as f:
-                model_hparams_dict.update(json.loads(f.read()))
-        except FileNotFoundError:
-            print("model_hparams.json was not loaded because it does not exist")
-
-    print('----------------------------------- Options ------------------------------------')
-    for k, v in args._get_kwargs():
-        print(k, "=", v)
-    print('------------------------------------- End --------------------------------------')
-
-    VideoDataset = datasets.get_dataset_class(args.dataset)
-    train_dataset = VideoDataset(
-        args.input_dir,
-        mode='train',
-        hparams_dict=dataset_hparams_dict,
-        hparams=args.dataset_hparams)
-    val_dataset = VideoDataset(
-        args.val_input_dir or args.input_dir,
-        mode='val',
-        hparams_dict=dataset_hparams_dict,
-        hparams=args.dataset_hparams)
-    if val_dataset.hparams.long_sequence_length != val_dataset.hparams.sequence_length:
-        # the longer dataset is only used for the accum_eval_metrics
-        long_val_dataset = VideoDataset(
-            args.val_input_dir or args.input_dir,
-            mode='val',
-            hparams_dict=dataset_hparams_dict,
-            hparams=args.dataset_hparams)
-        long_val_dataset.set_sequence_length(val_dataset.hparams.long_sequence_length)
-    else:
-        long_val_dataset = None
-
-    variable_scope = tf.get_variable_scope()
-    variable_scope.set_use_resource(True)
-
-    VideoPredictionModel = models.get_model_class(args.model)
-    hparams_dict = dict(model_hparams_dict)
-    hparams_dict.update({
-        'context_frames': train_dataset.hparams.context_frames,
-        'sequence_length': train_dataset.hparams.sequence_length,
-        'repeat': train_dataset.hparams.time_shift,
-    })
-    model = VideoPredictionModel(
-        hparams_dict=hparams_dict,
-        hparams=args.model_hparams,
-        aggregate_nccl=args.aggregate_nccl)
-
-    batch_size = model.hparams.batch_size
-    train_tf_dataset = train_dataset.make_dataset_v2(batch_size)  # Bing: adopt the meteorological data preparation here
-    train_iterator = train_tf_dataset.make_one_shot_iterator()  # Bing: for ERA5, problems in sess.run(fetches) are likely to originate here
-    # The `Iterator.string_handle()` method returns a tensor that can be evaluated
-    # and used to feed the `handle` placeholder.
-    train_handle = train_iterator.string_handle()
-    val_tf_dataset = val_dataset.make_dataset_v2(batch_size)
-    val_iterator = val_tf_dataset.make_one_shot_iterator()
-    val_handle = val_iterator.string_handle()
-    #iterator = tf.data.Iterator.from_string_handle(
-    #    train_handle, train_tf_dataset.output_types, train_tf_dataset.output_shapes)
-    inputs = train_iterator.get_next()
-
-    # inputs comes from the training dataset by default, unless train_handle is remapped to the val_handle
-    model.build_graph(inputs, finetune=True)
-
-    if long_val_dataset is not None:
-        # separately build a model for the longer sequence.
-        # this is needed because the model doesn't support dynamic shapes.
-        long_hparams_dict = dict(hparams_dict)
-        long_hparams_dict['sequence_length'] = long_val_dataset.hparams.sequence_length
-        # use smaller batch size for the longer model to prevent running out of memory
-        long_hparams_dict['batch_size'] = model.hparams.batch_size // 2
-        long_model = VideoPredictionModel(
-            mode="test",  # to not build the losses and discriminators
-            hparams_dict=long_hparams_dict,
-            hparams=args.model_hparams,
-            aggregate_nccl=args.aggregate_nccl)
-        tf.get_variable_scope().reuse_variables()
-        long_model.build_graph(long_val_dataset.make_batch(batch_size))
-    else:
-        long_model = None
-
-    if not os.path.exists(args.output_dir):
-        os.makedirs(args.output_dir)
-    with open(os.path.join(args.output_dir, "options.json"), "w") as f:
-        f.write(json.dumps(vars(args), sort_keys=True, indent=4))
-    with open(os.path.join(args.output_dir, "dataset_hparams.json"), "w") as f:
-        f.write(json.dumps(train_dataset.hparams.values(), sort_keys=True, indent=4))
-    with open(os.path.join(args.output_dir, "model_hparams.json"), "w") as f:
-        f.write(json.dumps(model.hparams.values(), sort_keys=True, indent=4))
-
-    with tf.name_scope("parameter_count"):
-        # exclude trainable variables that are replicas (used in multi-gpu setting)
-        trainable_variables = set(tf.trainable_variables()) & set(model.saveable_variables)
-        parameter_count = tf.reduce_sum([tf.reduce_prod(tf.shape(v)) for v in trainable_variables])
-
-    saver = tf.train.Saver(var_list=model.saveable_variables, max_to_keep=2)
-
-    # None has the special meaning of evaluating at the end, so explicitly check for non-equality to zero
-    if (args.summary_freq != 0 or args.image_summary_freq != 0 or
-            args.eval_summary_freq != 0 or args.accum_eval_summary_freq != 0):
-        summary_writer = tf.summary.FileWriter(args.output_dir)
-
-    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_mem_frac, allow_growth=True)
-    config = tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)
-    global_step = tf.train.get_or_create_global_step()
-    max_steps = model.hparams.max_steps
-    with tf.Session(config=config) as sess:
-        print("parameter_count =", sess.run(parameter_count))
-        sess.run(tf.global_variables_initializer())
-        sess.run(tf.local_variables_initializer())
-        #coord = tf.train.Coordinator()
-        #threads = tf.train.start_queue_runners(sess = sess, coord = coord)
-        print("Init done: {sess.run(tf.local_variables_initializer())}%")
-        model.restore(sess, args.checkpoint)
-        print("Restore processed finished")
-        sess.run(model.post_init_ops)
-        print("Model run started")
-        val_handle_eval = sess.run(val_handle)
-        print("val handle done")
-        sess.graph.finalize()
-        print("graph inalize done")
-        start_step = sess.run(global_step)
-        print("global step done")
-
-        def should(step, freq):
-            if freq is None:
-                return (step + 1) == (max_steps - start_step)
-            else:
-                return freq and ((step + 1) % freq == 0 or (step + 1) in (0, max_steps - start_step))
-
-        def should_eval(step, freq):
-            # never run eval summaries at the beginning since it's expensive, unless it's the last iteration
-            return should(step, freq) and (step >= 0 or (step + 1) == (max_steps - start_step))
-
-        # start at one step earlier to log everything without doing any training
-        # step is relative to the start_step
-        for step in range(-1, max_steps - start_step):
-            if step == 1:
-                # skip step -1 and 0 for timing purposes (for warmstarting)
-                start_time = time.time()
-
-            fetches = {"global_step": global_step}
-            if step >= 0:
-                fetches["train_op"] = model.train_op
-            if should(step, args.progress_freq):
-                fetches['d_loss'] = model.d_loss
-                fetches['g_loss'] = model.g_loss
-                fetches['d_losses'] = model.d_losses
-                fetches['g_losses'] = model.g_losses
-                if isinstance(model.learning_rate, tf.Tensor):
-                    fetches["learning_rate"] = model.learning_rate
-            if should(step, args.summary_freq):
-                fetches["summary"] = model.summary_op
-            if should(step, args.image_summary_freq):
-                fetches["image_summary"] = model.image_summary_op
-            if should_eval(step, args.eval_summary_freq):
-                fetches["eval_summary"] = model.eval_summary_op
-
-            run_start_time = time.time()
-            results = sess.run(fetches)  # fetch the elements in the fetches dictionary
-
-            run_elapsed_time = time.time() - run_start_time
-            if run_elapsed_time > 1.5 and step > 0 and set(fetches.keys()) == {"global_step", "train_op"}:
-                print('running train_op took too long (%0.1fs)' % run_elapsed_time)
-
-            if (should(step, args.summary_freq) or
-                    should(step, args.image_summary_freq) or
-                    should_eval(step, args.eval_summary_freq)):
-                val_fetches = {"global_step": global_step}
-                if should(step, args.summary_freq):
-                    val_fetches["summary"] = model.summary_op
-                if should(step, args.image_summary_freq):
-                    val_fetches["image_summary"] = model.image_summary_op
-                if should_eval(step, args.eval_summary_freq):
-                    val_fetches["eval_summary"] = model.eval_summary_op
-                val_results = sess.run(val_fetches, feed_dict={train_handle: val_handle_eval})
-                for name, summary in val_results.items():
-                    if name == 'global_step':
-                        continue
-                    val_results[name] = add_tag_suffix(summary, '_1')
-
-            if should(step, args.summary_freq):
-                print("recording summary")
-                summary_writer.add_summary(results["summary"], results["global_step"])
-                summary_writer.add_summary(val_results["summary"], val_results["global_step"])
-                print("done")
-            if should(step, args.image_summary_freq):
-                print("recording image summary")
-                summary_writer.add_summary(results["image_summary"], results["global_step"])
-                summary_writer.add_summary(val_results["image_summary"], val_results["global_step"])
-                print("done")
-            if should_eval(step, args.eval_summary_freq):
-                print("recording eval summary")
-                summary_writer.add_summary(results["eval_summary"], results["global_step"])
-                summary_writer.add_summary(val_results["eval_summary"], val_results["global_step"])
-                print("done")
-            if should_eval(step, args.accum_eval_summary_freq):
-                val_datasets = [val_dataset]
-                val_models = [model]
-                if long_model is not None:
-                    val_datasets.append(long_val_dataset)
-                    val_models.append(long_model)
-                for i, (val_dataset_, val_model) in enumerate(zip(val_datasets, val_models)):
-                    sess.run(val_model.accum_eval_metrics_reset_op)
-                    # traverse (roughly, up to rounding based on the batch size) the whole validation dataset
-                    accum_eval_summary_num_updates = val_dataset_.num_examples_per_epoch() // val_model.hparams.batch_size
-                    val_fetches = {"global_step": global_step, "accum_eval_summary": val_model.accum_eval_summary_op}
-                    for update_step in range(accum_eval_summary_num_updates):
-                        print('evaluating %d / %d' % (update_step + 1, accum_eval_summary_num_updates))
-                        val_results = sess.run(val_fetches, feed_dict={train_handle: val_handle_eval})
-                    accum_eval_summary = add_tag_suffix(val_results["accum_eval_summary"], '_%d' % (i + 1))
-                    print("recording accum eval summary")
-                    summary_writer.add_summary(accum_eval_summary, val_results["global_step"])
-                    print("done")
-            if (should(step, args.summary_freq) or should(step, args.image_summary_freq) or
-                    should_eval(step, args.eval_summary_freq) or should_eval(step, args.accum_eval_summary_freq)):
-                summary_writer.flush()
-            if should(step, args.progress_freq):
-                # global_step will have the correct step count if we resume from a checkpoint
-                # global step is read before it's incremented
-                steps_per_epoch = train_dataset.num_examples_per_epoch() / batch_size
-                train_epoch = results["global_step"] / steps_per_epoch
-                print("progress  global step %d  epoch %0.1f" % (results["global_step"] + 1, train_epoch))
-                if step > 0:
-                    elapsed_time = time.time() - start_time
-                    average_time = elapsed_time / step
-                    images_per_sec = batch_size / average_time
-                    remaining_time = (max_steps - (start_step + step + 1)) * average_time
-                    print("          image/sec %0.1f  remaining %dm (%0.1fh) (%0.1fd)" %
-                          (images_per_sec, remaining_time / 60, remaining_time / 60 / 60, remaining_time / 60 / 60 / 24))
-
-                if results['d_losses']:
-                    print("d_loss", results["d_loss"])
-                for name, loss in results['d_losses'].items():
-                    print("  ", name, loss)
-                if results['g_losses']:
-                    print("g_loss", results["g_loss"])
-                for name, loss in results['g_losses'].items():
-                    print("  ", name, loss)
-                if isinstance(model.learning_rate, tf.Tensor):
-                    print("learning_rate", results["learning_rate"])
-
-            if should(step, args.save_freq):
-                print("saving model to", args.output_dir)
-                saver.save(sess, os.path.join(args.output_dir, "model"), global_step=global_step)
-                print("done")
-
-if __name__ == '__main__':
-    main()
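For reference, the training loop removed above feeds both the training and the validation pipeline through a single graph by means of tf.data string handles (see the `Iterator.string_handle()` comment and the commented-out `from_string_handle` call). Below is a minimal TF1-style sketch of that feeding pattern, not code from this repository; the toy datasets and the names `train_ds`, `val_ds` and `handle` are purely illustrative.

    import tensorflow as tf

    # two independent input pipelines that yield identically shaped batches
    train_ds = tf.data.Dataset.from_tensor_slices(tf.zeros([8, 4])).batch(2).repeat()
    val_ds = tf.data.Dataset.from_tensor_slices(tf.ones([8, 4])).batch(2).repeat()
    train_it = train_ds.make_one_shot_iterator()
    val_it = val_ds.make_one_shot_iterator()

    # a string placeholder decides at run time which iterator feeds the graph
    handle = tf.placeholder(tf.string, shape=[])
    iterator = tf.data.Iterator.from_string_handle(
        handle, train_ds.output_types, train_ds.output_shapes)
    batch = iterator.get_next()

    with tf.Session() as sess:
        train_handle, val_handle = sess.run([train_it.string_handle(), val_it.string_handle()])
        print(sess.run(batch, feed_dict={handle: train_handle}))  # training batch
        print(sess.run(batch, feed_dict={handle: val_handle}))    # validation batch

In the removed script the `from_string_handle` variant is commented out and the validation handle is instead fed to `train_iterator.string_handle()` at the corresponding `sess.run` calls.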
diff --git a/video_prediction_savp/utils/metadata.py b/video_prediction_savp/utils/metadata.py
deleted file mode 100644
index c4ef4f36ef68dd70ae129706211a4468f60a9404..0000000000000000000000000000000000000000
--- a/video_prediction_savp/utils/metadata.py
+++ /dev/null
@@ -1,365 +0,0 @@
-""" 
-Class to retrieve and handle meta-data
-"""
-
-import os
-import sys
-import time
-import numpy as np
-import json
-from netCDF4 import Dataset
-from general_utils import is_integer, add_str_to_path
-
-class MetaData:
-    """
-     Class for handling, storing and retrieving meta-data
-    """
-    
-    def __init__(self,json_file=None,suffix_indir=None,exp_id=None,data_filename=None,slices=None,variables=None):
-
-        """
-         Initializes MetaData instance by reading a corresponding json-file or by handling arguments of the preprocessing step
-         (i.e. exemplary input file, slices defining region of interest, input variables)
-        :param json_file: already existing json-file with metadata, if present the following arguments are not needed
-        :param suffix_indir: suffix of directory where processed data is stored for running the models
-        :param exp_id: experiment identifier
-        :param data_filename: name of netCDF-file serving as base for metadata retrieval
-        :param slices: indices defining the region of interest
-        :param variables: predictor variables
-        """
-        
-        method_name = MetaData.__init__.__name__+" of Class "+MetaData.__name__
-        
-        if not json_file is None: 
-            print(json_file)
-            print(type(json_file))
-            MetaData.get_metadata_from_file(self,json_file)
-            
-        else:
-            # No dictionary from json-file available, all other arguments have to be set
-            if not suffix_indir:
-                raise TypeError(method_name+": 'suffix_indir'-argument is required if 'json_file' is not passed.")
-            else:
-                if not isinstance(suffix_indir,str):
-                    raise TypeError(method_name+": 'suffix_indir'-argument must be a string.")
-
-            if not exp_id:
-                raise TypeError(method_name+": 'exp_id'-argument is required if 'json_file' is not passed.")
-            else:
-                if not isinstance(exp_id,str):
-                    raise TypeError(method_name+": 'exp_id'-argument must be a string.")
-            
-            if not data_filename:
-                raise TypeError(method_name+": 'data_filename'-argument is required if 'json_file' is not passed.")
-            else:
-                if not isinstance(data_filename,str):
-                    raise TypeError(method_name+": 'data_filename'-argument must be a string.")
-                
-            if not slices:
-                raise TypeError(method_name+": 'slices'-argument is required if 'json_file' is not passed.")
-            else:
-                if not isinstance(slices,dict):
-                    raise TypeError(method_name+": 'slices'-argument must be a dictionary.")
-            
-            if not variables:
-                raise TypeError(method_name+": 'variables'-argument is required if 'json_file' is not passed.")
-            else:
-                if not isinstance(variables,list):
-                    raise TypeError(method_name+": 'variables'-argument must be a list.")       
-            
-            MetaData.get_and_set_metadata_from_file(self,suffix_indir,exp_id,data_filename,slices,variables)
-            
-            MetaData.write_metadata_to_file(self)
-            
-
-    def get_and_set_metadata_from_file(self,suffix_indir,exp_id,datafile_name,slices,variables):
-        """
-         Retrieves several metadata attributes from the netCDF-file and sets the corresponding class instance attributes.
-         Besides, the name of the experiment directory is constructed following the naming convention (see below)
-        
-         Naming convention:
-         [model_base]_Y[yyyy]to[yyyy]M[mm]to[mm]-[nx]x[ny]-[nnnn]N[eeee]E-[var1]_[var2]_(...)_[varN]
-         ---------------- Given ----------------|---------------- Created dynamically --------------
-        
-         Note that the model-base as well as the date-identifiers must already be included in suffix_indir.
-        """
-        
-        method_name = MetaData.get_and_set_metadata_from_file.__name__+" of Class "+MetaData.__name__
-        
-        if not suffix_indir: raise ValueError(method_name+": suffix_indir must be a non-empty path.")
-    
-        # retrieve required information from file 
-        flag_coords = ["N", "E"]
- 
-        print("Retrieve metadata based on file: '"+datafile_name+"'")
-        try:
-            datafile = Dataset(datafile_name,'r')
-        except Exception:
-            print(method_name + ": Error when handling data file: '"+datafile_name+"'.")
-            sys.exit(1)
-        
-        # Check if all requested variables can be obtained from datafile
-        MetaData.check_datafile(datafile,variables)
-        self.varnames    = variables
-        
-        self.nx, self.ny = np.abs(slices['lon_e'] - slices['lon_s']), np.abs(slices['lat_e'] - slices['lat_s'])    
-        sw_c             = [float(datafile.variables['lat'][slices['lat_e']-1]),float(datafile.variables['lon'][slices['lon_s']])]                # meridional axis lat is oriented from north to south (i.e. monotonically decreasing)
-        self.sw_c        = sw_c
-        self.lat = datafile.variables['lat'][slices['lat_s']:slices['lat_e']]
-        self.lon = datafile.variables['lon'][slices['lon_s']:slices['lon_e']]
-        
-        # Now start constructing expdir-string
-        # switch sign and coordinate-flags to avoid negative values appearing in expdir-name
-        if sw_c[0] < 0.:
-            sw_c[0] = np.abs(sw_c[0])
-            flag_coords[0] = "S"
-        if sw_c[1] < 0.:
-            sw_c[1] = np.abs(sw_c[1])
-            flag_coords[1] = "W"
-        nvar     = len(variables)
-        
-        # splitting has to be done in order to retrieve the expname-suffix (and the year if required)
-        path_parts = os.path.split(suffix_indir.rstrip("/"))
-        
-        if (is_integer(path_parts[1])):
-            year = path_parts[1]
-            path_parts = os.path.split(path_parts[0].rstrip("/"))
-        else:
-            year = ""
-        
-        expdir, expname = path_parts[0], path_parts[1] 
-
-        # extend expname successively (split up for better readability)
-        expname += "-"+str(self.nx) + "x" + str(self.ny)
-        expname += "-"+(("{0: 05.2f}"+flag_coords[0]+"{1:05.2f}"+flag_coords[1]).format(*sw_c)).strip().replace(".","")+"-"  
-        
-        # reduced for-loop length as last variable-name is not followed by an underscore (see above)
-        for i in range(nvar-1):
-            expname += variables[i]+"_"
-        expname += variables[nvar-1]
-        
-        self.expname = expname
-        self.expdir  = expdir
-        self.exp_id  = exp_id
-        self.status  = ""                   # uninitialized (is set when metadata is written/compared to/with json-file, see write_metadata_to_file-method)
-
-    # ML 2020/04/24 E         
-    
-    def write_metadata_to_file(self,dest_dir = None):
-        
-        """
-         Write meta data attributes of class instance to json-file.
-        """
-        
-        method_name = MetaData.write_metadata_to_file.__name__+" of Class "+MetaData.__name__
-        # actual work:
-        meta_dict = {"expname": self.expname,
-                     "expdir" : self.expdir,
-                     "exp_id" : self.exp_id}
-        
-        meta_dict["sw_corner_frame"] = {
-            "lat" : np.around(self.sw_c[0],decimals=2),
-            "lon" : np.around(self.sw_c[1],decimals=2)
-            }
-        
-        meta_dict["coordinates"] = {
-            "lat" : np.around(self.lat,decimals=2).tolist(),
-            "lon" : np.around(self.lon,decimals=2).tolist()
-            }
-            
-        meta_dict["frame_size"] = {
-            "nx" : int(self.nx),
-            "ny" : int(self.ny)
-            }
-        
-        meta_dict["variables"] = []
-        for i in range(len(self.varnames)):
-            #print(self.varnames[i])
-            meta_dict["variables"].append( 
-                    {"var"+str(i+1) : self.varnames[i]})
-        
-        # create directory if required
-        if dest_dir is None: 
-            dest_dir = os.path.join(self.expdir,self.expname)
-        if not os.path.exists(dest_dir):
-            print("Created experiment directory: '"+self.expdir+"'")
-            os.makedirs(dest_dir,exist_ok=True)            
-            
-        meta_fname = os.path.join(dest_dir,"metadata.json")
-
-        if os.path.exists(meta_fname):                      # check if a metadata-file already exists and check its content
-            print(method_name+": json-file ('"+meta_fname+"' already exists. Its content will be checked...")
-            self.status = "old"                             # set status to old in order to prevent repeated modification of shell-/Batch-scripts
-            with open(meta_fname,'r') as js_file:
-                dict_dupl = json.load(js_file)
-                
-                if dict_dupl != meta_dict:
-                    meta_fname_dbg = os.path.join(dest_dir,"metadata_debug.json")
-                    print(method_name+": Already existing metadata (see '"+meta_fname+"') do not fit data being processed right now (see '" \
-                          +meta_fname_dbg+"'. Ensure a common data base.")
-                    with open(meta_fname_dbg,'w') as js_file:
-                        json.dump(meta_dict,js_file)                         
-                    raise ValueError
-                else: #do not need to do anything
-                    pass
-        else:
-            # write dictionary to file
-            print(method_name+": Write dictionary to json-file: '"+meta_fname+"'")
-            with open(meta_fname,'w') as js_file:
-                json.dump(meta_dict,js_file)
-            self.status = "new"                             # set status to new in order to trigger modification of shell-/Batch-scripts
-        
-    def get_metadata_from_file(self,js_file):
-        
-        """
-         Retrieves meta data attributes from json-file
-        """
-        
-        with open(js_file) as js_file:                
-            dict_in = json.load(js_file)
-            
-            self.expdir = dict_in["expdir"]
-            if "exp_id" in dict_in:
-                self.exp_id = dict_in["exp_id"]
-            
-            self.sw_c       = [dict_in["sw_corner_frame"]["lat"],dict_in["sw_corner_frame"]["lon"] ]
-            self.lat        = dict_in["coordinates"]["lat"]
-            self.lon        = dict_in["coordinates"]["lon"]
-            
-            self.nx         = dict_in["frame_size"]["nx"]
-            self.ny         = dict_in["frame_size"]["ny"]
-            # dict_in["variables"] is a list like [{var1: varname1},{var2: varname2},...]
-            list_of_dict_aux = dict_in["variables"] 
-            # iterate through the list with an integer ivar
-            # note: the naming of the variables starts with var1, thus add 1 to the iterator
-            self.variables = [list_of_dict_aux[ivar]["var"+str(ivar+1)] for ivar in range(len(list_of_dict_aux))]
-            
-    def write_dirs_to_batch_scripts(self,batch_script):
-        
-        """
-         Expands ('known') directory-variables in batch_script by expdir-attribute of class instance
-        """
-        
-        paths_to_mod = ["source_dir=","destination_dir=","checkpoint_dir=","results_dir="]      # known directory-variables in batch-scripts
-
-        # For backward compatibility:
-        # Check if exp_id (if present) needs to be added to batch_script in order to access the file
-        if hasattr(self,"exp_id"):
-            sep_idx = batch_script.index(".sh")
-            batch_script = batch_script[:sep_idx] + "_" + self.exp_id + batch_script[sep_idx:]
-
-        with open(batch_script,'r') as file:
-            data = file.readlines()
-            
-        nlines = len(data)
-        matched_lines = [iline for iline in range(nlines) if any(str_id in data[iline] for str_id in paths_to_mod)]   # list of line-number indices to be modified 
-
-        for i in matched_lines:
-            data[i] = add_str_to_path(data[i],self.expname)
-
-        
-        with open(batch_script,'w') as file:
-            file.writelines(data)
-    
-    @staticmethod
-    def write_destdir_jsontmp(dest_dir, tmp_dir = None):        
-        """
-          Writes dest_dir to temporary json-file (temp.json) stored in the current working directory.
-          To be executed by Master node in parallel mode.
-        """
-        
-        if not tmp_dir: tmp_dir = os.getcwd()
-        
-        file_tmp = os.path.join(tmp_dir,"temp.json")
-        dict_tmp = {"destination_dir": dest_dir}
-        
-        with open(file_tmp,"w") as js_file:
-            print("Save destination_dir-variable in temporary json-file: '"+file_tmp+"'")
-            json.dump(dict_tmp,js_file)
-            
-    @staticmethod
-    def get_destdir_jsontmp(tmp_dir = None):
-        """
-          Retrieves dest_dir from temporary json-file which is expected to exist in the current working directory and returns it.
-        """
-        
-        method_name = MetaData.get_destdir_jsontmp.__name__+" of Class "+MetaData.__name__
-
-        if not tmp_dir: tmp_dir = os.getcwd()
-        
-        file_tmp = os.path.join(tmp_dir,"temp.json")
-        
-        try:
-            with open(file_tmp,"r") as js_file:
-                dict_tmp = json.load(js_file)
-        except Exception:
-            print(method_name+": Could not open requested json-file '"+file_tmp+"'")
-            sys.exit(1)
-            
-        if not "destination_dir" in dict_tmp.keys():
-            raise Exception(method_name+": Could not find 'destination_dir' in dictionary obtained from "+file_tmp)
-        else:
-            return(dict_tmp.get("destination_dir"))
-    
-    @staticmethod
-    def wait_for_jsontmp(tmp_dir = None, waittime = 10, delay=0.5):
-        """
-          Waits at most 'waittime' seconds until the temp.json-file becomes available
-        """
-        
-        method_name = MetaData.wait_for_jsontmp.__name__+" of Class "+MetaData.__name__
-        
-        if not tmp_dir: tmp_dir = os.getcwd()
-        
-        file_tmp = os.path.join(tmp_dir,"temp.json")
-                                
-        counter_max = waittime/delay
-        counter = 0
-        status  = "not_ok" 
-        
-        while (counter <= counter_max):
-            if os.path.isfile(file_tmp):
-                status = "ok"
-                break
-            else:
-                time.sleep(delay)
-            
-            counter += 1
-                                
-        if status != "ok": raise IOError(method_name+": '"+file_tmp+ \
-                           "' does not exist after waiting for "+str(waittime)+" sec.") 
-    
-    
-    @staticmethod
-    def issubset(a,b):
-        """
-        Checks if all elements of a exist in b or vice versa (depends on the length of the corresponding lists/sets)
-        """  
-        
-        if len(a) > len(b):
-            return(set(b).issubset(set(a)))
-        elif len(b) >= len(a):
-            return(set(a).issubset(set(b)))
-    
-    @staticmethod
-    def check_datafile(datafile,varnames):
-        """
-          Checks if all varnames can be found in datafile
-        """
-        
-        if not MetaData.issubset(varnames,datafile.variables.keys()):
-            for i in range(len(varnames)):
-                if not varnames[i] in datafile.variables.keys():
-                    print("Variable '"+varnames[i]+"' not found in the given datafile.")
-            raise ValueError("Could not find the above mentioned variables.")
-        else:
-            pass
-
-
-        
-# ----------------------------------- end of class MetaData -----------------------------------
-
-                       
-    
-        
-        
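For reference, the experiment-directory naming convention documented in the removed `get_and_set_metadata_from_file` method can be reproduced with a few lines of plain Python. The concrete values below (frame size, south-west corner and variable list) are invented examples and not taken from the repository.

    # rebuild the expname suffix: [given part]-[nx]x[ny]-[nnnn]N[eeee]E-[var1]_..._[varN]
    nx, ny = 160, 128                       # frame size (invented example)
    sw_c = [38.4, 0.0]                      # south-west corner (lat, lon), here on the N/E side
    flag_coords = ["N", "E"]
    variables = ["T2", "MSL", "gph500"]

    expname = "era5-Y2010to2017M01to12"     # given part: model base and date identifiers
    expname += "-" + str(nx) + "x" + str(ny)
    expname += "-" + ("{0: 05.2f}{1}{2:05.2f}{3}".format(
        sw_c[0], flag_coords[0], sw_c[1], flag_coords[1])).strip().replace(".", "")
    expname += "-" + "_".join(variables)
    print(expname)  # era5-Y2010to2017M01to12-160x128-3840N0000E-T2_MSL_gph500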
diff --git a/video_prediction_savp/Dockerfiles/Dockerfile_base b/video_prediction_tools/Dockerfiles/Dockerfile_base
similarity index 100%
rename from video_prediction_savp/Dockerfiles/Dockerfile_base
rename to video_prediction_tools/Dockerfiles/Dockerfile_base
diff --git a/video_prediction_savp/Dockerfiles/Dockerfile_tf b/video_prediction_tools/Dockerfiles/Dockerfile_tf
similarity index 100%
rename from video_prediction_savp/Dockerfiles/Dockerfile_tf
rename to video_prediction_tools/Dockerfiles/Dockerfile_tf
diff --git a/video_prediction_savp/HPC_scripts/DataExtraction_template.sh b/video_prediction_tools/HPC_scripts/data_extraction_era5_template.sh
similarity index 83%
rename from video_prediction_savp/HPC_scripts/DataExtraction_template.sh
rename to video_prediction_tools/HPC_scripts/data_extraction_era5_template.sh
index a80a3b8779908fc51121c6682817f20ec197a327..e00e3591f09e6c7feca29fc3dbf32185cab0a64b 100644
--- a/video_prediction_savp/HPC_scripts/DataExtraction_template.sh
+++ b/video_prediction_tools/HPC_scripts/data_extraction_era5_template.sh
@@ -5,9 +5,9 @@
 #SBATCH --ntasks=13
 ##SBATCH --ntasks-per-node=13
 #SBATCH --cpus-per-task=1
-#SBATCH --output=DataExtraction-out.%j
-#SBATCH --error=DataExtraction-err.%j
-#SBATCH --time=05:00:00
+#SBATCH --output=data_extraction_era5-out.%j
+#SBATCH --error=data_extraction_era5-err.%j
+#SBATCH --time=00:20:00
 #SBATCH --partition=devel
 #SBATCH --mail-type=ALL
 #SBATCH --mail-user=b.gong@fz-juelich.de
@@ -42,7 +42,7 @@ dest_dir="/p/scratch/deepacf/video_prediction_shared_folder/extractedData/"
 year="2010"
 
 # Run data extraction
-srun python ../../workflow_parallel_frame_prediction/DataExtraction/mpi_stager_v2.py --source_dir ${source_dir}/${year}/ --destination_dir ${dest_dir}/${year}/
+srun python ../main_scripts/main_data_extraction.py  --source_dir ${source_dir}/${year}/ --destination_dir ${dest_dir}/${year}/
 
 
 
diff --git a/video_prediction_savp/HPC_scripts/DataPreprocess_template.sh b/video_prediction_tools/HPC_scripts/preprocess_data_era5_step1_template.sh
similarity index 87%
rename from video_prediction_savp/HPC_scripts/DataPreprocess_template.sh
rename to video_prediction_tools/HPC_scripts/preprocess_data_era5_step1_template.sh
index b686976ea8dbb3e00feffe1c0f26180f463b9db3..4e71142f23e4f701f1a3ef5fedd5074cafbf247c 100644
--- a/video_prediction_savp/HPC_scripts/DataPreprocess_template.sh
+++ b/video_prediction_tools/HPC_scripts/preprocess_data_era5_step1_template.sh
@@ -41,12 +41,12 @@ declare -a years=(
                  "2017"
                   )
 
-max_year=`echo "${years[*]}" | sort -nr | head -n1`
-min_year=`echo "${years[*]}" | sort -nr | tail -n1`
+max_year=$( printf "%d\n" "${years[@]}" | sort -n | tail -1 )
+min_year=$( printf "%d\n" "${years[@]}" | sort -nr | tail -1 )
 # set some paths
 # note, that destination_dir is used during runtime to set a proper experiment directory
 exp_id=xxx                                          # experiment identifier is set by 'generate_workflow_runscripts.sh'
-source_dir=/p/project/deepacf/deeprain/video_prediction_shared_folder/extractedData
+source_dir=/p/scratch/deepacf/video_prediction_shared_folder/extractedData
 destination_dir=/p/project/deepacf/deeprain/video_prediction_shared_folder/preprocessedData/era5-Y${min_year}to${max_year}M01to12
 script_dir=`pwd`
 
@@ -54,7 +54,7 @@ script_dir=`pwd`
 for year in "${years[@]}";     do 
         echo "Year $year"
 	echo "source_dir ${source_dir}/${year}"
-	srun python ../../workflow_parallel_frame_prediction/DataPreprocess/mpi_stager_v2_process_netCDF.py \
+	srun python ../main_scripts/main_preprocess_data_step1.py \
         --source_dir ${source_dir} -scr_dir ${script_dir} -exp_id ${exp_id} \
         --destination_dir ${destination_dir} --years ${year} --vars T2 MSL gph500 --lat_s 74 --lat_e 202 --lon_s 550 --lon_e 710     
     done
diff --git a/video_prediction_savp/HPC_scripts/DataPreprocess2tf_template.sh b/video_prediction_tools/HPC_scripts/preprocess_data_era5_step2_template.sh
similarity index 88%
rename from video_prediction_savp/HPC_scripts/DataPreprocess2tf_template.sh
rename to video_prediction_tools/HPC_scripts/preprocess_data_era5_step2_template.sh
index e953b5bc3fd2a836a74b647c1066735d19e39640..b409fc408265397d9043c600dd91d9c38af6f528 100644
--- a/video_prediction_savp/HPC_scripts/DataPreprocess2tf_template.sh
+++ b/video_prediction_tools/HPC_scripts/preprocess_data_era5_step2_template.sh
@@ -37,4 +37,4 @@ source_dir=/p/project/deepacf/deeprain/video_prediction_shared_folder/preprocess
 destination_dir=/p/project/deepacf/deeprain/video_prediction_shared_folder/preprocessedData/
 
 # run Preprocessing (step 2 where Tf-records are generated)
-srun python ../video_prediction/datasets/era5_dataset_v2.py ${source_dir}/pickle ${destination_dir}/tfrecords -vars T2 MSL gph500 -height 128 -width 160 -seq_length 20 
+srun python ../main_scripts/main_preprocess_data_step2.py ${source_dir}/pickle ${destination_dir}/tfrecords -vars T2 MSL gph500 -height 128 -width 160 -seq_length 20 
diff --git a/video_prediction_savp/HPC_scripts/DataPreprocess2tf_movingmnist_template.sh b/video_prediction_tools/HPC_scripts/preprocess_data_moving_mnist_template.sh
similarity index 100%
rename from video_prediction_savp/HPC_scripts/DataPreprocess2tf_movingmnist_template.sh
rename to video_prediction_tools/HPC_scripts/preprocess_data_moving_mnist_template.sh
diff --git a/video_prediction_savp/HPC_scripts/train_era5_template.sh b/video_prediction_tools/HPC_scripts/train_model_era5_template.sh
similarity index 83%
rename from video_prediction_savp/HPC_scripts/train_era5_template.sh
rename to video_prediction_tools/HPC_scripts/train_model_era5_template.sh
index 5342c32a8883c6c0e1534b5a05ef9e59d9c92ffa..19af1297cae1869d4d88cf7748fee71c4c4216d4 100644
--- a/video_prediction_savp/HPC_scripts/train_era5_template.sh
+++ b/video_prediction_tools/HPC_scripts/train_model_era5_template.sh
@@ -41,9 +41,9 @@ destination_dir=/p/project/deepacf/deeprain/video_prediction_shared_folder/model
 # valid identifiers for model-argument are: convLSTM, savp, mcnet and vae
 model=convLSTM
 model_hparams=../hparams/era5/${model}/model_hparams.json
-destination_dir=${destination_dir}/${model}/"$(date +"%Y%m%dT%H%M")_"$USER"/"
+destination_dir_full=${destination_dir}/${model}/"$(date +"%Y%m%dT%H%M")_"$USER"/"
 
 # run training
-srun python ../scripts/train_dummy.py --input_dir  ${source_dir}/tfrecords/ --dataset era5  --model ${model} --model_hparams_dict ${model_hparams} --output_dir ${destination_dir}
+srun python ../main_scripts/main_train_models.py --input_dir  ${source_dir}/tfrecords/ --dataset era5  --model ${model} --model_hparams_dict ${model_hparams} --output_dir ${destination_dir_full}/
 
  
diff --git a/video_prediction_savp/HPC_scripts/train_movingmnist_template.sh b/video_prediction_tools/HPC_scripts/train_model_moving_mnist_template.sh
similarity index 100%
rename from video_prediction_savp/HPC_scripts/train_movingmnist_template.sh
rename to video_prediction_tools/HPC_scripts/train_model_moving_mnist_template.sh
diff --git a/video_prediction_savp/HPC_scripts/generate_era5_template.sh b/video_prediction_tools/HPC_scripts/visualize_postprocess_era5_template.sh
similarity index 78%
rename from video_prediction_savp/HPC_scripts/generate_era5_template.sh
rename to video_prediction_tools/HPC_scripts/visualize_postprocess_era5_template.sh
index 99f46ffb2d21449eb4157b2d3dc08b0c547e70d6..b2531f5644891a3144134f8fc4632d926b943c92 100644
--- a/video_prediction_savp/HPC_scripts/generate_era5_template.sh
+++ b/video_prediction_tools/HPC_scripts/visualize_postprocess_era5_template.sh
@@ -41,10 +41,9 @@ results_dir=/p/project/deepacf/deeprain/video_prediction_shared_folder/results/
 
 # name of model
 model=convLSTM
-
+exp=[specify experiment name]
 # run postprocessing/generation of model results including evaluation metrics
-srun python -u ../scripts/generate_transfer_learning_finetune.py \
---input_dir ${source_dir}/tfrecords --dataset_hparams sequence_length=20 --checkpoint  ${checkpoint_dir}/${model} \
---mode test --model ${model} --results_dir ${results_dir}/${model}/ --batch_size 2 --dataset era5   > generate_era5-out.out
+srun python -u ../main_scripts/main_visualize_postprocess.py \
+--input_dir ${source_dir} --dataset_hparams sequence_length=20 --checkpoint  ${checkpoint_dir}/${model}/${exp} \
+--mode test --model ${model} --results_dir ${results_dir}/${model}/${exp}/ --batch_size 2 --dataset era5   > generate_era5-out.out
 
-#srun  python scripts/train.py --input_dir data/era5 --dataset era5  --model savp --model_hparams_dict hparams/kth/ours_savp/model_hparams.json --output_dir logs/era5/ours_savp
diff --git a/video_prediction_savp/HPC_scripts/generate_movingmnist_template.sh b/video_prediction_tools/HPC_scripts/visualize_postprocess_moving_mnist_template.sh
similarity index 100%
rename from video_prediction_savp/HPC_scripts/generate_movingmnist_template.sh
rename to video_prediction_tools/HPC_scripts/visualize_postprocess_moving_mnist_template.sh
diff --git a/video_prediction_savp/LICENSE b/video_prediction_tools/LICENSE
similarity index 100%
rename from video_prediction_savp/LICENSE
rename to video_prediction_tools/LICENSE
diff --git a/video_prediction_savp/Zam347_scripts/DataExtraction_template.sh b/video_prediction_tools/Zam347_scripts/data_extraction_era5_template.sh
similarity index 100%
rename from video_prediction_savp/Zam347_scripts/DataExtraction_template.sh
rename to video_prediction_tools/Zam347_scripts/data_extraction_era5_template.sh
diff --git a/video_prediction_savp/Zam347_scripts/DataPreprocess_template.sh b/video_prediction_tools/Zam347_scripts/preprocess_data_era5_step1_template.sh
similarity index 100%
rename from video_prediction_savp/Zam347_scripts/DataPreprocess_template.sh
rename to video_prediction_tools/Zam347_scripts/preprocess_data_era5_step1_template.sh
diff --git a/video_prediction_savp/Zam347_scripts/DataPreprocess2tf_template.sh b/video_prediction_tools/Zam347_scripts/preprocess_data_era5_step2_template.sh
similarity index 100%
rename from video_prediction_savp/Zam347_scripts/DataPreprocess2tf_template.sh
rename to video_prediction_tools/Zam347_scripts/preprocess_data_era5_step2_template.sh
diff --git a/video_prediction_savp/Zam347_scripts/train_era5_template.sh b/video_prediction_tools/Zam347_scripts/train_model_era5_template.sh
similarity index 100%
rename from video_prediction_savp/Zam347_scripts/train_era5_template.sh
rename to video_prediction_tools/Zam347_scripts/train_model_era5_template.sh
diff --git a/video_prediction_savp/Zam347_scripts/generate_era5_template.sh b/video_prediction_tools/Zam347_scripts/visualize_postprocess_era5_template.sh
similarity index 100%
rename from video_prediction_savp/Zam347_scripts/generate_era5_template.sh
rename to video_prediction_tools/Zam347_scripts/visualize_postprocess_era5_template.sh
diff --git a/workflow_parallel_frame_prediction/DataExtraction/prepare_era5_data.py b/video_prediction_tools/data_preprocess/prepare_era5_data.py
similarity index 100%
rename from workflow_parallel_frame_prediction/DataExtraction/prepare_era5_data.py
rename to video_prediction_tools/data_preprocess/prepare_era5_data.py
diff --git a/workflow_parallel_frame_prediction/DataPreprocess/process_netCDF_v2.py b/video_prediction_tools/data_preprocess/process_netCDF_v2.py
similarity index 100%
rename from workflow_parallel_frame_prediction/DataPreprocess/process_netCDF_v2.py
rename to video_prediction_tools/data_preprocess/process_netCDF_v2.py
diff --git a/video_prediction_savp/video_prediction/datasets/era5_dataset_v2_anomaly.py b/video_prediction_tools/deprecate/datasets/era5_dataset_v2_anomaly.py
similarity index 100%
rename from video_prediction_savp/video_prediction/datasets/era5_dataset_v2_anomaly.py
rename to video_prediction_tools/deprecate/datasets/era5_dataset_v2_anomaly.py
diff --git a/video_prediction_savp/helper/helper.py b/video_prediction_tools/deprecate/helper/helper.py
similarity index 100%
rename from video_prediction_savp/helper/helper.py
rename to video_prediction_tools/deprecate/helper/helper.py
diff --git a/video_prediction_savp/pretrained_models/download_model.sh b/video_prediction_tools/deprecate/pretrained_models/download_model.sh
similarity index 100%
rename from video_prediction_savp/pretrained_models/download_model.sh
rename to video_prediction_tools/deprecate/pretrained_models/download_model.sh
diff --git a/video_prediction_savp/scripts/combine_results.py b/video_prediction_tools/deprecate/scripts/combine_results.py
similarity index 100%
rename from video_prediction_savp/scripts/combine_results.py
rename to video_prediction_tools/deprecate/scripts/combine_results.py
diff --git a/video_prediction_savp/scripts/evaluate.py b/video_prediction_tools/deprecate/scripts/evaluate.py
similarity index 100%
rename from video_prediction_savp/scripts/evaluate.py
rename to video_prediction_tools/deprecate/scripts/evaluate.py
diff --git a/video_prediction_savp/scripts/evaluate_all.sh b/video_prediction_tools/deprecate/scripts/evaluate_all.sh
similarity index 100%
rename from video_prediction_savp/scripts/evaluate_all.sh
rename to video_prediction_tools/deprecate/scripts/evaluate_all.sh
diff --git a/video_prediction_savp/scripts/generate_all.sh b/video_prediction_tools/deprecate/scripts/generate_all.sh
similarity index 100%
rename from video_prediction_savp/scripts/generate_all.sh
rename to video_prediction_tools/deprecate/scripts/generate_all.sh
diff --git a/video_prediction_savp/scripts/generate_orig.py b/video_prediction_tools/deprecate/scripts/generate_orig.py
similarity index 100%
rename from video_prediction_savp/scripts/generate_orig.py
rename to video_prediction_tools/deprecate/scripts/generate_orig.py
diff --git a/video_prediction_savp/scripts/plot_results.py b/video_prediction_tools/deprecate/scripts/plot_results.py
similarity index 100%
rename from video_prediction_savp/scripts/plot_results.py
rename to video_prediction_tools/deprecate/scripts/plot_results.py
diff --git a/video_prediction_savp/scripts/plot_results_all.sh b/video_prediction_tools/deprecate/scripts/plot_results_all.sh
similarity index 100%
rename from video_prediction_savp/scripts/plot_results_all.sh
rename to video_prediction_tools/deprecate/scripts/plot_results_all.sh
diff --git a/video_prediction_savp/scripts/train.py b/video_prediction_tools/deprecate/scripts/train.py
similarity index 100%
rename from video_prediction_savp/scripts/train.py
rename to video_prediction_tools/deprecate/scripts/train.py
diff --git a/video_prediction_savp/scripts/train_all.sh b/video_prediction_tools/deprecate/scripts/train_all.sh
similarity index 100%
rename from video_prediction_savp/scripts/train_all.sh
rename to video_prediction_tools/deprecate/scripts/train_all.sh
diff --git a/video_prediction_savp/docs/discussion/discussion.md b/video_prediction_tools/docs/discussion/discussion.md
similarity index 100%
rename from video_prediction_savp/docs/discussion/discussion.md
rename to video_prediction_tools/docs/discussion/discussion.md
diff --git a/video_prediction_savp/docs/structure_name_convention.md b/video_prediction_tools/docs/structure_name_convention.md
similarity index 100%
rename from video_prediction_savp/docs/structure_name_convention.md
rename to video_prediction_tools/docs/structure_name_convention.md
diff --git a/video_prediction_savp/env_setup/create_env.sh b/video_prediction_tools/env_setup/create_env.sh
similarity index 95%
rename from video_prediction_savp/env_setup/create_env.sh
rename to video_prediction_tools/env_setup/create_env.sh
index 8aa6c0a6e0aa2100acb49dbc0d849908c9f4eb5a..319b8cfc5e471cce4708b8792d878e1a0606d46a 100755
--- a/video_prediction_savp/env_setup/create_env.sh
+++ b/video_prediction_tools/env_setup/create_env.sh
@@ -32,7 +32,7 @@ fi
 
 # list of (Batch) scripts used for the steps in the workflow
 # !!! Expects that a template named [script_name]_template.sh exists!!!
-workflow_scripts=(DataExtraction DataPreprocess DataPreprocess2tf train_era5 generate_era5 DataPreprocess2tf_movingmnist train_movingmnist generate_movingmnist)
+workflow_scripts=(data_extraction_era5 preprocess_data_era5_step1 preprocess_data_era5_step2 train_model_era5 visualize_postprocess_era5 preprocess_data_moving_mnist train_model_moving_mnist visualize_postprocess_moving_mnist)
 
 HOST_NAME=`hostname`
 ENV_NAME=$1
@@ -119,7 +119,6 @@ if [[ "$ENV_EXIST" == 0 ]]; then
   if [[ "${HOST_NAME}" == hdfml* || "${HOST_NAME}" == juwels* ]]; then
     echo "export PYTHONPATH=${ENV_DIR}/lib/python3.6/site-packages:\$PYTHONPATH" >> ${activate_virt_env}
   fi
-  done
 elif [[ "$ENV_EXIST" == 1 ]]; then
  # activating virtual env is sufficient
   source ${ENV_DIR}/bin/activate  
@@ -141,5 +140,5 @@ for wf_script in "${workflow_scripts[@]}"; do
   else
     ./generate_workflow_runscripts.sh ${curr_script}  ${ENV_NAME} -exp_id=${exp_id}
   fi
-
+done
 
diff --git a/video_prediction_savp/env_setup/create_env_zam347.sh b/video_prediction_tools/env_setup/create_env_zam347.sh
similarity index 100%
rename from video_prediction_savp/env_setup/create_env_zam347.sh
rename to video_prediction_tools/env_setup/create_env_zam347.sh
diff --git a/video_prediction_savp/env_setup/generate_workflow_runscripts.sh b/video_prediction_tools/env_setup/generate_workflow_runscripts.sh
similarity index 97%
rename from video_prediction_savp/env_setup/generate_workflow_runscripts.sh
rename to video_prediction_tools/env_setup/generate_workflow_runscripts.sh
index c8cc49470461fc121bc6681cb8c15ae8c7dcf75e..34aebf26fe5021575a93839866f80f9f69a54d8d 100755
--- a/video_prediction_savp/env_setup/generate_workflow_runscripts.sh
+++ b/video_prediction_tools/env_setup/generate_workflow_runscripts.sh
@@ -103,6 +103,9 @@ if [[ -f ${target_script} ]]; then
   echo "ERROR: ${target_script} already exist."
   echo "Set explicitly a different experiment identifier."
   exit 4
+else 
+  echo "Convert ${curr_script}_template.sh to executable runscript"
+  echo "The executable runscript is saved under ${target_script}" 
 fi
 
 ### Do the work ###
diff --git a/video_prediction_savp/env_setup/modules_preprocess.sh b/video_prediction_tools/env_setup/modules_preprocess.sh
similarity index 91%
rename from video_prediction_savp/env_setup/modules_preprocess.sh
rename to video_prediction_tools/env_setup/modules_preprocess.sh
index a9de812dbde625a18198fe078ecb86c09286ed6d..35ee316db54023e9cc015dbbd2e1051c7ea7aae9 100755
--- a/video_prediction_savp/env_setup/modules_preprocess.sh
+++ b/video_prediction_tools/env_setup/modules_preprocess.sh
@@ -10,8 +10,8 @@ HOST_NAME=`hostname`
 
 echo "Start loading modules on ${HOST_NAME} required for preprocessing..."
 echo "modules_preprocess.sh is subject to: "
-echo "* DataExtraction.sh"
-echo "* DataPreprocess.sh"
+echo "* data_extraction_era5_<exp_id>.sh"
+echo "* preprocess_data_era5_step1_<exp_id>.sh"
 
 module purge
 module use $OTHERSTAGES
diff --git a/video_prediction_savp/env_setup/modules_train.sh b/video_prediction_tools/env_setup/modules_train.sh
similarity index 86%
rename from video_prediction_savp/env_setup/modules_train.sh
rename to video_prediction_tools/env_setup/modules_train.sh
index d45144340d334430b3d95580ceb2e74c8105e18a..685fae1de7f18a1575fd1efcaf7e344b2e3cce7c 100755
--- a/video_prediction_savp/env_setup/modules_train.sh
+++ b/video_prediction_tools/env_setup/modules_train.sh
@@ -10,9 +10,9 @@ HOST_NAME=`hostname`
 
 echo "Start loading modules on ${HOST_NAME}..."
 echo "modules_train.sh is subject to: "
-echo "* DataPreprocess_to_tf.sh"
-echo "* train_era5.sh"
-echo "* generate_era5.sh"
+echo "* preprocess_data_era5_step2_<exp_id>.sh"
+echo "* train_model_era5_<exp_id>.sh"
+echo "* visualize_postprocess_era5_<exp_id>.sh"
 
 module purge
 module use $OTHERSTAGES
diff --git a/video_prediction_savp/env_setup/requirements.txt b/video_prediction_tools/env_setup/requirements.txt
similarity index 100%
rename from video_prediction_savp/env_setup/requirements.txt
rename to video_prediction_tools/env_setup/requirements.txt
diff --git a/video_prediction_savp/external_package/hickle/bin/f2py b/video_prediction_tools/external_package/hickle/bin/f2py
similarity index 100%
rename from video_prediction_savp/external_package/hickle/bin/f2py
rename to video_prediction_tools/external_package/hickle/bin/f2py
diff --git a/video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/easy-install.pth b/video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/easy-install.pth
similarity index 100%
rename from video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/easy-install.pth
rename to video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/easy-install.pth
diff --git a/video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/PKG-INFO b/video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/PKG-INFO
similarity index 100%
rename from video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/PKG-INFO
rename to video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/PKG-INFO
diff --git a/video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/SOURCES.txt b/video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/SOURCES.txt
similarity index 100%
rename from video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/SOURCES.txt
rename to video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/SOURCES.txt
diff --git a/video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/dependency_links.txt b/video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/dependency_links.txt
similarity index 100%
rename from video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/dependency_links.txt
rename to video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/dependency_links.txt
diff --git a/video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/not-zip-safe b/video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/not-zip-safe
similarity index 100%
rename from video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/not-zip-safe
rename to video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/not-zip-safe
diff --git a/video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/requires.txt b/video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/requires.txt
similarity index 100%
rename from video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/requires.txt
rename to video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/requires.txt
diff --git a/video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/top_level.txt b/video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/top_level.txt
similarity index 100%
rename from video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/top_level.txt
rename to video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/top_level.txt
diff --git a/video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/__init__.py b/video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/__init__.py
similarity index 100%
rename from video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/__init__.py
rename to video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/__init__.py
diff --git a/video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/helpers.py b/video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/helpers.py
similarity index 100%
rename from video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/helpers.py
rename to video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/helpers.py
diff --git a/video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/hickle.py b/video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/hickle.py
similarity index 100%
rename from video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/hickle.py
rename to video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/hickle.py
diff --git a/video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/hickle_legacy.py b/video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/hickle_legacy.py
similarity index 100%
rename from video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/hickle_legacy.py
rename to video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/hickle_legacy.py
diff --git a/video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/hickle_legacy2.py b/video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/hickle_legacy2.py
similarity index 100%
rename from video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/hickle_legacy2.py
rename to video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/hickle_legacy2.py
diff --git a/video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/__init__.py b/video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/__init__.py
similarity index 100%
rename from video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/__init__.py
rename to video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/__init__.py
diff --git a/video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_astropy.py b/video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_astropy.py
similarity index 100%
rename from video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_astropy.py
rename to video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_astropy.py
diff --git a/video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_numpy.py b/video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_numpy.py
similarity index 100%
rename from video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_numpy.py
rename to video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_numpy.py
diff --git a/video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_pandas.py b/video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_pandas.py
similarity index 100%
rename from video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_pandas.py
rename to video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_pandas.py
diff --git a/video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_python.py b/video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_python.py
similarity index 100%
rename from video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_python.py
rename to video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_python.py
diff --git a/video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_python3.py b/video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_python3.py
similarity index 100%
rename from video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_python3.py
rename to video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_python3.py
diff --git a/video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_scipy.py b/video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_scipy.py
similarity index 100%
rename from video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_scipy.py
rename to video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_scipy.py
diff --git a/video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/lookup.py b/video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/lookup.py
similarity index 100%
rename from video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/lookup.py
rename to video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/lookup.py
diff --git a/video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/__init__.py b/video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/__init__.py
similarity index 100%
rename from video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/__init__.py
rename to video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/__init__.py
diff --git a/video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_astropy.py b/video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_astropy.py
similarity index 100%
rename from video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_astropy.py
rename to video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_astropy.py
diff --git a/video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_hickle.py b/video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_hickle.py
similarity index 100%
rename from video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_hickle.py
rename to video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_hickle.py
diff --git a/video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_hickle_helpers.py b/video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_hickle_helpers.py
similarity index 100%
rename from video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_hickle_helpers.py
rename to video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_hickle_helpers.py
diff --git a/video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_legacy_load.py b/video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_legacy_load.py
similarity index 100%
rename from video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_legacy_load.py
rename to video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_legacy_load.py
diff --git a/video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_scipy.py b/video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_scipy.py
similarity index 100%
rename from video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_scipy.py
rename to video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_scipy.py
diff --git a/video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/site.py b/video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/site.py
similarity index 100%
rename from video_prediction_savp/external_package/hickle/lib/python3.6/site-packages/site.py
rename to video_prediction_tools/external_package/hickle/lib/python3.6/site-packages/site.py
diff --git a/video_prediction_savp/external_package/lpips-tensorflow/.gitignore b/video_prediction_tools/external_package/lpips-tensorflow/.gitignore
similarity index 100%
rename from video_prediction_savp/external_package/lpips-tensorflow/.gitignore
rename to video_prediction_tools/external_package/lpips-tensorflow/.gitignore
diff --git a/video_prediction_savp/external_package/lpips-tensorflow/.gitmodules b/video_prediction_tools/external_package/lpips-tensorflow/.gitmodules
similarity index 100%
rename from video_prediction_savp/external_package/lpips-tensorflow/.gitmodules
rename to video_prediction_tools/external_package/lpips-tensorflow/.gitmodules
diff --git a/video_prediction_savp/external_package/lpips-tensorflow/LICENSE b/video_prediction_tools/external_package/lpips-tensorflow/LICENSE
similarity index 100%
rename from video_prediction_savp/external_package/lpips-tensorflow/LICENSE
rename to video_prediction_tools/external_package/lpips-tensorflow/LICENSE
diff --git a/video_prediction_savp/external_package/lpips-tensorflow/README.md b/video_prediction_tools/external_package/lpips-tensorflow/README.md
similarity index 100%
rename from video_prediction_savp/external_package/lpips-tensorflow/README.md
rename to video_prediction_tools/external_package/lpips-tensorflow/README.md
diff --git a/video_prediction_savp/external_package/lpips-tensorflow/export_to_tensorflow.py b/video_prediction_tools/external_package/lpips-tensorflow/export_to_tensorflow.py
similarity index 100%
rename from video_prediction_savp/external_package/lpips-tensorflow/export_to_tensorflow.py
rename to video_prediction_tools/external_package/lpips-tensorflow/export_to_tensorflow.py
diff --git a/video_prediction_savp/external_package/lpips-tensorflow/lpips_tf.py b/video_prediction_tools/external_package/lpips-tensorflow/lpips_tf.py
similarity index 100%
rename from video_prediction_savp/external_package/lpips-tensorflow/lpips_tf.py
rename to video_prediction_tools/external_package/lpips-tensorflow/lpips_tf.py
diff --git a/video_prediction_savp/external_package/lpips-tensorflow/requirements-dev.txt b/video_prediction_tools/external_package/lpips-tensorflow/requirements-dev.txt
similarity index 100%
rename from video_prediction_savp/external_package/lpips-tensorflow/requirements-dev.txt
rename to video_prediction_tools/external_package/lpips-tensorflow/requirements-dev.txt
diff --git a/video_prediction_savp/external_package/lpips-tensorflow/requirements.txt b/video_prediction_tools/external_package/lpips-tensorflow/requirements.txt
similarity index 100%
rename from video_prediction_savp/external_package/lpips-tensorflow/requirements.txt
rename to video_prediction_tools/external_package/lpips-tensorflow/requirements.txt
diff --git a/video_prediction_savp/external_package/lpips-tensorflow/setup.py b/video_prediction_tools/external_package/lpips-tensorflow/setup.py
similarity index 100%
rename from video_prediction_savp/external_package/lpips-tensorflow/setup.py
rename to video_prediction_tools/external_package/lpips-tensorflow/setup.py
diff --git a/video_prediction_savp/external_package/lpips-tensorflow/test_network.py b/video_prediction_tools/external_package/lpips-tensorflow/test_network.py
similarity index 100%
rename from video_prediction_savp/external_package/lpips-tensorflow/test_network.py
rename to video_prediction_tools/external_package/lpips-tensorflow/test_network.py
diff --git a/video_prediction_savp/hparams/bair_action_free/ours_deterministic_l1/model_hparams.json b/video_prediction_tools/hparams/bair_action_free/ours_deterministic_l1/model_hparams.json
similarity index 100%
rename from video_prediction_savp/hparams/bair_action_free/ours_deterministic_l1/model_hparams.json
rename to video_prediction_tools/hparams/bair_action_free/ours_deterministic_l1/model_hparams.json
diff --git a/video_prediction_savp/hparams/bair_action_free/ours_deterministic_l2/model_hparams.json b/video_prediction_tools/hparams/bair_action_free/ours_deterministic_l2/model_hparams.json
similarity index 100%
rename from video_prediction_savp/hparams/bair_action_free/ours_deterministic_l2/model_hparams.json
rename to video_prediction_tools/hparams/bair_action_free/ours_deterministic_l2/model_hparams.json
diff --git a/video_prediction_savp/hparams/bair_action_free/ours_gan/model_hparams.json b/video_prediction_tools/hparams/bair_action_free/ours_gan/model_hparams.json
similarity index 100%
rename from video_prediction_savp/hparams/bair_action_free/ours_gan/model_hparams.json
rename to video_prediction_tools/hparams/bair_action_free/ours_gan/model_hparams.json
diff --git a/video_prediction_savp/hparams/bair_action_free/ours_savp/model_hparams.json b/video_prediction_tools/hparams/bair_action_free/ours_savp/model_hparams.json
similarity index 100%
rename from video_prediction_savp/hparams/bair_action_free/ours_savp/model_hparams.json
rename to video_prediction_tools/hparams/bair_action_free/ours_savp/model_hparams.json
diff --git a/video_prediction_savp/hparams/bair_action_free/ours_vae_l1/model_hparams.json b/video_prediction_tools/hparams/bair_action_free/ours_vae_l1/model_hparams.json
similarity index 100%
rename from video_prediction_savp/hparams/bair_action_free/ours_vae_l1/model_hparams.json
rename to video_prediction_tools/hparams/bair_action_free/ours_vae_l1/model_hparams.json
diff --git a/video_prediction_savp/hparams/bair_action_free/sv2p_time_invariant/model_hparams.json b/video_prediction_tools/hparams/bair_action_free/sv2p_time_invariant/model_hparams.json
similarity index 100%
rename from video_prediction_savp/hparams/bair_action_free/sv2p_time_invariant/model_hparams.json
rename to video_prediction_tools/hparams/bair_action_free/sv2p_time_invariant/model_hparams.json
diff --git a/video_prediction_savp/hparams/era5/convLSTM/model_hparams.json b/video_prediction_tools/hparams/era5/convLSTM/model_hparams.json
similarity index 100%
rename from video_prediction_savp/hparams/era5/convLSTM/model_hparams.json
rename to video_prediction_tools/hparams/era5/convLSTM/model_hparams.json
diff --git a/video_prediction_savp/hparams/era5/mcnet/model_hparams.json b/video_prediction_tools/hparams/era5/mcnet/model_hparams.json
similarity index 100%
rename from video_prediction_savp/hparams/era5/mcnet/model_hparams.json
rename to video_prediction_tools/hparams/era5/mcnet/model_hparams.json
diff --git a/video_prediction_savp/hparams/era5/savp/model_hparams.json b/video_prediction_tools/hparams/era5/savp/model_hparams.json
similarity index 100%
rename from video_prediction_savp/hparams/era5/savp/model_hparams.json
rename to video_prediction_tools/hparams/era5/savp/model_hparams.json
diff --git a/video_prediction_savp/hparams/era5/vae/model_hparams.json b/video_prediction_tools/hparams/era5/vae/model_hparams.json
similarity index 100%
rename from video_prediction_savp/hparams/era5/vae/model_hparams.json
rename to video_prediction_tools/hparams/era5/vae/model_hparams.json
diff --git a/video_prediction_savp/hparams/kth/ours_deterministic_l1/model_hparams.json b/video_prediction_tools/hparams/kth/ours_deterministic_l1/model_hparams.json
similarity index 100%
rename from video_prediction_savp/hparams/kth/ours_deterministic_l1/model_hparams.json
rename to video_prediction_tools/hparams/kth/ours_deterministic_l1/model_hparams.json
diff --git a/video_prediction_savp/hparams/kth/ours_deterministic_l2/model_hparams.json b/video_prediction_tools/hparams/kth/ours_deterministic_l2/model_hparams.json
similarity index 100%
rename from video_prediction_savp/hparams/kth/ours_deterministic_l2/model_hparams.json
rename to video_prediction_tools/hparams/kth/ours_deterministic_l2/model_hparams.json
diff --git a/video_prediction_savp/hparams/kth/ours_gan/model_hparams.json b/video_prediction_tools/hparams/kth/ours_gan/model_hparams.json
similarity index 100%
rename from video_prediction_savp/hparams/kth/ours_gan/model_hparams.json
rename to video_prediction_tools/hparams/kth/ours_gan/model_hparams.json
diff --git a/video_prediction_savp/hparams/kth/ours_savp/model_hparams.json b/video_prediction_tools/hparams/kth/ours_savp/model_hparams.json
similarity index 100%
rename from video_prediction_savp/hparams/kth/ours_savp/model_hparams.json
rename to video_prediction_tools/hparams/kth/ours_savp/model_hparams.json
diff --git a/video_prediction_savp/hparams/kth/ours_vae_l1/model_hparams.json b/video_prediction_tools/hparams/kth/ours_vae_l1/model_hparams.json
similarity index 100%
rename from video_prediction_savp/hparams/kth/ours_vae_l1/model_hparams.json
rename to video_prediction_tools/hparams/kth/ours_vae_l1/model_hparams.json
diff --git a/video_prediction_savp/hparams/moving_mnist/convLSTM/model_hparams.json b/video_prediction_tools/hparams/moving_mnist/convLSTM/model_hparams.json
similarity index 100%
rename from video_prediction_savp/hparams/moving_mnist/convLSTM/model_hparams.json
rename to video_prediction_tools/hparams/moving_mnist/convLSTM/model_hparams.json
diff --git a/workflow_parallel_frame_prediction/DataExtraction/mpi_stager_v2.py b/video_prediction_tools/main_scripts/main_data_extraction.py
similarity index 97%
rename from workflow_parallel_frame_prediction/DataExtraction/mpi_stager_v2.py
rename to video_prediction_tools/main_scripts/main_data_extraction.py
index 632c29567590466264a39b60bdcd9ae0cd820b05..5aef4308fe83dc01845823179eab5b93b1bf9c74 100644
--- a/workflow_parallel_frame_prediction/DataExtraction/mpi_stager_v2.py
+++ b/video_prediction_tools/main_scripts/main_data_extraction.py
@@ -4,11 +4,11 @@ import sys
 import subprocess
 import logging
 import time
-from external_function import directory_scanner
-from external_function import load_distributor
-from external_function import hash_directory
-from external_function import md5
-from prepare_era5_data import *
+from utils.external_function import directory_scanner
+from utils.external_function import load_distributor
+from utils.external_function import hash_directory
+from utils.external_function import md5
+from data_preprocess.prepare_era5_data import *
 # How to Run it!
 # mpirun -np 6 python mpi_stager_v2.py
 import os
diff --git a/workflow_parallel_frame_prediction/DataPreprocess/mpi_stager_v2_process_netCDF.py b/video_prediction_tools/main_scripts/main_preprocess_data_step1.py
similarity index 95%
rename from workflow_parallel_frame_prediction/DataPreprocess/mpi_stager_v2_process_netCDF.py
rename to video_prediction_tools/main_scripts/main_preprocess_data_step1.py
index 377b8b8f4f9761e4e81b60381f10999485fcd663..1d23ef4f6b92a6c979f615cca8b9821b18a4c908 100755
--- a/workflow_parallel_frame_prediction/DataPreprocess/mpi_stager_v2_process_netCDF.py
+++ b/video_prediction_tools/main_scripts/main_preprocess_data_step1.py
@@ -4,11 +4,11 @@ import sys
 import subprocess
 import logging
 import time
-from external_function import directory_scanner
-from external_function import load_distributor
-from external_function import hash_directory
-from external_function import md5
-from process_netCDF_v2 import *  
+from utils.external_function import directory_scanner
+from utils.external_function import load_distributor
+from utils.external_function import hash_directory
+from utils.external_function import md5
+from data_preprocess.process_netCDF_v2 import *  
 from metadata import MetaData as MetaData
 import os
 import argparse
@@ -118,9 +118,9 @@ def main():
         md = MetaData(suffix_indir=destination_dir,exp_id=exp_id,data_filename=data_files_list[0],slices=slices,variables=vars)
         # modify Batch scripts if metadata has been retrieved for the first time (md.status = "new")
         if (md.status == "new"):
-            md.write_dirs_to_batch_scripts(scr_dir+"/DataPreprocess2tf.sh")
-            md.write_dirs_to_batch_scripts(scr_dir + "/train_era5.sh")
-            md.write_dirs_to_batch_scripts(scr_dir+"/generate_era5.sh")
+            md.write_dirs_to_batch_scripts(scr_dir+"/preprocess_data_era5_step2.sh")
+            md.write_dirs_to_batch_scripts(scr_dir + "/train_model_era5.sh")
+            md.write_dirs_to_batch_scripts(scr_dir+"/visualize_postprocess_era5.sh")
 
         elif (md.status == "old"):      # meta-data file already exists and is ok
                                         # check for temp.json in working directory (required by slave nodes)
diff --git a/video_prediction_tools/main_scripts/main_preprocess_data_step2.py b/video_prediction_tools/main_scripts/main_preprocess_data_step2.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f653dd9a14514621ffa3e5532a69830d44147c5
--- /dev/null
+++ b/video_prediction_tools/main_scripts/main_preprocess_data_step2.py
@@ -0,0 +1,144 @@
+import argparse
+import sys
+import os
+import glob
+import itertools
+import pickle
+import random
+import re
+import numpy as np
+import json
+import tensorflow as tf
+from collections import OrderedDict
+from tensorflow.contrib.training import HParams
+from mpi4py import MPI
+from video_prediction.datasets.base_dataset import VarLenFeatureVideoDataset
+import data_preprocess.process_netCDF_v2
+from general_utils import get_unique_vars
+from statistics import Calc_data_stat
+from metadata import MetaData
+from normalization import Norm_data
+from video_prediction.datasets.era5_dataset import *
+
+
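+# A minimal, illustrative invocation (the variable names below are placeholders, not prescribed ones):
+#   mpirun -np 13 python main_preprocess_data_step2.py <input_dir> <output_dir> -vars T2 MSL gph500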
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("input_dir", type=str, help="directory containing the processed directories ""boxing, handclapping, handwaving, ""jogging, running, walking")
+    parser.add_argument("output_dir", type=str)
+    # ML 2020/04/08 S
+    # Add vars for ensuring proper normalization and reshaping of sequences
+    parser.add_argument("-vars","--variables",dest="variables", nargs='+', type=str, help="Names of input variables.")
+    parser.add_argument("-height",type=int,default=64)
+    parser.add_argument("-width",type = int,default=64)
+    parser.add_argument("-seq_length",type=int,default=20)
+    parser.add_argument("-sequences_per_file",type=int,default=2)
+    args = parser.parse_args()
+    current_path = os.getcwd()
+    #input_dir = "/Users/gongbing/PycharmProjects/video_prediction/splits"
+    #output_dir = "/Users/gongbing/PycharmProjects/video_prediction/data/era5"
+    #partition_names = ['train','val',  'test'] #64,64,3 val has issue#
+
+    ############################################################
+    # Controlling variable: defines the train/val/test split and needs to be adapted manually!
+    ############################################################
+    partition = {
+            "train":{
+                "2010":[1,2,3,4,5,6,7,8,9,10,11,12],
+                "2013":[1,2,3,4,5,6,7,8,9,10,11,12],
+                "2015":[1,2,3,4,5,6,7,8,9,10,11,12],
+                "2019":[1,2,3,4,5,6,7,8,9,10,11,12]
+                 },
+            "val":
+                {"2017":[1,2,3,4,5,6,7,8,9,10,11,12]
+                 },
+            "test":
+                {"2016":[1,2,3,4,5,6,7,8,9,10,11,12]
+                 }
+            }
+    
+    # initialize MPI
+    comm = MPI.COMM_WORLD
+    my_rank = comm.Get_rank()  # rank of the node
+    p = comm.Get_size()  # number of assigned nodes
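+    # Rank 0 acts as the master: it aggregates the monthly statistics, writes statistics.json and
+    # distributes the partition information. Every other rank converts the month matching its rank
+    # number to TFRecords, so a layout of 13 processes (1 master + 12 workers) is assumed here.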
+  
+    if my_rank == 0 :
+        # retrieve final statistics first (not parallelized!)
+        # some preparatory steps
+        stat_dir_prefix = args.input_dir
+        varnames        = args.variables
+    
+        vars_uni, varsind, nvars = get_unique_vars(varnames)
+        stat_obj = Calc_data_stat(nvars)                            # init statistic-instance
+    
+        # loop over whole data set (training, dev and test set) to collect the intermediate statistics
+        print("Start collecting statistics from the whole datset to be processed...")
+        for split in partition.keys():
+            values = partition[split]
+            for year in values.keys():
+                file_dir = os.path.join(stat_dir_prefix,year)
+                for month in values[year]:
+                    # process stat-file:
+                    stat_obj.acc_stat_master(file_dir,int(month))  # process monthly statistic-file  
+        
+        # finalize statistics and write to json-file
+        stat_obj.finalize_stat_master(vars_uni)
+        stat_obj.write_stat_json(args.input_dir)
+
+        # organize the parallelized partitioning
+        partition_year_month = []  # list of lists; each inner list holds three elements [partition_name, year, month]
+        partition_names = list(partition.keys())
+        print ("partition_names:",partition_names)
+        broadcast_lists = []
+        for partition_name in partition_names:
+            partition_data = partition[partition_name]        
+            years = list(partition_data.keys())
+            broadcast_lists.append([partition_name,years])
+        for nodes in range(1,p):
+            #ibroadcast_list = [partition_name,years,nodes]
+            #broadcast_lists.append(broadcast_list)
+            comm.send(broadcast_lists,dest=nodes) 
+           
+        message_counter = 1
+        while message_counter <= 12:
+            message_in = comm.recv()
+            message_counter = message_counter + 1 
+            print("Message in from slaver",message_in) 
+            
+        write_sequence_file(args.output_dir,args.seq_length,args.sequences_per_file)
+        
+        #write_sequence_file   
+    else:
+        message_in = comm.recv()
+        print ("My rank,", my_rank)   
+        print("message_in",message_in)
+        # open statistics file and feed it to norm-instance
+        print("Opening json-file: "+os.path.join(args.input_dir,"statistics.json"))
+        with open(os.path.join(args.input_dir,"statistics.json")) as js_file:
+            stats = json.load(js_file)
+        #loop the partitions (train,val,test)
+        for partition in message_in:
+            print("partition on slave ",partition)
+            partition_name = partition[0]
+            save_output_dir =  os.path.join(args.output_dir,partition_name)
+            for year in partition[1]:
+               input_file = "X_" + '{0:02}'.format(my_rank) + ".pkl"
+               temp_file = "T_" + '{0:02}'.format(my_rank) + ".pkl"
+               input_dir = os.path.join(args.input_dir,year)
+               temp_file = os.path.join(input_dir,temp_file )
+               input_file = os.path.join(input_dir,input_file)
+               # create the tfrecords-files
+               read_frames_and_save_tf_records(year=year,month=my_rank,stats=stats,output_dir=save_output_dir, \
+                                               input_file=input_file,temp_input_file=temp_file,vars_in=args.variables, \
+                                               partition_name=partition_name,seq_length=args.seq_length, \
+                                               height=args.height,width=args.width,sequences_per_file=args.sequences_per_file)   
+                                                  
+            print("Year {} finished",year)
+        message_out = ("Node:",str(my_rank),"finished","","\r\n")
+        print ("Message out for slaves:",message_out)
+        comm.send(message_out,dest=0)
+        
+    MPI.Finalize()        
+   
+if __name__ == '__main__':
+    main()
+
diff --git a/video_prediction_savp/scripts/train_dummy.py b/video_prediction_tools/main_scripts/main_train_models.py
similarity index 99%
rename from video_prediction_savp/scripts/train_dummy.py
rename to video_prediction_tools/main_scripts/main_train_models.py
index 0417a36514fd6136fb9fbe934bfb396633fa6093..d316881752c244222129cc151c30592b6090d007 100644
--- a/video_prediction_savp/scripts/train_dummy.py
+++ b/video_prediction_tools/main_scripts/main_train_models.py
@@ -144,10 +144,10 @@ def make_dataset_iterator(train_dataset, val_dataset, batch_size ):
     val_tf_dataset = val_dataset.make_dataset_v2(batch_size)
     val_iterator = val_tf_dataset.make_one_shot_iterator()
     val_handle = val_iterator.string_handle()
-    #iterator = tf.data.Iterator.from_string_handle(
-    #    train_handle, train_tf_dataset.output_types, train_tf_dataset.output_shapes)
+    iterator = tf.data.Iterator.from_string_handle(
+        train_handle, train_tf_dataset.output_types, train_tf_dataset.output_shapes)
     inputs = train_iterator.get_next()
-    val = val_iterator.get_next()
+
     return inputs,train_handle, val_handle
 
 
diff --git a/video_prediction_savp/scripts/generate_transfer_learning_finetune.py b/video_prediction_tools/main_scripts/main_visualize_postprocess.py
similarity index 100%
rename from video_prediction_savp/scripts/generate_transfer_learning_finetune.py
rename to video_prediction_tools/main_scripts/main_visualize_postprocess.py
diff --git a/workflow_parallel_frame_prediction/DataExtraction/external_function.py b/video_prediction_tools/utils/external_function.py
similarity index 100%
rename from workflow_parallel_frame_prediction/DataExtraction/external_function.py
rename to video_prediction_tools/utils/external_function.py
diff --git a/video_prediction_savp/utils/general_utils.py b/video_prediction_tools/utils/general_utils.py
similarity index 100%
rename from video_prediction_savp/utils/general_utils.py
rename to video_prediction_tools/utils/general_utils.py
diff --git a/video_prediction_tools/utils/metadata.py b/video_prediction_tools/utils/metadata.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f8f5a45458088d34730bef212dc9064201223a5
--- /dev/null
+++ b/video_prediction_tools/utils/metadata.py
@@ -0,0 +1,402 @@
+""" 
+Class to retrieve and handle meta-data
+"""
+
+import os
+import sys
+import time
+import numpy as np
+import json
+from netCDF4 import Dataset
+from general_utils import is_integer, add_str_to_path
+
+
+class MetaData:
+    """
+     Class for handling, storing and retrieving meta-data
+    """
+
+    def __init__(self, json_file=None, suffix_indir=None, exp_id=None, data_filename=None, slices=None, variables=None):
+
+        """
+         Initializes the MetaData instance by reading a corresponding json-file or by handling the arguments of the preprocessing step
+         (i.e. exemplary input file, slices defining region of interest, input variables)
+        :param json_file: already existing json-file with metadata, if present the following arguments are not needed
+        :param suffix_indir: suffix of directory where processed data is stored for running the models
+        :param exp_id: experiment identifier
+        :param data_filename: name of netCDF-file serving as base for metadata retrieval
+        :param slices: indices defining the region of interest
+        :param variables: predictor variables
+        """
+
+        method_name = MetaData.__init__.__name__ + " of Class " + MetaData.__name__
+
+        if json_file is not None:
+            MetaData.get_metadata_from_file(self, json_file)
+
+        else:
+            # No dictionary from json-file available, so all other arguments have to be set
+            if not suffix_indir:
+                raise TypeError(method_name + ": 'suffix_indir'-argument is required if 'json_file' is not passed.")
+            else:
+                if not isinstance(suffix_indir, str):
+                    raise TypeError(method_name + ": 'suffix_indir'-argument must be a string.")
+
+            if not exp_id:
+                raise TypeError(method_name + ": 'exp_id'-argument is required if 'json_file' is not passed.")
+            else:
+                if not isinstance(exp_id, str):
+                    raise TypeError(method_name + ": 'exp_id'-argument must be a string.")
+
+            if not data_filename:
+                raise TypeError(method_name + ": 'data_filename'-argument is required if 'json_file' is not passed.")
+            else:
+                if not isinstance(data_filename, str):
+                    raise TypeError(method_name + ": 'data_filename'-argument must be a string.")
+
+            if not slices:
+                raise TypeError(method_name + ": 'slices'-argument is required if 'json_file' is not passed.")
+            else:
+                if not isinstance(slices, dict):
+                    raise TypeError(method_name + ": 'slices'-argument must be a dictionary.")
+
+            if not variables:
+                raise TypeError(method_name + ": 'variables'-argument is required if 'json_file' is not passed.")
+            else:
+                if not isinstance(variables, list):
+                    raise TypeError(method_name + ": 'variables'-argument must be a list.")
+
+            MetaData.get_and_set_metadata_from_file(self, suffix_indir, exp_id, data_filename, slices, variables)
+
+            MetaData.write_metadata_to_file(self)
+
+    def get_and_set_metadata_from_file(self, suffix_indir, exp_id, datafile_name, slices, variables):
+        '''
+         Retrieves several metadata entries from an ERA5 netCDF-file and sets the corresponding class instance attributes.
+         In addition, the name of the experiment directory is constructed following the naming convention (see below).
+
+         Naming convention:
+         [model_base]_Y[yyyy]to[yyyy]M[mm]to[mm]-[nx]x[ny]-[nnnn]N[eeee]E-[var1]_[var2]_(...)_[varN]
+         ---------------- Given ----------------|---------------- Created dynamically --------------
+
+         Note that the model-base as well as the date-identifiers must already be included in target_dir_in.
+        :param suffix_indir: Path to directory where the preprocessed data will be stored
+        :param exp_id: Experimental identifier
+        :param datafile_name: ERA 5 reanalysis netCDF file
+        :param slices: indices of lat- and lon-coordinates defining the region of interest
+        :param variables: meteorological variables to be processed during preprocessing
+        :return: A class instance with the following attributes set:
+                 * varnames       : name of variables to be processed
+                 * nx             : number of grid points of sliced region in zonal direction
+                 * ny             : same as nx, but in meridional direction
+                 * sw_c           : south-west corner [lat,lon] coordinates of region of interest
+                 * lat            : latitude coordinates of grid points (on a rectangular grid)
+                 * lon            : longitude coordinates of grid points (on a rectangular grid)
+                 * expname        : name of target experiment directory following the naming convention (see above)
+                 * expdir         : basename of experiment directory
+                 * exp_id         : experimental identifier
+                 * status         : status to indicate if new metadata was set up or if it is pre-existing (left empty here!)
+        '''
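+        # Illustrative example (hypothetical values): for suffix_indir '.../era5-Y2010toY2017M01to12/2010',
+        # slices yielding a 64x64 frame with south-west corner at 38.35N/0.0E and variables ['T2','MSL','gph500'],
+        # the resulting expname reads 'era5-Y2010toY2017M01to12-64x64-3835N0000E-T2_MSL_gph500'.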
+
+        method_name = MetaData.get_and_set_metadata_from_file.__name__ + " of Class " + MetaData.__name__
+
+        if not suffix_indir: raise ValueError(method_name + ": suffix_indir must be a non-empty path.")
+
+        # retrieve required information from file 
+        flag_coords = ["N", "E"]
+
+        print("Retrieve metadata based on file: '" + datafile_name + "'")
+        try:
+            datafile = Dataset(datafile_name, 'r')
+        except Exception as err:
+            print(method_name + ": Error when opening data file '" + datafile_name + "': " + str(err))
+            sys.exit(1)
+
+        # Check if all requested variables can be obtained from datafile
+        MetaData.check_datafile(datafile, variables)
+        self.varnames = variables
+
+        self.nx, self.ny = np.abs(slices['lon_e'] - slices['lon_s']), np.abs(slices['lat_e'] - slices['lat_s'])
+        sw_c = [float(datafile.variables['lat'][slices['lat_e'] - 1]), float(datafile.variables['lon'][slices[
+            'lon_s']])]  # meridional axis lat is oriented from north to south (i.e. monotonically decreasing)
+        self.sw_c = sw_c
+        self.lat = datafile.variables['lat'][slices['lat_s']:slices['lat_e']]
+        self.lon = datafile.variables['lon'][slices['lon_s']:slices['lon_e']]
+
+        # Now start constructing expdir-string
+        # switch sign and coordinate-flags to avoid negative values appearing in expdir-name
+        if sw_c[0] < 0.:
+            sw_c[0] = np.abs(sw_c[0])
+            flag_coords[0] = "S"
+        if sw_c[1] < 0.:
+            sw_c[1] = np.abs(sw_c[1])
+            flag_coords[1] = "W"
+        nvar = len(variables)
+
+        # splitting has to be done in order to retrieve the expname-suffix (and the year if required)
+        path_parts = os.path.split(suffix_indir.rstrip("/"))
+
+        if (is_integer(path_parts[1])):
+            year = path_parts[1]
+            path_parts = os.path.split(path_parts[0].rstrip("/"))
+        else:
+            year = ""
+
+        expdir, expname = path_parts[0], path_parts[1]
+
+        # extend the experiment name successively (split up for better readability)
+        expname += "-" + str(self.nx) + "x" + str(self.ny)
+        expname += "-" + (("{0: 05.2f}" + flag_coords[0] + "{1:05.2f}" + flag_coords[1]).format(*sw_c)).strip().replace(
+            ".", "") + "-"
+
+        # reduced for-loop length as last variable-name is not followed by an underscore (see above)
+        for i in range(nvar - 1):
+            expname += variables[i] + "_"
+        expname += variables[nvar - 1]
+
+        self.expname = expname
+        self.expdir = expdir
+        self.exp_id = exp_id
+        self.status = ""  # uninitialized (is set when metadata is written/compared to/with json-file, see write_metadata_to_file-method)
+
+    # ML 2020/04/24 E         
+
+    def write_metadata_to_file(self, dest_dir=None):
+        '''
+        Writes meta data stored as attributes in the class instance to metadata.json.
+        If dest_dir is None, the destination directory is constructed based on the attributes expdir and expname.
+        :param dest_dir: path to directory where to store metadata.json
+        :return: -
+        '''
+
+        method_name = MetaData.write_metadata_to_file.__name__ + " of Class " + MetaData.__name__
+        # actual work:
+        meta_dict = {"expname": self.expname, "expdir": self.expdir, "exp_id": self.exp_id, "sw_corner_frame": {
+            "lat": np.around(self.sw_c[0], decimals=2),
+            "lon": np.around(self.sw_c[1], decimals=2)
+        }, "coordinates": {
+            "lat": np.around(self.lat, decimals=2).tolist(),
+            "lon": np.around(self.lon, decimals=2).tolist()
+        }, "frame_size": {
+            "nx": int(self.nx),
+            "ny": int(self.ny)
+        }, "variables": []}
+
+        for i in range(len(self.varnames)):
+            # print(self.varnames[i])
+            meta_dict["variables"].append({"var" + str(i + 1): self.varnames[i]})
+        # create directory if required
+        if dest_dir is None:
+            dest_dir = os.path.join(self.expdir, self.expname)
+        if not os.path.exists(dest_dir):
+            os.makedirs(dest_dir, exist_ok=True)
+            print("Created experiment directory: '" + dest_dir + "'")
+
+        meta_fname = os.path.join(dest_dir, "metadata.json")
+
+        if os.path.exists(meta_fname):  # check if a metadata-file already exists and check its content
+            print(method_name + ": json-file ('" + meta_fname + "' already exists. Its content will be checked...")
+            self.status = "old"  # set status to old in order to prevent repeated modification of shell-/Batch-scripts
+            with open(meta_fname, 'r') as js_file:
+                dict_dupl = json.load(js_file)
+
+                if dict_dupl != meta_dict:
+                    meta_fname_dbg = os.path.join(dest_dir, "metadata_debug.json")
+                    print(
+                        method_name + ": Already existing metadata (see '" + meta_fname +
+                        "') does not match the data being processed right now (see '" +
+                        meta_fname_dbg + "'). Ensure a common data base.")
+                    with open(meta_fname_dbg, 'w') as js_file:
+                        json.dump(meta_dict, js_file)
+                    raise ValueError(method_name + ": Metadata mismatch, see '" + meta_fname_dbg + "'.")
+                else:  # do not need to do anything
+                    pass
+        else:
+            # write dictionary to file
+            print(method_name + ": Write dictionary to json-file: '" + meta_fname + "'")
+            with open(meta_fname, 'w') as js_file:
+                json.dump(meta_dict, js_file)
+            self.status = "new"  # set status to new in order to trigger modification of shell-/Batch-scripts
+
+    def get_metadata_from_file(self, js_file):
+        '''
+        :param js_file: json file from which to retrieve the meta data
+        :return: A class instance with the following attributes set:
+                 * varnames       : name of variables to be processed
+                 * nx             : number of grid points of sliced region in zonal direction
+                 * ny             : same as nx, but in meridional direction
+                 * sw_c           : south-west corner [lat,lon] coordinates of region of interest
+                 * lat            : latitude coordinates of grid points (on a rectangular grid)
+                 * lon            : longitude coordinates of grid points (on a rectangular grid)
+                 * expname        : name of target experiment directory following naming convention (see above)
+                 * expdir         : basename of experiment directory
+                 * exp_id         : experimental identifier (if available!)
+                 * status         : status to indicate if a new metadata is set-up or pre-existing (left empty here!)
+        '''
+
+        with open(js_file) as js_file:
+            dict_in = json.load(js_file)
+
+            self.expdir = dict_in["expdir"]
+            self.expname = dict_in["expname"]
+            # check if exp_id is available (retained for ensuring backward compatibility with
+            # old metadata files without exp_id)
+            if "exp_id" in dict_in:
+                self.exp_id = dict_in["exp_id"]
+
+            self.sw_c = [dict_in["sw_corner_frame"]["lat"], dict_in["sw_corner_frame"]["lon"]]
+            self.lat = dict_in["coordinates"]["lat"]
+            self.lon = dict_in["coordinates"]["lon"]
+
+            self.nx = dict_in["frame_size"]["nx"]
+            self.ny = dict_in["frame_size"]["ny"]
+            # dict_in["variables"] is a list like [{var1: varname1},{var2: varname2},...]
+            list_of_dict_aux = dict_in["variables"]
+            # iterate through the list with an integer ivar
+            # note: the naming of the variables starts with var1, thus add 1 to the iterator
+            self.variables = [list_of_dict_aux[ivar]["var" + str(ivar + 1)] for ivar in range(len(list_of_dict_aux))]
+
+    def write_dirs_to_batch_scripts(self, batch_script):
+        '''
+        Method for automatic extension of path variables in Batch scripts by the experiment directory which is saved
+        in the expname-attribute of the class instance
+        :param batch_script: Batch script whose (known) path variables (defined by paths_to_mod below) will be expanded
+                             by the expname-attribute of the class instance at hand
+        :return: modified Batch script
+        '''
+
+        paths_to_mod = ["source_dir=", "destination_dir=", "checkpoint_dir=",
+                        "results_dir="]  # known directory-variables in batch-scripts
+
+        # For backward compability:
+        # Check if exp_id (if present) needs to be added to batch_script in order to access the file
+        if hasattr(self, "exp_id"):
+            sep_idx = batch_script.index(".sh")
+            batch_script = batch_script[:sep_idx] + "_" + self.exp_id + batch_script[sep_idx:]
+
+        with open(batch_script, 'r') as file:
+            data = file.readlines()
+
+        nlines = len(data)
+        matched_lines = [iline for iline in range(nlines) if any(
+            str_id in data[iline] for str_id in paths_to_mod)]  # list of line-number indices to be modified
+
+        for i in matched_lines:
+            data[i] = add_str_to_path(data[i], self.expname)
+
+        with open(batch_script, 'w') as file:
+            file.writelines(data)
+
+    @staticmethod
+    def write_destdir_jsontmp(dest_dir, tmp_dir=None):
+        '''
+        Writes dest_dir to temporary json-file (temp.json) stored in the current working directory.
+        To be executed by Master node only in parallel mode.
+        :param dest_dir: path to destination directory
+        :param tmp_dir: directory where to store temp.json (optional)
+        :return: -
+        '''
+
+        if not tmp_dir: tmp_dir = os.getcwd()
+
+        file_tmp = os.path.join(tmp_dir, "temp.json")
+        dict_tmp = {"destination_dir": dest_dir}
+
+        with open(file_tmp, "w") as js_file:
+            print("Save destination_dir-variable in temporary json-file: '" + file_tmp + "'")
+            json.dump(dict_tmp, js_file)
+
+    @staticmethod
+    def get_destdir_jsontmp(tmp_dir=None):
+        '''
+        Retrieves path destination directory from temp.json file (to be created by write_destdir_jsontmp-method)
+        :param tmp_dir: directory where temp.json is stored (optional). If not provided, the working directory is used.
+        :return: string containing the path to the destination directory
+        '''
+
+        method_name = MetaData.get_destdir_jsontmp.__name__ + " of Class " + MetaData.__name__
+
+        if not tmp_dir: tmp_dir = os.getcwd()
+
+        file_tmp = os.path.join(tmp_dir, "temp.json")
+
+        try:
+            with open(file_tmp, "r") as js_file:
+                dict_tmp = json.load(js_file)
+        except Exception as err:
+            print(method_name + ": Could not open requested json-file '" + file_tmp + "': " + str(err))
+            sys.exit(1)
+
+        if not "destination_dir" in dict_tmp.keys():
+            raise Exception(method_name + ": Could not find 'destination_dir' in dictionary obtained from " + file_tmp)
+        else:
+            return (dict_tmp.get("destination_dir"))
+
+    @staticmethod
+    def wait_for_jsontmp(tmp_dir=None, waittime=10, delay=0.5):
+        '''
+        Waits until temp.json-file becomes available
+        :param tmp_dir: directory where temp.json is stored (optional). If not provided, the working directory is used.
+        :param waittime: time to wait in seconds (default: 10 s)
+        :param delay: length of the checking interval (default: 0.5 s)
+        :return: -
+        '''
+
+        method_name = MetaData.wait_for_jsontmp.__name__ + " of Class " + MetaData.__name__
+
+        if not tmp_dir: tmp_dir = os.getcwd()
+
+        file_tmp = os.path.join(tmp_dir, "temp.json")
+
+        counter_max = waittime / delay
+        counter = 0
+        status = "not_ok"
+
+        while (counter <= counter_max):
+            if os.path.isfile(file_tmp):
+                status = "ok"
+                break
+            else:
+                time.sleep(delay)
+
+            counter += 1
+
+        if status != "ok": raise IOError(method_name + ": '" + file_tmp + \
+                                         "' does not exist after waiting for " + str(waittime) + " sec.")
+
+    @staticmethod
+    def issubset(a, b):
+        '''
+        Checks if all elements of a exist in b or vice versa (depends on the length of the corresponding lists/sets)
+        :param a: list 1
+        :param b: list 2
+        :return: True or False
+        '''
+
+        if len(a) > len(b):
+            return (set(b).issubset(set(a)))
+        elif len(b) >= len(a):
+            return (set(a).issubset(set(b)))
+
+    @staticmethod
+    def check_datafile(datafile, varnames):
+        '''
+        Checks if all variables whose names are given in varnames can be found in data-object (read in from a netCDF)
+        :param datafile: data-object
+        :param varnames: names of variables to be expected in data-object
+        :return: Raises a ValueError if any variable cannot be found
+        '''
+        """
+          Checks if all varnames can be found in datafile
+        """
+
+        if not MetaData.issubset(varnames, datafile.variables.keys()):
+            for i in range(len(varnames2check)):
+                if not varnames2check[i] in f0.variables.keys():
+                    print("Variable '" + varnames2check[i] + "' not found in datafile '" + data_filenames[0] + "'.")
+                raise ValueError("Could not find the above mentioned variables.")
+        else:
+            pass
+
+# ----------------------------------- end of class MetaData -----------------------------------
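+
+# Minimal usage sketch (illustrative; paths, slices and variable names are placeholders):
+#   slices = {"lat_s": 74, "lat_e": 138, "lon_s": 550, "lon_e": 614}
+#   md = MetaData(suffix_indir="/path/to/era5-Y2010toY2017M01to12/2010", exp_id="exp1",
+#                 data_filename="/path/to/era5_sample.nc", slices=slices,
+#                 variables=["T2", "MSL", "gph500"])
+#   # ...and restoring it later from the json-file written by write_metadata_to_file:
+#   md_restored = MetaData(json_file=os.path.join(md.expdir, md.expname, "metadata.json"))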
diff --git a/video_prediction_savp/utils/normalization.py b/video_prediction_tools/utils/normalization.py
similarity index 100%
rename from video_prediction_savp/utils/normalization.py
rename to video_prediction_tools/utils/normalization.py
diff --git a/workflow_parallel_frame_prediction/DataExtraction/shiftgrid.py b/video_prediction_tools/utils/shiftgrid.py
similarity index 100%
rename from workflow_parallel_frame_prediction/DataExtraction/shiftgrid.py
rename to video_prediction_tools/utils/shiftgrid.py
diff --git a/video_prediction_savp/utils/statistics.py b/video_prediction_tools/utils/statistics.py
similarity index 100%
rename from video_prediction_savp/utils/statistics.py
rename to video_prediction_tools/utils/statistics.py
diff --git a/video_prediction_savp/video_prediction/.DS_Store b/video_prediction_tools/video_prediction/.DS_Store
similarity index 100%
rename from video_prediction_savp/video_prediction/.DS_Store
rename to video_prediction_tools/video_prediction/.DS_Store
diff --git a/video_prediction_savp/video_prediction/__init__.py b/video_prediction_tools/video_prediction/__init__.py
similarity index 100%
rename from video_prediction_savp/video_prediction/__init__.py
rename to video_prediction_tools/video_prediction/__init__.py
diff --git a/video_prediction_savp/video_prediction/datasets/__init__.py b/video_prediction_tools/video_prediction/datasets/__init__.py
similarity index 94%
rename from video_prediction_savp/video_prediction/datasets/__init__.py
rename to video_prediction_tools/video_prediction/datasets/__init__.py
index f58607c2f4c14047aefb36956e98bd228a30aeb1..8a4f02817bae987e2d5248342582612d87349398 100644
--- a/video_prediction_savp/video_prediction/datasets/__init__.py
+++ b/video_prediction_tools/video_prediction/datasets/__init__.py
@@ -6,7 +6,7 @@ from .softmotion_dataset import SoftmotionVideoDataset
 from .kth_dataset import KTHVideoDataset
 from .ucf101_dataset import UCF101VideoDataset
 from .cartgripper_dataset import CartgripperVideoDataset
-from .era5_dataset_v2 import ERA5Dataset_v2
+from .era5_dataset import ERA5Dataset
 from .moving_mnist import MovingMnist
 #from .era5_dataset_v2_anomaly import ERA5Dataset_v2_anomaly
 
@@ -19,7 +19,7 @@ def get_dataset_class(dataset):
         'kth': 'KTHVideoDataset',
         'ucf101': 'UCF101VideoDataset',
         'cartgripper': 'CartgripperVideoDataset',
-        "era5":"ERA5Dataset_v2",
+        "era5":"ERA5Dataset",
         "moving_mnist":"MovingMnist"
 #        "era5_anomaly":"ERA5Dataset_v2_anomaly",
     }
diff --git a/video_prediction_savp/video_prediction/datasets/base_dataset.py b/video_prediction_tools/video_prediction/datasets/base_dataset.py
similarity index 100%
rename from video_prediction_savp/video_prediction/datasets/base_dataset.py
rename to video_prediction_tools/video_prediction/datasets/base_dataset.py
diff --git a/video_prediction_savp/video_prediction/datasets/cartgripper_dataset.py b/video_prediction_tools/video_prediction/datasets/cartgripper_dataset.py
similarity index 100%
rename from video_prediction_savp/video_prediction/datasets/cartgripper_dataset.py
rename to video_prediction_tools/video_prediction/datasets/cartgripper_dataset.py
diff --git a/video_prediction_savp/video_prediction/datasets/era5_dataset_v2.py b/video_prediction_tools/video_prediction/datasets/era5_dataset.py
similarity index 63%
rename from video_prediction_savp/video_prediction/datasets/era5_dataset_v2.py
rename to video_prediction_tools/video_prediction/datasets/era5_dataset.py
index 7a61aa090f9e115ffd140a54dd0784dbbd35c48d..2fc540eb8402dd54942be92222e1ace42d8aba66 100644
--- a/video_prediction_savp/video_prediction/datasets/era5_dataset_v2.py
+++ b/video_prediction_tools/video_prediction/datasets/era5_dataset.py
@@ -1,34 +1,29 @@
 import argparse
+import os
 import glob
+import sys
 import itertools
-import os
 import pickle
 import random
 import re
 import numpy as np
 import json
 import tensorflow as tf
+from collections import OrderedDict
+from tensorflow.contrib.training import HParams
+from mpi4py import MPI
 from video_prediction.datasets.base_dataset import VarLenFeatureVideoDataset
-# ML 2020/04/14: hack for getting functions of process_netCDF_v2:
-from os import path
-import sys
-sys.path.append(path.abspath('../../workflow_parallel_frame_prediction/'))
-import DataPreprocess.process_netCDF_v2
+import data_preprocess.process_netCDF_v2
 from general_utils import get_unique_vars
 from statistics import Calc_data_stat
 from metadata import MetaData
 from normalization import Norm_data
-#from base_dataset import VarLenFeatureVideoDataset
-from collections import OrderedDict
-from tensorflow.contrib.training import HParams
-from mpi4py import MPI
-import glob
 
 
 
-class ERA5Dataset_v2(VarLenFeatureVideoDataset):
+class ERA5Dataset(VarLenFeatureVideoDataset):
     def __init__(self, *args, **kwargs):
-        super(ERA5Dataset_v2, self).__init__(*args, **kwargs)
+        super(ERA5Dataset, self).__init__(*args, **kwargs)
         from google.protobuf.json_format import MessageToDict
         example = next(tf.python_io.tf_record_iterator(self.filenames[0]))
         dict_message = MessageToDict(tf.train.Example.FromString(example))
@@ -39,7 +34,7 @@ class ERA5Dataset_v2(VarLenFeatureVideoDataset):
         self.state_like_names_and_shapes['images'] = 'images/encoded', self.image_shape
 
     def get_default_hparams_dict(self):
-        default_hparams = super(ERA5Dataset_v2, self).get_default_hparams_dict()
+        default_hparams = super(ERA5Dataset, self).get_default_hparams_dict()
         hparams = dict(
             context_frames=10,#Bing: Todo oriignal is 10
             sequence_length=20,#bing: TODO original is 20,
@@ -125,6 +120,9 @@ class ERA5Dataset_v2(VarLenFeatureVideoDataset):
 
 
 
+
+
+
     def make_batch(self, batch_size):
         dataset = self.make_dataset_v2(batch_size)
         iterator = dataset.make_one_shot_iterator()
@@ -262,126 +260,3 @@ def write_sequence_file(output_dir,seq_length,sequences_per_file):
     
     
 
-def main():
-    parser = argparse.ArgumentParser()
-    parser.add_argument("input_dir", type=str, help="directory containing the processed directories ""boxing, handclapping, handwaving, ""jogging, running, walking")
-    parser.add_argument("output_dir", type=str)
-    # ML 2020/04/08 S
-    # Add vars for ensuring proper normalization and reshaping of sequences
-    parser.add_argument("-vars","--variables",dest="variables", nargs='+', type=str, help="Names of input variables.")
-    parser.add_argument("-height",type=int,default=64)
-    parser.add_argument("-width",type = int,default=64)
-    parser.add_argument("-seq_length",type=int,default=20)
-    parser.add_argument("-sequences_per_file",type=int,default=2)
-    args = parser.parse_args()
-    current_path = os.getcwd()
-    #input_dir = "/Users/gongbing/PycharmProjects/video_prediction/splits"
-    #output_dir = "/Users/gongbing/PycharmProjects/video_prediction/data/era5"
-    #partition_names = ['train','val',  'test'] #64,64,3 val has issue#
-
-    ############################################################
-    # CONTROLLING variable! Needs to be adapted manually!!!
-    ############################################################
-    partition = {
-            "train":{
-           #     "2222":[1,2,3,5,6,7,8,9,10,11,12], # Issue due to month 04, it is missing
-                "2010":[1,2,3,4,5,6,7,8,9,10,11,12],
-           #     "2012":[1,2,3,4,5,6,7,8,9,10,11,12],
-                "2013":[1,2,3,4,5,6,7,8,9,10,11,12],
-                "2015":[1,2,3,4,5,6,7,8,9,10,11,12],
-                "2019":[1,2,3,4,5,6,7,8,9,10,11,12]
-                 },
-            "val":
-                {"2017":[1,2,3,4,5,6,7,8,9,10,11,12]
-                 },
-            "test":
-                {"2016":[1,2,3,4,5,6,7,8,9,10,11,12]
-                 }
-            }
-    
-    # initialize MPI
-    comm = MPI.COMM_WORLD
-    my_rank = comm.Get_rank()  # rank of the node
-    p = comm.Get_size()  # number of assigned nodes
-  
-    if my_rank == 0 :
-        # retrieve final statistics first (not parallelized!)
-        # some preparatory steps
-        stat_dir_prefix = args.input_dir
-        varnames        = args.variables
-    
-        vars_uni, varsind, nvars = get_unique_vars(varnames)
-        stat_obj = Calc_data_stat(nvars)                            # init statistic-instance
-    
-        # loop over whole data set (training, dev and test set) to collect the intermediate statistics
-        print("Start collecting statistics from the whole datset to be processed...")
-        for split in partition.keys():
-            values = partition[split]
-            for year in values.keys():
-                file_dir = os.path.join(stat_dir_prefix,year)
-                for month in values[year]:
-                    # process stat-file:
-                    stat_obj.acc_stat_master(file_dir,int(month))  # process monthly statistic-file  
-        
-        # finalize statistics and write to json-file
-        stat_obj.finalize_stat_master(vars_uni)
-        stat_obj.write_stat_json(args.input_dir)
-
-        # organize parallelized partitioning
-        partition_year_month = [] # contains lists of lists; each list includes three elements [train, year, month]
-        partition_names = list(partition.keys())
-        print ("partition_names:",partition_names)
-        broadcast_lists = []
-        for partition_name in partition_names:
-            partition_data = partition[partition_name]        
-            years = list(partition_data.keys())
-            broadcast_lists.append([partition_name,years])
-        for nodes in range(1,p):
-            #ibroadcast_list = [partition_name,years,nodes]
-            #broadcast_lists.append(broadcast_list)
-            comm.send(broadcast_lists,dest=nodes) 
-           
-        message_counter = 1
-        while message_counter <= 12:
-            message_in = comm.recv()
-            message_counter = message_counter + 1 
-            print("Message in from slaver",message_in) 
-            
-        write_sequence_file(args.output_dir,args.seq_length,args.sequences_per_file)
-        
-        #write_sequence_file   
-    else:
-        message_in = comm.recv()
-        print ("My rank,", my_rank)   
-        print("message_in",message_in)
-        # open statistics file and feed it to norm-instance
-        print("Opening json-file: "+os.path.join(args.input_dir,"statistics.json"))
-        with open(os.path.join(args.input_dir,"statistics.json")) as js_file:
-            stats = json.load(js_file)
-        #loop the partitions (train,val,test)
-        for partition in message_in:
-            print("partition on slave ",partition)
-            partition_name = partition[0]
-            save_output_dir =  os.path.join(args.output_dir,partition_name)
-            for year in partition[1]:
-               input_file = "X_" + '{0:02}'.format(my_rank) + ".pkl"
-               temp_file = "T_" + '{0:02}'.format(my_rank) + ".pkl"
-               input_dir = os.path.join(args.input_dir,year)
-               temp_file = os.path.join(input_dir,temp_file )
-               input_file = os.path.join(input_dir,input_file)
-               # create the tfrecords-files
-               read_frames_and_save_tf_records(year=year,month=my_rank,stats=stats,output_dir=save_output_dir, \
-                                               input_file=input_file,temp_input_file=temp_file,vars_in=args.variables, \
-                                               partition_name=partition_name,seq_length=args.seq_length, \
-                                               height=args.height,width=args.width,sequences_per_file=args.sequences_per_file)   
-                                                  
-            print("Year {} finished",year)
-        message_out = ("Node:",str(my_rank),"finished","","\r\n")
-        print ("Message out for slaves:",message_out)
-        comm.send(message_out,dest=0)
-        
-    MPI.Finalize()        
-   
-if __name__ == '__main__':
-     main()
-
diff --git a/video_prediction_savp/video_prediction/datasets/google_robot_dataset.py b/video_prediction_tools/video_prediction/datasets/google_robot_dataset.py
similarity index 100%
rename from video_prediction_savp/video_prediction/datasets/google_robot_dataset.py
rename to video_prediction_tools/video_prediction/datasets/google_robot_dataset.py
diff --git a/video_prediction_savp/video_prediction/datasets/kth_dataset.py b/video_prediction_tools/video_prediction/datasets/kth_dataset.py
similarity index 100%
rename from video_prediction_savp/video_prediction/datasets/kth_dataset.py
rename to video_prediction_tools/video_prediction/datasets/kth_dataset.py
diff --git a/video_prediction_savp/video_prediction/datasets/moving_mnist.py b/video_prediction_tools/video_prediction/datasets/moving_mnist.py
similarity index 97%
rename from video_prediction_savp/video_prediction/datasets/moving_mnist.py
rename to video_prediction_tools/video_prediction/datasets/moving_mnist.py
index ba8556859951e59cdec7d72e4319cb98cabe1ac5..8d1beddcc9c1b54a2cd899326b881a4ba8f53874 100644
--- a/video_prediction_savp/video_prediction/datasets/moving_mnist.py
+++ b/video_prediction_tools/video_prediction/datasets/moving_mnist.py
@@ -1,4 +1,5 @@
 import argparse
+import sys
 import glob
 import itertools
 import os
@@ -8,22 +9,16 @@ import re
 import numpy as np
 import json
 import tensorflow as tf
-from video_prediction.datasets.base_dataset import VarLenFeatureVideoDataset
-# ML 2020/04/14: hack for getting functions of process_netCDF_v2:
-from os import path
-import sys
-sys.path.append(path.abspath('../../workflow_parallel_frame_prediction/'))
-import DataPreprocess.process_netCDF_v2 
-from general_utils import get_unique_vars
-from statistics import Calc_data_stat 
-from metadata import MetaData
-#from base_dataset import VarLenFeatureVideoDataset
-from collections import OrderedDict
 from tensorflow.contrib.training import HParams
 from mpi4py import MPI
-import glob
+from collections import OrderedDict
 import matplotlib.pyplot as plt
 import matplotlib.gridspec as gridspec
+from video_prediction.datasets.base_dataset import VarLenFeatureVideoDataset
+import data_preprocess.process_netCDF_v2 
+from general_utils import get_unique_vars
+from statistics import Calc_data_stat 
+from metadata import MetaData
 
 class MovingMnist(VarLenFeatureVideoDataset):
     def __init__(self, *args, **kwargs):
diff --git a/video_prediction_savp/video_prediction/datasets/softmotion_dataset.py b/video_prediction_tools/video_prediction/datasets/softmotion_dataset.py
similarity index 100%
rename from video_prediction_savp/video_prediction/datasets/softmotion_dataset.py
rename to video_prediction_tools/video_prediction/datasets/softmotion_dataset.py
diff --git a/video_prediction_savp/video_prediction/datasets/sv2p_dataset.py b/video_prediction_tools/video_prediction/datasets/sv2p_dataset.py
similarity index 100%
rename from video_prediction_savp/video_prediction/datasets/sv2p_dataset.py
rename to video_prediction_tools/video_prediction/datasets/sv2p_dataset.py
diff --git a/video_prediction_savp/video_prediction/datasets/ucf101_dataset.py b/video_prediction_tools/video_prediction/datasets/ucf101_dataset.py
similarity index 100%
rename from video_prediction_savp/video_prediction/datasets/ucf101_dataset.py
rename to video_prediction_tools/video_prediction/datasets/ucf101_dataset.py
diff --git a/video_prediction_savp/video_prediction/flow_ops.py b/video_prediction_tools/video_prediction/flow_ops.py
similarity index 100%
rename from video_prediction_savp/video_prediction/flow_ops.py
rename to video_prediction_tools/video_prediction/flow_ops.py
diff --git a/video_prediction_savp/video_prediction/layers/BasicConvLSTMCell.py b/video_prediction_tools/video_prediction/layers/BasicConvLSTMCell.py
similarity index 100%
rename from video_prediction_savp/video_prediction/layers/BasicConvLSTMCell.py
rename to video_prediction_tools/video_prediction/layers/BasicConvLSTMCell.py
diff --git a/video_prediction_savp/video_prediction/layers/__init__.py b/video_prediction_tools/video_prediction/layers/__init__.py
similarity index 100%
rename from video_prediction_savp/video_prediction/layers/__init__.py
rename to video_prediction_tools/video_prediction/layers/__init__.py
diff --git a/video_prediction_savp/video_prediction/layers/layer_def.py b/video_prediction_tools/video_prediction/layers/layer_def.py
similarity index 100%
rename from video_prediction_savp/video_prediction/layers/layer_def.py
rename to video_prediction_tools/video_prediction/layers/layer_def.py
diff --git a/video_prediction_savp/video_prediction/layers/mcnet_ops.py b/video_prediction_tools/video_prediction/layers/mcnet_ops.py
similarity index 100%
rename from video_prediction_savp/video_prediction/layers/mcnet_ops.py
rename to video_prediction_tools/video_prediction/layers/mcnet_ops.py
diff --git a/video_prediction_savp/video_prediction/layers/normalization.py b/video_prediction_tools/video_prediction/layers/normalization.py
similarity index 100%
rename from video_prediction_savp/video_prediction/layers/normalization.py
rename to video_prediction_tools/video_prediction/layers/normalization.py
diff --git a/video_prediction_savp/video_prediction/losses.py b/video_prediction_tools/video_prediction/losses.py
similarity index 100%
rename from video_prediction_savp/video_prediction/losses.py
rename to video_prediction_tools/video_prediction/losses.py
diff --git a/video_prediction_savp/video_prediction/metrics.py b/video_prediction_tools/video_prediction/metrics.py
similarity index 100%
rename from video_prediction_savp/video_prediction/metrics.py
rename to video_prediction_tools/video_prediction/metrics.py
diff --git a/video_prediction_savp/video_prediction/models/__init__.py b/video_prediction_tools/video_prediction/models/__init__.py
similarity index 100%
rename from video_prediction_savp/video_prediction/models/__init__.py
rename to video_prediction_tools/video_prediction/models/__init__.py
diff --git a/video_prediction_savp/video_prediction/models/base_model.py b/video_prediction_tools/video_prediction/models/base_model.py
similarity index 100%
rename from video_prediction_savp/video_prediction/models/base_model.py
rename to video_prediction_tools/video_prediction/models/base_model.py
diff --git a/video_prediction_savp/video_prediction/models/dna_model.py b/video_prediction_tools/video_prediction/models/dna_model.py
similarity index 100%
rename from video_prediction_savp/video_prediction/models/dna_model.py
rename to video_prediction_tools/video_prediction/models/dna_model.py
diff --git a/video_prediction_savp/video_prediction/models/mcnet_model.py b/video_prediction_tools/video_prediction/models/mcnet_model.py
similarity index 100%
rename from video_prediction_savp/video_prediction/models/mcnet_model.py
rename to video_prediction_tools/video_prediction/models/mcnet_model.py
diff --git a/video_prediction_savp/video_prediction/models/networks.py b/video_prediction_tools/video_prediction/models/networks.py
similarity index 100%
rename from video_prediction_savp/video_prediction/models/networks.py
rename to video_prediction_tools/video_prediction/models/networks.py
diff --git a/video_prediction_savp/video_prediction/models/non_trainable_model.py b/video_prediction_tools/video_prediction/models/non_trainable_model.py
similarity index 100%
rename from video_prediction_savp/video_prediction/models/non_trainable_model.py
rename to video_prediction_tools/video_prediction/models/non_trainable_model.py
diff --git a/video_prediction_savp/video_prediction/models/savp_model.py b/video_prediction_tools/video_prediction/models/savp_model.py
similarity index 100%
rename from video_prediction_savp/video_prediction/models/savp_model.py
rename to video_prediction_tools/video_prediction/models/savp_model.py
diff --git a/video_prediction_savp/video_prediction/models/sna_model.py b/video_prediction_tools/video_prediction/models/sna_model.py
similarity index 100%
rename from video_prediction_savp/video_prediction/models/sna_model.py
rename to video_prediction_tools/video_prediction/models/sna_model.py
diff --git a/video_prediction_savp/video_prediction/models/sv2p_model.py b/video_prediction_tools/video_prediction/models/sv2p_model.py
similarity index 100%
rename from video_prediction_savp/video_prediction/models/sv2p_model.py
rename to video_prediction_tools/video_prediction/models/sv2p_model.py
diff --git a/video_prediction_savp/video_prediction/models/vanilla_convLSTM_model.py b/video_prediction_tools/video_prediction/models/vanilla_convLSTM_model.py
similarity index 100%
rename from video_prediction_savp/video_prediction/models/vanilla_convLSTM_model.py
rename to video_prediction_tools/video_prediction/models/vanilla_convLSTM_model.py
diff --git a/video_prediction_savp/video_prediction/models/vanilla_vae_model.py b/video_prediction_tools/video_prediction/models/vanilla_vae_model.py
similarity index 100%
rename from video_prediction_savp/video_prediction/models/vanilla_vae_model.py
rename to video_prediction_tools/video_prediction/models/vanilla_vae_model.py
diff --git a/video_prediction_savp/video_prediction/ops.py b/video_prediction_tools/video_prediction/ops.py
similarity index 100%
rename from video_prediction_savp/video_prediction/ops.py
rename to video_prediction_tools/video_prediction/ops.py
diff --git a/video_prediction_savp/video_prediction/rnn_ops.py b/video_prediction_tools/video_prediction/rnn_ops.py
similarity index 100%
rename from video_prediction_savp/video_prediction/rnn_ops.py
rename to video_prediction_tools/video_prediction/rnn_ops.py
diff --git a/video_prediction_tools/video_prediction/utils/README.md b/video_prediction_tools/video_prediction/utils/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..983d60f0c1b789762b9a2c02a6260fbcf1af188b
--- /dev/null
+++ b/video_prediction_tools/video_prediction/utils/README.md
@@ -0,0 +1,2 @@
+
+This file will describe the source code in utils on which the model architectures are built.
diff --git a/video_prediction_savp/video_prediction/utils/__init__.py b/video_prediction_tools/video_prediction/utils/__init__.py
similarity index 100%
rename from video_prediction_savp/video_prediction/utils/__init__.py
rename to video_prediction_tools/video_prediction/utils/__init__.py
diff --git a/video_prediction_savp/video_prediction/utils/ffmpeg_gif.py b/video_prediction_tools/video_prediction/utils/ffmpeg_gif.py
similarity index 100%
rename from video_prediction_savp/video_prediction/utils/ffmpeg_gif.py
rename to video_prediction_tools/video_prediction/utils/ffmpeg_gif.py
diff --git a/video_prediction_savp/video_prediction/utils/gif_summary.py b/video_prediction_tools/video_prediction/utils/gif_summary.py
similarity index 100%
rename from video_prediction_savp/video_prediction/utils/gif_summary.py
rename to video_prediction_tools/video_prediction/utils/gif_summary.py
diff --git a/video_prediction_savp/video_prediction/utils/html.py b/video_prediction_tools/video_prediction/utils/html.py
similarity index 100%
rename from video_prediction_savp/video_prediction/utils/html.py
rename to video_prediction_tools/video_prediction/utils/html.py
diff --git a/video_prediction_savp/video_prediction/utils/mcnet_utils.py b/video_prediction_tools/video_prediction/utils/mcnet_utils.py
similarity index 100%
rename from video_prediction_savp/video_prediction/utils/mcnet_utils.py
rename to video_prediction_tools/video_prediction/utils/mcnet_utils.py
diff --git a/video_prediction_savp/video_prediction/utils/tf_utils.py b/video_prediction_tools/video_prediction/utils/tf_utils.py
similarity index 100%
rename from video_prediction_savp/video_prediction/utils/tf_utils.py
rename to video_prediction_tools/video_prediction/utils/tf_utils.py
diff --git a/workflow_parallel_frame_prediction/DataExtraction/helper_single_master.py b/workflow_parallel_frame_prediction/DataExtraction/helper_single_master.py
deleted file mode 100644
index a26d76395cca0ecb6d49ba53684e72f5d5d7f5b0..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/DataExtraction/helper_single_master.py
+++ /dev/null
@@ -1,245 +0,0 @@
-from mpi4py import MPI
-from os import walk
-import os
-import sys
-import subprocess
-import logging
-import time
-import hashlib
-import argparse
-from os import listdir
-from os.path import isfile, join
-
-# initialize MPI
-comm = MPI.COMM_WORLD
-my_rank = comm.Get_rank()  # rank of the node
-p = comm.Get_size()  # number of assigned nodes
-my_rank = comm.Get_rank()  # rank of the node
-
-
-# ======================= List of functions ====================================== #
-if my_rank == 0:  # node is master
-
-    logger = logging.getLogger(__file__)
-    logger.addHandler(logging.StreamHandler(sys.stdout))
-
-
-def directory_scanner(source_path,load_level):
-    # Take a look inside a directory and make a list of all the folders, sub-directories, the number of files and their size
-    # NOTE : sub-directories nested inside sub-directories are neglected!
-    # NOTE : the load level discriminates between sub-directories and files
-
-    dir_detail_list = []  # directories details
-    list_items_to_process = []
-    total_size_source = 0
-    total_num_files = 0
-    list_directories = []
-
-    ## =================== Here will be for the Files ================= ##
-
-    if load_level == 1:
-
-        # Listing all the files in the directory
-        for  dirpath, dirnames, filenames in os.walk(source_path):
-            list_items_to_process.extend(filenames)
-
-        for f in list_items_to_process :
-            path = source_path +"/"+ str(f)
-            statinfo = os.stat(path)
-            size = statinfo.st_size
-            total_size_source = total_size_source + int(size)
-
-        total_num_files  = len(list_items_to_process) # number of the files in the source
-        total_num_directories = int(0)      # TODO need to unify the concept as the number of items
-
-    ## ===================== Here will be for the directories ========== ##
-
-    if load_level == 0:
-        list_directories = os.listdir(source_path)
-
-        for d in list_directories:
-            path = source_path + d
-            if os.path.isdir(path):
-                list_items_to_process.append(d)
-                list_items_to_process.sort()
-                num_files = 0
-                # size of the files and subdirectories
-                size_dir = subprocess.check_output(['du', '-sc', path])
-                splitted = size_dir.split()  # first item is the size of the folder
-                size = (splitted[0])
-                num_files = len([f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))])
-                dir_detail_list.extend([d, size, num_files])
-                total_num_files = total_num_files + int(num_files)
-                total_size_source = total_size_source + int(size)
-
-            else:
-                message = path,'does not exist'
-                logging.error(message)
-
-
-        total_num_directories = int(len(list_directories))
-
-    ## ======================= End of the Directory case =================== ##
-    total_size_source = float(total_size_source / 1000000)  # human readable size source
-
-    logger.info("=== Directory Scanner output ===")
-    message = 'Total size of the source directory is:' + str(total_size_source) + 'Gb.'
-    logger.info(message)
-    message = "Total number of the files in the source directory is: " + str(total_num_files)
-    logger.info(message)
-    message = "Total number of the directories  in the source directory is: " + str(total_num_directories)
-    logger.info(message)
-
-    # Unifying the naming of this section for both cases : Sub - Directory or File
-    # dir_detail_list == > Including the name of the directories, size and number of the files in each directory / for files is empty
-    # list_items_to_process    === > List of items to process  (Sub-Directories / Files)
-    # total_size_source  === > Total size of the items to process
-    # total_num_files    === > for Sub - Directories : sum of all files in different directories / for Files is sum of all
-    # total_num_directories  === > for Files = 0
-
-    return dir_detail_list, list_items_to_process, total_size_source, total_num_files, total_num_directories
-
-# Source - Directory
-# Destination - Directory
-# Dir_detail_list
-# list_items_to_process
-# load level
-
-def data_structure_builder (source_dir, destination_dir, dir_detail_list, list_items_to_process,load_level):
-
-
-    if not os.path.exists(destination_dir):  # check if the Destination dir. is existing
-        os_command = ("mkdir " + destination_dir)
-        os.system(os_command)
-        logger.info('destination path is created')
-    else:
-        logger.info('The destination path exists')
-
-
-    os.chdir(destination_dir) # change the directory to the destination
-
-    if load_level == 0:
-        logging.info('Load Level = 0 : Data structure will be built')
-
-        for dir_name in list_items_to_process:
-            #print(dir_name)
-            dir_path = destination_dir + dir_name
-
-            # TODO : this can be cleaned up to use the predefined os.mkdir() function
-            if not os.path.exists(dir_path):
-                #print(dir_name  + " will be created ")
-                os_command = ("mkdir " + dir_name)
-                os.system(os_command)
-                logging.info(dir_name  + " is created ")
-
-
-    if load_level == 1:
-        logger.info('Load Level = 1 : File will be processed')
-
-    return
-
-
-
-def load_distributor(dir_detail_list, list_items_to_process, total_size_source, total_num_files, total_num_directories,load_level, processor_num):
-    firs_slave_processor_id = 1
-    # create a dictionary with one key per slave processor
-    # each directory/file name is then appended to one of the keys in round-robin fashion
-    # here we also define the first available slave node
-    transfer_dict = dict.fromkeys(list(range(firs_slave_processor_id, processor_num)))
-    print(transfer_dict)
-    logger.info("The follwoing is in the load Balancer ")
-    logger.info(transfer_dict)
-    logger.info(list_items_to_process)
-    logger.info(total_num_directories)
-    logger.info(total_num_files)
-
-    # package_counter = 0 possibility to use the counter to fill
-    counter = firs_slave_processor_id  # this is the ID of the first available slave to p!
-
-    if load_level == 0:
-        for Directory_counter in range(0, total_num_directories):
-            if transfer_dict[counter] is None:  # if the value for the key is None add to it
-                transfer_dict[counter] = list_items_to_process[Directory_counter]
-            else:  # if key has a value join the new value to the old value
-                transfer_dict[counter] = "{};{}".format(transfer_dict[counter], list_items_to_process[Directory_counter])
-            counter = counter + 1
-            if counter == processor_num:
-                counter = firs_slave_processor_id
-
-    if load_level == 1:
-        for File_counter in range(0, total_num_files):
-            if transfer_dict[counter] is None:  # if the value for the key is None add to it
-                #print(" M1: New key made for a free processor number {my_rank}".format(my_rank = counter))
-                    # statement for the case where more processors are available than files
-                if counter > len(list_items_to_process) + (firs_slave_processor_id - 1 ):
-                    transfer_dict[counter] = None
-                else:
-                    transfer_dict[counter] = list_items_to_process[File_counter]
-
-
-
-            else:  # if key has a value join the new value to the old value
-                transfer_dict[counter] = "{};{}".format(transfer_dict[counter], list_items_to_process[File_counter])
-            counter = counter + 1
-            if counter == processor_num:
-                counter = firs_slave_processor_id
-
-    logging.info(transfer_dict)
-    return transfer_dict
-
-def sync_file(source_path, destination_dir, job_name, rsync_status):
-    rsync_msg = ("rsync -r " + source_path + job_name + "/" + " " + destination_dir + "/" + job_name)
-    # print('Node:', str(my_rank),'will execute :', rsync_str,'\r\n')
-    # sync the assigned folder
-
-    if rsync_status == 1:
-        os.system(rsync_msg)
-
-    return
-
-
-
-def hash_directory(source_path,job_name,hash_rep_file,input_status):
-    #sha256_hash = hashlib.sha256()
-    md5_hash = hashlib.md5()
-
-    ########## Create a hashed file repository for the directory(ies) assigned to the node #######
-    hash_repo_text = input_status + "_"+job_name +"_hashed.txt"
-    os.chdir(hash_rep_file)
-    hashed_text_note=open(hash_repo_text,"w+")
-
-    # job_name is the name of the subdirectory that is going to be processed
-    directory_to_process = source_path  + job_name
-    # print(directory_to_process)
-    files_list = []
-    for dirpath, dirnames, filenames in os.walk(directory_to_process):
-        files_list.extend(filenames)
-
-    os.chdir(directory_to_process) # change to the working directory
-
-    for file_to_process in filenames:
-
-        ## ======= this is the sha256 checksum ========= #
-        #with open(file_to_process,"rb") as f:
-        #    # Read and update hash in chunks of 4K
-        #   for byte_block in iter(lambda: f.read(4096),b""):
-        #       sha256_hash.update(byte_block)
-        #       hashed_file = sha256_hash.hexdigest()
-
-        with open(file_to_process,"rb") as f:
-            # Read and update hash in chunks of 4K
-           for byte_block in iter(lambda: f.read(4096),b""):
-               md5_hash.update(byte_block)
-               hashed_file = md5_hash.hexdigest()
-
-        hashed_text_note.write(hashed_file)
-
-    return
-
-def md5(fname):
-    md5_hash = hashlib.md5()
-    with open(fname,"rb") as f:
-        # Read and update hash in chunks of 4K
-        for byte_block in iter(lambda: f.read(4096),b""):
-            md5_hash.update(byte_block)
-    return md5_hash.hexdigest()
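The load_distributor removed above follows the simple round-robin scheme its comments describe: one dictionary key per slave processor, with work items appended to the keys in turn. A minimal standalone sketch of that idea, with hypothetical names and without the MPI/string-joining details:

    # round-robin assignment of work items to slave ranks 1..num_procs-1 (rank 0 is the master)
    def round_robin(items, num_procs, first_slave=1):
        assignment = {rank: [] for rank in range(first_slave, num_procs)}
        rank = first_slave
        for item in items:
            assignment[rank].append(item)
            rank = first_slave if rank + 1 == num_procs else rank + 1
        return assignment

    # round_robin(["2010", "2013", "2015", "2016", "2017"], num_procs=4)
    # -> {1: ['2010', '2016'], 2: ['2013', '2017'], 3: ['2015']}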
diff --git a/workflow_parallel_frame_prediction/DataExtraction/main_single_master.py b/workflow_parallel_frame_prediction/DataExtraction/main_single_master.py
deleted file mode 100644
index fda72c671d2804e87b121a2bd62038890a7f5161..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/DataExtraction/main_single_master.py
+++ /dev/null
@@ -1,257 +0,0 @@
-from mpi4py import MPI
-from os import walk
-import sys
-import subprocess
-import logging
-import time
-import shutil
-import glob
-import argparse
-import os
-
-
-from helper_single_master import directory_scanner
-from helper_single_master import load_distributor
-from helper_single_master import hash_directory
-from helper_single_master import data_structure_builder
-from helper_single_master import md5
-
-from prepare_era5_data import prepare_era5_data_one_file
-
-# How to Run it!
-# mpirun -np 6 python mpi_stager_v2.py
-# mpiexec -np 6 python mpi_stager_v2.py
-
-
-def main():
-    parser=argparse.ArgumentParser()
-    parser.add_argument("--job_id",type=int,default=100)
-    parser.add_argument("--source_dir",type=str,default="//home/a.mozaffari/data_era5/2017/")
-    parser.add_argument("--destination_dir",type=str,default="/home/a.mozaffari/data_dest/")
-    parser.add_argument("--log_temp",type=str,default="log_temp")
-    parser.add_argument("--checksum_status",type=int,default = 0)
-    parser.add_argument("--rsync_status",type=int,default=0)
-    parser.add_argument("--load_level",type=int,default=0)
-    parser.add_argument("--clear_destination",type=int,default=1)
-    args = parser.parse_args()
-    # for the local machine test
-    current_path = os.getcwd()
-    job_id = args.job_id
-    source_dir = args.source_dir
-    destination_dir = args.destination_dir
-    checksum_status = args.checksum_status
-    rsync_status = args.rsync_status
-    clear_destination = args.clear_destination
-    log_temp = args.log_temp
-
-
-    # for the local machine test
-    current_path = os.path.dirname(os.path.abspath(__file__))
-    os.chdir(current_path)
-    time.sleep(0)
-
-    # initialize MPI
-    comm = MPI.COMM_WORLD
-    my_rank = comm.Get_rank()  # rank of the node
-    p = comm.Get_size()  # number of assigned nodes
-    firs_slave_processor_id = 1
-
-
-    # ==================================== Master Logging ==================================================== #
-    # DEBUG: Detailed information, typically of interest only when diagnosing problems.
-    # INFO: Confirmation that things are working as expected.
-    # WARNING: An indication that something unexpected happened, or indicative of some problem in the near future.
-    # ERROR: Due to a more serious problem, the software has not been able to perform some function.
-    # CRITICAL: A serious error, indicating that the program itself may be unable to continue running.
-    # It will copy the logging messages to the stdout, for the case of container version on HPC
-
-    if my_rank == 0:  # node is master
-
-    # delete the general logger if exist
-        logger_path = current_path + '/distribution_job_{job_id}.log'.format(job_id=job_id)
-        if os.path.isfile(logger_path):
-            print("Logger Exists -> Logger Deleted")
-            os.remove(logger_path)
-        logging.basicConfig(filename='distribution_job_{job_id}.log'.format(job_id=job_id), level=logging.DEBUG,
-                            format='%(asctime)s:%(levelname)s:%(message)s')
-        logger = logging.getLogger(__file__)
-        logger.addHandler(logging.StreamHandler(sys.stdout))
-        start = time.time()  # start of the MPI
-
-# check the existence of the  source path :
-    if not os.path.exists(source_dir):  # check if the source dir. is existing
-        if my_rank == 0:
-            logger.critical('The source does not exist')
-            message_out = "Source : {source} is not existing -> Abort".format(source=source_dir)
-            logger.info('exit status : 1')
-        sys.exit(1)
-
-# Check if the destination is existing, if so, it will delete and recreate the destination_dir
-    if os.path.exists(destination_dir):
-        if my_rank == 0:
-            logger.info('The destination exist')
-            if clear_destination == 1:
-                shutil.rmtree(destination_dir)
-                os.mkdir(destination_dir)
-                logger.critical("Destination : {destination} exist -> Remove and Re-Create".format(destination=destination_dir))
-                print("Destination : {destination} exist -> Remove and Re-Create".format(destination=destination_dir))
-
-            else:
-                logger.critical("Destination : {destination} exist -> will not be removed (caution : overwrite)".format(destination=destination_dir))
-                print("Destination : {destination} exist -> will not be rmeoved (caution : overwrite)".format(destination=destination_dir))
-                
-
-
-    # 20200630 +++ Scarlet
-    else: 
-        if my_rank == 0:
-            os.makedirs(destination_dir) #, exist_ok=True)
-            logger.info("Destination : {destination} does not exist -> Create".format(destination=destination_dir))
-            print("Destination : {destination} does not exist -> Create".format(destination=destination_dir))
-
-    # 20200630 --- Scarlet
-
-
-    # Create a log folder for slave-nodes to write down their processes
-    slave_log_path = os.path.join(destination_dir,log_temp)
-
-    if my_rank == 0:
-        if os.path.exists(slave_log_path) == False:
-            # 20200630 Scarlet
-            #os.mkdir(slave_log_path)
-            os.makedirs(slave_log_path)
-
-    if my_rank == 0:  # node is master
-
-    # ==================================== Master : Directory scanner {Parent level load level = 0}  ================================= #
-
-        logger.info("The source path is  : {path}".format(path=source_dir))
-        logger.info("The destination path is  : {path}".format(path=destination_dir))
-        logger.info("==== Directory scanner : start ====")
-        load_level = 0
-        ret_dir_scanner = directory_scanner(source_dir,load_level)
-    #print(ret_dir_scanner)
-
-    # Unifying the naming of this section for both cases : Sub - Directory or File
-    # dir_detail_list == > Including the name of the directories, size and number of the files in each directory / for files is empty
-    # list_items_to_process    === > List of items to process  (Sub-Directories / Files)
-    # total_size_source  === > Total size of the items to process
-    # total_num_files    === > for Sub - Directories : sum of all files in different directories / for Files is sum of all
-    # total_num_directories  === > for Files = 0
-
-        dir_detail_list = ret_dir_scanner[0]
-        list_items_to_process = ret_dir_scanner[1]
-        total_size_source = ret_dir_scanner[2]
-        total_num_files = ret_dir_scanner[3]
-        total_num_dir = ret_dir_scanner[4]
-        logger.info("==== Directory scanner : end ====")
-
-    # ================================= Master : Data Structure Builder {Parent level load level = 0} ========================= #
-
-        logger.info("==== Data Structure Builder : start  ====")
-        data_structure_builder(source_dir, destination_dir, dir_detail_list, list_items_to_process,load_level)
-        logger.info("==== Data Structure Builder : end  ====")
-        # message to inform the slaves that they will receive #Batch of messages including the logger_p
-        batch_info = list_items_to_process
-        for slaves in range (1,p):
-            comm.send(batch_info, dest=slaves)
-
-        for batch_counter in range (0,len(batch_info)):
-            #relative_source =  source_dir + str(batch_info[batch_counter]) +"/"
-            relative_source = os.path.join(source_dir,str(batch_info[batch_counter]))
-            print(relative_source)
-            logger.info("MA{my_rank}: Next to be processed is {task} loacted in  {path} ".format(my_rank = my_rank,task=batch_info[batch_counter], path=relative_source))
-            load_level = 1 # it will process the files in the relative source
-
-        #________ Directory Scanner ______#
-            relative_ret_dir_scanner = directory_scanner(relative_source,load_level)
-            relative_dir_detail_list = relative_ret_dir_scanner[0]
-            relative_list_items_to_process = relative_ret_dir_scanner[1]
-            relative_total_size_source = relative_ret_dir_scanner[2]
-            relative_total_num_files = relative_ret_dir_scanner[3]
-            relative_total_num_dir = relative_ret_dir_scanner[4]
-        #________ Load Distribution ________#
-            relative_ret_load_balancer = load_distributor(relative_dir_detail_list, relative_list_items_to_process, relative_total_size_source, relative_total_num_files, relative_total_num_dir,load_level, p)
-            relative_transfer_dict = relative_ret_load_balancer
-            logger.info(relative_transfer_dict)
-
-        #________ Communication ________#
-
-            for processor in range(firs_slave_processor_id, p):
-                broadcast_list = relative_transfer_dict[processor]
-                comm.send(broadcast_list, dest=processor)
-
-        receive_counter = 0
-        total_number_messages = (p-1) * len(batch_info) - 1
-        while receive_counter <= total_number_messages:
-            message_in = comm.recv()
-            logger.info("MA{my_rank}: S{message_in} ".format(my_rank=my_rank,message_in=message_in))
-            receive_counter = receive_counter + 1
-
-
-        # Cleaning up the slaves' temporary log directory, if it is empty.
-        if len(os.listdir(slave_log_path) ) == 0:
-            print("Temporary log directory is empty, it is deleted")
-            os.removedirs(slave_log_path)
-
-
-        end = time.time()
-        termination_message = "MA{my_rank}: Successfully terminated with total time : {wall_time}".format(my_rank=my_rank,wall_time= end-start)
-        logger.info(termination_message)
-        sys.exit(0)
-
-    else:  # Processor is slave
-
-    # ============================================= Slave : Send / Receive ============================================ #
-    # receive the #Batch of tasks that will be processed
-        batch_info = comm.recv(source = 0)
-        #print("S{my_rank} will receive {todo_message} batch of task to process".format(my_rank=my_rank, todo_message=len(batch_info)))
-        batch_counter = 0
-
-    # here will be a loop over all the batches
-
-        while batch_counter <= len(batch_info) -1:
-            message_in = comm.recv(source = 0)
-            relative_source_directory = os.path.join(source_dir,str(batch_info[batch_counter]))
-            relative_destination_directory = os.path.join(destination_dir,str(batch_info[batch_counter]))
-
-            if message_in is None:  # in case more processors are assigned than directories
-                slave_out_message = "{my_rank} is idle".format(my_rank=my_rank)
-                # comm.send(message_out, dest=1)
-
-            else: # if the Slave node has joblist to do
-                job_list = message_in.split(';')
-                for job_count in range(0, len(job_list)):
-                    job = job_list[job_count] # job is the name of the directory(ies) assigned to slave_node
-                    #print(job)
-                    if rsync_status == 1:
-                        # prepare the rsync command to be executed by the worker node
-                        rsync_message = "rsync {relative_source_directory}/{job} {relative_destination_directory}/{job}".format(relative_source_directory=relative_source_directory,job=job, relative_destination_directory=relative_destination_directory)
-                        os.system(rsync_message)
-                        #slave_out_message= " RSYNC process"
-                    else :
-                        ## @Bing here is the job for the slaves
-                        print("S{my_rank} will execute era5 preperation on {job}".format(my_rank=my_rank, job=job))
-                        prepare_era5_data_one_file(src_file=job,directory_to_process=relative_source_directory, target=job, target_dir=relative_destination_directory)
-
-
-
-                        #if job.endswith(".nc"):
-                        #    if os.path.exists(os.path.join(relative_destination_directory, job)):
-                        #        print("{job} is has been processed in directory {directory}".format(job=job,directory=relative_destination_directory))
-                        #else:
-                        #    prepare_era5_data_one_file(src_file=job,directory_to_process=relative_source_directory, target=job, target_dir=relative_destination_directory)
-                        #    print("File {job} in directory {directory} has been processed in directory".format(job=job,directory=relative_destination_directory))
-                        #
-                        #slave_out_message = " {in_message} process".format(in_message=my_rank)
-                        # Generate a hash of the output
-
-            message_out = "{my_rank}: is finished the {in_message} .".format(my_rank=my_rank,in_message=batch_info[batch_counter])
-            comm.send(message_out, dest=0)
-            batch_counter = batch_counter + 1
-
-    MPI.Finalize()
-
-
-if __name__ == "__main__":
-    main()
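The script removed above is organized as a master/worker pattern over blocking mpi4py messages: rank 0 scans the source directory, sends each slave rank its share of the items, and then collects one completion message per processed batch. A minimal sketch of that messaging pattern (illustrative only, assuming it is launched with e.g. mpirun -np 4):

    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()

    if rank == 0:                                   # master
        jobs = ["2016", "2017", "2019"]             # hypothetical work items
        for worker in range(1, size):               # round-robin split over the workers
            comm.send(jobs[worker - 1::size - 1], dest=worker)
        for _ in range(1, size):                    # one completion message per worker
            print(comm.recv())
    else:                                           # worker
        my_jobs = comm.recv(source=0)
        # ... process my_jobs here ...
        comm.send("rank {} finished {}".format(rank, my_jobs), dest=0)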
diff --git a/workflow_parallel_frame_prediction/DataExtraction/readme.md b/workflow_parallel_frame_prediction/DataExtraction/readme.md
deleted file mode 100644
index 9e97dae81e2f5aa45e3cc676b2b90a1fa318145b..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/DataExtraction/readme.md
+++ /dev/null
@@ -1,2 +0,0 @@
-`source create_env_zam347.sh {MPI}` <br/>
-`mpirun -np {number of processors max 13 on zam347} python main_single_master.py`
\ No newline at end of file
diff --git a/workflow_parallel_frame_prediction/DataExtraction/submitJob.sh b/workflow_parallel_frame_prediction/DataExtraction/submitJob.sh
deleted file mode 100755
index 2e55e377208a965d6f334a17441c573aa983fc8e..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/DataExtraction/submitJob.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash -x
-#SBATCH --account=deepacf
-#SBATCH --nodes=1
-#SBATCH --ntasks=1
-##SBATCH --ntasks-per-node=1
-#SBATCH --cpus-per-task=1
-#SBATCH --output=pystager-out.%j
-#SBATCH --error=pystager-err.%j
-#SBATCH --time=00:20:00
-#SBATCH --partition=devel
-#SBATCH --mail-type=ALL
-#SBATCH --mail-user=b.gong@fz-juelich.de
-##jutil env activate -p deepacf
-
-module --force purge 
-module /usr/local/software/jureca/OtherStages
-module load Stages/2019a
-module load Intel/2019.3.199-GCC-8.3.0  ParaStationMPI/5.2.2-1
-module load mpi4py/3.0.1-Python-3.6.8
-
-#srun python mpi_stager_v2.py --source_dir /p/fastdata/slmet/slmet111/met_data/ecmwf/era5/#nc/2017/  --destination_dir /p/scratch/deepacf/bing/extractedData
-srun python Extract_data_for_testing.py
diff --git a/workflow_parallel_frame_prediction/DataPostprocess/Stager_devel_N_24_evaluation.sh b/workflow_parallel_frame_prediction/DataPostprocess/Stager_devel_N_24_evaluation.sh
deleted file mode 100755
index 1aa0ae9aff1f939186c9499674385fcb4caceab6..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/DataPostprocess/Stager_devel_N_24_evaluation.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-#SBATCH --account=jjsc42
-# budget account where contingent is taken from; TASKS = NODES * GPUS_PER_NODE
-#SBATCH --nodes=3
-#SBATCH --ntasks-per-node=4
-#SBATCH --ntasks=12
-# can be omitted if --nodes and --ntasks-per-node
-# are given
-# SBATCH --cpus-per-task=1
-# for OpenMP/hybrid jobs only
-#SBATCH --output=horovod-4ntasks%j.out
-# if keyword omitted: Default is slurm-%j.out in
-# the submission directory (%j is replaced by
-# the job ID).
-#SBATCH --error=horovod-4ntasks%j.err
-# if keyword omitted: Default is slurm-%j.out in
-# the submission directory.
-#SBATCH --time=20:00:00
-#SBATCH --gres=gpu:4
-#SBATCH --partition=gpus
-#SBATCH --mail-user=b.gong@fz-juelich.de
-#SBATCH --mail-type=ALL
-
-#create a folder to save the output
-
-module --force purge
-module --force  purge
-module load Stages/Devel-2019a
-module load GCC/8.3.0
-module load MVAPICH2/2.3.2-GDR
-#module /usr/local/software/jureca/OtherStages
-module load Stages/2019a
-module load GCCcore/.8.3.0
-module load cuDNN/7.5.1.10-CUDA-10.1.105
-module load Horovod/0.16.2-GPU-Python-3.6.8
-module load Keras/2.2.4-GPU-Python-3.6.8
-#module load Intel/2019.3.199-GCC-8.3.0  ParaStationMPI/5.2.2-1-mt
-#module load mpi4py/3.0.1-Python-3.6.8
-
-srun python3.6 kitti_evaluate_parallel.py
diff --git a/workflow_parallel_frame_prediction/DataPostprocess/data_utils.py b/workflow_parallel_frame_prediction/DataPostprocess/data_utils.py
deleted file mode 100755
index 8751fc42e36d74c81c328702db8569013ba51a69..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/DataPostprocess/data_utils.py
+++ /dev/null
@@ -1,164 +0,0 @@
-import hickle as hkl
-import numpy as np
-from keras import backend as K
-from keras.preprocessing.image import Iterator
-
-
-import inspect
-print(inspect.getmembers(hkl,predicate=inspect.ismethod))
-
-# Data generator that creates sequences for input into PredNet.
-class SequenceGenerator(Iterator):
-    def __init__(self, data_file, source_file, nt,
-                 batch_size=8, shuffle=False, seed=None,
-                 output_mode='error', sequence_start_mode='all', N_seq=None,
-                 data_format=K.image_data_format()):
-        self.X = hkl.load(data_file)  # X will be like (n_images, nb_cols, nb_rows, nb_channels)
-        self.sources = hkl.load(source_file) # source for each image so when creating sequences can assure that consecutive frames are from same video
-        self.nt = nt
-        self.batch_size = batch_size
-        self.data_format = data_format
-        assert sequence_start_mode in {'all', 'unique'}, 'sequence_start_mode must be in {all, unique}'
-        self.sequence_start_mode = sequence_start_mode
-        assert output_mode in {'error', 'prediction'}, 'output_mode must be in {error, prediction}'
-        self.output_mode = output_mode
-
-        if self.data_format == 'channels_first':
-            self.X = np.transpose(self.X, (0, 3, 1, 2))
-        self.im_shape = self.X[0].shape
-
-        if self.sequence_start_mode == 'all':  # allow for any possible sequence, starting from any frame
-            #bing
-            #self.possible_starts = np.array([i for i in range(self.X.shape[0] - self.nt) if self.sources[i] == self.sources[i + self.nt - 1]])
-            self.possible_starts = np.array([i for i in range(self.X.shape[0] - self.nt)])
-        elif self.sequence_start_mode == 'unique':  #create sequences where each unique frame is in at most one sequence
-            curr_location = 0
-            possible_starts = []
-            while curr_location < self.X.shape[0] - self.nt + 1:
-                if self.sources[curr_location] == self.sources[curr_location + self.nt - 1]:
-                    possible_starts.append(curr_location)
-                    curr_location += self.nt
-                else:
-                    curr_location += 1
-            self.possible_starts = possible_starts
-
-        if shuffle:
-            self.possible_starts = np.random.permutation(self.possible_starts)
-        if N_seq is not None and len(self.possible_starts) > N_seq:  # select a subset of sequences if want to
-            self.possible_starts = self.possible_starts[:N_seq]
-        self.N_sequences = len(self.possible_starts)
-        print("N_sequences", self.N_sequences)
-        super(SequenceGenerator, self).__init__(len(self.possible_starts), batch_size, shuffle, seed)
-
-    def __getitem__(self, null):
-        return self.next()
-
-    def next(self):
-        with self.lock:
-            current_index = (self.batch_index * self.batch_size) % self.n
-            index_array, current_batch_size = next(self.index_generator), self.batch_size
-        batch_x = np.zeros((current_batch_size, self.nt) + self.im_shape, np.float32)
-        for i, idx in enumerate(index_array):
-            idx = self.possible_starts[idx]
-            batch_x[i] = self.preprocess(self.X[idx:idx+self.nt])
-        if self.output_mode == 'error':  # model outputs errors, so y should be zeros
-            batch_y = np.zeros(current_batch_size, np.float32)
-        elif self.output_mode == 'prediction':  # output actual pixels
-            batch_y = batch_x
-        return batch_x, batch_y
-
-    def preprocess(self, X):
-        ### Normalization after extrema cut off: ###
-        #cut maxs & mins to mean+3*std & mean-3*std of training set for each parameter
-        #x_cut = np.zeros(shape=X.shape)
-        #x_cut = X*1 #pass X by value and not by reference
-        #x_cut[:,:,:,0][X[:,:,:,0]>311.5]=311.5 #set T2 upper limit
-        #x_cut[:,:,:,0][X[:,:,:,0]<258.9]=258.9 #set T2 lower limit
-        #x_cut[:,:,:,1][X[:,:,:,1]>104635.2]=104635.2 #set GP upper limit
-        #x_cut[:,:,:,1][X[:,:,:,1]<98205.6]=98205.6 #set GP lower limit ###Caution: Drastical cut ###
-        #x_cut[:,:,:,2][X[:,:,:,2]>6209.5]=6209.5 #set GPH upper limit ###Caution: Unnecessary as it succeeds max GPH ###
-        #x_cut[:,:,:,2][X[:,:,:,2]<5005.8]=5005.8 #set GPH lower limit
-        #normalize X based on max and min values(equals upper and lower limits except highCutGPH)
-        #x_processed = np.zeros(shape=X.shape)
-        #x_processed[:,:,:,0] = (x_cut[:,:,:,0]-258.9)/(311.5-258.9)
-        #x_processed[:,:,:,1] = (x_cut[:,:,:,1]-98205.6)/(104635.2-98205.6)
-        #x_processed[:,:,:,2] = (x_cut[:,:,:,2]-5005.8)/(6007.097417091836-5005.8) #GPH max stays; see above
-        
-        ### 'Standard' normalization: (x-min(x))/(max(x)-min(x)) ###
-        #x_processed = np.zeros(shape=X.shape)
-        #x_processed[:,:,:,0] = (X[:,:,:,0]-235.2141571044922)/(321.46630859375-235.2141571044922)
-        #x_processed[:,:,:,1] = (X[:,:,:,1]-93401.125)/(105391.4375-93401.125)
-        #x_processed[:,:,:,2] = (X[:,:,:,2]-4836.070232780612)/(6007.097417091836-4836.070232780612)
-
-        ### t2only 'Standard' normalization: (x-min(x))/(max(x)-min(x)) ###
-        #x_processed = np.zeros(shape=X.shape)
-        #x_processed[:,:,:,0] = (X[:,:,:,0]-235.2141571044922)/(321.46630859375-235.2141571044922)
-        #x_processed[:,:,:,1] = (X[:,:,:,1]-235.2141571044922)/(321.46630859375-235.2141571044922)
-        #x_processed[:,:,:,2] = (X[:,:,:,2]-235.2141571044922)/(321.46630859375-235.2141571044922)
-
-        ### t2_2MSL_1 'standard' normalization:
-        #x_processed = np.zeros(shape=X.shape)
-        #x_processed[:,:,:,0] = (X[:,:,:,0]-235.2141571044922)/(321.46630859375-235.2141571044922)
-        #x_processed[:,:,:,1] = (X[:,:,:,1]-235.2141571044922)/(321.46630859375-235.2141571044922)
-        #x_processed[:,:,:,2] = (X[:,:,:,2]-93401.125)/(105391.4375-93401.125)
-
-        ### t2_1MSL_2 'standard' normalization:
-        #x_processed = np.zeros(shape=X.shape)
-        #x_processed[:,:,:,0] = (X[:,:,:,0]-235.2141571044922)/(321.46630859375-235.2141571044922)
-        #x_processed[:,:,:,1] = (X[:,:,:,1]-93401.125)/(105391.4375-93401.125)
-        #x_processed[:,:,:,2] = (X[:,:,:,2]-93401.125)/(105391.4375-93401.125)
-
-        ### t2_2gph500_1 'standard' normalization:
-        #x_processed = np.zeros(shape=X.shape)
-        #x_processed[:,:,:,0] = (X[:,:,:,0]-235.2141571044922)/(321.46630859375-235.2141571044922)
-        #x_processed[:,:,:,1] = (X[:,:,:,1]-235.2141571044922)/(321.46630859375-235.2141571044922)
-        #x_processed[:,:,:,2] = (X[:,:,:,2]-4836.070232780612)/(6007.097417091836-4836.070232780612)
-        ## t2_1gph500_2 'standard' normalization:   
-        #x_processed = np.zeros(shape=X.shape)
-        #x_processed[:,:,:,0] = (X[:,:,:,0]-235.2141571044922)/(321.46630859375-235.2141571044922)
-        #x_processed[:,:,:,1] = (X[:,:,:,1]-4836.070232780612)/(6007.097417091836-4836.070232780612)
-        #x_processed[:,:,:,2] = (X[:,:,:,2]-4836.070232780612)/(6007.097417091836-4836.070232780612)
-
-        ### No standardization for moving Objects test set: Just 0s and 1s
-        #x_processed = np.zeros(shape=X.shape)
-        #x_processed = X
-
-        ### t2_1 'standard' normalization (got one dimension less, due to just one channel)
-        #x_processed = np.zeros(shape=X.shape)
-        #x_processed[:,:,:,0] = (X[:,:,:,0]-235.2141571044922)/(321.46630859375-235.2141571044922)
-        #x_processed[:,:,:,1] = X[:,:,:,1]
-        #x_processed[:,:,:,2] = X[:,:,:,2]
-
-        ### t2_1 'standard' normalization (got one dimension less, due to just one channel)
-        x_processed = np.zeros(shape=X.shape)
-        x_processed[:,:,:,0] = (X[:,:,:,0]-235.2141571044922)/(321.46630859375-235.2141571044922)
-        x_processed[:,:,:,1] = (X[:,:,:,1]-235.2141571044922)/(321.46630859375-235.2141571044922)
-        x_processed[:,:,:,2] = X[:,:,:,2]
-
-        ### Standardization: (x-mean)/standard_deviation ###
-        #Doesn't work due to some activation functions
-        #x_processed = np.zeros(shape=X.shape)
-        #x_processed[:,:,:,0] = (X[:,:,:,0]-285.1751264870658)/8.770013367617763
-        #x_processed[:,:,:,1] = (X[:,:,:,1]-101420.4382666807)/1071.5999818175521
-        #x_processed[:,:,:,2] = (X[:,:,:,2]-5607.662795353248)/200.62593105865764
-
-        ### Standardization+Normalization ###
-        # standardize:(x-mean)/standard_deviation
-        #x_preprocessed = np.zeros(shape=X.shape)
-        #x_preprocessed[:,:,:,0] = (X[:,:,:,0]-285.1751264870658)/8.770013367617763
-        #x_preprocessed[:,:,:,1] = (X[:,:,:,1]-101420.4382666807)/1071.5999818175521
-        #x_preprocessed[:,:,:,2] = (X[:,:,:,2]-5607.662795353248)/200.62593105865764
-        # normalize:(x-min(x))/(max(x)-min(x))
-        #x_processed = np.zeros(shape=X.shape)
-        #x_processed[:,:,:,0] = (x_preprocessed[:,:,:,0]-np.amin(x_preprocessed[:,:,:,0]))/(np.amax(x_preprocessed[:,:,:,0])-np.amin(x_preprocessed[:,:,:,0]))
-        #x_processed[:,:,:,1] = (x_preprocessed[:,:,:,1]-np.amin(x_preprocessed[:,:,:,1]))/(np.amax(x_preprocessed[:,:,:,1])-np.amin(x_preprocessed[:,:,:,1]))
-        #x_processed[:,:,:,2] = (x_preprocessed[:,:,:,2]-np.amin(x_preprocessed[:,:,:,2]))/(np.amax(x_preprocessed[:,:,:,2])-np.amin(x_preprocessed[:,:,:,2]))
-
-        return x_processed.astype(np.float32)
-        #return X.astype(np.float32) / 255
-
-    def create_all(self):
-        X_all = np.zeros((self.N_sequences, self.nt) + self.im_shape, np.float32)
-        for i, idx in enumerate(self.possible_starts):
-            X_all[i] = self.preprocess(self.X[idx:idx+self.nt])
-        return X_all
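The preprocess method in the generator deleted above applies a fixed per-channel min-max scaling, (x - min) / (max - min), with the bounds taken from training-set statistics (the many commented-out variants just swap in different bounds per variable). A small generic sketch of that scaling, with hypothetical bounds:

    import numpy as np

    # hypothetical per-channel (min, max) pairs, e.g. taken from the training set
    BOUNDS = [(235.2, 321.5), (93401.1, 105391.4), (4836.1, 6007.1)]

    def min_max_normalize(x, bounds=BOUNDS):
        # x has shape (..., n_channels); each channel is mapped to roughly [0, 1]
        out = np.empty_like(x, dtype=np.float32)
        for c, (lo, hi) in enumerate(bounds):
            out[..., c] = (x[..., c] - lo) / (hi - lo)
        return out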
diff --git a/workflow_parallel_frame_prediction/DataPostprocess/keras_utils.py b/workflow_parallel_frame_prediction/DataPostprocess/keras_utils.py
deleted file mode 100755
index ededcc74fed982654d82cfb610b79224f1e08554..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/DataPostprocess/keras_utils.py
+++ /dev/null
@@ -1,58 +0,0 @@
-import os
-import numpy as np
-
-from keras import backend as K
-from keras.legacy.interfaces import generate_legacy_interface, recurrent_args_preprocessor
-from keras.models import model_from_json
-
-legacy_prednet_support = generate_legacy_interface(
-    allowed_positional_args=['stack_sizes', 'R_stack_sizes',
-                            'A_filt_sizes', 'Ahat_filt_sizes', 'R_filt_sizes'],
-    conversions=[('dim_ordering', 'data_format'),
-                 ('consume_less', 'implementation')],
-    value_conversions={'dim_ordering': {'tf': 'channels_last',
-                                        'th': 'channels_first',
-                                        'default': None},
-                        'consume_less': {'cpu': 0,
-                                        'mem': 1,
-                                        'gpu': 2}},
-    preprocessor=recurrent_args_preprocessor)
-
-# Convert old Keras (1.2) json models and weights to Keras 2.0
-def convert_model_to_keras2(old_json_file, old_weights_file, new_json_file, new_weights_file):
-    from prednet import PredNet
-    # If using tensorflow, it doesn't allow you to load the old weights.
-    if K.backend() != 'theano':
-        os.environ['KERAS_BACKEND'] = backend
-        reload(K)
-
-    f = open(old_json_file, 'r')
-    json_string = f.read()
-    f.close()
-    model = model_from_json(json_string, custom_objects = {'PredNet': PredNet})
-    model.load_weights(old_weights_file)
-
-    weights = model.layers[1].get_weights()
-    if weights[0].shape[0] == model.layers[1].stack_sizes[1]:
-        for i, w in enumerate(weights):
-            if w.ndim == 4:
-                weights[i] = np.transpose(w, (2, 3, 1, 0))
-        model.set_weights(weights)
-
-    model.save_weights(new_weights_file)
-    json_string = model.to_json()
-    with open(new_json_file, "w") as f:
-        f.write(json_string)
-
-
-if __name__ == '__main__':
-    old_dir = './model_data/'
-    new_dir = './model_data_keras2/'
-    if not os.path.exists(new_dir):
-        os.mkdir(new_dir)
-    for w_tag in ['', '-Lall', '-extrapfinetuned']:
-        m_tag = '' if w_tag == '-Lall' else w_tag
-        convert_model_to_keras2(old_dir + 'prednet_kitti_model' + m_tag + '.json',
-                                old_dir + 'prednet_kitti_weights' + w_tag + '.hdf5',
-                                new_dir + 'prednet_kitti_model' + m_tag + '.json',
-                                new_dir + 'prednet_kitti_weights' + w_tag + '.hdf5')
diff --git a/workflow_parallel_frame_prediction/DataPostprocess/kitti_evaluate_parallel.py b/workflow_parallel_frame_prediction/DataPostprocess/kitti_evaluate_parallel.py
deleted file mode 100755
index 1c1166eebfc09f947a0bf899b9d751a1ae061e67..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/DataPostprocess/kitti_evaluate_parallel.py
+++ /dev/null
@@ -1,159 +0,0 @@
-'''
-Evaluate trained PredNet on KITTI sequences.
-Calculates mean-squared error and plots predictions.
-'''
-import math
-import os
-import numpy as np
-#from six.moves import cPickle
-import matplotlib
-matplotlib.use('Agg')
-import matplotlib.pyplot as plt
-import matplotlib.gridspec as gridspec
-from keras import backend as K
-from keras.models import Model, model_from_json
-from keras.layers import Input, Dense, Flatten
-from prednet import PredNet
-from data_utils import SequenceGenerator
-from kitti_settings import *
-##Just for checking how the shape is after generator.create_all() from Sequence Generator
-#import hickle as hkl
-import horovod.keras as hvd
-import tensorflow as tf
-
-# Horovod: initialize Horovod
-hvd.init()
-#Horovod: pin GPU to be used for process local rank (one GPU per process)
-config = tf.ConfigProto()
-config.gpu_options.allow_growth = True
-config.gpu_options.visible_device_list = str(hvd.local_rank())
-K.set_session(tf.Session(config=config))
-
-n_plot = 10
-batch_size = 5
-nt = 10
-
-# Set file paths for weights, model and test data
-
-#weights_file = os.path.join(WEIGHTS_DIR, 'tensorflow_weights/prednet_kitti_weights.hdf5')
-weights_file = os.path.join(WEIGHTS_DIR, 'prednet_kitti_weights.hdf5')
-print("weights file path set")
-json_file = os.path.join(WEIGHTS_DIR, 'prednet_kitti_model.json')
-print("model file path set")
-test_file = os.path.join(DATA_DIR, 'X_train.hkl')
-print("test file path set")
-test_sources = os.path.join(DATA_DIR, 'sources_test.hkl')
-print("test sources path set")
-
-# Load trained model
-f = open(json_file, 'r')
-json_string = f.read()
-f.close()
-train_model = model_from_json(json_string, custom_objects = {'PredNet': PredNet})
-train_model.load_weights(weights_file)
-print("Trained model is loaded")
-
-# Create testing model (to output predictions)
-layer_config = train_model.layers[1].get_config()
-layer_config['output_mode'] = 'prediction'
-data_format = layer_config['data_format'] if 'data_format' in layer_config else layer_config['dim_ordering']
-test_prednet = PredNet(weights=train_model.layers[1].get_weights(), **layer_config)
-input_shape = list(train_model.layers[0].batch_input_shape[1:])
-input_shape[0] = nt
-inputs = Input(shape=tuple(input_shape))
-predictions = test_prednet(inputs)
-test_model = Model(inputs=inputs, outputs=predictions)
-
-# Bing: changed sequence_start_mode from 'unique' to 'all'; still need to ask Severin why training uses 'all' but testing uses 'unique'
-test_generator = SequenceGenerator(test_file, test_sources, nt, sequence_start_mode='all', data_format=data_format)
-print ("test_generator finished")
-X_test = test_generator.create_all()
-print(X_test.shape)
-# Bing: split the test set across Horovod ranks to work around the memory issue
-X_test_batch_size = round(X_test.shape[0]/hvd.size())
-
-def post_process(rank = 0):
-    print ("Rank {}".format(rank))
-    X_test_batch = X_test[rank * X_test_batch_size:(rank + 1) * X_test_batch_size]
-    print ("X_test_batch size",X_test_batch.shape)
-    X_hat = test_model.predict(X_test_batch, batch_size)
-
-    if data_format == 'channels_first':
-        X_test_batch = np.transpose(X_test_batch, (0, 1, 3, 4, 2))
-        X_hat = np.transpose(X_hat, (0, 1, 3, 4, 2))
-
-    ##Just for checking how the shape is after generator.create_all() from Sequence Generator
-    #hkl.dump(X_test, os.path.join(RESULTS_SAVE_DIR, 'X_AfterGeneratorStandardized.hkl'))
-    #hkl.dump(X_hat, os.path.join(RESULTS_SAVE_DIR, 'X_hatStandardized.hkl'))
-    #
-
-    # Compare MSE of PredNet predictions vs. using last frame.  Write results to prediction_scores.txt
-    # Furthermore, calculate Model MSE from the last prediction of the sequence only
-    # as the model improves after several frames (mse_model_last)
-    # Typical shape of X_test and X_hat: (263, 10, 128, 160, 3)
-    # where 263 is the number of sequences, 10 is the number of frames per sequence,
-    # 128 & 160 are the image size and 3 the number of channels.
-    # For our case only take channel 0 (= T2) into account.
-    shapeXhat = str(X_hat.shape) #Just have a look at the shapes to be sure we are calculating the right MSE
-    shapeXtest = str(X_test_batch.shape)
-    mse_model = np.mean((X_test_batch[:, 1:,:,:,0] - X_hat[:, 1:,:,:,0])**2)  # look at all timesteps except the first
-    mse_model_last = np.mean((X_test_batch[:, 9,:,:,0] - X_hat[:, 9,:,:,0])**2 )
-    mse_prev = np.mean((X_test_batch[:, :-1,:,:,0] - X_test_batch[:, 1:,:,:,0])**2)
-
-    # Calculate PSNR (peak signal-to-noise ratio)
-    # If the two images are identical the MSE is zero and the PSNR would be infinite;
-    # in that case the capped best value of 100 is returned instead.
-    def psnr(img1, img2):
-        mse = np.mean((img1-img2) ** 2)
-        if mse == 0: return 100
-        PIXEL_MAX = 1
-        return 20 * math.log10(PIXEL_MAX/math.sqrt(mse))
-
-    psnr_model = psnr(X_test_batch[:, 1:,:,:,0], X_hat[:, 1:,:,:,0])
-    psnr_model_last = psnr(X_test_batch[:, 9,:,:,0], X_hat[:, 9,:,:,0])
-    psnr_prev = psnr(X_test_batch[:, :-1,:,:,0], X_test_batch[:, 1:,:,:,0])
-    print("Evaluations are completed")
-
-    if not os.path.exists(RESULTS_SAVE_DIR): os.mkdir(RESULTS_SAVE_DIR)
-    f = open(RESULTS_SAVE_DIR + '/prediction_scores_rank_{}.txt'.format(rank), 'w')
-
-    f.write("X_test_batch_size:{} ; Rank: {}\n".format(X_test_batch_size,hvd.rank()))
-    f.write("Model MSE: %f\n" % mse_model)
-    f.write("Model MSE from only last prediction in sequence: %f\n" % mse_model_last)
-    f.write("Previous Frame MSE: %f\n" % mse_prev)
-    f.write("Model PSNR: %f\n" % psnr_model)
-    f.write("Model PSNR from only last prediction in sequence: %f\n" % psnr_model_last)
-    f.write("Previous frame PSNR: %f\n" % psnr_prev)
-    f.write("Shape of X_test: " + shapeXtest)
-    f.write("")
-    f.write("Shape of X_hat: " + shapeXhat)
-    f.close()
-    print("Results are saved to {}\n".format(RESULTS_SAVE_DIR + "/prediction_scores_rank_{}.txt".format(rank)))
-
-    # Plot some predictions
-    aspect_ratio = float(X_hat.shape[2]) / X_hat.shape[3]
-    plt.figure(figsize = (nt, 2*aspect_ratio))
-    gs = gridspec.GridSpec(2, nt)
-    gs.update(wspace=0., hspace=0.)
-    plot_save_dir = os.path.join(RESULTS_SAVE_DIR, 'prediction_plots_rank_{}/'.format(rank))
-    if not os.path.exists(plot_save_dir): os.mkdir(plot_save_dir)
-    plot_idx = np.random.permutation(X_test_batch.shape[0])[:n_plot]
-    for i in plot_idx:
-        for t in range(nt):
-            plt.subplot(gs[t])
-            plt.imshow(X_test_batch[i,t,:,:,0], interpolation='none') #the last index sets the channel. 0 = t2
-            #plt.pcolormesh(X_test[i,t,::-1,:,0], shading='bottom', cmap=plt.cm.jet)
-            plt.tick_params(axis='both', which='both', bottom=False, top=False, left=False, right=False, labelbottom=False, labelleft=False)
-            if t==0: plt.ylabel('Actual', fontsize=10)
-
-            plt.subplot(gs[t + nt])
-            plt.imshow(X_hat[i,t,:,:,0], interpolation='none')
-            #plt.pcolormesh(X_hat[i,t,::-1,:,0], shading='bottom', cmap=plt.cm.jet)
-            plt.tick_params(axis='both', which='both', bottom=False, top=False, left=False, right=False, labelbottom=False, labelleft=False)
-            if t==0: plt.ylabel('Predicted', fontsize=10)
-        plt.savefig(plot_save_dir +  'plot_' + str(i) + '.png')
-        plt.clf()
-
-
-post_process(rank = hvd.rank())
\ No newline at end of file
diff --git a/workflow_parallel_frame_prediction/DataPostprocess/kitti_settings.py b/workflow_parallel_frame_prediction/DataPostprocess/kitti_settings.py
deleted file mode 100755
index 489989eecaa4a65abb6614d76402a3f153cca850..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/DataPostprocess/kitti_settings.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Where KITTI data will be saved if you run process_kitti.py
-# If you directly download the processed data, change to the path of the data.
-## Changed logic: this is now the path where the processed data (X_train, X_val, X_test) lies
-#DATA_DIR = './kitti_data/'
-#data directory for training data 2015 and 2016
-#DATA_DIR = '/p/project/cjjsc42/severin/try3'
-#data directory for moving objects:
-#DATA_DIR = '/p/home/jusers/hussmann1/jureca/movingObjects/se_nw'
-#data directory for featuretesting:
-##DATA_DIR = './testTry2'
-DATA_DIR = '/p/scratch/cjjsc42/bing/pystager-development/processData/splits'
-# Where model weights and config will be saved if you run kitti_train.py
-# If you directly download the trained weights, change to appropriate path.
-WEIGHTS_DIR = '/p/project/cjjsc42/bing/ml-severin/model_data_keras2'
-#WEIGHTS_DIR = '/p/project/cjjsc42/bing/ml-severin/model_data_keras2'
-
-# Where results (prediction plots and evaluation file) will be saved.
-RESULTS_SAVE_DIR = '/p/project/cjjsc42/bing/ml-severin/kitti_results'
-
diff --git a/workflow_parallel_frame_prediction/DataPostprocess/packageInstallation.sh b/workflow_parallel_frame_prediction/DataPostprocess/packageInstallation.sh
deleted file mode 100644
index 1039e5fd19fef89064f0d11ec6898117a0c6908d..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/DataPostprocess/packageInstallation.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-module --force purge
-module use /usr/local/software/jureca/OtherStages
-module load Stages/2019a
-module load Intel/2019.3.199-GCC-8.3.0  ParaStationMPI/5.2.2-1-mt
-module load mpi4py/3.0.1-Python-3.6.8
-pip3 install --user netCDF4
-pip3 install --user numpy
-
-
diff --git a/workflow_parallel_frame_prediction/DataPostprocess/parameters_kitti_evaluate.dat b/workflow_parallel_frame_prediction/DataPostprocess/parameters_kitti_evaluate.dat
deleted file mode 100755
index ac64b7046c7aa93f69d622d2b055e4fa45d53f75..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/DataPostprocess/parameters_kitti_evaluate.dat
+++ /dev/null
@@ -1,14 +0,0 @@
-import os
-
-# ============ input parameters =================== #
-# 0:deactivate 1: active
-
-Please fill in the following parameter list for PyStager
-Source_Directory =  /p/fastdata/slmet/slmet111/met_data/ecmwf/era5/nc/2017/
-Destination_Directory = /p/scratch/cjjsc42/bing/pystager-development/tryData/
-Log_Directory = /p/project/cjjsc42/bing/pystager-development/log
-Rsync_Status = 1
-Checksum_Status = 0
-
-
-
diff --git a/workflow_parallel_frame_prediction/DataPostprocess/prednet.py b/workflow_parallel_frame_prediction/DataPostprocess/prednet.py
deleted file mode 100755
index b5a0208ae137666c9bc284b21d6affe04d721053..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/DataPostprocess/prednet.py
+++ /dev/null
@@ -1,311 +0,0 @@
-import numpy as np
-
-from keras import backend as K
-from keras import activations
-from keras.layers import Recurrent
-from keras.layers import Conv2D, UpSampling2D, MaxPooling2D
-from keras.engine import InputSpec
-from keras_utils import legacy_prednet_support
-
-class PredNet(Recurrent):
-    '''PredNet architecture - Lotter 2016.
-        Stacked convolutional LSTM inspired by predictive coding principles.
-
-    # Arguments
-        stack_sizes: number of channels in targets (A) and predictions (Ahat) in each layer of the architecture.
-            Length is the number of layers in the architecture.
-            First element is the number of channels in the input.
-            Ex. (3, 16, 32) would correspond to a 3 layer architecture that takes in RGB images and has 16 and 32
-                channels in the second and third layers, respectively.
-        R_stack_sizes: number of channels in the representation (R) modules.
-            Length must equal length of stack_sizes, but the number of channels per layer can be different.
-        A_filt_sizes: filter sizes for the target (A) modules.
-            Has length of len(stack_sizes) - 1.
-            Ex. (3, 3) would mean that targets for layers 2 and 3 are computed by a 3x3 convolution of the errors (E)
-                from the layer below (followed by max-pooling)
-        Ahat_filt_sizes: filter sizes for the prediction (Ahat) modules.
-            Has length equal to length of stack_sizes.
-            Ex. (3, 3, 3) would mean that the predictions for each layer are computed by a 3x3 convolution of the
-                representation (R) modules at each layer.
-        R_filt_sizes: filter sizes for the representation (R) modules.
-            Has length equal to length of stack_sizes.
-            Corresponds to the filter sizes for all convolutions in the LSTM.
-        pixel_max: the maximum pixel value.
-            Used to clip the pixel-layer prediction.
-        error_activation: activation function for the error (E) units.
-        A_activation: activation function for the target (A) and prediction (A_hat) units.
-        LSTM_activation: activation function for the cell and hidden states of the LSTM.
-        LSTM_inner_activation: activation function for the gates in the LSTM.
-        output_mode: either 'error', 'prediction', 'all' or layer specification (ex. R2, see below).
-            Controls what is outputted by the PredNet.
-            If 'error', the mean response of the error (E) units of each layer will be outputted.
-                That is, the output shape will be (batch_size, nb_layers).
-            If 'prediction', the frame prediction will be outputted.
-            If 'all', the output will be the frame prediction concatenated with the mean layer errors.
-                The frame prediction is flattened before concatenation.
-                Nomenclature of 'all' is kept for backwards compatibility, but should not be confused with returning all of the layers of the model
-            For returning the features of a particular layer, output_mode should be of the form unit_type + layer_number.
-                For instance, to return the features of the LSTM "representational" units in the lowest layer, output_mode should be specified as 'R0'.
-                The possible unit types are 'R', 'Ahat', 'A', and 'E' corresponding to the 'representation', 'prediction', 'target', and 'error' units respectively.
-        extrap_start_time: time step for which model will start extrapolating.
-            Starting at this time step, the prediction from the previous time step will be treated as the "actual" input.
-        data_format: 'channels_first' or 'channels_last'.
-            It defaults to the `image_data_format` value found in your
-            Keras config file at `~/.keras/keras.json`.
-
-    # References
-        - [Deep predictive coding networks for video prediction and unsupervised learning](https://arxiv.org/abs/1605.08104)
-        - [Long short-term memory](http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf)
-        - [Convolutional LSTM network: a machine learning approach for precipitation nowcasting](http://arxiv.org/abs/1506.04214)
-        - [Predictive coding in the visual cortex: a functional interpretation of some extra-classical receptive-field effects](http://www.nature.com/neuro/journal/v2/n1/pdf/nn0199_79.pdf)
-    '''
-    @legacy_prednet_support
-    def __init__(self, stack_sizes, R_stack_sizes,
-                 A_filt_sizes, Ahat_filt_sizes, R_filt_sizes,
-                 pixel_max=1., error_activation='relu', A_activation='relu',
-                 LSTM_activation='tanh', LSTM_inner_activation='hard_sigmoid',
-                 output_mode='error', extrap_start_time=None,
-                 data_format=K.image_data_format(), **kwargs):
-        self.stack_sizes = stack_sizes
-        self.nb_layers = len(stack_sizes)
-        assert len(R_stack_sizes) == self.nb_layers, 'len(R_stack_sizes) must equal len(stack_sizes)'
-        self.R_stack_sizes = R_stack_sizes
-        assert len(A_filt_sizes) == (self.nb_layers - 1), 'len(A_filt_sizes) must equal len(stack_sizes) - 1'
-        self.A_filt_sizes = A_filt_sizes
-        assert len(Ahat_filt_sizes) == self.nb_layers, 'len(Ahat_filt_sizes) must equal len(stack_sizes)'
-        self.Ahat_filt_sizes = Ahat_filt_sizes
-        assert len(R_filt_sizes) == (self.nb_layers), 'len(R_filt_sizes) must equal len(stack_sizes)'
-        self.R_filt_sizes = R_filt_sizes
-
-        self.pixel_max = pixel_max
-        self.error_activation = activations.get(error_activation)
-        self.A_activation = activations.get(A_activation)
-        self.LSTM_activation = activations.get(LSTM_activation)
-        self.LSTM_inner_activation = activations.get(LSTM_inner_activation)
-
-        default_output_modes = ['prediction', 'error', 'all']
-        layer_output_modes = [layer + str(n) for n in range(self.nb_layers) for layer in ['R', 'E', 'A', 'Ahat']]
-        assert output_mode in default_output_modes + layer_output_modes, 'Invalid output_mode: ' + str(output_mode)
-        self.output_mode = output_mode
-        if self.output_mode in layer_output_modes:
-            self.output_layer_type = self.output_mode[:-1]
-            self.output_layer_num = int(self.output_mode[-1])
-        else:
-            self.output_layer_type = None
-            self.output_layer_num = None
-        self.extrap_start_time = extrap_start_time
-
-        assert data_format in {'channels_last', 'channels_first'}, 'data_format must be in {channels_last, channels_first}'
-        self.data_format = data_format
-        self.channel_axis = -3 if data_format == 'channels_first' else -1
-        self.row_axis = -2 if data_format == 'channels_first' else -3
-        self.column_axis = -1 if data_format == 'channels_first' else -2
-        super(PredNet, self).__init__(**kwargs)
-        self.input_spec = [InputSpec(ndim=5)]
-
-    def compute_output_shape(self, input_shape):
-        if self.output_mode == 'prediction':
-            out_shape = input_shape[2:]
-        elif self.output_mode == 'error':
-            out_shape = (self.nb_layers,)
-        elif self.output_mode == 'all':
-            out_shape = (np.prod(input_shape[2:]) + self.nb_layers,)
-        else:
-            stack_str = 'R_stack_sizes' if self.output_layer_type == 'R' else 'stack_sizes'
-            stack_mult = 2 if self.output_layer_type == 'E' else 1
-            out_stack_size = stack_mult * getattr(self, stack_str)[self.output_layer_num]
-            out_nb_row = input_shape[self.row_axis] / 2**self.output_layer_num
-            out_nb_col = input_shape[self.column_axis] / 2**self.output_layer_num
-            if self.data_format == 'channels_first':
-                out_shape = (out_stack_size, out_nb_row, out_nb_col)
-            else:
-                out_shape = (out_nb_row, out_nb_col, out_stack_size)
-
-        if self.return_sequences:
-            return (input_shape[0], input_shape[1]) + out_shape
-        else:
-            return (input_shape[0],) + out_shape
-
-    def get_initial_state(self, x):
-        input_shape = self.input_spec[0].shape
-        init_nb_row = input_shape[self.row_axis]
-        init_nb_col = input_shape[self.column_axis]
-
-        base_initial_state = K.zeros_like(x)  # (samples, timesteps) + image_shape
-        non_channel_axis = -1 if self.data_format == 'channels_first' else -2
-        for _ in range(2):
-            base_initial_state = K.sum(base_initial_state, axis=non_channel_axis)
-        base_initial_state = K.sum(base_initial_state, axis=1)  # (samples, nb_channels)
-
-        initial_states = []
-        states_to_pass = ['r', 'c', 'e']
-        nlayers_to_pass = {u: self.nb_layers for u in states_to_pass}
-        if self.extrap_start_time is not None:
-           states_to_pass.append('ahat')  # pass prediction in states so can use as actual for t+1 when extrapolating
-           nlayers_to_pass['ahat'] = 1
-        for u in states_to_pass:
-            for l in range(nlayers_to_pass[u]):
-                ds_factor = 2 ** l
-                nb_row = init_nb_row // ds_factor
-                nb_col = init_nb_col // ds_factor
-                if u in ['r', 'c']:
-                    stack_size = self.R_stack_sizes[l]
-                elif u == 'e':
-                    stack_size = 2 * self.stack_sizes[l]
-                elif u == 'ahat':
-                    stack_size = self.stack_sizes[l]
-                output_size = stack_size * nb_row * nb_col  # flattened size
-
-                reducer = K.zeros((input_shape[self.channel_axis], output_size)) # (nb_channels, output_size)
-                initial_state = K.dot(base_initial_state, reducer) # (samples, output_size)
-                if self.data_format == 'channels_first':
-                    output_shp = (-1, stack_size, nb_row, nb_col)
-                else:
-                    output_shp = (-1, nb_row, nb_col, stack_size)
-                initial_state = K.reshape(initial_state, output_shp)
-                initial_states += [initial_state]
-
-        if K._BACKEND == 'theano':
-            from theano import tensor as T
-            # There is a known issue in the Theano scan op when dealing with inputs whose shape is 1 along a dimension.
-            # In our case, this is a problem when training on grayscale images, and the below line fixes it.
-            initial_states = [T.unbroadcast(init_state, 0, 1) for init_state in initial_states]
-
-        if self.extrap_start_time is not None:
-            initial_states += [K.variable(0, int if K.backend() != 'tensorflow' else 'int32')]  # the last state will correspond to the current timestep
-        return initial_states
-
-    def build(self, input_shape):
-        self.input_spec = [InputSpec(shape=input_shape)]
-        self.conv_layers = {c: [] for c in ['i', 'f', 'c', 'o', 'a', 'ahat']}
-
-        for l in range(self.nb_layers):
-            for c in ['i', 'f', 'c', 'o']:
-                act = self.LSTM_activation if c == 'c' else self.LSTM_inner_activation
-                self.conv_layers[c].append(Conv2D(self.R_stack_sizes[l], self.R_filt_sizes[l], padding='same', activation=act, data_format=self.data_format))
-
-            act = 'relu' if l == 0 else self.A_activation
-            self.conv_layers['ahat'].append(Conv2D(self.stack_sizes[l], self.Ahat_filt_sizes[l], padding='same', activation=act, data_format=self.data_format))
-
-            if l < self.nb_layers - 1:
-                self.conv_layers['a'].append(Conv2D(self.stack_sizes[l+1], self.A_filt_sizes[l], padding='same', activation=self.A_activation, data_format=self.data_format))
-
-        self.upsample = UpSampling2D(data_format=self.data_format)
-        self.pool = MaxPooling2D(data_format=self.data_format)
-
-        self.trainable_weights = []
-        nb_row, nb_col = (input_shape[-2], input_shape[-1]) if self.data_format == 'channels_first' else (input_shape[-3], input_shape[-2])
-        for c in sorted(self.conv_layers.keys()):
-            for l in range(len(self.conv_layers[c])):
-                ds_factor = 2 ** l
-                if c == 'ahat':
-                    nb_channels = self.R_stack_sizes[l]
-                elif c == 'a':
-                    nb_channels = 2 * self.stack_sizes[l]
-                else:
-                    nb_channels = self.stack_sizes[l] * 2 + self.R_stack_sizes[l]
-                    if l < self.nb_layers - 1:
-                        nb_channels += self.R_stack_sizes[l+1]
-                in_shape = (input_shape[0], nb_channels, nb_row // ds_factor, nb_col // ds_factor)
-                if self.data_format == 'channels_last': in_shape = (in_shape[0], in_shape[2], in_shape[3], in_shape[1])
-                with K.name_scope('layer_' + c + '_' + str(l)):
-                    self.conv_layers[c][l].build(in_shape)
-                self.trainable_weights += self.conv_layers[c][l].trainable_weights
-
-        self.states = [None] * self.nb_layers*3
-
-        if self.extrap_start_time is not None:
-            self.t_extrap = K.variable(self.extrap_start_time, int if K.backend() != 'tensorflow' else 'int32')
-            self.states += [None] * 2  # [previous frame prediction, timestep]
-
-    def step(self, a, states):
-        r_tm1 = states[:self.nb_layers]
-        c_tm1 = states[self.nb_layers:2*self.nb_layers]
-        e_tm1 = states[2*self.nb_layers:3*self.nb_layers]
-
-        if self.extrap_start_time is not None:
-            t = states[-1]
-            a = K.switch(t >= self.t_extrap, states[-2], a)  # if past self.extrap_start_time, the previous prediction will be treated as the actual
-
-        c = []
-        r = []
-        e = []
-
-        # Update R units starting from the top
-        for l in reversed(range(self.nb_layers)):
-            inputs = [r_tm1[l], e_tm1[l]]
-            if l < self.nb_layers - 1:
-                inputs.append(r_up)
-
-            inputs = K.concatenate(inputs, axis=self.channel_axis)
-            i = self.conv_layers['i'][l].call(inputs)
-            f = self.conv_layers['f'][l].call(inputs)
-            o = self.conv_layers['o'][l].call(inputs)
-            _c = f * c_tm1[l] + i * self.conv_layers['c'][l].call(inputs)
-            _r = o * self.LSTM_activation(_c)
-            c.insert(0, _c)
-            r.insert(0, _r)
-
-            if l > 0:
-                r_up = self.upsample.call(_r)
-
-        # Update feedforward path starting from the bottom
-        for l in range(self.nb_layers):
-            ahat = self.conv_layers['ahat'][l].call(r[l])
-            if l == 0:
-                ahat = K.minimum(ahat, self.pixel_max)
-                frame_prediction = ahat
-
-            # compute errors
-            e_up = self.error_activation(ahat - a)
-            e_down = self.error_activation(a - ahat)
-
-            e.append(K.concatenate((e_up, e_down), axis=self.channel_axis))
-
-            if self.output_layer_num == l:
-                if self.output_layer_type == 'A':
-                    output = a
-                elif self.output_layer_type == 'Ahat':
-                    output = ahat
-                elif self.output_layer_type == 'R':
-                    output = r[l]
-                elif self.output_layer_type == 'E':
-                    output = e[l]
-
-            if l < self.nb_layers - 1:
-                a = self.conv_layers['a'][l].call(e[l])
-                a = self.pool.call(a)  # target for next layer
-
-        if self.output_layer_type is None:
-            if self.output_mode == 'prediction':
-                output = frame_prediction
-            else:
-                for l in range(self.nb_layers):
-                    layer_error = K.mean(K.batch_flatten(e[l]), axis=-1, keepdims=True)
-                    all_error = layer_error if l == 0 else K.concatenate((all_error, layer_error), axis=-1)
-                if self.output_mode == 'error':
-                    output = all_error
-                else:
-                    output = K.concatenate((K.batch_flatten(frame_prediction), all_error), axis=-1)
-
-        states = r + c + e
-        if self.extrap_start_time is not None:
-            states += [frame_prediction, t + 1]
-        return output, states
-
-    def get_config(self):
-        config = {'stack_sizes': self.stack_sizes,
-                  'R_stack_sizes': self.R_stack_sizes,
-                  'A_filt_sizes': self.A_filt_sizes,
-                  'Ahat_filt_sizes': self.Ahat_filt_sizes,
-                  'R_filt_sizes': self.R_filt_sizes,
-                  'pixel_max': self.pixel_max,
-                  'error_activation': self.error_activation.__name__,
-                  'A_activation': self.A_activation.__name__,
-                  'LSTM_activation': self.LSTM_activation.__name__,
-                  'LSTM_inner_activation': self.LSTM_inner_activation.__name__,
-                  'data_format': self.data_format,
-                  'extrap_start_time': self.extrap_start_time,
-                  'output_mode': self.output_mode}
-        base_config = super(PredNet, self).get_config()
-        return dict(list(base_config.items()) + list(config.items()))
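-
-
-if __name__ == '__main__':
-    # Illustrative sketch (not part of the original file): how a PredNet layer could be
-    # wired into a Keras model for 64x64, 3-channel sequences of 10 frames. The stack
-    # and filter sizes below are assumptions for illustration, not the trained settings.
-    from keras.layers import Input
-    from keras.models import Model
-
-    stack_sizes = (3, 48, 96)        # input channels plus two higher layers
-    R_stack_sizes = stack_sizes
-    A_filt_sizes = (3, 3)            # len(stack_sizes) - 1 entries
-    Ahat_filt_sizes = (3, 3, 3)
-    R_filt_sizes = (3, 3, 3)
-
-    prednet = PredNet(stack_sizes, R_stack_sizes, A_filt_sizes, Ahat_filt_sizes,
-                      R_filt_sizes, output_mode='error', return_sequences=True)
-    inputs = Input(shape=(10, 64, 64, 3))   # (nt, rows, cols, channels)
-    errors = prednet(inputs)                # per-layer errors, shape (batch, nt, nb_layers)
-    model = Model(inputs=inputs, outputs=errors)
-    model.summary()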
diff --git a/workflow_parallel_frame_prediction/DataPreprocess/Stager_devel_N_24_process_netCDF.sh b/workflow_parallel_frame_prediction/DataPreprocess/Stager_devel_N_24_process_netCDF.sh
deleted file mode 100755
index 983336105659267b5ed29215bd1f5c2ccc04e195..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/DataPreprocess/Stager_devel_N_24_process_netCDF.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash -x
-#SBATCH --account=deepacf
-#SBATCH --nodes=1
-#SBATCH --ntasks=12
-##SBATCH --ntasks-per-node=12
-#SBATCH --cpus-per-task=1
-#SBATCH --output=process_netcdf-out.%j
-#SBATCH --error=process_netcdf-err.%j
-#SBATCH --time=00:20:00
-#SBATCH --partition=devel
-#SBATCH --mail-type=ALL
-#SBATCH --mail-user=b.gong@fz-juelich.de
-
-module --force purge
-module use $OTHERSTAGES
-module load Stages/2019a
-module load Intel/2019.3.199-GCC-8.3.0  ParaStationMPI/5.2.2-1
-module load h5py/2.9.0-Python-3.6.8
-module load mpi4py/3.0.1-Python-3.6.8
-
-srun python mpi_stager_v2_process_netCDF.py --source_dir /p/scratch/deepacf/video_prediction_shared_folder/extractedData/ \
---destination_dir /p/scratch/deepacf/bing/processData_size_64_64_3_3t_norm_test2
diff --git a/workflow_parallel_frame_prediction/DataPreprocess/external_function.py b/workflow_parallel_frame_prediction/DataPreprocess/external_function.py
deleted file mode 100755
index c360c0e0e6289d46224369d2b1a0a8bda223cb1f..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/DataPreprocess/external_function.py
+++ /dev/null
@@ -1,158 +0,0 @@
-#from mpi4py import MPI
-from os import walk
-import os
-import sys
-import subprocess
-import logging
-import time
-import hashlib
- 
-# ======================= List of functions ====================================== #
- 
-# check the rank and print it
- 
-def logger(file_name, logger_level, program_name):
-    #  Log file starter
- 
-    logging.basicConfig(filename=file_name, level=logger_level,
-                        format='%(asctime)s:%(levelname)s:%(message)s')
-    logging.debug(' === PyStager is started === ')
-    print(str(program_name) + ' is Running .... ')
- 
- 
-def config_file(config_file_name):
-    params = {}
-    for line in open(config_file_name):
-        line = line.strip()
-        read_in_value = line.split("=")
-        if len(read_in_value) == 2:
-            params[read_in_value[0].strip()] = read_in_value[1].strip()
- 
-    source_dir = str(params["Source_Directory"])
-    print(source_dir)
-    destination_dir = str(params["Destination_Directory"])
-    log_dir = str(params["Log_Directory"])
-    rsync_status = int(params["Rsync_Status"])
-    return source_dir, destination_dir, log_dir, rsync_status
- 
- 
-def directory_scanner(source_path):
-    # Look inside a directory and list all its sub-directories together with their number of files and size.
-    # NOTE: sub-directories nested inside those sub-directories are neglected!
- 
-    dir_detail_list = []  # directories details
-    sub_dir_list = []
-    total_size_source = 0
-    total_num_files = 0
-    list_directories = []
- 
-    list_directories = os.listdir(source_path)
-    print(list_directories)
-    print(int(len(list_directories)))
- 
-    for d in list_directories:
-        print(d)
-        path = source_path + d
-        print(path)
-        if os.path.isdir(path):
-            sub_dir_list.append(d)
-            sub_dir_list.sort()
-            num_files = 0
-            # size of the files and subdirectories
-            size_dir = subprocess.check_output(['du', '-sc', path])
-            splitted = size_dir.split()  # first item is the size of the folder
-            size = (splitted[0])
-            num_files = len([f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))])
-            dir_detail_list.extend([d, size, num_files])
-            total_num_files = total_num_files + int(num_files)
-            total_size_source = total_size_source + int(size)
-        else:
-            print(path, 'does not exist')
-    print("===== Debug here =====")  
- 
-    total_num_directories = int(len(list_directories))
-    total_size_source = float(total_size_source / 1000000)
- 
-    message = 'Total size of the source directory is: ' + str(total_size_source) + ' GB.'
-    print(message)
-    message = "Total number of the files in the source directory is: " + str(total_num_files)
-    print(message)
-    message = "Total number of the directories in the source directory is: " + str(total_num_directories)
-    print(message)
- 
-    return dir_detail_list, sub_dir_list, total_size_source, total_num_files, total_num_directories
- 
- 
-def load_distributor(dir_detail_list, sub_dir_list, total_size_source, total_num_files, total_num_directories, p):
-    # create a dictionary with one key per worker rank (ranks 1 .. p-1)
-    # and assign each sub-directory name to one of the keys in a round-robin fashion
-    transfer_dict = dict.fromkeys(list(range(1, p)))
-    print(transfer_dict)
-    # package_counter = 0 possibility to use the counter to fill
-    counter = 1
-    for Directory_counter in range(0, total_num_directories):
- 
-        if transfer_dict[counter] is None:  # if the value for the key is None add to it
-            transfer_dict[counter] = sub_dir_list[Directory_counter]
-        else:  # if key has a value join the new value to the old value
-            transfer_dict[counter] = "{};{}".format(transfer_dict[counter], sub_dir_list[Directory_counter])
-        counter = counter + 1
-        if counter == p:
-            counter = 1
- 
-    return transfer_dict
- 
-def sync_file(source_path, destination_dir, job_name, rsync_status):
-    rsync_msg = ("rsync -r " + source_path + job_name + "/" + " " + destination_dir + "/" + job_name)
-    # print('Node:', str(my_rank),'will execute :', rsync_str,'\r\n')
-    # sync the assigned folder
- 
-    if rsync_status == 1:
-        os.system(rsync_msg)
- 
- 
- 
-def hash_directory(source_path,job_name,hash_rep_file,input_status):
-    #sha256_hash = hashlib.sha256()
-    md5_hash = hashlib.md5()
- 
-    ########## Create a hash-file repository for the directory(ies) assigned to the node #######
-    hash_repo_text = input_status + "_"+job_name +"_hashed.txt"
-    os.chdir(hash_rep_file)
-    hashed_text_note=open(hash_repo_text,"w+")
- 
-    # job_name is the name of the subdirectory that is going to be processed 
-    directory_to_process = source_path  + job_name
-    # print(directory_to_process)
-    files_list = []
-    for dirpath, dirnames, filenames in os.walk(directory_to_process):
-        files_list.extend(filenames)
-     
-    os.chdir(directory_to_process) # change to the working directory 
- 
-    for file_to_process in files_list:  # iterate over all files collected above
-         
-        ## ======= this is the sha256 checksum ========= # 
-        #with open(file_to_process,"rb") as f:
-        #    # Read and update hash in chunks of 4K
-        #   for byte_block in iter(lambda: f.read(4096),b""):
-        #       sha256_hash.update(byte_block)
-        #       hashed_file = sha256_hash.hexdigest()
- 
-        with open(file_to_process,"rb") as f:
-            # Read and update hash in chunks of 4K
-           for byte_block in iter(lambda: f.read(4096),b""):
-               md5_hash.update(byte_block)
-               hashed_file = md5_hash.hexdigest()
- 
-        hashed_text_note.write(hashed_file + "\n")
- 
-    hashed_text_note.close()
-    return
- 
-def md5(fname):
-    md5_hash = hashlib.md5()
-    with open(fname,"rb") as f:
-        # Read and update hash in chunks of 4K
-        for byte_block in iter(lambda: f.read(4096),b""):
-            md5_hash.update(byte_block)
-    return md5_hash.hexdigest()
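-
-
-if __name__ == '__main__':
-    # Illustrative sketch (not part of the original module): parse one of the PyStager
-    # parameter files and scan the configured source directory. The file name below is
-    # an example; point it to the .dat file of the step you are running.
-    src, dest, log_dir, rsync_status = config_file("parameters_kitti_evaluate.dat")
-    details, sub_dirs, total_size, n_files, n_dirs = directory_scanner(src)
-    print("{} sub-directories with {} files found in {}".format(n_dirs, n_files, src))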
diff --git a/workflow_parallel_frame_prediction/DataPreprocess/mpi_split_data_multi_years.py b/workflow_parallel_frame_prediction/DataPreprocess/mpi_split_data_multi_years.py
deleted file mode 100644
index 68d1ddfbfe413aab00950b71a336d0c1a43cbbf8..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/DataPreprocess/mpi_split_data_multi_years.py
+++ /dev/null
@@ -1,58 +0,0 @@
-from mpi4py import MPI
-import argparse
-from process_netCDF_v2 import *
-from metadata import MetaData
-import json
-
-#add parser arguments
-parser = argparse.ArgumentParser()
-#parser.add_argument("--source_dir", type=str, default="/p/scratch/deepacf/bing/extractedData/")
-parser.add_argument("--destination_dir","-dest",dest="destination_dir",type=str, default="/p/scratch/deepacf/bing/processData_size_64_64_3_3t_norm")
-parser.add_argument("--varnames","-vars",dest="varnames", nargs = '+')
-#parser.add_argument("--partition","-part",dest="partition",type=json.loads)
-#                    help="--partition allows to control the splitting of the processed data in training, test and validation data. Pass a dictionary-like string.")
-
-args = parser.parse_args()
-# ML 2020/06/08: Dirty workaround as long as data-splitting is done with this separate Python-script 
-#                called from the same parent Shell-/Batch-script as 'mpi_stager_v2_process_netCDF.py'
-target_dir = os.path.join(MetaData.get_destdir_jsontmp(),"hickle")
-varnames = args.varnames
-
-#partition = args.partition
-#all_keys  = partition.keys()
-#for key in all_keys:
-#    print(partition[key]) 
-
-cv ={}
-partition1 = {
-            "train":{
-                #"2222":[1,2,3,5,6,7,8,9,10,11,12],
-                #"2010_1":[1,2,3,4,5,6,7,8,9,10,11,12],
-                #"2012":[1,2,3,4,5,6,7,8,9,10,11,12],
-                #"2013_complete":[1,2,3,4,5,6,7,8,9,10,11,12],
-                #"2015":[1,2,3,4,5,6,7,8,9,10,11,12],
-                #"2017":[1,2,3,4,5,6,7,8,9,10,11,12]
-                "2015":[1,2,3,4,5,6,7,8,9,10,11,12]
-                 },
-            "val":
-                {"2016":[1,2,3,4,5,6,7,8,9,10,11,12]
-                 },
-            "test":
-                {"2017":[1,2,3,4,5,6,7,8,9,10,11,12]
-                 }
-            }
-
-
-
-
-
-#cv["1"] = partition1
-#cv2["2"] = partition2
-# ini. MPI
-comm = MPI.COMM_WORLD
-my_rank = comm.Get_rank()  # rank of the node
-p = comm.Get_size()  # number of assigned nodes
-if my_rank == 0:  # node is master
-    split_data_multiple_years(target_dir=target_dir,partition=partition1,varnames=varnames)
-else:
-    pass
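-
-# Illustrative invocation (not part of the original script), e.g. from a batch script:
-#   srun python mpi_split_data_multi_years.py --varnames T2 MSL gph500
-# The variable names are examples and must match those used during preprocessing.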
diff --git a/workflow_parallel_frame_prediction/README.md b/workflow_parallel_frame_prediction/README.md
deleted file mode 100755
index 434e591f8f1b94fda5b9df119226ca6d037f81dc..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/README.md
+++ /dev/null
@@ -1,56 +0,0 @@
-# Workflow for Frame Prediction by Parallel Deep Learning
-
-
-
-## Workflow for parallel deep learning 
-
-This project implements a workflow for parallel deep learning to predict the 2m temperature, based on Severin's master thesis ([code](https://github.com/severin1992/airtemprednet), [thesis](https://b2drop.eudat.eu/s/RmTd8K3pLsDMFw6)).
-
-
-The workflow consists of a sequence of steps (data extraction, data preprocessing, training and data postprocessing) to implement video prediction; each step is parallelized in order to accelerate the whole prediction process.
-
-
-The workflow has been tested on the JSC supercomputers [JURECA](https://www.fz-juelich.de/ias/jsc/EN/Expertise/Supercomputers/JURECA/JURECA_node.html) and [JUWELS](https://www.fz-juelich.de/ias/jsc/EN/Expertise/Supercomputers/JUWELS/JUWELS_node.html).
-
-
-## Requirements
-* Keras
-* Horovod
-* Python 3.6
-* mpi4py
-
-
-
-## Usage
-
-1. Clone or download this repository,
-2. Install the required modules/packages on JURECA/JUWELS.
-
-    ```shell
-    source packageInstallation.sh
-    ```
-    
-    Add the package directory to 'PYTHONPATH':
-    ```shell
-    export PYTHONPATH=/p/home/jusers/USERNAME/jureca/.local/lib/python3.6/site-packages:$PYTHONPATH
-    ``` 
-
-3. Configure the input, output and log directories in the .dat parameter file of each step.
-
-
-4. Run the corresponding .sh batch script to submit the job, for example:
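-
-    For example (a sketch; adapt the script of the step you want to run and your account settings), the preprocessing and training steps shipped in this repository can be submitted on JURECA/JUWELS with:
-    ```shell
-    sbatch DataPreprocess/Stager_devel_N_24_process_netCDF.sh
-    sbatch Training/devel_horovodJob.sh
-    ```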
-
-
-## Workflow example
-
-![Compare all types of models in one leading day](Workflow.png?raw=true )
-
diff --git a/workflow_parallel_frame_prediction/Training/data_utils.py b/workflow_parallel_frame_prediction/Training/data_utils.py
deleted file mode 100755
index d048236ffb1b84addde4d28aca6165dea8815dae..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/data_utils.py
+++ /dev/null
@@ -1,163 +0,0 @@
-import hickle as hkl
-import numpy as np
-from keras import backend as K
-from keras.preprocessing.image import Iterator
-
-
-# Debug helper: list the methods provided by the hickle module
-import inspect
-print(inspect.getmembers(hkl, predicate=inspect.ismethod))
-
-# Data generator that creates sequences for input into PredNet.
-class SequenceGenerator(Iterator):
-    def __init__(self, data_file, source_file, nt,
-                 batch_size=8, shuffle=False, seed=None,
-                 output_mode='error', sequence_start_mode='all', N_seq=None,
-                 data_format=K.image_data_format()):
-        self.X = hkl.load(data_file)  # X will be like (n_images, nb_cols, nb_rows, nb_channels)
-        self.sources = hkl.load(source_file) # source for each image so when creating sequences can assure that consecutive frames are from same video
-        self.nt = nt
-        self.batch_size = batch_size
-        self.data_format = data_format
-        assert sequence_start_mode in {'all', 'unique'}, 'sequence_start_mode must be in {all, unique}'
-        self.sequence_start_mode = sequence_start_mode
-        assert output_mode in {'error', 'prediction'}, 'output_mode must be in {error, prediction}'
-        self.output_mode = output_mode
-
-        if self.data_format == 'channels_first':
-            self.X = np.transpose(self.X, (0, 3, 1, 2))
-        self.im_shape = self.X[0].shape
-        if self.sequence_start_mode == 'all':  # allow for any possible sequence, starting from any frame
-            # Bing: source check disabled so that sequences may start at any frame
-            #self.possible_starts = np.array([i for i in range(self.X.shape[0] - self.nt) if self.sources[i] == self.sources[i + self.nt - 1]])
-            self.possible_starts = np.array([i for i in range(self.X.shape[0] - self.nt)])
-        elif self.sequence_start_mode == 'unique':  #create sequences where each unique frame is in at most one sequence
-            curr_location = 0
-            possible_starts = []
-            while curr_location < self.X.shape[0] - self.nt + 1:
-                if self.sources[curr_location] == self.sources[curr_location + self.nt - 1]:
-                    possible_starts.append(curr_location)
-                    curr_location += self.nt
-                else:
-                    curr_location += 1
-            self.possible_starts = possible_starts
-
-        if shuffle:
-            self.possible_starts = np.random.permutation(self.possible_starts)
-        if N_seq is not None and len(self.possible_starts) > N_seq:  # select a subset of sequences if desired
-            self.possible_starts = self.possible_starts[:N_seq]
-        self.N_sequences = len(self.possible_starts)
-        print("N_sequences",self.N_sequences)
-        super(SequenceGenerator, self).__init__(len(self.possible_starts), batch_size, shuffle, seed)
-
-    def __getitem__(self, null):
-        return self.next()
-
-    def next(self):
-        with self.lock:
-            current_index = (self.batch_index * self.batch_size) % self.n
-            index_array, current_batch_size = next(self.index_generator), self.batch_size
-        batch_x = np.zeros((current_batch_size, self.nt) + self.im_shape, np.float32)
-        for i, idx in enumerate(index_array):
-            idx = self.possible_starts[idx]
-            batch_x[i] = self.preprocess(self.X[idx:idx+self.nt])
-        if self.output_mode == 'error':  # model outputs errors, so y should be zeros
-            batch_y = np.zeros(current_batch_size, np.float32)
-        elif self.output_mode == 'prediction':  # output actual pixels
-            batch_y = batch_x
-        return batch_x, batch_y
-
-    def preprocess(self, X):
-        ### Normalization after extrema cut off: ###
-        #cut maxs & mins to mean+3*std & mean-3*std of training set for each parameter
-        #x_cut = np.zeros(shape=X.shape)
-        #x_cut = X*1 #pass X by value and not by reference
-        #x_cut[:,:,:,0][X[:,:,:,0]>311.5]=311.5 #set T2 upper limit
-        #x_cut[:,:,:,0][X[:,:,:,0]<258.9]=258.9 #set T2 lower limit
-        #x_cut[:,:,:,1][X[:,:,:,1]>104635.2]=104635.2 #set GP upper limit
-        #x_cut[:,:,:,1][X[:,:,:,1]<98205.6]=98205.6 #set GP lower limit ###Caution: Drastical cut ###
-        #x_cut[:,:,:,2][X[:,:,:,2]>6209.5]=6209.5 #set GPH upper limit ###Caution: Unnecessary as it succeeds max GPH ###
-        #x_cut[:,:,:,2][X[:,:,:,2]<5005.8]=5005.8 #set GPH lower limit
-        #normalize X based on max and min values(equals upper and lower limits except highCutGPH)
-        #x_processed = np.zeros(shape=X.shape)
-        #x_processed[:,:,:,0] = (x_cut[:,:,:,0]-258.9)/(311.5-258.9)
-        #x_processed[:,:,:,1] = (x_cut[:,:,:,1]-98205.6)/(104635.2-98205.6)
-        #x_processed[:,:,:,2] = (x_cut[:,:,:,2]-5005.8)/(6007.097417091836-5005.8) #GPH max stays; see above
-        
-        ### 'Standard' normalization: (x-min(x))/(max(x)-min(x)) ###
-        #x_processed = np.zeros(shape=X.shape)
-        #x_processed[:,:,:,0] = (X[:,:,:,0]-235.2141571044922)/(321.46630859375-235.2141571044922)
-        #x_processed[:,:,:,1] = (X[:,:,:,1]-93401.125)/(105391.4375-93401.125)
-        #x_processed[:,:,:,2] = (X[:,:,:,2]-4836.070232780612)/(6007.097417091836-4836.070232780612)
-
-        ### t2only 'Standard' normalization: (x-min(x))/(max(x)-min(x)) ###
-        #x_processed = np.zeros(shape=X.shape)
-        #x_processed[:,:,:,0] = (X[:,:,:,0]-235.2141571044922)/(321.46630859375-235.2141571044922)
-        #x_processed[:,:,:,1] = (X[:,:,:,1]-235.2141571044922)/(321.46630859375-235.2141571044922)
-        #x_processed[:,:,:,2] = (X[:,:,:,2]-235.2141571044922)/(321.46630859375-235.2141571044922)
-
-        ### t2_2MSL_1 'standard' normalization:
-        #x_processed = np.zeros(shape=X.shape)
-        #x_processed[:,:,:,0] = (X[:,:,:,0]-235.2141571044922)/(321.46630859375-235.2141571044922)
-        #x_processed[:,:,:,1] = (X[:,:,:,1]-235.2141571044922)/(321.46630859375-235.2141571044922)
-        #x_processed[:,:,:,2] = (X[:,:,:,2]-93401.125)/(105391.4375-93401.125)
-
-        ### t2_1MSL_2 'standard' normalization:
-        #x_processed = np.zeros(shape=X.shape)
-        #x_processed[:,:,:,0] = (X[:,:,:,0]-235.2141571044922)/(321.46630859375-235.2141571044922)
-        #x_processed[:,:,:,1] = (X[:,:,:,1]-93401.125)/(105391.4375-93401.125)
-        #x_processed[:,:,:,2] = (X[:,:,:,2]-93401.125)/(105391.4375-93401.125)
-
-        ### t2_2gph500_1 'standard' normalization:
-        #x_processed = np.zeros(shape=X.shape)
-        #x_processed[:,:,:,0] = (X[:,:,:,0]-235.2141571044922)/(321.46630859375-235.2141571044922)
-        #x_processed[:,:,:,1] = (X[:,:,:,1]-235.2141571044922)/(321.46630859375-235.2141571044922)
-        #x_processed[:,:,:,2] = (X[:,:,:,2]-4836.070232780612)/(6007.097417091836-4836.070232780612)
-        ## t2_1gph500_2 'standard' normalization:   
-        #x_processed = np.zeros(shape=X.shape)
-        #x_processed[:,:,:,0] = (X[:,:,:,0]-235.2141571044922)/(321.46630859375-235.2141571044922)
-        #x_processed[:,:,:,1] = (X[:,:,:,1]-4836.070232780612)/(6007.097417091836-4836.070232780612)
-        #x_processed[:,:,:,2] = (X[:,:,:,2]-4836.070232780612)/(6007.097417091836-4836.070232780612)
-
-        ### No standardization for moving Objects test set: Just 0s and 1s
-        #x_processed = np.zeros(shape=X.shape)
-        #x_processed = X
-
-        ### t2_1 'standard' normalization (got one dimension less, due to just one channel)
-        #x_processed = np.zeros(shape=X.shape)
-        #x_processed[:,:,:,0] = (X[:,:,:,0]-235.2141571044922)/(321.46630859375-235.2141571044922)
-        #x_processed[:,:,:,1] = X[:,:,:,1]
-        #x_processed[:,:,:,2] = X[:,:,:,2]
-
-        ### Active setting: 'standard' normalization of channels 0 and 1 with the T2 min/max; channel 2 passed through unchanged
-        x_processed = np.zeros(shape=X.shape)
-        x_processed[:,:,:,0] = (X[:,:,:,0]-235.2141571044922)/(321.46630859375-235.2141571044922)
-        x_processed[:,:,:,1] = (X[:,:,:,1]-235.2141571044922)/(321.46630859375-235.2141571044922)
-        x_processed[:,:,:,2] = X[:,:,:,2]
-
-        ### Standardization: (x-mean)/standard_deviation ###
-        #Doesn't work due to some activation functions
-        #x_processed = np.zeros(shape=X.shape)
-        #x_processed[:,:,:,0] = (X[:,:,:,0]-285.1751264870658)/8.770013367617763
-        #x_processed[:,:,:,1] = (X[:,:,:,1]-101420.4382666807)/1071.5999818175521
-        #x_processed[:,:,:,2] = (X[:,:,:,2]-5607.662795353248)/200.62593105865764
-
-        ### Standardization+Normalization ###
-        # standardize:(x-mean)/standard_deviation
-        #x_preprocessed = np.zeros(shape=X.shape)
-        #x_preprocessed[:,:,:,0] = (X[:,:,:,0]-285.1751264870658)/8.770013367617763
-        #x_preprocessed[:,:,:,1] = (X[:,:,:,1]-101420.4382666807)/1071.5999818175521
-        #x_preprocessed[:,:,:,2] = (X[:,:,:,2]-5607.662795353248)/200.62593105865764
-        # normalize:(x-min(x))/(max(x)-min(x))
-        #x_processed = np.zeros(shape=X.shape)
-        #x_processed[:,:,:,0] = (x_preprocessed[:,:,:,0]-np.amin(x_preprocessed[:,:,:,0]))/(np.amax(x_preprocessed[:,:,:,0])-np.amin(x_preprocessed[:,:,:,0]))
-        #x_processed[:,:,:,1] = (x_preprocessed[:,:,:,1]-np.amin(x_preprocessed[:,:,:,1]))/(np.amax(x_preprocessed[:,:,:,1])-np.amin(x_preprocessed[:,:,:,1]))
-        #x_processed[:,:,:,2] = (x_preprocessed[:,:,:,2]-np.amin(x_preprocessed[:,:,:,2]))/(np.amax(x_preprocessed[:,:,:,2])-np.amin(x_preprocessed[:,:,:,2]))
-
-        return x_processed.astype(np.float32)
-        #return X.astype(np.float32) / 255
-
-    def create_all(self):
-        X_all = np.zeros((self.N_sequences, self.nt) + self.im_shape, np.float32)
-        for i, idx in enumerate(self.possible_starts):
-            X_all[i] = self.preprocess(self.X[idx:idx+self.nt])
-        return X_all
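-
-
-if __name__ == '__main__':
-    # Illustrative sketch (not part of the original file): typical use of the generator
-    # for training and for evaluation. The .hkl paths below are placeholders.
-    train_file, train_sources = 'X_train.hkl', 'sources_train.hkl'
-    nt = 10  # frames per sequence
-
-    # batched sequences, e.g. for model.fit_generator(...)
-    train_generator = SequenceGenerator(train_file, train_sources, nt,
-                                        batch_size=8, shuffle=True,
-                                        sequence_start_mode='all', output_mode='error')
-
-    # or materialize every possible sequence at once, as done in the evaluation scripts
-    X_all = train_generator.create_all()   # shape: (N_sequences, nt) + image shape
-    print(X_all.shape)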
diff --git a/workflow_parallel_frame_prediction/Training/devel_horovodJob.sh b/workflow_parallel_frame_prediction/Training/devel_horovodJob.sh
deleted file mode 100644
index 6b442c1861075ed3909435ffe70c2636ec0df115..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/devel_horovodJob.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/bash
-#SBATCH --account=deepacf
-# budget account where contingent is taken from
-# TASKS = NODES * GPUS_PER_NODE
-#SBATCH --nodes=1
-#SBATCH --ntasks-per-node=2
-#SBATCH --ntasks=2
-# can be omitted if --nodes and --ntasks-per-node
-# are given
-# SBATCH --cpus-per-task=1
-# for OpenMP/hybrid jobs only
-#SBATCH --output=horovod-%j.out
-# if keyword omitted: Default is slurm-%j.out in
-# the submission directory (%j is replaced by
-# the job ID).
-#SBATCH --error=horovod-%j.err
-# if keyword omitted: Default is slurm-%j.out in
-# the submission directory.
-#SBATCH --time=00:20:00
-#SBATCH --gres=gpu:2
-#SBATCH --partition=develgpus
-#SBATCH --mail-user=b.gong@fz-juelich.de
-#SBATCH --mail-type=ALL
-
-# activate the project budget and load the required modules
-jutil env activate -p deepacf
-module --force  purge
-module load Stages/Devel-2019a
-module load GCC/8.3.0
-module load MVAPICH2/2.3.2-GDR
-module load Stages/2019a
-module load Horovod/0.16.2-GPU-Python-3.6.8
-module load Keras/2.2.4-GPU-Python-3.6.8
-#module load ParaStationMPI/5.2.2-1
-#module load h5py/2.9.0-Python-3.6.8
-# *** start of job script ***:
-# Note: The current working directory at this point is
-# the directory where sbatch was executed.
-# export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}
-srun --cpu_bind=none python3.6 kitti_train_horovod.py
diff --git a/workflow_parallel_frame_prediction/Training/evaluate_multistep.py b/workflow_parallel_frame_prediction/Training/evaluate_multistep.py
deleted file mode 100644
index a555a92973613868fc5dc7008fd0e6aa131133b6..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/evaluate_multistep.py
+++ /dev/null
@@ -1,121 +0,0 @@
-'''
-Evaluate trained PredNet
-Calculates mean-squared error and plots predictions.
-'''
-
-import os
-#import sys, argparse
-import numpy as np
-#from six.moves import cPickle
-import matplotlib
-matplotlib.use('Agg')
-import matplotlib.pyplot as plt
-import matplotlib.gridspec as gridspec
-
-from keras import backend as K
-from keras.models import Model, model_from_json
-from keras.layers import Input, Dense, Flatten
-
-from prednet import PredNet
-from data_utils import SequenceGenerator
-from kitti_settings import *
-#from scipy.misc import imsave
-
-##Just for checking how the shape is after generator.create_all() from Sequence Generator
-#import hickle as hkl
-##
-n_plot = 10 #number of plots
-batch_size = 10
-nt = 15 #number of timesteps used for sequences in training
-numtests = 18
-extrap = 10 #frame number from where extrapolation will start to be produced
-
-#parser = argparse.ArgumentParser()
-#parser.add_argument('-ft', help="fine-tune multistep: add extrap time")
-#args=parser.parse_args()
-
-weights_file = os.path.join(WEIGHTS_DIR, 'tensorflow_weights/prednet_kitti_weights-extrapfinetuned.hdf5')
-json_file = os.path.join(WEIGHTS_DIR, 'prednet_kitti_model-extrapfinetuned.json')
-test_file = os.path.join(DATA_DIR, 'X_test.hkl')
-test_sources = os.path.join(DATA_DIR, 'sources_test.hkl')
-
-#if args.ft is not None:
-#	extrap = int(args.ft)
-#	nt = extrap + 5
-#	weights_file = os.path.join(MODELS_DIR, 'prednet_ee_weights-extrapfinetuned.hdf5')
-#	json_file = os.path.join(MODELS_DIR, 'prednet_ee_model-extrapfinetuned.json')
-
-# Load trained model
-f = open(json_file, 'r')
-json_string = f.read()
-f.close()
-train_model = model_from_json(json_string, custom_objects = {'PredNet': PredNet})
-train_model.load_weights(weights_file)
-
-# Create testing model (to output predictions)
-layer_config = train_model.layers[1].get_config()
-layer_config['output_mode'] = 'prediction' #'prediction'
-layer_config['extrap_start_time'] = extrap
-data_format = layer_config['data_format'] if 'data_format' in layer_config else layer_config['dim_ordering']
-test_prednet = PredNet(weights=train_model.layers[1].get_weights(), **layer_config)
-input_shape = list(train_model.layers[0].batch_input_shape[1:])
-input_shape[0] = nt
-inputs = Input(shape=tuple(input_shape))
-predictions = test_prednet(inputs)
-test_model = Model(inputs=inputs, outputs=predictions)
-
-test_generator = SequenceGenerator(test_file, test_sources, nt, sequence_start_mode='unique', data_format=data_format) # orig: unique
-X_test = test_generator.create_all()
-X_hat = test_model.predict(X_test, batch_size)
-if data_format == 'channels_first':
-    X_test = np.transpose(X_test, (0, 1, 3, 4, 2))
-    X_hat = np.transpose(X_hat, (0, 1, 3, 4, 2))
-
-# Compare MSE of PredNet predictions vs. using last frame.  Write results to prediction_scores.txt
-shapeXhat = str(X_hat.shape) #Just have a look at the shapes to be sure we are calculating the right MSE 
-shapeXtest = str(X_test.shape) 
-mse_model = np.mean( (X_test[:, 1:,:,:,0] - X_hat[:, 1:,:,:,0])**2 )  # look at all timesteps except the first
-mse_model_last = np.mean( (X_test[:, 9,:,:,0] - X_hat[:, 14,:,:,0])**2 )
-#mse_prev = np.mean( (X_test[:, :-1,:,:,0] - X_test[:, 1:,:,:,0])**2 )
-mse_prev = np.mean( (X_test[:, 9,:,:,0] - X_test[:, 14,:,:,0])**2 )
-if not os.path.exists(RESULTS_SAVE_DIR): os.mkdir(RESULTS_SAVE_DIR)
-f = open(os.path.join(RESULTS_SAVE_DIR, 'prediction_scores.txt'), 'w')
-f.write("Model MSE: %f\n" % mse_model)
-f.write("Model MSE from only last prediction in sequence in comparison with extrap start time: %f\n" % mse_model_last)
-f.write("Previous Frame MSE last frame vs extrap start time: %f" % mse_prev)
-f.write("Shape of X_test: " +  shapeXtest)
-f.write("")
-f.write("Shape of X_hat: " +  shapeXhat)
-f.close()
-
-# Plot some predictions
-aspect_ratio = float(X_hat.shape[2]) / X_hat.shape[3]
-plt.figure(figsize = (nt, 2*aspect_ratio))
-gs = gridspec.GridSpec(2, nt)
-gs.update(wspace=0., hspace=0.)
-plot_save_dir = os.path.join(RESULTS_SAVE_DIR, 'prediction_plots/')
-if not os.path.exists(plot_save_dir): os.mkdir(plot_save_dir)
-plot_idx = np.random.permutation(X_test.shape[0])[:n_plot]
-for i in plot_idx:
-    for t in range(nt):
-        plt.subplot(gs[t])
-        plt.imshow(X_test[i,t,:,:,0], interpolation='none')
-        plt.tick_params(axis='both', which='both', bottom='off', top='off', left='off', right='off', labelbottom='off', labelleft='off')
-        if t==0: plt.ylabel('Actual', fontsize=10)
-
-        plt.subplot(gs[t + nt])
-        plt.imshow(X_hat[i,t,:,:,0], interpolation='none')
-        plt.tick_params(axis='both', which='both', bottom='off', top='off', left='off', right='off', labelbottom='off', labelleft='off')
-        if t==0: plt.ylabel('Predicted', fontsize=10)
-
-    plt.savefig(plot_save_dir +  'plot_' + str(i) + '.jpg')
-    plt.clf()
-
-#abe
-#for test in range(numtests):
-#    testdir = "tile-" + str(test)
-#    testdir = os.path.join(plot_save_dir, testdir)
-#    if not os.path.exists( testdir ) : os.mkdir( testdir )
-#    for t in range(nt):
-#	imsave( testdir + "/pred-%02d.jpg" % (t,), X_hat[test,t] )
-#	imsave( testdir + "/orig-%02d.jpg" % (t,), X_test[test,t])
\ No newline at end of file
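The evaluation script removed above scores PredNet by comparing its prediction MSE against a trivial copy-last-frame baseline. A minimal standalone sketch of that comparison, using hypothetical arrays shaped (samples, time, height, width, channels):

```python
import numpy as np

# Hypothetical stand-ins for X_test / X_hat from the removed script:
# both shaped (n_samples, n_timesteps, height, width, channels).
rng = np.random.default_rng(0)
x_true = rng.random((4, 15, 64, 64, 1)).astype("float32")
x_pred = rng.random((4, 15, 64, 64, 1)).astype("float32")

# MSE of the model over all predicted frames (skip t=0, which has no prediction).
mse_model = np.mean((x_true[:, 1:, ..., 0] - x_pred[:, 1:, ..., 0]) ** 2)

# Trivial baseline: predict each frame as a copy of the previous frame.
mse_copy_last = np.mean((x_true[:, :-1, ..., 0] - x_true[:, 1:, ..., 0]) ** 2)

print("model MSE:           %.5f" % mse_model)
print("copy-last-frame MSE: %.5f" % mse_copy_last)
```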
diff --git a/workflow_parallel_frame_prediction/Training/hickle/bin/f2py b/workflow_parallel_frame_prediction/Training/hickle/bin/f2py
deleted file mode 100755
index fcc774fba52f3705ff41babc8dbb21dae36d2c29..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/hickle/bin/f2py
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/usr/local/software/jureca/Stages/2018b/software/Python/3.6.6-GCCcore-7.3.0/bin/python
-# EASY-INSTALL-SCRIPT: 'numpy==1.15.2','f2py'
-__requires__ = 'numpy==1.15.2'
-__import__('pkg_resources').run_script('numpy==1.15.2', 'f2py')
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/__pycache__/site.cpython-36.pyc b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/__pycache__/site.cpython-36.pyc
deleted file mode 100644
index abc2e28feeb37e7e377c2cf5eadc076e5edb1e93..0000000000000000000000000000000000000000
Binary files a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/__pycache__/site.cpython-36.pyc and /dev/null differ
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/easy-install.pth b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/easy-install.pth
deleted file mode 100755
index 09ac282550d7bba3d89ef3a91ea75877f66f0384..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/easy-install.pth
+++ /dev/null
@@ -1,4 +0,0 @@
-./hickle-3.4.3-py3.6.egg
-/usr/local/software/jureca/Stages/2018b/software/h5py/2.8.0-ipsmpi-2018b-Python-3.6.6/lib/python3.6/site-packages/h5py-2.8.0-py3.6-linux-x86_64.egg
-/usr/local/software/jureca/Stages/2018b/software/SciPy-Stack/2018b-gcccoremkl-7.3.0-2019.0.117-Python-3.6.6/lib/python3.6/site-packages/numpy-1.15.2-py3.6-linux-x86_64.egg
-/usr/local/software/jureca/Stages/2018b/software/Python/3.6.6-GCCcore-7.3.0/lib/python3.6/site-packages/six-1.11.0-py3.6.egg
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/PKG-INFO b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/PKG-INFO
deleted file mode 100755
index 5f8214504c72f2cfb7307cf8259de678fba12236..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/PKG-INFO
+++ /dev/null
@@ -1,207 +0,0 @@
-Metadata-Version: 2.1
-Name: hickle
-Version: 3.4.3
-Summary: Hickle - a HDF5 based version of pickle
-Home-page: http://github.com/telegraphic/hickle
-Author: Danny Price
-Author-email: dan@thetelegraphic.com
-License: UNKNOWN
-Download-URL: https://github.com/telegraphic/hickle/archive/3.4.3.tar.gz
-Description: [![Build Status](https://travis-ci.org/telegraphic/hickle.svg?branch=master)](https://travis-ci.org/telegraphic/hickle)
-        [![JOSS Status](http://joss.theoj.org/papers/0c6638f84a1a574913ed7c6dd1051847/status.svg)](http://joss.theoj.org/papers/0c6638f84a1a574913ed7c6dd1051847)
-        
-        
-        Hickle
-        ======
-        
-        Hickle is a [HDF5](https://www.hdfgroup.org/solutions/hdf5/) based clone of `pickle`, with a twist: instead of serializing to a pickle file,
-        Hickle dumps to a HDF5 file (Hierarchical Data Format). It is designed to be a "drop-in" replacement for pickle (for common data objects), but is
-        really an amalgam of `h5py` and `dill`/`pickle` with extended functionality.
-        
-        That is: `hickle` is a neat little way of dumping python variables to HDF5 files that can be read in most programming
-        languages, not just Python. Hickle is fast, and allows for transparent compression of your data (LZF / GZIP).
-        
-        Why use Hickle?
-        ---------------
-        
-        While `hickle` is designed to be a drop-in replacement for `pickle` (or something like `json`), it works very differently.
-        Instead of serializing / json-izing, it instead stores the data using the excellent [h5py](https://www.h5py.org/) module.
-        
-        The main reasons to use hickle are:
-        
-          1. It's faster than pickle and cPickle.
-          2. It stores data in HDF5.
-          3. You can easily compress your data.
-        
-        The main reasons not to use hickle are:
-        
-          1. You don't want to store your data in HDF5. While hickle can serialize arbitrary python objects, this functionality is provided only for convenience, and you're probably better off just using the pickle module.
-          2. You want to convert your data into human-readable JSON/YAML, in which case you should do that instead.
-        
-        So, if you want your data in HDF5, or if your pickling is taking too long, give hickle a try.
-        Hickle is particularly good at storing large numpy arrays, thanks to `h5py` running under the hood.
-        
-        Documentation
-        -------------
-        
-        Documentation for hickle can be found at [telegraphic.github.io/hickle/](http://telegraphic.github.io/hickle/).
-        
-        
-        Usage example
-        -------------
-        
-        Hickle is nice and easy to use, and should look very familiar to those of you who have pickled before.
-        
-        In short, `hickle` provides two methods: a [hickle.load](http://telegraphic.github.io/hickle/toc.html#hickle.load)
-        method, for loading hickle files, and a [hickle.dump](http://telegraphic.github.io/hickle/toc.html#hickle.dump)
-        method, for dumping data into HDF5. Here's a complete example:
-        
-        ```python
-        import os
-        import hickle as hkl
-        import numpy as np
-        
-        # Create a numpy array of data
-        array_obj = np.ones(32768, dtype='float32')
-        
-        # Dump to file
-        hkl.dump(array_obj, 'test.hkl', mode='w')
-        
-        # Dump data, with compression
-        hkl.dump(array_obj, 'test_gzip.hkl', mode='w', compression='gzip')
-        
-        # Compare filesizes
-        print('uncompressed: %i bytes' % os.path.getsize('test.hkl'))
-        print('compressed:   %i bytes' % os.path.getsize('test_gzip.hkl'))
-        
-        # Load data
-        array_hkl = hkl.load('test_gzip.hkl')
-        
-        # Check the loaded data matches the original
-        assert array_hkl.dtype == array_obj.dtype
-        assert np.array_equal(array_hkl, array_obj)
-        ```
-        
-        ### HDF5 compression options
-        
-        A major benefit of `hickle` over `pickle` is that it allows fancy HDF5 features to
-        be applied, by passing on keyword arguments on to `h5py`. So, you can do things like:
-          ```python
-          hkl.dump(array_obj, 'test_lzf.hkl', mode='w', compression='lzf', scaleoffset=0,
-                   chunks=(100, 100), shuffle=True, fletcher32=True)
-          ```
-        A detailed explanation of these keywords is given at http://docs.h5py.org/en/latest/high/dataset.html,
-        but we give a quick rundown below.
-        
-        In HDF5, datasets are stored as B-trees, a tree data structure that has speed benefits over contiguous
-        blocks of data. In the B-tree, data are split into [chunks](http://docs.h5py.org/en/latest/high/dataset.html#chunked-storage),
-        which is leveraged to allow [dataset resizing](http://docs.h5py.org/en/latest/high/dataset.html#resizable-datasets) and
-        compression via [filter pipelines](http://docs.h5py.org/en/latest/high/dataset.html#filter-pipeline). Filters such as
-        `shuffle` and `scaleoffset` move your data around to improve compression ratios, and `fletcher32` computes a checksum.
-        These file-level options are abstracted away from the data model.
-        
-        Recent changes
-        --------------
-        
-        * December 2018: Accepted to Journal of Open-Source Software (JOSS).
-        * June 2018: Major refactor and support for Python 3.
-        * Aug 2016: Added support for scipy sparse matrices `bsr_matrix`, `csr_matrix` and `csc_matrix`.
-        
-        Performance comparison
-        ----------------------
-        
-        Hickle runs a lot faster than pickle with its default settings, and a little faster than pickle with `protocol=2` set:
-        
-        ```Python
-        In [1]: import numpy as np
-        
-        In [2]: x = np.random.random((2000, 2000))
-        
-        In [3]: import pickle
-        
-        In [4]: f = open('foo.pkl', 'w')
-        
-        In [5]: %time pickle.dump(x, f)  # slow by default
-        CPU times: user 2 s, sys: 274 ms, total: 2.27 s
-        Wall time: 2.74 s
-        
-        In [6]: f = open('foo.pkl', 'w')
-        
-        In [7]: %time pickle.dump(x, f, protocol=2)  # actually very fast
-        CPU times: user 18.8 ms, sys: 36 ms, total: 54.8 ms
-        Wall time: 55.6 ms
-        
-        In [8]: import hickle
-        
-        In [9]: f = open('foo.hkl', 'w')
-        
-        In [10]: %time hickle.dump(x, f)  # a bit faster
-        dumping <type 'numpy.ndarray'> to file <HDF5 file "foo.hkl" (mode r+)>
-        CPU times: user 764 us, sys: 35.6 ms, total: 36.4 ms
-        Wall time: 36.2 ms
-        ```
-        
-        So if you do continue to use pickle, add the `protocol=2` keyword (thanks @mrocklin for pointing this out).  
-        
-        For storing python dictionaries of lists, hickle beats the python json encoder, but is slower than uJson. For a dictionary with 64 entries, each containing a 4096 length list of random numbers, the times are:
-        
-        
-            json took 2633.263 ms
-            uJson took 138.482 ms
-            hickle took 232.181 ms
-        
-        
-        It should be noted that these comparisons are of course not fair: storing in HDF5 will not help you convert something into JSON, nor will it help you serialize a string. But for quick storage of the contents of a python variable, it's a pretty good option.
-        
-        Installation guidelines (for Linux and Mac OS).
-        -----------------------------------------------
-        
-        ### Easy method
-        Install with `pip` by running `pip install hickle` from the command line.
-        
-        ### Manual install
-        
-        1. You should have Python 2.7 and above installed
-        
-        2. Install h5py
-        (Official page: http://docs.h5py.org/en/latest/build.html)
-        
-        3. Install hdf5
-        (Official page: http://www.hdfgroup.org/ftp/HDF5/current/src/unpacked/release_docs/INSTALL)
-        
-        4. Download `hickle`:
-        via terminal: git clone https://github.com/telegraphic/hickle.git
-        via manual download: Go to https://github.com/telegraphic/hickle and on right hand side you will find `Download ZIP` file
-        
-        5. cd to your downloaded `hickle` directory
-        
-        6. Then run the following command in the `hickle` directory:
-             `python setup.py install`
-        
-        ### Testing
-        
-        Once installed from source, run `python setup.py test` to check it's all working.
-        
-        
-        Bugs & contributing
-        --------------------
-        
-        Contributions and bugfixes are very welcome. Please check out our [contribution guidelines](https://github.com/telegraphic/hickle/blob/master/CONTRIBUTING.md)
-        for more details on how to contribute to development.
-        
-        
-        Referencing hickle
-        ------------------
-        
-        If you use `hickle` in academic research, we would be grateful if you could reference [our paper](http://joss.theoj.org/papers/0c6638f84a1a574913ed7c6dd1051847) in the [Journal of Open-Source Software (JOSS)](http://joss.theoj.org/about).
-        
-        ```
-        Price et al., (2018). Hickle: A HDF5-based python pickle replacement. Journal of Open Source Software, 3(32), 1115, https://doi.org/10.21105/joss.01115
-        ```
-        
-Keywords: pickle,hdf5,data storage,data export
-Platform: Cross platform (Linux
-Platform: Mac OSX
-Platform: Windows)
-Requires-Python: >=2.7
-Description-Content-Type: text/markdown
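The performance session quoted in the removed PKG-INFO uses Python 2-era text-mode file handles. A minimal Python 3 sketch of the same pickle-vs-hickle timing, assuming the upstream hickle package is installed (timings are illustrative only):

```python
import os
import pickle
import time

import numpy as np
import hickle

x = np.random.random((2000, 2000))

t0 = time.time()
with open("foo.pkl", "wb") as f:       # binary mode is required in Python 3
    pickle.dump(x, f, protocol=2)      # protocol>=2 is the fast path noted above
print("pickle protocol=2: %.3f s" % (time.time() - t0))

t0 = time.time()
hickle.dump(x, "foo.hkl", mode="w")    # hickle accepts a filename directly
print("hickle:            %.3f s" % (time.time() - t0))

os.remove("foo.pkl")
os.remove("foo.hkl")
```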
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/SOURCES.txt b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/SOURCES.txt
deleted file mode 100755
index bf56f059f14d80d641efba6de75e401b4410786f..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/SOURCES.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-.gitignore
-.nojekyll
-.pylintrc
-.travis.yml
-CODE_OF_CONDUCT.md
-CONTRIBUTING.md
-LICENSE
-README.md
-_config.yml
-paper.bib
-paper.md
-requirements.txt
-setup.cfg
-setup.py
-docs/Makefile
-docs/make_docs.sh
-docs/source/conf.py
-docs/source/index.md
-docs/source/toc.rst
-docs/source/_static/empty.txt
-docs/source/_templates/empty.txt
-hickle/__init__.py
-hickle/helpers.py
-hickle/hickle.py
-hickle/hickle_legacy.py
-hickle/hickle_legacy2.py
-hickle/lookup.py
-hickle.egg-info/PKG-INFO
-hickle.egg-info/SOURCES.txt
-hickle.egg-info/dependency_links.txt
-hickle.egg-info/not-zip-safe
-hickle.egg-info/requires.txt
-hickle.egg-info/top_level.txt
-hickle/loaders/__init__.py
-hickle/loaders/load_astropy.py
-hickle/loaders/load_numpy.py
-hickle/loaders/load_pandas.py
-hickle/loaders/load_python.py
-hickle/loaders/load_python3.py
-hickle/loaders/load_scipy.py
-tests/__init__.py
-tests/test_astropy.py
-tests/test_hickle.py
-tests/test_hickle_helpers.py
-tests/test_legacy_load.py
-tests/test_scipy.py
-tests/legacy_hkls/generate_test_hickle.py
-tests/legacy_hkls/hickle_1_1_0.hkl
-tests/legacy_hkls/hickle_1_3_2.hkl
-tests/legacy_hkls/hickle_1_4_0.hkl
-tests/legacy_hkls/hickle_2_0_5.hkl
-tests/legacy_hkls/hickle_2_1_0.hkl
\ No newline at end of file
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/dependency_links.txt b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/dependency_links.txt
deleted file mode 100755
index 8b137891791fe96927ad78e64b0aad7bded08bdc..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/dependency_links.txt
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/not-zip-safe b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/not-zip-safe
deleted file mode 100755
index 8b137891791fe96927ad78e64b0aad7bded08bdc..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/not-zip-safe
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/requires.txt b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/requires.txt
deleted file mode 100755
index 8ccd55587b619ea766f8d1a76bc06739e176f552..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/requires.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-numpy
-h5py
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/top_level.txt b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/top_level.txt
deleted file mode 100755
index ce3b9fb874814125f842378fab0204ff0e9184a3..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/EGG-INFO/top_level.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-hickle
-tests
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/__init__.py b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/__init__.py
deleted file mode 100755
index 46e2ea2c6d0f5578529b3e40e060b1a244420772..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from .hickle import dump, load
-from .hickle import __version__
-
-
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/__pycache__/__init__.cpython-36.pyc b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/__pycache__/__init__.cpython-36.pyc
deleted file mode 100644
index 638a67eaaa3ab784f6e31d96cc63e6c3a1acc1e7..0000000000000000000000000000000000000000
Binary files a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/__pycache__/__init__.cpython-36.pyc and /dev/null differ
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/__pycache__/helpers.cpython-36.pyc b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/__pycache__/helpers.cpython-36.pyc
deleted file mode 100644
index 944de5cd7f681a49a8d9fbf2024be8e218cadb71..0000000000000000000000000000000000000000
Binary files a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/__pycache__/helpers.cpython-36.pyc and /dev/null differ
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/__pycache__/hickle.cpython-36.pyc b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/__pycache__/hickle.cpython-36.pyc
deleted file mode 100644
index ff8228b3857699bdd27288d585d63a1bcfa08c69..0000000000000000000000000000000000000000
Binary files a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/__pycache__/hickle.cpython-36.pyc and /dev/null differ
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/__pycache__/hickle_legacy.cpython-36.pyc b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/__pycache__/hickle_legacy.cpython-36.pyc
deleted file mode 100755
index e81a055331c0c861fbe8dbf300783bb85bcdd730..0000000000000000000000000000000000000000
Binary files a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/__pycache__/hickle_legacy.cpython-36.pyc and /dev/null differ
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/__pycache__/hickle_legacy2.cpython-36.pyc b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/__pycache__/hickle_legacy2.cpython-36.pyc
deleted file mode 100755
index 477aa15d70a77296c4b1d4b98e55aa747dd6552f..0000000000000000000000000000000000000000
Binary files a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/__pycache__/hickle_legacy2.cpython-36.pyc and /dev/null differ
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/__pycache__/lookup.cpython-36.pyc b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/__pycache__/lookup.cpython-36.pyc
deleted file mode 100644
index 6cd877598197f890a4cef46feab4f938a2529c61..0000000000000000000000000000000000000000
Binary files a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/__pycache__/lookup.cpython-36.pyc and /dev/null differ
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/helpers.py b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/helpers.py
deleted file mode 100755
index 6c3d7f9f3853101723380f4658487978605f0cf3..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/helpers.py
+++ /dev/null
@@ -1,113 +0,0 @@
-import re
-import six
-
-def get_type_and_data(h_node):
-    """ Helper function to return the py_type and data block for a HDF node """
-    py_type = h_node.attrs["type"][0]
-    data = h_node[()]
-#    if h_node.shape == ():
-#        data = h_node.value
-#    else:
-#        data  = h_node[:]
-    return py_type, data
-
-def get_type(h_node):
-    """ Helper function to return the py_type for a HDF node """
-    py_type = h_node.attrs["type"][0]
-    return py_type
-
-def sort_keys(key_list):
-    """ Take a list of strings and sort it by integer value within string
-
-    Args:
-        key_list (list): List of keys
-
-    Returns:
-        key_list_sorted (list): List of keys, sorted by integer
-    """
-
-    # Py3 h5py returns an irritating KeysView object
-    # Py3 also complains about bytes and strings, convert all keys to bytes
-    if six.PY3:
-        key_list2 = []
-        for key in key_list:
-            if isinstance(key, str):
-                key = bytes(key, 'ascii')
-            key_list2.append(key)
-        key_list = key_list2
-
-    # Check which keys contain a number
-    numbered_keys = [re.search(b'\d+', key) for key in key_list]
-
-    # Sort the keys on number if they have it, or normally if not
-    if(len(key_list) and not numbered_keys.count(None)):
-        to_int = lambda x: int(re.search(b'\d+', x).group(0))
-        return(sorted(key_list, key=to_int))
-    else:
-        return(sorted(key_list))
-
-
-def check_is_iterable(py_obj):
-    """ Check whether a python object is iterable.
-
-    Note: this treats unicode and string as NON ITERABLE
-
-    Args:
-        py_obj: python object to test
-
-    Returns:
-        iter_ok (bool): True if item is iterable, False if item is not
-    """
-    if six.PY2:
-        string_types = (str, unicode)
-    else:
-        string_types = (str, bytes, bytearray)
-    if isinstance(py_obj, string_types):
-        return False
-    try:
-        iter(py_obj)
-        return True
-    except TypeError:
-        return False
-
-
-def check_is_hashable(py_obj):
-    """ Check if a python object is hashable
-
-    Note: this function is currently not used, but is useful for future
-          development.
-
-    Args:
-        py_obj: python object to test
-    """
-
-    try:
-        py_obj.__hash__()
-        return True
-    except TypeError:
-        return False
-
-
-def check_iterable_item_type(iter_obj):
-    """ Check if all items within an iterable are the same type.
-
-    Args:
-        iter_obj: iterable object
-
-    Returns:
-        iter_type: type of item contained within the iterable. If
-                   the iterable has many types, a boolean False is returned instead.
-
-    References:
-    http://stackoverflow.com/questions/13252333/python-check-if-all-elements-of-a-list-are-the-same-type
-    """
-    iseq = iter(iter_obj)
-
-    try:
-        first_type = type(next(iseq))
-    except StopIteration:
-        return False
-    except Exception as ex:
-        return False
-    else:
-        return first_type if all((type(x) is first_type) for x in iseq) else False
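The removed helpers module sorts HDF5 group keys such as data_0, data_2, data_10 by their embedded integer rather than lexicographically, so iterables are reassembled in their original order on load. A standalone sketch of that idea (hypothetical function name):

```python
import re

def sort_keys_numeric(keys):
    """Sort keys like 'data_0', 'data_10', 'data_2' by their embedded integer,
    falling back to plain lexicographic sorting when a key has no digits."""
    if keys and all(re.search(r"\d+", k) for k in keys):
        return sorted(keys, key=lambda k: int(re.search(r"\d+", k).group(0)))
    return sorted(keys)

print(sort_keys_numeric(["data_10", "data_2", "data_0"]))
# -> ['data_0', 'data_2', 'data_10'] (lexicographic order would put 'data_10' second)
```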
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/hickle.py b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/hickle.py
deleted file mode 100755
index 24b38c3e1283618c9ce2c4d97b6960334cc08530..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/hickle.py
+++ /dev/null
@@ -1,611 +0,0 @@
-# encoding: utf-8
-"""
-# hickle.py
-
-Created by Danny Price 2016-02-03.
-
-Hickle is a HDF5 based clone of Pickle. Instead of serializing to a pickle
-file, Hickle dumps to a HDF5 file. It is designed to be as similar to pickle in
-usage as possible, providing a load() and dump() function.
-
-## Notes
-
-Hickle has two main advantages over Pickle:
-1) LARGE PICKLE HANDLING. Unpickling a large pickle is slow, as the Unpickler
-reads the entire pickle thing and loads it into memory. In comparison, HDF5
-files are designed for large datasets. Things are only loaded when accessed.
-
-2) CROSS PLATFORM SUPPORT. Attempting to unpickle a pickle pickled on Windows
-on Linux and vice versa is likely to fail with errors like "Insecure string
-pickle". HDF5 files will load fine, as long as both machines have
-h5py installed.
-
-"""
-
-from __future__ import absolute_import, division, print_function
-import sys
-import os
-from pkg_resources import get_distribution, DistributionNotFound
-from ast import literal_eval
-
-import numpy as np
-import h5py as h5
-
-
-from .helpers import get_type, sort_keys, check_is_iterable, check_iterable_item_type
-from .lookup import types_dict, hkl_types_dict, types_not_to_sort, \
-    container_types_dict, container_key_types_dict
-from .lookup import check_is_ndarray_like
-
-
-try:
-    from exceptions import Exception
-    from types import NoneType
-except ImportError:
-    pass        # above imports will fail in python3
-
-from six import PY2, PY3, string_types, integer_types
-import io
-
-# Make several aliases for Python2/Python3 compatibility
-if PY3:
-    file = io.TextIOWrapper
-
-# Import a default 'pickler'
-# Not the nicest import code, but should work on Py2/Py3
-try:
-    import dill as pickle
-except ImportError:
-    try:
-        import cPickle as pickle
-    except ImportError:
-        import pickle
-
-import warnings
-
-try:
-    __version__ = get_distribution('hickle').version
-except DistributionNotFound:
-    __version__ = '0.0.0 - please install via pip/setup.py'
-
-##################
-# Error handling #
-##################
-
-class FileError(Exception):
-    """ An exception raised if the file is fishy """
-    def __init__(self):
-        return
-
-    def __str__(self):
-        return ("Cannot open file. Please pass either a filename "
-                "string, a file object, or a h5py.File")
-
-
-class ClosedFileError(Exception):
-    """ An exception raised if the file is fishy """
-    def __init__(self):
-        return
-
-    def __str__(self):
-        return ("HDF5 file has been closed. Please pass either "
-                "a filename string, a file object, or an open h5py.File")
-
-
-class NoMatchError(Exception):
-    """ An exception raised if the object type is not understood (or
-    supported)"""
-    def __init__(self):
-        return
-
-    def __str__(self):
-        return ("Error: this type of python object cannot be converted into a "
-                "hickle.")
-
-
-class ToDoError(Exception):
-    """ An exception raised for non-implemented functionality"""
-    def __init__(self):
-        return
-
-    def __str__(self):
-        return "Error: this functionality hasn't been implemented yet."
-
-
-class SerializedWarning(UserWarning):
-    """ An object type was not understood
-
-    The data will be serialized using pickle.
-    """
-    pass
-
-
-######################
-# H5PY file wrappers #
-######################
-
-class H5GroupWrapper(h5.Group):
-    """ Group wrapper that provides a track_times kwarg.
-
-    track_times is a boolean flag that can be set to False, so that two
-    files created at different times will have identical MD5 hashes.
-    """
-    def create_dataset(self, *args, **kwargs):
-        kwargs['track_times'] = getattr(self, 'track_times', True)
-        return super(H5GroupWrapper, self).create_dataset(*args, **kwargs)
-
-    def create_group(self, *args, **kwargs):
-        group = super(H5GroupWrapper, self).create_group(*args, **kwargs)
-        group.__class__ = H5GroupWrapper
-        group.track_times = getattr(self, 'track_times', True)
-        return group
-
-
-class H5FileWrapper(h5.File):
-    """ Wrapper for h5py File that provides a track_times kwarg.
-
-    track_times is a boolean flag that can be set to False, so that two
-    files created at different times will have identical MD5 hashes.
-    """
-    def create_dataset(self, *args, **kwargs):
-        kwargs['track_times'] = getattr(self, 'track_times', True)
-        return super(H5FileWrapper, self).create_dataset(*args, **kwargs)
-
-    def create_group(self, *args, **kwargs):
-        group = super(H5FileWrapper, self).create_group(*args, **kwargs)
-        group.__class__ = H5GroupWrapper
-        group.track_times = getattr(self, 'track_times', True)
-        return group
-
-
-def file_opener(f, mode='r', track_times=True):
-    """ A file opener helper function with some error handling.  This can open
-    files through a file object, a h5py file, or just the filename.
-
-    Args:
-        f (file, h5py.File, or string): File-identifier, e.g. filename or file object.
-        mode (str): File open mode. Only required if opening by filename string.
-        track_times (bool): Track time in HDF5; turn off if you want hickling at
-                 different times to produce identical files (e.g. for MD5 hash check).
-
-    """
-
-    # Assume that we will have to close the file after dump or load
-    close_flag = True
-
-    # Were we handed a file object or just a file name string?
-    if isinstance(f, (file, io.TextIOWrapper)):
-        filename, mode = f.name, f.mode
-        f.close()
-        h5f = h5.File(filename, mode)
-    elif isinstance(f, string_types):
-        filename = f
-        h5f = h5.File(filename, mode)
-    elif isinstance(f, (H5FileWrapper, h5._hl.files.File)):
-        try:
-            filename = f.filename
-        except ValueError:
-            raise ClosedFileError
-        h5f = f
-        # Since this file was already open, do not close the file afterward
-        close_flag = False
-    else:
-        print(f.__class__)
-        raise FileError
-
-    h5f.__class__ = H5FileWrapper
-    h5f.track_times = track_times
-    return(h5f, close_flag)
-
-
-###########
-# DUMPERS #
-###########
-
-
-def _dump(py_obj, h_group, call_id=0, **kwargs):
-    """ Dump a python object to a group within a HDF5 file.
-
-    This function is called recursively by the main dump() function.
-
-    Args:
-        py_obj: python object to dump.
-        h_group (h5.File.group): group to dump data into.
-        call_id (int): index to identify object's relative location in the iterable.
-    """
-
-    # Get list of dumpable dtypes
-    dumpable_dtypes = []
-    for lst in [[bool, complex, bytes, float], string_types, integer_types]:
-        dumpable_dtypes.extend(lst)
-
-    # Firstly, check if item is a numpy array. If so, just dump it.
-    if check_is_ndarray_like(py_obj):
-        create_hkl_dataset(py_obj, h_group, call_id, **kwargs)
-
-    # Next, check if item is a dict
-    elif isinstance(py_obj, dict):
-        create_hkl_dataset(py_obj, h_group, call_id, **kwargs)
-
-    # If not, check if item is iterable
-    elif check_is_iterable(py_obj):
-        item_type = check_iterable_item_type(py_obj)
-
-        # item_type == False implies multiple types. Create a dataset
-        if item_type is False:
-            h_subgroup = create_hkl_group(py_obj, h_group, call_id)
-            for ii, py_subobj in enumerate(py_obj):
-                _dump(py_subobj, h_subgroup, call_id=ii, **kwargs)
-
-        # otherwise, subitems have same type. Check if subtype is an iterable
-        # (e.g. list of lists), or not (e.g. list of ints, which should be treated
-        # as a single dataset).
-        else:
-            if item_type in dumpable_dtypes:
-                create_hkl_dataset(py_obj, h_group, call_id, **kwargs)
-            else:
-                h_subgroup = create_hkl_group(py_obj, h_group, call_id)
-                for ii, py_subobj in enumerate(py_obj):
-                    _dump(py_subobj, h_subgroup, call_id=ii, **kwargs)
-
-    # item is not iterable, so create a dataset for it
-    else:
-        create_hkl_dataset(py_obj, h_group, call_id, **kwargs)
-
-
-def dump(py_obj, file_obj, mode='w', track_times=True, path='/', **kwargs):
-    """ Write a pickled representation of obj to the open file object file.
-
-    Args:
-    obj (object): python object to store in a Hickle
-    file: file object, filename string, or h5py.File object
-            file in which to store the object. A h5py.File or a filename is also
-            acceptable.
-    mode (str): optional argument, 'r' (read only), 'w' (write) or 'a' (append).
-            Ignored if file is a file object.
-    compression (str): optional argument. Applies compression to dataset. Options: None, gzip,
-            lzf (+ szip, if installed)
-    track_times (bool): optional argument. If set to False, repeated hickling will produce
-            identical files.
-    path (str): path within hdf5 file to save data to. Defaults to root /
-    """
-
-    # Make sure that file is not closed unless modified
-    # This is to avoid trying to close a file that was never opened
-    close_flag = False
-
-    try:
-        # Open the file
-        h5f, close_flag = file_opener(file_obj, mode, track_times)
-        h5f.attrs["CLASS"] = b'hickle'
-        h5f.attrs["VERSION"] = get_distribution('hickle').version
-        h5f.attrs["type"] = [b'hickle']
-        # Log which version of python was used to generate the hickle file
-        pv = sys.version_info
-        py_ver = "%i.%i.%i" % (pv[0], pv[1], pv[2])
-        h5f.attrs["PYTHON_VERSION"] = py_ver
-
-        h_root_group = h5f.get(path)
-
-        if h_root_group is None:
-            h_root_group = h5f.create_group(path)
-            h_root_group.attrs["type"] = [b'hickle']
-
-        _dump(py_obj, h_root_group, **kwargs)
-    except NoMatchError:
-        fname = h5f.filename
-        h5f.close()
-        try:
-            os.remove(fname)
-        except OSError:
-            warnings.warn("Dump failed. Could not remove %s" % fname)
-        finally:
-            raise NoMatchError
-    finally:
-        # Close the file if requested.
-        # Closing a file twice will not cause any problems
-        if close_flag:
-            h5f.close()
-
-
-def create_dataset_lookup(py_obj):
-    """ What type of object are we trying to pickle?  This is a python
-    dictionary based equivalent of a case statement.  It returns the correct
-    helper function for a given data type.
-
-    Args:
-        py_obj: python object to look-up what function to use to dump to disk
-
-    Returns:
-        match: function that should be used to dump data to a new dataset
-    """
-    t = type(py_obj)
-    types_lookup = {dict: create_dict_dataset}
-    types_lookup.update(types_dict)
-
-    match = types_lookup.get(t, no_match)
-
-    return match
-
-
-
-def create_hkl_dataset(py_obj, h_group, call_id=0, **kwargs):
-    """ Create a dataset within the hickle HDF5 file
-
-    Args:
-        py_obj: python object to dump.
-        h_group (h5.File.group): group to dump data into.
-        call_id (int): index to identify object's relative location in the iterable.
-
-    """
-    #lookup dataset creator type based on python object type
-    create_dataset = create_dataset_lookup(py_obj)
-
-    # do the creation
-    create_dataset(py_obj, h_group, call_id, **kwargs)
-
-
-def create_hkl_group(py_obj, h_group, call_id=0):
-    """ Create a new group within the hickle file
-
-    Args:
-        h_group (h5.File.group): group to dump data into.
-        call_id (int): index to identify object's relative location in the iterable.
-
-    """
-    h_subgroup = h_group.create_group('data_%i' % call_id)
-    h_subgroup.attrs['type'] = [str(type(py_obj)).encode('ascii', 'ignore')]
-    return h_subgroup
-
-
-def create_dict_dataset(py_obj, h_group, call_id=0, **kwargs):
-    """ Creates a data group for each key in dictionary
-
-    Notes:
-        This is a very important function which uses the recursive _dump
-        method to build up hierarchical data models stored in the HDF5 file.
-        As this is critical to functioning, it is kept in the main hickle.py
-        file instead of in the loaders/ directory.
-
-    Args:
-        py_obj: python object to dump; should be dictionary
-        h_group (h5.File.group): group to dump data into.
-        call_id (int): index to identify object's relative location in the iterable.
-    """
-    h_dictgroup = h_group.create_group('data_%i' % call_id)
-    h_dictgroup.attrs['type'] = [str(type(py_obj)).encode('ascii', 'ignore')]
-
-    for key, py_subobj in py_obj.items():
-        if isinstance(key, string_types):
-            h_subgroup = h_dictgroup.create_group("%r" % (key))
-        else:
-            h_subgroup = h_dictgroup.create_group(str(key))
-        h_subgroup.attrs["type"] = [b'dict_item']
-
-        h_subgroup.attrs["key_type"] = [str(type(key)).encode('ascii', 'ignore')]
-
-        _dump(py_subobj, h_subgroup, call_id=0, **kwargs)
-
-
-def no_match(py_obj, h_group, call_id=0, **kwargs):
-    """ If no match is made, raise an exception
-
-    Args:
-        py_obj: python object to dump; default if item is not matched.
-        h_group (h5.File.group): group to dump data into.
-        call_id (int): index to identify object's relative location in the iterable.
-    """
-    pickled_obj = pickle.dumps(py_obj)
-    d = h_group.create_dataset('data_%i' % call_id, data=[pickled_obj])
-    d.attrs["type"] = [b'pickle']
-
-    warnings.warn("%s type not understood, data have been serialized" % type(py_obj),
-                  SerializedWarning)
-
-
-
-#############
-## LOADERS ##
-#############
-
-class PyContainer(list):
-    """ A group-like object into which to load datasets.
-
-    In order to build up a tree-like structure, we need to be able
-    to load datasets into a container with an append() method.
-    Python tuples and sets do not allow this. This class provides
-    a list-like object that can be converted into a list, tuple, set or dict.
-    """
-    def __init__(self):
-        super(PyContainer, self).__init__()
-        self.container_type = None
-        self.name = None
-        self.key_type = None
-
-    def convert(self):
-        """ Convert from PyContainer to python core data type.
-
-        Returns: self, either as a list, tuple, set or dict
-                 (or other type specified in lookup.py)
-        """
-
-        if self.container_type in container_types_dict.keys():
-            convert_fn = container_types_dict[self.container_type]
-            return convert_fn(self)
-        if self.container_type == str(dict).encode('ascii', 'ignore'):
-            keys = []
-            for item in self:
-                key = item.name.split('/')[-1]
-                key_type = item.key_type[0]
-                if key_type in container_key_types_dict.keys():
-                    to_type_fn = container_key_types_dict[key_type]
-                    key = to_type_fn(key)
-                keys.append(key)
-
-            items = [item[0] for item in self]
-            return dict(zip(keys, items))
-        else:
-            return self
-
-def no_match_load(key):
-    """ If no match is made when loading, need to raise an exception
-    """
-    raise RuntimeError("Cannot load %s data type" % key)
-    #pass
-
-def load_dataset_lookup(key):
-    """ What type of object are we trying to unpickle?  This is a python
-    dictionary based equivalent of a case statement.  It returns the loader
-    function for a given 'type' keyword in the hickle file.
-
-    Args:
-        key: the 'type' keyword stored alongside the dataset in the hickle file
-
-    Returns:
-        match: function that should be used to load the dataset
-    """
-
-    match = hkl_types_dict.get(key, no_match_load)
-
-    return match
-
-def load(fileobj, path='/', safe=True):
-    """ Load a hickle file and reconstruct a python object
-
-    Args:
-        fileobj: file object, h5py.File, or filename string
-        safe (bool): Disable automatic depickling of arbitrary python objects.
-            DO NOT set this to False unless the file is from a trusted source.
-            (see http://www.cs.jhu.edu/~s/musings/pickle.html for an explanation)
-        path (str): path within hdf5 file to load data from. Defaults to root /
-    """
-
-    # Make sure that the file is not closed unless modified
-    # This is to avoid trying to close a file that was never opened
-    close_flag = False
-
-    try:
-        h5f, close_flag = file_opener(fileobj)
-        h_root_group = h5f.get(path)
-        try:
-            assert 'CLASS' in h5f.attrs.keys()
-            assert 'VERSION' in h5f.attrs.keys()
-            VER = h5f.attrs['VERSION']
-            try:
-                VER_MAJOR = int(VER)
-            except ValueError:
-                VER_MAJOR = int(VER[0])
-            if VER_MAJOR == 1:
-                if PY2:
-                    warnings.warn("Hickle file versioned as V1, attempting legacy loading...")
-                    from . import hickle_legacy
-                    return hickle_legacy.load(fileobj, safe)
-                else:
-                    raise RuntimeError("Cannot open file. This file was likely"
-                                       " created with Python 2 and an old hickle version.")
-            elif VER_MAJOR == 2:
-                if PY2:
-                    warnings.warn("Hickle file appears to be old version (v2), attempting "
-                                  "legacy loading...")
-                    from . import hickle_legacy2
-                    return hickle_legacy2.load(fileobj, path=path, safe=safe)
-                else:
-                    raise RuntimeError("Cannot open file. This file was likely"
-                                       " created with Python 2 and an old hickle version.")
-            # There is an unfortunate period of time where hickle 2.1.0 claims VERSION = int(3)
-            # For backward compatibility we really need to catch this.
-            # Actual hickle v3 files are versioned as A.B.C (e.g. 3.1.0)
-            elif VER_MAJOR == 3 and VER == VER_MAJOR:
-                if PY2:
-                    warnings.warn("Hickle file appears to be old version (v2.1.0), attempting "
-                                  "legacy loading...")
-                    from . import hickle_legacy2
-                    return hickle_legacy2.load(fileobj, path=path, safe=safe)
-                else:
-                    raise RuntimeError("Cannot open file. This file was likely"
-                                       " created with Python 2 and an old hickle version.")
-            elif VER_MAJOR >= 3:
-                py_container = PyContainer()
-                py_container.container_type = 'hickle'
-                py_container = _load(py_container, h_root_group)
-                return py_container[0][0]
-
-        except AssertionError:
-            if PY2:
-                warnings.warn("Hickle file is not versioned, attempting legacy loading...")
-                from . import hickle_legacy
-                return hickle_legacy.load(fileobj, safe)
-            else:
-                raise RuntimeError("Cannot open file. This file was likely"
-                                   " created with Python 2 and an old hickle version.")
-    finally:
-        # Close the file if requested.
-        # Closing a file twice will not cause any problems
-        if close_flag:
-            h5f.close()
-
-def load_dataset(h_node):
-    """ Load a dataset, converting into its correct python type
-
-    Args:
-        h_node (h5py dataset): h5py dataset object to read
-
-    Returns:
-        data: reconstructed python object from loaded data
-    """
-    py_type = get_type(h_node)
-
-    try:
-        load_fn = load_dataset_lookup(py_type)
-        return load_fn(h_node)
-    except:
-        raise
-        #raise RuntimeError("Hickle type %s not understood." % py_type)
-
-def _load(py_container, h_group):
-    """ Load a hickle file
-
-    Recursive function to load hdf5 data into a PyContainer()
-
-    Args:
-        py_container (PyContainer): Python container to load data into
-        h_group (h5 group or dataset): h5py object, group or dataset, to spider
-                and load all datasets.
-    """
-
-    group_dtype   = h5._hl.group.Group
-    dataset_dtype = h5._hl.dataset.Dataset
-
-    #either a file, group, or dataset
-    if isinstance(h_group, (H5FileWrapper, group_dtype)):
-
-        py_subcontainer = PyContainer()
-        try:
-            py_subcontainer.container_type = bytes(h_group.attrs['type'][0])
-        except KeyError:
-            raise
-            #py_subcontainer.container_type = ''
-        py_subcontainer.name = h_group.name
-
-        if py_subcontainer.container_type == b'dict_item':
-            py_subcontainer.key_type = h_group.attrs['key_type']
-
-        if py_subcontainer.container_type not in types_not_to_sort:
-            h_keys = sort_keys(h_group.keys())
-        else:
-            h_keys = h_group.keys()
-
-        for h_name in h_keys:
-            h_node = h_group[h_name]
-            py_subcontainer = _load(py_subcontainer, h_node)
-
-        sub_data = py_subcontainer.convert()
-        py_container.append(sub_data)
-
-    else:
-        # must be a dataset
-        subdata = load_dataset(h_group)
-        py_container.append(subdata)
-
-    return py_container
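The removed module's public surface boils down to dump() and load() plus the track_times and path keywords documented in its docstrings. A minimal round-trip sketch of that API, assuming the upstream hickle package (not this vendored copy) is installed and using hypothetical file and array names:

```python
import numpy as np
import hickle as hkl

frames = np.random.random((8, 64, 64, 3)).astype("float32")   # hypothetical data

# track_times=False makes repeated dumps byte-identical (handy for MD5 checks);
# path selects the HDF5 group the object is written under.
hkl.dump(frames, "frames.hkl", mode="w", track_times=False, path="/era5")

restored = hkl.load("frames.hkl", path="/era5")
assert np.array_equal(restored, frames)
```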
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/hickle_legacy.py b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/hickle_legacy.py
deleted file mode 100755
index 61a171fde3d39304d78d1ddede9656dd7ad50940..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/hickle_legacy.py
+++ /dev/null
@@ -1,535 +0,0 @@
-# encoding: utf-8
-"""
-# hickle_legacy.py
-
-Created by Danny Price 2012-05-28.
-
-Hickle is a HDF5 based clone of Pickle. Instead of serializing to a
-pickle file, Hickle dumps to a HDF5 file. It is designed to be as similar
-to pickle in usage as possible.
-
-## Notes
-
-This is a legacy handler, for hickle v1 files.
-If V2 reading fails, this will be called as a fail-over.
-
-"""
-
-import os
-import sys
-import numpy as np
-import h5py as h5
-
-if sys.version_info.major == 3:
-    NoneType = type(None)
-else:
-    from types import NoneType
-
-__version__ = "1.3.0"
-__author__ = "Danny Price"
-
-####################
-## Error handling ##
-####################
-
-
-class FileError(Exception):
-    """ An exception raised if the file is fishy"""
-
-    def __init__(self):
-        return
-
-    def __str__(self):
-        print("Error: cannot open file. Please pass either a filename string, a file object, "
-              "or a h5py.File")
-
-
-class NoMatchError(Exception):
-    """ An exception raised if the object type is not understood (or supported)"""
-
-    def __init__(self):
-        return
-
-    def __str__(self):
-        print("Error: this type of python object cannot be converted into a hickle.")
-
-
-class ToDoError(Exception):
-    """ An exception raised for non-implemented functionality"""
-
-    def __init__(self):
-        return
-
-    def __str__(self):
-        print("Error: this functionality hasn't been implemented yet.")
-
-
-class H5GroupWrapper(h5.Group):
-    def create_dataset(self, *args, **kwargs):
-        kwargs['track_times'] = getattr(self, 'track_times', True)
-        return super(H5GroupWrapper, self).create_dataset(*args, **kwargs)
-    
-    def create_group(self, *args, **kwargs):
-        group = super(H5GroupWrapper, self).create_group(*args, **kwargs)
-        group.__class__ = H5GroupWrapper
-        group.track_times = getattr(self, 'track_times', True)
-        return group
-
-
-class H5FileWrapper(h5.File):
-    def create_dataset(self, *args, **kwargs):
-        kwargs['track_times'] = getattr(self, 'track_times', True)
-        return super(H5FileWrapper, self).create_dataset(*args, **kwargs)
-
-    def create_group(self, *args, **kwargs):
-        group = super(H5FileWrapper, self).create_group(*args, **kwargs)
-        group.__class__ = H5GroupWrapper
-        group.track_times = getattr(self, 'track_times', True)
-        return group
-
-
-def file_opener(f, mode='r', track_times=True):
-    """ A file opener helper function with some error handling.
-  
-  This can open files through a file object, a h5py file, or just the filename.
-  """
-    # Were we handed a file object or just a file name string?
-    if isinstance(f, file):
-        filename, mode = f.name, f.mode
-        f.close()
-        h5f = h5.File(filename, mode)
-
-    elif isinstance(f, h5._hl.files.File):
-        h5f = f
-    elif isinstance(f, str):
-        filename = f
-        h5f = h5.File(filename, mode)
-    else:
-        raise FileError
-   
-    h5f.__class__ = H5FileWrapper
-    h5f.track_times = track_times
-    return h5f
-
-
-#############
-## dumpers ##
-#############
-
-def dump_ndarray(obj, h5f, **kwargs):
-    """ dumps an ndarray object to h5py file"""
-    h5f.create_dataset('data', data=obj, **kwargs)
-    h5f.create_dataset('type', data=['ndarray'])
-
-
-def dump_np_dtype(obj, h5f, **kwargs):
-    """ dumps an np dtype object to h5py file"""
-    h5f.create_dataset('data', data=obj)
-    h5f.create_dataset('type', data=['np_dtype'])
-
-
-def dump_np_dtype_dict(obj, h5f, **kwargs):
-    """ dumps an np dtype object within a group"""
-    h5f.create_dataset('data', data=obj)
-    h5f.create_dataset('_data', data=['np_dtype'])
-
-
-def dump_masked(obj, h5f, **kwargs):
-    """ dumps an ndarray object to h5py file"""
-    h5f.create_dataset('data', data=obj, **kwargs)
-    h5f.create_dataset('mask', data=obj.mask, **kwargs)
-    h5f.create_dataset('type', data=['masked'])
-
-
-def dump_list(obj, h5f, **kwargs):
-    """ dumps a list object to h5py file"""
-
-    # Check if there are any numpy arrays in the list
-    contains_numpy = any(isinstance(el, np.ndarray) for el in obj)
-
-    if contains_numpy:
-        _dump_list_np(obj, h5f, **kwargs)
-    else:
-        h5f.create_dataset('data', data=obj, **kwargs)
-        h5f.create_dataset('type', data=['list'])
-
-
-def _dump_list_np(obj, h5f, **kwargs):
-    """ Dump a list of numpy objects to file """
-
-    np_group = h5f.create_group('data')
-    h5f.create_dataset('type', data=['np_list'])
-
-    ii = 0
-    for np_item in obj:
-        np_group.create_dataset("%s" % ii, data=np_item, **kwargs)
-        ii += 1
-
-
-def dump_tuple(obj, h5f, **kwargs):
-    """ dumps a list object to h5py file"""
-
-    # Check if there are any numpy arrays in the list
-    contains_numpy = any(isinstance(el, np.ndarray) for el in obj)
-
-    if contains_numpy:
-        _dump_tuple_np(obj, h5f, **kwargs)
-    else:
-        h5f.create_dataset('data', data=obj, **kwargs)
-        h5f.create_dataset('type', data=['tuple'])
-
-
-def _dump_tuple_np(obj, h5f, **kwargs):
-    """ Dump a tuple of numpy objects to file """
-
-    np_group = h5f.create_group('data')
-    h5f.create_dataset('type', data=['np_tuple'])
-
-    ii = 0
-    for np_item in obj:
-        np_group.create_dataset("%s" % ii, data=np_item, **kwargs)
-        ii += 1
-
-
-def dump_set(obj, h5f, **kwargs):
-    """ dumps a set object to h5py file"""
-    obj = list(obj)
-    h5f.create_dataset('data', data=obj, **kwargs)
-    h5f.create_dataset('type', data=['set'])
-
-
-def dump_string(obj, h5f, **kwargs):
-    """ dumps a list object to h5py file"""
-    h5f.create_dataset('data', data=[obj], **kwargs)
-    h5f.create_dataset('type', data=['string'])
-
-
-def dump_none(obj, h5f, **kwargs):
-    """ Dump None type to file """
-    h5f.create_dataset('data', data=[0], **kwargs)
-    h5f.create_dataset('type', data=['none'])
-
-
-def dump_unicode(obj, h5f, **kwargs):
-    """ dumps a list object to h5py file"""
-    dt = h5.special_dtype(vlen=unicode)
-    ll = len(obj)
-    dset = h5f.create_dataset('data', shape=(ll, ), dtype=dt, **kwargs)
-    dset[:ll] = obj
-    h5f.create_dataset('type', data=['unicode'])
-
-
-def _dump_dict(dd, hgroup, **kwargs):
-    for key in dd:
-        if type(dd[key]) in (str, int, float, unicode, bool):
-            # Figure out type to be stored
-            types = {str: 'str', int: 'int', float: 'float',
-                     unicode: 'unicode', bool: 'bool', NoneType: 'none'}
-            _key = types.get(type(dd[key]))
-
-            # Store along with dtype info
-            if _key == 'unicode':
-                dd[key] = str(dd[key])
-
-            hgroup.create_dataset("%s" % key, data=[dd[key]], **kwargs)
-            hgroup.create_dataset("_%s" % key, data=[_key])
-
-        elif type(dd[key]) in (type(np.array([1])), type(np.ma.array([1]))):
-
-            if hasattr(dd[key], 'mask'):
-                hgroup.create_dataset("_%s" % key, data=["masked"])
-                hgroup.create_dataset("%s" % key, data=dd[key].data, **kwargs)
-                hgroup.create_dataset("_%s_mask" % key, data=dd[key].mask, **kwargs)
-            else:
-                hgroup.create_dataset("_%s" % key, data=["ndarray"])
-                hgroup.create_dataset("%s" % key, data=dd[key], **kwargs)
-
-        elif type(dd[key]) is list:
-            hgroup.create_dataset("%s" % key, data=dd[key], **kwargs)
-            hgroup.create_dataset("_%s" % key, data=["list"])
-            
-        elif type(dd[key]) is tuple:
-            hgroup.create_dataset("%s" % key, data=dd[key], **kwargs)
-            hgroup.create_dataset("_%s" % key, data=["tuple"])
-
-        elif type(dd[key]) is set:
-            hgroup.create_dataset("%s" % key, data=list(dd[key]), **kwargs)
-            hgroup.create_dataset("_%s" % key, data=["set"])
-
-        elif isinstance(dd[key], dict):
-            new_group = hgroup.create_group("%s" % key)
-            _dump_dict(dd[key], new_group, **kwargs)
-            
-        elif type(dd[key]) is NoneType:
-            hgroup.create_dataset("%s" % key, data=[0], **kwargs)
-            hgroup.create_dataset("_%s" % key, data=["none"])
-            
-        else:
-            if type(dd[key]).__module__ == np.__name__:
-                #print type(dd[key])
-                hgroup.create_dataset("%s" % key, data=dd[key])
-                hgroup.create_dataset("_%s" % key, data=["np_dtype"])
-                #new_group = hgroup.create_group("%s" % key)
-                #dump_np_dtype_dict(dd[key], new_group)
-            else:
-                raise NoMatchError
-
-
-def dump_dict(obj, h5f='', **kwargs):
-    """ dumps a dictionary to h5py file """
-    h5f.create_dataset('type', data=['dict'])
-    hgroup = h5f.create_group('data')
-    _dump_dict(obj, hgroup, **kwargs)
-
-
-def no_match(obj, h5f, *args, **kwargs):
-    """ If no match is made, raise an exception """
-    try:
-        import dill as cPickle
-    except ImportError:
-        import cPickle
-
-    pickled_obj = cPickle.dumps(obj)
-    h5f.create_dataset('type', data=['pickle'])
-    h5f.create_dataset('data', data=[pickled_obj])
-
-    print("Warning: %s type not understood, data have been serialized" % type(obj))
-    #raise NoMatchError
-
-
-def dumper_lookup(obj):
-    """ What type of object are we trying to pickle?
-   
-  This is a python dictionary based equivalent of a case statement.
-  It returns the correct helper function for a given data type.
-  """
-    t = type(obj)
-
-    types = {
-        list: dump_list,
-        tuple: dump_tuple,
-        set: dump_set,
-        dict: dump_dict,
-        str: dump_string,
-        unicode: dump_unicode,
-        NoneType: dump_none,
-        np.ndarray: dump_ndarray,
-        np.ma.core.MaskedArray: dump_masked,
-        np.float16: dump_np_dtype,
-        np.float32: dump_np_dtype,
-        np.float64: dump_np_dtype,
-        np.int8: dump_np_dtype,
-        np.int16: dump_np_dtype,
-        np.int32: dump_np_dtype,
-        np.int64: dump_np_dtype,
-        np.uint8: dump_np_dtype,
-        np.uint16: dump_np_dtype,
-        np.uint32: dump_np_dtype,
-        np.uint64: dump_np_dtype,
-        np.complex64: dump_np_dtype,
-        np.complex128: dump_np_dtype,
-    }
-
-    match = types.get(t, no_match)
-    return match
-
-
-def dump(obj, file, mode='w', track_times=True, **kwargs):
-    """ Write a pickled representation of obj to the open file object file.
-  
-  Parameters
-  ----------
-  obj: object
-    python object to store in a Hickle
-  file: file object, filename string, or h5py.File object
-    file in which to store the object. A h5py.File or a filename is also acceptable.
-  mode: string
-    optional argument, 'r' (read only), 'w' (write) or 'a' (append). Ignored if file
-    is a file object.
-  compression: str
-    optional argument. Applies compression to dataset. Options: None, gzip, lzf (+ szip,
-    if installed)
-  track_times: bool
-    optional argument. If set to False, repeated hickling will produce identical files.
-  """
-
-    try:
-        # See what kind of object to dump
-        dumper = dumper_lookup(obj)
-        # Open the file
-        h5f = file_opener(file, mode, track_times)
-        print("dumping %s to file %s" % (type(obj), repr(h5f)))
-        dumper(obj, h5f, **kwargs)
-        h5f.close()
-    except NoMatchError:
-        fname = h5f.filename
-        h5f.close()
-        try:
-            os.remove(fname)
-        except OSError:
-            print("Warning: dump failed. Could not remove %s" % fname)
-        finally:
-            raise NoMatchError
-
-
-#############
-## loaders ##
-#############
-
-def load(file, safe=True):
-    """ Load a hickle file and reconstruct a python object
-  
-  Parameters
-  ----------
-  file: file object, h5py.File, or filename string
-  
-  safe (bool): Disable automatic depickling of arbitrary python objects. 
-  DO NOT set this to False unless the file is from a trusted source.
-  (see http://www.cs.jhu.edu/~s/musings/pickle.html for an explanation)
-  """
-
-    try:
-        h5f = file_opener(file)
-        dtype = h5f["type"][0]
-
-        if dtype == 'dict':
-            group = h5f["data"]
-            data = load_dict(group)
-        elif dtype == 'pickle':
-            data = load_pickle(h5f, safe)
-        elif dtype == 'np_list':
-            group = h5f["data"]
-            data = load_np_list(group)
-        elif dtype == 'np_tuple':
-            group = h5f["data"]
-            data = load_np_tuple(group)
-        elif dtype == 'masked':
-            data = np.ma.array(h5f["data"][:], mask=h5f["mask"][:])
-        elif dtype == 'none':
-            data = None
-        else:
-            if dtype in ('string', 'unicode'):
-                data = h5f["data"][0]
-            else:
-                try:
-                    data = h5f["data"][:]
-                except ValueError:
-                    data = h5f["data"]
-            types = {
-                'list': list,
-                'set': set,
-                'unicode': unicode,
-                'string': str,
-                'ndarray': load_ndarray,
-                'np_dtype': load_np_dtype
-            }
-
-            mod = types.get(dtype, no_match)
-            data = mod(data)
-    finally:
-        if 'h5f' in locals():
-            h5f.close()
-    return data
-
-
-def load_pickle(h5f, safe=True):
-    """ Deserialize and load a pickled object within a hickle file
-  
-  WARNING: Pickle can execute arbitrary code on load; only depickle data from trusted sources.
-  
-  Parameters
-  ----------
-  h5f: h5py.File object
-  
-  safe (bool): Disable automatic depickling of arbitrary python objects. 
-  DO NOT set this to False unless the file is from a trusted source.
-  (see http://www.cs.jhu.edu/~s/musings/pickle.html for an explanation)
-  """
-
-    if not safe:
-        try:
-            import dill as cPickle
-        except ImportError:
-            import cPickle
-
-        data = h5f["data"][:]
-        data = cPickle.loads(data[0])
-        return data
-    else:
-        print("\nWarning: Object is of an unknown type, and has not been loaded")
-        print("         for security reasons (it could be malicious code). If")
-        print("         you wish to continue, manually set safe=False\n")
-
-
-def load_np_list(group):
-    """ load a numpy list """
-    np_list = []
-    for key in sorted(group.keys()):
-        data = group[key][:]
-        np_list.append(data)
-    return np_list
-
-
-def load_np_tuple(group):
-    """ load a tuple containing numpy arrays """
-    return tuple(load_np_list(group))
-
-
-def load_ndarray(arr):
-    """ Load a numpy array """
-    # Nothing to be done!
-    return arr
-
-
-def load_np_dtype(arr):
-    """ Load a numpy array """
-    # Just return first value
-    return arr.value
-
-
-def load_dict(group):
-    """ Load dictionary """
-
-    dd = {}
-    for key in group.keys():
-        if isinstance(group[key], h5._hl.group.Group):
-            new_group = group[key]
-            dd[key] = load_dict(new_group)
-        elif not key.startswith("_"):
-            _key = "_%s" % key
-
-            if group[_key][0] == 'np_dtype':
-                dd[key] = group[key].value
-            elif group[_key][0] in ('str', 'int', 'float', 'unicode', 'bool'):
-                dd[key] = group[key][0]
-            elif group[_key][0] == 'masked':
-                key_ma = "_%s_mask" % key
-                dd[key] = np.ma.array(group[key][:], mask=group[key_ma])
-            else:
-                dd[key] = group[key][:]
-
-            # Convert numpy constructs back to string
-            dtype = group[_key][0]
-            types = {'str': str, 'int': int, 'float': float,
-                     'unicode': unicode, 'bool': bool, 'list': list, 'none' : NoneType}
-            try:
-                mod = types.get(dtype)
-                if dtype == 'none':
-                    dd[key] = None
-                else:
-                    dd[key] = mod(dd[key])
-            except:
-                pass
-    return dd
-
-
-def load_large(file):
-    """ Load a large hickle file (returns the h5py object not the data)
-
-  Parameters
-  ----------
-  file: file object, h5py.File, or filename string  
-  """
-
-    h5f = file_opener(file)
-    return h5f
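
The legacy dumper above picks a handler through a plain dictionary keyed on the exact object type and falls back to pickling when nothing matches. A rough, self-contained sketch of that dispatch pattern follows; all names below are illustrative and not part of the deleted module.

import pickle
import numpy as np

def dump_list(obj):
    # Illustrative handler: tag the payload with its container type.
    return {"type": "list", "data": list(obj)}

def dump_ndarray(obj):
    return {"type": "ndarray", "data": np.asarray(obj)}

def no_match(obj):
    # Fallback mirrors the legacy behaviour: serialize the object opaquely.
    return {"type": "pickle", "data": pickle.dumps(obj)}

DUMPERS = {list: dump_list, np.ndarray: dump_ndarray}

def dumper_lookup(obj):
    # Dictionary-based "case statement": exact type match, else the fallback.
    return DUMPERS.get(type(obj), no_match)

print(dumper_lookup([1, 2, 3])([1, 2, 3])["type"])          # list
print(dumper_lookup(np.arange(3))(np.arange(3))["type"])    # ndarray
print(dumper_lookup(object())(object())["type"])            # pickle
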
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/hickle_legacy2.py b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/hickle_legacy2.py
deleted file mode 100755
index 4d018fde9a161713213b00190267439257cb876d..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/hickle_legacy2.py
+++ /dev/null
@@ -1,672 +0,0 @@
-# encoding: utf-8
-"""
-# hickle_legacy2.py
-
-Created by Danny Price 2016-02-03.
-
-This is a legacy handler, for hickle v2 files.
-If V3 reading fails, this will be called as a fail-over.
-
-"""
-
-import os
-import numpy as np
-import h5py as h5
-import re
-
-try:
-    from exceptions import Exception
-    from types import NoneType
-except ImportError:
-    pass        # above imports will fail in python3
-
-import warnings
-__version__ = "2.0.4"
-__author__ = "Danny Price"
-
-
-##################
-# Error handling #
-##################
-
-class FileError(Exception):
-    """ An exception raised if the file is fishy """
-    def __init__(self):
-        return
-
-    def __str__(self):
-        return ("Cannot open file. Please pass either a filename "
-                "string, a file object, or a h5py.File")
-
-
-class ClosedFileError(Exception):
-    """ An exception raised if the file is fishy """
-    def __init__(self):
-        return
-
-    def __str__(self):
-        return ("HDF5 file has been closed. Please pass either "
-                "a filename string, a file object, or an open h5py.File")
-
-
-class NoMatchError(Exception):
-    """ An exception raised if the object type is not understood (or
-    supported)"""
-    def __init__(self):
-        return
-
-    def __str__(self):
-        return ("Error: this type of python object cannot be converted into a "
-                "hickle.")
-
-
-class ToDoError(Exception):
-    """ An exception raised for non-implemented functionality"""
-    def __init__(self):
-        return
-
-    def __str__(self):
-        return "Error: this functionality hasn't been implemented yet."
-
-
-######################
-# H5PY file wrappers #
-######################
-
-class H5GroupWrapper(h5.Group):
-    """ Group wrapper that provides a track_times kwarg.
-
-    track_times is a boolean flag that can be set to False, so that two
-    files created at different times will have identical MD5 hashes.
-    """
-    def create_dataset(self, *args, **kwargs):
-        kwargs['track_times'] = getattr(self, 'track_times', True)
-        return super(H5GroupWrapper, self).create_dataset(*args, **kwargs)
-
-    def create_group(self, *args, **kwargs):
-        group = super(H5GroupWrapper, self).create_group(*args, **kwargs)
-        group.__class__ = H5GroupWrapper
-        group.track_times = getattr(self, 'track_times', True)
-        return group
-
-
-class H5FileWrapper(h5.File):
-    """ Wrapper for h5py File that provides a track_times kwarg.
-
-    track_times is a boolean flag that can be set to False, so that two
-    files created at different times will have identical MD5 hashes.
-    """
-    def create_dataset(self, *args, **kwargs):
-        kwargs['track_times'] = getattr(self, 'track_times', True)
-        return super(H5FileWrapper, self).create_dataset(*args, **kwargs)
-
-    def create_group(self, *args, **kwargs):
-        group = super(H5FileWrapper, self).create_group(*args, **kwargs)
-        group.__class__ = H5GroupWrapper
-        group.track_times = getattr(self, 'track_times', True)
-        return group
-
-
-def file_opener(f, mode='r', track_times=True):
-    """ A file opener helper function with some error handling.  This can open
-    files through a file object, a h5py file, or just the filename.
-
-    Args:
-        f (file, h5py.File, or string): File-identifier, e.g. filename or file object.
-        mode (str): File open mode. Only required if opening by filename string.
-        track_times (bool): Track time in HDF5; turn off if you want hickling at
-                 different times to produce identical files (e.g. for MD5 hash check).
-
-    """
-    # Were we handed a file object or just a file name string?
-    if isinstance(f, file):
-        filename, mode = f.name, f.mode
-        f.close()
-        h5f = h5.File(filename, mode)
-    elif isinstance(f, str) or isinstance(f, unicode):
-        filename = f
-        h5f = h5.File(filename, mode)
-    elif isinstance(f, H5FileWrapper) or isinstance(f, h5._hl.files.File):
-        try:
-            filename = f.filename
-        except ValueError:
-            raise ClosedFileError()
-        h5f = f
-    else:
-        print(type(f))
-        raise FileError
-
-    h5f.__class__ = H5FileWrapper
-    h5f.track_times = track_times
-    return h5f
-
-
-###########
-# DUMPERS #
-###########
-
-def check_is_iterable(py_obj):
-    """ Check whether a python object is iterable.
-
-    Note: this treats unicode and string as NON ITERABLE
-
-    Args:
-        py_obj: python object to test
-
-    Returns:
-        iter_ok (bool): True if item is iterable, False if it is not
-    """
-    if type(py_obj) in (str, unicode):
-        return False
-    try:
-        iter(py_obj)
-        return True
-    except TypeError:
-        return False
-
-
-def check_iterable_item_type(iter_obj):
-    """ Check if all items within an iterable are the same type.
-
-    Args:
-        iter_obj: iterable object
-
-    Returns:
-        iter_type: type of item contained within the iterable. If
-                   the iterable has many types, a boolean False is returned instead.
-
-    References:
-    http://stackoverflow.com/questions/13252333/python-check-if-all-elements-of-a-list-are-the-same-type
-    """
-    iseq = iter(iter_obj)
-    first_type = type(next(iseq))
-    return first_type if all((type(x) is first_type) for x in iseq) else False
-
-
-def check_is_numpy_array(py_obj):
-    """ Check if a python object is a numpy array (masked or regular)
-
-    Args:
-        py_obj: python object to check whether it is a numpy array
-
-    Returns
-        is_numpy (bool): True if py_obj is a numpy array, otherwise False
-    """
-
-    is_numpy = type(py_obj) in (type(np.array([1])), type(np.ma.array([1])))
-
-    return is_numpy
-
-
-def _dump(py_obj, h_group, call_id=0, **kwargs):
-    """ Dump a python object to a group within a HDF5 file.
-
-    This function is called recursively by the main dump() function.
-
-    Args:
-        py_obj: python object to dump.
-        h_group (h5.File.group): group to dump data into.
-        call_id (int): index to identify object's relative location in the iterable.
-    """
-
-    dumpable_dtypes = set([bool, int, float, long, complex, str, unicode])
-
-    # Firstly, check if item is a numpy array. If so, just dump it.
-    if check_is_numpy_array(py_obj):
-        create_hkl_dataset(py_obj, h_group, call_id, **kwargs)
-
-    # next, check if item is iterable
-    elif check_is_iterable(py_obj):
-        item_type = check_iterable_item_type(py_obj)
-
-        # item_type == False implies multiple types: create a group and dump each item into it
-        if item_type is False:
-            h_subgroup = create_hkl_group(py_obj, h_group, call_id)
-            for ii, py_subobj in enumerate(py_obj):
-                _dump(py_subobj, h_subgroup, call_id=ii, **kwargs)
-
-        # otherwise, subitems have same type. Check if subtype is an iterable
-        # (e.g. list of lists), or not (e.g. list of ints, which should be treated
-        # as a single dataset).
-        else:
-            if item_type in dumpable_dtypes:
-                create_hkl_dataset(py_obj, h_group, call_id, **kwargs)
-            else:
-                h_subgroup = create_hkl_group(py_obj, h_group, call_id)
-                for ii, py_subobj in enumerate(py_obj):
-                    #print py_subobj, h_subgroup, ii
-                    _dump(py_subobj, h_subgroup, call_id=ii, **kwargs)
-
-    # item is not iterable, so create a dataset for it
-    else:
-        create_hkl_dataset(py_obj, h_group, call_id, **kwargs)
-
-
-def dump(py_obj, file_obj, mode='w', track_times=True, path='/', **kwargs):
-    """ Write a pickled representation of obj to the open file object file.
-
-    Args:
-    obj (object): python object to store in a Hickle
-    file: file object, filename string, or h5py.File object
-            file in which to store the object. A h5py.File or a filename is also
-            acceptable.
-    mode (str): optional argument, 'r' (read only), 'w' (write) or 'a' (append).
-            Ignored if file is a file object.
-    compression (str): optional argument. Applies compression to dataset. Options: None, gzip,
-            lzf (+ szip, if installed)
-    track_times (bool): optional argument. If set to False, repeated hickling will produce
-            identical files.
-    path (str): path within hdf5 file to save data to. Defaults to root /
-    """
-
-    try:
-        # Open the file
-        h5f = file_opener(file_obj, mode, track_times)
-        h5f.attrs["CLASS"] = 'hickle'
-        h5f.attrs["VERSION"] = 2
-        h5f.attrs["type"] = ['hickle']
-
-        h_root_group = h5f.get(path)
-
-        if h_root_group is None:
-            h_root_group = h5f.create_group(path)
-            h_root_group.attrs["type"] = ['hickle']
-
-        _dump(py_obj, h_root_group, **kwargs)
-        h5f.close()
-    except NoMatchError:
-        fname = h5f.filename
-        h5f.close()
-        try:
-            os.remove(fname)
-        except OSError:
-            warnings.warn("Dump failed. Could not remove %s" % fname)
-        finally:
-            raise NoMatchError
-
-
-def create_dataset_lookup(py_obj):
-    """ What type of object are we trying to pickle?  This is a python
-    dictionary based equivalent of a case statement.  It returns the correct
-    helper function for a given data type.
-
-    Args:
-        py_obj: python object to look-up what function to use to dump to disk
-
-    Returns:
-        match: function that should be used to dump data to a new dataset
-    """
-    t = type(py_obj)
-
-    types = {
-        dict: create_dict_dataset,
-        list: create_listlike_dataset,
-        tuple: create_listlike_dataset,
-        set: create_listlike_dataset,
-        str: create_stringlike_dataset,
-        unicode: create_stringlike_dataset,
-        int: create_python_dtype_dataset,
-        float: create_python_dtype_dataset,
-        long: create_python_dtype_dataset,
-        bool: create_python_dtype_dataset,
-        complex: create_python_dtype_dataset,
-        NoneType: create_none_dataset,
-        np.ndarray: create_np_array_dataset,
-        np.ma.core.MaskedArray: create_np_array_dataset,
-        np.float16: create_np_dtype_dataset,
-        np.float32: create_np_dtype_dataset,
-        np.float64: create_np_dtype_dataset,
-        np.int8: create_np_dtype_dataset,
-        np.int16: create_np_dtype_dataset,
-        np.int32: create_np_dtype_dataset,
-        np.int64: create_np_dtype_dataset,
-        np.uint8: create_np_dtype_dataset,
-        np.uint16: create_np_dtype_dataset,
-        np.uint32: create_np_dtype_dataset,
-        np.uint64: create_np_dtype_dataset,
-        np.complex64: create_np_dtype_dataset,
-        np.complex128: create_np_dtype_dataset
-    }
-
-    match = types.get(t, no_match)
-    return match
-
-
-def create_hkl_dataset(py_obj, h_group, call_id=0, **kwargs):
-    """ Create a dataset within the hickle HDF5 file
-
-    Args:
-        py_obj: python object to dump.
-        h_group (h5.File.group): group to dump data into.
-        call_id (int): index to identify object's relative location in the iterable.
-
-    """
-    #lookup dataset creator type based on python object type
-    create_dataset = create_dataset_lookup(py_obj)
-
-    # do the creation
-    create_dataset(py_obj, h_group, call_id, **kwargs)
-
-
-def create_hkl_group(py_obj, h_group, call_id=0):
-    """ Create a new group within the hickle file
-
-    Args:
-        h_group (h5.File.group): group to dump data into.
-        call_id (int): index to identify object's relative location in the iterable.
-
-    """
-    h_subgroup = h_group.create_group('data_%i' % call_id)
-    h_subgroup.attrs["type"] = [str(type(py_obj))]
-    return h_subgroup
-
-
-def create_listlike_dataset(py_obj, h_group, call_id=0, **kwargs):
-    """ Dumper for list, set, tuple
-
-    Args:
-        py_obj: python object to dump; should be list-like
-        h_group (h5.File.group): group to dump data into.
-        call_id (int): index to identify object's relative location in the iterable.
-    """
-    dtype = str(type(py_obj))
-    obj = list(py_obj)
-    d = h_group.create_dataset('data_%i' % call_id, data=obj, **kwargs)
-    d.attrs["type"] = [dtype]
-
-
-def create_np_dtype_dataset(py_obj, h_group, call_id=0, **kwargs):
-    """ dumps an np dtype object to h5py file
-
-    Args:
-        py_obj: python object to dump; should be a numpy scalar, e.g. np.float16(1)
-        h_group (h5.File.group): group to dump data into.
-        call_id (int): index to identify object's relative location in the iterable.
-    """
-    d = h_group.create_dataset('data_%i' % call_id, data=py_obj, **kwargs)
-    d.attrs["type"] = ['np_dtype']
-    d.attrs["np_dtype"] = str(d.dtype)
-
-
-def create_python_dtype_dataset(py_obj, h_group, call_id=0, **kwargs):
-    """ dumps a python dtype object to h5py file
-
-    Args:
-        py_obj: python object to dump; should be a python type (int, float, bool etc)
-        h_group (h5.File.group): group to dump data into.
-        call_id (int): index to identify object's relative location in the iterable.
-    """
-    d = h_group.create_dataset('data_%i' % call_id, data=py_obj,
-                               dtype=type(py_obj), **kwargs)
-    d.attrs["type"] = ['python_dtype']
-    d.attrs['python_subdtype'] = str(type(py_obj))
-
-
-def create_dict_dataset(py_obj, h_group, call_id=0, **kwargs):
-    """ Creates a data group for each key in dictionary
-
-    Args:
-        py_obj: python object to dump; should be dictionary
-        h_group (h5.File.group): group to dump data into.
-        call_id (int): index to identify object's relative location in the iterable.
-    """
-    h_dictgroup = h_group.create_group('data_%i' % call_id)
-    h_dictgroup.attrs["type"] = ['dict']
-    for key, py_subobj in py_obj.items():
-        h_subgroup = h_dictgroup.create_group(key)
-        h_subgroup.attrs["type"] = ['dict_item']
-        _dump(py_subobj, h_subgroup, call_id=0, **kwargs)
-
-
-def create_np_array_dataset(py_obj, h_group, call_id=0, **kwargs):
-    """ dumps an ndarray object to h5py file
-
-    Args:
-        py_obj: python object to dump; should be a numpy array or np.ma.array (masked)
-        h_group (h5.File.group): group to dump data into.
-        call_id (int): index to identify object's relative location in the iterable.
-    """
-    if isinstance(py_obj, type(np.ma.array([1]))):
-        d = h_group.create_dataset('data_%i' % call_id, data=py_obj, **kwargs)
-        #m = h_group.create_dataset('mask_%i' % call_id, data=py_obj.mask, **kwargs)
-        m = h_group.create_dataset('data_%i_mask' % call_id, data=py_obj.mask, **kwargs)
-        d.attrs["type"] = ['ndarray_masked_data']
-        m.attrs["type"] = ['ndarray_masked_mask']
-    else:
-        d = h_group.create_dataset('data_%i' % call_id, data=py_obj, **kwargs)
-        d.attrs["type"] = ['ndarray']
-
-
-def create_stringlike_dataset(py_obj, h_group, call_id=0, **kwargs):
-    """ dumps a list object to h5py file
-
-    Args:
-        py_obj: python object to dump; should be string-like (unicode or string)
-        h_group (h5.File.group): group to dump data into.
-        call_id (int): index to identify object's relative location in the iterable.
-    """
-    if isinstance(py_obj, str):
-        d = h_group.create_dataset('data_%i' % call_id, data=[py_obj], **kwargs)
-        d.attrs["type"] = ['string']
-    else:
-        dt = h5.special_dtype(vlen=unicode)
-        dset = h_group.create_dataset('data_%i' % call_id, shape=(1, ), dtype=dt, **kwargs)
-        dset[0] = py_obj
-        dset.attrs['type'] = ['unicode']
-
-
-def create_none_dataset(py_obj, h_group, call_id=0, **kwargs):
-    """ Dump None type to file
-
-    Args:
-        py_obj: python object to dump; must be None object
-        h_group (h5.File.group): group to dump data into.
-        call_id (int): index to identify object's relative location in the iterable.
-    """
-    d = h_group.create_dataset('data_%i' % call_id, data=[0], **kwargs)
-    d.attrs["type"] = ['none']
-
-
-def no_match(py_obj, h_group, call_id=0, **kwargs):
-    """ If no match is made, raise an exception
-
-    Args:
-        py_obj: python object to dump; default if item is not matched.
-        h_group (h5.File.group): group to dump data into.
-        call_id (int): index to identify object's relative location in the iterable.
-    """
-    try:
-        import dill as cPickle
-    except ImportError:
-        import cPickle
-
-    pickled_obj = cPickle.dumps(py_obj)
-    d = h_group.create_dataset('data_%i' % call_id, data=[pickled_obj])
-    d.attrs["type"] = ['pickle']
-
-    warnings.warn("%s type not understood, data have been "
-                  "serialized" % type(py_obj))
-
-
-#############
-## LOADERS ##
-#############
-
-class PyContainer(list):
-    """ A group-like object into which to load datasets.
-
-    In order to build up a tree-like structure, we need to be able
-    to load datasets into a container with an append() method.
-    Python tuples and sets do not allow this. This class provides
-    a list-like object that can be converted into a list, tuple, set or dict.
-    """
-    def __init__(self):
-        super(PyContainer, self).__init__()
-        self.container_type = None
-        self.name = None
-
-    def convert(self):
-        """ Convert from PyContainer to python core data type.
-
-        Returns: self, either as a list, tuple, set or dict
-        """
-        if self.container_type == "<type 'list'>":
-            return list(self)
-        if self.container_type == "<type 'tuple'>":
-            return tuple(self)
-        if self.container_type == "<type 'set'>":
-            return set(self)
-        if self.container_type == "dict":
-            keys = [str(item.name.split('/')[-1]) for item in self]
-            items = [item[0] for item in self]
-            return dict(zip(keys, items))
-        else:
-            return self
-
-
-def load(fileobj, path='/', safe=True):
-    """ Load a hickle file and reconstruct a python object
-
-    Args:
-        fileobj: file object, h5py.File, or filename string
-            safe (bool): Disable automatic depickling of arbitrary python objects.
-            DO NOT set this to False unless the file is from a trusted source.
-            (see http://www.cs.jhu.edu/~s/musings/pickle.html for an explanation)
-
-        path (str): path within hdf5 file to save data to. Defaults to root /
-    """
-
-    try:
-        h5f = file_opener(fileobj)
-        h_root_group = h5f.get(path)
-
-        try:
-            assert 'CLASS' in h5f.attrs.keys()
-            assert 'VERSION' in h5f.attrs.keys()
-            py_container = PyContainer()
-            py_container.container_type = 'hickle'
-            py_container = _load(py_container, h_root_group)
-            return py_container[0][0]
-        except AssertionError:
-            import hickle_legacy
-            return hickle_legacy.load(fileobj, safe)
-    finally:
-        if 'h5f' in locals():
-            h5f.close()
-
-
-def load_dataset(h_node):
-    """ Load a dataset, converting into its correct python type
-
-    Args:
-        h_node (h5py dataset): h5py dataset object to read
-
-    Returns:
-        data: reconstructed python object from loaded data
-    """
-    py_type = h_node.attrs["type"][0]
-
-    if h_node.shape == ():
-        data = h_node.value
-    else:
-        data  = h_node[:]
-
-    if py_type == "<type 'list'>":
-        #print self.name
-        return list(data)
-    elif py_type == "<type 'tuple'>":
-        return tuple(data)
-    elif py_type == "<type 'set'>":
-        return set(data)
-    elif py_type == "np_dtype":
-        subtype = h_node.attrs["np_dtype"]
-        data = np.array(data, dtype=subtype)
-        return data
-    elif py_type == 'ndarray':
-        return np.array(data)
-    elif py_type == 'ndarray_masked_data':
-        try:
-            mask_path = h_node.name + "_mask"
-            h_root = h_node.parent
-            mask = h_root.get(mask_path)[:]
-        except IndexError:
-            mask = h_root.get(mask_path)
-        except ValueError:
-            mask = h_root.get(mask_path)
-        data = np.ma.array(data, mask=mask)
-        return data
-    elif py_type == 'python_dtype':
-        subtype = h_node.attrs["python_subdtype"]
-        type_dict = {
-            "<type 'int'>": int,
-            "<type 'float'>": float,
-            "<type 'long'>": long,
-            "<type 'bool'>": bool,
-            "<type 'complex'>": complex
-        }
-        tcast = type_dict.get(subtype)
-        return tcast(data)
-    elif py_type == 'string':
-        return str(data[0])
-    elif py_type == 'unicode':
-        return unicode(data[0])
-    elif py_type == 'none':
-        return None
-    else:
-        print(h_node.name, py_type, h_node.attrs.keys())
-        return data
-
-
-def sort_keys(key_list):
-    """ Take a list of strings and sort it by integer value within string
-
-    Args:
-        key_list (list): List of keys
-
-    Returns:
-        key_list_sorted (list): List of keys, sorted by integer
-    """
-    to_int = lambda x: int(re.search(r'\d+', x).group(0))
-    keys_by_int = sorted([(to_int(key), key) for key in key_list])
-    return [ii[1] for ii in keys_by_int]
-
-
-def _load(py_container, h_group):
-    """ Load a hickle file
-
-    Recursive function to load hdf5 data into a PyContainer()
-
-    Args:
-        py_container (PyContainer): Python container to load data into
-        h_group (h5 group or dataset): h5py object, group or dataset, to spider
-                and load all datasets.
-    """
-
-    group_dtype   = h5._hl.group.Group
-    dataset_dtype = h5._hl.dataset.Dataset
-
-    #either a file, group, or dataset
-    if isinstance(h_group, H5FileWrapper) or isinstance(h_group, group_dtype):
-        py_subcontainer = PyContainer()
-        py_subcontainer.container_type = h_group.attrs['type'][0]
-        py_subcontainer.name = h_group.name
-
-        if py_subcontainer.container_type != 'dict':
-            h_keys = sort_keys(h_group.keys())
-        else:
-            h_keys = h_group.keys()
-
-        for h_name in h_keys:
-            h_node = h_group[h_name]
-            py_subcontainer = _load(py_subcontainer, h_node)
-
-        sub_data = py_subcontainer.convert()
-        py_container.append(sub_data)
-
-    else:
-        # must be a dataset
-        subdata = load_dataset(h_group)
-        py_container.append(subdata)
-
-    #print h_group.name, py_container
-    return py_container
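
hickle v2, as deleted above, tags every HDF5 group and dataset with a `type` attribute and rebuilds nested containers by visiting children named `data_0`, `data_1`, ... in numeric order (see `sort_keys`). Below is a minimal sketch of that convention using plain h5py; the file name and the helper are illustrative, not the original API.

import re
import h5py
import numpy as np

def numeric_sort(keys):
    # Order "data_0", "data_1", ..., "data_10" numerically, as sort_keys() above does.
    return sorted(keys, key=lambda k: int(re.search(r"\d+", k).group(0)))

with h5py.File("demo_v2_style.h5", "w") as h5f:
    grp = h5f.create_group("data_0")
    grp.attrs["type"] = ["<type 'tuple'>"]        # container tag consulted on load
    for i, item in enumerate(([1, 2, 3], np.arange(4.0))):
        d = grp.create_dataset("data_%i" % i, data=item)
        d.attrs["type"] = ["<type 'list'>" if isinstance(item, list) else "ndarray"]

    # Read back through the same handle: walk the children in numeric order and
    # rebuild the container named by the group's tag (a tuple here).
    items = [grp[k][:] for k in numeric_sort(grp.keys())]
    print(tuple(items))
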
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/__init__.py b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/__init__.py
deleted file mode 100755
index 3be6bd298581fb3086bb5a261de72a56970faddf..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from __future__ import absolute_import
\ No newline at end of file
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/__pycache__/__init__.cpython-36.pyc b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/__pycache__/__init__.cpython-36.pyc
deleted file mode 100644
index 059bbcb18d24b4ed243c011342d5220fc0ca9b4b..0000000000000000000000000000000000000000
Binary files a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/__pycache__/__init__.cpython-36.pyc and /dev/null differ
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/__pycache__/load_astropy.cpython-36.pyc b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/__pycache__/load_astropy.cpython-36.pyc
deleted file mode 100644
index 3856511eb477a1bd3a48b33dd2325efe9afb4735..0000000000000000000000000000000000000000
Binary files a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/__pycache__/load_astropy.cpython-36.pyc and /dev/null differ
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/__pycache__/load_numpy.cpython-36.pyc b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/__pycache__/load_numpy.cpython-36.pyc
deleted file mode 100644
index 0b431b401afea544b1894a3b08b282d070c988e1..0000000000000000000000000000000000000000
Binary files a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/__pycache__/load_numpy.cpython-36.pyc and /dev/null differ
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/__pycache__/load_pandas.cpython-36.pyc b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/__pycache__/load_pandas.cpython-36.pyc
deleted file mode 100644
index 2df2075b66902547e8435e4a81eba6a175408411..0000000000000000000000000000000000000000
Binary files a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/__pycache__/load_pandas.cpython-36.pyc and /dev/null differ
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/__pycache__/load_python.cpython-36.pyc b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/__pycache__/load_python.cpython-36.pyc
deleted file mode 100755
index ada924cd472a1993704ccdda86ab632d87e62aa2..0000000000000000000000000000000000000000
Binary files a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/__pycache__/load_python.cpython-36.pyc and /dev/null differ
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/__pycache__/load_python3.cpython-36.pyc b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/__pycache__/load_python3.cpython-36.pyc
deleted file mode 100644
index 53f514453416469ddfd6d8843477414904a3276f..0000000000000000000000000000000000000000
Binary files a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/__pycache__/load_python3.cpython-36.pyc and /dev/null differ
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/__pycache__/load_scipy.cpython-36.pyc b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/__pycache__/load_scipy.cpython-36.pyc
deleted file mode 100644
index aff088cfb6e7c7c16bc2fd5a3ae5dd05f77bdd41..0000000000000000000000000000000000000000
Binary files a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/__pycache__/load_scipy.cpython-36.pyc and /dev/null differ
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_astropy.py b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_astropy.py
deleted file mode 100755
index dd8efce655c2223262b42868cbb1d9ba5c580acb..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_astropy.py
+++ /dev/null
@@ -1,237 +0,0 @@
-import numpy as np
-from astropy.units import Quantity
-from astropy.coordinates import Angle, SkyCoord
-from astropy.constants import Constant, EMConstant
-from astropy.table import Table
-from astropy.time import Time
-
-from hickle.helpers import get_type_and_data
-import six
-
-def create_astropy_quantity(py_obj, h_group, call_id=0, **kwargs):
-    """ dumps an astropy quantity
-
-    Args:
-        py_obj: python object to dump; should be a python type (int, float, bool etc)
-        h_group (h5.File.group): group to dump data into.
-        call_id (int): index to identify object's relative location in the iterable.
-    """
-    # kwarg compression etc does not work on scalars
-    d = h_group.create_dataset('data_%i' % call_id, data=py_obj.value,
-                               dtype='float64')     #, **kwargs)
-    d.attrs["type"] = [b'astropy_quantity']
-    if six.PY3:
-        unit = bytes(str(py_obj.unit), 'ascii')
-    else:
-        unit = str(py_obj.unit)
-    d.attrs['unit'] = [unit]
-
-def create_astropy_angle(py_obj, h_group, call_id=0, **kwargs):
-    """ dumps an astropy Angle
-
-    Args:
-        py_obj: python object to dump; should be a python type (int, float, bool etc)
-        h_group (h5.File.group): group to dump data into.
-        call_id (int): index to identify object's relative location in the iterable.
-    """
-    # kwarg compression etc does not work on scalars
-    d = h_group.create_dataset('data_%i' % call_id, data=py_obj.value,
-                               dtype='float64')     #, **kwargs)
-    d.attrs["type"] = [b'astropy_angle']
-    if six.PY3:
-        unit = str(py_obj.unit).encode('ascii')
-    else:
-        unit = str(py_obj.unit)
-    d.attrs['unit'] = [unit]
-
-def create_astropy_skycoord(py_obj, h_group, call_id=0, **kwargs):
-    """ dumps an astropy SkyCoord
-
-    Args:
-        py_obj: python object to dump; should be a python type (int, float, bool etc)
-        h_group (h5.File.group): group to dump data into.
-        call_id (int): index to identify object's relative location in the iterable.
-    """
-    # kwarg compression etc does not work on scalars
-    lat = py_obj.data.lat.value
-    lon = py_obj.data.lon.value
-    dd = np.column_stack((lon, lat))
-
-    d = h_group.create_dataset('data_%i' % call_id, data=dd,
-                               dtype='float64')     #, **kwargs)
-    d.attrs["type"] = [b'astropy_skycoord']
-    if six.PY3:
-        lon_unit = str(py_obj.data.lon.unit).encode('ascii')
-        lat_unit = str(py_obj.data.lat.unit).encode('ascii')
-    else:
-        lon_unit = str(py_obj.data.lon.unit)
-        lat_unit = str(py_obj.data.lat.unit)
-    d.attrs['lon_unit'] = [lon_unit]
-    d.attrs['lat_unit'] = [lat_unit]
-
-def create_astropy_time(py_obj, h_group, call_id=0, **kwargs):
-    """ dumps an astropy Time object
-
-    Args:
-        py_obj: python object to dump; should be a python type (int, float, bool etc)
-        h_group (h5.File.group): group to dump data into.
-        call_id (int): index to identify object's relative location in the iterable.
-    """
-
-    # kwarg compression etc does not work on scalars
-    data = py_obj.value
-    dtype = str(py_obj.value.dtype)
-
-    # Need to catch string times
-    if '<U' in dtype:
-        dtype = dtype.replace('<U', '|S')
-        print(dtype)
-        data = []
-        for item in py_obj.value:
-            data.append(str(item).encode('ascii'))
-
-    d = h_group.create_dataset('data_%i' % call_id, data=data, dtype=dtype)     #, **kwargs)
-    d.attrs["type"] = [b'astropy_time']
-    if six.PY2:
-        fmt   = str(py_obj.format)
-        scale = str(py_obj.scale)
-    else:
-        fmt   = str(py_obj.format).encode('ascii')
-        scale = str(py_obj.scale).encode('ascii')
-    d.attrs['format'] = [fmt]
-    d.attrs['scale']  = [scale]
-
-def create_astropy_constant(py_obj, h_group, call_id=0, **kwargs):
-    """ dumps an astropy constant
-
-    Args:
-        py_obj: python object to dump; should be a python type (int, float, bool etc)
-        h_group (h5.File.group): group to dump data into.
-        call_id (int): index to identify object's relative location in the iterable.
-    """
-    # kwarg compression etc does not work on scalars
-    d = h_group.create_dataset('data_%i' % call_id, data=py_obj.value,
-                               dtype='float64')     #, **kwargs)
-    d.attrs["type"]   = [b'astropy_constant']
-    d.attrs["unit"]   = [str(py_obj.unit)]
-    d.attrs["abbrev"] = [str(py_obj.abbrev)]
-    d.attrs["name"]   = [str(py_obj.name)]
-    d.attrs["reference"] = [str(py_obj.reference)]
-    d.attrs["uncertainty"] = [py_obj.uncertainty]
-
-    if py_obj.system:
-        d.attrs["system"] = [py_obj.system]
-
-
-def create_astropy_table(py_obj, h_group, call_id=0, **kwargs):
-    """ Dump an astropy Table
-
-    Args:
-        py_obj: python object to dump; should be a python type (int, float, bool etc)
-        h_group (h5.File.group): group to dump data into.
-        call_id (int): index to identify object's relative location in the iterable.
-    """
-    data = py_obj.as_array()
-    d = h_group.create_dataset('data_%i' % call_id, data=data, dtype=data.dtype, **kwargs)
-    d.attrs['type']  = [b'astropy_table']
-
-    if six.PY3:
-        colnames = [bytes(cn, 'ascii') for cn in py_obj.colnames]
-    else:
-        colnames = py_obj.colnames
-    d.attrs['colnames'] = colnames
-    for key, value in py_obj.meta.items():
-        d.attrs[key] = value
-
-
-def load_astropy_quantity_dataset(h_node):
-    py_type, data = get_type_and_data(h_node)
-    unit = h_node.attrs["unit"][0]
-    q = Quantity(data, unit)
-    return q
-
-def load_astropy_time_dataset(h_node):
-    py_type, data = get_type_and_data(h_node)
-    if six.PY3:
-        fmt = h_node.attrs["format"][0].decode('ascii')
-        scale = h_node.attrs["scale"][0].decode('ascii')
-    else:
-        fmt = h_node.attrs["format"][0]
-        scale = h_node.attrs["scale"][0]
-    q = Time(data, format=fmt, scale=scale)
-    return q
-
-def load_astropy_angle_dataset(h_node):
-    py_type, data = get_type_and_data(h_node)
-    unit = h_node.attrs["unit"][0]
-    q = Angle(data, unit)
-    return q
-
-def load_astropy_skycoord_dataset(h_node):
-    py_type, data = get_type_and_data(h_node)
-    lon_unit = h_node.attrs["lon_unit"][0]
-    lat_unit = h_node.attrs["lat_unit"][0]
-    q = SkyCoord(data[:,0], data[:, 1], unit=(lon_unit, lat_unit))
-    return q
-
-def load_astropy_constant_dataset(h_node):
-    py_type, data = get_type_and_data(h_node)
-    unit   = h_node.attrs["unit"][0]
-    abbrev = h_node.attrs["abbrev"][0]
-    name   = h_node.attrs["name"][0]
-    ref    = h_node.attrs["reference"][0]
-    unc    = h_node.attrs["uncertainty"][0]
-
-    system = None
-    if "system" in h_node.attrs.keys():
-        system = h_node.attrs["system"][0]
-
-    c = Constant(abbrev, name, data, unit, unc, ref, system)
-    return c
-
-def load_astropy_table(h_node):
-    py_type, data = get_type_and_data(h_node)
-    metadata = dict(h_node.attrs.items())
-    metadata.pop('type')
-    metadata.pop('colnames')
-
-    if six.PY3:
-        colnames = [cn.decode('ascii') for cn in h_node.attrs["colnames"]]
-    else:
-        colnames = h_node.attrs["colnames"]
-
-    t = Table(data, names=colnames, meta=metadata)
-    return t
-
-def check_is_astropy_table(py_obj):
-    return isinstance(py_obj, Table)
-
-def check_is_astropy_quantity_array(py_obj):
-    if isinstance(py_obj, Quantity) or isinstance(py_obj, Time) or \
-       isinstance(py_obj, Angle) or isinstance(py_obj, SkyCoord):
-        if py_obj.isscalar:
-            return False
-        else:
-            return True
-    else:
-        return False
-
-
-#####################
-# Lookup dictionary #
-#####################
-
-class_register = [
-    [Quantity, b'astropy_quantity', create_astropy_quantity, load_astropy_quantity_dataset,
-     True, check_is_astropy_quantity_array],
-    [Time,     b'astropy_time', create_astropy_time, load_astropy_time_dataset,
-     True, check_is_astropy_quantity_array],
-    [Angle,    b'astropy_angle', create_astropy_angle, load_astropy_angle_dataset,
-     True, check_is_astropy_quantity_array],
-    [SkyCoord, b'astropy_skycoord', create_astropy_skycoord, load_astropy_skycoord_dataset,
-     True, check_is_astropy_quantity_array],
-    [Constant, b'astropy_constant', create_astropy_constant, load_astropy_constant_dataset,
-     True, None],
-    [Table,    b'astropy_table',  create_astropy_table, load_astropy_table,
-     True, check_is_astropy_table]
-]
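
Each `class_register` row above pairs a class with a byte type tag, a dump function, a load function, an iterable flag and an optional extra check. The sketch below shows, under stated assumptions, how such a table can be folded into the two lookup dictionaries the loaders rely on; it uses a toy class so it runs without astropy, and all names are illustrative.

# A toy register row following the same shape:
# [class, type_tag (bytes), dump_fn, load_fn, iterable_flag, extra_check_or_None]

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

def create_point(obj):
    # Dump: tag the record and keep the payload as a plain tuple.
    return {"type": b"point", "data": (obj.x, obj.y)}

def load_point(record):
    # Load: rebuild the original object from the tagged record.
    return Point(*record["data"])

class_register = [
    [Point, b"point", create_point, load_point, True, None],
]

# Build the two lookup tables the loaders above rely on.
types_dict = {row[0]: row[2] for row in class_register}       # class -> dumper
hkl_types_dict = {row[1]: row[3] for row in class_register}   # tag   -> loader

record = types_dict[type(Point(1, 2))](Point(1, 2))
restored = hkl_types_dict[record["type"]](record)
print(restored.x, restored.y)   # 1 2
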
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_numpy.py b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_numpy.py
deleted file mode 100755
index 7a31b12e235b07cccb6b1f0045ca9ccbfb874454..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_numpy.py
+++ /dev/null
@@ -1,145 +0,0 @@
-# encoding: utf-8
-"""
-# load_numpy.py
-
-Utilities and dump / load handlers for handling numpy and scipy arrays
-
-"""
-import six
-import numpy as np
-
-
-from hickle.helpers import get_type_and_data
-
-
-def check_is_numpy_array(py_obj):
-    """ Check if a python object is a numpy array (masked or regular)
-
-    Args:
-        py_obj: python object to check whether it is a numpy array
-
-    Returns
-        is_numpy (bool): True if py_obj is a numpy array, otherwise False
-    """
-
-    is_numpy = type(py_obj) in (type(np.array([1])), type(np.ma.array([1])))
-
-    return is_numpy
-
-
-def create_np_scalar_dataset(py_obj, h_group, call_id=0, **kwargs):
-    """ dumps an np dtype object to h5py file
-
-    Args:
-        py_obj: python object to dump; should be a numpy scalar, e.g. np.float16(1)
-        h_group (h5.File.group): group to dump data into.
-        call_id (int): index to identify object's relative location in the iterable.
-    """
-
-    # DO NOT PASS KWARGS TO SCALAR DATASETS!
-    d = h_group.create_dataset('data_%i' % call_id, data=py_obj)  # **kwargs)
-    d.attrs["type"] = [b'np_scalar']
-
-    if six.PY2:
-        d.attrs["np_dtype"] = str(d.dtype)
-    else:
-        d.attrs["np_dtype"] = bytes(str(d.dtype), 'ascii')
-
-
-def create_np_dtype(py_obj, h_group, call_id=0, **kwargs):
-    """ dumps an np dtype object to h5py file
-
-    Args:
-        py_obj: python object to dump; should be a numpy scalar, e.g. np.float16(1)
-        h_group (h5.File.group): group to dump data into.
-        call_id (int): index to identify object's relative location in the iterable.
-    """
-    d = h_group.create_dataset('data_%i' % call_id, data=[str(py_obj)])
-    d.attrs["type"] = [b'np_dtype']
-
-
-def create_np_array_dataset(py_obj, h_group, call_id=0, **kwargs):
-    """ dumps an ndarray object to h5py file
-
-    Args:
-        py_obj: python object to dump; should be a numpy array or np.ma.array (masked)
-        h_group (h5.File.group): group to dump data into.
-        call_id (int): index to identify object's relative location in the iterable.
-    """
-    if isinstance(py_obj, type(np.ma.array([1]))):
-        d = h_group.create_dataset('data_%i' % call_id, data=py_obj, **kwargs)
-        #m = h_group.create_dataset('mask_%i' % call_id, data=py_obj.mask, **kwargs)
-        m = h_group.create_dataset('data_%i_mask' % call_id, data=py_obj.mask, **kwargs)
-        d.attrs["type"] = [b'ndarray_masked_data']
-        m.attrs["type"] = [b'ndarray_masked_mask']
-    else:
-        d = h_group.create_dataset('data_%i' % call_id, data=py_obj, **kwargs)
-        d.attrs["type"] = [b'ndarray']
-
-
-
-
-#######################
-## Lookup dictionary ##
-#######################
-
-types_dict = {
-    np.ndarray:  create_np_array_dataset,
-    np.ma.core.MaskedArray: create_np_array_dataset,
-    np.float16:    create_np_scalar_dataset,
-    np.float32:    create_np_scalar_dataset,
-    np.float64:    create_np_scalar_dataset,
-    np.int8:       create_np_scalar_dataset,
-    np.int16:      create_np_scalar_dataset,
-    np.int32:      create_np_scalar_dataset,
-    np.int64:      create_np_scalar_dataset,
-    np.uint8:      create_np_scalar_dataset,
-    np.uint16:     create_np_scalar_dataset,
-    np.uint32:     create_np_scalar_dataset,
-    np.uint64:     create_np_scalar_dataset,
-    np.complex64:  create_np_scalar_dataset,
-    np.complex128: create_np_scalar_dataset,
-    np.dtype:      create_np_dtype
-}
-
-def load_np_dtype_dataset(h_node):
-    py_type, data = get_type_and_data(h_node)
-    data = np.dtype(data[0])
-    return data
-
-def load_np_scalar_dataset(h_node):
-    py_type, data = get_type_and_data(h_node)
-    subtype = h_node.attrs["np_dtype"]
-    data = np.array([data], dtype=subtype)[0]
-    return data
-
-def load_ndarray_dataset(h_node):
-    py_type, data = get_type_and_data(h_node)
-    return np.array(data, copy=False)
-
-def load_ndarray_masked_dataset(h_node):
-    py_type, data = get_type_and_data(h_node)
-    try:
-        mask_path = h_node.name + "_mask"
-        h_root = h_node.parent
-        mask = h_root.get(mask_path)[:]
-    except IndexError:
-        mask = h_root.get(mask_path)
-    except ValueError:
-        mask = h_root.get(mask_path)
-    data = np.ma.array(data, mask=mask)
-    return data
-
-def load_nothing(h_node):
-    pass
-
-hkl_types_dict = {
-    b"np_dtype"            : load_np_dtype_dataset,
-    b"np_scalar"           : load_np_scalar_dataset,
-    b"ndarray"             : load_ndarray_dataset,
-    b"numpy.ndarray"       : load_ndarray_dataset,
-    b"ndarray_masked_data" : load_ndarray_masked_dataset,
-    b"ndarray_masked_mask" : load_nothing        # Loaded automatically
-}
-
-
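
The masked-array handlers above write the raw data to `data_%i` and the mask to a sibling `data_%i_mask` dataset, then reattach them with `np.ma.array` on load. A minimal round trip under that convention with plain h5py follows; the file name is illustrative.

import h5py
import numpy as np

arr = np.ma.array([1.0, 2.0, 3.0], mask=[False, True, False])

with h5py.File("masked_demo.h5", "w") as h5f:
    # The raw data buffer is written; the mask goes into a sibling "_mask" dataset.
    d = h5f.create_dataset("data_0", data=arr)
    m = h5f.create_dataset("data_0_mask", data=arr.mask)
    d.attrs["type"] = [b"ndarray_masked_data"]
    m.attrs["type"] = [b"ndarray_masked_mask"]

    # Reload: fetch the sibling mask by name and reattach it.
    node = h5f["data_0"]
    mask = h5f[node.name + "_mask"][:]
    restored = np.ma.array(node[:], mask=mask)
    print(restored)   # [1.0 -- 3.0]
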
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_pandas.py b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_pandas.py
deleted file mode 100755
index 0b5185533dafe9d2f8b2c45405967d7489ce7caf..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_pandas.py
+++ /dev/null
@@ -1,4 +0,0 @@
-import pandas as pd
-
-# TODO: populate with classes to load
-class_register = []
\ No newline at end of file
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_python.py b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_python.py
deleted file mode 100755
index 58de921ed13e2e9b0c57ad724e94fa2ac9a3268f..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_python.py
+++ /dev/null
@@ -1,141 +0,0 @@
-# encoding: utf-8
-"""
-# load_python.py
-
-Handlers for dumping and loading built-in python types.
-NB: As these are for built-in types, they are critical to the functioning of hickle.
-
-"""
-
-from hickle.helpers import get_type_and_data
-
-import sys
-if sys.version_info.major == 3:
-    unicode = str
-    str = bytes
-    long = int
-    NoneType = type(None)
-else:
-    from types import NoneType
-
-import h5py as h5
-
-def create_listlike_dataset(py_obj, h_group, call_id=0, **kwargs):
-    """ Dumper for list, set, tuple
-
-    Args:
-        py_obj: python object to dump; should be list-like
-        h_group (h5.File.group): group to dump data into.
-        call_id (int): index to identify object's relative location in the iterable.
-    """
-    dtype = str(type(py_obj))
-    obj = list(py_obj)
-    d = h_group.create_dataset('data_%i' % call_id, data=obj, **kwargs)
-    d.attrs["type"] = [dtype]
-
-
-def create_python_dtype_dataset(py_obj, h_group, call_id=0, **kwargs):
-    """ dumps a python dtype object to h5py file
-
-    Args:
-        py_obj: python object to dump; should be a python type (int, float, bool etc)
-        h_group (h5.File.group): group to dump data into.
-        call_id (int): index to identify object's relative location in the iterable.
-    """
-    # kwarg compression etc does not work on scalars
-    d = h_group.create_dataset('data_%i' % call_id, data=py_obj,
-                               dtype=type(py_obj))     #, **kwargs)
-    d.attrs["type"] = ['python_dtype']
-    d.attrs['python_subdtype'] = str(type(py_obj))
-
-
-def create_stringlike_dataset(py_obj, h_group, call_id=0, **kwargs):
-    """ dumps a list object to h5py file
-
-    Args:
-        py_obj: python object to dump; should be string-like (unicode or string)
-        h_group (h5.File.group): group to dump data into.
-        call_id (int): index to identify object's relative location in the iterable.
-    """
-    if isinstance(py_obj, str):
-        d = h_group.create_dataset('data_%i' % call_id, data=[py_obj], **kwargs)
-        d.attrs["type"] = ['string']
-    else:
-        dt = h5.special_dtype(vlen=unicode)
-        dset = h_group.create_dataset('data_%i' % call_id, shape=(1, ), dtype=dt, **kwargs)
-        dset[0] = py_obj
-        dset.attrs['type'] = ['unicode']
-
-
-def create_none_dataset(py_obj, h_group, call_id=0, **kwargs):
-    """ Dump None type to file
-
-    Args:
-        py_obj: python object to dump; must be None object
-        h_group (h5.File.group): group to dump data into.
-        call_id (int): index to identify object's relative location in the iterable.
-    """
-    d = h_group.create_dataset('data_%i' % call_id, data=[0], **kwargs)
-    d.attrs["type"] = ['none']
-
-
-def load_list_dataset(h_node):
-    py_type, data = get_type_and_data(h_node)
-    return list(data)
-
-def load_tuple_dataset(h_node):
-    py_type, data = get_type_and_data(h_node)
-    return tuple(data)
-
-def load_set_dataset(h_node):
-    py_type, data = get_type_and_data(h_node)
-    return set(data)
-
-def load_string_dataset(h_node):
-    py_type, data = get_type_and_data(h_node)
-    return str(data[0])
-
-def load_unicode_dataset(h_node):
-    py_type, data = get_type_and_data(h_node)
-    return unicode(data[0])
-
-def load_none_dataset(h_node):
-    return None
-
-def load_python_dtype_dataset(h_node):
-    py_type, data = get_type_and_data(h_node)
-    subtype = h_node.attrs["python_subdtype"]
-    type_dict = {
-        "<type 'int'>": int,
-        "<type 'float'>": float,
-        "<type 'long'>": long,
-        "<type 'bool'>": bool,
-        "<type 'complex'>": complex
-    }
-    tcast = type_dict.get(subtype)
-    return tcast(data)
-
-types_dict = {
-    list:        create_listlike_dataset,
-    tuple:       create_listlike_dataset,
-    set:         create_listlike_dataset,
-    str:         create_stringlike_dataset,
-    unicode:     create_stringlike_dataset,
-    int:         create_python_dtype_dataset,
-    float:       create_python_dtype_dataset,
-    long:        create_python_dtype_dataset,
-    bool:        create_python_dtype_dataset,
-    complex:     create_python_dtype_dataset,
-    NoneType:    create_none_dataset,
-}
-
-hkl_types_dict = {
-    "<type 'list'>"  : load_list_dataset,
-    "<type 'tuple'>" : load_tuple_dataset,
-    "<type 'set'>"   : load_set_dataset,
-    "python_dtype"   : load_python_dtype_dataset,
-    "string"         : load_string_dataset,
-    "unicode"        : load_unicode_dataset,
-    "none"           : load_none_dataset
-}
-
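
Python scalars are stored with a `python_subdtype` attribute holding `str(type(obj))`, and the loader casts the value back through a small lookup table. Below is a hedged, Python 3 flavoured sketch of that round trip with plain h5py; on Python 2 the subtype strings would read `<type 'float'>` instead, and the names here are illustrative.

import h5py

value = 3.14

with h5py.File("pyscalar_demo.h5", "w") as h5f:
    d = h5f.create_dataset("data_0", data=value)
    d.attrs["type"] = ["python_dtype"]
    d.attrs["python_subdtype"] = str(type(value))       # "<class 'float'>" on Python 3

    subtype = d.attrs["python_subdtype"]
    if isinstance(subtype, bytes):                      # older h5py may hand back bytes
        subtype = subtype.decode("ascii")
    type_dict = {"<class 'int'>": int, "<class 'float'>": float,
                 "<class 'bool'>": bool, "<class 'complex'>": complex}
    restored = type_dict[subtype](d[()])
    print(restored, type(restored))                     # 3.14 <class 'float'>
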
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_python3.py b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_python3.py
deleted file mode 100755
index c6b173fd07af42735dd05dd7acb9c42e1c651e38..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_python3.py
+++ /dev/null
@@ -1,201 +0,0 @@
-# encoding: utf-8
-"""
-# load_python.py
-
-Handlers for dumping and loading built-in python types.
-NB: As these are for built-in types, they are critical to the functioning of hickle.
-
-"""
-
-import six
-from hickle.helpers import get_type_and_data
-
-try:
-    from exceptions import Exception
-except ImportError:
-    pass        # above imports will fail in python3
-
-try:
-    ModuleNotFoundError  # This fails on Py3.5 and below
-except NameError:
-    ModuleNotFoundError = ImportError
-
-import h5py as h5
-
-
-def get_py3_string_type(h_node):
-    """ Helper function to return the python string type for items in a list.
-
-    Notes:
-        Py3 string handling is a bit funky and doesn't play too nicely with HDF5.
-        We needed to add metadata to say if the strings in a list started off as
-        bytes, string, etc. This helper loads that metadata back so the correct type can be restored.
-
-    """
-    try:
-        py_type = h_node.attrs["py3_string_type"][0]
-        return py_type
-    except:
-        return None
-
-def create_listlike_dataset(py_obj, h_group, call_id=0, **kwargs):
-    """ Dumper for list, set, tuple
-
-    Args:
-        py_obj: python object to dump; should be list-like
-        h_group (h5.File.group): group to dump data into.
-        call_id (int): index to identify object's relative location in the iterable.
-    """
-    dtype = str(type(py_obj))
-    obj = list(py_obj)
-
-    # h5py does not handle Py3 'str' objects well. Need to catch this
-    # Only need to check first element as this method
-    # is only called if all elements have same dtype
-    py3_str_type = None
-    if type(obj[0]) in (str, bytes):
-        py3_str_type = bytes(str(type(obj[0])), 'ascii')
-
-    if type(obj[0]) is str:
-        obj = [bytes(oo, 'utf8') for oo in obj]
-
-    d = h_group.create_dataset('data_%i' % call_id, data=obj, **kwargs)
-    d.attrs["type"] = [bytes(dtype, 'ascii')]
-
-    # Need to add some metadata to aid in unpickling if it's a string type
-    if py3_str_type is not None:
-        d.attrs["py3_string_type"] = [py3_str_type]
-
-
-
-def create_python_dtype_dataset(py_obj, h_group, call_id=0, **kwargs):
-    """ dumps a python dtype object to h5py file
-
-    Args:
-        py_obj: python object to dump; should be a python type (int, float, bool etc)
-        h_group (h5.File.group): group to dump data into.
-        call_id (int): index to identify object's relative location in the iterable.
-    """
-    # kwarg compression etc does not work on scalars
-    d = h_group.create_dataset('data_%i' % call_id, data=py_obj,
-                               dtype=type(py_obj))     #, **kwargs)
-    d.attrs["type"] = [b'python_dtype']
-    d.attrs['python_subdtype'] = bytes(str(type(py_obj)), 'ascii')
-
-
-def create_stringlike_dataset(py_obj, h_group, call_id=0, **kwargs):
-    """ dumps a list object to h5py file
-
-    Args:
-        py_obj: python object to dump; should be string-like (unicode or string)
-        h_group (h5.File.group): group to dump data into.
-        call_id (int): index to identify object's relative location in the iterable.
-    """
-    if isinstance(py_obj, bytes):
-        d = h_group.create_dataset('data_%i' % call_id, data=[py_obj], **kwargs)
-        d.attrs["type"] = [b'bytes']
-    elif isinstance(py_obj, str):
-        dt = h5.special_dtype(vlen=str)
-        dset = h_group.create_dataset('data_%i' % call_id, shape=(1, ), dtype=dt, **kwargs)
-        dset[0] = py_obj
-        dset.attrs['type'] = [b'string']
-
-def create_none_dataset(py_obj, h_group, call_id=0, **kwargs):
-    """ Dump None type to file
-
-    Args:
-        py_obj: python object to dump; must be None object
-        h_group (h5.File.group): group to dump data into.
-        call_id (int): index to identify object's relative location in the iterable.
-    """
-    d = h_group.create_dataset('data_%i' % call_id, data=[0], **kwargs)
-    d.attrs["type"] = [b'none']
-
-
-def load_list_dataset(h_node):
-    py_type, data = get_type_and_data(h_node)
-    py3_str_type = get_py3_string_type(h_node)
-
-    if py3_str_type == b"<class 'bytes'>":
-        # Yuck. Convert numpy._bytes -> str -> bytes
-        return [bytes(str(item, 'utf8'), 'utf8') for item in data]
-    if py3_str_type == b"<class 'str'>":
-        return [str(item, 'utf8') for item in data]
-    else:
-        return list(data)
-
-def load_tuple_dataset(h_node):
-    data = load_list_dataset(h_node)
-    return tuple(data)
-
-def load_set_dataset(h_node):
-    data = load_list_dataset(h_node)
-    return set(data)
-
-def load_bytes_dataset(h_node):
-    py_type, data = get_type_and_data(h_node)
-    return bytes(data[0])
-
-def load_string_dataset(h_node):
-    py_type, data = get_type_and_data(h_node)
-    return str(data[0])
-
-def load_unicode_dataset(h_node):
-    py_type, data = get_type_and_data(h_node)
-    return unicode(data[0])
-
-def load_none_dataset(h_node):
-    return None
-
-def load_pickled_data(h_node):
-    py_type, data = get_type_and_data(h_node)
-    try:
-        import cPickle as pickle
-    except ModuleNotFoundError:
-        import pickle
-    return pickle.loads(data[0])
-
-
-def load_python_dtype_dataset(h_node):
-    py_type, data = get_type_and_data(h_node)
-    subtype = h_node.attrs["python_subdtype"]
-    type_dict = {
-        b"<class 'int'>": int,
-        b"<class 'float'>": float,
-        b"<class 'bool'>": bool,
-        b"<class 'complex'>": complex
-    }
-
-    tcast = type_dict.get(subtype)
-    return tcast(data)
-
-
-
-types_dict = {
-    list:        create_listlike_dataset,
-    tuple:       create_listlike_dataset,
-    set:         create_listlike_dataset,
-    bytes:         create_stringlike_dataset,
-    str:           create_stringlike_dataset,
-    #bytearray:     create_stringlike_dataset,
-    int:         create_python_dtype_dataset,
-    float:       create_python_dtype_dataset,
-    bool:        create_python_dtype_dataset,
-    complex:     create_python_dtype_dataset,
-    type(None):    create_none_dataset,
-}
-
-hkl_types_dict = {
-    b"<class 'list'>"  : load_list_dataset,
-    b"<class 'tuple'>" : load_tuple_dataset,
-    b"<class 'set'>"   : load_set_dataset,
-    b"bytes"           : load_bytes_dataset,
-    b"python_dtype"   : load_python_dtype_dataset,
-    b"string"         : load_string_dataset,
-    b"pickle"         : load_pickled_data,
-    b"none"           : load_none_dataset,
-}
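# Editor's sketch (not part of the deleted module): a minimal round trip that
# exercises the py3_string_type metadata handled above. Assumes hickle 3.4.x and
# h5py are installed; the file name is illustrative.
import hickle as hkl

original = ["alpha", "beta", "gamma"]           # list of str -> stored as bytes in HDF5
hkl.dump(original, "strings_demo.hkl", mode="w")
restored = hkl.load("strings_demo.hkl")         # decoded back to str on load
assert restored == original
assert all(isinstance(item, str) for item in restored)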
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_scipy.py b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_scipy.py
deleted file mode 100755
index ab09fe23c69ea791371e4b6a808b553c84195289..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/loaders/load_scipy.py
+++ /dev/null
@@ -1,92 +0,0 @@
-import six
-import scipy
-from scipy import sparse
-
-from hickle.helpers import get_type_and_data
-
-def check_is_scipy_sparse_array(py_obj):
-    """ Check if a python object is a scipy sparse array
-
-    Args:
-        py_obj: python object to check whether it is a sparse array
-
-    Returns
-        is_sparse (bool): True if py_obj is a scipy sparse array, False otherwise
-    """
-    t_csr = type(scipy.sparse.csr_matrix([0]))
-    t_csc = type(scipy.sparse.csc_matrix([0]))
-    t_bsr = type(scipy.sparse.bsr_matrix([0]))
-    is_sparse = type(py_obj) in (t_csr, t_csc, t_bsr)
-
-    return is_sparse
-
-
-def create_sparse_dataset(py_obj, h_group, call_id=0, **kwargs):
-    """ dumps an sparse array to h5py file
-
-    Args:
-        py_obj: python object to dump; should be a numpy array or np.ma.array (masked)
-        h_group (h5.File.group): group to dump data into.
-        call_id (int): index to identify object's relative location in the iterable.
-    """
-    h_sparsegroup = h_group.create_group('data_%i' % call_id)
-    data = h_sparsegroup.create_dataset('data', data=py_obj.data, **kwargs)
-    indices = h_sparsegroup.create_dataset('indices', data=py_obj.indices, **kwargs)
-    indptr = h_sparsegroup.create_dataset('indptr', data=py_obj.indptr, **kwargs)
-    shape = h_sparsegroup.create_dataset('shape', data=py_obj.shape, **kwargs)
-
-    if isinstance(py_obj, type(sparse.csr_matrix([0]))):
-        type_str = 'csr'
-    elif isinstance(py_obj, type(sparse.csc_matrix([0]))):
-        type_str = 'csc'
-    elif isinstance(py_obj, type(sparse.bsr_matrix([0]))):
-        type_str = 'bsr'
-
-    if six.PY2:
-        h_sparsegroup.attrs["type"] = [b'%s_matrix' % type_str]
-        data.attrs["type"]          = [b"%s_matrix_data" % type_str]
-        indices.attrs["type"]       = [b"%s_matrix_indices" % type_str]
-        indptr.attrs["type"]        = [b"%s_matrix_indptr" % type_str]
-        shape.attrs["type"]         = [b"%s_matrix_shape" % type_str]
-    else:
-        h_sparsegroup.attrs["type"] = [bytes(str('%s_matrix' % type_str), 'ascii')]
-        data.attrs["type"]          = [bytes(str("%s_matrix_data" % type_str), 'ascii')]
-        indices.attrs["type"]       = [bytes(str("%s_matrix_indices" % type_str), 'ascii')]
-        indptr.attrs["type"]        = [bytes(str("%s_matrix_indptr" % type_str), 'ascii')]
-        shape.attrs["type"]         = [bytes(str("%s_matrix_shape" % type_str), 'ascii')]
-
-def load_sparse_matrix_data(h_node):
-
-    py_type, data = get_type_and_data(h_node)
-    h_root  = h_node.parent
-    indices = h_root.get('indices')[:]
-    indptr  = h_root.get('indptr')[:]
-    shape   = h_root.get('shape')[:]
-
-    if py_type == b'csc_matrix_data':
-        smat = sparse.csc_matrix((data, indices, indptr), dtype=data.dtype, shape=shape)
-    elif py_type == b'csr_matrix_data':
-        smat = sparse.csr_matrix((data, indices, indptr), dtype=data.dtype, shape=shape)
-    elif py_type == b'bsr_matrix_data':
-        smat = sparse.bsr_matrix((data, indices, indptr), dtype=data.dtype, shape=shape)
-    return smat
-
-
-
-
-
-class_register = [
-    [scipy.sparse.csr_matrix, b'csr_matrix_data', create_sparse_dataset, load_sparse_matrix_data, False, check_is_scipy_sparse_array],
-    [scipy.sparse.csc_matrix, b'csc_matrix_data', create_sparse_dataset, load_sparse_matrix_data, False, check_is_scipy_sparse_array],
-    [scipy.sparse.bsr_matrix, b'bsr_matrix_data', create_sparse_dataset, load_sparse_matrix_data, False, check_is_scipy_sparse_array],
-]
-
-exclude_register = []
-
-# Need to ignore things like csc_matrix_indices which are loaded automatically
-for mat_type in ('csr', 'csc', 'bsr'):
-    for attrib in ('indices', 'indptr', 'shape'):
-        hkl_key = "%s_matrix_%s" % (mat_type, attrib)
-        if not six.PY2:
-            hkl_key = hkl_key.encode('ascii')
-        exclude_register.append(hkl_key)
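# Editor's sketch (not part of the deleted module): how the sparse handlers above
# are typically exercised end to end. Assumes scipy and hickle 3.4.x are installed;
# the file name is illustrative.
import numpy as np
from scipy.sparse import csr_matrix
import hickle as hkl

mat = csr_matrix(np.eye(3))                     # stored as data/indices/indptr/shape datasets
hkl.dump(mat, "sparse_demo.hkl", mode="w")
mat2 = hkl.load("sparse_demo.hkl")
assert isinstance(mat2, csr_matrix)
assert np.allclose(mat.toarray(), mat2.toarray())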
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/lookup.py b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/lookup.py
deleted file mode 100755
index 99d13df9315be642540e46efc44d8e3d293de708..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/hickle/lookup.py
+++ /dev/null
@@ -1,238 +0,0 @@
-"""
-#lookup.py
-
-This file contains all the mappings between hickle/HDF5 metadata and python types.
-There are four dictionaries and one list that are populated here:
-
-1) types_dict
-types_dict: mapping between python types and dataset creation functions, e.g.
-    types_dict = {
-        list:        create_listlike_dataset,
-        int:         create_python_dtype_dataset,
-        np.ndarray:  create_np_array_dataset
-        }
-
-2) hkl_types_dict
-hkl_types_dict: mapping between hickle metadata and dataset loading functions, e.g.
-    hkl_types_dict = {
-        "<type 'list'>"  : load_list_dataset,
-        "<type 'tuple'>" : load_tuple_dataset
-        }
-
-3) container_types_dict
-container_types_dict: mapping required to convert the PyContainer object in hickle.py
-                      back into the required native type. PyContainer is required as
-                      some iterable types are immutable (do not have an append() function).
-                      Here is an example:
-    container_types_dict = {
-        "<type 'list'>": list,
-        "<type 'tuple'>": tuple
-        }
-
-4) container_key_types_dict
-container_key_types_dict: mapping specifically for converting hickled dict data back into
-                          a dictionary with the same key type. While python dictionary keys
-                          can be any hashable object, in HDF5 a unicode/string is required
-                          for a dataset name. Example:
-    container_key_types_dict = {
-        "<type 'str'>": str,
-        "<type 'unicode'>": unicode
-        }
-
-5) types_not_to_sort
-types_not_to_sort is a list of hickle type attributes that may be hierarchical,
-but don't require sorting by integer index.
-
-## Extending hickle to add support for other classes and types
-
-The process to add new load/dump capabilities is as follows:
-
-1) Create a file called load_[newstuff].py in loaders/
-2) In the load_[newstuff].py file, define your create_dataset and load_dataset functions,
-   along with all required mapping dictionaries.
-3) Add an import call here, and populate the lookup dictionaries with update() calls:
-    # Add loaders for [newstuff]
-    try:
-        from .loaders.load_[newstuff] import types_dict as ns_types_dict
-        from .loaders.load_[newstuff] import hkl_types_dict as ns_hkl_types_dict
-        types_dict.update(ns_types_dict)
-        hkl_types_dict.update(ns_hkl_types_dict)
-        ... (Add container_types_dict etc if required)
-    except ImportError:
-        raise
-"""
-
-import six
-from ast import literal_eval
-
-def return_first(x):
-    """ Return first element of a list """
-    return x[0]
-
-def load_nothing(h_node):
-    pass
-
-types_dict = {}
-
-hkl_types_dict = {}
-
-types_not_to_sort = [b'dict', b'csr_matrix', b'csc_matrix', b'bsr_matrix']
-
-container_types_dict = {
-    b"<type 'list'>": list,
-    b"<type 'tuple'>": tuple,
-    b"<type 'set'>": set,
-    b"<class 'list'>": list,
-    b"<class 'tuple'>": tuple,
-    b"<class 'set'>": set,
-    b"csr_matrix":  return_first,
-    b"csc_matrix": return_first,
-    b"bsr_matrix": return_first
-    }
-
-# Technically, any hashable object can be used, for now sticking with built-in types
-container_key_types_dict = {
-    b"<type 'str'>": literal_eval,
-    b"<type 'float'>": float,
-    b"<type 'bool'>": bool,
-    b"<type 'int'>": int,
-    b"<type 'complex'>": complex,
-    b"<type 'tuple'>": literal_eval,
-    b"<class 'str'>": literal_eval,
-    b"<class 'float'>": float,
-    b"<class 'bool'>": bool,
-    b"<class 'int'>": int,
-    b"<class 'complex'>": complex,
-    b"<class 'tuple'>": literal_eval
-    }
-
-if six.PY2:
-    container_key_types_dict[b"<type 'unicode'>"] = literal_eval
-    container_key_types_dict[b"<type 'long'>"] = long
-
-# Add loaders for built-in python types
-if six.PY2:
-    from .loaders.load_python import types_dict as py_types_dict
-    from .loaders.load_python import hkl_types_dict as py_hkl_types_dict
-else:
-    from .loaders.load_python3 import types_dict as py_types_dict
-    from .loaders.load_python3 import hkl_types_dict as py_hkl_types_dict
-
-types_dict.update(py_types_dict)
-hkl_types_dict.update(py_hkl_types_dict)
-
-# Add loaders for numpy types
-from .loaders.load_numpy import  types_dict as np_types_dict
-from .loaders.load_numpy import  hkl_types_dict as np_hkl_types_dict
-from .loaders.load_numpy import check_is_numpy_array
-types_dict.update(np_types_dict)
-hkl_types_dict.update(np_hkl_types_dict)
-
-#######################
-## ND-ARRAY checking ##
-#######################
-
-ndarray_like_check_fns = [
-    check_is_numpy_array
-]
-
-def check_is_ndarray_like(py_obj):
-    is_ndarray_like = False
-    for ii, check_fn in enumerate(ndarray_like_check_fns):
-        is_ndarray_like = check_fn(py_obj)
-        if is_ndarray_like:
-            break
-    return is_ndarray_like
-
-
-
-
-#######################
-## loading optional  ##
-#######################
-
-def register_class(myclass_type, hkl_str, dump_function, load_function,
-                   to_sort=True, ndarray_check_fn=None):
-    """ Register a new hickle class.
-
-    Args:
-        myclass_type type(class): type of class
-        hkl_str (str): String to write to HDF5 file to describe class
-        dump_function (function def): function to write data to HDF5
-        load_function (function def): function to load data from HDF5
-        to_sort (bool): If the item is iterable, does it require sorting?
-        ndarray_check_fn (function def): function to use to check if an object
-                                         is ndarray-like
-
-    """
-    types_dict.update({myclass_type: dump_function})
-    hkl_types_dict.update({hkl_str: load_function})
-    if to_sort == False:
-        types_not_to_sort.append(hkl_str)
-    if ndarray_check_fn is not None:
-        ndarray_like_check_fns.append(ndarray_check_fn)
-
-def register_class_list(class_list):
-    """ Register multiple classes in a list
-
-    Args:
-        class_list (list): A list, where each item is an argument to
-                           the register_class() function.
-
-    Notes: This just runs the code:
-            for item in mylist:
-                register_class(*item)
-    """
-    for class_item in class_list:
-        register_class(*class_item)
-
-def register_class_exclude(hkl_str_to_ignore):
-    """ Tell loading funciton to ignore any HDF5 dataset with attribute 'type=XYZ'
-
-    Args:
-        hkl_str_to_ignore (str): attribute type=string to ignore and exclude from loading.
-    """
-    hkl_types_dict[hkl_str_to_ignore] = load_nothing
-
-def register_exclude_list(exclude_list):
-    """ Ignore HDF5 datasets with attribute type='XYZ' from loading
-
-    Args:
-        exclude_list (list): List of strings, which correspond to hdf5/hickle
-                             type= attributes not to load.
-    """
-    for hkl_str in exclude_list:
-        register_class_exclude(hkl_str)
-
-########################
-## Scipy sparse array ##
-########################
-
-try:
-    from .loaders.load_scipy import class_register, exclude_register
-    register_class_list(class_register)
-    register_exclude_list(exclude_register)
-except ImportError:
-    pass
-except NameError:
-    pass
-
-####################
-## Astropy  stuff ##
-####################
-
-try:
-    from .loaders.load_astropy import class_register
-    register_class_list(class_register)
-except ImportError:
-    pass
-
-##################
-## Pandas stuff ##
-##################
-
-try:
-    from .loaders.load_pandas import class_register
-    register_class_list(class_register)
-except ImportError:
-    pass
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/__init__.py b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/__init__.py
deleted file mode 100755
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/__pycache__/__init__.cpython-36.pyc b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/__pycache__/__init__.cpython-36.pyc
deleted file mode 100755
index 86d97d222a8780ca54c672085ff548dfa6a28be0..0000000000000000000000000000000000000000
Binary files a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/__pycache__/__init__.cpython-36.pyc and /dev/null differ
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/__pycache__/test_astropy.cpython-36.pyc b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/__pycache__/test_astropy.cpython-36.pyc
deleted file mode 100755
index 8c672a706793402cf52bedb977e67f6a6b91ef51..0000000000000000000000000000000000000000
Binary files a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/__pycache__/test_astropy.cpython-36.pyc and /dev/null differ
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/__pycache__/test_hickle.cpython-36.pyc b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/__pycache__/test_hickle.cpython-36.pyc
deleted file mode 100755
index 046d30d0527994b989f6c055be5a3cb573b6163f..0000000000000000000000000000000000000000
Binary files a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/__pycache__/test_hickle.cpython-36.pyc and /dev/null differ
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/__pycache__/test_hickle_helpers.cpython-36.pyc b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/__pycache__/test_hickle_helpers.cpython-36.pyc
deleted file mode 100755
index d10d047823c6177abba7330f72046787d86a5e46..0000000000000000000000000000000000000000
Binary files a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/__pycache__/test_hickle_helpers.cpython-36.pyc and /dev/null differ
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/__pycache__/test_legacy_load.cpython-36.pyc b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/__pycache__/test_legacy_load.cpython-36.pyc
deleted file mode 100755
index 387c9076b34bdbd560ab83a5798a241b2adc1ece..0000000000000000000000000000000000000000
Binary files a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/__pycache__/test_legacy_load.cpython-36.pyc and /dev/null differ
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/__pycache__/test_scipy.cpython-36.pyc b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/__pycache__/test_scipy.cpython-36.pyc
deleted file mode 100755
index 6f7dbb8741560fc88a3fb0bd2356c226bae2cd69..0000000000000000000000000000000000000000
Binary files a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/__pycache__/test_scipy.cpython-36.pyc and /dev/null differ
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_astropy.py b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_astropy.py
deleted file mode 100755
index 2086ec37456b2bbcde77fbed2d5370b67ee89381..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_astropy.py
+++ /dev/null
@@ -1,133 +0,0 @@
-import hickle as hkl
-from astropy.units import Quantity
-from astropy.time import Time
-from astropy.coordinates import Angle, SkyCoord
-from astropy.constants import Constant, EMConstant, G
-from astropy.table import Table
-import numpy as np
-from py.path import local
-
-# Set the current working directory to the temporary directory
-local.get_temproot().chdir()
-
-def test_astropy_quantity():
-
-    for uu in ['m^3', 'm^3 / s', 'kg/pc']:
-        a = Quantity(7, unit=uu)
-
-        hkl.dump(a, "test_ap.h5")
-        b = hkl.load("test_ap.h5")
-
-        assert a == b
-        assert a.unit == b.unit
-
-        a *= a
-        hkl.dump(a, "test_ap.h5")
-        b = hkl.load("test_ap.h5")
-        assert a == b
-        assert a.unit == b.unit
-
-def TODO_test_astropy_constant():
-        hkl.dump(G, "test_ap.h5")
-        gg = hkl.load("test_ap.h5")
-
-        print(G)
-        print(gg)
-
-def test_astropy_table():
-    t = Table([[1, 2], [3, 4]], names=('a', 'b'), meta={'name': 'test_thing'})
-
-    hkl.dump({'a': t}, "test_ap.h5")
-    t2 = hkl.load("test_ap.h5")['a']
-
-    print(t)
-    print(t.meta)
-    print(t2)
-    print(t2.meta)
-
-    print(t.dtype, t2.dtype)
-    assert t.meta == t2.meta
-    assert t.dtype == t2.dtype
-
-    assert np.allclose(t['a'].astype('float32'), t2['a'].astype('float32'))
-    assert np.allclose(t['b'].astype('float32'), t2['b'].astype('float32'))
-
-def test_astropy_quantity_array():
-    a = Quantity([1,2,3], unit='m')
-
-    hkl.dump(a, "test_ap.h5")
-    b = hkl.load("test_ap.h5")
-
-    assert np.allclose(a.value, b.value)
-    assert a.unit == b.unit
-
-def test_astropy_time_array():
-    times = ['1999-01-01T00:00:00.123456789', '2010-01-01T00:00:00']
-    t1 = Time(times, format='isot', scale='utc')
-    hkl.dump(t1, "test_ap2.h5")
-    t2 = hkl.load("test_ap2.h5")
-
-    print(t1)
-    print(t2)
-    assert t1.value.shape == t2.value.shape
-    for ii in range(len(t1)):
-        assert t1.value[ii] == t2.value[ii]
-    assert t1.format == t2.format
-    assert t1.scale == t2.scale
-
-    times = [58264, 58265, 58266]
-    t1 = Time(times, format='mjd', scale='utc')
-    hkl.dump(t1, "test_ap2.h5")
-    t2 = hkl.load("test_ap2.h5")
-
-    print(t1)
-    print(t2)
-    assert t1.value.shape == t2.value.shape
-    assert np.allclose(t1.value, t2.value)
-    assert t1.format == t2.format
-    assert t1.scale == t2.scale
-
-def test_astropy_angle():
-    for uu in ['radian', 'degree']:
-        a = Angle(1.02, unit=uu)
-
-        hkl.dump(a, "test_ap.h5")
-        b = hkl.load("test_ap.h5")
-        assert a == b
-        assert a.unit == b.unit
-
-def test_astropy_angle_array():
-    a = Angle([1,2,3], unit='degree')
-
-    hkl.dump(a, "test_ap.h5")
-    b = hkl.load("test_ap.h5")
-
-    assert np.allclose(a.value, b.value)
-    assert a.unit == b.unit
-
-def test_astropy_skycoord():
-    ra = Angle(['1d20m', '1d21m'], unit='degree')
-    dec = Angle(['33d0m0s', '33d01m'], unit='degree')
-    radec = SkyCoord(ra, dec)
-    hkl.dump(radec, "test_ap.h5")
-    radec2 = hkl.load("test_ap.h5")
-    assert np.allclose(radec.ra.value, radec2.ra.value)
-    assert np.allclose(radec.dec.value, radec2.dec.value)
-
-    ra = Angle(['1d20m', '1d21m'], unit='hourangle')
-    dec = Angle(['33d0m0s', '33d01m'], unit='degree')
-    radec = SkyCoord(ra, dec)
-    hkl.dump(radec, "test_ap.h5")
-    radec2 = hkl.load("test_ap.h5")
-    assert np.allclose(radec.ra.value, radec2.ra.value)
-    assert np.allclose(radec.dec.value, radec2.dec.value)
-
-if __name__ == "__main__":
-    test_astropy_quantity()
-    #test_astropy_constant()
-    test_astropy_table()
-    test_astropy_quantity_array()
-    test_astropy_time_array()
-    test_astropy_angle()
-    test_astropy_angle_array()
-    test_astropy_skycoord()
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_hickle.py b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_hickle.py
deleted file mode 100755
index 5491054239372a3b5d42c9e6f07b6fc5701ed933..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_hickle.py
+++ /dev/null
@@ -1,826 +0,0 @@
-#! /usr/bin/env python
-# encoding: utf-8
-"""
-# test_hickle.py
-
-Unit tests for hickle module.
-
-"""
-
-import h5py
-import hashlib
-import numpy as np
-import os
-import six
-import time
-from pprint import pprint
-
-from py.path import local
-
-import hickle
-from hickle.hickle import *
-
-
-# Set current working directory to the temporary directory
-local.get_temproot().chdir()
-
-NESTED_DICT = {
-    "level1_1": {
-        "level2_1": [1, 2, 3],
-        "level2_2": [4, 5, 6]
-    },
-    "level1_2": {
-        "level2_1": [1, 2, 3],
-        "level2_2": [4, 5, 6]
-    },
-    "level1_3": {
-        "level2_1": {
-            "level3_1": [1, 2, 3],
-            "level3_2": [4, 5, 6]
-        },
-        "level2_2": [4, 5, 6]
-    }
-}
-
-DUMP_CACHE = []             # Used in test_track_times()
-
-
-def test_string():
-    """ Dumping and loading a string """
-    if six.PY2:
-        filename, mode = 'test.h5', 'w'
-        string_obj = "The quick brown fox jumps over the lazy dog"
-        dump(string_obj, filename, mode)
-        string_hkl = load(filename)
-        #print "Initial list:   %s"%list_obj
-        #print "Unhickled data: %s"%list_hkl
-        assert type(string_obj) == type(string_hkl) == str
-        assert string_obj == string_hkl
-    else:
-        pass
-
-
-def test_unicode():
-    """ Dumping and loading a unicode string """
-    if six.PY2:
-        filename, mode = 'test.h5', 'w'
-        u = unichr(233) + unichr(0x0bf2) + unichr(3972) + unichr(6000)
-        dump(u, filename, mode)
-        u_hkl = load(filename)
-
-        assert type(u) == type(u_hkl) == unicode
-        assert u == u_hkl
-        # For those interested, uncomment below to see what those codes are:
-        # for i, c in enumerate(u_hkl):
-        #     print i, '%04x' % ord(c), unicodedata.category(c),
-        #     print unicodedata.name(c)
-    else:
-        pass
-
-
-def test_unicode2():
-    if six.PY2:
-        a = u"unicode test"
-        dump(a, 'test.hkl', mode='w')
-
-        z = load('test.hkl')
-        assert a == z
-        assert type(a) == type(z) == unicode
-        pprint(z)
-    else:
-        pass
-
-def test_list():
-    """ Dumping and loading a list """
-    filename, mode = 'test_list.h5', 'w'
-    list_obj = [1, 2, 3, 4, 5]
-    dump(list_obj, filename, mode=mode)
-    list_hkl = load(filename)
-    #print(f'Initial list: {list_obj}')
-    #print(f'Unhickled data: {list_hkl}')
-    try:
-        assert type(list_obj) == type(list_hkl) == list
-        assert list_obj == list_hkl
-        import h5py
-        a = h5py.File(filename)
-        a.close()
-
-    except AssertionError:
-        print("ERR:", list_obj, list_hkl)
-        raise
-
-
-def test_set():
-    """ Dumping and loading a list """
-    filename, mode = 'test_set.h5', 'w'
-    list_obj = set([1, 0, 3, 4.5, 11.2])
-    dump(list_obj, filename, mode)
-    list_hkl = load(filename)
-    #print "Initial list:   %s"%list_obj
-    #print "Unhickled data: %s"%list_hkl
-    try:
-        assert type(list_obj) == type(list_hkl) == set
-        assert list_obj == list_hkl
-    except AssertionError:
-        print(type(list_obj))
-        print(type(list_hkl))
-        #os.remove(filename)
-        raise
-
-
-def test_numpy():
-    """ Dumping and loading numpy array """
-    filename, mode = 'test.h5', 'w'
-    dtypes = ['float32', 'float64', 'complex64', 'complex128']
-
-    for dt in dtypes:
-        array_obj = np.ones(8, dtype=dt)
-        dump(array_obj, filename, mode)
-        array_hkl = load(filename)
-    try:
-        assert array_hkl.dtype == array_obj.dtype
-        assert np.allclose(array_hkl, array_obj)
-    except AssertionError:
-        print(array_hkl)
-        print(array_obj)
-        raise
-
-
-def test_masked():
-    """ Test masked numpy array """
-    filename, mode = 'test.h5', 'w'
-    a = np.ma.array([1,2,3,4], dtype='float32', mask=[0,1,0,0])
-
-    dump(a, filename, mode)
-    a_hkl = load(filename)
-
-    try:
-        assert a_hkl.dtype == a.dtype
-        assert np.allclose(a_hkl, a)
-    except AssertionError:
-        print(a_hkl)
-        print(a)
-        raise
-
-
-def test_dict():
-    """ Test dictionary dumping and loading """
-    filename, mode = 'test.h5', 'w'
-
-    dd = {
-        'name'   : b'Danny',
-        'age'    : 28,
-        'height' : 6.1,
-        'dork'   : True,
-        'nums'   : [1, 2, 3],
-        'narr'   : np.array([1,2,3]),
-        #'unic'   : u'dan[at]thetelegraphic.com'
-    }
-
-
-    dump(dd, filename, mode)
-    dd_hkl = load(filename)
-
-    for k in dd.keys():
-        try:
-            assert k in dd_hkl.keys()
-
-            if type(dd[k]) is type(np.array([1])):
-                assert np.allclose(dd[k], dd_hkl[k])
-            else:
-                #assert dd_hkl[k] == dd[k]
-                pass
-            assert type(dd_hkl[k]) == type(dd[k])
-        except AssertionError:
-            print(k)
-            print(dd_hkl[k])
-            print(dd[k])
-            print(type(dd_hkl[k]), type(dd[k]))
-            raise
-
-
-def test_empty_dict():
-    """ Test empty dictionary dumping and loading """
-    filename, mode = 'test.h5', 'w'
-
-    dump({}, filename, mode)
-    assert load(filename) == {}
-
-
-def test_compression():
-    """ Test compression on datasets"""
-
-    filename, mode = 'test.h5', 'w'
-    dtypes = ['int32', 'float32', 'float64', 'complex64', 'complex128']
-
-    comps = [None, 'gzip', 'lzf']
-
-    for dt in dtypes:
-        for cc in comps:
-            array_obj = np.ones(32768, dtype=dt)
-            dump(array_obj, filename, mode, compression=cc)
-            print(cc, os.path.getsize(filename))
-            array_hkl = load(filename)
-    try:
-        assert array_hkl.dtype == array_obj.dtype
-        assert np.allclose(array_hkl, array_obj)
-    except AssertionError:
-        print(array_hkl)
-        print(array_obj)
-        raise
-
-
-def test_dict_int_key():
-    """ Test for dictionaries with integer keys """
-    filename, mode = 'test.h5', 'w'
-
-    dd = {
-        0: "test",
-        1: "test2"
-    }
-
-    dump(dd, filename, mode)
-    dd_hkl = load(filename)
-
-
-def test_dict_nested():
-    """ Test for dictionaries with integer keys """
-    filename, mode = 'test.h5', 'w'
-
-    dd = NESTED_DICT
-
-    dump(dd, filename, mode)
-    dd_hkl = load(filename)
-
-    ll_hkl = dd_hkl["level1_3"]["level2_1"]["level3_1"]
-    ll     = dd["level1_3"]["level2_1"]["level3_1"]
-    assert ll == ll_hkl
-
-
-def test_masked_dict():
-    """ Test dictionaries with masked arrays """
-
-    filename, mode = 'test.h5', 'w'
-
-    dd = {
-        "data"  : np.ma.array([1,2,3], mask=[True, False, False]),
-        "data2" : np.array([1,2,3,4,5])
-    }
-
-    dump(dd, filename, mode)
-    dd_hkl = load(filename)
-
-    for k in dd.keys():
-        try:
-            assert k in dd_hkl.keys()
-            if type(dd[k]) is type(np.array([1])):
-                assert np.allclose(dd[k], dd_hkl[k])
-            elif type(dd[k]) is type(np.ma.array([1])):
-                print(dd[k].data)
-                print(dd_hkl[k].data)
-                assert np.allclose(dd[k].data, dd_hkl[k].data)
-                assert np.allclose(dd[k].mask, dd_hkl[k].mask)
-
-            assert type(dd_hkl[k]) == type(dd[k])
-
-        except AssertionError:
-            print(k)
-            print(dd_hkl[k])
-            print(dd[k])
-            print(type(dd_hkl[k]), type(dd[k]))
-            raise
-
-
-def test_np_float():
-    """ Test for singular np dtypes """
-    filename, mode = 'np_float.h5', 'w'
-
-    dtype_list = (np.float16, np.float32, np.float64,
-                  np.complex64, np.complex128,
-                  np.int8, np.int16, np.int32, np.int64,
-                  np.uint8, np.uint16, np.uint32, np.uint64)
-
-    for dt in dtype_list:
-
-        dd = dt(1)
-        dump(dd, filename, mode)
-        dd_hkl = load(filename)
-        assert dd == dd_hkl
-        assert dd.dtype == dd_hkl.dtype
-
-    dd = {}
-    for dt in dtype_list:
-        dd[str(dt)] = dt(1.0)
-    dump(dd, filename, mode)
-    dd_hkl = load(filename)
-
-    print(dd)
-    for dt in dtype_list:
-        assert dd[str(dt)] == dd_hkl[str(dt)]
-
-
-def md5sum(filename, blocksize=65536):
-    """ Compute MD5 sum for a given file """
-    hash = hashlib.md5()
-
-    with open(filename, "r+b") as f:
-        for block in iter(lambda: f.read(blocksize), b""):
-            hash.update(block)
-    return hash.hexdigest()
-
-
-def caching_dump(obj, filename, *args, **kwargs):
-    """ Save arguments of all dump calls """
-    DUMP_CACHE.append((obj, filename, args, kwargs))
-    return hickle_dump(obj, filename, *args, **kwargs)
-
-
-def test_track_times():
-    """ Verify that track_times = False produces identical files """
-    hashes = []
-    for obj, filename, mode, kwargs in DUMP_CACHE:
-        if isinstance(filename, hickle.H5FileWrapper):
-            filename = str(filename.file_name)
-        kwargs['track_times'] = False
-        caching_dump(obj, filename, mode, **kwargs)
-        hashes.append(md5sum(filename))
-
-    time.sleep(1)
-
-    for hash1, (obj, filename, mode, kwargs) in zip(hashes, DUMP_CACHE):
-        if isinstance(filename, hickle.H5FileWrapper):
-            filename = str(filename.file_name)
-        caching_dump(obj, filename, mode, **kwargs)
-        hash2 = md5sum(filename)
-        print(hash1, hash2)
-        assert hash1 == hash2
-
-
-def test_comp_kwargs():
-    """ Test compression with some kwargs for shuffle and chunking """
-
-    filename, mode = 'test.h5', 'w'
-    dtypes = ['int32', 'float32', 'float64', 'complex64', 'complex128']
-
-    comps = [None, 'gzip', 'lzf']
-    chunks = [(100, 100), (250, 250)]
-    shuffles = [True, False]
-    scaleoffsets = [0, 1, 2]
-
-    for dt in dtypes:
-        for cc in comps:
-            for ch in chunks:
-                for sh in shuffles:
-                    for so in scaleoffsets:
-                        kwargs = {
-                            'compression' : cc,
-                            'dtype': dt,
-                            'chunks': ch,
-                            'shuffle': sh,
-                            'scaleoffset': so
-                        }
-                        #array_obj = np.random.random_integers(low=-8192, high=8192, size=(1000, 1000)).astype(dt)
-                        array_obj = NESTED_DICT
-                        dump(array_obj, filename, mode, compression=cc)
-                        print(kwargs, os.path.getsize(filename))
-                        array_hkl = load(filename)
-
-
-def test_list_numpy():
-    """ Test converting a list of numpy arrays """
-
-    filename, mode = 'test.h5', 'w'
-
-    a = np.ones(1024)
-    b = np.zeros(1000)
-    c = [a, b]
-
-    dump(c, filename, mode)
-    dd_hkl = load(filename)
-
-    print(dd_hkl)
-
-    assert isinstance(dd_hkl, list)
-    assert isinstance(dd_hkl[0], np.ndarray)
-
-
-def test_tuple_numpy():
-    """ Test converting a list of numpy arrays """
-
-    filename, mode = 'test.h5', 'w'
-
-    a = np.ones(1024)
-    b = np.zeros(1000)
-    c = (a, b, a)
-
-    dump(c, filename, mode)
-    dd_hkl = load(filename)
-
-    print(dd_hkl)
-
-    assert isinstance(dd_hkl, tuple)
-    assert isinstance(dd_hkl[0], np.ndarray)
-
-
-def test_none():
-    """ Test None type hickling """
-
-    filename, mode = 'test.h5', 'w'
-
-    a = None
-
-    dump(a, filename, mode)
-    dd_hkl = load(filename)
-    print(a)
-    print(dd_hkl)
-
-    assert isinstance(dd_hkl, type(None))
-
-
-def test_dict_none():
-    """ Test None type hickling """
-
-    filename, mode = 'test.h5', 'w'
-
-    a = {'a': 1, 'b' : None}
-
-    dump(a, filename, mode)
-    dd_hkl = load(filename)
-    print(a)
-    print(dd_hkl)
-
-    assert isinstance(dd_hkl['b'], type(None))
-
-
-def test_file_open_close():
-    """ https://github.com/telegraphic/hickle/issues/20 """
-    import h5py
-    f = h5py.File('test.hdf', 'w')
-    a = np.arange(5)
-
-    dump(a, 'test.hkl')
-    dump(a, 'test.hkl')
-
-    dump(a, f, mode='w')
-    f.close()
-    try:
-        dump(a, f, mode='w')
-    except hickle.hickle.ClosedFileError:
-        print("Tests: Closed file exception caught")
-
-
-def test_list_order():
-    """ https://github.com/telegraphic/hickle/issues/26 """
-    d = [np.arange(n + 1) for n in range(20)]
-    hickle.dump(d, 'test.h5')
-    d_hkl = hickle.load('test.h5')
-
-    try:
-        for ii, xx in enumerate(d):
-            assert d[ii].shape == d_hkl[ii].shape
-        for ii, xx in enumerate(d):
-            assert np.allclose(d[ii], d_hkl[ii])
-    except AssertionError:
-        print(d[ii], d_hkl[ii])
-        raise
-
-
-def test_embedded_array():
-    """ See https://github.com/telegraphic/hickle/issues/24 """
-
-    d_orig = [[np.array([10., 20.]), np.array([10, 20, 30])], [np.array([10, 2]), np.array([1.])]]
-    hickle.dump(d_orig, 'test.h5')
-    d_hkl = hickle.load('test.h5')
-
-    for ii, xx in enumerate(d_orig):
-        for jj, yy in enumerate(xx):
-            assert np.allclose(d_orig[ii][jj], d_hkl[ii][jj])
-
-    print(d_hkl)
-    print(d_orig)
-
-
-################
-## NEW TESTS  ##
-################
-
-
-def generate_nested():
-    a = [1, 2, 3]
-    b = [a, a, a]
-    c = [a, b, 's']
-    d = [a, b, c, c, a]
-    e = [d, d, d, d, 1]
-    f = {'a' : a, 'b' : b, 'e' : e}
-    g = {'f' : f, 'a' : e, 'd': d}
-    h = {'h': g, 'g' : f}
-    z = [f, a, b, c, d, e, f, g, h, g, h]
-    a = np.array([1, 2, 3, 4])
-    b = set([1, 2, 3, 4, 5])
-    c = (1, 2, 3, 4, 5)
-    d = np.ma.array([1, 2, 3, 4, 5, 6, 7, 8])
-    z = {'a': a, 'b': b, 'c': c, 'd': d, 'z': z}
-    return z
-
-
-def test_is_iterable():
-    a = [1, 2, 3]
-    b = 1
-
-    assert check_is_iterable(a) == True
-    assert check_is_iterable(b) == False
-
-
-def test_check_iterable_item_type():
-
-    a = [1, 2, 3]
-    b = [a, a, a]
-    c = [a, b, 's']
-
-    type_a = check_iterable_item_type(a)
-    type_b = check_iterable_item_type(b)
-    type_c = check_iterable_item_type(c)
-
-    assert type_a is int
-    assert type_b is list
-    assert type_c == False
-
-
-def test_dump_nested():
-    """ Dump a complicated nested object to HDF5
-    """
-    z = generate_nested()
-    dump(z, 'test.hkl', mode='w')
-
-
-def test_with_dump():
-    lst = [1]
-    tpl = (1,)
-    dct = {1: 1}
-    arr = np.array([1])
-
-    with h5py.File('test.hkl') as file:
-        dump(lst, file, path='/lst')
-        dump(tpl, file, path='/tpl')
-        dump(dct, file, path='/dct')
-        dump(arr, file, path='/arr')
-
-
-def test_with_load():
-    lst = [1]
-    tpl = (1,)
-    dct = {1: 1}
-    arr = np.array([1])
-
-    with h5py.File('test.hkl') as file:
-        assert load(file, '/lst') == lst
-        assert load(file, '/tpl') == tpl
-        assert load(file, '/dct') == dct
-        assert load(file, '/arr') == arr
-
-
-def test_load():
-
-    a = set([1, 2, 3, 4])
-    b = set([5, 6, 7, 8])
-    c = set([9, 10, 11, 12])
-    z = (a, b, c)
-    z = [z, z]
-    z = (z, z, z, z, z)
-
-    print("Original:")
-    pprint(z)
-    dump(z, 'test.hkl', mode='w')
-
-    print("\nReconstructed:")
-    z = load('test.hkl')
-    pprint(z)
-
-
-def test_sort_keys():
-    keys = [b'data_0', b'data_1', b'data_2', b'data_3', b'data_10']
-    keys_sorted = [b'data_0', b'data_1', b'data_2', b'data_3', b'data_10']
-
-    print(keys)
-    print(keys_sorted)
-    assert sort_keys(keys) == keys_sorted
-
-
-def test_ndarray():
-
-    a = np.array([1,2,3])
-    b = np.array([2,3,4])
-    z = (a, b)
-
-    print("Original:")
-    pprint(z)
-    dump(z, 'test.hkl', mode='w')
-
-    print("\nReconstructed:")
-    z = load('test.hkl')
-    pprint(z)
-
-
-def test_ndarray_masked():
-
-    a = np.ma.array([1,2,3])
-    b = np.ma.array([2,3,4], mask=[True, False, True])
-    z = (a, b)
-
-    print("Original:")
-    pprint(z)
-    dump(z, 'test.hkl', mode='w')
-
-    print("\nReconstructed:")
-    z = load('test.hkl')
-    pprint(z)
-
-
-def test_simple_dict():
-    a = {'key1': 1, 'key2': 2}
-
-    dump(a, 'test.hkl')
-    z = load('test.hkl')
-
-    pprint(a)
-    pprint(z)
-
-
-def test_complex_dict():
-    a = {'akey': 1, 'akey2': 2}
-    if six.PY2:
-        # NO LONG TYPE IN PY3!
-        b = {'bkey': 2.0, 'bkey3': long(3.0)}
-    else:
-        b = a
-    c = {'ckey': "hello", "ckey2": "hi there"}
-    z = {'zkey1': a, 'zkey2': b, 'zkey3': c}
-
-    print("Original:")
-    pprint(z)
-    dump(z, 'test.hkl', mode='w')
-
-    print("\nReconstructed:")
-    z = load('test.hkl')
-    pprint(z)
-
-def test_multi_hickle():
-    a = {'a': 123, 'b': [1, 2, 4]}
-
-    if os.path.exists("test.hkl"):
-        os.remove("test.hkl")
-    dump(a, "test.hkl", path="/test", mode="w")
-    dump(a, "test.hkl", path="/test2", mode="r+")
-    dump(a, "test.hkl", path="/test3", mode="r+")
-    dump(a, "test.hkl", path="/test4", mode="r+")
-
-    a = load("test.hkl", path="/test")
-    b = load("test.hkl", path="/test2")
-    c = load("test.hkl", path="/test3")
-    d = load("test.hkl", path="/test4")
-
-def test_complex():
-    """ Test complex value dtype is handled correctly
-
-    https://github.com/telegraphic/hickle/issues/29 """
-
-    data = {"A":1.5, "B":1.5 + 1j, "C":np.linspace(0,1,4) + 2j}
-    dump(data, "test.hkl")
-    data2 = load("test.hkl")
-    for key in data.keys():
-        assert type(data[key]) == type(data2[key])
-
-def test_nonstring_keys():
-    """ Test that keys are reconstructed back to their original datatypes
-    https://github.com/telegraphic/hickle/issues/36
-    """
-    if six.PY2:
-        u = unichr(233) + unichr(0x0bf2) + unichr(3972) + unichr(6000)
-
-        data = {u'test': 123,
-                'def': 456,
-                'hik' : np.array([1,2,3]),
-                u: u,
-                0: 0,
-                True: 'hi',
-                1.1 : 'hey',
-                #2L : 'omg',
-                1j: 'complex_hashable',
-                (1, 2): 'boo',
-                ('A', 17.4, 42): [1, 7, 'A'],
-                (): '1313e was here',
-                '0': 0
-                }
-        #data = {'0': 123, 'def': 456}
-        print(data)
-        dump(data, "test.hkl")
-        data2 = load("test.hkl")
-        print(data2)
-
-        for key in data.keys():
-            assert key in data2.keys()
-
-        print(data2)
-    else:
-        pass
-
-def test_scalar_compression():
-    """ Test bug where compression causes a crash on scalar datasets
-
-    (Scalars are incompressible!)
-    https://github.com/telegraphic/hickle/issues/37
-    """
-    data = {'a' : 0, 'b' : np.float(2), 'c' : True}
-
-    dump(data, "test.hkl", compression='gzip')
-    data2 = load("test.hkl")
-
-    print(data2)
-    for key in data.keys():
-        assert type(data[key]) == type(data2[key])
-
-def test_bytes():
-    """ Dumping and loading a string. PYTHON3 ONLY """
-    if six.PY3:
-        filename, mode = 'test.h5', 'w'
-        string_obj = b"The quick brown fox jumps over the lazy dog"
-        dump(string_obj, filename, mode)
-        string_hkl = load(filename)
-        #print "Initial list:   %s"%list_obj
-        #print "Unhickled data: %s"%list_hkl
-        print(type(string_obj))
-        print(type(string_hkl))
-        assert type(string_obj) == type(string_hkl) == bytes
-        assert string_obj == string_hkl
-    else:
-        pass
-
-def test_np_scalar():
-    """ Numpy scalar datatype
-
-    https://github.com/telegraphic/hickle/issues/50
-    """
-
-    fid='test.h5py'
-    r0={'test':  np.float64(10.)}
-    s = dump(r0, fid)
-    r = load(fid)
-    print(r)
-    assert type(r0['test']) == type(r['test'])
-
-if __name__ == '__main__':
-    """ Some tests and examples """
-    test_sort_keys()
-
-    test_np_scalar()
-    test_scalar_compression()
-    test_complex()
-    test_file_open_close()
-    test_dict_none()
-    test_none()
-    test_masked_dict()
-    test_list()
-    test_set()
-    test_numpy()
-    test_dict()
-    test_empty_dict()
-    test_compression()
-    test_masked()
-    test_dict_nested()
-    test_comp_kwargs()
-    test_list_numpy()
-    test_tuple_numpy()
-    test_track_times()
-    test_list_order()
-    test_embedded_array()
-    test_np_float()
-
-    if six.PY2:
-        test_unicode()
-        test_unicode2()
-        test_string()
-        test_nonstring_keys()
-
-    if six.PY3:
-        test_bytes()
-
-
-    # NEW TESTS
-    test_is_iterable()
-    test_check_iterable_item_type()
-    test_dump_nested()
-    test_with_dump()
-    test_with_load()
-    test_load()
-    test_sort_keys()
-    test_ndarray()
-    test_ndarray_masked()
-    test_simple_dict()
-    test_complex_dict()
-    test_multi_hickle()
-    test_dict_int_key()
-
-    # Cleanup
-    print("ALL TESTS PASSED!")
\ No newline at end of file
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_hickle_helpers.py b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_hickle_helpers.py
deleted file mode 100755
index 253839e97c96e484b7a66ad9d174648d281d1c66..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_hickle_helpers.py
+++ /dev/null
@@ -1,63 +0,0 @@
-#! /usr/bin/env python
-# encoding: utf-8
-"""
-# test_hickle_helpers.py
-
-Unit tests for hickle module -- helper functions.
-
-"""
-
-import numpy as np
-try:
-    import scipy
-    from scipy import sparse
-    _has_scipy = True
-except ImportError:
-    _has_scipy = False
-
-from hickle.helpers import check_is_hashable, check_is_iterable, check_iterable_item_type
-
-from hickle.loaders.load_numpy import check_is_numpy_array 
-if _has_scipy:
-    from hickle.loaders.load_scipy import check_is_scipy_sparse_array
-
-
-
-def test_check_is_iterable():
-    assert check_is_iterable([1,2,3]) is True
-    assert check_is_iterable(1) is False
-
-
-def test_check_is_hashable():
-    assert check_is_hashable(1) is True
-    assert check_is_hashable([1,2,3]) is False
-
-
-def test_check_iterable_item_type():
-    assert check_iterable_item_type([1,2,3]) is int
-    assert check_iterable_item_type([int(1), float(1)]) is False
-    assert check_iterable_item_type([]) is False
-
-
-def test_check_is_numpy_array():
-    assert check_is_numpy_array(np.array([1,2,3])) is True
-    assert check_is_numpy_array(np.ma.array([1,2,3])) is True
-    assert check_is_numpy_array([1,2]) is False
-
-
-def test_check_is_scipy_sparse_array():
-    t_csr = scipy.sparse.csr_matrix([0])
-    t_csc = scipy.sparse.csc_matrix([0])
-    t_bsr = scipy.sparse.bsr_matrix([0])
-    assert check_is_scipy_sparse_array(t_csr) is True
-    assert check_is_scipy_sparse_array(t_csc) is True
-    assert check_is_scipy_sparse_array(t_bsr) is True
-    assert check_is_scipy_sparse_array(np.array([1])) is False
-
-if __name__ == "__main__":
-    test_check_is_hashable()
-    test_check_is_iterable()
-    test_check_is_numpy_array()
-    test_check_iterable_item_type()
-    if _has_scipy:
-        test_check_is_scipy_sparse_array()
\ No newline at end of file
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_legacy_load.py b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_legacy_load.py
deleted file mode 100755
index e849bcf6594c7139357659f8cf0721ef777da3b0..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_legacy_load.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import glob
-import warnings
-import hickle as hkl
-import h5py
-import six
-
-def test_legacy_load():
-    if six.PY2:
-        filelist = sorted(glob.glob('legacy_hkls/*.hkl'))
-
-        # Make all warnings show
-        warnings.simplefilter("always")
-
-        for filename in filelist:
-            try:
-                print(filename)
-                a = hkl.load(filename)
-            except:
-                with h5py.File(filename) as a:
-                    print(a.attrs.items())
-                    print(a.items())
-                    for key, item in a.items():
-                        print(item.attrs.items())
-                    raise
-    else:
-        print("Legacy loading only works in Py2. Sorry.")
-        pass
-
-if __name__ == "__main__":
-    test_legacy_load()
\ No newline at end of file
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_scipy.py b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_scipy.py
deleted file mode 100755
index ab78311d3eb543f4d3515b6aef2eba4e5ea2a175..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/hickle-3.4.3-py3.6.egg/tests/test_scipy.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import numpy as np
-from scipy.sparse import csr_matrix, csc_matrix, bsr_matrix
-
-import hickle
-from hickle.loaders.load_scipy import check_is_scipy_sparse_array
-
-from py.path import local
-
-# Set the current working directory to the temporary directory
-local.get_temproot().chdir()
-
-
-def test_is_sparse():
-    sm0 = csr_matrix((3, 4), dtype=np.int8)
-    sm1 = csc_matrix((1, 2))
-
-    assert check_is_scipy_sparse_array(sm0)
-    assert check_is_scipy_sparse_array(sm1)
-
-
-def test_sparse_matrix():
-    sm0 = csr_matrix((3, 4), dtype=np.int8).toarray()
-
-    row = np.array([0, 0, 1, 2, 2, 2])
-    col = np.array([0, 2, 2, 0, 1, 2])
-    data = np.array([1, 2, 3, 4, 5, 6])
-    sm1 = csr_matrix((data, (row, col)), shape=(3, 3))
-    sm2 = csc_matrix((data, (row, col)), shape=(3, 3))
-
-    indptr = np.array([0, 2, 3, 6])
-    indices = np.array([0, 2, 2, 0, 1, 2])
-    data = np.array([1, 2, 3, 4, 5, 6]).repeat(4).reshape(6, 2, 2)
-    sm3 = bsr_matrix((data,indices, indptr), shape=(6, 6))
-
-    hickle.dump(sm1, 'test_sp.h5')
-    sm1_h = hickle.load('test_sp.h5')
-    hickle.dump(sm2, 'test_sp2.h5')
-    sm2_h = hickle.load('test_sp2.h5')
-    hickle.dump(sm3, 'test_sp3.h5')
-    sm3_h = hickle.load('test_sp3.h5')
-
-    assert isinstance(sm1_h, csr_matrix)
-    assert isinstance(sm2_h, csc_matrix)
-    assert isinstance(sm3_h, bsr_matrix)
-
-    assert np.allclose(sm1_h.data, sm1.data)
-    assert np.allclose(sm2_h.data, sm2.data)
-    assert np.allclose(sm3_h.data, sm3.data)
-
-    assert sm1_h.shape == sm1.shape
-    assert sm2_h.shape == sm2.shape
-    assert sm3_h.shape == sm3.shape
-
-
-if __name__ == "__main__":
-    test_sparse_matrix()
-    test_is_sparse()
\ No newline at end of file
diff --git a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/site.py b/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/site.py
deleted file mode 100755
index 0d2d2ff8da3960ecdaa6591fcee836c186fb8c91..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/hickle/lib/python3.6/site-packages/site.py
+++ /dev/null
@@ -1,74 +0,0 @@
-def __boot():
-    import sys
-    import os
-    PYTHONPATH = os.environ.get('PYTHONPATH')
-    if PYTHONPATH is None or (sys.platform == 'win32' and not PYTHONPATH):
-        PYTHONPATH = []
-    else:
-        PYTHONPATH = PYTHONPATH.split(os.pathsep)
-
-    pic = getattr(sys, 'path_importer_cache', {})
-    stdpath = sys.path[len(PYTHONPATH):]
-    mydir = os.path.dirname(__file__)
-
-    for item in stdpath:
-        if item == mydir or not item:
-            continue  # skip if current dir. on Windows, or my own directory
-        importer = pic.get(item)
-        if importer is not None:
-            loader = importer.find_module('site')
-            if loader is not None:
-                # This should actually reload the current module
-                loader.load_module('site')
-                break
-        else:
-            try:
-                import imp  # Avoid import loop in Python >= 3.3
-                stream, path, descr = imp.find_module('site', [item])
-            except ImportError:
-                continue
-            if stream is None:
-                continue
-            try:
-                # This should actually reload the current module
-                imp.load_module('site', stream, path, descr)
-            finally:
-                stream.close()
-            break
-    else:
-        raise ImportError("Couldn't find the real 'site' module")
-
-    known_paths = dict([(makepath(item)[1], 1) for item in sys.path])  # 2.2 comp
-
-    oldpos = getattr(sys, '__egginsert', 0)  # save old insertion position
-    sys.__egginsert = 0  # and reset the current one
-
-    for item in PYTHONPATH:
-        addsitedir(item)
-
-    sys.__egginsert += oldpos  # restore effective old position
-
-    d, nd = makepath(stdpath[0])
-    insert_at = None
-    new_path = []
-
-    for item in sys.path:
-        p, np = makepath(item)
-
-        if np == nd and insert_at is None:
-            # We've hit the first 'system' path entry, so added entries go here
-            insert_at = len(new_path)
-
-        if np in known_paths or insert_at is None:
-            new_path.append(item)
-        else:
-            # new path after the insert point, back-insert it
-            new_path.insert(insert_at, item)
-            insert_at += 1
-
-    sys.path[:] = new_path
-
-
-if __name__ == 'site':
-    __boot()
-    del __boot
diff --git a/workflow_parallel_frame_prediction/Training/horovodJob.sh b/workflow_parallel_frame_prediction/Training/horovodJob.sh
deleted file mode 100644
index 236a08d9913dadfee3c7b1a76b4256797ec11533..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/horovodJob.sh
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/bin/bash
-#SBATCH --account=deepacf 
-# budget account where contingent is taken from
-# TASKS = NODES * GPUS_PER_NODE
-#SBATCH --nodes=3
-#SBATCH --ntasks-per-node=4
-#SBATCH --ntasks=12
-# can be omitted if --nodes and --ntasks-per-node
-# are given
-# SBATCH --cpus-per-task=1
-# for OpenMP/hybrid jobs only
-#SBATCH --output=horovod-%j.out
-# if keyword omitted: Default is slurm-%j.out in
-# the submission directory (%j is replaced by
-# the job ID).
-#SBATCH --error=horovod-%j.err
-# if keyword omitted: Default is slurm-%j.out in
-# the submission directory.
-#SBATCH --time=20:00:00
-#SBATCH --gres=gpu:4
-#SBATCH --partition=gpus
-#SBATCH --mail-user=b.gong@fz-juelich.de
-#SBATCH --mail-type=ALL
-
-# activate the compute project and load the software environment
-jutil env activate -p deepacf
-module --force  purge
-module load Stages/Devel-2019a
-module load GCC/8.3.0
-module load MVAPICH2/2.3.2-GDR
-module load Stages/2019a
-module load Horovod/0.16.2-GPU-Python-3.6.8
-module load Keras/2.2.4-GPU-Python-3.6.8
-
-#module load ParaStationMPI/5.2.2-1
-#module load h5py/2.9.0-Python-3.6.8
-# *** start of job script ***:
-# Note: The current working directory at this point is
-# the directory where sbatch was executed.
-# export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}
-srun --cpu_bind=none python3.6 kitti_train_horovod.py
diff --git a/workflow_parallel_frame_prediction/Training/keras_utils.py b/workflow_parallel_frame_prediction/Training/keras_utils.py
deleted file mode 100755
index ededcc74fed982654d82cfb610b79224f1e08554..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/keras_utils.py
+++ /dev/null
@@ -1,58 +0,0 @@
-import os
-import numpy as np
-from importlib import reload  # reload() is not a builtin under Python 3
-
-from keras import backend as K
-from keras.legacy.interfaces import generate_legacy_interface, recurrent_args_preprocessor
-from keras.models import model_from_json
-
-legacy_prednet_support = generate_legacy_interface(
-    allowed_positional_args=['stack_sizes', 'R_stack_sizes',
-                            'A_filt_sizes', 'Ahat_filt_sizes', 'R_filt_sizes'],
-    conversions=[('dim_ordering', 'data_format'),
-                 ('consume_less', 'implementation')],
-    value_conversions={'dim_ordering': {'tf': 'channels_last',
-                                        'th': 'channels_first',
-                                        'default': None},
-                        'consume_less': {'cpu': 0,
-                                        'mem': 1,
-                                        'gpu': 2}},
-    preprocessor=recurrent_args_preprocessor)
-
-# Convert old Keras (1.2) json models and weights to Keras 2.0
-def convert_model_to_keras2(old_json_file, old_weights_file, new_json_file, new_weights_file):
-    from prednet import PredNet
-    # If using the TensorFlow backend, the old Theano-ordered weights cannot be loaded directly,
-    # so switch to the Theano backend before reading them.
-    if K.backend() != 'theano':
-        os.environ['KERAS_BACKEND'] = 'theano'
-        reload(K)
-
-    f = open(old_json_file, 'r')
-    json_string = f.read()
-    f.close()
-    model = model_from_json(json_string, custom_objects = {'PredNet': PredNet})
-    model.load_weights(old_weights_file)
-
-    weights = model.layers[1].get_weights()
-    if weights[0].shape[0] == model.layers[1].stack_sizes[1]:
-        for i, w in enumerate(weights):
-            if w.ndim == 4:
-                weights[i] = np.transpose(w, (2, 3, 1, 0))
-        model.set_weights(weights)
-
-    model.save_weights(new_weights_file)
-    json_string = model.to_json()
-    with open(new_json_file, "w") as f:
-        f.write(json_string)
-
-
-if __name__ == '__main__':
-    old_dir = './model_data/'
-    new_dir = './model_data_keras2/'
-    if not os.path.exists(new_dir):
-        os.mkdir(new_dir)
-    for w_tag in ['', '-Lall', '-extrapfinetuned']:
-        m_tag = '' if w_tag == '-Lall' else w_tag
-        convert_model_to_keras2(old_dir + 'prednet_kitti_model' + m_tag + '.json',
-                                old_dir + 'prednet_kitti_weights' + w_tag + '.hdf5',
-                                new_dir + 'prednet_kitti_model' + m_tag + '.json',
-                                new_dir + 'prednet_kitti_weights' + w_tag + '.hdf5')
diff --git a/workflow_parallel_frame_prediction/Training/kitti_settings.py b/workflow_parallel_frame_prediction/Training/kitti_settings.py
deleted file mode 100755
index 547671117e573c9e41096ebc4775b925bb99a87f..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/kitti_settings.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Where KITTI data will be saved if you run process_kitti.py
-# If you directly download the processed data, change to the path of the data.
-## Changed logic: Now this is the path where the processed data lies: X_train,val,test
-#DATA_DIR = './kitti_data/'
-#data directory for training data 2015 and 2016
-#DATA_DIR = '/p/project/cjjsc42/severin/try3'
-#data directory for moving objects:
-#DATA_DIR = '/p/home/jusers/hussmann1/jureca/movingObjects/se_nw'
-#data directory for featuretesting:
-##DATA_DIR = './testTry2'
-DATA_DIR = '/p/scratch/cjjsc42/bing/PredNet/processData/splits'
-# Where model weights and config will be saved if you run kitti_train.py
-# If you directly download the trained weights, change to appropriate path.
-WEIGHTS_DIR = './model_data_keras2/'
-#WEIGHTS_DIR = '/p/project/cjjsc42/bing/ml-severin/model_data_keras2'
-
-# Where results (prediction plots and evaluation file) will be saved.
-#RESULTS_SAVE_DIR = './kitti_results'
-
diff --git a/workflow_parallel_frame_prediction/Training/kitti_train_horovod.py b/workflow_parallel_frame_prediction/Training/kitti_train_horovod.py
deleted file mode 100755
index 72539927f6a0dd9c88df47f4738b11c7124c39bc..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/kitti_train_horovod.py
+++ /dev/null
@@ -1,119 +0,0 @@
-'''
-Train PredNet on KITTI sequences. (Geiger et al. 2013, http://www.cvlibs.net/datasets/kitti/)
-'''
-
-import os
-import numpy as np
-np.random.seed(123)
-#from six.moves import cPickle
-
-from keras import backend as K
-from keras.models import Model
-from keras.layers import Input, Dense, Flatten
-from keras.layers import LSTM
-from keras.layers import TimeDistributed
-from keras.callbacks import LearningRateScheduler, ModelCheckpoint
-from keras.optimizers import Adam
-from prednet import PredNet
-from data_utils import SequenceGenerator
-from kitti_settings import *
-import datetime
-import horovod.keras as hvd
-import keras
-import tensorflow as tf
-# Horovod: initialize Horovod
-hvd.init()
-#Horovod: pin GPU to be used for process local rank (one GPU per process)
-config = tf.ConfigProto()
-config.gpu_options.allow_growth = True
-config.gpu_options.visible_device_list = str(hvd.local_rank())
-K.set_session(tf.Session(config=config))
-
-print("horovode size", hvd.size())
-
-save_model = True  # if weights will be saved
-weights_file = os.path.join(WEIGHTS_DIR, 'prednet_kitti_weights.hdf5')  # where weights will be saved
-json_file = os.path.join(WEIGHTS_DIR, 'prednet_kitti_model.json')
-if not os.path.exists(WEIGHTS_DIR): os.mkdir(WEIGHTS_DIR)
-# Data files
-train_file = os.path.join(DATA_DIR, 'X_train.hkl')
-train_sources = os.path.join(DATA_DIR, 'sources_train.hkl')
-val_file = os.path.join(DATA_DIR, 'X_val.hkl')
-val_sources = os.path.join(DATA_DIR, 'sources_val.hkl')
-
-# Training parameters
-nb_epoch = 10 #original: 150; for all tests so far set to 100; t2onlyMax: 150
-batch_size = 15
-samples_per_epoch = 500 #original: 500; for all tests so far set to 300; t2onlyMax: 500
-N_seq_val = 80  # number of sequences to use for validation ##original: 100; for all tests so far set to 65; t2onlyMax: 80
-
-# Model parameters
-n_channels, im_height, im_width = (3, 128, 160) 
-input_shape = (n_channels, im_height, im_width) if K.image_data_format() == 'channels_first' else (im_height, im_width, n_channels)
-stack_sizes = (n_channels, 48, 96, 192)
-R_stack_sizes = stack_sizes
-A_filt_sizes = (3, 3, 3)
-Ahat_filt_sizes = (3, 3, 3, 3)
-R_filt_sizes = (3, 3, 3, 3)
-layer_loss_weights = np.array([1., 0., 0., 0.])  # weighting for each layer in final loss; "L_0" model:  [1, 0, 0, 0], "L_all": [1, 0.1, 0.1, 0.1]
-layer_loss_weights = np.expand_dims(layer_loss_weights, 1)
-nt = 10  # number of timesteps used for sequences in training
-time_loss_weights = 1./ (nt - 1) * np.ones((nt,1))  # equally weight all timesteps except the first
-time_loss_weights[0] = 0
-
-prednet = PredNet(stack_sizes, R_stack_sizes,
-			  A_filt_sizes, Ahat_filt_sizes, R_filt_sizes,
-			  output_mode='error', return_sequences=True)
-inputs = Input(shape=(nt,) + input_shape)
-errors = prednet(inputs)  # errors will be (batch_size, nt, nb_layers)
-errors_by_time = TimeDistributed(Dense(1, trainable=False), weights=[layer_loss_weights, np.zeros(1)], trainable=False)(errors)  # calculate weighted error by layer
-errors_by_time = Flatten()(errors_by_time)  # will be (batch_size, nt)
-final_errors = Dense(1, weights=[time_loss_weights, np.zeros(1)], trainable=False)(errors_by_time)  # weight errors by time
-model = Model(inputs=inputs, outputs=final_errors)
-# Horovod: adjust the learning rate based on the number of GPUs
-opt = keras.optimizers.Adam(0.01 * hvd.size())
-# Horovod: wrap the optimizer with hvd.DistributedOptimizer
-opt = hvd.DistributedOptimizer(opt)
-# Horovod: hvd.DistributedOptimizer computes the gradients averaged across ranks
-model.compile(loss="mean_absolute_error", optimizer=opt, metrics=["accuracy"])
-
-
-
-train_generator = SequenceGenerator(train_file, train_sources, nt, batch_size=batch_size, shuffle=True)
-val_generator = SequenceGenerator(val_file, val_sources, nt, batch_size=batch_size, N_seq=N_seq_val)
-
-#lr_schedule = lambda epoch: 0.001 if epoch < 75 else 0.0001    # start with lr of 0.001 and then drop to 0.0001 after 75 epochs
-callbacks = [hvd.callbacks.BroadcastGlobalVariablesCallback(0),
-         #hvd.callbacks.MetricAverageCallback(),
-         hvd.callbacks.LearningRateWarmupCallback(warmup_epochs=5,verbose=1)
-         ]
-#bing: original save_model is True
-if hvd.rank() == 0:
-    if save_model:
-        print("===========The model will be saved =======")
-        callbacks.append(ModelCheckpoint(filepath=weights_file, monitor='val_loss', save_best_only=True))
-
-# record the training start time
-a = datetime.datetime.now()
-
-history = model.fit_generator(generator=train_generator,steps_per_epoch=samples_per_epoch/(batch_size*hvd.size()), epochs=nb_epoch, callbacks=callbacks,
-                validation_data=val_generator, validation_steps=N_seq_val/(batch_size*hvd.size()))
-
-
-b = datetime.datetime.now()
-
-#the training time
-t = b-a
-
-stats = list(train_generator.X.shape)
-stats.append(t)
-
-print("training time is",stats)
-
-if save_model:
-    json_string = model.to_json()
-    with open(json_file, "w") as f:
-        f.write(json_string)
-
-
-
diff --git a/workflow_parallel_frame_prediction/Training/minMaxExtractor.py b/workflow_parallel_frame_prediction/Training/minMaxExtractor.py
deleted file mode 100644
index 5c1216208fae071c9dcf62e2152722dd626cd15e..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/minMaxExtractor.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import hickle as hkl
-import numpy as np
-import matplotlib.pyplot as plt
-
-#x_train = hkl.load('/Users/Severin/Desktop/X_train.hkl') #load X_train produced on JURECA
-x_train = hkl.load('/p/project/cjjsc42/severin/try3/X_train.hkl') #load X_train produced on JURECA
-print('Shape of X:')
-print(x_train.shape)
-print('')
-
-#Print example
-#t2_cutout = x_train[100,:,:,0]
-#printt2cutout = plt.pcolormesh(t2_cutout[::-1,:], shading='bottom', cmap=plt.cm.jet)
-#plt.savefig('t2_cutout')
-#Extract Max min values:
-maxT2 = np.amax(x_train[:,:,:,0]) # numpy.amax() returns the maximum of an array or maximum along an axis.
-print('maxT2: ' + str(maxT2))
-minT2 = np.amin(x_train[:,:,:,0])
-print('minT2: ' + str(minT2))
-meanT2 = np.mean(x_train[:,:,:,0])
-print('meanT2: ' + str(meanT2))
-stdT2 = np.std(x_train[:,:,:,0])
-print('stdT2: ' + str(stdT2))
-highCutT2 = meanT2 + 3 * stdT2
-print('highCutT2: ' + str(highCutT2))
-lowCutT2 = meanT2 - 3 * stdT2
-print('lowCutT2: ' + str(lowCutT2))
-print('')
-
-maxGP = np.amax(x_train[:,:,:,1])
-print('maxGP: ' + str(maxGP))
-minGP = np.amin(x_train[:,:,:,1])
-print('minGP: ' + str(minGP))
-meanGP = np.mean(x_train[:,:,:,1])
-print('meanGP: ' + str(meanGP))
-stdGP = np.std(x_train[:,:,:,1])
-print('stdGP: ' + str(stdGP))
-highCutGP = meanGP + 3 * stdGP
-print('highCutGP: ' + str(highCutGP))
-lowCutGP = meanGP - 3 * stdGP
-print('lowCutGP: ' + str(lowCutGP))
-print('')
-
-maxGPH = np.amax(x_train[:,:,:,2])
-print('maxGPH: ' + str(maxGPH))
-minGPH = np.amin(x_train[:,:,:,2])
-print('minGPH: ' + str(minGPH))
-meanGPH = np.mean(x_train[:,:,:,2])
-print('meanGPH: ' + str(meanGPH))
-stdGPH = np.std(x_train[:,:,:,2])
-print('stdGPH: ' + str(stdGPH))
-highCutGPH = meanGPH + 3 * stdGPH
-print('highCutGPH: ' + str(highCutGPH))
-lowCutGPH = meanGPH - 3 * stdGPH
-print('lowCutGPH: ' + str(lowCutGPH))
-print('')
-
-# Normalization formula: z = (x - min(x)) / (max(x) - min(x))
-#x_trainNormalized2 = np.zeros(shape=x_train.shape)
-#print('Empty shape:')
-#print(x_trainNormalized2.shape)
-#x_trainNormalized2[:,:,:,0] = (x_train[:,:,:,0]-minT2)/(maxT2-minT2)
-#x_trainNormalized2[:,:,:,1] = (x_train[:,:,:,1]-minGP)/(maxGP-minGP)
-#x_trainNormalized2[:,:,:,2] = (x_train[:,:,:,2]-minGPH)/(maxGPH-minGPH)
-
-#print('MaxMin values of normalized dataset:')
-#print('T2:')
-#print(np.amax(x_trainNormalized2[:,:,:,0]))
-#print(np.amin(x_trainNormalized2[:,:,:,0]))
-#print('GP:')
-#print(np.amax(x_trainNormalized2[:,:,:,1]))
-#print(np.amin(x_trainNormalized2[:,:,:,1]))
-#print('GPH:')
-#print(np.amax(x_trainNormalized2[:,:,:,2]))
-#print(np.amin(x_trainNormalized2[:,:,:,2]))
-#print(x_trainNormalized2)
\ No newline at end of file
diff --git a/workflow_parallel_frame_prediction/Training/prednet.py b/workflow_parallel_frame_prediction/Training/prednet.py
deleted file mode 100755
index b5a0208ae137666c9bc284b21d6affe04d721053..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/prednet.py
+++ /dev/null
@@ -1,311 +0,0 @@
-import numpy as np
-
-from keras import backend as K
-from keras import activations
-from keras.layers import Recurrent
-from keras.layers import Conv2D, UpSampling2D, MaxPooling2D
-from keras.engine import InputSpec
-from keras_utils import legacy_prednet_support
-
-class PredNet(Recurrent):
-    '''PredNet architecture - Lotter 2016.
-        Stacked convolutional LSTM inspired by predictive coding principles.
-
-    # Arguments
-        stack_sizes: number of channels in targets (A) and predictions (Ahat) in each layer of the architecture.
-            Length is the number of layers in the architecture.
-            First element is the number of channels in the input.
-            Ex. (3, 16, 32) would correspond to a 3 layer architecture that takes in RGB images and has 16 and 32
-                channels in the second and third layers, respectively.
-        R_stack_sizes: number of channels in the representation (R) modules.
-            Length must equal length of stack_sizes, but the number of channels per layer can be different.
-        A_filt_sizes: filter sizes for the target (A) modules.
-            Has length of len(stack_sizes) - 1.
-            Ex. (3, 3) would mean that targets for layers 2 and 3 are computed by a 3x3 convolution of the errors (E)
-                from the layer below (followed by max-pooling)
-        Ahat_filt_sizes: filter sizes for the prediction (Ahat) modules.
-            Has length equal to length of stack_sizes.
-            Ex. (3, 3, 3) would mean that the predictions for each layer are computed by a 3x3 convolution of the
-                representation (R) modules at each layer.
-        R_filt_sizes: filter sizes for the representation (R) modules.
-            Has length equal to length of stack_sizes.
-            Corresponds to the filter sizes for all convolutions in the LSTM.
-        pixel_max: the maximum pixel value.
-            Used to clip the pixel-layer prediction.
-        error_activation: activation function for the error (E) units.
-        A_activation: activation function for the target (A) and prediction (A_hat) units.
-        LSTM_activation: activation function for the cell and hidden states of the LSTM.
-        LSTM_inner_activation: activation function for the gates in the LSTM.
-        output_mode: either 'error', 'prediction', 'all' or layer specification (ex. R2, see below).
-            Controls what is outputted by the PredNet.
-            If 'error', the mean response of the error (E) units of each layer will be outputted.
-                That is, the output shape will be (batch_size, nb_layers).
-            If 'prediction', the frame prediction will be outputted.
-            If 'all', the output will be the frame prediction concatenated with the mean layer errors.
-                The frame prediction is flattened before concatenation.
-                Nomenclature of 'all' is kept for backwards compatibility, but should not be confused with returning all of the layers of the model
-            For returning the features of a particular layer, output_mode should be of the form unit_type + layer_number.
-                For instance, to return the features of the LSTM "representational" units in the lowest layer, output_mode should be specified as 'R0'.
-                The possible unit types are 'R', 'Ahat', 'A', and 'E' corresponding to the 'representation', 'prediction', 'target', and 'error' units respectively.
-        extrap_start_time: time step for which model will start extrapolating.
-            Starting at this time step, the prediction from the previous time step will be treated as the "actual"
-        data_format: 'channels_first' or 'channels_last'.
-            It defaults to the `image_data_format` value found in your
-            Keras config file at `~/.keras/keras.json`.
-
-    # References
-        - [Deep predictive coding networks for video prediction and unsupervised learning](https://arxiv.org/abs/1605.08104)
-        - [Long short-term memory](http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf)
-        - [Convolutional LSTM network: a machine learning approach for precipitation nowcasting](http://arxiv.org/abs/1506.04214)
-        - [Predictive coding in the visual cortex: a functional interpretation of some extra-classical receptive-field effects](http://www.nature.com/neuro/journal/v2/n1/pdf/nn0199_79.pdf)
-    '''
-    @legacy_prednet_support
-    def __init__(self, stack_sizes, R_stack_sizes,
-                 A_filt_sizes, Ahat_filt_sizes, R_filt_sizes,
-                 pixel_max=1., error_activation='relu', A_activation='relu',
-                 LSTM_activation='tanh', LSTM_inner_activation='hard_sigmoid',
-                 output_mode='error', extrap_start_time=None,
-                 data_format=K.image_data_format(), **kwargs):
-        self.stack_sizes = stack_sizes
-        self.nb_layers = len(stack_sizes)
-        assert len(R_stack_sizes) == self.nb_layers, 'len(R_stack_sizes) must equal len(stack_sizes)'
-        self.R_stack_sizes = R_stack_sizes
-        assert len(A_filt_sizes) == (self.nb_layers - 1), 'len(A_filt_sizes) must equal len(stack_sizes) - 1'
-        self.A_filt_sizes = A_filt_sizes
-        assert len(Ahat_filt_sizes) == self.nb_layers, 'len(Ahat_filt_sizes) must equal len(stack_sizes)'
-        self.Ahat_filt_sizes = Ahat_filt_sizes
-        assert len(R_filt_sizes) == (self.nb_layers), 'len(R_filt_sizes) must equal len(stack_sizes)'
-        self.R_filt_sizes = R_filt_sizes
-
-        self.pixel_max = pixel_max
-        self.error_activation = activations.get(error_activation)
-        self.A_activation = activations.get(A_activation)
-        self.LSTM_activation = activations.get(LSTM_activation)
-        self.LSTM_inner_activation = activations.get(LSTM_inner_activation)
-
-        default_output_modes = ['prediction', 'error', 'all']
-        layer_output_modes = [layer + str(n) for n in range(self.nb_layers) for layer in ['R', 'E', 'A', 'Ahat']]
-        assert output_mode in default_output_modes + layer_output_modes, 'Invalid output_mode: ' + str(output_mode)
-        self.output_mode = output_mode
-        if self.output_mode in layer_output_modes:
-            self.output_layer_type = self.output_mode[:-1]
-            self.output_layer_num = int(self.output_mode[-1])
-        else:
-            self.output_layer_type = None
-            self.output_layer_num = None
-        self.extrap_start_time = extrap_start_time
-
-        assert data_format in {'channels_last', 'channels_first'}, 'data_format must be in {channels_last, channels_first}'
-        self.data_format = data_format
-        self.channel_axis = -3 if data_format == 'channels_first' else -1
-        self.row_axis = -2 if data_format == 'channels_first' else -3
-        self.column_axis = -1 if data_format == 'channels_first' else -2
-        super(PredNet, self).__init__(**kwargs)
-        self.input_spec = [InputSpec(ndim=5)]
-
-    def compute_output_shape(self, input_shape):
-        if self.output_mode == 'prediction':
-            out_shape = input_shape[2:]
-        elif self.output_mode == 'error':
-            out_shape = (self.nb_layers,)
-        elif self.output_mode == 'all':
-            out_shape = (np.prod(input_shape[2:]) + self.nb_layers,)
-        else:
-            stack_str = 'R_stack_sizes' if self.output_layer_type == 'R' else 'stack_sizes'
-            stack_mult = 2 if self.output_layer_type == 'E' else 1
-            out_stack_size = stack_mult * getattr(self, stack_str)[self.output_layer_num]
-            out_nb_row = input_shape[self.row_axis] / 2**self.output_layer_num
-            out_nb_col = input_shape[self.column_axis] / 2**self.output_layer_num
-            if self.data_format == 'channels_first':
-                out_shape = (out_stack_size, out_nb_row, out_nb_col)
-            else:
-                out_shape = (out_nb_row, out_nb_col, out_stack_size)
-
-        if self.return_sequences:
-            return (input_shape[0], input_shape[1]) + out_shape
-        else:
-            return (input_shape[0],) + out_shape
-
-    def get_initial_state(self, x):
-        input_shape = self.input_spec[0].shape
-        init_nb_row = input_shape[self.row_axis]
-        init_nb_col = input_shape[self.column_axis]
-
-        base_initial_state = K.zeros_like(x)  # (samples, timesteps) + image_shape
-        non_channel_axis = -1 if self.data_format == 'channels_first' else -2
-        for _ in range(2):
-            base_initial_state = K.sum(base_initial_state, axis=non_channel_axis)
-        base_initial_state = K.sum(base_initial_state, axis=1)  # (samples, nb_channels)
-
-        initial_states = []
-        states_to_pass = ['r', 'c', 'e']
-        nlayers_to_pass = {u: self.nb_layers for u in states_to_pass}
-        if self.extrap_start_time is not None:
-           states_to_pass.append('ahat')  # pass prediction in states so can use as actual for t+1 when extrapolating
-           nlayers_to_pass['ahat'] = 1
-        for u in states_to_pass:
-            for l in range(nlayers_to_pass[u]):
-                ds_factor = 2 ** l
-                nb_row = init_nb_row // ds_factor
-                nb_col = init_nb_col // ds_factor
-                if u in ['r', 'c']:
-                    stack_size = self.R_stack_sizes[l]
-                elif u == 'e':
-                    stack_size = 2 * self.stack_sizes[l]
-                elif u == 'ahat':
-                    stack_size = self.stack_sizes[l]
-                output_size = stack_size * nb_row * nb_col  # flattened size
-
-                reducer = K.zeros((input_shape[self.channel_axis], output_size)) # (nb_channels, output_size)
-                initial_state = K.dot(base_initial_state, reducer) # (samples, output_size)
-                if self.data_format == 'channels_first':
-                    output_shp = (-1, stack_size, nb_row, nb_col)
-                else:
-                    output_shp = (-1, nb_row, nb_col, stack_size)
-                initial_state = K.reshape(initial_state, output_shp)
-                initial_states += [initial_state]
-
-        if K._BACKEND == 'theano':
-            from theano import tensor as T
-            # There is a known issue in the Theano scan op when dealing with inputs whose shape is 1 along a dimension.
-            # In our case, this is a problem when training on grayscale images, and the below line fixes it.
-            initial_states = [T.unbroadcast(init_state, 0, 1) for init_state in initial_states]
-
-        if self.extrap_start_time is not None:
-            initial_states += [K.variable(0, int if K.backend() != 'tensorflow' else 'int32')]  # the last state will correspond to the current timestep
-        return initial_states
-
-    def build(self, input_shape):
-        self.input_spec = [InputSpec(shape=input_shape)]
-        self.conv_layers = {c: [] for c in ['i', 'f', 'c', 'o', 'a', 'ahat']}
-
-        for l in range(self.nb_layers):
-            for c in ['i', 'f', 'c', 'o']:
-                act = self.LSTM_activation if c == 'c' else self.LSTM_inner_activation
-                self.conv_layers[c].append(Conv2D(self.R_stack_sizes[l], self.R_filt_sizes[l], padding='same', activation=act, data_format=self.data_format))
-
-            act = 'relu' if l == 0 else self.A_activation
-            self.conv_layers['ahat'].append(Conv2D(self.stack_sizes[l], self.Ahat_filt_sizes[l], padding='same', activation=act, data_format=self.data_format))
-
-            if l < self.nb_layers - 1:
-                self.conv_layers['a'].append(Conv2D(self.stack_sizes[l+1], self.A_filt_sizes[l], padding='same', activation=self.A_activation, data_format=self.data_format))
-
-        self.upsample = UpSampling2D(data_format=self.data_format)
-        self.pool = MaxPooling2D(data_format=self.data_format)
-
-        self.trainable_weights = []
-        nb_row, nb_col = (input_shape[-2], input_shape[-1]) if self.data_format == 'channels_first' else (input_shape[-3], input_shape[-2])
-        for c in sorted(self.conv_layers.keys()):
-            for l in range(len(self.conv_layers[c])):
-                ds_factor = 2 ** l
-                if c == 'ahat':
-                    nb_channels = self.R_stack_sizes[l]
-                elif c == 'a':
-                    nb_channels = 2 * self.stack_sizes[l]
-                else:
-                    nb_channels = self.stack_sizes[l] * 2 + self.R_stack_sizes[l]
-                    if l < self.nb_layers - 1:
-                        nb_channels += self.R_stack_sizes[l+1]
-                in_shape = (input_shape[0], nb_channels, nb_row // ds_factor, nb_col // ds_factor)
-                if self.data_format == 'channels_last': in_shape = (in_shape[0], in_shape[2], in_shape[3], in_shape[1])
-                with K.name_scope('layer_' + c + '_' + str(l)):
-                    self.conv_layers[c][l].build(in_shape)
-                self.trainable_weights += self.conv_layers[c][l].trainable_weights
-
-        self.states = [None] * self.nb_layers*3
-
-        if self.extrap_start_time is not None:
-            self.t_extrap = K.variable(self.extrap_start_time, int if K.backend() != 'tensorflow' else 'int32')
-            self.states += [None] * 2  # [previous frame prediction, timestep]
-
-    def step(self, a, states):
-        r_tm1 = states[:self.nb_layers]
-        c_tm1 = states[self.nb_layers:2*self.nb_layers]
-        e_tm1 = states[2*self.nb_layers:3*self.nb_layers]
-
-        if self.extrap_start_time is not None:
-            t = states[-1]
-            a = K.switch(t >= self.t_extrap, states[-2], a)  # if past self.extrap_start_time, the previous prediction will be treated as the actual
-
-        c = []
-        r = []
-        e = []
-
-        # Update R units starting from the top
-        for l in reversed(range(self.nb_layers)):
-            inputs = [r_tm1[l], e_tm1[l]]
-            if l < self.nb_layers - 1:
-                inputs.append(r_up)
-
-            inputs = K.concatenate(inputs, axis=self.channel_axis)
-            i = self.conv_layers['i'][l].call(inputs)
-            f = self.conv_layers['f'][l].call(inputs)
-            o = self.conv_layers['o'][l].call(inputs)
-            _c = f * c_tm1[l] + i * self.conv_layers['c'][l].call(inputs)
-            _r = o * self.LSTM_activation(_c)
-            c.insert(0, _c)
-            r.insert(0, _r)
-
-            if l > 0:
-                r_up = self.upsample.call(_r)
-
-        # Update feedforward path starting from the bottom
-        for l in range(self.nb_layers):
-            ahat = self.conv_layers['ahat'][l].call(r[l])
-            if l == 0:
-                ahat = K.minimum(ahat, self.pixel_max)
-                frame_prediction = ahat
-
-            # compute errors
-            e_up = self.error_activation(ahat - a)
-            e_down = self.error_activation(a - ahat)
-
-            e.append(K.concatenate((e_up, e_down), axis=self.channel_axis))
-
-            if self.output_layer_num == l:
-                if self.output_layer_type == 'A':
-                    output = a
-                elif self.output_layer_type == 'Ahat':
-                    output = ahat
-                elif self.output_layer_type == 'R':
-                    output = r[l]
-                elif self.output_layer_type == 'E':
-                    output = e[l]
-
-            if l < self.nb_layers - 1:
-                a = self.conv_layers['a'][l].call(e[l])
-                a = self.pool.call(a)  # target for next layer
-
-        if self.output_layer_type is None:
-            if self.output_mode == 'prediction':
-                output = frame_prediction
-            else:
-                for l in range(self.nb_layers):
-                    layer_error = K.mean(K.batch_flatten(e[l]), axis=-1, keepdims=True)
-                    all_error = layer_error if l == 0 else K.concatenate((all_error, layer_error), axis=-1)
-                if self.output_mode == 'error':
-                    output = all_error
-                else:
-                    output = K.concatenate((K.batch_flatten(frame_prediction), all_error), axis=-1)
-
-        states = r + c + e
-        if self.extrap_start_time is not None:
-            states += [frame_prediction, t + 1]
-        return output, states
-
-    def get_config(self):
-        config = {'stack_sizes': self.stack_sizes,
-                  'R_stack_sizes': self.R_stack_sizes,
-                  'A_filt_sizes': self.A_filt_sizes,
-                  'Ahat_filt_sizes': self.Ahat_filt_sizes,
-                  'R_filt_sizes': self.R_filt_sizes,
-                  'pixel_max': self.pixel_max,
-                  'error_activation': self.error_activation.__name__,
-                  'A_activation': self.A_activation.__name__,
-                  'LSTM_activation': self.LSTM_activation.__name__,
-                  'LSTM_inner_activation': self.LSTM_inner_activation.__name__,
-                  'data_format': self.data_format,
-                  'extrap_start_time': self.extrap_start_time,
-                  'output_mode': self.output_mode}
-        base_config = super(PredNet, self).get_config()
-        return dict(list(base_config.items()) + list(config.items()))
diff --git a/workflow_parallel_frame_prediction/Training/process_netCDF.py b/workflow_parallel_frame_prediction/Training/process_netCDF.py
deleted file mode 100644
index 2cb822ec6afa49a2be6c6411b64c20e5c0fe71ff..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/Training/process_netCDF.py
+++ /dev/null
@@ -1,169 +0,0 @@
-'''
-Code for processing staged ERA5 data 
-'''
-
-import os
-#import requests
-#from bs4 import BeautifulSoup
-#import urllib.request
-import numpy as np
-#from imageio import imread
-#from scipy.misc import imresize
-import hickle as hkl
-from netCDF4 import Dataset
-from kitti_settings import *
-
-# TODO: Not optimal with DATA_DIR and filingPath: in the original process_kitti.py
-# there is just DATA_DIR (which is specified in kitti_settings.py) and the processed
-# data is stored in there. The raw data also lies in there, in a subfolder.
-
-#Path of ERA5 Data
-DATA_DIR = './testData2'
-print(DATA_DIR)
-#Path where to save processed data
-filingPath = './testTry2'
-
-# ToDo: Define a convenient function to create a list containing all files.
-imageList = list(os.walk(DATA_DIR, topdown=False))[-1][-1]
-imageList = sorted(imageList)
-print('Image List:')
-print(imageList)
-print('Length of Image List: ' + str(len(imageList)))
-#scp hussmann1@jureca.fz-juelich.de:/p/project/cjjsc42/severin/data/era5_extract_481.nc ~/Desktop/netCDFdata
-
-# ToDo: Define properly the train, val and test index
-# Here just for testing and taking weird .DS_Store file into consideration
-# http://www.apfelwiki.de/Main/DSStore
-#train_recordings = imageList[1:6]
-#val_recordings = imageList[7:9]
-#test_recordings = imageList[-2:]
-
-#Train,Val,Test size in percentage
-partition = [0.8, 0.05, 0.15]
-#determine correct indices 
-train_begin = 0
-train_end = round(partition[0]*len(imageList))-1
-val_begin = train_end + 1
-val_end = train_end + round(partition[1]*len(imageList))
-test_begin = val_end + 1
-test_end = len(imageList)-1
-print('Indices of Train, Val and test: '+ str(train_begin) + ' ' + str(val_begin) + ' ' + str(test_begin))
-#slightly adapting start and end because slicing starts at the first index given and stops before(!) the last one
-train_recordings = imageList[train_begin:val_begin]
-val_recordings = imageList[val_begin:test_begin]
-test_recordings = imageList[test_begin:test_end]
-
-#adapted for feature testing: just the first year (2015); otherwise it would take too long, and some of the 2016 data is faulty
-#in total: 17544
-#half: 8772
-#train: 0-6900
-#val:6901-7000
-#test:7001-8772
-#train_recordings = imageList[0:1000]
-#val_recordings = imageList[6901:7000]
-#test_recordings = imageList[7001:8772]
-
-print('Now everything together:')
-print('Train:')
-print(train_recordings)
-print('Val:')
-print(val_recordings)
-print('Test:')
-print(test_recordings)
-
-desired_im_sz = (128, 160)
-# Create image datasets.
-# Processes images and saves them in train, val, test splits.
-def process_data():
-    splits = {s: [] for s in ['train', 'test', 'val']}
-    splits['val'] = val_recordings
-    splits['test'] = test_recordings
-    splits['train'] = train_recordings
-    for split in splits:
-        source_list = [DATA_DIR] * len(splits[split]) # corresponds to recording that image came from
-        print(splits[split])
-        print(source_list)
-        print((len(splits[split])==(len(source_list))))
-        print('The list of ' + split + ' has length: ' + str(len(source_list)))
-        print( 'Creating ' + split + ' data: ' + str(len(source_list)) + ' images')
-
-        #X = np.zeros((len(splits[split]),) + desired_im_sz + (3,), np.uint8)
-        #print(X)
-        #print('shape of X' + str(X.shape))
-
-        ##### TODO: iterate over the split and read every .nc file, cut out the region of interest,
-        #####       stack the arrays in an RGB-like fashion,
-        #####       and save everything after the for loop.
-        EU_stack_list = [0] * (len(splits[split]))
-
-        for i, im_file in enumerate(splits[split]):
-            im_path = os.path.join(DATA_DIR, im_file)
-            print('Open following dataset: ' + im_path)
-            im = Dataset(im_path, mode = 'r')
-            #print(im)
-            t2 = im.variables['T2'][0,:,:]
-            msl = im.variables['MSL'][0,:,:]
-            gph500 = im.variables['gph500'][0,:,:]
-            im.close()
-            EU_t2 = t2[74:202, 550:710]
-            EU_msl = msl[74:202, 550:710]
-            EU_gph500 = gph500[74:202, 550:710]
-            print(EU_t2.shape, EU_msl.shape, EU_gph500.shape)
-            #Normal stack: T2, MSL & GPH500
-            #EU_stack = np.stack([EU_t2, EU_msl, EU_gph500],axis=2)
-            #Stack T2 only:
-            #EU_stack = np.stack([EU_t2, EU_t2, EU_t2],axis=2)
-            #EU_stack_list[i]=EU_stack
-            #Stack T2*2 MSL*1:
-            #EU_stack = np.stack([EU_t2, EU_t2, EU_msl],axis=2)
-            #EU_stack_list[i]=EU_stack
-            #EU_stack = np.stack([EU_t2, EU_msl, EU_msl],axis=2)
-            #EU_stack_list[i]=EU_stack
-            #Stack T2*2 gph500*1:
-            #EU_stack = np.stack([EU_t2, EU_t2, EU_gph500],axis=2)
-            #EU_stack_list[i]=EU_stack
-            #Stack T2*1 gph500*2
-            #EU_stack = np.stack([EU_t2, EU_gph500, EU_gph500],axis=2)
-            #EU_stack_list[i]=EU_stack
-            #print(EU_stack.shape)
-            #X[i]=EU_stack #this should be unnecessary
-            #t2_1 stack. Stack t2 with two empty arrays
-            #empty_image = np.zeros(shape = (128, 160))
-            #EU_stack = np.stack([EU_t2, empty_image, empty_image],axis=2)
-            #EU_stack_list[i]=EU_stack
-            #t2_2 stack. Stack t2 with one empty array
-            empty_image = np.zeros(shape = (128, 160))
-            EU_stack = np.stack([EU_t2, EU_t2, empty_image],axis=2)
-            EU_stack_list[i]=EU_stack
-            #print('Does it work?')
-            #print(EU_stack_list[i][:,:,0]==EU_t2)
-            #print(EU_stack[:,:,1]==EU_msl)
-        X = np.array(EU_stack_list)
-        print('Shape of X: ' + str(X.shape))
-        hkl.dump(X, os.path.join(filingPath, 'X_' + split + '.hkl')) #Not optimal!
-        hkl.dump(source_list, os.path.join(filingPath, 'sources_' + split + '.hkl'))
-
-
-        #for category, folder in splits[split]:
-        #    im_dir = os.path.join(DATA_DIR, 'raw/', category, folder, folder[:10], folder, 'image_03/data/')
-        #    files = list(os.walk(im_dir, topdown=False))[-1][-1]
-        #    im_list += [im_dir + f for f in sorted(files)]
-            # multiply path of respective recording with lengths of its files in order to ensure
-            # that each entry in X_train.hkl corresponds with an entry of source_list/ sources_train.hkl
-        #    source_list += [category + '-' + folder] * len(files)
-
-        #print( 'Creating ' + split + ' data: ' + str(len(im_list)) + ' images')
-        #X = np.zeros((len(im_list),) + desired_im_sz + (3,), np.uint8)
-        # enumerate allows us to loop over something and have an automatic counter
-        #for i, im_file in enumerate(im_list):
-        #    im = imread(im_file)
-        #    X[i] = process_im(im, desired_im_sz)
-
-        #hkl.dump(X, os.path.join(DATA_DIR, 'X_' + split + '.hkl'))
-        #hkl.dump(source_list, os.path.join(DATA_DIR, 'sources_' + split + '.hkl'))
-
-
-if __name__ == '__main__':
-    #download_data()
-    #extract_data()
-    process_data()
diff --git a/workflow_parallel_frame_prediction/Workflow.png b/workflow_parallel_frame_prediction/Workflow.png
deleted file mode 100644
index 5edc5ed807f496597f52d978d2cd28532e689b2b..0000000000000000000000000000000000000000
Binary files a/workflow_parallel_frame_prediction/Workflow.png and /dev/null differ
diff --git a/workflow_parallel_frame_prediction/__init__.py b/workflow_parallel_frame_prediction/__init__.py
deleted file mode 100755
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/workflow_parallel_frame_prediction/main.sh b/workflow_parallel_frame_prediction/main.sh
deleted file mode 100644
index 066b202d0daaf60555afd5a2898372c3a06eba0c..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/main.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash
-
-EXTRACT_PATH="./DataExtraction/Stager_devel_N_24_Bing.sh"
-PREPROCESS_PATH="./DataPreprocess/Stager_devel_N_24_process_netCDF.sh"
-TRAINING_PATH="./Training/horovodJob.sh"
-POSTPROCESS_PATH="./DataPostprocess/Stager_devel_N_24_evaluation.sh"
-
-echo "============ Parallel Data Extraction ==========\n"
-
-sbatch "$EXTRACT_PATH"
-
-echo "============= Parallel Data Preprocessing =========\n "
-
-
-sbatch "$PREPROCESS_PATH"
-
-
-echo "============= Parallel Training ================\n"
-
-sbatch "$TRAINING_PATH"
-
-
-echo "=============Parallel Postprocessing ===============\n"
-
-sbatch "$POSTPROCESS_PATH"
-
-
-
diff --git a/workflow_parallel_frame_prediction/setup.py b/workflow_parallel_frame_prediction/setup.py
deleted file mode 100755
index 207b869dd947101b9160c90a1511037f44611552..0000000000000000000000000000000000000000
--- a/workflow_parallel_frame_prediction/setup.py
+++ /dev/null
@@ -1,11 +0,0 @@
-
-from setuptools import setup
-
-setup(
-    name='Parallel_Workflow_PredNet',
-    author="Bing,Gong; Amirpasha Mozaffari, Severin Hussman",
-    description="This is the parallel workflow for PredNet",
-    copyright= "Copyright 2019, The ESDE project",
-    version='1.0.0',
-    author_email="b.gong@fz-juelich.de",
-)