diff --git a/video_prediction_savp/scripts/generate_transfer_learning_finetune.py b/video_prediction_savp/scripts/generate_transfer_learning_finetune.py
index 13b93889875779942e5171e5e1d98eebc84fd9f3..3df6f7e2843eb732df4d0be70f410853a0ac2a78 100644
--- a/video_prediction_savp/scripts/generate_transfer_learning_finetune.py
+++ b/video_prediction_savp/scripts/generate_transfer_learning_finetune.py
@@ -357,6 +357,7 @@ def main():
         sess.graph.as_default()
         sess.run(tf.global_variables_initializer())
         sess.run(tf.local_variables_initializer())
+        model.restore(sess, args.checkpoint)
         #model.restore(sess, args.checkpoint)#Bing: Todo: 20200728 Let's only focus on true and persistend data
         sample_ind, gen_images_all, persistent_images_all, input_images_all = initia_save_data()

diff --git a/video_prediction_savp/video_prediction/datasets/era5_dataset_v2.py b/video_prediction_savp/video_prediction/datasets/era5_dataset_v2.py
index 5baad396c6d8d1d233f0c683640ebf1908170fae..a3c9fc3666eb21ef02f9e5f64c8c95a29034d619 100644
--- a/video_prediction_savp/video_prediction/datasets/era5_dataset_v2.py
+++ b/video_prediction_savp/video_prediction/datasets/era5_dataset_v2.py
@@ -366,13 +366,13 @@ def main():
 #            "2012":[1,2,3,4,5,6,7,8,9,10,11,12],
 #            "2013_complete":[1,2,3,4,5,6,7,8,9,10,11,12],
 #            "2015":[1,2,3,4,5,6,7,8,9,10,11,12],
-             "2017":[1,2,3,4,5,6,7,8,9,10]
+             "2017_test":[1,2,3,4,5,6,7,8,9,10]
              },
         "val":
-             {"2017":[11]
+             {"2017_test":[11]
              },
         "test":
-             {"2017":[12]
+             {"2017_test":[12]
              }
         }

diff --git a/video_prediction_savp/video_prediction/models/vanilla_convLSTM_model.py b/video_prediction_savp/video_prediction/models/vanilla_convLSTM_model.py
index c7f3db7ce4fce732312eba0d9f17362faa2e64b5..7560a225e7651728e2ca8d2107d7f32458106c86 100644
--- a/video_prediction_savp/video_prediction/models/vanilla_convLSTM_model.py
+++ b/video_prediction_savp/video_prediction/models/vanilla_convLSTM_model.py
@@ -41,7 +41,7 @@ class VanillaConvLstmVideoPredictionModel(BaseVideoPredictionModel):
             lr: learning rate. if decay steps is non-zero, this is the
                 learning rate for steps <= decay_step.
             max_steps: number of training steps.
-            context_frames: the number of ground-truth frames to pass in at
+            context_frames: the number of ground-truth frames to pass :qin at
                 start. Must be specified during instantiation.
             sequence_length: the number of frames in the video sequence,
                 including the context frames, so this model predicts
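
Note: the first hunk re-enables checkpoint restoring before generation, so the script samples with trained weights rather than the fresh random initialization left by the variable initializers. A minimal TF1-style sketch of that restore pattern, assuming model.restore() wraps a tf.train.Saver (the checkpoint path and the toy variable below are hypothetical, not from the diff):

    import tensorflow as tf  # TF1 API, as used throughout the SAVP code base

    # Toy variable so the graph has something to save/restore.
    weights = tf.get_variable("weights", shape=[2], initializer=tf.zeros_initializer())

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        saver = tf.train.Saver(tf.global_variables())
        # A checkpoint argument may name a directory; latest_checkpoint
        # then resolves it to the newest checkpoint file inside.
        ckpt = tf.train.latest_checkpoint("/path/to/checkpoint_dir")  # hypothetical path
        if ckpt is not None:
            saver.restore(sess, ckpt)  # overwrites the random init with trained weights

Without the restore call (the pre-patch behavior, where it was commented out), the generated samples come from untrained weights.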