Skip to content
Snippets Groups Projects
Commit 6f3b4588 authored by gong1's avatar gong1
Browse files

update WeatherBench

parent ddf29111
No related branches found
No related tags found
No related merge requests found
Pipeline #102866 passed
...@@ -57,11 +57,12 @@ class WeatherBenchModel(object): ...@@ -57,11 +57,12 @@ class WeatherBenchModel(object):
""" """
hparams = dict( hparams = dict(
sequence_length =12, sequence_length =12,
context_frames =1,
max_epochs = 20, max_epochs = 20,
batch_size = 40, batch_size = 40,
lr = 0.001, lr = 0.001,
shuffle_on_val= True, shuffle_on_val= True,
filters = [64, 64, 64, 64, 2], filters = [64, 64, 64, 64, 3],
kernels = [5, 5, 5, 5, 5] kernels = [5, 5, 5, 5, 5]
) )
return hparams return hparams
...@@ -75,9 +76,10 @@ class WeatherBenchModel(object): ...@@ -75,9 +76,10 @@ class WeatherBenchModel(object):
original_global_variables = tf.global_variables() original_global_variables = tf.global_variables()
# Architecture # Architecture
x_hat = self.build_model(self.x[:,0,:, :,0:1], self.filters, self.kernels) x_hat = self.build_model(self.x[:,0,:, :, :],self.filters, self.kernels)
# Loss # Loss
self.total_loss = l1_loss(self.x[:,0,:, :,0:1], x_hat)
self.total_loss = l1_loss(self.x[:,0,:, :,0], x_hat[:,:,:,0])
# Optimizer # Optimizer
self.train_op = tf.train.AdamOptimizer( self.train_op = tf.train.AdamOptimizer(
...@@ -86,6 +88,10 @@ class WeatherBenchModel(object): ...@@ -86,6 +88,10 @@ class WeatherBenchModel(object):
# outputs # outputs
self.outputs["total_loss"] = self.total_loss self.outputs["total_loss"] = self.total_loss
# inferences
self.outputs["gen_images"] = self.forecast(self.x[:,0,:, :,0:1], 12, self.filters, self.kernels)
# Summary op # Summary op
tf.summary.scalar("total_loss", self.total_loss) tf.summary.scalar("total_loss", self.total_loss)
self.summary_op = tf.summary.merge_all() self.summary_op = tf.summary.merge_all()
...@@ -100,13 +106,22 @@ class WeatherBenchModel(object): ...@@ -100,13 +106,22 @@ class WeatherBenchModel(object):
idx = 0 idx = 0
for f, k in zip(filters[:-1], kernels[:-1]): for f, k in zip(filters[:-1], kernels[:-1]):
print("1",x) print("1",x)
with tf.variable_scope("conv_layer_"+str(idx),reuse=tf.AUTO_REUSE):
x = ld.conv_layer(x, kernel_size=k, stride=1, num_features=f, idx="conv_layer_"+str(idx) , activate="leaky_relu") x = ld.conv_layer(x, kernel_size=k, stride=1, num_features=f, idx="conv_layer_"+str(idx) , activate="leaky_relu")
print("2",x) print("2",x)
idx += 1 idx += 1
with tf.variable_scope("Conv_last_layer",reuse=tf.AUTO_REUSE):
output = ld.conv_layer(x, kernel_size=kernels[-1], stride=1, num_features=filters[-1], idx="Conv_last_layer", activate="linear") output = ld.conv_layer(x, kernel_size=kernels[-1], stride=1, num_features=filters[-1], idx="Conv_last_layer", activate="linear")
print("output dimension", output)
return output return output
    def forecast(self, inputs, forecast_time, filters, kernels):
        """Produce one-step-ahead predictions for ``forecast_time`` timesteps.

        For each timestep ``i`` the (shared-weight) CNN ``build_model`` is
        applied to the ground-truth frame ``self.x[:, i]``, so this is a set of
        independent single-step predictions, not an autoregressive rollout.

        NOTE(review): the ``inputs`` argument is never used — every iteration
        reads ``self.x`` directly. Either ``inputs`` should feed the first step
        (and predictions be fed back for later steps) or the parameter should
        be dropped; confirm intent with the author.

        Args:
            inputs: unused (see NOTE above).
            forecast_time: number of lead times to predict (e.g. 12).
            filters: per-layer feature counts forwarded to ``build_model``.
            kernels: per-layer kernel sizes forwarded to ``build_model``.

        Returns:
            Tensor of stacked predictions, transposed so the time axis follows
            the batch axis — presumably (batch, time, height, width, channels);
            TODO confirm against ``build_model``'s output rank.
        """
        x_hat = []
        for i in range(forecast_time):
            # One forward pass per lead time; tf.AUTO_REUSE in build_model's
            # variable scopes makes all passes share the same weights.
            x_pred = self.build_model(self.x[:,i,:, :,:],filters,kernels)
            x_hat.append(x_pred)
        # Stack along a new leading axis (time), then swap to batch-major.
        x_hat = tf.stack(x_hat)
        x_hat = tf.transpose(x_hat, [1, 0, 2, 3, 4])
        return x_hat
0% — Loading.
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment.