Commit 070fd9fa authored by tomrink

snapshot...

parent 07cb75ab
@@ -92,14 +92,15 @@ class Trainer(object):
         logging.debug("Starting Distributed Step")
         with tf.GradientTape() as tape:
           fake = generator.unsigned_call(image_lr)
-          loss = utils.pixel_loss(image_hr, fake) * (1.0 / self.batch_size)
-          # loss = utils.pixel_loss_mse(image_hr, fake) * (1.0 / self.batch_size)
+          loss_mae = utils.pixel_loss(image_hr, fake) * (1.0 / self.batch_size)
+          # loss_mse = utils.pixel_loss_mse(image_hr, fake) * (1.0 / self.batch_size)
+          loss = loss_mae
+          mean_loss = metric(loss_mae)
         psnr_metric(tf.reduce_mean(tf.image.psnr(fake, image_hr, max_val=PSNR_MAX)))
         # gen_vars = list(set(generator.trainable_variables))
         gen_vars = generator.trainable_variables
         gradient = tape.gradient(loss, gen_vars)
         G_optimizer.apply_gradients(zip(gradient, gen_vars))
-        mean_loss = metric(loss)
         logging.debug("Ending Distributed Step")
         return tf.cast(G_optimizer.iterations, tf.float32)
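For context, the step above computes a per-pixel loss scaled by 1.0 / self.batch_size, the usual pattern for summing gradient contributions correctly across replicas under a distribution strategy. Below is a minimal sketch of the two loss helpers the diff toggles between, assuming utils.pixel_loss is mean absolute error and utils.pixel_loss_mse is mean squared error; the function names come from the diff, but these bodies are assumptions, not the repository's actual implementations.

    import tensorflow as tf

    def pixel_loss(y_true, y_pred):
        # Assumed implementation: mean absolute error (L1) over all pixels,
        # consistent with the "loss_mae" naming introduced in this commit.
        diff = tf.cast(y_true, tf.float32) - tf.cast(y_pred, tf.float32)
        return tf.reduce_mean(tf.abs(diff))

    def pixel_loss_mse(y_true, y_pred):
        # Assumed implementation: mean squared error (L2) alternative,
        # left commented out in this commit.
        diff = tf.cast(y_true, tf.float32) - tf.cast(y_pred, tf.float32)
        return tf.reduce_mean(tf.square(diff))

The commit itself changes only bookkeeping: the loss is bound to an explicit loss_mae name, the commented-out MSE variant is renamed to match, and the mean-loss metric update is moved before the gradient step so it records the same loss_mae value used for backpropagation.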