From 2c5d5b529b1192e07ca2e097d240276acab497c9 Mon Sep 17 00:00:00 2001 From: tomrink <rink@ssec.wisc.edu> Date: Wed, 23 Aug 2023 13:51:49 -0500 Subject: [PATCH] snapshot... --- modules/GSOC/E2_ESRGAN/lib/train.py | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/modules/GSOC/E2_ESRGAN/lib/train.py b/modules/GSOC/E2_ESRGAN/lib/train.py index 2920fc38..4294d634 100644 --- a/modules/GSOC/E2_ESRGAN/lib/train.py +++ b/modules/GSOC/E2_ESRGAN/lib/train.py @@ -285,19 +285,21 @@ class Trainer(object): for _step in decay_steps.copy(): if num_step >= _step: decay_steps.pop(0) - g_current_lr = self.strategy.reduce( - tf.distribute.ReduceOp.MEAN, - G_optimizer.learning_rate, axis=None) - - d_current_lr = self.strategy.reduce( - tf.distribute.ReduceOp.MEAN, - D_optimizer.learning_rate, axis=None) - - logging.debug( - "Current LR: G = %s, D = %s" % - (g_current_lr, d_current_lr)) - logging.debug( - "[Phase 2] Decayed Learing Rate by %f." % decay_factor) + # TDR, Let's not print this out, causes an error in the next line + # g_current_lr = self.strategy.reduce( + # tf.distribute.ReduceOp.MEAN, + # G_optimizer.learning_rate, axis=None) + # + # d_current_lr = self.strategy.reduce( + # tf.distribute.ReduceOp.MEAN, + # D_optimizer.learning_rate, axis=None) + # + # logging.debug( + # "Current LR: G = %s, D = %s" % + # (g_current_lr, d_current_lr)) + # logging.debug( + # "[Phase 2] Decayed Learning Rate by %f." % decay_factor) + G_optimizer.learning_rate.assign(G_optimizer.learning_rate * decay_factor) D_optimizer.learning_rate.assign(D_optimizer.learning_rate * decay_factor) -- GitLab