diff --git a/modules/GSOC/E2_ESRGAN/lib/train.py b/modules/GSOC/E2_ESRGAN/lib/train.py
index 2920fc3884c1b8b67e25e727841bd7320580bb45..4294d634fcbd3cc04fc93ad943243fe31f18f5ca 100644
--- a/modules/GSOC/E2_ESRGAN/lib/train.py
+++ b/modules/GSOC/E2_ESRGAN/lib/train.py
@@ -285,19 +285,21 @@ class Trainer(object):
       for _step in decay_steps.copy():
         if num_step >= _step:
           decay_steps.pop(0)
-          g_current_lr = self.strategy.reduce(
-              tf.distribute.ReduceOp.MEAN,
-              G_optimizer.learning_rate, axis=None)
-
-          d_current_lr = self.strategy.reduce(
-              tf.distribute.ReduceOp.MEAN,
-              D_optimizer.learning_rate, axis=None)
-
-          logging.debug(
-              "Current LR: G = %s, D = %s" %
-              (g_current_lr, d_current_lr))
-          logging.debug(
-              "[Phase 2] Decayed Learing Rate by %f." % decay_factor)
+          # TDR: don't log the learning rates here; the strategy.reduce call causes an error in the next line.
+          # g_current_lr = self.strategy.reduce(
+          #     tf.distribute.ReduceOp.MEAN,
+          #     G_optimizer.learning_rate, axis=None)
+          #
+          # d_current_lr = self.strategy.reduce(
+          #     tf.distribute.ReduceOp.MEAN,
+          #     D_optimizer.learning_rate, axis=None)
+          #
+          # logging.debug(
+          #     "Current LR: G = %s, D = %s" %
+          #     (g_current_lr, d_current_lr))
+          # logging.debug(
+          #     "[Phase 2] Decayed Learing Rate by %f." % decay_factor)
+
           G_optimizer.learning_rate.assign(G_optimizer.learning_rate * decay_factor)
           D_optimizer.learning_rate.assign(D_optimizer.learning_rate * decay_factor)
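
For reference, a minimal standalone sketch of the same multiplicative decay, assuming TF 2.x in eager mode; the Adam optimizers, learning-rate values, and the 0.5 decay factor are placeholders, not the repo's settings. It logs the decayed learning rates by reading the optimizer's scalar variable directly, avoiding the strategy.reduce call that the TDR comment disables; whether that direct read is acceptable under the repo's distribution strategy is an assumption here, and the diff itself simply drops the log.

    import tensorflow as tf
    from absl import logging

    logging.set_verbosity(logging.DEBUG)

    decay_factor = 0.5  # placeholder; the trainer reads the real factor from its settings
    G_optimizer = tf.keras.optimizers.Adam(learning_rate=2e-4)
    D_optimizer = tf.keras.optimizers.Adam(learning_rate=2e-4)

    # Multiplicative decay, mirroring the assignments kept by this diff.
    G_optimizer.learning_rate.assign(G_optimizer.learning_rate * decay_factor)
    D_optimizer.learning_rate.assign(D_optimizer.learning_rate * decay_factor)

    # Reading the scalar learning-rate variables directly is enough for a
    # debug log; no cross-replica reduce is performed.
    logging.debug(
        "Current LR: G = %s, D = %s",
        G_optimizer.learning_rate.numpy(),
        D_optimizer.learning_rate.numpy())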