Commit 2c5d5b52 authored by tomrink's avatar tomrink

snapshot...

parent 18584809
@@ -285,19 +285,21 @@ class Trainer(object):
                 for _step in decay_steps.copy():
                     if num_step >= _step:
                         decay_steps.pop(0)
-                        g_current_lr = self.strategy.reduce(
-                            tf.distribute.ReduceOp.MEAN,
-                            G_optimizer.learning_rate, axis=None)
-                        d_current_lr = self.strategy.reduce(
-                            tf.distribute.ReduceOp.MEAN,
-                            D_optimizer.learning_rate, axis=None)
-                        logging.debug(
-                            "Current LR: G = %s, D = %s" %
-                            (g_current_lr, d_current_lr))
-                        logging.debug(
-                            "[Phase 2] Decayed Learing Rate by %f." % decay_factor)
+                        # TDR, Let's don't print this out, causes an error in the next line
+                        # g_current_lr = self.strategy.reduce(
+                        #     tf.distribute.ReduceOp.MEAN,
+                        #     G_optimizer.learning_rate, axis=None)
+                        #
+                        # d_current_lr = self.strategy.reduce(
+                        #     tf.distribute.ReduceOp.MEAN,
+                        #     D_optimizer.learning_rate, axis=None)
+                        #
+                        # logging.debug(
+                        #     "Current LR: G = %s, D = %s" %
+                        #     (g_current_lr, d_current_lr))
+                        # logging.debug(
+                        #     "[Phase 2] Decayed Learing Rate by %f." % decay_factor)
                         G_optimizer.learning_rate.assign(G_optimizer.learning_rate * decay_factor)
                         D_optimizer.learning_rate.assign(D_optimizer.learning_rate * decay_factor)
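The commit keeps the learning-rate decay itself and only drops the per-replica reduce that was used for logging, since calling strategy.reduce on the optimizer's learning-rate variable was reported to cause an error. If that log line is still wanted, one option is to read the variable directly on the host instead of reducing it across replicas. Below is a minimal sketch of that idea, assuming learning_rate is an assignable tf.Variable as shown in the diff above; decay_learning_rate is a hypothetical helper, not part of this repository.

import logging

def decay_learning_rate(G_optimizer, D_optimizer, decay_factor):
    # Apply one decay step to both optimizers, exactly as the diff does.
    G_optimizer.learning_rate.assign(G_optimizer.learning_rate * decay_factor)
    D_optimizer.learning_rate.assign(D_optimizer.learning_rate * decay_factor)
    # Read the variable directly rather than going through strategy.reduce;
    # under a mirrored strategy the value is typically identical on every
    # replica, so no cross-replica reduction should be needed just to log it.
    logging.debug(
        "Current LR: G = %f, D = %f",
        float(G_optimizer.learning_rate.numpy()),
        float(D_optimizer.learning_rate.numpy()))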