diff --git a/modules/deeplearning/icing_cnn.py b/modules/deeplearning/icing_cnn.py
index 6910d031d642d647c65cae2931e8cbe82f9a4e82..c1eb18f1f30ad11e41161a421ddbb8243db6e9df 100644
--- a/modules/deeplearning/icing_cnn.py
+++ b/modules/deeplearning/icing_cnn.py
@@ -811,7 +811,7 @@ class IcingIntensityNN:
 
         step = 0
         total_time = 0
-        best_test_loss = np.finfo(dtype=np.float).max
+        best_test_loss = np.finfo(dtype=np.float32).max
         best_test_acc = 0
         best_test_recall = 0
         best_test_precision = 0
@@ -842,7 +842,7 @@ class IcingIntensityNN:
 
                 with self.writer_train.as_default():
                     tf.summary.scalar('loss_trn', loss.numpy(), step=step)
-                    tf.summary.scalar('learning_rate', self.optimizer._decayed_lr('float32').numpy(), step=step)
+                    tf.summary.scalar('learning_rate', self.optimizer.lr.numpy(), step=step)
                     tf.summary.scalar('num_train_steps', step, step=step)
                     tf.summary.scalar('num_epochs', epoch, step=step)
 
@@ -869,7 +869,7 @@ class IcingIntensityNN:
                     tf.summary.scalar('num_epochs', epoch, step=step)
 
                 print('****** test loss, acc, lr: ', self.test_loss.result().numpy(), self.test_accuracy.result().numpy(),
-                      self.optimizer._decayed_lr('float32').numpy())
+                      self.optimizer.lr.numpy())
 
                 step += 1
             print('train loss: ', loss.numpy())