diff --git a/modules/deeplearning/icing_fcn.py b/modules/deeplearning/icing_fcn.py
index 07aa8adfaf53ee2420b457d4d2849ca6efcb108c..2cb9642766a08e3c0a229085f3922ad8994ab87a 100644
--- a/modules/deeplearning/icing_fcn.py
+++ b/modules/deeplearning/icing_fcn.py
@@ -30,7 +30,7 @@
 TRIPLET = False
 CONV3D = False
 NOISE_TRAINING = True
-NOISE_STDDEV = 0.10
+NOISE_STDDEV = 0.01
 DO_AUGMENT = True
 
 img_width = 16
@@ -566,7 +566,7 @@ class IcingIntensityFCN:
         # activation = tf.nn.elu
         activation = tf.nn.leaky_relu
 
-        num_filters = len(self.train_params) * 10
+        num_filters = len(self.train_params) * 16
         input_2d = self.inputs[0]
 
         conv = tf.keras.layers.Conv2D(num_filters, kernel_size=5, strides=1, padding=padding, activation=None)(input_2d)
@@ -651,11 +651,11 @@ class IcingIntensityFCN:
 
         conv = build_residual_block_1x1(conv, num_filters, activation, 'Residual_Block_2', padding=padding)
 
-        conv = build_residual_block_1x1(conv, num_filters, activation, 'Residual_Block_3', padding=padding)
+        # conv = build_residual_block_1x1(conv, num_filters, activation, 'Residual_Block_3', padding=padding)
 
-        conv = build_residual_block_1x1(conv, num_filters, activation, 'Residual_Block_4', padding=padding)
+        # conv = build_residual_block_1x1(conv, num_filters, activation, 'Residual_Block_4', padding=padding)
 
-        conv = build_residual_block_1x1(conv, num_filters, activation, 'Residual_Block_5', padding=padding)
+        # conv = build_residual_block_1x1(conv, num_filters, activation, 'Residual_Block_5', padding=padding)
 
         print(conv.shape)
 
@@ -681,7 +681,7 @@ class IcingIntensityFCN:
         initial_learning_rate = 0.002
         decay_rate = 0.95
         steps_per_epoch = int(self.num_data_samples/BATCH_SIZE)  # one epoch
-        decay_steps = int(steps_per_epoch / 2)
+        decay_steps = int(steps_per_epoch)
         print('initial rate, decay rate, steps/epoch, decay steps: ', initial_learning_rate, decay_rate, steps_per_epoch, decay_steps)
         self.learningRateSchedule = tf.keras.optimizers.schedules.ExponentialDecay(initial_learning_rate, decay_steps,
                                                                                    decay_rate)