diff --git a/modules/deeplearning/srcnn_l1b_l2.py b/modules/deeplearning/srcnn_l1b_l2.py
index 41ad975c309e90a7a1dd56a4ede83bc9c1c22c41..3da783c7ee025aa60801f4ff744aeb0232518f00 100644
--- a/modules/deeplearning/srcnn_l1b_l2.py
+++ b/modules/deeplearning/srcnn_l1b_l2.py
@@ -24,7 +24,7 @@ else:
     NumLogits = NumClasses
 
 BATCH_SIZE = 128
-NUM_EPOCHS = 60
+NUM_EPOCHS = 80
 
 TRACK_MOVING_AVERAGE = False
 EARLY_STOP = True
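
Raising `NUM_EPOCHS` from 60 to 80 is low-risk here because `EARLY_STOP` is already enabled, so training can still halt before the cap once validation loss stops improving. The module's actual training loop is not part of this diff; the following is only a minimal patience-based sketch, where `train_one_epoch`, `evaluate_validation_loss`, and `PATIENCE` are hypothetical stand-ins:

```python
# Sketch only: train_one_epoch(), evaluate_validation_loss(), and PATIENCE are
# hypothetical stand-ins for whatever srcnn_l1b_l2.py actually uses.
PATIENCE = 5
best_val_loss = float('inf')
epochs_without_improvement = 0

for epoch in range(NUM_EPOCHS):              # now 80 instead of 60
    train_one_epoch(batch_size=BATCH_SIZE)   # one full pass over the training set
    val_loss = evaluate_validation_loss()

    if val_loss < best_val_loss:
        best_val_loss = val_loss
        epochs_without_improvement = 0
    else:
        epochs_without_improvement += 1

    if EARLY_STOP and epochs_without_improvement >= PATIENCE:
        break                                # stop well before the epoch cap
```
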
@@ -407,7 +407,7 @@ class SRCNN:
         activation = tf.nn.relu
         momentum = 0.99
 
-        num_filters = 64
+        num_filters = 128
 
         input_2d = self.inputs[0]
         print('input: ', input_2d.shape)
@@ -424,7 +424,7 @@ class SRCNN:
 
         conv_b = build_residual_conv2d_block(conv_b, num_filters, 'Residual_Block_2', kernel_size=3, scale=scale)
 
-        #conv_b = build_residual_conv2d_block(conv_b, num_filters, 'Residual_Block_3', kernel_size=3, scale=scale)
+        conv_b = build_residual_conv2d_block(conv_b, num_filters, 'Residual_Block_3', kernel_size=3, scale=scale)
 
         #conv_b = build_residual_conv2d_block(conv_b, num_filters, 'Residual_Block_4', kernel_size=3, scale=scale)
 
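Together with the earlier hunk that doubles `num_filters` from 64 to 128, re-enabling `Residual_Block_3` deepens the chain of `build_residual_conv2d_block` calls and widens every block. That helper's definition is not part of this diff; as a rough sketch, an EDSR-style residual block with a residual-scaling factor typically looks like the following (structure and layer choices here are assumptions, not the module's actual implementation):

```python
import tensorflow as tf

def build_residual_conv2d_block(inputs, num_filters, block_name, kernel_size=3, scale=1.0):
    """Illustrative residual block; the real helper in srcnn_l1b_l2.py may differ
    (e.g. batch normalization, different activation placement)."""
    with tf.name_scope(block_name):
        x = tf.keras.layers.Conv2D(num_filters, kernel_size, padding='same',
                                   activation=tf.nn.relu)(inputs)
        x = tf.keras.layers.Conv2D(num_filters, kernel_size, padding='same')(x)
        x = x * scale            # residual scaling keeps deep stacks stable
        return inputs + x        # identity skip connection
```
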
@@ -449,7 +449,7 @@ class SRCNN:
         self.loss = tf.keras.losses.MeanSquaredError()  # Regression
 
         # decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps)
-        initial_learning_rate = 0.002
+        initial_learning_rate = 0.005
         decay_rate = 0.95
         steps_per_epoch = int(self.num_data_samples/BATCH_SIZE)  # one epoch
         decay_steps = int(steps_per_epoch)
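
The comment retained above gives the schedule: decayed_learning_rate = initial_learning_rate * decay_rate ^ (global_step / decay_steps). As a minimal sketch, the values in this hunk map onto `tf.keras.optimizers.schedules.ExponentialDecay` as shown below; the sample count and the Adam optimizer are assumptions, since neither appears in the diff:

```python
import tensorflow as tf

# Values taken from this hunk; num_data_samples and the Adam optimizer are
# assumptions (the optimizer construction falls outside the hunk).
BATCH_SIZE = 128
num_data_samples = 100_000
initial_learning_rate = 0.005
decay_rate = 0.95
steps_per_epoch = int(num_data_samples / BATCH_SIZE)
decay_steps = int(steps_per_epoch)           # decay the rate once per epoch

lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate, decay_steps=decay_steps, decay_rate=decay_rate)

optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)

# After one epoch: 0.005 * 0.95**1 = 0.00475, versus 0.002 * 0.95 = 0.0019
# with the previous base rate.
```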