diff --git a/modules/deeplearning/srcnn_l1b_l2.py b/modules/deeplearning/srcnn_l1b_l2.py
index cff7396a3abba2b09187d9b1ea964de6551b0036..3820f8beaa4d9a582457d846816074d0b1bb1ebd 100644
--- a/modules/deeplearning/srcnn_l1b_l2.py
+++ b/modules/deeplearning/srcnn_l1b_l2.py
@@ -19,7 +19,7 @@ from scipy.ndimage import gaussian_filter
 LOG_DEVICE_PLACEMENT = False
 
 PROC_BATCH_SIZE = 4
-PROC_BATCH_BUFFER_SIZE = 50000
+PROC_BATCH_BUFFER_SIZE = 5000
 
 NumClasses = 2
 if NumClasses == 2:
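
Shrinking PROC_BATCH_BUFFER_SIZE from 50000 to 5000 presumably targets host memory: if the constant feeds a tf.data shuffle buffer (an assumption; the consuming code is outside this hunk), the buffer holds that many decoded samples in RAM at once, so the change trades shuffle randomness for roughly a tenfold cut in resident memory. A minimal sketch of that tradeoff, assuming tf.data usage:

    import tensorflow as tf

    PROC_BATCH_SIZE = 4
    PROC_BATCH_BUFFER_SIZE = 5000

    ds = tf.data.Dataset.range(100_000)
    ds = ds.shuffle(PROC_BATCH_BUFFER_SIZE)  # keeps at most 5000 elements resident
    ds = ds.batch(PROC_BATCH_SIZE)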
@@ -35,7 +35,7 @@ EARLY_STOP = True
 
 NOISE_TRAINING = False
 NOISE_STDDEV = 0.01
-DO_AUGMENT = True
+DO_AUGMENT = False
 
 DO_SMOOTH = False
 SIGMA = 1.0
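
Turning DO_AUGMENT off disables whatever augmentation the flag guards; that code is outside this hunk. If it follows the usual super-resolution pattern, it would be paired geometric transforms applied identically to input and label, along the lines of this hypothetical sketch (function name and transforms assumed, not taken from the module):

    import numpy as np

    def augment(x, y, rng=np.random.default_rng()):
        # Apply the same random flip to input and label so they stay aligned.
        if rng.random() < 0.5:
            x, y = np.flip(x, axis=-1), np.flip(y, axis=-1)  # horizontal flip
        if rng.random() < 0.5:
            x, y = np.flip(x, axis=-2), np.flip(y, axis=-2)  # vertical flip
        return x, y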
@@ -267,7 +267,6 @@ class SRCNN:
         for param in data_params_half:
             idx = params.index(param)
             tmp = input_data[:, idx, :, :]
-            tmp = tmp.copy()
             tmp = np.where(np.isnan(tmp), 0, tmp)
             if DO_ESPCN:
                 tmp = tmp[:, slc_y_2, slc_x_2]
@@ -281,7 +280,6 @@ class SRCNN:
         for param in data_params_full:
             idx = params.index(param)
             tmp = input_data[:, idx, :, :]
-            tmp = tmp.copy()
             tmp = np.where(np.isnan(tmp), 0, tmp)
             # Full res:
             tmp = tmp[:, slc_y, slc_x]
@@ -291,7 +289,6 @@ class SRCNN:
             data_norm.append(tmp)
         # ---------------------------------------------------
         tmp = input_data[:, label_idx, :, :]
-        tmp = tmp.copy()
         tmp = np.where(np.isnan(tmp), 0, tmp)
         if DO_SMOOTH:
             tmp = smooth_2d(tmp, sigma=SIGMA)
@@ -316,7 +313,6 @@ class SRCNN:
         # -----------------------------------------------------
         # -----------------------------------------------------
         label = input_data[:, label_idx, :, :]
-        label = label.copy()
         if DO_SMOOTH:
             label = np.where(np.isnan(label), 0, label)
             label = smooth_2d(label, sigma=SIGMA)
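
All four removed tmp.copy() / label.copy() calls sat on slices of input_data, but each path shown here immediately feeds the slice to np.where(np.isnan(...), 0, ...), which allocates a fresh array rather than writing through the view, so the explicit copies were redundant. One caveat: in the @@ -316 hunk the np.where only runs inside the DO_SMOOTH branch; if the unseen else branch mutates label in place, the dropped copy did matter there. A quick check of the allocation behaviour:

    import numpy as np

    a = np.array([[1.0, np.nan], [3.0, 4.0]])
    view = a[:, :]                               # a view, like input_data[:, idx, :, :]
    cleaned = np.where(np.isnan(view), 0, view)  # allocates a new array
    cleaned[0, 0] = -99.0
    assert a[0, 0] == 1.0                        # the source array is untouched
    assert not np.shares_memory(cleaned, a)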
@@ -468,10 +464,10 @@ class SRCNN:
         self.loss = tf.keras.losses.MeanSquaredError()  # Regression
 
         # decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps)
-        initial_learning_rate = 0.005
+        initial_learning_rate = 0.002
         decay_rate = 0.95
         steps_per_epoch = int(self.num_data_samples/BATCH_SIZE)  # one epoch
-        decay_steps = int(steps_per_epoch)
+        decay_steps = int(steps_per_epoch) * 2
         print('initial rate, decay rate, steps/epoch, decay steps: ', initial_learning_rate, decay_rate, steps_per_epoch, decay_steps)
 
         self.learningRateSchedule = tf.keras.optimizers.schedules.ExponentialDecay(initial_learning_rate, decay_steps, decay_rate)
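
The schedule change both lowers the starting rate (0.005 to 0.002) and doubles the decay interval, so following the formula in the comment above, the rate now drops by a factor of 0.95 every two epochs rather than every one. A sketch of the resulting curve, with steps_per_epoch assumed since num_data_samples and BATCH_SIZE are not in this hunk:

    import tensorflow as tf

    steps_per_epoch = 1000  # assumed; the module computes num_data_samples / BATCH_SIZE
    schedule = tf.keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate=0.002,
        decay_steps=2 * steps_per_epoch,
        decay_rate=0.95,
    )
    for epoch in (0, 2, 4, 10):
        step = epoch * steps_per_epoch
        print(epoch, float(schedule(step)))  # 0.002, 0.0019, 0.001805, ~0.00155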