diff --git a/modules/deeplearning/espcn.py b/modules/deeplearning/espcn.py
index 2240ad971d59b17d58b1a461680acf0d8607c8ad..b7e561c9385fb5d421370652690cded1f9c239c8 100644
--- a/modules/deeplearning/espcn.py
+++ b/modules/deeplearning/espcn.py
@@ -23,10 +23,10 @@ else:
     NumLogits = NumClasses
 
 BATCH_SIZE = 64
-NUM_EPOCHS = 60
+NUM_EPOCHS = 80
 
 TRACK_MOVING_AVERAGE = False
-EARLY_STOP = False
+EARLY_STOP = True
 
 NOISE_TRAINING = False
 NOISE_STDDEV = 0.10
@@ -366,7 +366,9 @@ class ESPCN:
 
         self.get_evaluate_dataset(idxs)
 
-    def build_espcn(self):
+    def build_espcn(self, do_drop_out=False, do_batch_norm=False, drop_rate=0.5):
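+        """Build the ESPCN graph; Dropout and/or BatchNormalization can be
+        enabled ahead of the interior convolutions via do_drop_out / do_batch_norm."""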
         print('build_cnn')
         # padding = "VALID"
         padding = "SAME"
@@ -389,12 +391,21 @@ class ESPCN:
         if NOISE_TRAINING:
             conv = tf.keras.layers.GaussianNoise(stddev=NOISE_STDDEV)(conv)
 
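+        # optional regularization, applied just before the convolution (this pattern repeats below)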
+        if do_drop_out:
+            conv = tf.keras.layers.Dropout(drop_rate)(conv)
+        if do_batch_norm:
+            conv = tf.keras.layers.BatchNormalization()(conv)
+
         conv = tf.keras.layers.Conv2D(num_filters, kernel_size=5, strides=1, padding=padding, activation=activation)(conv)
-        conv = tf.keras.layers.BatchNormalization()(conv)
         print(conv.shape)
 
+        if do_drop_out:
+            conv = tf.keras.layers.Dropout(drop_rate)(conv)
+        if do_batch_norm:
+            conv = tf.keras.layers.BatchNormalization()(conv)
+
         conv = tf.keras.layers.Conv2D(num_filters, kernel_size=3, strides=1, padding=padding, activation=activation)(conv)
-        conv = tf.keras.layers.BatchNormalization()(conv)
         print(conv.shape)
 
         # conv = tf.keras.layers.Conv2D(num_filters, kernel_size=3, strides=1, padding=padding, activation=None)(conv)
@@ -405,12 +416,20 @@ class ESPCN:
         # conv = tf.keras.layers.LeakyReLU()(conv)
         # print(conv.shape)
 
+        if do_drop_out:
+            conv = tf.keras.layers.Dropout(drop_rate)(conv)
+        if do_batch_norm:
+            conv = tf.keras.layers.BatchNormalization()(conv)
+
         conv = tf.keras.layers.Conv2D(num_filters // 2, kernel_size=3, strides=1, padding=padding, activation=activation)(conv)
-        conv = tf.keras.layers.BatchNormalization()(conv)
         print(conv.shape)
 
+        if do_drop_out:
+            conv = tf.keras.layers.Dropout(drop_rate)(conv)
+        if do_batch_norm:
+            conv = tf.keras.layers.BatchNormalization()(conv)
+
         conv = tf.keras.layers.Conv2D(num_filters // 2, kernel_size=3, strides=1, padding=padding, activation=activation)(conv)
-        conv = tf.keras.layers.BatchNormalization()(conv)
         print(conv.shape)
 
         # conv = tf.keras.layers.Conv2D(4, kernel_size=3, strides=1, padding=padding, activation=activation)(conv)
@@ -419,7 +438,13 @@ class ESPCN:
         conv = tf.keras.layers.Conv2DTranspose(num_filters // 4, kernel_size=3, strides=2, padding=padding, activation=activation)(conv)
         print(conv.shape)
 
-        self.logits = tf.keras.layers.Conv2D(1, kernel_size=1, strides=1, padding=padding, name='probability', activation=tf.nn.sigmoid)(conv)
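+        # a second, stride-1 transpose layer refines the upsampled features without further scaling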
+        conv = tf.keras.layers.Conv2DTranspose(num_filters // 4, kernel_size=3, strides=1, padding=padding, activation=activation)(conv)
+        print(conv.shape)
+
+        #self.logits = tf.keras.layers.Conv2D(1, kernel_size=1, strides=1, padding=padding, name='probability', activation=tf.nn.sigmoid)(conv)
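+        # linear output now (sigmoid removed); the layer name 'probability' is kept, and values are denormalized in do_evaluate()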
+        self.logits = tf.keras.layers.Conv2D(1, kernel_size=1, strides=1, padding=padding, name='probability')(conv)
 
         # conv = tf.nn.depth_to_space(conv, block_size=2)
         # conv = tf.keras.layers.Activation(activation=activation)(conv)
@@ -755,8 +780,9 @@ class ESPCN:
 
         pred = self.model([data])
         self.test_probs = pred
+        pred = pred.numpy()  # convert the eager tensor to an ndarray before denormalizing
 
-        return denormalize(pred, param, mean_std_dct[param])
+        return denormalize(pred, param, mean_std_dct)  # pass the full mean/std dict; denormalize selects the entry for param
 
     def run(self, directory):
         train_data_files = glob.glob(directory+'data_train*.npy')
@@ -780,10 +806,11 @@
 
     def run_evaluate(self, nda_lr, param, ckpt_dir):
         # self.setup_eval_pipeline(filename)
+        self.num_data_samples = 80000  # fixed placeholder; the build steps below expect a sample count
         self.build_model()
         self.build_training()
         self.build_evaluation()
-        self.do_evaluate(nda_lr, param, ckpt_dir)
+        return self.do_evaluate(nda_lr, param, ckpt_dir)
 
 
 if __name__ == "__main__":