diff --git a/modules/GSOC/E2_ESRGAN/lib/dataset.py b/modules/GSOC/E2_ESRGAN/lib/dataset.py
index b7485b7f1347df6dd118e93e3daea2f5e7afb219..d995920521420b57388b57e50a2af6710f02cbb3 100644
--- a/modules/GSOC/E2_ESRGAN/lib/dataset.py
+++ b/modules/GSOC/E2_ESRGAN/lib/dataset.py
@@ -137,22 +137,16 @@ class OpdNpyDataset:
         self.hr_size = hr_size
         self.lr_size = lr_size
 
-        def integer_gen(limit):
-            n = 0
-            while n < limit:
-                yield n
-                n += 1
-
-        num_gen = integer_gen(self.num_files)
-        gen = make_tf_callable_generator(num_gen)
-        dataset = tf.data.Dataset.from_generator(gen, output_types=tf.int32)
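+        # Build the input pipeline from an index tensor rather than a Python
+        # integer generator so the file order can be reshuffled each epoch.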
+        file_idxs = np.arange(self.num_files)
+        dataset = tf.data.Dataset.from_tensor_slices(file_idxs)
+        dataset = dataset.shuffle(2000, reshuffle_each_iteration=True)
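+        # batch() precedes map() so data_function receives a batch of indices.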
         dataset = dataset.batch(batch_size)
         dataset = dataset.map(self.data_function, num_parallel_calls=8)
-        # These execute w/o an iteration on dataset?
-        # dataset = dataset.map(scale_down(), num_parallel_calls=1)
-        # dataset = dataset.map(augment_image(), num_parallel_calls=1)
-
-        dataset = dataset.cache()
+        # cache() removed: replaying cached first-epoch batches would defeat
+        # reshuffle_each_iteration.
         dataset = dataset.prefetch(buffer_size=1)
 
         self.dataset = dataset
@@ -160,12 +154,14 @@
     def read_numpy_file_s(self, f_idxs):
         data_s = []
         for fi in f_idxs:
-                fname = self.filenames[fi]
-                data = np.load(fname)
-                data = data[0, ]
-                data = scale(data, 'cld_opd_dcomp', mean_std_dct)
-                data = data.astype(np.float32)
-                data_s.append(data)
+            fname = self.filenames[fi]
+            data = np.load(fname)
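+            # keep only the first slice along the leading axis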
+            data = data[0, ]
+            data = scale(data, 'cld_opd_dcomp', mean_std_dct)
+            data = data.astype(np.float32)
+            data_s.append(data)
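+        # Stack the per-file arrays along the first axis, then add a channel
+        # dimension and crop the upper-left hr_size x hr_size patch.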
         hr_image = np.concatenate(data_s)
         hr_image = tf.expand_dims(hr_image, axis=3)
         hr_image = tf.image.crop_to_bounding_box(hr_image, 0, 0, self.hr_size, self.hr_size)