From 41292d1d54cd832f4e709ea424dd93432b01fdfe Mon Sep 17 00:00:00 2001
From: tomrink <rink@ssec.wisc.edu>
Date: Mon, 21 Aug 2023 15:59:33 -0500
Subject: [PATCH] snapshot...

---
 modules/GSOC/E2_ESRGAN/lib/dataset.py | 48 ++++++++++++++++-----------
 1 file changed, 28 insertions(+), 20 deletions(-)

diff --git a/modules/GSOC/E2_ESRGAN/lib/dataset.py b/modules/GSOC/E2_ESRGAN/lib/dataset.py
index b7485b7f..d9959205 100644
--- a/modules/GSOC/E2_ESRGAN/lib/dataset.py
+++ b/modules/GSOC/E2_ESRGAN/lib/dataset.py
@@ -137,22 +137,30 @@ class OpdNpyDataset:
     self.hr_size = hr_size
     self.lr_size = lr_size

-    def integer_gen(limit):
-      n = 0
-      while n < limit:
-        yield n
-        n += 1
-
-    num_gen = integer_gen(self.num_files)
-    gen = make_tf_callable_generator(num_gen)
-    dataset = tf.data.Dataset.from_generator(gen, output_types=tf.int32)
+    # def integer_gen(limit):
+    #   n = 0
+    #   while n < limit:
+    #     yield n
+    #     n += 1
+    #
+    # num_gen = integer_gen(self.num_files)
+    # gen = make_tf_callable_generator(num_gen)
+    # dataset = tf.data.Dataset.from_generator(gen, output_types=tf.int32)
+    # dataset = dataset.batch(batch_size)
+    # dataset = dataset.map(self.data_function, num_parallel_calls=8)
+    # # These execute w/o an iteration on dataset?
+    # # dataset = dataset.map(scale_down(), num_parallel_calls=1)
+    # # dataset = dataset.map(augment_image(), num_parallel_calls=1)
+    #
+    # dataset = dataset.cache()
+    # dataset = dataset.prefetch(buffer_size=1)
+
+    file_idxs = np.arange(self.num_files)
+    dataset = tf.data.Dataset.from_tensor_slices(list(file_idxs))
+    dataset = dataset.shuffle(2000, reshuffle_each_iteration=True)
     dataset = dataset.batch(batch_size)
     dataset = dataset.map(self.data_function, num_parallel_calls=8)
-    # These execute w/o an iteration on dataset?
-    # dataset = dataset.map(scale_down(), num_parallel_calls=1)
-    # dataset = dataset.map(augment_image(), num_parallel_calls=1)
-
-    dataset = dataset.cache()
+    # dataset = dataset.cache()
     dataset = dataset.prefetch(buffer_size=1)

     self.dataset = dataset
@@ -160,12 +168,12 @@ class OpdNpyDataset:
   def read_numpy_file_s(self, f_idxs):
     data_s = []
     for fi in f_idxs:
-      fname = self.filenames[fi]
-      data = np.load(fname)
-      data = data[0, ]
-      data = scale(data, 'cld_opd_dcomp', mean_std_dct)
-      data = data.astype(np.float32)
-      data_s.append(data)
+        fname = self.filenames[fi]
+        data = np.load(fname)
+        data = data[0, ]
+        data = scale(data, 'cld_opd_dcomp', mean_std_dct)
+        data = data.astype(np.float32)
+        data_s.append(data)
     hr_image = np.concatenate(data_s)
     hr_image = tf.expand_dims(hr_image, axis=3)
     hr_image = tf.image.crop_to_bounding_box(hr_image, 0, 0, self.hr_size, self.hr_size)
--
GitLab
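
Below is a minimal, self-contained sketch of the tf.data pattern this patch switches to: file indices held in memory via from_tensor_slices, reshuffled each epoch, then batched and mapped through a NumPy reader. The file list, sizes, and the simplified reader are illustrative assumptions, not the project's code; the repo's scale()/mean_std_dct normalization and make_tf_callable_generator helper are omitted, and only the pipeline calls mirror the patch.

import numpy as np
import tensorflow as tf

# Illustrative stand-ins for the patch's self.filenames / hr_size (assumptions).
filenames = ["patch_000.npy", "patch_001.npy"]   # each assumed to hold a (1, n, H, W) float array
num_files = len(filenames)
batch_size = 4
hr_size = 64

def read_numpy_file_s(f_idxs):
    # Simplified mirror of the patched reader, minus the statistics-based normalization:
    # load each file, drop the leading singleton axis, collect patches as float32.
    data_s = [np.load(filenames[int(fi)])[0, ].astype(np.float32) for fi in f_idxs]
    hr = np.concatenate(data_s)                  # (total_patches, H, W)
    return hr[:, :hr_size, :hr_size]             # crop, as crop_to_bounding_box does in the patch

def data_function(f_idxs):
    # tf.numpy_function runs the eager NumPy reader inside the tf.data graph.
    hr = tf.numpy_function(read_numpy_file_s, [f_idxs], tf.float32)
    return tf.expand_dims(hr, axis=3)            # add a channel axis, as in the patch

# In-memory index stream replacing the commented-out integer generator.
file_idxs = np.arange(num_files)
dataset = tf.data.Dataset.from_tensor_slices(file_idxs)
dataset = dataset.shuffle(2000, reshuffle_each_iteration=True)
dataset = dataset.batch(batch_size)
dataset = dataset.map(data_function, num_parallel_calls=8)
dataset = dataset.prefetch(buffer_size=1)

Dropping cache() here trades re-reading the .npy files every epoch for a smaller memory footprint, and with reshuffle_each_iteration=True the index order changes on each pass over the data.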