Commit 41292d1d authored by tomrink

snapshot...

parent 86c08ace
@@ -137,22 +137,30 @@ class OpdNpyDataset:
         self.hr_size = hr_size
         self.lr_size = lr_size
 
-        def integer_gen(limit):
-            n = 0
-            while n < limit:
-                yield n
-                n += 1
-
-        num_gen = integer_gen(self.num_files)
-        gen = make_tf_callable_generator(num_gen)
-        dataset = tf.data.Dataset.from_generator(gen, output_types=tf.int32)
+        # def integer_gen(limit):
+        #     n = 0
+        #     while n < limit:
+        #         yield n
+        #         n += 1
+        #
+        # num_gen = integer_gen(self.num_files)
+        # gen = make_tf_callable_generator(num_gen)
+        # dataset = tf.data.Dataset.from_generator(gen, output_types=tf.int32)
+        # dataset = dataset.batch(batch_size)
+        # dataset = dataset.map(self.data_function, num_parallel_calls=8)
+        # # These execute w/o an iteration on dataset?
+        # # dataset = dataset.map(scale_down(), num_parallel_calls=1)
+        # # dataset = dataset.map(augment_image(), num_parallel_calls=1)
+        #
+        # dataset = dataset.cache()
+        # dataset = dataset.prefetch(buffer_size=1)
+
+        file_idxs = np.arange(self.num_files)
+        dataset = tf.data.Dataset.from_tensor_slices(list(file_idxs))
+        dataset = dataset.shuffle(2000, reshuffle_each_iteration=True)
         dataset = dataset.batch(batch_size)
         dataset = dataset.map(self.data_function, num_parallel_calls=8)
-        # These execute w/o an iteration on dataset?
-        # dataset = dataset.map(scale_down(), num_parallel_calls=1)
-        # dataset = dataset.map(augment_image(), num_parallel_calls=1)
-
-        dataset = dataset.cache()
+        # dataset = dataset.cache()
         dataset = dataset.prefetch(buffer_size=1)
         self.dataset = dataset
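The hunk above replaces the generator-driven pipeline (tf.data.Dataset.from_generator over a hand-rolled integer generator) with from_tensor_slices over a plain index array, and adds shuffle(2000, reshuffle_each_iteration=True) so the file order is re-randomized every epoch. cache() is commented out, presumably because replaying a cache that sits downstream of shuffle() would freeze the first epoch's order. Below is a minimal, self-contained sketch of the new wiring; num_files, batch_size, and the identity stand-in for self.data_function are illustrative, not values from this repository:

import numpy as np
import tensorflow as tf

num_files = 100   # illustrative; the real value comes from the dataset's file list
batch_size = 8

# Shuffle cheap integer indices up front, so the expensive per-file load
# inside map() only runs on indices that actually reach a batch.
file_idxs = np.arange(num_files)
dataset = tf.data.Dataset.from_tensor_slices(list(file_idxs))
dataset = dataset.shuffle(2000, reshuffle_each_iteration=True)
dataset = dataset.batch(batch_size)
dataset = dataset.map(lambda idxs: tf.cast(idxs, tf.float32),  # stand-in for self.data_function
                      num_parallel_calls=8)
dataset = dataset.prefetch(buffer_size=1)

for batch in dataset.take(2):
    print(batch.shape)   # (8,): one batch of shuffled file indices

Shuffling lightweight indices rather than loaded tensors keeps the shuffle buffer small (2000 ints instead of 2000 decoded image batches), which is the usual reason for restructuring a pipeline this way.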
@@ -160,12 +168,12 @@ class OpdNpyDataset:
     def read_numpy_file_s(self, f_idxs):
         data_s = []
         for fi in f_idxs:
             fname = self.filenames[fi]
             data = np.load(fname)
             data = data[0, ]
             data = scale(data, 'cld_opd_dcomp', mean_std_dct)
             data = data.astype(np.float32)
             data_s.append(data)
         hr_image = np.concatenate(data_s)
         hr_image = tf.expand_dims(hr_image, axis=3)
         hr_image = tf.image.crop_to_bounding_box(hr_image, 0, 0, self.hr_size, self.hr_size)
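read_numpy_file_s runs in plain Python (np.load, np.concatenate), which cannot execute inside the graph that dataset.map() traces, so the integer indices coming out of the pipeline must reach it through a host-side bridge. The commit does not show data_function itself; the sketch below assumes a tf.numpy_function bridge, and the file names, array shapes, and crop size are all illustrative:

import numpy as np
import tensorflow as tf

filenames = ["f0.npy", "f1.npy", "f2.npy", "f3.npy"]   # hypothetical paths
for f in filenames:                                    # fake files so the sketch runs
    np.save(f, np.random.rand(1, 4, 8, 8).astype(np.float32))

def read_numpy_file_s(f_idxs):
    # plain-Python reader mirroring the method above (minus the scale() step)
    data_s = [np.load(filenames[fi])[0, ].astype(np.float32) for fi in f_idxs]
    return np.concatenate(data_s)

def data_function(f_idxs):
    # np.load cannot run inside a TF graph, so hand the index batch to Python
    hr = tf.numpy_function(read_numpy_file_s, [f_idxs], tf.float32)
    hr.set_shape([None, None, None])   # numpy_function drops static shape info
    hr = tf.expand_dims(hr, axis=3)
    return tf.image.crop_to_bounding_box(hr, 0, 0, 8, 8)

ds = tf.data.Dataset.from_tensor_slices(np.arange(len(filenames)))
ds = ds.batch(2).map(data_function, num_parallel_calls=8)
for hr in ds.take(1):
    print(hr.shape)   # (8, 8, 8, 1): two files of 4 frames each, cropped

The set_shape call matters in practice: tf.numpy_function returns a tensor of unknown rank, and tf.image.crop_to_bounding_box needs a statically known rank of 3 or 4 to trace.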