diff --git a/modules/deeplearning/icing_fcn.py b/modules/deeplearning/icing_fcn.py
index 21c646c41e8ff6a0179d3b9ca5c16729699b7ffc..7b7c5d5cbaa263543403b3a2ca6a64a0ea4b80fe 100644
--- a/modules/deeplearning/icing_fcn.py
+++ b/modules/deeplearning/icing_fcn.py
@@ -11,9 +11,6 @@ import h5py
 
 LOG_DEVICE_PLACEMENT = False
 
-# Manual (data, label) caching, but has been replaced with tf.data.dataset.cache()
-CACHE_DATA_IN_MEM = False
-
 PROC_BATCH_SIZE = 4096
 PROC_BATCH_BUFFER_SIZE = 50000
@@ -252,16 +249,6 @@ class IcingIntensityFCN:
 
     def get_in_mem_data_batch(self, idxs, is_training):
 
-        # Pretty much dead, but left in here for reference (See note above)
-        if CACHE_DATA_IN_MEM:
-            key = frozenset(idxs)
-            if is_training:
-                tup = self.in_mem_data_cache.get(key)
-            else:
-                tup = self.in_mem_data_cache_test(key)
-            if tup is not None:
-                return tup[0], tup[1], tup[2]
-
         # sort these to use as numpy indexing arrays
         nd_idxs = np.array(idxs)
         nd_idxs = np.sort(nd_idxs)
@@ -300,12 +287,6 @@ class IcingIntensityFCN:
         label = np.where(np.invert(np.logical_or(label == 0, label == 1)), 2, label)
         label = label.reshape((label.shape[0], 1))
 
-        if CACHE_DATA_IN_MEM:
-            if is_training:
-                self.in_mem_data_cache[key] = (data, data_alt, label)
-            else:
-                self.in_mem_data_cache_test[key] = (data, data_alt, label)
-
         if is_training and DO_AUGMENT:
             data_ud = np.flip(data, axis=1)
             data_alt_ud = np.copy(data_alt)