From 3d1692f8f04dc9c5b34c3ab8189932436064fdd8 Mon Sep 17 00:00:00 2001
From: tomrink <rink@ssec.wisc.edu>
Date: Fri, 7 May 2021 13:46:01 -0500
Subject: [PATCH] snapshot: per-split HDF5 handles, tf.data caching, hyperparameter changes

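get_in_mem_data_batch() takes a new is_training flag and reads from a
per-split HDF5 handle (self.h5f_trn or self.h5f_tst) instead of the
single self.h5f; both handles are closed once the train loop finishes.
With tf.data now caching decoded batches via dataset.cache(), the
in-memory batch cache is disabled (CACHE_DATA_IN_MEM = False) and the
training-set shuffle is commented out. NumClasses drops from 3 to 2 and
NUM_EPOCHS rises from 50 to 200. Because cache() sits after map(), the
HDF5 reads and normalization run only on the first pass; later epochs
replay cached batches and, with the shuffle disabled, batches arrive
in a fixed order.

A minimal sketch of the surrounding code this patch assumes -- the
h5py handle setup, the split-specific wrappers, and the data_function
bridge below are illustrative assumptions, since the hunks do not show
them:

    import h5py
    import tensorflow as tf

    class IcingIntensityNN:

        def setup_pipeline(self, filename_trn, filename_tst,
                           trn_idxs=None, tst_idxs=None, seed=None):
            # Assumed: one read-only handle per data split, closed in
            # the train loop's teardown (see the final hunk).
            self.h5f_trn = h5py.File(filename_trn, 'r')
            self.h5f_tst = h5py.File(filename_tst, 'r')

        def get_in_mem_data_batch_trn(self, idxs):
            # Hypothetical wrapper: training split.
            return self.get_in_mem_data_batch(idxs, is_training=True)

        def get_in_mem_data_batch_tst(self, idxs):
            # Hypothetical wrapper: test split.
            return self.get_in_mem_data_batch(idxs, is_training=False)

        def data_function(self, indexes):
            # Presumed bridge from tf.data into the numpy/h5py loader.
            return tf.numpy_function(self.get_in_mem_data_batch_trn,
                                     [indexes], [tf.float32, tf.int32])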
---
 modules/deeplearning/icing_cnn.py | 24 ++++++++++++++++--------
 1 file changed, 16 insertions(+), 8 deletions(-)

diff --git a/modules/deeplearning/icing_cnn.py b/modules/deeplearning/icing_cnn.py
index f771dd28..9128ae7e 100644
--- a/modules/deeplearning/icing_cnn.py
+++ b/modules/deeplearning/icing_cnn.py
@@ -13,18 +13,17 @@ from util.plot_cm import plot_confusion_matrix
 
 LOG_DEVICE_PLACEMENT = False
 
-CACHE_DATA_IN_MEM = True
+CACHE_DATA_IN_MEM = False
 
 PROC_BATCH_SIZE = 4096
 PROC_BATCH_BUFFER_SIZE = 50000
-NumClasses = 3
+NumClasses = 2
 NumLogits = 1
 BATCH_SIZE = 256
-NUM_EPOCHS = 50
+NUM_EPOCHS = 200
 
 TRACK_MOVING_AVERAGE = False
 
-
 TRIPLET = False
 CONV3D = False
 
@@ -187,7 +186,11 @@ class IcingIntensityNN:
                 # Memory growth must be set before GPUs have been initialized
                 print(e)
 
-    def get_in_mem_data_batch(self, idxs):
+    def get_in_mem_data_batch(self, idxs, is_training):
+        h5f = self.h5f_trn
+        if not is_training:
+            h5f = self.h5f_tst
+
         key = frozenset(idxs)
 
         if CACHE_DATA_IN_MEM:
@@ -201,14 +204,14 @@ class IcingIntensityNN:
 
         data = []
         for param in train_params:
-            nda = self.h5f[param][nd_idxs, ]
+            nda = h5f[param][nd_idxs, ]
             nda = normalize(nda, param, mean_std_dct)
             data.append(nda)
         data = np.stack(data)
         data = data.astype(np.float32)
         data = np.transpose(data, axes=(1, 2, 3, 0))
 
-        label = self.h5f['icing_intensity'][nd_idxs]
+        label = h5f['icing_intensity'][nd_idxs]
         label = label.astype(np.int32)
         label = np.where(label == -1, 0, label)
 
@@ -256,7 +259,8 @@ class IcingIntensityNN:
         dataset = tf.data.Dataset.from_tensor_slices(indexes)
         dataset = dataset.batch(PROC_BATCH_SIZE)
         dataset = dataset.map(self.data_function, num_parallel_calls=8)
-        dataset = dataset.shuffle(PROC_BATCH_BUFFER_SIZE)
+        dataset = dataset.cache()
+        # dataset = dataset.shuffle(PROC_BATCH_BUFFER_SIZE)
         dataset = dataset.prefetch(buffer_size=1)
         self.train_dataset = dataset
 
@@ -266,6 +270,7 @@ class IcingIntensityNN:
         dataset = tf.data.Dataset.from_tensor_slices(indexes)
         dataset = dataset.batch(PROC_BATCH_SIZE)
         dataset = dataset.map(self.data_function, num_parallel_calls=8)
+        dataset = dataset.cache()
         self.test_dataset = dataset
 
     def setup_pipeline(self, filename_trn, filename_tst, trn_idxs=None, tst_idxs=None, seed=None):
@@ -621,6 +626,9 @@ class IcingIntensityNN:
         self.writer_train.close()
         self.writer_valid.close()
 
+        self.h5f_trn.close()
+        self.h5f_tst.close()
+
     def build_model(self):
         flat = self.build_cnn()
         # flat_1d = self.build_1d_cnn()
-- 
GitLab