diff --git a/modules/deeplearning/icing_cnn.py b/modules/deeplearning/icing_cnn.py
index 5ac23b33ad3d48b8587cc9839381837f5f8bd567..260f36637f41cd884420403a3bb691f174f6bdf8 100644
--- a/modules/deeplearning/icing_cnn.py
+++ b/modules/deeplearning/icing_cnn.py
@@ -207,6 +207,7 @@ class IcingIntensityNN:
 
         self.X_img = tf.keras.Input(shape=(img_width, img_width, n_chans))
         self.inputs.append(self.X_img)
+        self.inputs.append(tf.keras.Input(5))
 
         self.DISK_CACHE = False
 
@@ -218,8 +219,7 @@
 
         tf.debugging.set_log_device_placement(LOG_DEVICE_PLACEMENT)
 
-        # Note: Don't do this anymore, because nobody else willing to do so as well!
-        # Also, doesn't seem to play well with SLURM
+        # Doesn't seem to play well with SLURM
        # gpus = tf.config.experimental.list_physical_devices('GPU')
        # if gpus:
        #     try:
@@ -242,7 +242,7 @@
             else:
                 tup = self.in_mem_data_cache_test(key)
             if tup is not None:
-                return tup[0], tup[1]
+                return tup[0], tup[1], tup[2]
 
         # sort these to use as numpy indexing arrays
         nd_idxs = np.array(idxs)
@@ -268,6 +268,8 @@
         data = data.astype(np.float32)
         data = np.transpose(data, axes=(1, 2, 3, 0))
 
+        data_alt = self.get_scalar_data(nd_idxs, is_training)
+
         label = self.get_label_data(nd_idxs, is_training)
         label = np.where(label == -1, 0, label)
 
@@ -282,11 +284,11 @@
 
         if CACHE_DATA_IN_MEM:
             if is_training:
-                self.in_mem_data_cache[key] = (data, label)
+                self.in_mem_data_cache[key] = (data, data_alt, label)
             else:
-                self.in_mem_data_cache_test[key] = (data, label)
+                self.in_mem_data_cache_test[key] = (data, data_alt, label)
 
-        return data, label
+        return data, data_alt, label
 
     def get_parameter_data(self, param, nd_idxs, is_training):
         if is_training:
@@ -318,16 +320,13 @@
             h5f = self.h5f_l2_tst
 
         nda = h5f[param][nd_idxs,]
-        b0 = np.logical_and(nda >= 0, nda < 2000)
-        b1 = np.logical_and(nda >= 2000, nda < 4000)
-        b2 = np.logical_and(nda >= 4000, nda < 6000)
-        b3 = np.logical_and(nda >= 6000, nda < 8000)
-        b4 = np.logical_and(nda >= 8000, nda < 15000)
-        nda[b0] = 0
-        nda[b1] = 1
-        nda[b2] = 2
-        nda[b3] = 3
-        nda[b4] = 4
+
+        nda[np.logical_and(nda >= 0, nda < 2000)] = 0
+        nda[np.logical_and(nda >= 2000, nda < 4000)] = 1
+        nda[np.logical_and(nda >= 4000, nda < 6000)] = 2
+        nda[np.logical_and(nda >= 6000, nda < 8000)] = 3
+        nda[np.logical_and(nda >= 8000, nda < 15000)] = 4
+        nda = tf.one_hot(nda, 5).numpy()
 
         return nda
 
@@ -368,21 +367,23 @@
         data = np.stack(data)
         data = data.astype(np.float32)
         data = np.transpose(data, axes=(1, 2, 3, 0))
+        # TODO: altitude data will be specified by user at run-time
 
         return data
 
     @tf.function(input_signature=[tf.TensorSpec(None, tf.int32)])
     def data_function(self, indexes):
-        out = tf.numpy_function(self.get_in_mem_data_batch_train, [indexes], [tf.float32, tf.int32])
+        out = tf.numpy_function(self.get_in_mem_data_batch_train, [indexes], [tf.float32, tf.float32, tf.int32])
         return out
 
     @tf.function(input_signature=[tf.TensorSpec(None, tf.int32)])
     def data_function_test(self, indexes):
-        out = tf.numpy_function(self.get_in_mem_data_batch_test, [indexes], [tf.float32, tf.int32])
+        out = tf.numpy_function(self.get_in_mem_data_batch_test, [indexes], [tf.float32, tf.float32, tf.int32])
         return out
 
     @tf.function(input_signature=[tf.TensorSpec(None, tf.int32)])
     def data_function_evaluate(self, indexes):
+        # TODO: modify for user specified altitude
         out = tf.numpy_function(self.get_in_mem_data_batch_eval, [indexes], tf.float32)
         return out
 
@@ -666,8 +667,8 @@ class IcingIntensityNN:
 
     @tf.function
     def train_step(self, mini_batch):
-        inputs = [mini_batch[0]]
-        labels = mini_batch[1]
+        inputs = [mini_batch[0], mini_batch[1]]
+        labels = mini_batch[2]
         with tf.GradientTape() as tape:
             pred = self.model(inputs, training=True)
             loss = self.loss(labels, pred)
@@ -685,8 +686,8 @@ class IcingIntensityNN:
 
     @tf.function
     def test_step(self, mini_batch):
-        inputs = [mini_batch[0]]
-        labels = mini_batch[1]
+        inputs = [mini_batch[0], mini_batch[1]]
+        labels = mini_batch[2]
         pred = self.model(inputs, training=False)
         t_loss = self.loss(labels, pred)
 
@@ -702,8 +703,8 @@ class IcingIntensityNN:
         self.test_false_pos(labels, pred)
 
     def predict(self, mini_batch):
-        inputs = [mini_batch[0]]
-        labels = mini_batch[1]
+        inputs = [mini_batch[0], mini_batch[1]]
+        labels = mini_batch[2]
         pred = self.model(inputs, training=False)
         t_loss = self.loss(labels, pred)
 
@@ -782,8 +783,8 @@ class IcingIntensityNN:
             proc_batch_cnt = 0
             n_samples = 0
 
-            for data0, label in self.train_dataset:
-                trn_ds = tf.data.Dataset.from_tensor_slices((data0, label))
+            for data0, data1, label in self.train_dataset:
+                trn_ds = tf.data.Dataset.from_tensor_slices((data0, data1, label))
                 trn_ds = trn_ds.batch(BATCH_SIZE)
                 for mini_batch in trn_ds:
                     if self.learningRateSchedule is not None:
@@ -797,8 +798,8 @@ class IcingIntensityNN:
                         tf.summary.scalar('num_epochs', epoch, step=step)
 
                         self.reset_test_metrics()
-                        for data0_tst, label_tst in self.test_dataset:
-                            tst_ds = tf.data.Dataset.from_tensor_slices((data0_tst, label_tst))
+                        for data0_tst, data1_tst, label_tst in self.test_dataset:
+                            tst_ds = tf.data.Dataset.from_tensor_slices((data0_tst, data1_tst, label_tst))
                             tst_ds = tst_ds.batch(BATCH_SIZE)
                             for mini_batch_test in tst_ds:
                                 self.test_step(mini_batch_test)
@@ -832,8 +833,8 @@ class IcingIntensityNN:
             total_time += (t1-t0)
 
         self.reset_test_metrics()
-        for data0, label in self.test_dataset:
-            ds = tf.data.Dataset.from_tensor_slices((data0, label))
+        for data0, data1, label in self.test_dataset:
+            ds = tf.data.Dataset.from_tensor_slices((data0, data1, label))
             ds = ds.batch(BATCH_SIZE)
             for mini_batch in ds:
                 self.test_step(mini_batch)
@@ -892,6 +893,7 @@ class IcingIntensityNN:
         # flat = tf.keras.layers.concatenate([flat, flat_1d, flat_anc])
         # flat = tf.keras.layers.concatenate([flat, flat_1d])
         # self.build_dnn(flat)
+        flat = tf.keras.layers.concatenate([flat, self.inputs[1]])
         self.build_dnn(flat)
 
         self.model = tf.keras.Model(self.inputs, self.logits)
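
A minimal, self-contained sketch (not part of the patch) of the two-input wiring this diff introduces: the 5-bin one-hot altitude vector produced by the `tf.one_hot(nda, 5)` binning above enters the graph as a second `tf.keras.Input` and is concatenated with the flattened CNN features ahead of the dense layers. All sizes and layer choices below are illustrative assumptions, not values from the repository.

```python
import numpy as np
import tensorflow as tf

IMG_WIDTH, N_CHANS, NUM_CLASSES = 16, 6, 5  # illustrative sizes only

# Two inputs, mirroring self.inputs in the patch: the image cube and the
# 5-bin one-hot flight-altitude vector.
x_img = tf.keras.Input(shape=(IMG_WIDTH, IMG_WIDTH, N_CHANS))
x_alt = tf.keras.Input(shape=(5,))

conv = tf.keras.layers.Conv2D(8, 3, activation='relu')(x_img)
flat = tf.keras.layers.Flatten()(conv)
# The key change in the last hunk: concatenate the scalar input with the
# flattened convolutional features before the dense head.
flat = tf.keras.layers.concatenate([flat, x_alt])
logits = tf.keras.layers.Dense(NUM_CLASSES)(flat)
model = tf.keras.Model([x_img, x_alt], logits)

# train_step/test_step/predict now unpack 3-tuples: (data, data_alt, label).
data = np.random.rand(4, IMG_WIDTH, IMG_WIDTH, N_CHANS).astype(np.float32)
data_alt = tf.one_hot(np.random.randint(0, 5, size=4), 5)  # one-hot altitude bins
pred = model([data, data_alt], training=False)
print(pred.shape)  # (4, 5)
```

Note the patch's own TODOs: `get_in_mem_data_batch_eval` and `data_function_evaluate` still yield only the image tensor, so the evaluate path will need the same second input once user-specified altitude is supported.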