diff --git a/modules/deeplearning/icing.py b/modules/deeplearning/icing.py
index c5a651e80a722cdb4e02a9e8be1913394c17dcb4..ee2e13ed70e83d0d39cb44ead6a594175c69fba1 100644
--- a/modules/deeplearning/icing.py
+++ b/modules/deeplearning/icing.py
@@ -489,8 +489,8 @@ class IcingIntensityNN:
         proc_batch_cnt = 0
         n_samples = 0
 
-        for abi, temp, lbfp in self.train_dataset:
-            trn_ds = tf.data.Dataset.from_tensor_slices((abi, temp, lbfp))
+        for data0, data1, label in self.train_dataset:
+            trn_ds = tf.data.Dataset.from_tensor_slices((data0, data1, label))
             trn_ds = trn_ds.batch(BATCH_SIZE)
             for mini_batch in trn_ds:
                 if self.learningRateSchedule is not None:
@@ -506,8 +506,8 @@ class IcingIntensityNN:
                     self.test_loss.reset_states()
                     self.test_accuracy.reset_states()
 
-                    for abi_tst, temp_tst, lbfp_tst in self.test_dataset:
-                        tst_ds = tf.data.Dataset.from_tensor_slices((abi_tst, temp_tst, lbfp_tst))
+                    for data0_tst, data1_tst, label_tst in self.test_dataset:
+                        tst_ds = tf.data.Dataset.from_tensor_slices((data0_tst, data1_tst, label_tst))
                         tst_ds = tst_ds.batch(BATCH_SIZE)
                         for mini_batch_test in tst_ds:
                             self.test_step(mini_batch_test)
@@ -524,7 +524,7 @@ class IcingIntensityNN:
                 print('train loss: ', loss.numpy())
 
             proc_batch_cnt += 1
-            n_samples += abi.shape[0]
+            n_samples += data0.shape[0]
             print('proc_batch_cnt: ', proc_batch_cnt, n_samples)
 
         t1 = datetime.datetime.now().timestamp()
@@ -533,8 +533,8 @@ class IcingIntensityNN:
         self.test_loss.reset_states()
         self.test_accuracy.reset_states()
 
-        for abi, temp, lbfp in self.test_dataset:
-            ds = tf.data.Dataset.from_tensor_slices((abi, temp, lbfp))
+        for data0, data1, label in self.test_dataset:
+            ds = tf.data.Dataset.from_tensor_slices((data0, data1, label))
             ds = ds.batch(BATCH_SIZE)
             for mini_batch in ds:
                 self.test_step(mini_batch)
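
For context, below is a minimal, self-contained sketch (not part of the patch) of the per-group batching pattern the renamed loops use: each element yielded by the outer dataset is a pre-loaded group of tensors (data0, data1, label), and an inner tf.data.Dataset re-slices that group into mini-batches. BATCH_SIZE, the array shapes, and the `groups` list are illustrative assumptions, not values taken from icing.py.

    # Sketch of the pattern used in IcingIntensityNN's train/test loops (assumed setup).
    import numpy as np
    import tensorflow as tf

    BATCH_SIZE = 4  # assumed value for illustration

    # Hypothetical stand-in for self.train_dataset: two groups of 10 samples each.
    groups = [
        (np.random.rand(10, 8).astype(np.float32),    # data0: first input group
         np.random.rand(10, 3).astype(np.float32),    # data1: second input group
         np.random.randint(0, 2, size=(10,)))         # label: icing / no icing
        for _ in range(2)
    ]

    n_samples = 0
    for data0, data1, label in groups:
        # Re-slice the pre-loaded group into mini-batches along the sample axis.
        trn_ds = tf.data.Dataset.from_tensor_slices((data0, data1, label))
        trn_ds = trn_ds.batch(BATCH_SIZE)
        for mini_batch in trn_ds:
            b0, b1, blbl = mini_batch  # each has leading dimension <= BATCH_SIZE
            print(b0.shape, b1.shape, blbl.shape)
        n_samples += data0.shape[0]   # count samples per group, as in the diff
    print('total samples:', n_samples)

With two groups of 10 samples and BATCH_SIZE = 4, each group yields mini-batches of 4, 4, and 2 samples, and n_samples ends at 20, matching how the patched loop accumulates data0.shape[0] once per group rather than per mini-batch.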