Skip to content
Snippets Groups Projects
Commit 100113ed authored by tomrink's avatar tomrink
Browse files

snapshot...

parent 61356bd7
Branches
No related tags found
No related merge requests found
......@@ -31,6 +31,9 @@ CONV3D = False
img_width = 16
mean_std_file = '/Users/tomrink/data/icing/fovs_mean_std_day.pkl'
f = open(mean_std_file, 'rb')
mean_std_dct = pickle.load(f)
f.close()
train_params = ['cld_height_acha', 'cld_geo_thick', 'supercooled_cloud_fraction', 'cld_temp_acha', 'cld_press_acha',
'cld_reff_dcomp', 'cld_opd_dcomp', 'cld_cwp_dcomp', 'iwc_dcomp', 'lwc_dcomp']
......@@ -186,7 +189,7 @@ class IcingIntensityNN:
data = []
for param in train_params:
nda = self.h5f[param][nd_keys, ]
# nda = normalize(nda, param)
nda = normalize(nda, param, mean_std_dct)
data.append(nda)
data = np.stack(data)
data = data.astype(np.float32)
......@@ -212,11 +215,11 @@ class IcingIntensityNN:
# if CACHE_DATA_IN_MEM:
# self.in_mem_data_cache[key] = (nda, ndb, ndc)
return data, data, label
return data, label
@tf.function(input_signature=[tf.TensorSpec(None, tf.int32)])
def data_function(self, indexes):
out = tf.numpy_function(self.get_in_mem_data_batch, [indexes], [tf.float32, tf.float32, tf.int32])
out = tf.numpy_function(self.get_in_mem_data_batch, [indexes], [tf.float32, tf.int32])
return out
def get_train_dataset(self, indexes):
......@@ -374,7 +377,7 @@ class IcingIntensityNN:
self.logits = logits
def build_training(self):
self.loss = tf.keras.losses.BinaryCrossentropy # for two-class only
self.loss = tf.keras.losses.BinaryCrossentropy() # for two-class only
#self.loss = tf.keras.losses.SparseCategoricalCrossentropy() # For multi-class
# decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps)
......@@ -414,8 +417,8 @@ class IcingIntensityNN:
@tf.function
def train_step(self, mini_batch):
inputs = [mini_batch[0], mini_batch[1]]
labels = mini_batch[2]
inputs = [mini_batch[0]]
labels = mini_batch[1]
with tf.GradientTape() as tape:
pred = self.model(inputs, training=True)
loss = self.loss(labels, pred)
......@@ -433,8 +436,8 @@ class IcingIntensityNN:
@tf.function
def test_step(self, mini_batch):
inputs = [mini_batch[0], mini_batch[1]]
labels = mini_batch[2]
inputs = [mini_batch[0]]
labels = mini_batch[1]
pred = self.model(inputs, training=False)
t_loss = self.loss(labels, pred)
......@@ -442,8 +445,8 @@ class IcingIntensityNN:
self.test_accuracy(labels, pred)
def predict(self, mini_batch):
inputs = [mini_batch[0], mini_batch[1]]
labels = mini_batch[2]
inputs = [mini_batch[0]]
labels = mini_batch[1]
pred = self.model(inputs, training=False)
t_loss = self.loss(labels, pred)
......@@ -473,8 +476,8 @@ class IcingIntensityNN:
proc_batch_cnt = 0
n_samples = 0
for data0, data1, label in self.train_dataset:
trn_ds = tf.data.Dataset.from_tensor_slices((data0, data1, label))
for data0, label in self.train_dataset:
trn_ds = tf.data.Dataset.from_tensor_slices((data0, label))
trn_ds = trn_ds.batch(BATCH_SIZE)
for mini_batch in trn_ds:
if self.learningRateSchedule is not None:
......@@ -490,8 +493,8 @@ class IcingIntensityNN:
self.test_loss.reset_states()
self.test_accuracy.reset_states()
for data0_tst, data1_tst, label_tst in self.test_dataset:
tst_ds = tf.data.Dataset.from_tensor_slices((data0_tst, data1_tst, label_tst))
for data0_tst, label_tst in self.test_dataset:
tst_ds = tf.data.Dataset.from_tensor_slices((data0_tst, label_tst))
tst_ds = tst_ds.batch(BATCH_SIZE)
for mini_batch_test in tst_ds:
self.test_step(mini_batch_test)
......@@ -517,8 +520,8 @@ class IcingIntensityNN:
self.test_loss.reset_states()
self.test_accuracy.reset_states()
for data0, data1, label in self.test_dataset:
ds = tf.data.Dataset.from_tensor_slices((data0, data1, label))
for data0, label in self.test_dataset:
ds = tf.data.Dataset.from_tensor_slices((data0, label))
ds = ds.batch(BATCH_SIZE)
for mini_batch in ds:
self.test_step(mini_batch)
......
......
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Please sign in to comment