Commit 100113ed authored by tomrink

snapshot...

parent 61356bd7
@@ -31,6 +31,9 @@ CONV3D = False
 img_width = 16
 mean_std_file = '/Users/tomrink/data/icing/fovs_mean_std_day.pkl'
+f = open(mean_std_file, 'rb')
+mean_std_dct = pickle.load(f)
+f.close()
 train_params = ['cld_height_acha', 'cld_geo_thick', 'supercooled_cloud_fraction', 'cld_temp_acha', 'cld_press_acha',
                 'cld_reff_dcomp', 'cld_opd_dcomp', 'cld_cwp_dcomp', 'iwc_dcomp', 'lwc_dcomp']
@@ -186,7 +189,7 @@ class IcingIntensityNN:
         data = []
         for param in train_params:
             nda = self.h5f[param][nd_keys, ]
-            # nda = normalize(nda, param)
+            nda = normalize(nda, param, mean_std_dct)
             data.append(nda)
         data = np.stack(data)
         data = data.astype(np.float32)
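Note: the normalize helper itself is outside this diff. A minimal sketch of a per-parameter z-score against the pickled statistics, assuming mean_std_dct maps each parameter name to a (mean, std) pair (the real layout of fovs_mean_std_day.pkl may differ):

    import numpy as np

    def normalize(nda, param, mean_std_dct):
        # Hypothetical: standardize one field with its precomputed statistics.
        mean, std = mean_std_dct[param]
        return (nda - mean) / std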
@@ -212,11 +215,11 @@ class IcingIntensityNN:
         # if CACHE_DATA_IN_MEM:
         #     self.in_mem_data_cache[key] = (nda, ndb, ndc)

-        return data, data, label
+        return data, label

     @tf.function(input_signature=[tf.TensorSpec(None, tf.int32)])
     def data_function(self, indexes):
-        out = tf.numpy_function(self.get_in_mem_data_batch, [indexes], [tf.float32, tf.float32, tf.int32])
+        out = tf.numpy_function(self.get_in_mem_data_batch, [indexes], [tf.float32, tf.int32])
         return out

     def get_train_dataset(self, indexes):
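Note: the Tout list handed to tf.numpy_function must mirror the tuple returned by the wrapped Python function element for element, which is why dropping the duplicated data tensor changes both the return statement and the type list above. A self-contained illustration of the pattern (py_fetch is a stand-in, not the commit's code):

    import numpy as np
    import tensorflow as tf

    def py_fetch(indexes):
        # Stand-in for get_in_mem_data_batch: returns (features, labels).
        data = np.random.rand(len(indexes), 4).astype(np.float32)
        label = np.zeros(len(indexes), dtype=np.int32)
        return data, label

    @tf.function(input_signature=[tf.TensorSpec(None, tf.int32)])
    def data_function(indexes):
        # [tf.float32, tf.int32] must match py_fetch's return tuple.
        return tf.numpy_function(py_fetch, [indexes], [tf.float32, tf.int32])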
@@ -374,7 +377,7 @@ class IcingIntensityNN:
         self.logits = logits

     def build_training(self):
-        self.loss = tf.keras.losses.BinaryCrossentropy  # for two-class only
+        self.loss = tf.keras.losses.BinaryCrossentropy()  # for two-class only
         #self.loss = tf.keras.losses.SparseCategoricalCrossentropy()  # For multi-class

         # decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps)
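Note: adding the parentheses here fixes a genuine bug, not just style. Without them, self.loss held the BinaryCrossentropy class itself, so self.loss(labels, pred) would have tried to construct a loss object from (labels, pred) rather than compute a loss value. The corrected usage in isolation:

    import tensorflow as tf

    loss_fn = tf.keras.losses.BinaryCrossentropy()  # instantiate once
    bce = loss_fn(tf.constant([0., 1.]), tf.constant([0.1, 0.9]))
    # If the model emits raw logits rather than probabilities, construct
    # with tf.keras.losses.BinaryCrossentropy(from_logits=True) instead.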
@@ -414,8 +417,8 @@ class IcingIntensityNN:
     @tf.function
     def train_step(self, mini_batch):
-        inputs = [mini_batch[0], mini_batch[1]]
-        labels = mini_batch[2]
+        inputs = [mini_batch[0]]
+        labels = mini_batch[1]
         with tf.GradientTape() as tape:
             pred = self.model(inputs, training=True)
             loss = self.loss(labels, pred)
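Note: the gradient application falls outside this hunk. For context, a minimal standalone version of the same TF2 custom-training-step pattern (model, loss_fn, and optimizer here are illustrative, not taken from the commit):

    import tensorflow as tf

    model = tf.keras.Sequential([tf.keras.layers.Dense(1, activation='sigmoid')])
    loss_fn = tf.keras.losses.BinaryCrossentropy()
    optimizer = tf.keras.optimizers.Adam()

    @tf.function
    def train_step(inputs, labels):
        with tf.GradientTape() as tape:
            pred = model(inputs, training=True)
            loss = loss_fn(labels, pred)
        # Backpropagate and update weights, mirroring what follows the hunk.
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        return loss

    loss = train_step(tf.random.normal([8, 4]), tf.zeros([8, 1]))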
@@ -433,8 +436,8 @@ class IcingIntensityNN:
     @tf.function
     def test_step(self, mini_batch):
-        inputs = [mini_batch[0], mini_batch[1]]
-        labels = mini_batch[2]
+        inputs = [mini_batch[0]]
+        labels = mini_batch[1]
         pred = self.model(inputs, training=False)
         t_loss = self.loss(labels, pred)
@@ -442,8 +445,8 @@ class IcingIntensityNN:
         self.test_accuracy(labels, pred)

     def predict(self, mini_batch):
-        inputs = [mini_batch[0], mini_batch[1]]
-        labels = mini_batch[2]
+        inputs = [mini_batch[0]]
+        labels = mini_batch[1]
         pred = self.model(inputs, training=False)
         t_loss = self.loss(labels, pred)
@@ -473,8 +476,8 @@ class IcingIntensityNN:
         proc_batch_cnt = 0
         n_samples = 0

-        for data0, data1, label in self.train_dataset:
-            trn_ds = tf.data.Dataset.from_tensor_slices((data0, data1, label))
+        for data0, label in self.train_dataset:
+            trn_ds = tf.data.Dataset.from_tensor_slices((data0, label))
             trn_ds = trn_ds.batch(BATCH_SIZE)
             for mini_batch in trn_ds:
                 if self.learningRateSchedule is not None:
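Note: the training loop materializes each chunk coming out of data_function into an in-memory tf.data pipeline before batching. The same two-level pattern in isolation (array sizes and BATCH_SIZE are illustrative; the commit's constant is defined elsewhere):

    import numpy as np
    import tensorflow as tf

    BATCH_SIZE = 8
    data0 = np.random.rand(32, 4).astype(np.float32)  # one chunk of features
    label = np.zeros(32, dtype=np.int32)              # matching labels

    trn_ds = tf.data.Dataset.from_tensor_slices((data0, label)).batch(BATCH_SIZE)
    for mini_batch in trn_ds:
        features, labels = mini_batch  # mirrors (mini_batch[0], mini_batch[1])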
@@ -490,8 +493,8 @@ class IcingIntensityNN:
             self.test_loss.reset_states()
             self.test_accuracy.reset_states()

-            for data0_tst, data1_tst, label_tst in self.test_dataset:
-                tst_ds = tf.data.Dataset.from_tensor_slices((data0_tst, data1_tst, label_tst))
+            for data0_tst, label_tst in self.test_dataset:
+                tst_ds = tf.data.Dataset.from_tensor_slices((data0_tst, label_tst))
                 tst_ds = tst_ds.batch(BATCH_SIZE)
                 for mini_batch_test in tst_ds:
                     self.test_step(mini_batch_test)
@@ -517,8 +520,8 @@ class IcingIntensityNN:
         self.test_loss.reset_states()
         self.test_accuracy.reset_states()

-        for data0, data1, label in self.test_dataset:
-            ds = tf.data.Dataset.from_tensor_slices((data0, data1, label))
+        for data0, label in self.test_dataset:
+            ds = tf.data.Dataset.from_tensor_slices((data0, label))
             ds = ds.batch(BATCH_SIZE)
             for mini_batch in ds:
                 self.test_step(mini_batch)
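Note: reset_states() clears a tf.keras metric's accumulators so each evaluation pass starts fresh. The same behavior in isolation:

    import tensorflow as tf

    test_loss = tf.keras.metrics.Mean(name='test_loss')
    test_loss(0.5)
    test_loss.reset_states()           # discard the prior pass
    test_loss(0.2)
    test_loss(0.4)
    print(test_loss.result().numpy())  # 0.3: mean over the current pass only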