Commit 7718854a authored by tomrink

snapshot...

parent 92e5ab31
@@ -314,70 +314,6 @@ class SRCNN:
     def get_in_mem_data_batch_test(self, idxs):
         return self.get_in_mem_data_batch(idxs, False)
 
-    def get_in_mem_data_batch_eval(self, idxs):
-        in_file = '/home/rink/data/clavrx_snpp_day/clavrx_VNP02MOD.A2019017.1600.001.2019017214117.uwssec.highres.nc.level2.nc'
-
-        N = 8
-        slc_x = slice(2, N * 128 + 4)
-        slc_y = slice(2, N * 128 + 4)
-        slc_x_2 = slice(1, N * 128 + 6, 2)
-        slc_y_2 = slice(1, N * 128 + 6, 2)
-        x_2 = np.arange(int((N * 128) / 2) + 3)
-        y_2 = np.arange(int((N * 128) / 2) + 3)
-        t = np.arange(0, int((N * 128) / 2) + 3, 0.5)
-        s = np.arange(0, int((N * 128) / 2) + 3, 0.5)
-        x_k = slice(1, N * 128 + 3)
-        y_k = slice(1, N * 128 + 3)
-        x_128 = slice(3, N * 128 + 3)
-        y_128 = slice(3, N * 128 + 3)
-
-        sub_y, sub_x = (N * 128) + 10, (N * 128) + 10
-        y_0, x_0 = 2432 - int(sub_y / 2), 2432 - int(sub_x / 2)
-
-        h5f = h5py.File(in_file, 'r')
-
-        grd_a = get_grid_values_all(h5f, 'temp_11_0um_nom')
-        grd_a = grd_a[y_0:y_0 + sub_y, x_0:x_0 + sub_x]
-        grd_a = grd_a.copy()
-        grd_a = np.where(np.isnan(grd_a), 0, grd_a)
-        hr_grd_a = grd_a.copy()
-        hr_grd_a = hr_grd_a[y_128, x_128]
-        grd_a = grd_a[slc_y_2, slc_x_2]
-        grd_a = resample_2d_linear_one(x_2, y_2, grd_a, t, s)
-        grd_a = grd_a[y_k, x_k]
-        grd_a = normalize(grd_a, 'temp_11_0um_nom', mean_std_dct)
-
-        # grd_b = get_grid_values_all(h5f, 'refl_0_65um_nom')
-        # grd_b = grd_b[y_0:y_0+sub_y, x_0:x_0+sub_x]
-        # grd_b = grd_b[y_130, x_130]
-        # refl = grd_b
-        # grd_b = normalize(grd_b, 'refl_0_65um_nom', mean_std_dct)
-
-        grd_c = get_grid_values_all(h5f, label_param)
-        grd_c = grd_c[y_0:y_0 + sub_y, x_0:x_0 + sub_x]
-        hr_grd_c = grd_c.copy()
-        hr_grd_c = hr_grd_c[y_128, x_128]
-        grd_c = np.where(np.isnan(grd_c), 0, grd_c)
-        grd_c = grd_c.copy()
-        grd_c = grd_c[slc_y_2, slc_x_2]
-        grd_c = resample_2d_linear_one(x_2, y_2, grd_c, t, s)
-        grd_c = grd_c[y_k, x_k]
-        if label_param != 'cloud_probability':
-            grd_c = normalize(grd_c, label_param, mean_std_dct)
-
-        # data = np.stack([grd_a, grd_b, grd_c], axis=2)
-        # data = np.stack([grd_a, grd_c], axis=2)
-        data = np.stack([grd_c], axis=2)
-        data = np.expand_dims(data, axis=0)
-        data = data.astype(np.float32)
-
-        h5f.close()
-
-        return data
-
     @tf.function(input_signature=[tf.TensorSpec(None, tf.int32)])
     def data_function(self, indexes):
         out = tf.numpy_function(self.get_in_mem_data_batch_train, [indexes], [tf.float32, tf.float32])
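Note: the removed get_in_mem_data_batch_eval builds its low-resolution input by taking every other pixel of the full-resolution grid, bilinearly resampling it back onto a half-pixel target grid, and cropping the interior. A minimal sketch of that pattern follows, using scipy.interpolate.RegularGridInterpolator in place of the repo's resample_2d_linear_one helper; the random test grid and the zero fill value are illustrative assumptions, not the repo's code.

# Sketch only: approximates the subsample -> 2x bilinear upsample -> crop
# pattern in the removed method above.
import numpy as np
from scipy.interpolate import RegularGridInterpolator

N = 8
full = np.random.rand(N * 128 + 10, N * 128 + 10).astype(np.float32)

# Every other pixel simulates the half-resolution input grid.
lo = full[1:N * 128 + 6:2, 1:N * 128 + 6:2]

# Source coordinates at integer pixel centers; targets at half-pixel
# spacing, i.e. a 2x linear upsample (mirrors x_2/y_2 and t/s above).
y2 = np.arange(lo.shape[0], dtype=np.float32)
x2 = np.arange(lo.shape[1], dtype=np.float32)
t = np.arange(0, lo.shape[0], 0.5, dtype=np.float32)
s = np.arange(0, lo.shape[1], 0.5, dtype=np.float32)

interp = RegularGridInterpolator((y2, x2), lo, method='linear',
                                 bounds_error=False, fill_value=0.0)
tt, ss = np.meshgrid(t, s, indexing='ij')
hi = interp(np.stack([tt, ss], axis=-1)).astype(np.float32)

# Crop the interior, mirroring grd[y_k, x_k] in the removed method.
hi = hi[1:N * 128 + 3, 1:N * 128 + 3]
print(hi.shape)  # (1026, 1026)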
@@ -388,11 +324,6 @@ class SRCNN:
         out = tf.numpy_function(self.get_in_mem_data_batch_test, [indexes], [tf.float32, tf.float32])
         return out
 
-    @tf.function(input_signature=[tf.TensorSpec(None, tf.int32)])
-    def data_function_evaluate(self, indexes):
-        out = tf.numpy_function(self.get_in_mem_data_batch_eval, [indexes], [tf.float32])
-        return out
-
     def get_train_dataset(self, indexes):
         indexes = list(indexes)
@@ -414,13 +345,6 @@ class SRCNN:
         dataset = dataset.cache()
         self.test_dataset = dataset
 
-    def get_evaluate_dataset(self, indexes):
-        indexes = list(indexes)
-        dataset = tf.data.Dataset.from_tensor_slices(indexes)
-        dataset = dataset.map(self.data_function_evaluate, num_parallel_calls=8)
-        self.eval_dataset = dataset
-
     def setup_pipeline(self, train_data_files, test_data_files, num_train_samples):
         self.train_data_files = train_data_files
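Note: the removed eval pipeline followed the same pattern as the surviving train/test pipelines, namely wrapping a Python/NumPy loader with tf.numpy_function so tf.data can invoke it from graph mode. A minimal self-contained sketch of that pattern (the stand-in loader and its shapes are assumptions, not the repo's code):

# Sketch of the tf.data + tf.numpy_function pattern used in this class.
import numpy as np
import tensorflow as tf

def load_batch(idx):
    # Any pure-Python loading (h5py reads, resampling, ...) can run here;
    # this stand-in just returns a fixed-shape float32 batch.
    return np.zeros((1, 64, 64, 1), dtype=np.float32)

@tf.function(input_signature=[tf.TensorSpec(None, tf.int32)])
def data_fn(idx):
    # numpy_function bridges eager NumPy code into the graph; the Tout
    # list must match the dtypes the Python function returns.
    return tf.numpy_function(load_batch, [idx], [tf.float32])

dataset = tf.data.Dataset.from_tensor_slices([0])
dataset = dataset.map(data_fn, num_parallel_calls=8)

for batch in dataset:
    print(batch[0].shape)  # (1, 64, 64, 1)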
@@ -449,11 +373,6 @@ class SRCNN:
         self.get_test_dataset(tst_idxs)
         print('setup_test_pipeline: Done')
 
-    def setup_eval_pipeline(self, filename):
-        idxs = [0]
-        self.num_data_samples = 1
-        self.get_evaluate_dataset(idxs)
-
     def build_srcnn(self, do_drop_out=False, do_batch_norm=False, drop_rate=0.5, factor=2):
         print('build_cnn')
         padding = "SAME"
@@ -731,13 +650,6 @@ class SRCNN:
         self.reset_test_metrics()
 
-        # for data in self.eval_dataset:
-        #     pred = self.model([data], training=False)
-        #     pred = pred.numpy()
-        #     if label_param != 'cloud_probability':
-        #         pred = denormalize(pred, label_param, mean_std_dct)
-        #     print(pred.min(), pred.max())
-
         pred = self.model([data], training=False)
         self.test_probs = pred
         pred = pred.numpy()
@@ -749,8 +661,6 @@ class SRCNN:
     def run(self, directory, ckpt_dir=None, num_data_samples=50000):
         train_data_files = glob.glob(directory+'data_train_*.npy')
         valid_data_files = glob.glob(directory+'data_valid_*.npy')
-        # train_data_files = train_data_files[::2]
-        # valid_data_files = valid_data_files[::2]
         self.setup_pipeline(train_data_files, valid_data_files, num_data_samples)
         self.build_model()
@@ -770,7 +680,6 @@ class SRCNN:
     def run_evaluate(self, data, ckpt_dir):
         data = tf.convert_to_tensor(data, dtype=tf.float32)
         self.num_data_samples = 80000
-        # self.setup_eval_pipeline('clavrx_VNP02MOD.A2019017.1600.001.2019017214117.uwssec.highres.nc.level2.nc')
         self.build_model()
         self.build_training()
         self.build_evaluation()
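Note: after this commit, evaluation takes its input batch directly through run_evaluate(data, ckpt_dir) instead of the removed file-based eval pipeline. A hypothetical call, assuming a no-argument constructor and using an illustrative batch shape and checkpoint path:

# Hypothetical usage sketch; the shape and path are assumptions.
import numpy as np

model = SRCNN()
batch = np.zeros((1, 1026, 1026, 1), dtype=np.float32)
model.run_evaluate(batch, ckpt_dir='/path/to/checkpoints/')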