Skip to content
Snippets Groups Projects
Commit c327dcb6 authored by tomrink's avatar tomrink
Browse files

snapshot...

parent ab0080c6
No related branches found
No related tags found
No related merge requests found
...@@ -62,12 +62,12 @@ IMG_DEPTH = 1 ...@@ -62,12 +62,12 @@ IMG_DEPTH = 1
label_param = 'cloud_probability' label_param = 'cloud_probability'
params = ['temp_11_0um_nom', 'refl_0_65um_nom', label_param] params = ['temp_11_0um_nom', 'refl_0_65um_nom', label_param]
params_i = ['refl_0_65um_nom', label_param] # params_i = ['refl_0_65um_nom', label_param]
data_params_half = ['temp_11_0um_nom'] data_params_half = ['temp_11_0um_nom']
data_params_full = ['refl_0_65um_nom'] data_params_full = ['refl_0_65um_nom']
label_idx = params_i.index(label_param) # label_idx = params_i.index(label_param)
# label_idx = 0 label_idx = params.index(label_param)
print('data_params_half: ', data_params_half) print('data_params_half: ', data_params_half)
print('data_params_full: ', data_params_full) print('data_params_full: ', data_params_full)
...@@ -315,42 +315,42 @@ class SRCNN: ...@@ -315,42 +315,42 @@ class SRCNN:
tf.debugging.set_log_device_placement(LOG_DEVICE_PLACEMENT) tf.debugging.set_log_device_placement(LOG_DEVICE_PLACEMENT)
def get_in_mem_data_batch(self, idxs, is_training): def get_in_mem_data_batch(self, idxs, is_training):
# if is_training:
# files = self.train_data_files
# else:
# files = self.test_data_files
#
# data_s = []
# for k in idxs:
# f = files[k]
# try:
# nda = np.load(f)
# except Exception:
# print(f)
# continue
# data_s.append(nda)
# input_data = np.concatenate(data_s)
# input_label = input_data[:, label_idx, :, :]
if is_training: if is_training:
data_files = self.train_data_files files = self.train_data_files
label_files = self.train_label_files
else: else:
data_files = self.test_data_files files = self.test_data_files
label_files = self.test_label_files
data_s = [] data_s = []
label_s = []
for k in idxs: for k in idxs:
f = data_files[k] f = files[k]
nda = np.load(f) try:
nda = np.load(f)
except Exception:
print(f)
continue
data_s.append(nda) data_s.append(nda)
f = label_files[k]
nda = np.load(f)
label_s.append(nda)
input_data = np.concatenate(data_s) input_data = np.concatenate(data_s)
input_label = np.concatenate(label_s) # input_label = input_data[:, label_idx, :, :]
# if is_training:
# data_files = self.train_data_files
# label_files = self.train_label_files
# else:
# data_files = self.test_data_files
# label_files = self.test_label_files
#
# data_s = []
# label_s = []
# for k in idxs:
# f = data_files[k]
# nda = np.load(f)
# data_s.append(nda)
#
# f = label_files[k]
# nda = np.load(f)
# label_s.append(nda)
# input_data = np.concatenate(data_s)
# input_label = np.concatenate(label_s)
data_norm = [] data_norm = []
for param in data_params_half: for param in data_params_half:
...@@ -360,15 +360,14 @@ class SRCNN: ...@@ -360,15 +360,14 @@ class SRCNN:
if DO_ESPCN: if DO_ESPCN:
tmp = tmp[:, slc_y_2, slc_x_2] tmp = tmp[:, slc_y_2, slc_x_2]
else: # Half res upsampled to full res: else: # Half res upsampled to full res:
# tmp = get_grid_cell_mean(tmp) tmp = get_grid_cell_mean(tmp)
tmp = tmp[:, 0:66, 0:66] tmp = tmp[:, 0:66, 0:66]
tmp = normalize(tmp, param, mean_std_dct) tmp = normalize(tmp, param, mean_std_dct)
data_norm.append(tmp) data_norm.append(tmp)
for param in data_params_full: for param in data_params_full:
idx = params_i.index(param) idx = params.index(param)
# tmp = input_data[:, idx, :, :] tmp = input_data[:, idx, :, :]
tmp = input_label[:, idx, :, :]
tmp = tmp.copy() tmp = tmp.copy()
lo, hi, std, avg = get_min_max_std(tmp) lo, hi, std, avg = get_min_max_std(tmp)
...@@ -382,15 +381,12 @@ class SRCNN: ...@@ -382,15 +381,12 @@ class SRCNN:
data_norm.append(avg[:, 0:66, 0:66]) data_norm.append(avg[:, 0:66, 0:66])
# data_norm.append(std[:, 0:66, 0:66]) # data_norm.append(std[:, 0:66, 0:66])
# --------------------------------------------------- # ---------------------------------------------------
# tmp = input_data[:, label_idx, :, :] tmp = input_data[:, label_idx, :, :]
tmp = input_data[:, 2, :, :]
tmp = tmp.copy() tmp = tmp.copy()
tmp = np.where(np.isnan(tmp), 0, tmp)
if DO_ESPCN: if DO_ESPCN:
tmp = tmp[:, slc_y_2, slc_x_2] tmp = tmp[:, slc_y_2, slc_x_2]
else: # Half res upsampled to full res: else: # Half res upsampled to full res:
# tmp = get_grid_cell_mean(tmp) tmp = get_grid_cell_mean(tmp)
tmp = np.where(np.isnan(tmp), 0, tmp)
tmp = tmp[:, 0:66, 0:66] tmp = tmp[:, 0:66, 0:66]
if label_param != 'cloud_probability': if label_param != 'cloud_probability':
tmp = normalize(tmp, label_param, mean_std_dct) tmp = normalize(tmp, label_param, mean_std_dct)
...@@ -401,7 +397,7 @@ class SRCNN: ...@@ -401,7 +397,7 @@ class SRCNN:
data = data.astype(np.float32) data = data.astype(np.float32)
# ----------------------------------------------------- # -----------------------------------------------------
# ----------------------------------------------------- # -----------------------------------------------------
label = input_label[:, label_idx, :, :] label = input_data[:, label_idx, :, :]
label = label.copy() label = label.copy()
label = label[:, y_128, x_128] label = label[:, y_128, x_128]
if NumClasses == 5: if NumClasses == 5:
...@@ -468,13 +464,13 @@ class SRCNN: ...@@ -468,13 +464,13 @@ class SRCNN:
self.test_dataset = dataset self.test_dataset = dataset
def setup_pipeline(self, train_data_files, train_label_files, test_data_files, test_label_files, num_train_samples): def setup_pipeline(self, train_data_files, train_label_files, test_data_files, test_label_files, num_train_samples):
self.train_data_files = train_data_files
self.train_label_files = train_label_files
self.test_data_files = test_data_files
self.test_label_files = test_label_files
# self.train_data_files = train_data_files # self.train_data_files = train_data_files
# self.train_label_files = train_label_files
# self.test_data_files = test_data_files # self.test_data_files = test_data_files
# self.test_label_files = test_label_files
self.train_data_files = train_data_files
self.test_data_files = test_data_files
trn_idxs = np.arange(len(train_data_files)) trn_idxs = np.arange(len(train_data_files))
np.random.shuffle(trn_idxs) np.random.shuffle(trn_idxs)
...@@ -801,15 +797,15 @@ class SRCNN: ...@@ -801,15 +797,15 @@ class SRCNN:
return pred return pred
def run(self, directory, ckpt_dir=None, num_data_samples=50000): def run(self, directory, ckpt_dir=None, num_data_samples=50000):
train_data_files = glob.glob(directory+'train*mres*.npy') # train_data_files = glob.glob(directory+'train*mres*.npy')
valid_data_files = glob.glob(directory+'valid*mres*.npy') # valid_data_files = glob.glob(directory+'valid*mres*.npy')
train_label_files = glob.glob(directory+'train*ires*.npy') # train_label_files = glob.glob(directory+'train*ires*.npy')
valid_label_files = glob.glob(directory+'valid*ires*.npy') # valid_label_files = glob.glob(directory+'valid*ires*.npy')
self.setup_pipeline(train_data_files, train_label_files, valid_data_files, valid_label_files, num_data_samples) # self.setup_pipeline(train_data_files, train_label_files, valid_data_files, valid_label_files, num_data_samples)
# train_data_files = glob.glob(directory+'data_train_*.npy') train_data_files = glob.glob(directory+'data_train_*.npy')
# valid_data_files = glob.glob(directory+'data_valid_*.npy') valid_data_files = glob.glob(directory+'data_valid_*.npy')
# self.setup_pipeline(train_data_files, None, valid_data_files, None, num_data_samples) self.setup_pipeline(train_data_files, None, valid_data_files, None, num_data_samples)
self.build_model() self.build_model()
self.build_training() self.build_training()
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment