Commit 1308ba27 authored by tomrink

snapshot...

parent f7e77305
@@ -58,7 +58,6 @@ mean_std_dct.update(mean_std_dct_l1b)
 mean_std_dct.update(mean_std_dct_l2)
 
 IMG_DEPTH = 1
 
-# label_param = 'cloud_fraction'
 # label_param = 'cld_opd_dcomp'
 label_param = 'cloud_probability'
@@ -66,8 +65,8 @@ params = ['temp_11_0um_nom', 'refl_0_65um_nom', label_param]
 data_params_half = ['temp_11_0um_nom']
 data_params_full = ['refl_0_65um_nom']
 
-label_idx = params.index(label_param)
-# label_idx = 0
+# label_idx = params.index(label_param)
+label_idx = 0
 
 print('data_params_half: ', data_params_half)
 print('data_params_full: ', data_params_full)
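For reference, with the params list visible in this file, params.index(label_param) evaluates to 2, so this snapshot pins label_idx to 0 instead of deriving it. A quick check, using only values shown above:

params = ['temp_11_0um_nom', 'refl_0_65um_nom', 'cloud_probability']
print(params.index('cloud_probability'))  # -> 2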
@@ -315,43 +314,44 @@ class SRCNN:
         tf.debugging.set_log_device_placement(LOG_DEVICE_PLACEMENT)
 
     def get_in_mem_data_batch(self, idxs, is_training):
-        # if is_training:
-        #     files = self.train_data_files
-        # else:
-        #     files = self.test_data_files
-        #
-        # data_s = []
-        # for k in idxs:
-        #     f = files[k]
-        #     try:
-        #         nda = np.load(f)
-        #     except Exception:
-        #         print(f)
-        #         continue
-        #     data_s.append(nda)
-        # input_data = np.concatenate(data_s)
-        # input_label = input_data[:, label_idx, :, :]
         if is_training:
-            files = self.train_data_files
+            data_files = self.train_data_files
+            label_files = self.train_label_files
         else:
-            files = self.test_data_files
+            data_files = self.test_data_files
+            label_files = self.test_label_files
 
         data_s = []
+        label_s = []
         for k in idxs:
-            f = files[k]
-            try:
-                nda = np.load(f)
-            except Exception:
-                print(f)
-                continue
+            f = data_files[k]
+            nda = np.load(f)
             data_s.append(nda)
+
+            f = label_files[k]
+            nda = np.load(f)
+            label_s.append(nda)
         input_data = np.concatenate(data_s)
+        input_label = np.concatenate(label_s)
         input_label = input_data[:, label_idx, :, :]
+        # if is_training:
+        #     data_files = self.train_data_files
+        #     label_files = self.train_label_files
+        # else:
+        #     data_files = self.test_data_files
+        #     label_files = self.test_label_files
+        #
+        # data_s = []
+        # label_s = []
+        # for k in idxs:
+        #     f = data_files[k]
+        #     nda = np.load(f)
+        #     data_s.append(nda)
+        #
+        #     f = label_files[k]
+        #     nda = np.load(f)
+        #     label_s.append(nda)
+        # input_data = np.concatenate(data_s)
+        # input_label = np.concatenate(label_s)
 
         data_norm = []
         for param in data_params_half:
             idx = params.index(param)
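The reworked loader above reads inputs and labels from parallel file lists indexed by the same k. A minimal self-contained sketch of that pairing pattern outside the class (load_batch and the list names are illustrative, not part of this repo):

import numpy as np

def load_batch(idxs, data_files, label_files):
    # data_files and label_files are assumed to be parallel lists of
    # .npy paths: entry k of one corresponds to entry k of the other.
    data_s, label_s = [], []
    for k in idxs:
        data_s.append(np.load(data_files[k]))
        label_s.append(np.load(label_files[k]))
    # Stack along the sample axis, as np.concatenate does in the diff.
    return np.concatenate(data_s), np.concatenate(label_s)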
@@ -360,7 +360,7 @@ class SRCNN:
             if DO_ESPCN:
                 tmp = tmp[:, slc_y_2, slc_x_2]
             else:  # Half res upsampled to full res:
-                tmp = get_grid_cell_mean(tmp)
+                # tmp = get_grid_cell_mean(tmp)
                 tmp = tmp[:, 0:66, 0:66]
             tmp = normalize(tmp, param, mean_std_dct)
             data_norm.append(tmp)
@@ -387,7 +387,7 @@ class SRCNN:
             if DO_ESPCN:
                 tmp = tmp[:, slc_y_2, slc_x_2]
             else:  # Half res upsampled to full res:
-                tmp = get_grid_cell_mean(tmp)
+                # tmp = get_grid_cell_mean(tmp)
                 tmp = np.where(np.isnan(tmp), 0, tmp)
                 tmp = tmp[:, 0:66, 0:66]
         if label_param != 'cloud_probability':
@@ -466,13 +466,13 @@ class SRCNN:
         self.test_dataset = dataset
 
     def setup_pipeline(self, train_data_files, train_label_files, test_data_files, test_label_files, num_train_samples):
-        # self.train_data_files = train_data_files
-        # self.train_label_files = train_label_files
-        # self.test_data_files = test_data_files
-        # self.test_label_files = test_label_files
         self.train_data_files = train_data_files
+        self.train_label_files = train_label_files
         self.test_data_files = test_data_files
+        self.test_label_files = test_label_files
+        # self.train_data_files = train_data_files
+        # self.test_data_files = test_data_files
 
         trn_idxs = np.arange(len(train_data_files))
         np.random.shuffle(trn_idxs)
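With labels carried in their own files, setup_pipeline now expects four parallel file lists. A hypothetical call with placeholder paths, assuming SRCNN takes no constructor arguments:

srcnn = SRCNN()
srcnn.setup_pipeline(train_data_files=['data_train_1.npy'],
                     train_label_files=['label_train_1.npy'],
                     test_data_files=['data_valid_1.npy'],
                     test_label_files=['label_valid_1.npy'],
                     num_train_samples=50000)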
@@ -799,15 +799,15 @@ class SRCNN:
         return pred
 
     def run(self, directory, ckpt_dir=None, num_data_samples=50000):
-        # train_data_files = glob.glob(directory+'data_train*.npy')
-        # valid_data_files = glob.glob(directory+'data_valid*.npy')
-        # train_label_files = glob.glob(directory+'label_train*.npy')
-        # valid_label_files = glob.glob(directory+'label_valid*.npy')
-        # self.setup_pipeline(train_data_files, train_label_files, valid_data_files, valid_label_files, num_data_samples)
-        train_data_files = glob.glob(directory+'data_train_*.npy')
-        valid_data_files = glob.glob(directory+'data_valid_*.npy')
-        self.setup_pipeline(train_data_files, None, valid_data_files, None, num_data_samples)
+        train_data_files = glob.glob(directory+'data_train*.npy')
+        valid_data_files = glob.glob(directory+'data_valid*.npy')
+        train_label_files = glob.glob(directory+'label_train*.npy')
+        valid_label_files = glob.glob(directory+'label_valid*.npy')
+        self.setup_pipeline(train_data_files, train_label_files, valid_data_files, valid_label_files, num_data_samples)
+        # train_data_files = glob.glob(directory+'data_train_*.npy')
+        # valid_data_files = glob.glob(directory+'data_valid_*.npy')
+        # self.setup_pipeline(train_data_files, None, valid_data_files, None, num_data_samples)
 
         self.build_model()
         self.build_training()
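run() now builds the four file lists from filename prefixes. Worth noting: glob.glob returns paths in arbitrary order, so sorting each list is one way to keep data and label entries aligned when filenames share a common suffix (the diff itself does not sort). A sketch with a hypothetical directory:

import glob

directory = '/some/tile/dir/'  # hypothetical path
train_data_files = sorted(glob.glob(directory + 'data_train*.npy'))
train_label_files = sorted(glob.glob(directory + 'label_train*.npy'))
# Sorting both lists the same way keeps index k of the data list
# paired with index k of the label list.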