Commit 4fe6621e authored by tomrink

snapshot...

parent e577c8fa
@@ -62,9 +62,11 @@ IMG_DEPTH = 1
 label_param = 'cloud_probability'
 params = ['temp_11_0um_nom', 'refl_0_65um_nom', label_param]
+params_i = ['refl_0_65um_nom', label_param]
 data_params_half = ['temp_11_0um_nom']
 data_params_full = ['refl_0_65um_nom']
+label_idx_i = params_i.index(label_param)
 label_idx = params.index(label_param)
 print('data_params_half: ', data_params_half)
@@ -75,8 +77,8 @@ KERNEL_SIZE = 3  # target size: (128, 128)
 N = 1
 if KERNEL_SIZE == 3:
-    slc_x = slice(2, N*128 + 4)
-    slc_y = slice(2, N*128 + 4)
+    # slc_x = slice(2, N*128 + 4)
+    # slc_y = slice(2, N*128 + 4)
     slc_x_2 = slice(1, N*128 + 6, 2)
     slc_y_2 = slice(1, N*128 + 6, 2)
     x_2 = np.arange(int((N*128)/2) + 3)
@@ -85,6 +87,8 @@ if KERNEL_SIZE == 3:
     s = np.arange(0, int((N*128)/2) + 3, 0.5)
     x_k = slice(1, N*128 + 3)
     y_k = slice(1, N*128 + 3)
+    slc_x = slice(1, N*128 + 3)
+    slc_y = slice(1, N*128 + 3)
     x_128 = slice(2, N*128 + 2)
     y_128 = slice(2, N*128 + 2)
 elif KERNEL_SIZE == 5:
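As a sanity check on the slice arithmetic in this branch: with N = 1 the coarse grid has 67 samples per axis, linear resampling to half-step coordinates yields 134, the x_k/slc_x crops trim that to 130 (128 plus a 1-pixel border per side for the 3x3 kernel), and x_128 takes the centered 128-pixel target. A minimal sketch; the 134- and 132-wide tiles are illustrative assumptions, not values from this diff:

import numpy as np

N = 1
x_2 = np.arange(int((N*128)/2) + 3)        # 67 coarse-grid sample coordinates
t = np.arange(0, int((N*128)/2) + 3, 0.5)  # 134 fine-grid coordinates (2x density)
x_k = slice(1, N*128 + 3)                  # 134 -> 130: trim resampling edges
slc_x = slice(1, N*128 + 3)                # full-res crop: 130 = 128 + 1-pixel border per side
x_128 = slice(2, N*128 + 2)                # centered 128-pixel target window

print(x_2.size, t.size)                    # 67 134
print(np.zeros(134)[x_k].size)             # 130
print(np.zeros(132)[x_128].size)           # 128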
@@ -168,7 +172,8 @@ def build_residual_block_conv2d_down2x(x_in, num_filters, activation, padding='SAME'
 def upsample(tmp):
-    tmp = tmp[:, slc_y_2, slc_x_2]
+    # tmp = tmp[:, slc_y_2, slc_x_2]
+    tmp = tmp[:, 0:66, 0:66]
     tmp = resample_2d_linear(x_2, y_2, tmp, t, s)
     tmp = tmp[:, y_k, x_k]
     return tmp
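resample_2d_linear is a project utility not shown in this diff; assuming it performs separable linear interpolation from the coarse grid (x_2, y_2) onto the dense grid (t, s), a rough stand-in for the 2x upsampling step using scipy would be:

import numpy as np
from scipy.ndimage import zoom

def upsample_linear_sketch(tmp):
    # tmp: (batch, h, w) half-resolution stack; order=1 -> bilinear,
    # zoom factor 2 on the spatial axes only (67 -> 134 per axis).
    return zoom(tmp, (1, 2, 2), order=1)

tmp = np.random.rand(4, 67, 67).astype(np.float32)
print(upsample_linear_sketch(tmp).shape)  # (4, 134, 134)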
@@ -183,6 +188,8 @@ def upsample_nearest(grd):
     up[:, 0::2, 1::2] = grd[:, 0::, 0::]
     up[:, 1::2, 1::2] = grd[:, 0::, 0::]
+    up = up[:, y_k, x_k]
     return up
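The two assignments visible here are half of the picture: upsample_nearest copies each coarse pixel into a 2x2 block via four strided writes (the 0::2, 0::2 and 1::2, 0::2 writes sit just above this hunk), then crops with y_k/x_k. A self-contained sketch of the same idea:

import numpy as np

def upsample_nearest_sketch(grd):
    # grd: (batch, h, w) -> (batch, 2h, 2w), each pixel duplicated into a 2x2 block.
    b, h, w = grd.shape
    up = np.zeros((b, 2 * h, 2 * w), dtype=grd.dtype)
    up[:, 0::2, 0::2] = grd
    up[:, 1::2, 0::2] = grd
    up[:, 0::2, 1::2] = grd
    up[:, 1::2, 1::2] = grd
    return up

grd = np.arange(4.0).reshape(1, 2, 2)
print(upsample_nearest_sketch(grd)[0])
# [[0. 0. 1. 1.]
#  [0. 0. 1. 1.]
#  [2. 2. 3. 3.]
#  [2. 2. 3. 3.]]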
@@ -379,24 +386,24 @@ class SRCNN:
     def get_in_mem_data_batch(self, idxs, is_training):
         if is_training:
-            files = self.train_data_files
+            data_files = self.train_data_files
+            label_files = self.train_label_files
         else:
-            files = self.test_data_files
+            data_files = self.test_data_files
+            label_files = self.test_label_files
         data_s = []
+        label_s = []
         for k in idxs:
-            f = files[k]
-            try:
-                nda = np.load(f)
-            except Exception:
-                print(f)
-                continue
+            f = data_files[k]
+            nda = np.load(f)
             data_s.append(nda)
-        input_data = np.concatenate(data_s)
+            f = label_files[k]
+            nda = np.load(f)
+            label_s.append(nda)
+        input_data = np.concatenate(data_s)
+        input_label = np.concatenate(label_s)
         DO_ADD_NOISE = False
         if is_training and NOISE_TRAINING:
             DO_ADD_NOISE = True
         data_norm = []
         for param in data_params_half:
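The rewritten loop reads a data file and its label file at the same index, so the two lists must correspond element-by-element; note also that the old try/except guard is gone, so an unreadable .npy now raises instead of being skipped. A stripped-down sketch of that pairing contract (the helper name and assert are illustrative, not part of the commit):

import numpy as np

def load_pairs(data_files, label_files):
    # data_files[k] and label_files[k] must describe the same tiles;
    # a length mismatch usually means the glob patterns matched stray files.
    assert len(data_files) == len(label_files)
    data_s, label_s = [], []
    for df, lf in zip(data_files, label_files):
        data_s.append(np.load(df))    # each file: (n, channels, h, w)
        label_s.append(np.load(lf))
    return np.concatenate(data_s), np.concatenate(label_s)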
@@ -412,11 +419,10 @@ class SRCNN:
             data_norm.append(tmp)
         for param in data_params_full:
-            idx = params.index(param)
-            tmp = input_data[:, idx, :, :]
+            idx = params_i.index(param)
+            tmp = input_label[:, idx, :, :]
             tmp = tmp.copy()
             tmp = np.where(np.isnan(tmp), 0, tmp)
-            # Full res:
             tmp = tmp[:, slc_y, slc_x]
             tmp = normalize(tmp, param, mean_std_dct)
             data_norm.append(tmp)
@@ -427,9 +433,7 @@ class SRCNN:
         if DO_ESPCN:
             tmp = tmp[:, slc_y_2, slc_x_2]
         else:  # Half res upsampled to full res:
-            # tmp = upsample(tmp)
-            tmp = upsample_mean(tmp)
-            tmp = tmp[:, slc_y, slc_x]
+            tmp = upsample_nearest(tmp)
         if label_param != 'cloud_probability':
             tmp = normalize(tmp, label_param, mean_std_dct)
         data_norm.append(tmp)
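normalize and mean_std_dct come from the project's utilities and are not defined in this diff; assuming mean_std_dct maps a parameter name to its (mean, std) statistics, the per-channel operation is approximately the usual standardization:

import numpy as np

# Hypothetical stand-in for the project's normalize(); the dict layout
# {'temp_11_0um_nom': (mean, std), ...} is an assumption.
def normalize_sketch(tmp, param, mean_std_dct):
    mean, std = mean_std_dct[param]
    return (tmp - mean) / std

print(normalize_sketch(np.array([250.0, 270.0]), 'temp_11_0um_nom',
                       {'temp_11_0um_nom': (260.0, 10.0)}))  # [-1.  1.]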
@@ -438,7 +442,7 @@ class SRCNN:
         data = data.astype(np.float32)
         # -----------------------------------------------------
         # -----------------------------------------------------
-        label = input_data[:, label_idx, :, :]
+        label = input_label[:, label_idx_i, :, :]
         label = label.copy()
         label = label[:, y_128, x_128]
         if NumClasses == 5:
@@ -504,10 +508,11 @@ class SRCNN:
         dataset = dataset.cache()
         self.test_dataset = dataset

-    def setup_pipeline(self, train_data_files, test_data_files, num_train_samples):
+    def setup_pipeline(self, train_data_files, train_label_files, test_data_files, test_label_files, num_train_samples):
         self.train_data_files = train_data_files
+        self.train_label_files = train_label_files
         self.test_data_files = test_data_files
+        self.test_label_files = test_label_files
         trn_idxs = np.arange(len(train_data_files))
         np.random.shuffle(trn_idxs)
@@ -526,8 +531,10 @@ class SRCNN:
         print('num test samples: ', tst_idxs.shape[0])
         print('setup_pipeline: Done')

-    def setup_test_pipeline(self, test_data_files):
+    def setup_test_pipeline(self, test_data_files, test_label_files):
         self.test_data_files = test_data_files
+        self.test_label_files = test_label_files
         tst_idxs = np.arange(len(test_data_files))
         self.get_test_dataset(tst_idxs)
         print('setup_test_pipeline: Done')
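With the widened signatures, call sites now pass matching label-file lists alongside the data files. A usage sketch, assuming the file lists are built as in run() below:

srcnn = SRCNN()
srcnn.setup_pipeline(train_data_files, train_label_files,
                     valid_data_files, valid_label_files, num_data_samples)

# Evaluation-only path:
srcnn.setup_test_pipeline(valid_data_files, valid_label_files)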
@@ -839,19 +846,24 @@ class SRCNN:
         return pred

     def run(self, directory, ckpt_dir=None, num_data_samples=50000):
-        train_data_files = glob.glob(directory+'data_train_*.npy')
-        valid_data_files = glob.glob(directory+'data_valid_*.npy')
+        train_data_files = glob.glob(directory+'train*mres*.npy')
+        valid_data_files = glob.glob(directory+'valid*mres*.npy')
+        train_label_files = glob.glob(directory+'train*ires*.npy')
+        valid_label_files = glob.glob(directory+'valid*ires*.npy')

-        self.setup_pipeline(train_data_files, valid_data_files, num_data_samples)
+        self.setup_pipeline(train_data_files, train_label_files, valid_data_files, valid_label_files, num_data_samples)
         self.build_model()
         self.build_training()
         self.build_evaluation()
         self.do_training(ckpt_dir=ckpt_dir)

     def run_restore(self, directory, ckpt_dir):
-        valid_data_files = glob.glob(directory + 'data_valid*.npy')
+        valid_data_files = glob.glob(directory + 'valid*mres*.npy')
+        valid_label_files = glob.glob(directory + 'valid*ires*.npy')
         self.num_data_samples = 1000
-        self.setup_test_pipeline(valid_data_files)
+        self.setup_test_pipeline(valid_data_files, valid_label_files)
         self.build_model()
         self.build_training()
         self.build_evaluation()
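The new patterns discover inputs by naming convention: 'mres' files are the model inputs and 'ires' files the full-resolution labels. glob.glob returns files in arbitrary order, and get_in_mem_data_batch pairs the k-th entries of the two lists, so sorting both lists is the defensive pattern (not done in this commit; the helper below is illustrative):

import glob

def find_file_pairs(directory, split='train'):
    # Sort both lists so the k-th data file lines up with the k-th label file,
    # assuming matching tiles share the same name apart from the mres/ires tag.
    data_files = sorted(glob.glob(directory + split + '*mres*.npy'))
    label_files = sorted(glob.glob(directory + split + '*ires*.npy'))
    assert len(data_files) == len(label_files)
    return data_files, label_files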