# cloud_opd_srcnn_abi.py
import gc
import glob
import tensorflow as tf
from util.augment import augment_image
from util.setup import logdir, modeldir, now, ancillary_path
from util.util import EarlyStop, normalize, denormalize, scale, descale, get_grid_values_all, resample_2d_linear, \
    smooth_2d, make_tf_callable_generator
import os, datetime
import numpy as np
import pickle
import h5py
import time
LOG_DEVICE_PLACEMENT = False
PROC_BATCH_SIZE = 4
PROC_BATCH_BUFFER_SIZE = 5000
NumClasses = 2
if NumClasses == 2:
    NumLogits = 1
else:
    NumLogits = NumClasses
BATCH_SIZE = 128
NUM_EPOCHS = 80
TRACK_MOVING_AVERAGE = False
EARLY_STOP = True
NOISE_TRAINING = False
NOISE_STDDEV = 0.01
DO_AUGMENT = True
DO_SMOOTH = False
SIGMA = 1.0
DO_ZERO_OUT = False
# setup scaling parameters dictionary
mean_std_dct = {}
mean_std_file = ancillary_path+'mean_std_lo_hi_l2.pkl'
with open(mean_std_file, 'rb') as f:
    mean_std_dct_l2 = pickle.load(f)

mean_std_file = ancillary_path+'mean_std_lo_hi_l1b.pkl'
with open(mean_std_file, 'rb') as f:
    mean_std_dct_l1b = pickle.load(f)

mean_std_dct.update(mean_std_dct_l1b)
mean_std_dct.update(mean_std_dct_l2)
IMG_DEPTH = 1
label_param = 'cld_opd_dcomp'
# params = ['temp_11_0um_nom', 'refl_0_65um_nom', 'refl_submin_ch01', 'refl_submax_ch01', 'refl_substddev_ch01', 'temp_stddev3x3_ch31', 'refl_stddev3x3_ch01', label_param]
params = ['temp_11_0um_nom', 'refl_0_65um_nom', 'refl_submin_ch01', 'refl_submax_ch01', 'refl_substddev_ch01', label_param]
# params_i = ['temp_11_0um_nom', 'refl_0_65um_nom', 'temp_stddev3x3_ch31', 'refl_stddev3x3_ch01', label_param]
params_i = ['temp_11_0um_nom', 'refl_0_65um_nom', label_param]
# data_params_half = ['temp_11_0um_nom', 'refl_0_65um_nom']
data_params_half = ['temp_11_0um_nom']
data_params_full = ['refl_0_65um_nom']
sub_fields = ['refl_submin_ch01', 'refl_submax_ch01', 'refl_substddev_ch01']
# sub_fields = ['refl_stddev3x3_ch01']
label_idx_i = params_i.index(label_param)
label_idx = params.index(label_param)
print('data_params_half: ', data_params_half)
print('data_params_full: ', data_params_full)
print('label_param: ', label_param)
KERNEL_SIZE = 3
def build_residual_conv2d_block(conv, num_filters, block_name, activation=tf.nn.relu, padding='SAME',
                                kernel_initializer='he_uniform', scale=None, kernel_size=3,
                                do_drop_out=True, drop_rate=0.5, do_batch_norm=True):
    # Two stacked Conv2D layers form the residual branch; the block input is
    # added back at the end (identity shortcut).
    with tf.name_scope(block_name):
        skip = tf.keras.layers.Conv2D(num_filters, kernel_size=kernel_size, padding=padding,
                                      kernel_initializer=kernel_initializer, activation=activation)(conv)
        skip = tf.keras.layers.Conv2D(num_filters, kernel_size=kernel_size, padding=padding, activation=None)(skip)
        if scale is not None:
            # Residual scaling (as in EDSR-style networks) to stabilize training.
            skip = tf.keras.layers.Lambda(lambda x: x * scale)(skip)
        if do_drop_out:
            skip = tf.keras.layers.Dropout(drop_rate)(skip)
        if do_batch_norm:
            skip = tf.keras.layers.BatchNormalization()(skip)
        conv = conv + skip
        print(block_name+':', conv.shape)
    return conv

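# A minimal sketch (not part of the training pipeline) of how the residual block
# composes in the Keras functional API; the 16x16 spatial size and 8 filters are
# arbitrary illustration values, and dropout/batch-norm are disabled to keep the
# example small.
def _demo_residual_block():
    x_in = tf.keras.Input(shape=(16, 16, 8))
    x = build_residual_conv2d_block(x_in, 8, 'Demo_Block', scale=0.2,
                                    do_drop_out=False, do_batch_norm=False)
    model = tf.keras.Model(x_in, x)
    print(model.output_shape)  # (None, 16, 16, 8): SAME padding preserves H and W
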
def upsample_nearest(grd):
    # Nearest-neighbor 2x upsampling: each input cell is replicated into a 2x2
    # block of the output.
    bsize, ylen, xlen = grd.shape
    up = np.zeros((bsize, ylen*2, xlen*2))
    up[:, 0::2, 0::2] = grd[:, 0::, 0::]
    up[:, 1::2, 0::2] = grd[:, 0::, 0::]
    up[:, 0::2, 1::2] = grd[:, 0::, 0::]
    up[:, 1::2, 1::2] = grd[:, 0::, 0::]
    return up

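# Quick illustrative sanity check (not called anywhere): a single 2x2 grid
# becomes 4x4 with each value repeated in a 2x2 block.
def _demo_upsample_nearest():
    grd = np.array([[[1.0, 2.0],
                     [3.0, 4.0]]])          # shape (1, 2, 2)
    up = upsample_nearest(grd)              # shape (1, 4, 4)
    assert up[0, 0, 0] == up[0, 1, 1] == 1.0
    assert up[0, 2, 2] == up[0, 3, 3] == 4.0
    print(up[0])
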
def upsample_mean(grd):
    # Replicate the even-indexed (2x2 block anchor) samples into every cell of
    # each 2x2 block of a same-sized output. Note: despite the name, no
    # averaging happens here. The original allocated (ylen*2, xlen*2), which
    # makes the strided assignments below shape-mismatch; (ylen, xlen) is
    # assumed to be the intent (this helper is not referenced elsewhere in the file).
    bsize, ylen, xlen = grd.shape
    up = np.zeros((bsize, ylen, xlen))
    up[:, ::2, ::2] = grd[:, ::2, ::2]
    up[:, 1::2, ::2] = grd[:, ::2, ::2]
    up[:, ::2, 1::2] = grd[:, ::2, ::2]
    up[:, 1::2, 1::2] = grd[:, ::2, ::2]
    return up

def get_grid_cell_mean(grd_k):
    # Average each 2x2 cell down to one value (2x block reduction). NaNs are
    # zero-filled first, so the nanmean below acts as a plain mean.
    grd_k = np.where(np.isnan(grd_k), 0, grd_k)
    a = grd_k[:, 0::2, 0::2]
    b = grd_k[:, 1::2, 0::2]
    c = grd_k[:, 0::2, 1::2]
    d = grd_k[:, 1::2, 1::2]
    mean = np.nanmean([a, b, c, d], axis=0)
    return mean

def get_min_max_std(grd_k):
    # Per-2x2-cell min, max, standard deviation, and mean (2x block reduction).
    # As above, NaNs are zero-filled before the nan-aware reductions.
    grd_k = np.where(np.isnan(grd_k), 0, grd_k)
    a = grd_k[:, 0::2, 0::2]
    b = grd_k[:, 1::2, 0::2]
    c = grd_k[:, 0::2, 1::2]
    d = grd_k[:, 1::2, 1::2]
    lo = np.nanmin([a, b, c, d], axis=0)
    hi = np.nanmax([a, b, c, d], axis=0)
    std = np.nanstd([a, b, c, d], axis=0)
    avg = np.nanmean([a, b, c, d], axis=0)
    return lo, hi, std, avg

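# Illustrative check of the 2x2 block statistics on a toy field: one batch
# item containing a single 2x2 cell with values [1, 2, 3, 4].
def _demo_block_stats():
    grd = np.array([[[1.0, 2.0],
                     [3.0, 4.0]]])          # shape (1, 2, 2) -> one 2x2 cell
    lo, hi, std, avg = get_min_max_std(grd)
    print(lo[0, 0], hi[0, 0], avg[0, 0])    # 1.0 4.0 2.5
    print(np.isclose(std[0, 0], np.std([1.0, 2.0, 3.0, 4.0])))  # True
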
def upsample_static(grd, x_2, y_2, t, s, y_k, x_k):
    # Bilinear 2x upsample onto the (t, s) target coordinates; the y_k/x_k
    # trimming step is currently disabled.
    grd = resample_2d_linear(x_2, y_2, grd, t, s)
    # grd = grd[:, y_k, x_k]
    return grd

class SRCNN:

    def __init__(self, LEN_Y=128, LEN_X=128):
        self.train_data = None
        self.train_label = None
        self.test_data = None
        self.test_label = None
        self.test_data_denorm = None

        self.train_dataset = None
        self.inner_train_dataset = None
        self.test_dataset = None
        self.eval_dataset = None
        self.X_img = None
        self.inputs = []
        self.logits = None

        self.predict_data = None
        self.predict_dataset = None
        self.mean_list = None
        self.std_list = None

        self.training_op = None
        self.correct = None
        self.accuracy = None
        self.loss = None
        self.pred_class = None
        self.variable_averages = None
        self.global_step = None

        self.writer_train = None
        self.writer_valid = None
        self.writer_train_valid_loss = None

        self.model = None
        self.optimizer = None
        self.ema = None
        self.train_loss = None
        self.train_accuracy = None
        self.test_loss = None
        self.test_accuracy = None

        self.test_labels = []
        self.test_preds = []
        self.test_probs = None

        self.learningRateSchedule = None
        self.num_data_samples = None
        self.initial_learning_rate = None

        self.train_data_files = None
        self.train_label_files = None
        self.test_data_files = None
        self.test_label_files = None

        # self.n_chans = len(data_params_half) + len(data_params_full) + 1
        self.n_chans = 1

        self.X_img = tf.keras.Input(shape=(None, None, self.n_chans))
        self.inputs.append(self.X_img)

        # Slice/coordinate geometry used to crop the padded tiles and to do the
        # 2x bilinear upsample from the half-resolution grid to the target grid.
        self.slc_x_m = slice(1, int(LEN_X / 2) + 4)
        self.slc_y_m = slice(1, int(LEN_Y / 2) + 4)
        self.slc_x = slice(3, LEN_X + 5)
        self.slc_y = slice(3, LEN_Y + 5)
        self.slc_x_2 = slice(2, LEN_X + 7, 2)
        self.slc_y_2 = slice(2, LEN_Y + 7, 2)
        self.x_2 = np.arange(int(LEN_X / 2) + 3)
        self.y_2 = np.arange(int(LEN_Y / 2) + 3)
        self.t = np.arange(0, int(LEN_X / 2) + 3, 0.5)
        self.s = np.arange(0, int(LEN_Y / 2) + 3, 0.5)
        self.x_k = slice(1, LEN_X + 3)
        self.y_k = slice(1, LEN_Y + 3)
        self.x_128 = slice(4, LEN_X + 4)
        self.y_128 = slice(4, LEN_Y + 4)

        self.LEN_X = LEN_X
        self.LEN_Y = LEN_Y

        tf.debugging.set_log_device_placement(LOG_DEVICE_PLACEMENT)

    def upsample(self, grd):
        grd = resample_2d_linear(self.x_2, self.y_2, grd, self.t, self.s)
        grd = grd[:, self.y_k, self.x_k]
        return grd

    def get_in_mem_data_batch(self, idxs, is_training):
        if is_training:
            data_files = self.train_data_files
            label_files = self.train_label_files
        else:
            data_files = self.test_data_files
            label_files = self.test_label_files

        data_s = []
        label_s = []
        for k in idxs:
            f = data_files[k]
            nda = np.load(f)
            data_s.append(nda)

            f = label_files[k]
            nda = np.load(f)
            label_s.append(nda)

        input_data = np.concatenate(data_s)
        input_label = np.concatenate(label_s)

        data_norm = []
        # for param in data_params_half:
        #     idx = params.index(param)
        #     tmp = input_data[:, idx, :, :]
        #     tmp = np.where(np.isnan(tmp), 0.0, tmp)
        #     tmp = tmp[:, self.slc_y_m, self.slc_x_m]
        #     tmp = self.upsample(tmp)
        #     if DO_SMOOTH:
        #         tmp = smooth_2d(tmp)
        #     tmp = normalize(tmp, param, mean_std_dct)
        #     # tmp = scale(tmp, param, mean_std_dct)
        #     data_norm.append(tmp)

        # High res reflectance ----------
        # idx = params_i.index('refl_0_65um_nom')
        # tmp = input_label[:, idx, ::2, ::2]
        # tmp = np.where(np.isnan(tmp), 0, tmp)
        # tmp = normalize(tmp, 'refl_0_65um_nom', mean_std_dct)
        # # tmp = scale(tmp, 'refl_0_65um_nom', mean_std_dct)
        # data_norm.append(tmp[:, self.slc_y, self.slc_x])

        # High res reflectance down 2 ---------
        # idx = params_i.index('refl_0_65um_nom')
        # tmp = input_label[:, idx, ::2, ::2]
        # tmp = tmp.copy()
        # tmp = np.where(np.isnan(tmp), 0.0, tmp)
        # tmp = tmp[:, self.slc_y_2, self.slc_x_2]
        # tmp = self.upsample(tmp)
        # if DO_SMOOTH:
        #     tmp = smooth_2d(tmp)
        # tmp = normalize(tmp, label_param, mean_std_dct)
        # data_norm.append(tmp)

        # Low-resolution cloud optical depth, upsampled 2x onto the target grid,
        # is currently the only input channel.
        tmp = input_label[:, label_idx_i, ::2, ::2]
        tmp = tmp.copy()
        tmp = np.where(np.isnan(tmp), 0.0, tmp)
        tmp = tmp[:, self.slc_y_2, self.slc_x_2]
        tmp = self.upsample(tmp)
        if DO_SMOOTH:
            tmp = smooth_2d(tmp)
        # tmp = normalize(tmp, label_param, mean_std_dct)
        tmp = scale(tmp, label_param, mean_std_dct)
        data_norm.append(tmp)

        # for param in sub_fields:
        #     idx = params.index(param)
        #     tmp = input_data[:, idx, :, :]
        #     tmp = np.where(np.isnan(tmp), 0.0, tmp)
        #     tmp = tmp[:, self.slc_y_m, self.slc_x_m]
        #     tmp = self.upsample(tmp)
        #     # if param != 'refl_substddev_ch01':
        #     if False:
        #         tmp = normalize(tmp, 'refl_0_65um_nom', mean_std_dct)
        #     else:
        #         tmp = np.where(np.isnan(tmp), 0.0, tmp)
        #     data_norm.append(tmp)

        # for param in sub_fields:
        #     idx = params.index(param)
        #     tmp = input_data[:, idx, :, :]
        #     tmp = upsample_nearest(tmp)
        #     tmp = tmp[:, self.slc_y, self.slc_x]
        #     if param != 'refl_substddev_ch01':
        #         tmp = normalize(tmp, 'refl_0_65um_nom', mean_std_dct)
        #     else:
        #         tmp = np.where(np.isnan(tmp), 0, tmp)
        #     data_norm.append(tmp)
        # ---------------------------------------------------
        data = np.stack(data_norm, axis=3)
        data = data.astype(np.float32)
        # -----------------------------------------------------
        # Cloud optical depth on the fine grid is the regression target.
        label = input_label[:, label_idx_i, ::2, ::2]
        label = label.copy()
        # label = normalize(label, label_param, mean_std_dct)
        label = scale(label, label_param, mean_std_dct)
        label = label[:, self.y_128, self.x_128]
        label = np.expand_dims(label, axis=3)
        label = label.astype(np.float32)

        return data, label

    def get_in_mem_data_batch_train(self, idxs):
        return self.get_in_mem_data_batch(idxs, True)

    def get_in_mem_data_batch_test(self, idxs):
        return self.get_in_mem_data_batch(idxs, False)

    @tf.function(input_signature=[tf.TensorSpec(None, tf.int32)])
    def data_function(self, indexes):
        out = tf.numpy_function(self.get_in_mem_data_batch_train, [indexes], [tf.float32, tf.float32])
        return out

    @tf.function(input_signature=[tf.TensorSpec(None, tf.int32)])
    def data_function_test(self, indexes):
        out = tf.numpy_function(self.get_in_mem_data_batch_test, [indexes], [tf.float32, tf.float32])
        return out

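    # The two tf.function wrappers above bridge the NumPy loaders into the
    # tf.data graph: the dataset yields batches of integer file indices, and
    # tf.numpy_function runs the Python loader eagerly, returning (data, label)
    # float32 tensors. One consequence (an assumption based on tf.numpy_function
    # semantics) is that these loaders cannot be serialized into a SavedModel
    # and the output shapes are unknown to the graph.
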
    def get_train_dataset(self, num_files):
        def integer_gen(limit):
            n = 0
            while n < limit:
                yield n
                n += 1

        num_gen = integer_gen(num_files)
        gen = make_tf_callable_generator(num_gen)
        dataset = tf.data.Dataset.from_generator(gen, output_types=tf.int32)
        dataset = dataset.batch(PROC_BATCH_SIZE)
        dataset = dataset.map(self.data_function, num_parallel_calls=8)
        if DO_AUGMENT:
            dataset = dataset.map(augment_image(), num_parallel_calls=8)
        dataset = dataset.cache()
        dataset = dataset.shuffle(PROC_BATCH_BUFFER_SIZE, reshuffle_each_iteration=False)
        dataset = dataset.prefetch(buffer_size=1)
        self.train_dataset = dataset

    def get_test_dataset(self, num_files):
        def integer_gen(limit):
            n = 0
            while n < limit:
                yield n
                n += 1

        num_gen = integer_gen(num_files)
        gen = make_tf_callable_generator(num_gen)
        dataset = tf.data.Dataset.from_generator(gen, output_types=tf.int32)
        dataset = dataset.batch(PROC_BATCH_SIZE)
        dataset = dataset.map(self.data_function_test, num_parallel_calls=8)
        dataset = dataset.cache()
        self.test_dataset = dataset

    def setup_pipeline(self, train_data_files, train_label_files, test_data_files, test_label_files, num_train_samples):
        self.train_data_files = train_data_files
        self.train_label_files = train_label_files
        self.test_data_files = test_data_files
        self.test_label_files = test_label_files

        self.get_train_dataset(len(train_data_files))
        self.get_test_dataset(len(test_data_files))
        self.num_data_samples = num_train_samples  # approximately

        print('datetime: ', now)
        print('training and test data: ')
        print('---------------------------')
        print('num train files: ', len(train_data_files))
        print('BATCH SIZE: ', BATCH_SIZE)
        print('num test files: ', len(test_data_files))
        print('setup_pipeline: Done')

    def setup_test_pipeline(self, test_data_files, test_label_files):
        self.test_data_files = test_data_files
        self.test_label_files = test_label_files
        # get_test_dataset expects a file count; the original passed an index
        # array (np.arange(...)), which breaks integer_gen's loop bound.
        self.get_test_dataset(len(test_data_files))
        print('setup_test_pipeline: Done')

    def build_srcnn(self, do_drop_out=False, do_batch_norm=False, drop_rate=0.5, factor=2):
        print('build_cnn')
        padding = "SAME"
        # activation = tf.nn.elu
        activation = tf.nn.relu
        momentum = 0.99

        num_filters = 32

        input_2d = self.inputs[0]
        print('input: ', input_2d.shape)

        # The first convolution uses VALID padding, trimming (KERNEL_SIZE - 1)
        # rows/columns; the border handling at inference time accounts for this.
        conv = conv_b = tf.keras.layers.Conv2D(num_filters, kernel_size=KERNEL_SIZE, kernel_initializer='he_uniform',
                                               activation=activation, padding='VALID')(input_2d)
        print(conv.shape)

        # if NOISE_TRAINING:
        #     conv = conv_b = tf.keras.layers.GaussianNoise(stddev=NOISE_STDDEV)(conv)

        scale = 0.2
        conv_b = build_residual_conv2d_block(conv_b, num_filters, 'Residual_Block_1', kernel_size=KERNEL_SIZE, scale=scale)
        conv_b = build_residual_conv2d_block(conv_b, num_filters, 'Residual_Block_2', kernel_size=KERNEL_SIZE, scale=scale)
        conv_b = build_residual_conv2d_block(conv_b, num_filters, 'Residual_Block_3', kernel_size=KERNEL_SIZE, scale=scale)
        # conv_b = build_residual_conv2d_block(conv_b, num_filters, 'Residual_Block_4', kernel_size=KERNEL_SIZE, scale=scale)
        # conv_b = build_residual_conv2d_block(conv_b, num_filters, 'Residual_Block_5', kernel_size=KERNEL_SIZE, scale=scale)
        # conv_b = build_residual_conv2d_block(conv_b, num_filters, 'Residual_Block_6', kernel_size=KERNEL_SIZE, scale=scale)

        conv_b = tf.keras.layers.Conv2D(num_filters, kernel_size=3, strides=1, activation=activation,
                                        kernel_initializer='he_uniform', padding=padding)(conv_b)

        # conv = conv + conv_b
        conv = conv_b
        print(conv.shape)

        # A 1x1 convolution acts as a per-pixel dense layer for the regression output.
        self.logits = tf.keras.layers.Conv2D(1, kernel_size=1, strides=1, padding=padding, name='regression')(conv)
        print(self.logits.shape)

    def build_training(self):
        self.loss = tf.keras.losses.MeanSquaredError()  # Regression
        # self.loss = tf.keras.losses.MeanAbsoluteError()

        # decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps)
        initial_learning_rate = 0.001
        decay_rate = 0.95
        steps_per_epoch = int(self.num_data_samples/BATCH_SIZE)  # one epoch
        decay_steps = int(steps_per_epoch) * 4
        print('initial rate, decay rate, steps/epoch, decay steps: ', initial_learning_rate, decay_rate, steps_per_epoch, decay_steps)
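        # Worked example with the default num_data_samples=50000 (see run()) and
        # BATCH_SIZE=128: steps_per_epoch = 390, decay_steps = 1560, so the
        # learning rate is multiplied by 0.95 roughly every 4 epochs
        # (0.001 -> ~0.00095 after epoch 4, ~0.0009 after epoch 8, ...).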
        self.learningRateSchedule = tf.keras.optimizers.schedules.ExponentialDecay(initial_learning_rate, decay_steps, decay_rate)
        optimizer = tf.keras.optimizers.Adam(learning_rate=self.learningRateSchedule)

        if TRACK_MOVING_AVERAGE:
            # Not really sure this works properly (from tfa)
            # optimizer = tfa.optimizers.MovingAverage(optimizer)
            self.ema = tf.train.ExponentialMovingAverage(decay=0.9999)

        self.optimizer = optimizer
        self.initial_learning_rate = initial_learning_rate

    def build_evaluation(self):
        self.train_accuracy = tf.keras.metrics.MeanAbsoluteError(name='train_accuracy')
        self.test_accuracy = tf.keras.metrics.MeanAbsoluteError(name='test_accuracy')
        self.train_loss = tf.keras.metrics.Mean(name='train_loss')
        self.test_loss = tf.keras.metrics.Mean(name='test_loss')

    @tf.function(input_signature=[tf.TensorSpec(None, tf.float32), tf.TensorSpec(None, tf.float32)])
    def train_step(self, inputs, labels):
        with tf.GradientTape() as tape:
            pred = self.model([inputs], training=True)
            loss = self.loss(labels, pred)
            total_loss = loss
            if len(self.model.losses) > 0:
                reg_loss = tf.math.add_n(self.model.losses)
                total_loss = loss + reg_loss
        gradients = tape.gradient(total_loss, self.model.trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables))
        if TRACK_MOVING_AVERAGE:
            self.ema.apply(self.model.trainable_variables)

        self.train_loss(loss)
        self.train_accuracy(labels, pred)

        return loss

    @tf.function(input_signature=[tf.TensorSpec(None, tf.float32), tf.TensorSpec(None, tf.float32)])
    def test_step(self, inputs, labels):
        pred = self.model([inputs], training=False)
        t_loss = self.loss(labels, pred)

        self.test_loss(t_loss)
        self.test_accuracy(labels, pred)

    # @tf.function(input_signature=[tf.TensorSpec(None, tf.float32), tf.TensorSpec(None, tf.float32)])
    # The tf.function decorator is commented out because predict() calls
    # pred.numpy(), which requires eager execution.
    def predict(self, inputs, labels):
        pred = self.model([inputs], training=False)
        # t_loss = self.loss(tf.squeeze(labels, axis=[3]), pred)
        t_loss = self.loss(labels, pred)

        self.test_labels.append(labels)
        self.test_preds.append(pred.numpy())

        self.test_loss(t_loss)
        self.test_accuracy(labels, pred)

    def reset_test_metrics(self):
        self.test_loss.reset_states()
        self.test_accuracy.reset_states()

    def do_training(self, ckpt_dir=None):
        if ckpt_dir is None:  # train from scratch
            if not os.path.exists(modeldir):
                os.mkdir(modeldir)
            ckpt = tf.train.Checkpoint(step=tf.Variable(1), model=self.model)
            ckpt_manager = tf.train.CheckpointManager(ckpt, modeldir, max_to_keep=3)
        else:  # restore from and continue training in ckpt_dir
            ckpt = tf.train.Checkpoint(step=tf.Variable(1), model=self.model)
            ckpt_manager = tf.train.CheckpointManager(ckpt, ckpt_dir, max_to_keep=3)
            ckpt.restore(ckpt_manager.latest_checkpoint)

        self.writer_train = tf.summary.create_file_writer(os.path.join(logdir, 'plot_train'))
        self.writer_valid = tf.summary.create_file_writer(os.path.join(logdir, 'plot_valid'))
        self.writer_train_valid_loss = tf.summary.create_file_writer(os.path.join(logdir, 'plot_train_valid_loss'))

        step = 0
        total_time = 0
        # np.float was removed from NumPy; use the float64 max explicitly.
        best_test_loss = np.finfo(np.float64).max

        if EARLY_STOP:
            es = EarlyStop()

        for epoch in range(NUM_EPOCHS):
            self.train_loss.reset_states()
            self.train_accuracy.reset_states()

            t0 = datetime.datetime.now().timestamp()

            proc_batch_cnt = 0
            n_samples = 0

            for data, label in self.train_dataset:
                trn_ds = tf.data.Dataset.from_tensor_slices((data, label))
                trn_ds = trn_ds.batch(BATCH_SIZE)
                for mini_batch in trn_ds:
                    if self.learningRateSchedule is not None:
                        loss = self.train_step(mini_batch[0], mini_batch[1])

                    if (step % 100) == 0:
                        with self.writer_train.as_default():
                            tf.summary.scalar('loss_trn', loss.numpy(), step=step)
                            # _decayed_lr is a private Keras optimizer helper, used here only for logging.
                            tf.summary.scalar('learning_rate', self.optimizer._decayed_lr('float32').numpy(), step=step)
                            tf.summary.scalar('num_train_steps', step, step=step)
                            tf.summary.scalar('num_epochs', epoch, step=step)

                        self.reset_test_metrics()
                        for data_tst, label_tst in self.test_dataset:
                            tst_ds = tf.data.Dataset.from_tensor_slices((data_tst, label_tst))
                            tst_ds = tst_ds.batch(BATCH_SIZE)
                            for mini_batch_test in tst_ds:
                                self.test_step(mini_batch_test[0], mini_batch_test[1])
                        with self.writer_valid.as_default():
                            tf.summary.scalar('loss_val', self.test_loss.result(), step=step)
                            tf.summary.scalar('acc_val', self.test_accuracy.result(), step=step)

                        with self.writer_train_valid_loss.as_default():
                            tf.summary.scalar('loss_trn', loss.numpy(), step=step)
                            tf.summary.scalar('loss_val', self.test_loss.result(), step=step)

                        print('****** test loss, acc, lr: ', self.test_loss.result().numpy(), self.test_accuracy.result().numpy(),
                              self.optimizer._decayed_lr('float32').numpy())

                    step += 1

                print('train loss: ', loss.numpy())
                proc_batch_cnt += 1
                n_samples += data.shape[0]
                print('proc_batch_cnt: ', proc_batch_cnt, n_samples)

            t1 = datetime.datetime.now().timestamp()
            print('End of Epoch: ', epoch+1, 'elapsed time: ', (t1-t0))
            total_time += (t1-t0)

            self.reset_test_metrics()
            for data, label in self.test_dataset:
                ds = tf.data.Dataset.from_tensor_slices((data, label))
                ds = ds.batch(BATCH_SIZE)
                for mini_batch in ds:
                    self.test_step(mini_batch[0], mini_batch[1])
            print('loss, acc: ', self.test_loss.result().numpy(), self.test_accuracy.result().numpy())
            print('------------------------------------------------------')

            tst_loss = self.test_loss.result().numpy()
            if tst_loss < best_test_loss:
                best_test_loss = tst_loss
                ckpt_manager.save()

            if EARLY_STOP and es.check_stop(tst_loss):
                break

        print('total time: ', total_time)
        self.writer_train.close()
        self.writer_valid.close()
        self.writer_train_valid_loss.close()

    def build_model(self):
        self.build_srcnn()
        self.model = tf.keras.Model(self.inputs, self.logits)

    def restore(self, ckpt_dir):
        ckpt = tf.train.Checkpoint(step=tf.Variable(1), model=self.model)
        ckpt_manager = tf.train.CheckpointManager(ckpt, ckpt_dir, max_to_keep=3)
        ckpt.restore(ckpt_manager.latest_checkpoint)

        self.reset_test_metrics()

        for data, label in self.test_dataset:
            ds = tf.data.Dataset.from_tensor_slices((data, label))
            ds = ds.batch(BATCH_SIZE)
            for mini_batch_test in ds:
                self.predict(mini_batch_test[0], mini_batch_test[1])
        print('loss, acc: ', self.test_loss.result().numpy(), self.test_accuracy.result().numpy())

        labels = np.concatenate(self.test_labels)
        preds = np.concatenate(self.test_preds)
        print(labels.shape, preds.shape)

        # get_in_mem_data_batch applies scale() to the labels, so descale() is
        # assumed to be the matching inverse here; the denormalize() calls
        # (previously active) pair with the commented-out normalize() path.
        labels_denorm = descale(labels, label_param, mean_std_dct)
        preds_denorm = descale(preds, label_param, mean_std_dct)
        # labels_denorm = denormalize(labels, label_param, mean_std_dct)
        # preds_denorm = denormalize(preds, label_param, mean_std_dct)

        return labels_denorm, preds_denorm

    def do_evaluate(self, inputs, ckpt_dir):
        ckpt = tf.train.Checkpoint(step=tf.Variable(1), model=self.model)
        ckpt_manager = tf.train.CheckpointManager(ckpt, ckpt_dir, max_to_keep=3)
        ckpt.restore(ckpt_manager.latest_checkpoint)

        self.reset_test_metrics()

        pred = self.model([inputs], training=False)
        self.test_probs = pred
        pred = pred.numpy()

        return pred

    def run(self, directory, ckpt_dir=None, num_data_samples=50000):
        train_data_files = glob.glob(directory+'train*mres*.npy')
        valid_data_files = glob.glob(directory+'valid*mres*.npy')
        train_label_files = [f.replace('mres', 'ires') for f in train_data_files]
        valid_label_files = [f.replace('mres', 'ires') for f in valid_data_files]

        self.setup_pipeline(train_data_files, train_label_files, valid_data_files, valid_label_files, num_data_samples)
        self.build_model()
        self.build_training()
        self.build_evaluation()
        self.do_training(ckpt_dir=ckpt_dir)

    def run_restore(self, directory, ckpt_dir):
        self.num_data_samples = 1000
        valid_data_files = glob.glob(directory + 'valid*mres*.npy')
        valid_label_files = [f.replace('mres', 'ires') for f in valid_data_files]
        self.setup_test_pipeline(valid_data_files, valid_label_files)
        self.build_model()
        self.build_training()
        self.build_evaluation()
        return self.restore(ckpt_dir)

    def run_evaluate(self, data, ckpt_dir):
        # data = tf.convert_to_tensor(data, dtype=tf.float32)
        self.num_data_samples = 80000
        self.build_model()
        self.build_training()
        self.build_evaluation()
        return self.do_evaluate(data, ckpt_dir)

    def setup_inference(self, ckpt_dir):
        self.num_data_samples = 80000
        self.build_model()
        self.build_training()
        self.build_evaluation()

        ckpt = tf.train.Checkpoint(step=tf.Variable(1), model=self.model)
        ckpt_manager = tf.train.CheckpointManager(ckpt, ckpt_dir, max_to_keep=3)
        ckpt.restore(ckpt_manager.latest_checkpoint)

    def do_inference(self, inputs):
        self.reset_test_metrics()

        pred = self.model([inputs], training=False)
        self.test_probs = pred
        pred = pred.numpy()

        return pred

    def run_inference(self, in_file, out_file):
        gc.collect()
        t0 = time.time()

        # Fixed sub-window of the full-disk image.
        s_x = slice(1812, 3612)
        s_y = slice(1812, 3612)

        h5f = h5py.File(in_file, 'r')
        refl = get_grid_values_all(h5f, 'refl_0_65um_nom')
        print('FD dims: ', refl.shape)
        refl = refl[s_y, s_x]
        LEN_Y, LEN_X = refl.shape
        print('sub dims: ', refl.shape)

        bt = get_grid_values_all(h5f, 'temp_11_0um_nom')
        bt = bt[s_y, s_x]

        cld_opd = get_grid_values_all(h5f, 'cld_opd_dcomp')
        cld_opd = cld_opd[s_y, s_x]

        refl_sub_lo = get_grid_values_all(h5f, 'refl_0_65um_nom_min_sub')
        refl_sub_lo = refl_sub_lo[s_y, s_x]
        refl_sub_hi = get_grid_values_all(h5f, 'refl_0_65um_nom_max_sub')
        refl_sub_hi = refl_sub_hi[s_y, s_x]
        refl_sub_std = get_grid_values_all(h5f, 'refl_0_65um_nom_stddev_sub')
        refl_sub_std = refl_sub_std[s_y, s_x]

        t1 = time.time()
        print('read data time: ', (t1 - t0))

        # Trim the border, apparently to account for the VALID convolution and
        # the crops in the upsampling geometry, then apply the 2x factor.
        LEN_Y -= 8
        LEN_X -= 8
        LEN_Y = 2 * (LEN_Y - 8)
        LEN_X = 2 * (LEN_X - 8)

        t0 = time.time()
        cld_opd_sres, LEN_Y_in, LEN_X_in = self.run_inference_(bt, refl, cld_opd, refl_sub_lo, refl_sub_hi, refl_sub_std, LEN_Y, LEN_X)
        t1 = time.time()
        print('inference time: ', (t1 - t0))
        print(cld_opd_sres.shape)

        # float32: the network emits continuous optical depth values; the
        # original int8 buffer would truncate them.
        cld_opd_sres_out = np.zeros((LEN_Y_in, LEN_X_in), dtype=np.float32)
        border = int((KERNEL_SIZE - 1) / 2)
        cld_opd_sres_out[border:LEN_Y_in - border, border:LEN_X_in - border] = cld_opd_sres[0, :, :, 0]

        h5f.close()

        if out_file is not None:
            np.save(out_file, (cld_opd_sres_out, bt, refl, cld_opd))
        else:
            return cld_opd_sres

    def run_inference_test(self, in_file, out_file):
        gc.collect()
        t0 = time.time()

        group_name_i = 'super/'
        group_name_m = 'orig/'
        target_param = 'cld_opd_dcomp_1'

        h5f = h5py.File(in_file, 'r')
        refl = get_grid_values_all(h5f, group_name_i+'refl_ch01')
        print('FD dims: ', refl.shape)
        LEN_Y, LEN_X = refl.shape
        LEN_Y //= 2
        LEN_X //= 2
        print('Half FD: ', LEN_Y, LEN_X)
        refl = refl[::2, ::2]

        bt = get_grid_values_all(h5f, group_name_m+'temp_ch38')
        cld_opd = get_grid_values_all(h5f, group_name_m+target_param)
        print('BT, OPD: ', bt.shape, cld_opd.shape)

        # refl_sub_lo = get_grid_values_all(h5f, 'refl_0_65um_nom_min_sub')
        # refl_sub_hi = get_grid_values_all(h5f, 'refl_0_65um_nom_max_sub')
        # refl_sub_std = get_grid_values_all(h5f, 'refl_0_65um_nom_stddev_sub')

        t1 = time.time()
        print('read data time: ', (t1 - t0))

        LEN_Y -= 8
        LEN_X -= 8
        # LEN_Y = 2 * (LEN_Y - 8)
        # LEN_X = 2 * (LEN_X - 8)

        t0 = time.time()
        # cld_opd_sres, LEN_Y_in, LEN_X_in = self.run_inference_(bt, refl, cld_opd, refl_sub_lo, refl_sub_hi, refl_sub_std, LEN_Y, LEN_X)
        cld_opd_sres, LEN_Y_in, LEN_X_in = self.run_inference_(bt, refl, cld_opd, None, None, None, LEN_Y, LEN_X)
        t1 = time.time()
        print('inference time: ', (t1 - t0))
        print(cld_opd_sres.shape)

        # float32 for the same reason as in run_inference (int8 would truncate).
        cld_opd_sres_out = np.zeros((LEN_Y_in, LEN_X_in), dtype=np.float32)
        border = int((KERNEL_SIZE - 1) / 2)
        cld_opd_sres_out[border:LEN_Y_in - border, border:LEN_X_in - border] = cld_opd_sres[0, :, :, 0]

        h5f.close()

        if out_file is not None:
            np.save(out_file, (cld_opd_sres_out, bt, refl, cld_opd))
        else:
            return cld_opd_sres

    def run_inference_(self, bt, refl, cld_opd, refl_sub_lo, refl_sub_hi, refl_sub_std, LEN_Y, LEN_X):
        # Rebuild the crop/upsample geometry for this scene's dimensions.
        self.slc_x_m = slice(1, int(LEN_X / 2) + 4)
        self.slc_y_m = slice(1, int(LEN_Y / 2) + 4)
        self.slc_x = slice(3, LEN_X + 5)
        self.slc_y = slice(3, LEN_Y + 5)
        self.slc_x_2 = slice(2, LEN_X + 7, 2)
        self.slc_y_2 = slice(2, LEN_Y + 7, 2)
        self.x_2 = np.arange(int(LEN_X / 2) + 3)
        self.y_2 = np.arange(int(LEN_Y / 2) + 3)
        self.t = np.arange(0, int(LEN_X / 2) + 3, 0.5)
        self.s = np.arange(0, int(LEN_Y / 2) + 3, 0.5)
        self.x_k = slice(1, LEN_X + 3)
        self.y_k = slice(1, LEN_Y + 3)
        self.LEN_X = LEN_X
        self.LEN_Y = LEN_Y

        t0 = time.time()
        # bt = np.where(np.isnan(bt), 0, bt)
        # bt = bt[self.slc_y_m, self.slc_x_m]
        # bt = np.expand_dims(bt, axis=0)
        # # bt_us = upsample_static(bt, x_2, y_2, t, s, None, None)
        # bt_us = self.upsample(bt)
        # if DO_SMOOTH:
        #     bt_us = smooth_2d(bt_us)
        # bt_us = normalize(bt_us, 'temp_11_0um_nom', mean_std_dct)

        # refl = np.where(np.isnan(refl), 0, refl)
        # # refl = refl[self.slc_y_m, self.slc_x_m]
        # refl = refl[self.slc_y, self.slc_x]
        # refl = np.expand_dims(refl, axis=0)
        # # refl_us = self.upsample(refl)
        # refl_us = refl
        # if DO_SMOOTH:
        #     refl_us = smooth_2d(refl)
        # refl_us = normalize(refl_us, 'refl_0_65um_nom', mean_std_dct)

        cld_opd = np.where(np.isnan(cld_opd), 0, cld_opd)
        cld_opd = cld_opd[self.slc_y_m, self.slc_x_m]
        cld_opd = np.expand_dims(cld_opd, axis=0)
        # cld_opd_us = upsample_static(cld_opd, x_2, y_2, t, s, None, None)
        cld_opd_us = self.upsample(cld_opd)
        if DO_SMOOTH:
            cld_opd_us = smooth_2d(cld_opd_us)
        # cld_opd_us = normalize(cld_opd_us, label_param, mean_std_dct)
        cld_opd_us = scale(cld_opd_us, label_param, mean_std_dct)

        # refl_sub_lo = np.expand_dims(refl_sub_lo, axis=0)
        # refl_sub_lo = upsample_nearest(refl_sub_lo)
        # refl_sub_lo = refl_sub_lo[:, self.slc_y, self.slc_x]
        # refl_sub_lo = normalize(refl_sub_lo, 'refl_0_65um_nom', mean_std_dct)
        #
        # refl_sub_hi = np.expand_dims(refl_sub_hi, axis=0)
        # refl_sub_hi = upsample_nearest(refl_sub_hi)
        # refl_sub_hi = refl_sub_hi[:, self.slc_y, self.slc_x]
        # refl_sub_hi = normalize(refl_sub_hi, 'refl_0_65um_nom', mean_std_dct)
        #
        # refl_sub_std = np.expand_dims(refl_sub_std, axis=0)
        # refl_sub_std = upsample_nearest(refl_sub_std)
        # refl_sub_std = refl_sub_std[:, self.slc_y, self.slc_x]

        t1 = time.time()
        print('upsample/normalize time: ', (t1 - t0))

        # data = np.stack([bt_us, refl_us, refl_sub_lo, refl_sub_hi, refl_sub_std, cld_opd_us], axis=3)
        # data = np.stack([bt_us, refl_us, cld_opd_us, refl_sub_std], axis=3)
        # data = np.stack([bt_us, refl_us, cld_opd_us], axis=3)
        data = np.stack([cld_opd_us], axis=3)
        print('data in: ', data.shape)

        cld_opd_sres = self.do_inference(data)
        # cld_opd_sres = denormalize(cld_opd_sres, label_param, mean_std_dct)
        cld_opd_sres = descale(cld_opd_sres, label_param, mean_std_dct)

        return cld_opd_sres, cld_opd_us.shape[1], cld_opd_us.shape[2]

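# A minimal end-to-end usage sketch (illustrative only; the file paths below are
# hypothetical placeholders, and the checkpoint directory must contain a
# checkpoint produced by do_training):
def _example_inference_usage():
    nn = SRCNN()
    nn.setup_inference('/path/to/ckpt_dir')
    # Writes (cld_opd_sres_out, bt, refl, cld_opd) to the .npy output file.
    nn.run_inference('/path/to/clavrx_abi_l2.h5', '/path/to/output.npy')
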
def run_restore_static(directory, ckpt_dir, out_file=None):
    nn = SRCNN()
    labels_denorm, preds_denorm = nn.run_restore(directory, ckpt_dir)

    if out_file is not None:
        np.save(out_file, [labels_denorm, preds_denorm])

def run_evaluate_static(in_file, out_file, ckpt_dir):
    h5f = h5py.File(in_file, 'r')
    refl = get_grid_values_all(h5f, 'refl_0_65um_nom')
    LEN_Y, LEN_X = refl.shape
    print(LEN_Y, LEN_X)

    bt = get_grid_values_all(h5f, 'temp_11_0um_nom')
    cld_opd = get_grid_values_all(h5f, 'cld_opd_dcomp_1')

    refl_sub_lo = get_grid_values_all(h5f, 'refl_0_65um_nom_min_sub')
    refl_sub_hi = get_grid_values_all(h5f, 'refl_0_65um_nom_max_sub')
    refl_sub_std = get_grid_values_all(h5f, 'refl_0_65um_nom_stddev_sub')

    nn = SRCNN()

    slc_x = slice(0, (LEN_X - 16) + 4)
    slc_y = slice(0, (LEN_Y - 16) + 4)
    x_2 = np.arange((LEN_X - 16) + 4)
    y_2 = np.arange((LEN_Y - 16) + 4)
    t = np.arange(0, (LEN_X - 16) + 4, 0.5)
    s = np.arange(0, (LEN_Y - 16) + 4, 0.5)

    # Fill reflectance NaNs with 0 (the original filled from bt, apparently a typo).
    refl = np.where(np.isnan(refl), 0, refl)
    refl = refl[slc_y, slc_x]
    refl = np.expand_dims(refl, axis=0)
    refl_us = upsample_static(refl, x_2, y_2, t, s, None, None)
    print(refl_us.shape)
    refl_us = normalize(refl_us, 'refl_0_65um_nom', mean_std_dct)
    print('REFL done')

    bt = np.where(np.isnan(bt), 0, bt)
    bt = bt[slc_y, slc_x]
    bt = np.expand_dims(bt, axis=0)
    bt_us = upsample_static(bt, x_2, y_2, t, s, None, None)
    bt_us = normalize(bt_us, 'temp_11_0um_nom', mean_std_dct)
    print('BT done')

    refl_sub_lo = refl_sub_lo[slc_y, slc_x]
    refl_sub_lo = np.expand_dims(refl_sub_lo, axis=0)
    refl_sub_lo = upsample_nearest(refl_sub_lo)
    refl_sub_lo = normalize(refl_sub_lo, 'refl_0_65um_nom', mean_std_dct)

    refl_sub_hi = refl_sub_hi[slc_y, slc_x]
    refl_sub_hi = np.expand_dims(refl_sub_hi, axis=0)
    refl_sub_hi = upsample_nearest(refl_sub_hi)
    refl_sub_hi = normalize(refl_sub_hi, 'refl_0_65um_nom', mean_std_dct)

    refl_sub_std = refl_sub_std[slc_y, slc_x]
    refl_sub_std = np.expand_dims(refl_sub_std, axis=0)
    refl_sub_std = upsample_nearest(refl_sub_std)

    cld_opd = np.where(np.isnan(cld_opd), 0, cld_opd)
    cld_opd = cld_opd[slc_y, slc_x]
    cld_opd = np.expand_dims(cld_opd, axis=0)
    cld_opd_us = upsample_static(cld_opd, x_2, y_2, t, s, None, None)
    cld_opd_us = normalize(cld_opd_us, label_param, mean_std_dct)
    print('OPD done')

    # data = np.stack([bt_us, refl_us, refl_sub_lo, refl_sub_hi, refl_sub_std, cld_opd_us], axis=3)
    data = np.stack([bt_us, refl_us, cld_opd_us], axis=3)
    print('INPUT: ', data.shape)

    cld_opd_sres = nn.run_evaluate(data, ckpt_dir)
    # cld_opd_sres = descale(cld_opd_sres, label_param, mean_std_dct)
    cld_opd_sres = denormalize(cld_opd_sres, label_param, mean_std_dct)
    _, ylen, xlen, _ = cld_opd_sres.shape
    print('OUT: ', ylen, xlen)

    cld_opd_sres_out = np.zeros((2*LEN_Y, 2*LEN_X), dtype=np.float32)
    refl_out = np.zeros((LEN_Y, LEN_X), dtype=np.float32)
    cld_opd_out = np.zeros((LEN_Y, LEN_X), dtype=np.float32)
    border = int((KERNEL_SIZE - 1) / 2)
    cld_opd_sres_out[border:(border+ylen), border:(border+xlen)] = cld_opd_sres[0, :, :, 0]
    # refl_out[0:(ylen+2*border), 0:(xlen+2*border)] = refl[0, :, :]
    # cld_opd_out[0:(ylen+2*border), 0:(xlen+2*border)] = cld_opd[0, :, :]
    # refl_out = denormalize(refl_out, 'refl_0_65um_nom', mean_std_dct)
    # cld_opd_out = denormalize(cld_opd_out, label_param, mean_std_dct)
    h5f.close()

    if out_file is not None:
        # np.save(out_file, (cld_opd_sres_out, refl_out, cld_opd_out, cld_opd_hres))
        np.save(out_file, cld_opd_sres_out)
    else:
        return cld_opd_sres_out, bt, refl

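# Hypothetical invocation of the static evaluation helper (the paths are
# placeholders, not files shipped with this code):
#
#     run_evaluate_static('/path/to/clavrx_abi_l2.h5',
#                         '/path/to/opd_sres.npy',
#                         '/path/to/ckpt_dir')
#
# Note that run_evaluate_static builds a 3-channel input (bt, refl, cld_opd)
# while SRCNN.__init__ currently sets n_chans = 1, so this path assumes a model
# built with the matching channel configuration.
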
if __name__ == "__main__":
    nn = SRCNN()
    nn.run('matchup_filename')