Commit 791a4c44 authored by tomrink

snapshot...

parent d2e9b3e0
@@ -37,7 +37,7 @@ NOISE_TRAINING = False
 NOISE_STDDEV = 0.01
 DO_AUGMENT = True
-DO_SMOOTH = True
+DO_SMOOTH = False
 SIGMA = 1.0
 DO_ZERO_OUT = False
 DO_ESPCN = False  # Note: If True, cannot do mixed resolution input fields (Adjust accordingly below)
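For context on that note: DO_ESPCN presumably gates an ESPCN-style sub-pixel output, which rearranges extra channels onto a finer spatial grid and therefore needs every input field on the same resolution. A minimal sketch of the idea, not this module's code:

import tensorflow as tf

def espcn_upsample(x, factor=2, out_channels=1):
    # Project to factor**2 * out_channels feature maps, then pixel-shuffle
    # the extra depth into a (factor x factor) finer spatial grid.
    x = tf.keras.layers.Conv2D(out_channels * factor * factor, kernel_size=3, padding='SAME')(x)
    return tf.nn.depth_to_space(x, block_size=factor)

x = tf.random.normal((1, 64, 64, 8))
print(espcn_upsample(x).shape)  # (1, 128, 128, 1)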
@@ -62,7 +62,7 @@ IMG_DEPTH = 1
 # label_param = 'cld_opd_dcomp'
 label_param = 'cloud_probability'
-params = ['temp_11_0um_nom', 'temp_12_0um_nom', 'refl_0_65um_nom', label_param]
+params = ['temp_11_0um_nom', 'refl_0_65um_nom', label_param]
 data_params_half = ['temp_11_0um_nom']
 data_params_full = ['refl_0_65um_nom']
@@ -131,43 +131,6 @@ def build_residual_conv2d_block(conv, num_filters, block_name, activation=tf.nn.
     return conv
-def build_residual_block_conv2d_down2x(x_in, num_filters, activation, padding='SAME', drop_rate=0.5,
-                                       do_drop_out=True, do_batch_norm=True):
-    skip = x_in
-    conv = tf.keras.layers.Conv2D(num_filters, kernel_size=3, strides=1, padding=padding, activation=activation)(x_in)
-    conv = tf.keras.layers.MaxPool2D(padding=padding)(conv)
-    if do_drop_out:
-        conv = tf.keras.layers.Dropout(drop_rate)(conv)
-    if do_batch_norm:
-        conv = tf.keras.layers.BatchNormalization()(conv)
-    conv = tf.keras.layers.Conv2D(num_filters, kernel_size=3, strides=1, padding=padding, activation=activation)(conv)
-    if do_drop_out:
-        conv = tf.keras.layers.Dropout(drop_rate)(conv)
-    if do_batch_norm:
-        conv = tf.keras.layers.BatchNormalization()(conv)
-    conv = tf.keras.layers.Conv2D(num_filters, kernel_size=3, strides=1, padding=padding, activation=activation)(conv)
-    if do_drop_out:
-        conv = tf.keras.layers.Dropout(drop_rate)(conv)
-    if do_batch_norm:
-        conv = tf.keras.layers.BatchNormalization()(conv)
-    skip = tf.keras.layers.Conv2D(num_filters, kernel_size=3, strides=1, padding=padding, activation=None)(skip)
-    skip = tf.keras.layers.MaxPool2D(padding=padding)(skip)
-    if do_drop_out:
-        skip = tf.keras.layers.Dropout(drop_rate)(skip)
-    if do_batch_norm:
-        skip = tf.keras.layers.BatchNormalization()(skip)
-    conv = conv + skip
-    conv = tf.keras.layers.LeakyReLU()(conv)
-    print(conv.shape)
-    return conv
 def upsample(tmp):
     tmp = tmp[:, slc_y_2, slc_x_2]
     tmp = resample_2d_linear(x_2, y_2, tmp, t, s)
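The block deleted above downsamples by 2x: both the three-convolution path and the single-convolution projected skip pass through a stride-2 MaxPool2D before being summed and passed through LeakyReLU. A quick shape check, assuming the removed definition above were still in scope:

import tensorflow as tf

# Hypothetical shape check: MaxPool2D defaults to pool_size=2, so the output
# feature map is half the input resolution with num_filters channels.
x_in = tf.keras.Input(shape=(128, 128, 8))
y = build_residual_block_conv2d_down2x(x_in, num_filters=64, activation=tf.nn.relu)
print(y.shape)  # (None, 64, 64, 64)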
@@ -395,10 +358,6 @@ class SRCNN:
         # -----------------------------------------------------
         label = input_data[:, label_idx, :, :]
         label = label.copy()
-        # if DO_SMOOTH:
-        #     label = np.where(np.isnan(label), 0, label)
-        #     label = smooth_2d(label, sigma=SIGMA)
-        #     # label = median_filter_2d(label)
         label = label[:, y_128, x_128]
         label = get_label_data(label)
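smooth_2d itself is not part of this diff; if it is a plain Gaussian blur driven by the module-level SIGMA, the commented-out branch removed above amounts to something like the sketch below (an assumption about smooth_2d, not the project's implementation):

import numpy as np
from scipy.ndimage import gaussian_filter

def smooth_labels(label, sigma=1.0):
    # Zero-fill NaNs first, as the removed comment block did.
    label = np.where(np.isnan(label), 0, label)
    # Blur only the spatial axes of the (batch, y, x) label array.
    return gaussian_filter(label, sigma=(0, sigma, sigma))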
@@ -518,16 +477,13 @@ class SRCNN:
         conv_b = build_residual_conv2d_block(conv_b, num_filters, 'Residual_Block_4', kernel_size=KERNEL_SIZE, scale=scale)
         # conv_b = build_residual_conv2d_block(conv_b, num_filters, 'Residual_Block_5', kernel_size=KERNEL_SIZE, scale=scale)
         # conv_b = build_residual_conv2d_block(conv_b, num_filters, 'Residual_Block_6', kernel_size=KERNEL_SIZE, scale=scale)
-        conv_b = build_residual_conv2d_block(conv_b, num_filters, 'Residual_Block_5', kernel_size=KERNEL_SIZE, scale=scale)
-        conv_b = build_residual_block_conv2d_down2x(conv_b, num_filters, activation)
-        conv_b = build_residual_conv2d_block(conv_b, num_filters, 'Residual_Block_6', kernel_size=KERNEL_SIZE, scale=scale)
         conv_b = tf.keras.layers.Conv2D(num_filters, kernel_size=3, strides=1, activation=activation, kernel_initializer='he_uniform', padding=padding)(conv_b)
         # conv = conv + conv_b
-        conv = conv_b
+        conv = conv + conv_b
         print(conv.shape)
         if NumClasses == 2:
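build_residual_conv2d_block is defined earlier in the file (only its truncated signature appears in a hunk header above). Purely to illustrate the pattern this hunk composes, an EDSR-style scaled residual block might look like the sketch below; this is an assumption, not the file's actual body. The conv = conv + conv_b change then re-enables the long skip connection around the whole stack of such blocks.

import tensorflow as tf

def residual_conv2d_block_sketch(x, num_filters, block_name, activation=tf.nn.relu,
                                 kernel_size=3, scale=0.2, padding='SAME'):
    # out = x + scale * F(x): two convolutions, the second linear, plus an
    # identity skip scaled down to keep deep stacks of blocks stable.
    skip = x
    x = tf.keras.layers.Conv2D(num_filters, kernel_size, padding=padding,
                               activation=activation, name=block_name + '_conv_1')(x)
    x = tf.keras.layers.Conv2D(num_filters, kernel_size, padding=padding,
                               activation=None, name=block_name + '_conv_2')(x)
    return skip + x * scale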