Commit 20433b52 authored by tomrink

snapshot...

parent 57f221e8
@@ -104,6 +104,32 @@ def build_residual_conv2d_block(conv, num_filters, block_name, activation=tf.nn.
     return conv
 
 
+def build_residual_block_1x1(input_layer, num_filters, activation, block_name, padding='SAME', drop_rate=0.5,
+                             do_drop_out=True, do_batch_norm=True):
+    with tf.name_scope(block_name):
+        skip = input_layer
+
+        if do_drop_out:
+            input_layer = tf.keras.layers.Dropout(drop_rate)(input_layer)
+        if do_batch_norm:
+            input_layer = tf.keras.layers.BatchNormalization()(input_layer)
+        conv = tf.keras.layers.Conv2D(num_filters, kernel_size=1, strides=1, padding=padding, activation=activation)(input_layer)
+        print(conv.shape)
+
+        if do_drop_out:
+            conv = tf.keras.layers.Dropout(drop_rate)(conv)
+        if do_batch_norm:
+            conv = tf.keras.layers.BatchNormalization()(conv)
+        conv = tf.keras.layers.Conv2D(num_filters, kernel_size=1, strides=1, padding=padding, activation=None)(conv)
+
+        conv = conv + skip
+        conv = tf.keras.layers.LeakyReLU()(conv)
+        print(conv.shape)
+
+        return conv
+
+
 class CNN:
     def __init__(self):
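
For reference, a minimal sketch of how the new 1x1 residual block might be exercised on a dummy tensor (not part of the commit; the input shape, the disabled dropout/batch norm, and the block name are illustrative assumptions). Because the skip connection adds the block's input directly to the second 1x1 convolution's output, num_filters must match the channel count of input_layer.

import numpy as np
import tensorflow as tf

# Hypothetical usage of build_residual_block_1x1 as defined above.
x = tf.keras.layers.Input(shape=(8, 8, 64))   # channel count must equal num_filters
y = build_residual_block_1x1(x, 64, tf.nn.relu, 'demo_block',
                             do_drop_out=False, do_batch_norm=False)
model = tf.keras.Model(x, y)
out = model(np.zeros((1, 8, 8, 64), dtype=np.float32))
print(out.shape)   # expected: (1, 8, 8, 64)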
@@ -286,15 +312,15 @@ class CNN:
                     if np.sum(np.isnan(cell)) == 0:
                         cnt = np.sum(cell[t, :, ] == 1.0)
                         if cnt == 0:
-                            grd_down_2x[t, j, i] = 1
+                            grd_down_2x[t, j, i] = 0
                         elif cnt == 1:
-                            grd_down_2x[t, j, i] = 2
+                            grd_down_2x[t, j, i] = 1
                         elif cnt == 2:
-                            grd_down_2x[t, j, i] = 3
+                            grd_down_2x[t, j, i] = 2
                         elif cnt == 3:
-                            grd_down_2x[t, j, i] = 4
+                            grd_down_2x[t, j, i] = 3
                         elif cnt == 4:
-                            grd_down_2x[t, j, i] = 5
+                            grd_down_2x[t, j, i] = 4
                         pass
                     else:
                         grd_down_2x[t, j, i] = 0
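
An aside on the relabelling above: the 2x-downsampled grid now stores 0-based classes, so the value written is simply the count of pixels equal to 1.0 in the cell rather than that count plus one. A small illustration with a made-up 2x2 cell (not data from the repository):

import numpy as np

cell = np.array([[1.0, 0.0],
                 [1.0, 1.0]])
cnt = int(np.sum(cell == 1.0))    # 3 pixels equal to 1.0
old_label = cnt + 1               # previous mapping stored 1..5
new_label = cnt                   # mapping after this change stores 0..4
print(cnt, old_label, new_label)  # 3 4 3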
@@ -407,7 +433,7 @@ class CNN:
         activation = tf.nn.relu
         momentum = 0.99
 
-        num_filters = 64
+        num_filters = 32
 
         input_2d = self.inputs[0]
         print('input: ', input_2d.shape)
@@ -415,7 +441,7 @@
         conv = input_2d
         print('input: ', conv.shape)
 
-        conv = conv_b = tf.keras.layers.Conv2D(num_filters, kernel_size=3, kernel_initializer='he_uniform', activation=activation, padding='SAME')(input_2d)
+        conv = conv_b = tf.keras.layers.Conv2D(num_filters, kernel_size=2, strides=1, kernel_initializer='he_uniform', activation=activation, padding='SAME')(input_2d)
         print(conv.shape)
 
         if NOISE_TRAINING:
@@ -423,22 +449,17 @@
         scale = 0.2
 
-        conv_b = build_residual_conv2d_block(conv_b, num_filters, 'Residual_Block_1', scale=scale)
-        conv_b = build_residual_conv2d_block(conv_b, num_filters, 'Residual_Block_2', scale=scale)
-        conv_b = build_residual_conv2d_block(conv_b, num_filters, 'Residual_Block_3', scale=scale)
-        # conv_b = build_residual_conv2d_block(conv_b, num_filters, 'Residual_Block_4', scale=scale)
-        # conv_b = build_residual_conv2d_block(conv_b, num_filters, 'Residual_Block_5', scale=scale)
-        conv_b = tf.keras.layers.Conv2D(num_filters, kernel_size=3, strides=1, kernel_initializer='he_uniform', padding=padding)(conv_b)
+        conv_b = build_residual_block_1x1(conv_b, num_filters, activation, 'Residual_Block_1')
+        conv_b = build_residual_block_1x1(conv_b, num_filters, activation, 'Residual_Block_2')
+        conv_b = build_residual_block_1x1(conv_b, num_filters, activation, 'Residual_Block_3')
 
-        conv = conv + conv_b
+        # conv = conv + conv_b
+        conv = conv_b
         print(conv.shape)
 
-        self.logits = tf.keras.layers.Conv2D(1, kernel_size=3, strides=1, padding=padding, name='regression')(conv)
+        self.logits = tf.keras.layers.Conv2D(NumLogits, kernel_size=1, strides=1, padding=padding, name='regression')(conv)
         print(self.logits.shape)
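
To summarize the revised tail of the network, a hedged sketch follows (a reconstruction under assumptions, not code from the file): three 1x1 residual blocks now feed a 1x1 regression convolution, and the outer skip addition is dropped. NumLogits, activation, and padding are assumed to be defined elsewhere in the file; the defaults below are placeholders, and build_residual_block_1x1 is the function added earlier in this commit.

import tensorflow as tf

def build_head(conv_b, num_filters=32, activation=tf.nn.relu, padding='SAME', num_logits=1):
    # Three stacked 1x1 residual blocks replace the former 3x3 residual conv2d blocks.
    for name in ('Residual_Block_1', 'Residual_Block_2', 'Residual_Block_3'):
        conv_b = build_residual_block_1x1(conv_b, num_filters, activation, name)
    # The outer skip ("conv = conv + conv_b") is gone; the branch output is used directly.
    conv = conv_b
    # 1x1 regression head producing num_logits output channels per pixel.
    return tf.keras.layers.Conv2D(num_logits, kernel_size=1, strides=1,
                                  padding=padding, name='regression')(conv)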