Skip to content
Snippets Groups Projects
Commit 59faf5e0 authored by tomrink
Browse files

snapshot...

parent b7c85a94
No related branches found
No related tags found
No related merge requests found
......@@ -575,85 +575,60 @@ class IcingIntensityFCN:
# NOTE(review): SOURCE is a GitLab commit-diff rendering; removed (old) and
# added (new) lines are interleaved below with no +/- markers, and the scrape
# has stripped Python indentation. This span is the tail of build_cnn — its
# `def` line is outside the visible hunk, so the fragment is annotated only.
else:
input_2d = self.inputs[0]
# Stage 1: 5x5 conv -> max-pool -> batch-norm.
# old (removed): positional kernel size, strides given as a list
conv = tf.keras.layers.Conv2D(num_filters, 5, strides=[1, 1], padding=padding, activation=activation)(input_2d)
# new (added): equivalent layer, keyword kernel_size and scalar strides
conv = tf.keras.layers.Conv2D(num_filters, kernel_size=5, strides=1, padding=padding, activation=activation)(input_2d)
conv = tf.keras.layers.MaxPool2D(padding=padding)(conv)
conv = tf.keras.layers.BatchNormalization()(conv)
print(conv.shape)
# Stage 2: double the filter count, then 3x3 conv -> pool -> norm.
num_filters *= 2
# old (removed):
conv = tf.keras.layers.Conv2D(num_filters, 3, strides=[1, 1], padding=padding, activation=activation)(conv)
# new (added):
conv = tf.keras.layers.Conv2D(num_filters, kernel_size=3, strides=1, padding=padding, activation=activation)(conv)
conv = tf.keras.layers.MaxPool2D(padding=padding)(conv)
conv = tf.keras.layers.BatchNormalization()(conv)
print(conv.shape)
# Stage 3: same pattern.
num_filters *= 2
# old (removed):
conv = tf.keras.layers.Conv2D(num_filters, 3, strides=[1, 1], padding=padding, activation=activation)(conv)
# new (added):
conv = tf.keras.layers.Conv2D(num_filters, kernel_size=3, strides=1, padding=padding, activation=activation)(conv)
conv = tf.keras.layers.MaxPool2D(padding=padding)(conv)
conv = tf.keras.layers.BatchNormalization()(conv)
print(conv.shape)
# Stage 4: same pattern.
num_filters *= 2
# old (removed):
conv = tf.keras.layers.Conv2D(num_filters, 3, strides=[1, 1], padding=padding, activation=activation)(conv)
# new (added):
conv = tf.keras.layers.Conv2D(num_filters, kernel_size=3, strides=1, padding=padding, activation=activation)(conv)
conv = tf.keras.layers.MaxPool2D(padding=padding)(conv)
conv = tf.keras.layers.BatchNormalization()(conv)
print(conv.shape)
# A fifth stage was already commented out before this commit:
# num_filters *= 2
# conv = tf.keras.layers.Conv2D(num_filters, 3, strides=[1, 1], padding=padding, activation=activation)(conv)
# conv = tf.keras.layers.MaxPool2D(padding=padding)(conv)
# conv = tf.keras.layers.BatchNormalization()(conv)
# print(conv.shape)
# new (added): return the 4-D feature map so a fully-convolutional head
# can consume it directly.
return conv
# old (removed): previously the feature map was flattened for a Dense head.
flat = tf.keras.layers.Flatten()(conv)
return flat
# NOTE(review): diff rendering — the old method build_dnn (Dense
# residual-block head) and the new method build_fcl (1x1-convolution,
# fully-convolutional head) are interleaved below with no +/- markers;
# indentation was stripped by the scrape.
# old (removed) signature:
def build_dnn(self, input_layer=None):
# new (added) signature — input_layer is now required (the 4-D CNN output):
def build_fcl(self, input_layer):
print('build fully connected layer')
# new: channel count of the incoming feature map
# (assumes channels-last NHWC layout — TODO confirm against build_cnn)
num_filters = input_layer.shape[3]
drop_rate = 0.5
# activation = tf.nn.relu
# activation = tf.nn.elu
activation = tf.nn.leaky_relu
momentum = 0.99
# old (removed): pick the flat input and hidden width for the Dense head
if input_layer is not None:
flat = input_layer
n_hidden = input_layer.shape[1]
else:
flat = self.X_img
n_hidden = self.X_img.shape[1]
fac = 2
# old (removed): five stacked residual fully-connected blocks
fc = build_residual_block(flat, drop_rate, fac*n_hidden, activation, 'Residual_Block_1', doDropout=True, doBatchNorm=True)
fc = build_residual_block(fc, drop_rate, fac*n_hidden, activation, 'Residual_Block_2', doDropout=True, doBatchNorm=True)
fc = build_residual_block(fc, drop_rate, fac*n_hidden, activation, 'Residual_Block_3', doDropout=True, doBatchNorm=True)
fc = build_residual_block(fc, drop_rate, fac*n_hidden, activation, 'Residual_Block_4', doDropout=True, doBatchNorm=True)
# new (added): padding choice for the 1x1 convolutions
# padding = "VALID"
padding = "SAME"
fc = build_residual_block(fc, drop_rate, fac*n_hidden, activation, 'Residual_Block_5', doDropout=True, doBatchNorm=True)
# new (added): first 1x1 conv applied to the incoming feature map
conv = tf.keras.layers.Conv2D(num_filters, kernel_size=1, strides=1, padding=padding, activation=activation)(input_layer)
# fc = build_residual_block(fc, drop_rate, fac*n_hidden, activation, 'Residual_Block_6', doDropout=True, doBatchNorm=True)
#
# fc = build_residual_block(fc, drop_rate, fac*n_hidden, activation, 'Residual_Block_7', doDropout=True, doBatchNorm=True)
#
# fc = build_residual_block(fc, drop_rate, fac*n_hidden, activation, 'Residual_Block_8', doDropout=True, doBatchNorm=True)
# new (added): second 1x1 conv
conv = tf.keras.layers.Conv2D(num_filters, kernel_size=1, strides=1, padding=padding, activation=activation)(conv)
# old (removed): final Dense + batch-norm of the old head
fc = tf.keras.layers.Dense(n_hidden, activation=activation)(fc)
fc = tf.keras.layers.BatchNormalization()(fc)
# new (added): third 1x1 conv
conv = tf.keras.layers.Conv2D(num_filters, kernel_size=1, strides=1, padding=padding, activation=activation)(conv)
print(conv.shape)
# Output activation: sigmoid for the binary case, softmax for multi-class.
if NumClasses == 2:
activation = tf.nn.sigmoid # For binary
else:
activation = tf.nn.softmax # For multi-class
# Called logits, but these are actually probabilities, see activation
# old (removed): Dense output of NumLogits units
logits = tf.keras.layers.Dense(NumLogits, activation=activation)(fc)
# new (added): per-position single-channel 1x1 conv output
logits = tf.keras.layers.Conv2D(1, kernel_size=1, strides=1, padding=padding, activation=activation)(conv)
print(logits.shape)
# The head publishes its output on the instance for build_model to wrap.
self.logits = logits
# NOTE(review): diff rendering with hunk headers left in the text. The lines
# below the first header come from methods further down the class (original
# lines ~680 and ~701) — presumably the training and evaluation step bodies,
# not build_training itself; indentation was stripped by the scrape.
def build_training(self):
......@@ -705,6 +680,7 @@ class IcingIntensityFCN:
labels = mini_batch[2]
with tf.GradientTape() as tape:
pred = self.model(inputs, training=True)
# new (added): the FCN head emits a 4-D map; flatten it to
# (batch, NumLogits) before computing the loss
pred = tf.reshape(pred, (pred.shape[0], NumLogits))
loss = self.loss(labels, pred)
total_loss = loss
# Include any layer regularization losses the model accumulated.
if len(self.model.losses) > 0:
......@@ -725,6 +701,7 @@ class IcingIntensityFCN:
inputs = [mini_batch[0], mini_batch[1]]
labels = mini_batch[2]
pred = self.model(inputs, training=False)
# new (added): same reshape on the evaluation path
pred = tf.reshape(pred, (pred.shape[0], NumLogits))
t_loss = self.loss(labels, pred)
self.test_loss(t_loss)
......@@ -924,14 +901,14 @@ class IcingIntensityFCN:
f.close()
# NOTE(review): diff rendering — old and new lines interleaved with no +/-
# markers; indentation stripped. The new version feeds the CNN feature map
# straight into the fully-convolutional head.
def build_model(self):
# old (removed):
flat = self.build_cnn()
# new (added):
cnn = self.build_cnn()
# flat_1d = self.build_1d_cnn()
# flat = tf.keras.layers.concatenate([flat, flat_1d, flat_anc])
# flat = tf.keras.layers.concatenate([flat, flat_1d])
# self.build_dnn(flat)
# old (removed): optional flight-altitude concat plus Dense head
if self.USE_FLIGHT_ALTITUDE:
flat = tf.keras.layers.concatenate([flat, self.inputs[1]])
self.build_dnn(flat)
# new (added): the altitude concat is disabled (##); conv head used instead
## if self.USE_FLIGHT_ALTITUDE:
## flat = tf.keras.layers.concatenate([flat, self.inputs[1]])
self.build_fcl(cnn)
# build_fcl stores the network output in self.logits.
self.model = tf.keras.Model(self.inputs, self.logits)
def restore(self, ckpt_dir):
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment