Commit 59faf5e0 authored by tomrink

snapshot...

parent b7c85a94
@@ -575,85 +575,60 @@ class IcingIntensityFCN:
         else:
             input_2d = self.inputs[0]
 
-        conv = tf.keras.layers.Conv2D(num_filters, 5, strides=[1, 1], padding=padding, activation=activation)(input_2d)
+        conv = tf.keras.layers.Conv2D(num_filters, kernel_size=5, strides=1, padding=padding, activation=activation)(input_2d)
         conv = tf.keras.layers.MaxPool2D(padding=padding)(conv)
         conv = tf.keras.layers.BatchNormalization()(conv)
         print(conv.shape)
 
         num_filters *= 2
-        conv = tf.keras.layers.Conv2D(num_filters, 3, strides=[1, 1], padding=padding, activation=activation)(conv)
+        conv = tf.keras.layers.Conv2D(num_filters, kernel_size=3, strides=1, padding=padding, activation=activation)(conv)
         conv = tf.keras.layers.MaxPool2D(padding=padding)(conv)
         conv = tf.keras.layers.BatchNormalization()(conv)
         print(conv.shape)
 
         num_filters *= 2
-        conv = tf.keras.layers.Conv2D(num_filters, 3, strides=[1, 1], padding=padding, activation=activation)(conv)
+        conv = tf.keras.layers.Conv2D(num_filters, kernel_size=3, strides=1, padding=padding, activation=activation)(conv)
         conv = tf.keras.layers.MaxPool2D(padding=padding)(conv)
         conv = tf.keras.layers.BatchNormalization()(conv)
         print(conv.shape)
 
         num_filters *= 2
-        conv = tf.keras.layers.Conv2D(num_filters, 3, strides=[1, 1], padding=padding, activation=activation)(conv)
+        conv = tf.keras.layers.Conv2D(num_filters, kernel_size=3, strides=1, padding=padding, activation=activation)(conv)
         conv = tf.keras.layers.MaxPool2D(padding=padding)(conv)
         conv = tf.keras.layers.BatchNormalization()(conv)
         print(conv.shape)
 
-        # num_filters *= 2
-        # conv = tf.keras.layers.Conv2D(num_filters, 3, strides=[1, 1], padding=padding, activation=activation)(conv)
-        # conv = tf.keras.layers.MaxPool2D(padding=padding)(conv)
-        # conv = tf.keras.layers.BatchNormalization()(conv)
-        # print(conv.shape)
-
-        flat = tf.keras.layers.Flatten()(conv)
-        return flat
+        return conv
 
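The first half of this hunk leaves the four Conv2D/MaxPool2D/BatchNormalization blocks intact (renaming positional arguments to kernel_size= and collapsing strides=[1, 1] to strides=1 is behavior-neutral) but deletes the Flatten, so build_cnn now returns the 4-D feature map. Each MaxPool2D halves the spatial dimensions while num_filters doubles per block. A minimal standalone sketch of the encoder as it reads after this commit; the 16x16x6 input shape and the starting filter count of 32 are assumptions for illustration, not values shown in the diff:

import tensorflow as tf

def build_cnn_sketch(input_2d, num_filters=32, padding="SAME"):
    activation = tf.nn.leaky_relu
    # first block: 5x5 conv, then the default 2x2 max-pool halves H and W
    conv = tf.keras.layers.Conv2D(num_filters, kernel_size=5, strides=1,
                                  padding=padding, activation=activation)(input_2d)
    conv = tf.keras.layers.MaxPool2D(padding=padding)(conv)
    conv = tf.keras.layers.BatchNormalization()(conv)
    # three more blocks: 3x3 convs, filters double each time
    for _ in range(3):
        num_filters *= 2
        conv = tf.keras.layers.Conv2D(num_filters, kernel_size=3, strides=1,
                                      padding=padding, activation=activation)(conv)
        conv = tf.keras.layers.MaxPool2D(padding=padding)(conv)
        conv = tf.keras.layers.BatchNormalization()(conv)
    return conv  # 4-D feature map; no Flatten in the FCN version

inp = tf.keras.Input(shape=(16, 16, 6))   # assumed input shape
print(build_cnn_sketch(inp).shape)        # (None, 1, 1, 256): 16 -> 8 -> 4 -> 2 -> 1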
-    def build_dnn(self, input_layer=None):
+    def build_fcl(self, input_layer):
         print('build fully connected layer')
+        num_filters = input_layer.shape[3]
         drop_rate = 0.5
         # activation = tf.nn.relu
         # activation = tf.nn.elu
         activation = tf.nn.leaky_relu
         momentum = 0.99
-        # padding = "VALID"
-
-        if input_layer is not None:
-            flat = input_layer
-            n_hidden = input_layer.shape[1]
-        else:
-            flat = self.X_img
-            n_hidden = self.X_img.shape[1]
-
-        fac = 2
-
-        fc = build_residual_block(flat, drop_rate, fac*n_hidden, activation, 'Residual_Block_1', doDropout=True, doBatchNorm=True)
-        fc = build_residual_block(fc, drop_rate, fac*n_hidden, activation, 'Residual_Block_2', doDropout=True, doBatchNorm=True)
-        fc = build_residual_block(fc, drop_rate, fac*n_hidden, activation, 'Residual_Block_3', doDropout=True, doBatchNorm=True)
-        fc = build_residual_block(fc, drop_rate, fac*n_hidden, activation, 'Residual_Block_4', doDropout=True, doBatchNorm=True)
-        fc = build_residual_block(fc, drop_rate, fac*n_hidden, activation, 'Residual_Block_5', doDropout=True, doBatchNorm=True)
-        # fc = build_residual_block(fc, drop_rate, fac*n_hidden, activation, 'Residual_Block_6', doDropout=True, doBatchNorm=True)
-        #
-        # fc = build_residual_block(fc, drop_rate, fac*n_hidden, activation, 'Residual_Block_7', doDropout=True, doBatchNorm=True)
-        #
-        # fc = build_residual_block(fc, drop_rate, fac*n_hidden, activation, 'Residual_Block_8', doDropout=True, doBatchNorm=True)
-
-        fc = tf.keras.layers.Dense(n_hidden, activation=activation)(fc)
-        fc = tf.keras.layers.BatchNormalization()(fc)
+        padding = "SAME"
+
+        conv = tf.keras.layers.Conv2D(num_filters, kernel_size=1, strides=1, padding=padding, activation=activation)(input_layer)
+        conv = tf.keras.layers.Conv2D(num_filters, kernel_size=1, strides=1, padding=padding, activation=activation)(conv)
+        conv = tf.keras.layers.Conv2D(num_filters, kernel_size=1, strides=1, padding=padding, activation=activation)(conv)
+        print(conv.shape)
 
         if NumClasses == 2:
             activation = tf.nn.sigmoid  # For binary
         else:
             activation = tf.nn.softmax  # For multi-class
 
         # Called logits, but these are actually probabilities, see activation
-        logits = tf.keras.layers.Dense(NumLogits, activation=activation)(fc)
+        logits = tf.keras.layers.Conv2D(1, kernel_size=1, strides=1, padding=padding, activation=activation)(conv)
         print(logits.shape)
         self.logits = logits
 
     def build_training(self):
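The replacement head, build_fcl, swaps the residual Dense stack for three 1x1 convolutions plus a 1x1 convolutional output layer. A 1x1 convolution applies the same weight matrix independently at every spatial position, so on a 1x1 feature map it computes exactly what a Dense layer would; that equivalence is what lets the commit keep the classifier while making the whole network convolutional. A small sketch demonstrating it (the sizes 8 and 4 are arbitrary assumptions):

import numpy as np
import tensorflow as tf

x = tf.random.normal((2, 1, 1, 8))        # (batch, 1, 1, channels) feature map

dense = tf.keras.layers.Dense(4)                      # old-style head
conv1x1 = tf.keras.layers.Conv2D(4, kernel_size=1)    # new-style head

y_dense = dense(tf.reshape(x, (2, 8)))
conv1x1.build(x.shape)                    # create weights, then copy them over
conv1x1.set_weights([dense.kernel.numpy().reshape(1, 1, 8, 4),
                     dense.bias.numpy()])
y_conv = conv1x1(x)

# identical results up to float tolerance
np.testing.assert_allclose(y_dense.numpy(),
                           tf.reshape(y_conv, (2, 4)).numpy(), rtol=1e-5)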
@@ -705,6 +680,7 @@ class IcingIntensityFCN:
         labels = mini_batch[2]
         with tf.GradientTape() as tape:
             pred = self.model(inputs, training=True)
+            pred = tf.reshape(pred, (pred.shape[0], NumLogits))
             loss = self.loss(labels, pred)
             total_loss = loss
             if len(self.model.losses) > 0:
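The surrounding training step also folds any layer-attached penalties into the objective: model.losses collects them, and the hunk's truncated if-branch presumably sums them into total_loss. A minimal sketch of that pattern under stated assumptions (the toy model, the Adam optimizer, and the reg_loss continuation are illustrative, not from the diff):

import tensorflow as tf

# toy model with an L2 kernel regularizer so model.losses is non-empty
model = tf.keras.Sequential([
    tf.keras.Input(shape=(8,)),
    tf.keras.layers.Dense(4, kernel_regularizer=tf.keras.regularizers.l2(1e-4)),
    tf.keras.layers.Dense(1, activation=tf.nn.sigmoid),
])
optimizer = tf.keras.optimizers.Adam()
loss_fn = tf.keras.losses.BinaryCrossentropy()

inputs, labels = tf.random.normal((16, 8)), tf.random.uniform((16, 1))
with tf.GradientTape() as tape:
    pred = model(inputs, training=True)
    loss = loss_fn(labels, pred)
    total_loss = loss
    if len(model.losses) > 0:
        reg_loss = tf.math.add_n(model.losses)   # assumed continuation of the hunk
        total_loss = loss + reg_loss
grads = tape.gradient(total_loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))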
@@ -725,6 +701,7 @@ class IcingIntensityFCN:
         inputs = [mini_batch[0], mini_batch[1]]
         labels = mini_batch[2]
         pred = self.model(inputs, training=False)
+        pred = tf.reshape(pred, (pred.shape[0], NumLogits))
         t_loss = self.loss(labels, pred)
         self.test_loss(t_loss)
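Both the training and test steps gain the same one-line fix: because the model now ends in a Conv2D, pred comes back 4-D as (batch, H', W', 1) rather than the 2-D (batch, NumLogits) the loss function was written against, and tf.reshape flattens the singleton spatial axes back out. A sketch of the shape fix, assuming the encoder has pooled the input down to 1x1 and NumLogits is 1 (the binary case):

import tensorflow as tf

NumLogits = 1                              # assumed, matching the binary head
pred = tf.random.uniform((8, 1, 1, 1))     # what the FCN model now emits
labels = tf.random.uniform((8, 1))

pred = tf.reshape(pred, (pred.shape[0], NumLogits))   # -> (8, 1), as in the diff
loss = tf.keras.losses.BinaryCrossentropy()(labels, pred)
print(pred.shape, float(loss))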
@@ -924,14 +901,14 @@ class IcingIntensityFCN:
         f.close()
 
     def build_model(self):
-        flat = self.build_cnn()
+        cnn = self.build_cnn()
         # flat_1d = self.build_1d_cnn()
         # flat = tf.keras.layers.concatenate([flat, flat_1d, flat_anc])
         # flat = tf.keras.layers.concatenate([flat, flat_1d])
         # self.build_dnn(flat)
-        if self.USE_FLIGHT_ALTITUDE:
-            flat = tf.keras.layers.concatenate([flat, self.inputs[1]])
-        self.build_dnn(flat)
+        ## if self.USE_FLIGHT_ALTITUDE:
+        ##     flat = tf.keras.layers.concatenate([flat, self.inputs[1]])
+        self.build_fcl(cnn)
         self.model = tf.keras.Model(self.inputs, self.logits)
 
     def restore(self, ckpt_dir):
...
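Taken together with the build_model rewiring above (the flight-altitude concatenation is now commented out, presumably because a flat altitude vector no longer matches the 4-D feature map), the network is fully convolutional end to end. One property that buys is independence from the input size: the same layers applied to a larger scene yield a spatial map of probabilities instead of a single value. A condensed sketch of the resulting graph, with all input shapes and filter counts assumed:

import tensorflow as tf

def make_fcn(input_shape):
    inp = tf.keras.Input(shape=input_shape)
    x = inp
    for i, k in enumerate([5, 3, 3, 3]):   # build_cnn: four conv/pool/BN blocks
        x = tf.keras.layers.Conv2D(32 * 2**i, kernel_size=k, padding="SAME",
                                   activation=tf.nn.leaky_relu)(x)
        x = tf.keras.layers.MaxPool2D(padding="SAME")(x)
        x = tf.keras.layers.BatchNormalization()(x)
    for _ in range(3):                     # build_fcl: 1x1 "fully connected" stack
        x = tf.keras.layers.Conv2D(x.shape[3], kernel_size=1,
                                   activation=tf.nn.leaky_relu)(x)
    out = tf.keras.layers.Conv2D(1, kernel_size=1, activation=tf.nn.sigmoid)(x)
    return tf.keras.Model(inp, out)

print(make_fcn((16, 16, 6)).output_shape)    # (None, 1, 1, 1): one probability
print(make_fcn((256, 256, 6)).output_shape)  # (None, 16, 16, 1): a dense map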