diff --git a/modules/deeplearning/icing_fcn.py b/modules/deeplearning/icing_fcn.py
index 6eb2472bdbe7445191327419b3cbfc05203082ee..6ad32f0b6e8089f39a0a71a01c2d52581bb16cbe 100644
--- a/modules/deeplearning/icing_fcn.py
+++ b/modules/deeplearning/icing_fcn.py
@@ -76,11 +76,27 @@ DO_ZERO_OUT = False
 lunar_map = {'cld_reff_dcomp': 'cld_reff_nlcomp', 'cld_opd_dcomp': 'cld_opd_nlcomp', 'iwc_dcomp': None, 'lwc_dcomp': None}
 
 
+# def build_residual_block_conv2d(x_in, num_filters, activation, block_name, padding='SAME', drop_rate=0.5,
+#                                 do_drop_out=True, do_batch_norm=True):
+#     conv = tf.keras.layers.Conv2D(num_filters, kernel_size=5, strides=1, padding=padding, activation=activation)(x_in)
+#     conv = tf.keras.layers.MaxPool2D(padding=padding)(conv)
+#     conv = tf.keras.layers.BatchNormalization()(conv)
+#
+#     skip = tf.keras.layers.Conv2D(num_filters, kernel_size=3, strides=1, padding=padding, activation=None)(x_in)
+#     skip = tf.keras.layers.MaxPool2D(padding=padding)(skip)
+#     skip = tf.keras.layers.BatchNormalization()(skip)
+#
+#     conv = conv + skip
+#     conv = tf.keras.layers.LeakyReLU()(conv)
+#     print(conv.shape)
+
+
 def build_residual_block_1x1(input_layer, num_filters, activation, block_name, padding='SAME', drop_rate=0.5,
                              do_drop_out=True, do_batch_norm=True):
 
     with tf.name_scope(block_name):
         skip = input_layer
+
         if do_drop_out:
             input_layer = tf.keras.layers.Dropout(drop_rate)(input_layer)
         if do_batch_norm:
@@ -88,13 +104,6 @@ def build_residual_block_1x1(input_layer, num_filters, activation, block_name, p
         conv = tf.keras.layers.Conv2D(num_filters, kernel_size=1, strides=1, padding=padding, activation=activation)(input_layer)
         print(conv.shape)
 
-        # if do_drop_out:
-        #     conv = tf.keras.layers.Dropout(drop_rate)(conv)
-        # if do_batch_norm:
-        #     conv = tf.keras.layers.BatchNormalization()(conv)
-        # conv = tf.keras.layers.Conv2D(num_filters, kernel_size=1, strides=1, padding=padding, activation=activation)(conv)
-        # print(conv.shape)
-
         if do_drop_out:
             conv = tf.keras.layers.Dropout(drop_rate)(conv)
         if do_batch_norm: