diff --git a/modules/deeplearning/unet_l1b_l2.py b/modules/deeplearning/unet_l1b_l2.py
index aaae7b8ce82ad0d811ca1f1a7b2917239fada9e9..00948a9829b6c2c1b6ea25a6a40ead3bae3b5e07 100644
--- a/modules/deeplearning/unet_l1b_l2.py
+++ b/modules/deeplearning/unet_l1b_l2.py
@@ -52,9 +52,9 @@ f.close()
 mean_std_dct.update(mean_std_dct_l1b)
 mean_std_dct.update(mean_std_dct_l2)
 
-emis_params = ['temp_10_4um_nom', 'temp_11_0um_nom', 'temp_12_0um_nom', 'temp_13_3um_nom', 'temp_3_9um_nom',
-               'temp_6_7um_nom']
-l2_params = ['cloud_fraction', 'cld_temp_acha', 'cld_press_acha']
+emis_params = ['temp_10_4um_nom', 'temp_11_0um_nom', 'temp_12_0um_nom', 'temp_13_3um_nom', 'temp_3_75um_nom',
+               'temp_6_7um_nom', 'temp_6_2um_nom', 'temp_7_3um_nom', 'temp_8_5um_nom', 'temp_9_7um_nom']
+l2_params = ['cloud_fraction', 'cld_temp_acha', 'cld_press_acha', 'cld_opd_acha', 'cld_reff_acha']
 
 # -- Zero out params (Experimentation Only) ------------
 zero_out_params = ['cld_reff_dcomp', 'cld_opd_dcomp', 'iwc_dcomp', 'lwc_dcomp']
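The hunk above widens the L1b input list from 6 to 10 brightness-temperature bands (swapping temp_3_9um_nom for temp_3_75um_nom) and adds cld_opd_acha / cld_reff_acha to the L2 list. Each of these names is later looked up in mean_std_dct for normalization, so a quick coverage check helps catch a band with no statistics. A minimal sketch, assuming the module-level names from this file; the check itself is illustrative and not part of the patch:

```python
# Illustrative sanity check (not part of the patch): every channel named in
# emis_params / l2_params needs mean/std statistics in mean_std_dct, which is
# assembled above from the L1b and L2 pickle files.
missing = [p for p in emis_params + l2_params if p not in mean_std_dct]
if missing:
    raise KeyError(f'no normalization statistics for: {missing}')
```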
@@ -181,7 +181,7 @@ class UNET:
         self.test_label_nda = None
 
         # self.n_chans = len(self.train_params)
-        self.n_chans = 6
+        self.n_chans = 10  # matches len(emis_params) with the expanded band list
         if TRIPLET:
             self.n_chans *= 3
         self.X_img = tf.keras.Input(shape=(None, None, self.n_chans))
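The second hunk keeps n_chans hardcoded, now 10 to track the longer emis_params list. A sketch of deriving the count from the list instead, so the two hunks cannot drift apart; this assumes emis_params is the per-tile input set here, which the commented-out `len(self.train_params)` line suggests but the patch does not actually do:

```python
# Sketch only: derive the channel count from the band list rather than
# hardcoding it, mirroring the commented-out len(self.train_params) idea.
self.n_chans = len(emis_params)      # 10 with the expanded band list
if TRIPLET:
    self.n_chans *= 3                # TRIPLET inputs triple the channel count
self.X_img = tf.keras.Input(shape=(None, None, self.n_chans))
```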
@@ -425,10 +425,12 @@ class UNET:
         momentum = 0.99
 
         # num_filters = len(self.train_params) * 4
-        num_filters = self.n_chans * 12
+        num_filters = self.n_chans * 4
 
         input_2d = self.inputs[0]
-        conv = tf.keras.layers.Conv2D(num_filters, kernel_size=5, strides=1, padding=padding, activation=None)(input_2d)
+        print('raw input: ', input_2d.shape)
+        conv = tf.keras.layers.Conv2D(num_filters, kernel_size=7, strides=1, padding=padding, activation=None)(input_2d)
+        conv = conv[:, 6:70, 6:70, :]  # keep the 64x64 window at spatial offset 6 (drops the tile border)
         print('Contracting Branch')
         print('input: ', conv.shape)
         skip = conv
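In the contracting branch the patch drops the first layer to n_chans * 4 filters (40 for the 10-band input), widens the kernel to 7x7, and then slices the activations down to 64x64 with hard-coded indices. A rough standalone equivalent using Cropping2D, under the assumptions that padding resolves to 'same' and the incoming tiles are 76x76, so the slice is a symmetric 6-pixel border crop; both assumptions come from the index arithmetic, not from the patch itself:

```python
import tensorflow as tf

# Sketch of the patched first layer, assuming 76x76 input tiles and
# padding='same'. Cropping2D(6) removes a 6-pixel border on every side,
# which is what conv[:, 6:70, 6:70, :] does when the spatial size is 76.
x = tf.keras.Input(shape=(76, 76, 10))
conv = tf.keras.layers.Conv2D(40, kernel_size=7, strides=1,
                              padding='same', activation=None)(x)
conv = tf.keras.layers.Cropping2D(cropping=6)(conv)  # -> (None, 64, 64, 40)
```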
@@ -527,8 +529,8 @@ class UNET:
         conv = tf.keras.layers.Conv2DTranspose(num_filters, kernel_size=3, strides=2, padding=padding)(conv)
         print('8: ', conv.shape)
 
-        #conv = tf.keras.layers.Conv2DTranspose(num_filters, kernel_size=3, strides=2, padding=padding)(conv)
-        #print('9: ', conv.shape)
+        # conv = tf.keras.layers.Conv2DTranspose(num_filters, kernel_size=3, strides=2, padding=padding)(conv)
+        # print('9: ', conv.shape)
 
         # if NumClasses == 2:
         #     activation = tf.nn.sigmoid  # For binary
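The last hunk only normalizes comment spacing, but the surrounding context points at the output-head choice: sigmoid when NumClasses == 2, presumably softmax otherwise. A hedged sketch of that switch; the 1x1 output convolution and the n_out variable are illustrative, and only the NumClasses/sigmoid pairing comes from the commented code:

```python
# Sketch of the output head hinted at by the commented-out context above.
if NumClasses == 2:
    activation = tf.nn.sigmoid   # binary mask, single output channel
    n_out = 1
else:
    activation = tf.nn.softmax   # one channel per class
    n_out = NumClasses
conv = tf.keras.layers.Conv2D(n_out, kernel_size=1, activation=activation)(conv)
```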