diff --git a/modules/deeplearning/icing_cnn.py b/modules/deeplearning/icing_cnn.py
index 51f40292490e5dd13d08b82030651008456475b2..0afe586c6cbce1872cf9be24292d03433ffd7ebd 100644
--- a/modules/deeplearning/icing_cnn.py
+++ b/modules/deeplearning/icing_cnn.py
@@ -2,13 +2,14 @@ import tensorflow as tf
 import tensorflow_addons as tfa
 from util.setup import logdir, modeldir, cachepath, now
 from util.util import homedir, EarlyStop, normalize, make_for_full_domain_predict
-from util.geos_nav import GEOSNavigation, get_navigation
+from util.geos_nav import get_navigation
 
 import os, datetime
 import numpy as np
 import pickle
 import h5py
 
+USE_FLIGHT_ALTITUDE = True
 
 LOG_DEVICE_PLACEMENT = False
 
@@ -208,6 +209,9 @@ class IcingIntensityNN:
         self.X_img = tf.keras.Input(shape=(img_width, img_width, n_chans))
 
         self.inputs.append(self.X_img)
+        self.inputs.append(tf.keras.Input(shape=(5,)))
+
+        self.flight_level = 0
 
         self.DISK_CACHE = False
 
@@ -219,8 +223,7 @@ class IcingIntensityNN:
 
         tf.debugging.set_log_device_placement(LOG_DEVICE_PLACEMENT)
 
-        # Note: Don't do this anymore, because nobody else willing to do so as well!
-        # Also, doesn't seem to play well with SLURM
+        # Doesn't seem to play well with SLURM
         # gpus = tf.config.experimental.list_physical_devices('GPU')
         # if gpus:
         #     try:
@@ -243,7 +246,7 @@ class IcingIntensityNN:
             else:
                 tup = self.in_mem_data_cache_test(key)
             if tup is not None:
-                return tup[0], tup[1]
+                return tup[0], tup[1], tup[2]
 
         # sort these to use as numpy indexing arrays
         nd_idxs = np.array(idxs)
@@ -269,6 +272,8 @@ class IcingIntensityNN:
         data = data.astype(np.float32)
         data = np.transpose(data, axes=(1, 2, 3, 0))
 
+        data_alt = self.get_scalar_data(nd_idxs, is_training)
+
         label = self.get_label_data(nd_idxs, is_training)
         label = np.where(label == -1, 0, label)
 
@@ -283,11 +288,11 @@ class IcingIntensityNN:
 
         if CACHE_DATA_IN_MEM:
             if is_training:
-                self.in_mem_data_cache[key] = (data, label)
+                self.in_mem_data_cache[key] = (data, data_alt, label)
             else:
-                self.in_mem_data_cache_test[key] = (data, label)
+                self.in_mem_data_cache_test[key] = (data, data_alt, label)
 
-        return data, label
+        return data, data_alt, label
 
     def get_parameter_data(self, param, nd_idxs, is_training):
         if is_training:
@@ -319,16 +324,13 @@ class IcingIntensityNN:
                 h5f = self.h5f_l2_tst
 
         nda = h5f[param][nd_idxs,]
-        b0 = np.logical_and(nda >= 0, nda < 2000)
-        b1 = np.logical_and(nda >= 2000, nda < 4000)
-        b2 = np.logical_and(nda >= 4000, nda < 6000)
-        b3 = np.logical_and(nda >= 6000, nda < 8000)
-        b4 = np.logical_and(nda >= 8000, nda < 15000)
-        nda[b0] = 0
-        nda[b1] = 1
-        nda[b2] = 2
-        nda[b3] = 3
-        nda[b4] = 4
+
+        nda[np.logical_and(nda >= 0, nda < 2000)] = 0
+        nda[np.logical_and(nda >= 2000, nda < 4000)] = 1
+        nda[np.logical_and(nda >= 4000, nda < 6000)] = 2
+        nda[np.logical_and(nda >= 6000, nda < 8000)] = 3
+        nda[np.logical_and(nda >= 8000, nda < 15000)] = 4
+
         nda = tf.one_hot(nda, 5).numpy()
 
         return nda
@@ -370,21 +372,27 @@ class IcingIntensityNN:
         data = data.astype(np.float32)
         data = np.transpose(data, axes=(1, 2, 3, 0))
 
-        return data
+        # TODO: altitude data will be specified by user at run-time
+        nda = np.zeros([nd_idxs.size], dtype=np.int32)
+        nda[:] = self.flight_level
+        nda = tf.one_hot(nda, 5).numpy()
+
+        return data, nda
 
     @tf.function(input_signature=[tf.TensorSpec(None, tf.int32)])
     def data_function(self, indexes):
-        out = tf.numpy_function(self.get_in_mem_data_batch_train, [indexes], [tf.float32, tf.int32])
+        out = tf.numpy_function(self.get_in_mem_data_batch_train, [indexes], [tf.float32, tf.float32, tf.int32])
         return out
 
     @tf.function(input_signature=[tf.TensorSpec(None, tf.int32)])
     def data_function_test(self, indexes):
-        out = tf.numpy_function(self.get_in_mem_data_batch_test, [indexes], [tf.float32, tf.int32])
+        out = tf.numpy_function(self.get_in_mem_data_batch_test, [indexes], [tf.float32, tf.float32, tf.int32])
         return out
 
     @tf.function(input_signature=[tf.TensorSpec(None, tf.int32)])
     def data_function_evaluate(self, indexes):
-        out = tf.numpy_function(self.get_in_mem_data_batch_eval, [indexes], tf.float32)
+        # TODO: modify for user specified altitude
+        out = tf.numpy_function(self.get_in_mem_data_batch_eval, [indexes], [tf.float32, tf.float32])
         return out
 
     def get_train_dataset(self, indexes):
@@ -669,8 +677,8 @@ class IcingIntensityNN:
 
     @tf.function
     def train_step(self, mini_batch):
-        inputs = [mini_batch[0]]
-        labels = mini_batch[1]
+        inputs = [mini_batch[0], mini_batch[1]]
+        labels = mini_batch[2]
         with tf.GradientTape() as tape:
             pred = self.model(inputs, training=True)
             loss = self.loss(labels, pred)
@@ -693,8 +701,8 @@ class IcingIntensityNN:
 
     @tf.function
     def test_step(self, mini_batch):
-        inputs = [mini_batch[0]]
-        labels = mini_batch[1]
+        inputs = [mini_batch[0], mini_batch[1]]
+        labels = mini_batch[2]
         pred = self.model(inputs, training=False)
         t_loss = self.loss(labels, pred)
 
@@ -710,8 +718,8 @@ class IcingIntensityNN:
             self.test_false_pos(labels, pred)
 
     def predict(self, mini_batch):
-        inputs = [mini_batch[0]]
-        labels = mini_batch[1]
+        inputs = [mini_batch[0], mini_batch[1]]
+        labels = mini_batch[2]
         pred = self.model(inputs, training=False)
         t_loss = self.loss(labels, pred)
 
@@ -790,8 +798,8 @@ class IcingIntensityNN:
             proc_batch_cnt = 0
             n_samples = 0
 
-            for data0, label in self.train_dataset:
-                trn_ds = tf.data.Dataset.from_tensor_slices((data0, label))
+            for data0, data1, label in self.train_dataset:
+                trn_ds = tf.data.Dataset.from_tensor_slices((data0, data1, label))
                 trn_ds = trn_ds.batch(BATCH_SIZE)
                 for mini_batch in trn_ds:
                     if self.learningRateSchedule is not None:
@@ -805,8 +813,8 @@ class IcingIntensityNN:
                             tf.summary.scalar('num_epochs', epoch, step=step)
 
                         self.reset_test_metrics()
-                        for data0_tst, label_tst in self.test_dataset:
-                            tst_ds = tf.data.Dataset.from_tensor_slices((data0_tst, label_tst))
+                        for data0_tst, data1_tst, label_tst in self.test_dataset:
+                            tst_ds = tf.data.Dataset.from_tensor_slices((data0_tst, data1_tst, label_tst))
                             tst_ds = tst_ds.batch(BATCH_SIZE)
                             for mini_batch_test in tst_ds:
                                 self.test_step(mini_batch_test)
@@ -840,8 +848,8 @@ class IcingIntensityNN:
             total_time += (t1-t0)
 
             self.reset_test_metrics()
-            for data0, label in self.test_dataset:
-                ds = tf.data.Dataset.from_tensor_slices((data0, label))
+            for data0, data1, label in self.test_dataset:
+                ds = tf.data.Dataset.from_tensor_slices((data0, data1, label))
                 ds = ds.batch(BATCH_SIZE)
                 for mini_batch in ds:
                     self.test_step(mini_batch)
@@ -903,6 +911,8 @@ class IcingIntensityNN:
         # flat = tf.keras.layers.concatenate([flat, flat_1d, flat_anc])
         # flat = tf.keras.layers.concatenate([flat, flat_1d])
         # self.build_dnn(flat)
+        if USE_FLIGHT_ALTITUDE:
+            flat = tf.keras.layers.concatenate([flat, self.inputs[1]])
         self.build_dnn(flat)
         self.model = tf.keras.Model(self.inputs, self.logits)
 
@@ -916,8 +926,8 @@ class IcingIntensityNN:
         self.test_loss.reset_states()
         self.test_accuracy.reset_states()
 
-        for data0, label in self.test_dataset:
-            ds = tf.data.Dataset.from_tensor_slices((data0, label))
+        for data0, data1, label in self.test_dataset:
+            ds = tf.data.Dataset.from_tensor_slices((data0, data1, label))
             ds = ds.batch(BATCH_SIZE)
             for mini_batch_test in ds:
                 self.predict(mini_batch_test)
@@ -1020,7 +1030,7 @@ def run_restore_static(filename_l1b, filename_l2, ckpt_dir_s_path):
     return labels, prob_avg, cm_avg
 
 
-def run_evaluate_static(h5f, ckpt_dir_s_path, prob_thresh=0.5, satellite='GOES16', domain='FD'):
+def run_evaluate_static(h5f, ckpt_dir_s_path, prob_thresh=0.5, satellite='GOES16', domain='FD', flight_level=4):
     data_dct, ll, cc = make_for_full_domain_predict(h5f, name_list=train_params, domain=domain)
     num_elems = len(cc)
     num_lines = len(ll)
@@ -1035,6 +1045,7 @@ def run_evaluate_static(h5f, ckpt_dir_s_path, prob_thresh=0.5, satellite='GOES16
         if not os.path.isdir(ckpt_dir):
             continue
         nn = IcingIntensityNN()
+        nn.flight_level = flight_level
         nn.setup_eval_pipeline(data_dct, num_lines * num_elems)
         nn.build_model()
         nn.build_training()
diff --git a/modules/icing/pirep_goes.py b/modules/icing/pirep_goes.py
index caed83ae0cfabefa36b33ba34690b09be5998735..18f5da5222e155ce803458e95bfb3e3896590a70 100644
--- a/modules/icing/pirep_goes.py
+++ b/modules/icing/pirep_goes.py
@@ -1750,7 +1750,7 @@ def time_filter_2(times, dt_str_0=None, dt_str_1=None, format_code='%Y-%m-%d_%H:
     return keep_times, keep_idxs
 
 
-def time_filter_3(icing_dct, ts_0, ts_1):
+def time_filter_3(icing_dct, ts_0, ts_1, alt_lo=None, alt_hi=None):
     keep_reports = []
     keep_times = []
     keep_lons = []
@@ -1760,10 +1760,12 @@ def time_filter_3(icing_dct, ts_0, ts_1):
         if ts_0 <= ts < ts_1:
             rpts = icing_dct[ts]
             for idx, tup in enumerate(rpts):
-                keep_reports.append(tup)
-                keep_times.append(ts)
-                keep_lats.append(tup[0])
-                keep_lons.append(tup[1])
+                falt = tup[2]
+                if alt_lo is None or (alt_lo < falt <= alt_hi):
+                    keep_reports.append(tup)
+                    keep_times.append(ts)
+                    keep_lats.append(tup[0])
+                    keep_lons.append(tup[1])
 
     return keep_times, keep_lons, keep_lats, keep_reports
 
@@ -1850,10 +1852,18 @@ def tiles_info(filename):
     print('Icing 6:  ', np.sum(iint == 6))
 
 
+flt_level_ranges = {k:None for k in range(5)}
+flt_level_ranges[0] = [0.0, 2000.0]
+flt_level_ranges[1] = [2000.0, 4000.0]
+flt_level_ranges[2] = [4000.0, 6000.0]
+flt_level_ranges[3] = [6000.0, 8000.0]
+flt_level_ranges[4] = [8000.0, 15000.0]
+
+
 def run_make_images(clvrx_dir='/Users/tomrink/data/clavrx/RadC/', ckpt_dir_s_path='/Users/tomrink/tf_model/', prob_thresh=0.5, satellite='GOES16', domain='CONUS',
                     extent=[-105, -70, 15, 50],
                     pirep_file='/Users/tomrink/data/pirep/pireps_202109200000_202109232359.csv',
-                    obs_lons=None, obs_lats=None, obs_times=None):
+                    obs_lons=None, obs_lats=None, obs_times=None, flight_level=4):
     if pirep_file is not None:
         ice_dict, no_ice_dict, neg_ice_dict = setup(pirep_file)
 
@@ -1863,6 +1873,8 @@ def run_make_images(clvrx_dir='/Users/tomrink/data/clavrx/RadC/', ckpt_dir_s_pat
         clvrx_ds = CLAVRx(clvrx_dir)
     clvrx_files = clvrx_ds.flist
 
+    alt_lo, alt_hi = flt_level_ranges[flight_level]
+
     for fname in clvrx_files:
         h5f = h5py.File(fname, 'r')
         dto = clvrx_ds.get_datetime(fname)
@@ -1875,7 +1887,7 @@ def run_make_images(clvrx_dir='/Users/tomrink/data/clavrx/RadC/', ckpt_dir_s_pat
         ts_0 = dto_0.timestamp()
         ts_1 = dto_1.timestamp()
         if pirep_file is not None:
-            _, keep_lons, keep_lats, _ = time_filter_3(ice_dict, ts_0, ts_1)
+            _, keep_lons, keep_lats, _ = time_filter_3(ice_dict, ts_0, ts_1, alt_lo, alt_hi)
         elif obs_times is not None:
             keep = np.logical_and(obs_times >= ts_0, obs_times < ts_1)
             keep_lons = obs_lons[keep]
@@ -1884,7 +1896,7 @@ def run_make_images(clvrx_dir='/Users/tomrink/data/clavrx/RadC/', ckpt_dir_s_pat
             keep_lons = None
             keep_lats = None
 
-        ice_lons, ice_lats, preds_2d, lons_2d, lats_2d, x_rad, y_rad = run_evaluate_static(h5f, ckpt_dir_s_path=ckpt_dir_s_path, prob_thresh=prob_thresh, satellite=satellite, domain=domain)
+        ice_lons, ice_lats, preds_2d, lons_2d, lats_2d, x_rad, y_rad = run_evaluate_static(h5f, ckpt_dir_s_path=ckpt_dir_s_path, flight_level=flight_level, prob_thresh=prob_thresh, satellite=satellite, domain=domain)
         make_icing_image(h5f, ice_lons, ice_lats, clvrx_str_time, satellite, domain, ice_lons_vld=keep_lons, ice_lats_vld=keep_lats, extent=extent)
         write_icing_file(clvrx_str_time, preds_2d, x_rad, y_rad, lons_2d, lats_2d)
         print('Done: ', clvrx_str_time)
diff --git a/modules/util/util.py b/modules/util/util.py
index f7bf79a77badcb718c154dc71f11c6c762142f38..86d7f2c33448ded0f0aff3a1b45ef435067e4fae 100644
--- a/modules/util/util.py
+++ b/modules/util/util.py
@@ -502,8 +502,8 @@ exmp_file_fd = '/Users/tomrink/data/OR_ABI-L1b-RadF-M6C16_G16_s20212521800223_e2
 # # UR from Taiwan
 # lon, lat = 135.0, 35.0
 # elem_ur, line_ur = (2499, 995)
-taiwan_x0 = 1079
-taiwan_y0 = 995
+taiwan_i0 = 1079
+taiwan_j0 = 995
 taiwan_lenx = 1420
 taiwan_leny = 1020
 # geos.transform_point(135.0, 35.0, ccrs.PlateCarree(), False)
@@ -517,18 +517,20 @@ def make_for_full_domain_predict(h5f, name_list=None, satellite='GOES16', domain
     w_y = 16
     i_0 = 0
     j_0 = 0
+    s_x = w_x
+    s_y = w_y
 
     geos, xlen, xmin, xmax, ylen, ymin, ymax = get_cartopy_crs(satellite, domain)
     if satellite == 'H08':
         xlen = taiwan_lenx
         ylen = taiwan_leny
-        i_0 = taiwan_x0
-        j_0 = taiwan_y0
+        i_0 = taiwan_i0
+        j_0 = taiwan_j0
 
     grd_dct = {name: None for name in name_list}
 
     cnt_a = 0
-    for didx, ds_name in enumerate(name_list):
+    for ds_name in name_list:
         gvals = get_grid_values(h5f, ds_name, j_0, i_0, None, num_j=ylen, num_i=xlen)
         if gvals is not None:
             grd_dct[ds_name] = gvals
@@ -539,21 +541,21 @@ def make_for_full_domain_predict(h5f, name_list=None, satellite='GOES16', domain
 
     grd_dct_n = {name: [] for name in name_list}
 
-    n_x = int(xlen/w_x)
-    n_y = int(ylen/w_y)
+    n_x = int(xlen/s_x)
+    n_y = int(ylen/s_y)
 
-    ll = [j_0 + j*w_y for j in range(n_y-1)]
-    cc = [i_0 + i*w_x for i in range(n_x-1)]
+    ll = [j_0 + j*s_y for j in range(n_y-1)]
+    cc = [i_0 + i*s_x for i in range(n_x-1)]
 
     for ds_name in name_list:
         for j in range(n_y-1):
-            j_ul = j * w_y
+            j_ul = j * s_y
             for i in range(n_x-1):
-                i_ul = i * w_x
+                i_ul = i * s_x
                 grd_dct_n[ds_name].append(grd_dct[ds_name][j_ul:j_ul+w_y, i_ul:i_ul+w_x])
 
     grd_dct = {name: None for name in name_list}
-    for didx, ds_name in enumerate(name_list):
+    for ds_name in name_list:
         grd_dct[ds_name] = np.stack(grd_dct_n[ds_name])
 
     return grd_dct, ll, cc