diff --git a/modules/deeplearning/icing_cnn.py b/modules/deeplearning/icing_cnn.py
index 3c0336342745d914859f0b5e6cac868548b7896a..fcadd0f36db14ee8be919d3e4d323e8bf3ed7c92 100644
--- a/modules/deeplearning/icing_cnn.py
+++ b/modules/deeplearning/icing_cnn.py
@@ -18,7 +18,7 @@ PROC_BATCH_SIZE = 2046
 PROC_BATCH_BUFFER_SIZE = 50000
 NumLabels = 1
 BATCH_SIZE = 256
-NUM_EPOCHS = 60
+NUM_EPOCHS = 80
 
 TRACK_MOVING_AVERAGE = False
 
@@ -243,7 +243,7 @@ class IcingIntensityNN:
         dataset = dataset.map(self.data_function, num_parallel_calls=8)
         self.test_dataset = dataset
 
-    def setup_pipeline(self, filename, train_idxs=None, test_idxs=None):
+    def setup_pipeline(self, filename):
         self.filename = filename
         self.h5f = h5py.File(filename, 'r')
         time = self.h5f['time']
@@ -592,16 +592,16 @@ class IcingIntensityNN:
                 self.predict(mini_batch_test)
         print('loss, acc: ', self.test_loss.result(), self.test_accuracy.result())
 
-    def run(self, filename, filename_l1b=None, train_dict=None, valid_dict=None):
+    def run(self, filename, filename_l1b=None):
         with tf.device('/device:GPU:'+str(self.gpu_device)):
-            self.setup_pipeline(filename, train_idxs=train_dict, test_idxs=valid_dict)
+            self.setup_pipeline(filename)
             self.build_model()
             self.build_training()
             self.build_evaluation()
             self.do_training()
 
-    def run_restore(self, filename, ckpt_dir, train_dict=None, valid_dict=None):
-        self.setup_pipeline(filename, train_idxs=train_dict, test_idxs=valid_dict)
+    def run_restore(self, filename, ckpt_dir):
+        self.setup_pipeline(filename)
         self.build_model()
         self.build_training()
         self.build_evaluation()
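
Note on usage after this change: the train/validation index dictionaries are no longer threaded through the public entry points; callers pass only the HDF5 filename (plus the optional L1b file for run). A minimal caller sketch under that assumption — the bare IcingIntensityNN() construction, the import path, and the example file paths are inferred placeholders, not shown in this patch:

    # Hypothetical caller using only the simplified signatures from this patch.
    from modules.deeplearning.icing_cnn import IcingIntensityNN

    nn = IcingIntensityNN()
    # was: nn.run(filename, train_dict=..., valid_dict=...)
    nn.run('/path/to/icing_training.h5')

    # Restoring from a checkpoint directory likewise drops the index dicts:
    # nn.run_restore('/path/to/icing_training.h5', '/path/to/ckpt_dir')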