diff --git a/scripts/viirs_scan_bytes.py b/scripts/viirs_scan_bytes.py
index 09c51c2fda20db24a8daa4bea548c26bf330bedc..42869fe4053c2e40d24f3ce7bfae1766dbb861b6 100755
--- a/scripts/viirs_scan_bytes.py
+++ b/scripts/viirs_scan_bytes.py
@@ -40,12 +40,11 @@ band_map = {
 apids = sorted(band_map.keys())
 
 
-def utc2tai(val):
-    dt = datetime.strptime(val, '%Y-%m-%d %H:%M:%S')
-    return grain.Grain().utc2tai(dt, grain.VIIRS_EPOCH) * 10**2
+def utc2tai(dt):
+    return grain.Grain().utc2tai(dt, grain.VIIRS_EPOCH) * 1000**2
 
 
-def write_netcdf_file(destpath, scans, sizes):
+def create_netcdf_file(destpath):
     dataset = netCDF4.Dataset(destpath, 'w')
     dataset.bands = """Band APID  Day
 ==============
@@ -78,55 +77,67 @@ ENGR 826"""
     dataset.createDimension('scan_time', None)
     dataset.createDimension('apid', size=len(apids))
     dataset.createVariable('time', 'u8', ('scan_time',))
-    dataset.createVariable('size', 'i4', ('apid', 'scan_time',), fill_value=-999)
+    dataset.createVariable(
+        'size', 'i4', ('apid', 'scan_time',),
+        fill_value=-999, chunksizes=[len(apids), 1024])
     dataset.createVariable('apid', 'u2', ('apid',))
 
-    dataset['time'][:] = np.array(list(scans))
     dataset['apid'][:] = np.array(list(apids))
-    dataset['size'][:] = sizes
 
-    dataset.close()
+    return dataset
 
 
 def read_data_from_rdr(filepath, start, end):
     # XXX: just get the VIIRS Science RDR for now
     rdr = jpssrdr.rdr_datasets(filepath)['science'][0]
-    dat = defaultdict(lambda: 0)
+    sizes = defaultdict(lambda: 0)
     times = set()
     for tracker, packet in rdr.packets():
         apid = packet.apid
         time = tracker.obs_time
         size = tracker.size
-
+        if tracker.offset == -1:
+            continue
         # skip data outside requested window
-        if end < time < start:
+        if time < start or time >= end or apid not in apids:
             continue  # skip data outside window
 
-        key = (apid, time)
-        dat[key] += size
-
+        sizes[apid, time] += size
         times.add(time)
 
-    return times, apids, dat
+    return sorted(times), sizes
 
 
 if __name__ == '__main__':
-    import argparse
+    import argparse, sys
     parser = argparse.ArgumentParser(description=__doc__)
-    parser.add_argument('start', type=utc2tai)
-    parser.add_argument('end', type=utc2tai)
-    parser.add_argument('rdrs', nargs='+')
+    datetype = lambda v: datetime.strptime(v, '%Y-%m-%d %H:%M:%S')
+    parser.add_argument('start', type=datetype)
+    parser.add_argument('end', type=datetype)
+    parser.add_argument('rdrs', type=argparse.FileType('r'), nargs='?', default=sys.stdin)
     args = parser.parse_args()
 
-    scan_times = set()
-    sizes = np.ones((len(apids), 86400 / 1.7864)) * float('nan')
-    for filepath in args.rdrs:
+    start, end = utc2tai(args.start), utc2tai(args.end)
+    destpath = args.start.strftime('viirs_scanbytes_d%Y%m%d_t%H%M%S.nc')
+    dataset = create_netcdf_file(destpath)
+    for filepath in [l.strip() for l in args.rdrs]:
         print "reading", filepath
-        times, _, dat = read_data_from_rdr(filepath, args.start, args.end)
+        times, sizes = read_data_from_rdr(filepath, start, end)
+
+        dat = np.ones((len(apids), len(times))) * -999
         for tidx, time in enumerate(times):
             for aidx, apid in enumerate(apids):
-                sizes[aidx][tidx] = dat[apid, time]
+                dat[aidx][tidx] = sizes[apid, time]
 
-    destpath = os.path.basename(args.rdrs[0]) + '.scanbytes.nc'
-    print "writing", destpath
-    write_netcdf_file(destpath, scan_times, sizes)
+        var = dataset['time']
+        num_times = var.shape[0]
+        print 'time before', var.shape,
+        var[num_times:] = np.array(list(times))
+        print 'after', var.shape
+
+        var = dataset['size']
+        print 'size before', var.shape,
+        var[:,num_times:] = dat
+        print 'after', var.shape
+
+    dataset.close()