Skip to content
Snippets Groups Projects
Commit ac57bcaf authored by Bruce Flynn's avatar Bruce Flynn
Browse files

viirs_scan_bytes: aggregate RDRs

parent 44532834
No related branches found
No related tags found
No related merge requests found
#!/usr/bin/env python
#!/home/brucef/code/PeateScience/local/dist/edosl0/0.1/env/bin/python
"""
Write a NetCDF file containing size in bytes of VIIRS scans.
"""
import os
import numpy as np
from collections import defaultdict
from datetime import datetime
import netCDF4
import numpy as np
from edosl0util import jpssrdr
from grain import grain
# Mapping of CCSDS APID -> VIIRS band name for the science/cal/engineering
# packet streams carried in a VIIRS RDR.  (800-821 are image bands,
# 825 is calibration, 826 is engineering.)
band_map = {
800: 'M5',
801: 'M4',
802: 'M3',
803: 'M2',
804: 'M1',
805: 'M6',
806: 'M7',
807: 'M9',
808: 'M10',
809: 'M8',
810: 'M11',
811: 'M13',
812: 'M12',
813: 'I4',
814: 'M16',
815: 'M15',
816: 'M14',
817: 'I5',
818: 'I1',
819: 'I2',
820: 'I3',
821: 'DNB',
825: 'CAL',
826: 'ENGR'}
# APIDs in ascending order; this fixes the row order of the per-apid
# 'size' array written to the NetCDF file.
apids = sorted(band_map.keys())
def utc2tai(val):
    """Convert a 'YYYY-MM-DD HH:MM:SS' UTC timestamp string to a scaled
    TAI value relative to the VIIRS epoch (used as an argparse type=).

    NOTE(review): the 10**2 scale factor looks odd for IET-style times
    (microseconds would be 10**6) -- confirm against grain's units.
    """
    parsed = datetime.strptime(val, '%Y-%m-%d %H:%M:%S')
    tai = grain.Grain().utc2tai(parsed, grain.VIIRS_EPOCH)
    return tai * 10**2
def write_netcdf_file(destpath, scans, sizes):
    """Write per-scan, per-apid packet sizes to a NetCDF file.

    :param destpath: output NetCDF file path (overwritten)
    :param scans: iterable of scan start times (IET); becomes the
        'time' coordinate, one column per scan
    :param sizes: array-like of shape (len(apids), len(scans)) of byte
        counts, rows ordered by the module-level sorted ``apids``

    This is the repaired post-commit version: the source had merged
    pre/post diff lines (two signatures, duplicate dimension/variable
    creation, a dead matrix-fill loop) and a diff hunk header embedded
    in the ``bands`` string literal.
    """
    dataset = netCDF4.Dataset(destpath, 'w')
    # Document the APID -> band mapping as a global attribute.  The
    # original literal table was corrupted in the merge, so generate it
    # from band_map instead.
    dataset.bands = 'Band APID\n=========\n' + '\n'.join(
        '%s %d' % (band_map[apid], apid) for apid in apids)
    # unlimited scan_time dimension so files can grow by scan
    dataset.createDimension('scan_time', None)
    dataset.createDimension('apid', size=len(apids))
    time_var = dataset.createVariable('time', 'u8', ('scan_time',))
    size_var = dataset.createVariable(
        'size', 'i4', ('apid', 'scan_time'), fill_value=-999)
    apid_var = dataset.createVariable('apid', 'u2', ('apid',))
    time_var[:] = np.array(list(scans))
    time_var.description = 'Scan start time (IET)'
    apid_var[:] = np.array(list(apids))
    size_var[:] = sizes
    dataset.close()
def read_data_from_rdr(filepath, start, end):
    """Read per-(apid, scan-time) byte totals from a VIIRS RDR file.

    :param filepath: path to the RDR HDF5 file
    :param start: window start time (same units as tracker.obs_time)
    :param end: window end time (inclusive)
    :returns: (times, apids, dat) where times is the set of observed
        scan times within the window, apids is the set of APIDs seen,
        and dat maps (apid, time) -> total packet bytes (defaultdict,
        missing keys read as 0)
    """
    # XXX: just get the VIIRS Science RDR for now
    rdr = jpssrdr.rdr_datasets(filepath)['science'][0]
    apids = set()
    dat = defaultdict(int)
    times = set()
    for tracker, packet in rdr.packets():
        apid = packet.apid
        time = tracker.obs_time
        size = tracker.size
        # Skip data outside the requested [start, end] window.
        # BUGFIX: the merged source tested `end < time < start`, which
        # can never be true when start <= end, so nothing was filtered.
        if time < start or time > end:
            continue
        dat[(apid, time)] += size
        apids.add(apid)
        times.add(time)
    return times, apids, dat
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('viirs_rdr')
parser.add_argument('start', type=utc2tai)
parser.add_argument('end', type=utc2tai)
parser.add_argument('rdrs', nargs='+')
args = parser.parse_args()
destpath = os.path.basename(args.viirs_rdr) + '.size.nc'
scans, apids, dat = read_data_from_rdr(args.viirs_rdr)
write_netcdf_file(destpath, scans, apids, dat)
scan_times = set()
sizes = np.ones((len(apids), 86400 / 1.7864)) * float('nan')
for filepath in args.rdrs:
print "reading", filepath
times, _, dat = read_data_from_rdr(filepath, args.start, args.end)
for tidx, time in enumerate(times):
for aidx, apid in enumerate(apids):
sizes[aidx][tidx] = dat[apid, time]
destpath = os.path.basename(args.rdrs[0]) + '.scanbytes.nc'
print "writing", destpath
write_netcdf_file(destpath, scan_times, sizes)
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment