Skip to content
Snippets Groups Projects
Commit e1cc9c69 authored by Bruce Flynn's avatar Bruce Flynn
Browse files

Take RDRs on stdin, fix offset issue.

parent 2a544403
No related branches found
No related tags found
No related merge requests found
@@ -40,12 +40,11 @@ band_map = {
# APIDs taken from the band map, in ascending order.
apids = sorted(band_map)
def utc2tai(val):
    """Parse a 'YYYY-mm-dd HH:MM:SS' UTC string and return the TAI time as
    microseconds past the VIIRS epoch.
    """
    dt = datetime.strptime(val, '%Y-%m-%d %H:%M:%S')
    # 1000**2 converts seconds to microseconds; the original 10**2 was a
    # unit-conversion bug (the corrected factor matches the sibling
    # utc2tai(dt) implementation in this file).
    return grain.Grain().utc2tai(dt, grain.VIIRS_EPOCH) * 1000**2
def utc2tai(dt):
    """Return the TAI time of datetime ``dt`` as microseconds past the
    VIIRS epoch."""
    tai_seconds = grain.Grain().utc2tai(dt, grain.VIIRS_EPOCH)
    return tai_seconds * 1000**2
# NOTE(review): this span is a unified-diff interleave of two revisions --
# the old write_netcdf_file(destpath, scans, sizes), which wrote the data
# and closed the file, and the new create_netcdf_file(destpath), which only
# builds the file structure and returns the open dataset for incremental
# appends. Lines from both revisions appear together below.
def write_netcdf_file(destpath, scans, sizes):
def create_netcdf_file(destpath):
# Create a new netCDF4 file at destpath (overwrites via mode 'w').
dataset = netCDF4.Dataset(destpath, 'w')
# Global attribute documenting the band/APID table; the triple-quoted
# string is truncated here by the diff hunk boundary on the @@ line below.
dataset.bands = """Band APID Day
==============
......@@ -78,55 +77,67 @@ ENGR 826"""
# Unlimited (appendable) scan_time dimension plus a fixed apid dimension.
dataset.createDimension('scan_time', None)
dataset.createDimension('apid', size=len(apids))
dataset.createVariable('time', 'u8', ('scan_time',))
# Old revision: un-chunked 'size' variable...
dataset.createVariable('size', 'i4', ('apid', 'scan_time',), fill_value=-999)
# ...new revision: same variable but chunked (all apids x 1024 scans) so
# growth along the unlimited dimension is efficient; -999 marks missing.
dataset.createVariable(
'size', 'i4', ('apid', 'scan_time',),
fill_value=-999, chunksizes=[len(apids), 1024])
dataset.createVariable('apid', 'u2', ('apid',))
# Old revision wrote all accumulated data and closed the file here...
dataset['time'][:] = np.array(list(scans))
dataset['apid'][:] = np.array(list(apids))
dataset['size'][:] = sizes
dataset.close()
# ...the new revision instead returns the still-open dataset to the caller.
return dataset
def read_data_from_rdr(filepath, start, end):
    """Accumulate per-(apid, scan time) packet byte counts from one RDR.

    ``start``/``end`` bound the window: only packets whose observation
    time t satisfies start <= t < end, and whose APID is in the
    module-level ``apids``, are counted.

    Returns ``(times, sizes)`` where ``times`` is the sorted list of scan
    times seen and ``sizes`` maps ``(apid, time) -> total bytes``
    (a defaultdict, so missing keys read as 0).
    """
    # XXX: just get the VIIRS Science RDR for now
    rdr = jpssrdr.rdr_datasets(filepath)['science'][0]
    sizes = defaultdict(lambda: 0)
    times = set()
    for tracker, packet in rdr.packets():
        apid = packet.apid
        time = tracker.obs_time
        size = tracker.size
        # NOTE(review): offset == -1 appears to flag a tracker entry with
        # no stored packet data -- confirm against jpssrdr.
        if tracker.offset == -1:
            continue
        # The pre-commit test here was `end < time < start`, which is never
        # true for a sane window (start < end) and so filtered nothing.
        if time < start or time >= end or apid not in apids:
            continue  # skip data outside window
        sizes[apid, time] += size
        times.add(time)
    return sorted(times), sizes
# NOTE(review): diff interleave of two revisions of the script entry point.
# Old revision: parsed start/end straight to TAI, took RDR paths as
# command-line arguments, and wrote one output file per run at the end.
# New revision: keeps start/end as datetimes (so the start time can name the
# output), reads RDR paths one-per-line from a file or stdin, and appends
# each file's scan sizes into a single open netCDF dataset as it goes.
if __name__ == '__main__':
import argparse
import argparse, sys
parser = argparse.ArgumentParser(description=__doc__)
# Old revision: arguments converted to TAI at parse time...
parser.add_argument('start', type=utc2tai)
parser.add_argument('end', type=utc2tai)
parser.add_argument('rdrs', nargs='+')
# ...new revision: parse to datetime; conversion to TAI happens below.
datetype = lambda v: datetime.strptime(v, '%Y-%m-%d %H:%M:%S')
parser.add_argument('start', type=datetype)
parser.add_argument('end', type=datetype)
# File listing RDR paths, one per line. NOTE(review): default=sys.stdin on a
# positional only applies when the argument is omitted -- confirm intent.
parser.add_argument('rdrs', type=argparse.FileType('r'), default=sys.stdin)
args = parser.parse_args()
scan_times = set()
# Old revision pre-sized for a day of 1.7864 s scans (note: float second
# dimension -- would fail under numpy as an array shape).
sizes = np.ones((len(apids), 86400 / 1.7864)) * float('nan')
for filepath in args.rdrs:
# New revision: convert the window to TAI microseconds once, up front.
start, end = utc2tai(args.start), utc2tai(args.end)
destpath = args.start.strftime('viirs_scanbytes_d%Y%m%d_t%H%M%S.nc')
dataset = create_netcdf_file(destpath)
for filepath in [l.strip() for l in args.rdrs]:
print "reading", filepath
times, _, dat = read_data_from_rdr(filepath, args.start, args.end)
times, sizes = read_data_from_rdr(filepath, start, end)
# Dense (apid x scan) array for this file; -999 matches the fill_value
# of the 'size' variable created in create_netcdf_file.
dat = np.ones((len(apids), len(times))) * -999
for tidx, time in enumerate(times):
for aidx, apid in enumerate(apids):
sizes[aidx][tidx] = dat[apid, time]
dat[aidx][tidx] = sizes[apid, time]
# Old revision: one output per run, named after the first input RDR.
destpath = os.path.basename(args.rdrs[0]) + '.scanbytes.nc'
print "writing", destpath
write_netcdf_file(destpath, scan_times, sizes)
# New revision: append this file's scans after the rows already present
# in the unlimited time dimension, then the matching 'size' columns.
var = dataset['time']
num_times = var.shape[0]
print 'time before', var.shape,
var[num_times:] = np.array(list(times))
print 'after', var.shape
var = dataset['size']
print 'size before', var.shape,
var[:,num_times:] = dat
print 'after', var.shape
dataset.close()
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment