Commit 44532834 authored by Bruce Flynn

Handle day-only apids, fixing scan indexes

parent 164b643c
@@ -6,23 +6,57 @@ import os
 import numpy as np
 import netCDF4
 from edosl0util import jpssrdr
-from edosl0util.headers import GROUP_CONTINUING, GROUP_LAST


-def write_netcdf_file(destpath, scans, sizes):
+def write_netcdf_file(destpath, scans, apids, sizes):
     dataset = netCDF4.Dataset(destpath, 'w')
+    dataset.bands = """Band APID Day
+==============
+M1 804 X
+M2 803 X
+M3 802 X
+M4 801 X
+M5 800 X
+M6 805 X
+M7 806
+M8 809
+M9 807 X
+M10 808
+M11 810 X
+M12 812
+M13 811
+M14 816
+M15 815
+M16 814
+I1 818 X
+I2 819 X
+I3 820 X
+I4 813
+I5 817
+DNB 821
+CAL 825
+ENGR 826"""
     dataset.createDimension('scan', size=len(scans))
-    dataset.createDimension('apid', size=len(sizes))
+    dataset.createDimension('apid', size=len(apids))
     dataset.createVariable('time', 'u8', ('scan',))
-    dataset.createVariable('size', 'u8', ('apid', 'scan',))
+    dataset.createVariable('size', 'i4', ('apid', 'scan',), fill_value=-999)
     dataset.createVariable('apid', 'u2', ('apid',))
     dataset['time'][:] = np.array(list(scans))
-    apids = sorted(sizes.keys())
-    # assert len(apids) == 16, "Expected 16 apids, got {}".format(apids)
-    dat = np.array([sizes[a] for a in apids])
+    dataset['time'].description = 'Scan start time (IET)'
+    dataset['apid'][:] = np.array(list(apids))
+    # FIXME: Is there a more numpyish way to do this?
+    dat = np.ones((len(apids), len(scans))) * -999
+    for sidx, scan in enumerate(scans):
+        for aidx, apid in enumerate(apids):
+            key = (apid, scan)
+            if key in sizes:
+                dat[aidx,sidx] = sizes[key]
     dataset['size'][:] = dat
-    dataset['apid'][:] = np.array(apids)
     dataset.close()

@@ -30,24 +64,23 @@ def read_data_from_rdr(filepath):
     # XXX: just get the VIIRS Science RDR for now
     rdr = jpssrdr.rdr_datasets(filepath)['science'][0]
     scans = set()
-    sizes = {}
+    apids = set()
+    dat = {}
     for tracker, packet in rdr.packets():
         apid = packet.apid
         scan = tracker.obs_time
         size = tracker.size
-        if apid not in sizes:
-            sizes[apid] = []
-        # Sum up sizes for packet groups
-        if packet.sequence_grouping in (GROUP_CONTINUING, GROUP_LAST):
-            sizes[apid][-1] += size
+        key = (apid, scan)
+        if key in dat:
+            dat[key] += size
         else:
-            sizes[apid].append(size)
+            dat[key] = size
         scans.add(scan)
+        apids.add(apid)
-    return scans, sizes
+    return scans, apids, dat


 if __name__ == '__main__':

@@ -56,5 +89,5 @@ if __name__ == '__main__':
     parser.add_argument('viirs_rdr')
     args = parser.parse_args()
     destpath = os.path.basename(args.viirs_rdr) + '.size.nc'
-    scans, sizes = read_data_from_rdr(args.viirs_rdr)
-    write_netcdf_file(destpath, scans, sizes)
+    scans, apids, dat = read_data_from_rdr(args.viirs_rdr)
+    write_netcdf_file(destpath, scans, apids, dat)
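As a quick sanity check on the new layout, here is a minimal sketch of reading back the .size.nc file this script writes. The dimension and variable names (time, apid, size) and the -999 fill value come from the diff above; the example file name and the explicit masking step are illustrative assumptions, not part of this commit.

import netCDF4
import numpy as np

# Hypothetical output path; the script names it <RDR basename> + '.size.nc'.
ds = netCDF4.Dataset('RVIRS_example.size.nc')

times = ds['time'][:]   # scan start times (IET), one per scan
apids = ds['apid'][:]   # APIDs found in the RDR
sizes = ds['size'][:]   # shape (apid, scan); -999 where an APID had no packets in a scan

# Mask the fill value so sums only count APID/scan pairs that actually had data.
sizes = np.ma.masked_equal(sizes, -999)
print('bytes per scan:', sizes.sum(axis=0))
print('bytes per apid:', dict(zip(apids.tolist(), sizes.sum(axis=1).tolist())))

ds.close()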