Skip to content
Snippets Groups Projects
Commit d11af0e7 authored by Bruce Flynn's avatar Bruce Flynn
Browse files

Fixes rdr2l0 CLI tool.

* Don't require same file type for all files.
* Fix bugs in packet handling.
* Properly close files to avoid exhausting file descriptors for large
  numbers of RNSCA files.
parent 1b0d4701
No related branches found
No related tags found
No related merge requests found
...@@ -32,11 +32,6 @@ def main(): ...@@ -32,11 +32,6 @@ def main():
parser.add_argument( parser.add_argument(
'--ancillary', action='store_true', '--ancillary', action='store_true',
help='Dump spacecraft ancillary datasets') help='Dump spacecraft ancillary datasets')
"""
parser.add_argument(
'--skipfill', action='store_true',
help='Skip any packets marked as containing fill in the RDR')
"""
def timestamp(v): def timestamp(v):
return datetime.strptime(v, '%Y-%m-%d %H:%M:%S') return datetime.strptime(v, '%Y-%m-%d %H:%M:%S')
parser.add_argument( parser.add_argument(
...@@ -52,17 +47,8 @@ def main(): ...@@ -52,17 +47,8 @@ def main():
def remove_files(files): def remove_files(files):
[os.remove(f) for f in files] [os.remove(f) for f in files]
science_products = {
'RNSCA-RVIRS': '0826VIIRSSCIENCE',
'RCRIS-RNSCA': '1289CRISSCIENCEA',
'RATMS-RNSCA': '0515ATMSSCIENCEA'}
file_type = os.path.basename(args.rdr[0]).split('_')[0]
LOG.info('File type %s', file_type)
for filepath in args.rdr: for filepath in args.rdr:
dirname, rdrname = os.path.split(filepath) dirname, rdrname = os.path.split(filepath)
if not rdrname.startswith(file_type):
raise ValueError('All rdrs must have same file type', rdrname)
LOG.info("dumping %s", filepath) LOG.info("dumping %s", filepath)
jpssrdr.write_rdr_datasets( jpssrdr.write_rdr_datasets(
filepath, filepath,
...@@ -79,11 +65,22 @@ def main(): ...@@ -79,11 +65,22 @@ def main():
product = '{:04d}AAAAAAAAAAA'.format(apid) product = '{:04d}AAAAAAAAAAA'.format(apid)
pdsname = pdsfilename(product) pdsname = pdsfilename(product)
LOG.info("merging apid %d to %s", apid, pdsname) LOG.info("merging apid %d to %s", apid, pdsname)
streams = [stream.jpss_packet_stream(open(f, 'rb')) for f in inputs] # there are potentially a very large number of inputs (RNSCA) so
merge.merge(streams, output=open(pdsname, 'wb'), trunc_to=interval) # make sure to keep track of files so we can explicitly close them
files = [open(f, 'rb') for f in inputs]
streams = [stream.jpss_packet_stream(f) for f in files]
with open(pdsname, 'wb') as dest:
merge.merge(streams, output=dest, trunc_to=interval)
for f in files:
f.close()
remove_files(inputs) remove_files(inputs)
science_products = {
'RNSCA-RVIRS': '0826VIIRSSCIENCE',
'RCRIS-RNSCA': '1289CRISSCIENCEA',
'RATMS-RNSCA': '0515ATMSSCIENCEA'}
if args.science: if args.science:
file_type = os.path.basename(args.rdr[0]).split('_')[0]
if file_type not in science_products: if file_type not in science_products:
parser.exit(1, '%s is not a supported science file type\n' % file_type) parser.exit(1, '%s is not a supported science file type\n' % file_type)
inputs = sorted(glob.glob('*.science.pkts')) inputs = sorted(glob.glob('*.science.pkts'))
...@@ -91,7 +88,8 @@ def main(): ...@@ -91,7 +88,8 @@ def main():
pdsname = pdsfilename(product) pdsname = pdsfilename(product)
LOG.info("merging %s to %s", product, pdsname) LOG.info("merging %s to %s", product, pdsname)
streams = [stream.jpss_packet_stream(open(f, 'rb')) for f in inputs] streams = [stream.jpss_packet_stream(open(f, 'rb')) for f in inputs]
merge.merge(streams, output=open(pdsname, 'wb'), trunc_to=interval) with open(pdsname, 'wb') as dest:
merge.merge(streams, output=dest, trunc_to=interval)
remove_files(inputs) remove_files(inputs)
......
...@@ -163,6 +163,7 @@ def _find_science_group(fobj): ...@@ -163,6 +163,7 @@ def _find_science_group(fobj):
def _rdrs_for_packet_dataset(group): def _rdrs_for_packet_dataset(group):
rdrs = []
if group: if group:
for name, buf in _generate_packet_datasets(group): for name, buf in _generate_packet_datasets(group):
if buf.shape[0] < c.sizeof(StaticHeader): if buf.shape[0] < c.sizeof(StaticHeader):
...@@ -170,16 +171,17 @@ def _rdrs_for_packet_dataset(group): ...@@ -170,16 +171,17 @@ def _rdrs_for_packet_dataset(group):
continue continue
header = StaticHeader.from_buffer(buf) header = StaticHeader.from_buffer(buf)
apids = _read_apid_list(header, buf) apids = _read_apid_list(header, buf)
yield CommonRdr(buf, header, list(apids)) rdrs.append(CommonRdr(buf, header, list(apids)))
return rdrs
def rdr_datasets(filepath): def rdr_datasets(filepath):
fobj = H5File(filepath) fobj = H5File(filepath)
rdr = {} rdr = {}
group = _find_science_group(fobj) rdr['science'] = _rdrs_for_packet_dataset(_find_science_group(fobj))
rdr['science'] = _rdrs_for_packet_dataset(group) rdr['ancillary'] = _rdrs_for_packet_dataset(
group = fobj.get('/All_Data/SPACECRAFT-DIARY-RDR_All') fobj.get('/All_Data/SPACECRAFT-DIARY-RDR_All'))
rdr['ancillary'] = _rdrs_for_packet_dataset(group) fobj.close()
return rdr return rdr
...@@ -234,12 +236,12 @@ def write_rdr_datasets(filepath, science=True, ancillary=True, skipfill=False): ...@@ -234,12 +236,12 @@ def write_rdr_datasets(filepath, science=True, ancillary=True, skipfill=False):
if ancillary: if ancillary:
for idx, rdr in enumerate(rdrs['ancillary']): for idx, rdr in enumerate(rdrs['ancillary']):
packets = {a.value: rdr.packets_for_apid(a.value) packets = {a.value: rdr.packets_for_apid(a)
for a in rdr.apids} for a in rdr.apids}
for apid, packets in packets.items(): for apid, pkts in packets.items():
LOG.debug( LOG.debug(
'writing ancillary gran %d %s-%s-%s %d', 'writing ancillary gran %d %s-%s-%s %d',
idx, rdr.header.satellite, rdr.header.sensor, idx, rdr.header.satellite, rdr.header.sensor,
rdr.header.type_id, apid.value) rdr.header.type_id, apid)
with open('{}.anc{}.pkts'.format(rdrname, apid), 'wb') as dest: with open('{}.anc{}.pkts'.format(rdrname, apid), 'wb') as dest:
_write_packets(packets, dest) _write_packets(pkts, dest, skipfill)
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment