diff --git a/edosl0util/cli.py b/edosl0util/cli.py deleted file mode 100644 index eff03b90cd61cc3df92605145256b4eb3e991466..0000000000000000000000000000000000000000 --- a/edosl0util/cli.py +++ /dev/null @@ -1,194 +0,0 @@ -# encoding: utf-8 -""" -Console script entry points for CLI tools. -""" -__copyright__ = "Copyright (C) 2015 University of Wisconsin SSEC. All rights reserved." - -import io -import os -import glob -import logging -from datetime import datetime, timedelta - -from edosl0util import split, trunc, stream, merge, jpssrdr - -LOG = logging - - -def _timestamp(v): - return datetime.strptime(v, '%Y-%m-%d %H:%M:%S') - - -def _default_parser(): - import argparse - parser = argparse.ArgumentParser() - parser.add_argument('-v', '--verbose', action='store_true') - return parser - - -def _configure_logging(args): - level = logging.DEBUG if getattr(args, 'verbose', False) else logging.WARN - logging.basicConfig(level=level, format='%(message)s') - - -def cmd_trunc(): - parser = _default_parser() - parser.add_argument('-o', '--output') - parser.add_argument('filename') - parser.add_argument('start', type=_timestamp, help='YYYY-MM-DD HH:MM:SS') - parser.add_argument('end', type=_timestamp, help='YYYY-MM-DD HH:MM:SS') - args = parser.parse_args() - _configure_logging(args) - - output = args.output or os.path.basename(args.filename) + '.trunc' - with io.open(output, 'wb') as fptr: - for pkt in trunc.trunc_file(args.filename, args.start, args.end): - fptr.write(pkt.bytes()) - - -def cmd_split(): - parser = _default_parser() - parser.add_argument('--minutes', type=int, default=6) - parser.add_argument('filepath') - args = parser.parse_args() - _configure_logging(args) - - for stamp, fpath in split.split_file(args.filepath, args.minutes, os.getcwd()): - LOG.info('wrote bucket {} to {}'.format(stamp.isoformat(), fpath)) - - -def cmd_info(): - parser = _default_parser() - parser.add_argument('-a', '--aqua', action='store_true') - parser.add_argument('filepath') - args = 
parser.parse_args() - _configure_logging(args) - - num_packets = 0 - if not args.aqua: - packets = stream.jpss_packet_stream(io.open(args.filepath, 'rb')) - else: - packets = stream.aqua_packet_stream(io.open(args.filepath, 'rb')) - while True: - try: - packets.next() - num_packets += 1 - except stream.PacketTooShort as err: - LOG.warn("corrupt packet stream after %d packets: %s", - num_packets, err) - break - except StopIteration: - break - total = 0 - first, last, info = packets.info() - LOG.info("First: %s", first) - LOG.info("Last: %s", last) - for apid, dat in info.items(): - total += dat['count'] - LOG.info("%d: count=%d missing=%d", apid, dat['count'], dat['num_missing']) - LOG.info("{} total packets".format(total)) - - -def cmd_merge(): - parser = _default_parser() - parser.add_argument('-o', '--output', default='out.pds') - def interval(v): - dt = lambda v: datetime.strptime(v, '%Y-%m-%d %H:%M:%S') - return [dt(x) for x in v.split(',')] - parser.add_argument( - '-t', '--trunc-to', type=interval, - help=('Truncate to the interval given as coma separated timestamps of ' - 'the format YYYY-MM-DD HH:MM:SS. The begin time is inclusive, the ' - 'end time is exclusive.')) - parser.add_argument('pds', nargs='+') - args = parser.parse_args() - - _configure_logging(args) - - streams = [stream.jpss_packet_stream(io.open(f, 'rb')) for f in args.pds] - merge.merge( - streams, output=io.open(args.output, 'wb'), trunc_to=args.trunc_to) - - -def cmd_rdr2l0(): - """ - Create a NASA Level0 PDS file from a set of NOAA IDPS RDR files. The output - PDS file will have packets sorted by time and apid. Any packets marked in - the RDR PacketTracker as containing fill are removed. - """ - # XXX: This currently uses standard CCSDS packet merging that does not have - # any context regarding fill packets. In the future if it is desired to - # keep fill packets a new RDR specific merge will be required. 
- parser = _default_parser() - parser.description = cmd_rdr2l0.__doc__ - parser.add_argument( - '--minutes', type=int, default=120, - help=('Output size in minutes. The resulting file will be truncated ' - 'to created + minutes.')) - parser.add_argument( - '--science', action='store_true', - help='Dump science dataset') - parser.add_argument( - '--ancillary', action='store_true', - help='Dump spacecraft ancillary datasets') - """ - parser.add_argument( - '--skipfill', action='store_true', - help='Skip any packets marked as containing fill in the RDR') - """ - def timestamp(v): - return datetime.strptime(v, '%Y-%m-%d %H:%M:%S') - parser.add_argument( - '--created', type=timestamp, default=datetime.utcnow(), - help=('Time to use for creation time (yyyy-mm-dd hh:mm:ss).')) - parser.add_argument('rdr', nargs='+') - args = parser.parse_args() - _configure_logging(args) - - def pdsfilename(product): - return 'P157{}{:%y%j%H%M%S}001.PDX'.format(product, args.created) - - def remove_files(files): - [os.remove(f) for f in files] - - science_products = { - 'RNSCA-RVIRS': '0826VIIRSSCIENCEAT', - 'RCRIS-RNSCA': '1289CRISSCIENCEAAT', - 'RATMS-RNSCA': '0515ATMSSCIENCEAAT'} - - file_type = os.path.basename(args.rdr[0]).split('_')[0] - LOG.info('File type %s', file_type) - for filepath in args.rdr: - dirname, rdrname = os.path.split(filepath) - if not rdrname.startswith(file_type): - raise ValueError('All rdrs must have same file type', rdrname) - LOG.info("dumping %s", filepath) - jpssrdr.write_rdr_datasets( - filepath, - science=args.science, - ancillary=args.ancillary, - skipfill=True) - - interval = [args.created, args.created + timedelta(minutes=args.minutes, microseconds=-1)] - LOG.info("merge and truncate to %s", interval) - - if args.ancillary: - for apid in (0, 8, 11): - inputs = sorted(glob.glob('*.anc{:d}.pkts'.format(apid))) - product = '{:04d}AAAAAAAAAAAAA'.format(apid) - pdsname = pdsfilename(product) - LOG.info("merging apid %d to %s", apid, pdsname) - streams = 
[stream.jpss_packet_stream(open(f, 'rb')) for f in inputs] - merge.merge(streams, output=open(pdsname, 'wb'), trunc_to=interval) - remove_files(inputs) - - if args.science: - if file_type not in science_products: - parser.exit(1, '%s is not a supported science file type\n' % file_type) - inputs = sorted(glob.glob('*.science.pkts')) - product = science_products[file_type] - pdsname = pdsfilename(product) - LOG.info("merging %s to %s", product, pdsname) - streams = [stream.jpss_packet_stream(open(f, 'rb')) for f in inputs] - merge.merge(streams, output=open(pdsname, 'wb'), trunc_to=interval) - remove_files(inputs) diff --git a/edosl0util/cli/__init__.py b/edosl0util/cli/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/edosl0util/cli/info.py b/edosl0util/cli/info.py new file mode 100644 index 0000000000000000000000000000000000000000..ead8aa746ebda0fafd5c1bee3d425f38b76af0fa --- /dev/null +++ b/edosl0util/cli/info.py @@ -0,0 +1,42 @@ +import io +import logging +from edosl0util.cli import util +from edosl0util import stream + +LOG = logging + + +def main(): + parser = util.default_parser() + parser.add_argument('-a', '--aqua', action='store_true') + parser.add_argument('filepath') + args = parser.parse_args() + util.configure_logging(args) + + num_packets = 0 + if not args.aqua: + packets = stream.jpss_packet_stream(io.open(args.filepath, 'rb')) + else: + packets = stream.aqua_packet_stream(io.open(args.filepath, 'rb')) + while True: + try: + packets.next() + num_packets += 1 + except stream.PacketTooShort as err: + LOG.warn("corrupt packet stream after %d packets: %s", + num_packets, err) + break + except StopIteration: + break + total = 0 + first, last, info = packets.info() + LOG.info("First: %s", first) + LOG.info("Last: %s", last) + for apid, dat in info.items(): + total += dat['count'] + LOG.info("%d: count=%d missing=%d", apid, dat['count'], dat['num_missing']) + LOG.info("{} 
total packets".format(total)) + + +if __name__ == '__main__': + main() diff --git a/edosl0util/cli/merge.py b/edosl0util/cli/merge.py new file mode 100644 index 0000000000000000000000000000000000000000..44ddf1d4efc4398ec106e706f1b8c982d578a9b1 --- /dev/null +++ b/edosl0util/cli/merge.py @@ -0,0 +1,29 @@ +import io +from datetime import datetime +from edosl0util.cli import util +from edosl0util import merge, stream + + +def main(): + parser = util.default_parser() + parser.add_argument('-o', '--output', default='out.pds') + def interval(v): + dt = lambda v: datetime.strptime(v, '%Y-%m-%d %H:%M:%S') + return [dt(x) for x in v.split(',')] + parser.add_argument( + '-t', '--trunc-to', type=interval, + help=('Truncate to the interval given as comma separated timestamps of ' + 'the format YYYY-MM-DD HH:MM:SS. The begin time is inclusive, the ' + 'end time is exclusive.')) + parser.add_argument('pds', nargs='+') + args = parser.parse_args() + + util.configure_logging(args) + + streams = [stream.jpss_packet_stream(io.open(f, 'rb')) for f in args.pds] + merge.merge( + streams, output=io.open(args.output, 'wb'), trunc_to=args.trunc_to) + + +if __name__ == '__main__': + main() diff --git a/edosl0util/cli/rdr2l0.py b/edosl0util/cli/rdr2l0.py new file mode 100644 index 0000000000000000000000000000000000000000..e1aaca44a376550ec6df0c5633ef6738bcb97b52 --- /dev/null +++ b/edosl0util/cli/rdr2l0.py @@ -0,0 +1,99 @@ +""" +Create a NASA Level0 PDS file from a set of NOAA IDPS RDR files. The output +PDS file will have packets sorted by time and apid. Any packets marked in +the RDR PacketTracker as containing fill are removed. +""" +__copyright__ = "Copyright (C) 2015 University of Wisconsin SSEC. All rights reserved." 
+ +import os +import glob +import logging +from datetime import datetime, timedelta + +from edosl0util import stream, merge, jpssrdr +from edosl0util.cli import util + +LOG = logging + + +def main(): + # XXX: This currently uses standard CCSDS packet merging that does not have + # any context regarding fill packets. In the future if it is desired to + # keep fill packets a new RDR specific merge will be required. + parser = util.default_parser() + parser.description = __doc__ + parser.add_argument( + '--minutes', type=int, default=120, + help=('Output size in minutes. The resulting file will be truncated ' + 'to created + minutes.')) + parser.add_argument( + '--science', action='store_true', + help='Dump science dataset') + parser.add_argument( + '--ancillary', action='store_true', + help='Dump spacecraft ancillary datasets') + """ + parser.add_argument( + '--skipfill', action='store_true', + help='Skip any packets marked as containing fill in the RDR') + """ + def timestamp(v): + return datetime.strptime(v, '%Y-%m-%d %H:%M:%S') + parser.add_argument( + '--created', type=timestamp, default=datetime.utcnow(), + help=('Time to use for creation time (yyyy-mm-dd hh:mm:ss).')) + parser.add_argument('rdr', nargs='+') + args = parser.parse_args() + util.configure_logging(args) + + def pdsfilename(product): + return 'P157{}{:%y%j%H%M%S}001.PDX'.format(product, args.created) + + def remove_files(files): + [os.remove(f) for f in files] + + science_products = { + 'RNSCA-RVIRS': '0826VIIRSSCIENCEAT', + 'RCRIS-RNSCA': '1289CRISSCIENCEAAT', + 'RATMS-RNSCA': '0515ATMSSCIENCEAAT'} + + file_type = os.path.basename(args.rdr[0]).split('_')[0] + LOG.info('File type %s', file_type) + for filepath in args.rdr: + dirname, rdrname = os.path.split(filepath) + if not rdrname.startswith(file_type): + raise ValueError('All rdrs must have same file type', rdrname) + LOG.info("dumping %s", filepath) + jpssrdr.write_rdr_datasets( + filepath, + science=args.science, + ancillary=args.ancillary, + 
skipfill=True) + + interval = [args.created, args.created + timedelta(minutes=args.minutes, microseconds=-1)] + LOG.info("merge and truncate to %s", interval) + + if args.ancillary: + for apid in (0, 8, 11): + inputs = sorted(glob.glob('*.anc{:d}.pkts'.format(apid))) + product = '{:04d}AAAAAAAAAAAAA'.format(apid) + pdsname = pdsfilename(product) + LOG.info("merging apid %d to %s", apid, pdsname) + streams = [stream.jpss_packet_stream(open(f, 'rb')) for f in inputs] + merge.merge(streams, output=open(pdsname, 'wb'), trunc_to=interval) + remove_files(inputs) + + if args.science: + if file_type not in science_products: + parser.exit(1, '%s is not a supported science file type\n' % file_type) + inputs = sorted(glob.glob('*.science.pkts')) + product = science_products[file_type] + pdsname = pdsfilename(product) + LOG.info("merging %s to %s", product, pdsname) + streams = [stream.jpss_packet_stream(open(f, 'rb')) for f in inputs] + merge.merge(streams, output=open(pdsname, 'wb'), trunc_to=interval) + remove_files(inputs) + + +if __name__ == '__main__': + main() diff --git a/edosl0util/cli/split.py b/edosl0util/cli/split.py new file mode 100644 index 0000000000000000000000000000000000000000..6f7e8aa7d3d3779a2fbe775bd542fc7deb09a368 --- /dev/null +++ b/edosl0util/cli/split.py @@ -0,0 +1,21 @@ +import os +import logging +from edosl0util.cli import util +from edosl0util import split + +LOG = logging + + +def main(): + parser = util.default_parser() + parser.add_argument('--minutes', type=int, default=6) + parser.add_argument('filepath') + args = parser.parse_args() + util.configure_logging(args) + + for stamp, fpath in split.split_file(args.filepath, args.minutes, os.getcwd()): + LOG.info('wrote bucket {} to {}'.format(stamp.isoformat(), fpath)) + + +if __name__ == '__main__': + main() diff --git a/edosl0util/cli/trunc.py b/edosl0util/cli/trunc.py new file mode 100644 index 0000000000000000000000000000000000000000..85af935d1367c0cbc9d149280d3e90f7ceffc655 --- /dev/null +++ 
b/edosl0util/cli/trunc.py @@ -0,0 +1,23 @@ +import os +import io +from edosl0util.cli import util +from edosl0util import trunc + + +def main(): + parser = util.default_parser() + parser.add_argument('-o', '--output') + parser.add_argument('filename') + parser.add_argument('start', type=util.timestamp, help='YYYY-MM-DD HH:MM:SS') + parser.add_argument('end', type=util.timestamp, help='YYYY-MM-DD HH:MM:SS') + args = parser.parse_args() + util.configure_logging(args) + + output = args.output or os.path.basename(args.filename) + '.trunc' + with io.open(output, 'wb') as fptr: + for pkt in trunc.trunc_file(args.filename, args.start, args.end): + fptr.write(pkt.bytes()) + + +if __name__ == '__main__': + main() diff --git a/edosl0util/cli/util.py b/edosl0util/cli/util.py new file mode 100644 index 0000000000000000000000000000000000000000..ed6cc3496296396e0573ef52b091a5f4a0445114 --- /dev/null +++ b/edosl0util/cli/util.py @@ -0,0 +1,18 @@ +import logging +import argparse +from datetime import datetime + + +def timestamp(v): + return datetime.strptime(v, '%Y-%m-%d %H:%M:%S') + + +def default_parser(): + parser = argparse.ArgumentParser() + parser.add_argument('-v', '--verbose', action='store_true') + return parser + + +def configure_logging(args): + level = logging.DEBUG if getattr(args, 'verbose', False) else logging.WARN + logging.basicConfig(level=level, format='%(message)s') diff --git a/setup.py b/setup.py index 1d38336b9ec22dc927015e36c830e651faeb6b2e..90f35848d764f2e45532372d34d088e1e8f5d961 100644 --- a/setup.py +++ b/setup.py @@ -13,10 +13,10 @@ setup( ], entry_points=""" [console_scripts] - edosl0split = edosl0util.cli:cmd_split - edosl0trunc = edosl0util.cli:cmd_trunc - edosl0info = edosl0util.cli:cmd_info - edosl0merge = edosl0util.cli:cmd_merge - rdr2l0 = edosl0util.cli:cmd_rdr2l0 + edosl0split = edosl0util.cli.split:main + edosl0trunc = edosl0util.cli.trunc:main + edosl0info = edosl0util.cli.info:main + edosl0merge = edosl0util.cli.merge:main + rdr2l0 = 
edosl0util.cli.rdr2l0:main """ )