diff --git a/build.sh b/build.sh
deleted file mode 100755
index cbdc1088afd366ae65c25a16525addd8bde54481..0000000000000000000000000000000000000000
--- a/build.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env bash
-
-function write_version_file() {
-# pull version out of setup.py
-version=$(grep version setup.py | sed "s,.*'\(.*\)'.*$,\1,g")
-cat <<EOF > edosl0util/version.txt
-__version__ = '${version}'
-__githash__ = '$(git log --format="%H" | head -n 1)'
-__patch__ = """$(git diff)"""
-EOF
-}
-
-if [ -z $1 ]
-then
-    echo "USAGE: $0 sdist"
-fi
-
-case $1 in
-    sdist)
-        write_version_file
-        python setup.py sdist $@
-        ;;
-    *)
-        python setup.py $@
-        ;;
-esac
diff --git a/edosl0util/__init__.py b/edosl0util/__init__.py
index 8b93517946d7ae814cf295ccc3367503f2e0ca51..27a54a5f92d421b1b94bb5d942274be2af9ca4dd 100644
--- a/edosl0util/__init__.py
+++ b/edosl0util/__init__.py
@@ -1,6 +1,10 @@
+### PyVer Loader Start
 import os
-version = os.path.join(os.path.split(__file__)[0], 'version.txt')
-if os.path.exists(version):
-    execfile(version)
-del os
-del version
+verfile = 'edosl0util/version.txt'
+if os.path.exists(verfile):
+    execfile(verfile)
+else:
+    __version__ = '<unknown>'
+    __githash__ = ''
+del verfile
+### PyVer Loader End
diff --git a/edosl0util/cli.py b/edosl0util/cli.py
deleted file mode 100644
index 2532f5015e57c01196fb8da15036b9ba892c446c..0000000000000000000000000000000000000000
--- a/edosl0util/cli.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# encoding: utf-8
-"""
-Console script entry points for CLI tools.
-"""
-__copyright__ = "Copyright (C) 2015 University of Wisconsin SSEC. All rights reserved."
-
-import io
-import os
-import logging
-from datetime import datetime
-
-from edosl0util import split, trunc, stream, merge, jpssrdr
-
-LOG = logging
-
-
-def _timestamp(v):
-    return datetime.strptime(v, '%Y-%m-%d %H:%M:%S')
-
-
-def _default_parser():
-    import argparse
-    parser = argparse.ArgumentParser()
-    parser.add_argument('-v', '--verbose', action='store_true')
-    return parser
-
-
-def _configure_logging(args):
-    level = logging.DEBUG if getattr(args, 'verbose', False) else logging.WARN
-    logging.basicConfig(level=level, format='%(message)s')
-
-
-def cmd_trunc():
-    parser = _default_parser()
-    parser.add_argument('-o', '--output')
-    parser.add_argument('filename')
-    parser.add_argument('start', type=_timestamp, help='YYYY-MM-DD HH:MM:SS')
-    parser.add_argument('end', type=_timestamp, help='YYYY-MM-DD HH:MM:SS')
-    args = parser.parse_args()
-    _configure_logging(args)
-
-    output = args.output or os.path.basename(args.filename) + '.trunc'
-    with io.open(output, 'wb') as fptr:
-        for pkt in trunc.trunc_file(args.filename, args.start, args.end):
-            fptr.write(pkt.bytes())
-
-
-def cmd_split():
-    parser = _default_parser()
-    parser.add_argument('--minutes', type=int, default=6)
-    parser.add_argument('filepath')
-    args = parser.parse_args()
-    _configure_logging(args)
-
-    for stamp, fpath in split.split_file(args.filepath, args.minutes, os.getcwd()):
-        LOG.info('wrote bucket {} to {}'.format(stamp.isoformat(), fpath))
-
-
-def cmd_info():
-    parser = _default_parser()
-    parser.add_argument('-a', '--aqua', action='store_true')
-    parser.add_argument('filepath')
-    args = parser.parse_args()
-    _configure_logging(args)
-
-    num_packets = 0
-    if not args.aqua:
-        packets = stream.jpss_packet_stream(io.open(args.filepath, 'rb'))
-    else:
-        packets = stream.aqua_packet_stream(io.open(args.filepath, 'rb'))
-    while True:
-        try:
-            packets.next()
-            num_packets += 1
-        except stream.PacketTooShort as err:
-            LOG.warn("corrupt packet stream after %d packets: %s",
-                     num_packets, err)
-            break
-        except StopIteration:
-            break
-    total = 0
-    first, last, info = packets.info()
-    LOG.info("First: %s", first)
-    LOG.info("Last: %s", last)
-    for apid, dat in info.items():
-        total += dat['count']
-        LOG.info("%d: count=%d missing=%d", apid, dat['count'], dat['num_missing'])
-    LOG.info("{} total packets".format(total))
-
-
-def cmd_merge():
-    parser = _default_parser()
-    parser.add_argument('-o', '--output', default='out.pds')
-    def interval(v):
-        dt = lambda v: datetime.strptime(v, '%Y-%m-%d %H:%M:%S')
-        return [dt(x) for x in v.split(',')]
-    parser.add_argument(
-        '-t', '--trunc-to', type=interval,
-        help=('Truncate to the interval given as coma separated timestamps of '
-              'the format YYYY-MM-DD HH:MM:SS. The begin time is inclusive, the '
-              'end time is exclusive.'))
-    parser.add_argument('pds', nargs='+')
-    args = parser.parse_args()
-
-    _configure_logging(args)
-
-    streams = [stream.jpss_packet_stream(io.open(f, 'rb')) for f in args.pds]
-    merge.merge(
-        streams, output=io.open(args.output, 'wb'), trunc_to=args.trunc_to)
-
-
-def cmd_rdr2l0():
-    """
-    Extract CCSDS packets from a JPSS RDR. They are extracted in the order in
-    which they are listed in the APID list and in packet tracker order, i.e.,
-    Not in time/apid order. Files will be named using the input h5 name with
-    .science.pds and .diary[0,8,11].pds appended.
-    """
-    parser = _default_parser()
-    parser.description = cmd_rdr2l0.__doc__
-    parser.add_argument(
-        '-d', '--diary', action='store_true',
-        help='Write diary[0,8,11].pds if available')
-    parser.add_argument(
-        '-f', '--skipfill', action='store_true',
-        help=('Skip any packets that are marked in the packet tracker as '
-              'containing fill'))
-    parser.add_argument('rdr')
-    args = parser.parse_args()
-    _configure_logging(args)
-
-    jpssrdr.write_rdr_datasets(args.rdr, args.diary)
diff --git a/edosl0util/cli/__init__.py b/edosl0util/cli/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/edosl0util/cli/info.py b/edosl0util/cli/info.py
new file mode 100644
index 0000000000000000000000000000000000000000..ead8aa746ebda0fafd5c1bee3d425f38b76af0fa
--- /dev/null
+++ b/edosl0util/cli/info.py
@@ -0,0 +1,44 @@
+import io
+import logging
+from edosl0util.cli import util
+from edosl0util import stream
+
+LOG = logging
+
+
+def main():
+    parser = util.default_parser()
+    parser.add_argument('-a', '--aqua', action='store_true')
+    parser.add_argument('filepath')
+    args = parser.parse_args()
+    util.configure_logging(args)
+
+    num_packets = 0
+    if not args.aqua:
+        packets = stream.jpss_packet_stream(io.open(args.filepath, 'rb'))
+    else:
+        packets = stream.aqua_packet_stream(io.open(args.filepath, 'rb'))
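+    # Iterate the entire stream so per-APID counts and gap info accumulate
+    # before packets.info() is queried below.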
+    while True:
+        try:
+            packets.next()
+            num_packets += 1
+        except stream.PacketTooShort as err:
+            LOG.warn("corrupt packet stream after %d packets: %s",
+                     num_packets, err)
+            break
+        except StopIteration:
+            break
+    total = 0
+    first, last, info = packets.info()
+    LOG.info("First: %s", first)
+    LOG.info("Last: %s", last)
+    for apid, dat in info.items():
+        total += dat['count']
+        LOG.info("%d: count=%d missing=%d", apid, dat['count'], dat['num_missing'])
+    LOG.info("{} total packets".format(total))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/edosl0util/cli/merge.py b/edosl0util/cli/merge.py
new file mode 100644
index 0000000000000000000000000000000000000000..44ddf1d4efc4398ec106e706f1b8c982d578a9b1
--- /dev/null
+++ b/edosl0util/cli/merge.py
@@ -0,0 +1,29 @@
+import io
+from datetime import datetime
+from edosl0util.cli import util
+from edosl0util import merge, stream
+
+
+def main():
+    parser = util.default_parser()
+    parser.add_argument('-o', '--output', default='out.pds')
+    def interval(v):
+        dt = lambda v: datetime.strptime(v, '%Y-%m-%d %H:%M:%S')
+        return [dt(x) for x in v.split(',')]
+    parser.add_argument(
+        '-t', '--trunc-to', type=interval,
+        help=('Truncate to the interval given as comma-separated timestamps of '
+              'the format YYYY-MM-DD HH:MM:SS. The begin time is inclusive, the '
+              'end time is exclusive.'))
+    parser.add_argument('pds', nargs='+')
+    args = parser.parse_args()
+
+    util.configure_logging(args)
+
+    streams = [stream.jpss_packet_stream(io.open(f, 'rb')) for f in args.pds]
+    merge.merge(
+        streams, output=io.open(args.output, 'wb'), trunc_to=args.trunc_to)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/edosl0util/cli/rdr2l0.py b/edosl0util/cli/rdr2l0.py
new file mode 100644
index 0000000000000000000000000000000000000000..e1aaca44a376550ec6df0c5633ef6738bcb97b52
--- /dev/null
+++ b/edosl0util/cli/rdr2l0.py
@@ -0,0 +1,106 @@
+"""
+Create a NASA Level0 PDS file from a set of NOAA IDPS RDR files. The output
+PDS file will have packets sorted by time and apid. Any packets marked in
+the RDR PacketTracker as containing fill are removed.
+"""
+__copyright__ = "Copyright (C) 2015 University of Wisconsin SSEC. All rights reserved."
+
+import os
+import glob
+import logging
+from datetime import datetime, timedelta
+
+from edosl0util import stream, merge, jpssrdr
+from edosl0util.cli import util
+
+LOG = logging
+
+
+def main():
+    # XXX: This currently uses standard CCSDS packet merging, which has no
+    #      context regarding fill packets. If keeping fill packets is ever
+    #      desired, a new RDR-specific merge will be required.
+    parser = util.default_parser()
+    parser.description = __doc__
+    parser.add_argument(
+        '--minutes', type=int, default=120,
+        help=('Output size in minutes. The resulting file will be truncated '
+              'to the interval from created to created + minutes.'))
+    parser.add_argument(
+        '--science', action='store_true',
+        help='Dump science dataset')
+    parser.add_argument(
+        '--ancillary', action='store_true',
+        help='Dump spacecraft ancillary datasets')
+    """
+    parser.add_argument(
+        '--skipfill', action='store_true',
+        help='Skip any packets marked as containing fill in the RDR')
+    """
+    def timestamp(v):
+        return datetime.strptime(v, '%Y-%m-%d %H:%M:%S')
+    parser.add_argument(
+        '--created', type=timestamp, default=datetime.utcnow(),
+        help=('Creation time used for output file names and truncation (YYYY-MM-DD HH:MM:SS).'))
+    parser.add_argument('rdr', nargs='+')
+    args = parser.parse_args()
+    util.configure_logging(args)
+
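+    # Output names follow an EDOS-style convention: 'P' + the S-NPP spacecraft
+    # id (157) + product id + creation timestamp + a fixed '001.PDX' suffix.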
+    def pdsfilename(product):
+        return 'P157{}{:%y%j%H%M%S}001.PDX'.format(product, args.created)
+
+    def remove_files(files):
+        [os.remove(f) for f in files]
+
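+    # Map the RDR filename prefix (first '_'-delimited field) to the EDOS
+    # product id used to build the science PDS output name.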
+    science_products = {
+        'RNSCA-RVIRS': '0826VIIRSSCIENCEAT',
+        'RCRIS-RNSCA': '1289CRISSCIENCEAAT',
+        'RATMS-RNSCA': '0515ATMSSCIENCEAAT'}
+
+    file_type = os.path.basename(args.rdr[0]).split('_')[0]
+    LOG.info('File type %s', file_type)
+    for filepath in args.rdr:
+        dirname, rdrname = os.path.split(filepath)
+        if not rdrname.startswith(file_type):
+            raise ValueError('All rdrs must have same file type', rdrname)
+        LOG.info("dumping %s", filepath)
+        jpssrdr.write_rdr_datasets(
+            filepath,
+            science=args.science,
+            ancillary=args.ancillary,
+            skipfill=True)
+
+    interval = [args.created, args.created + timedelta(minutes=args.minutes, microseconds=-1)]
+    LOG.info("merge and truncate to %s", interval)
+
+    if args.ancillary:
+        for apid in (0, 8, 11):
+            inputs = sorted(glob.glob('*.anc{:d}.pkts'.format(apid)))
+            product = '{:04d}AAAAAAAAAAAAA'.format(apid)
+            pdsname = pdsfilename(product)
+            LOG.info("merging apid %d to %s", apid, pdsname)
+            streams = [stream.jpss_packet_stream(open(f, 'rb')) for f in inputs]
+            merge.merge(streams, output=open(pdsname, 'wb'), trunc_to=interval)
+            remove_files(inputs)
+
+    if args.science:
+        if file_type not in science_products:
+            parser.exit(1, '%s is not a supported science file type\n' % file_type)
+        inputs = sorted(glob.glob('*.science.pkts'))
+        product = science_products[file_type]
+        pdsname = pdsfilename(product)
+        LOG.info("merging %s to %s", product, pdsname)
+        streams = [stream.jpss_packet_stream(open(f, 'rb')) for f in inputs]
+        merge.merge(streams, output=open(pdsname, 'wb'), trunc_to=interval)
+        remove_files(inputs)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/edosl0util/cli/split.py b/edosl0util/cli/split.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f7e8aa7d3d3779a2fbe775bd542fc7deb09a368
--- /dev/null
+++ b/edosl0util/cli/split.py
@@ -0,0 +1,21 @@
+import os
+import logging
+from edosl0util.cli import util
+from edosl0util import split
+
+LOG = logging
+
+
+def main():
+    parser = util.default_parser()
+    parser.add_argument('--minutes', type=int, default=6)
+    parser.add_argument('filepath')
+    args = parser.parse_args()
+    util.configure_logging(args)
+
+    for stamp, fpath in split.split_file(args.filepath, args.minutes, os.getcwd()):
+        LOG.info('wrote bucket {} to {}'.format(stamp.isoformat(), fpath))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/edosl0util/cli/trunc.py b/edosl0util/cli/trunc.py
new file mode 100644
index 0000000000000000000000000000000000000000..85af935d1367c0cbc9d149280d3e90f7ceffc655
--- /dev/null
+++ b/edosl0util/cli/trunc.py
@@ -0,0 +1,23 @@
+import os
+import io
+from edosl0util.cli import util
+from edosl0util import trunc
+
+
+def main():
+    parser = util.default_parser()
+    parser.add_argument('-o', '--output')
+    parser.add_argument('filename')
+    parser.add_argument('start', type=util.timestamp, help='YYYY-MM-DD HH:MM:SS')
+    parser.add_argument('end', type=util.timestamp, help='YYYY-MM-DD HH:MM:SS')
+    args = parser.parse_args()
+    util.configure_logging(args)
+
+    output = args.output or os.path.basename(args.filename) + '.trunc'
+    with io.open(output, 'wb') as fptr:
+        for pkt in trunc.trunc_file(args.filename, args.start, args.end):
+            fptr.write(pkt.bytes())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/edosl0util/cli/util.py b/edosl0util/cli/util.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed6cc3496296396e0573ef52b091a5f4a0445114
--- /dev/null
+++ b/edosl0util/cli/util.py
@@ -0,0 +1,18 @@
+import logging
+import argparse
+from datetime import datetime
+
+
+def timestamp(v):
+    return datetime.strptime(v, '%Y-%m-%d %H:%M:%S')
+
+
+def default_parser():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    return parser
+
+
+def configure_logging(args):
+    level = logging.DEBUG if getattr(args, 'verbose', False) else logging.WARN
+    logging.basicConfig(level=level, format='%(message)s')
diff --git a/edosl0util/jpssrdr.py b/edosl0util/jpssrdr.py
index a217fae272350d01a365444e5c4998abe5ffcdda..62e9714bccf679ca251dd4e29c722b13e488be52 100644
--- a/edosl0util/jpssrdr.py
+++ b/edosl0util/jpssrdr.py
@@ -12,7 +12,6 @@ __copyright__ = "Copyright (C) 2015 University of Wisconsin SSEC. All rights res
 import os
 import logging
 import ctypes as c
-from datetime import datetime
 from collections import namedtuple
 
 import numpy as np
@@ -177,7 +176,7 @@ def rdr_datasets(filepath):
     group = _find_science_group(fobj)
     rdr['science'] = _rdrs_for_packet_dataset(group)
     group = fobj.get('/All_Data/SPACECRAFT-DIARY-RDR_All')
-    rdr['diary'] = _rdrs_for_packet_dataset(group)
+    rdr['ancillary'] = _rdrs_for_packet_dataset(group)
     return rdr
 
 
@@ -211,25 +210,35 @@
         return sorted(packets, key=lambda p: p.packet.apid)
 
 
-def write_rdr_datasets(filepath, diary=True, skipfill=False):
-    def write_packets(pkts, dest):
-        with open(dest, 'wb') as fptr:
-            for pkt in pkts:
-                if pkt.tracker.fill_percent != 0 and skipfill:
-                    continue
-                fptr.write(pkt.packet)
+def _write_packets(pkts, dest, skipfill):
+    for pkt in pkts:
+        if pkt.tracker.fill_percent != 0 and skipfill:
+            continue
+        dest.write(pkt.packet)
+
+
+def write_rdr_datasets(filepath, science=True, ancillary=True, skipfill=False):
     rdrname = os.path.basename(filepath)
     rdrs = rdr_datasets(filepath)
-    for idx, rdr in enumerate(rdrs['science']):
-        fname = '{}.science.pds'.format(rdrname)
-        hdr = rdr.header
-        LOG.debug('writing gran %d %s-%s-%s', idx, hdr.satellite, hdr.sensor, hdr.type_id)
-        write_packets(rdr.packets(), fname)
-
-    if diary:
-        for idx, rdr in enumerate(rdrs['diary']):
-            for apid in rdr.apids:
-                fname = '{}.diary{}.pds'.format(rdrname, apid.value)
-                hdr = rdr.header
-                LOG.debug('writing gran %d %s-%s-%s %d', idx, hdr.satellite, hdr.sensor, hdr.type_id, apid.value)
-                write_packets(rdr.packets_for_apid(apid), fname)
+
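+    # Writes '<rdrname>.science.pkts' with all science granules and one
+    # '<rdrname>.anc<apid>.pkts' file per ancillary APID.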
+    if science:
+        dest = open('{}.science.pkts'.format(rdrname), 'wb')
+        for idx, rdr in enumerate(rdrs['science']):
+            LOG.debug(
+                'writing science gran %d %s-%s-%s',
+                idx, rdr.header.satellite, rdr.header.sensor, rdr.header.type_id)
+            _write_packets(rdr.packets(), dest, skipfill)
+
+    if ancillary:
+        for idx, rdr in enumerate(rdrs['ancillary']):
+            by_apid = {a.value: rdr.packets_for_apid(a.value)
+                       for a in rdr.apids}
+            for apid, packets in by_apid.items():
+                dest = open('{}.anc{}.pkts'.format(rdrname, apid), 'wb')
+                LOG.debug(
+                    'writing ancillary gran %d %s-%s-%s %d',
+                    idx, rdr.header.satellite, rdr.header.sensor,
+                    rdr.header.type_id, apid)
+                _write_packets(packets, dest, skipfill)
diff --git a/setup.py b/setup.py
index 43d7489cb33258e0b2d760f8a0e9a39a13db2ca0..a41c6fb9a0695d221e022e2ec55137bfc5de1d4a 100644
--- a/setup.py
+++ b/setup.py
@@ -5,18 +5,23 @@ setup(
     author='Bruce Flynn',
     author_email='brucef@ssec.wisc.edu',
     description='Utilities for working with EDOS L0 PDS files',
-    version='0.5b',
+    version='0.6',
     zip_safe=False,
     packages=find_packages(),
+    pyver=True,
+    dependency_links=['https://sips.ssec.wisc.edu/eggs/packages'],
+    setup_requires=[
+        'PyVer',
+    ],
     install_requires=[
         'h5py',
     ],
     entry_points="""
     [console_scripts]
-    edosl0split = edosl0util.cli:cmd_split
-    edosl0trunc = edosl0util.cli:cmd_trunc
-    edosl0info = edosl0util.cli:cmd_info
-    edosl0merge = edosl0util.cli:cmd_merge
-    rdr2l0 = edosl0util.cli:cmd_rdr2l0
+    edosl0split = edosl0util.cli.split:main
+    edosl0trunc = edosl0util.cli.trunc:main
+    edosl0info = edosl0util.cli.info:main
+    edosl0merge = edosl0util.cli.merge:main
+    rdr2l0 = edosl0util.cli.rdr2l0:main
     """
 )