Commit 33e3bd4a authored by David Hoese

Clean up style and rename netcdf module

parent c1f5d51e
File moved
@@ -9,7 +9,7 @@ import time
 import sys
 import requests
 from metobscommon import influxdb
-from aosstower.l00.parser import read_frames
+from aosstower.level_00.parser import read_frames
 LOG = logging.getLogger(__name__)
 # map station name to InfluxDB tags
...
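Note: the rename from `aosstower.l00` to `aosstower.level_00` makes the processing level explicit and avoids the ambiguous `l`/`1`/`0` glyphs. If downstream scripts still import the old path, a small compatibility shim could keep them working during the transition. This shim is not part of the commit; it is a sketch, assuming the package layout implied by the imports above:

```python
# aosstower/l00/__init__.py -- hypothetical compatibility shim (not in this commit)
import sys
import warnings

from aosstower.level_00 import parser

warnings.warn("aosstower.l00 is deprecated; import aosstower.level_00 instead",
              DeprecationWarning, stacklevel=2)

# let 'from aosstower.l00.parser import read_frames' keep resolving
sys.modules[__name__ + '.parser'] = parser
```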
File moved
File moved
File moved
@@ -3,19 +3,19 @@ import sys
 import logging
 import pandas as pd
 from datetime import datetime as dt
-from aosstower.l00 import parser
+from aosstower.level_00 import parser
 from netCDF4 import Dataset
 import numpy as np
 import platform
 from aosstower import station
 from datetime import timedelta as delta
-import calc
+from aosstower.level_b1 import calc
 LOG = logging.getLogger(__name__)
-#create the '_mean','_low','_high' file structure
 def make_mean_dict(source_dict):
+    """Create the '_mean','_low','_high' file structure."""
     dest_dict = {}
     for key in source_dict:
         dest_dict[key + '_high'] = source_dict[key]
@@ -23,10 +23,11 @@ def make_mean_dict(source_dict):
         dest_dict[key + '_low'] = source_dict[key]
     return dest_dict
 mean_database = make_mean_dict(parser.database)
-def filterArray(array, valid_min, valid_max):
+def filter_array(array, valid_min, valid_max):
     qcControl = []
     for value in array:
@@ -44,18 +45,15 @@ def filterArray(array, valid_min, valid_max):
     return np.array(qcControl)
-# The purpose of this function is to write the dimensions
-# for the nc file
-# no parameters
-# no returns
-def writeDimensions(ncFile):
+def write_dimensions(ncFile):
     ncFile.createDimension('time', None)
     ncFile.createDimension('max_len_station_name', 32)
     return ncFile
-def createVariables(ncFile, firstStamp, chunksizes, zlib, database=parser.database):
+def create_variables(ncFile, firstStamp, chunksizes, zlib, database=parser.database):
     # base_time long name
     btln = 'base time as unix timestamp'
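Note: in `write_dimensions` above, `createDimension('time', None)` declares `time` as the unlimited dimension, so records can be appended as new observations are written, while `max_len_station_name` is a fixed 32-character dimension for the station-name string. A minimal standalone sketch of the same setup (file name is illustrative):

```python
import numpy as np
from netCDF4 import Dataset

nc = Dataset('example.nc', 'w', format='NETCDF4_CLASSIC')
nc.createDimension('time', None)                # None => unlimited, grows on write
nc.createDimension('max_len_station_name', 32)  # fixed width for station-name chars
time_var = nc.createVariable('time', np.float64, ('time',))
time_var[:] = np.arange(5)                      # the unlimited dimension now has length 5
nc.close()
```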
@@ -71,8 +69,6 @@ def createVariables(ncFile, firstStamp, chunksizes, zlib, database=parser.database):
     # time units
     tu = 'seconds since ' + firstStamp.strftime('%Y-%m-%d 00:00:00Z')
     coordinates = {
         # fields: type, dimension, fill, valid_min, std_name, longname, units, valid_max, cf_role, axis
         'lon': [np.float32, None, float(-999), '-180L', 'longitude', None, 'degrees_east', '180L', None],
@@ -81,22 +77,22 @@ def createVariables(ncFile, firstStamp, chunksizes, zlib, database=parser.database):
         'base_time': [np.int32, None, float(-999), None, 'time', btln, btu, None, None],
         'time_offset': [np.float64, 'time', float(-999), None, 'time', tln, tu, None, None],
         'station_name': ['c', 'max_len_station_name', '-', None, None, 'station name', None, None, 'timeseries_id'],
-        'time': [np.float32, 'time', float(-999), None, None, "Time offset from epoch", "seconds since 1970-01-01 00:00:00Z", None, None, None]
+        'time': [np.float32, 'time', float(-999), None, None, "Time offset from epoch",
+                 "seconds since 1970-01-01 00:00:00Z", None, None, None]
     }
-    for key in coordinates:
-        attr = coordinates[key]
-        if(attr[1]):
+    for key, attr in coordinates.items():
+        if attr[1]:
             if attr[1] == 'max_len_station_name':
-                if (chunksizes) and chunksizes[0] > 32:
-                    variable = ncFile.createVariable(key, attr[0], dimensions=(attr[1]), fill_value=attr[2], zlib=zlib, chunksizes=[32])
+                if chunksizes and chunksizes[0] > 32:
+                    variable = ncFile.createVariable(key, attr[0], dimensions=(attr[1]), fill_value=attr[2], zlib=zlib,
+                                                     chunksizes=[32])
                 else:
-                    variable = ncFile.createVariable(key, attr[0], dimensions=(attr[1]), fill_value=attr[2], zlib=zlib, chunksizes=chunksizes)
+                    variable = ncFile.createVariable(key, attr[0], dimensions=(attr[1]), fill_value=attr[2], zlib=zlib,
+                                                     chunksizes=chunksizes)
             else:
-                variable = ncFile.createVariable(key, attr[0], dimensions=(attr[1]), fill_value=attr[2], zlib=zlib, chunksizes=chunksizes)
+                variable = ncFile.createVariable(key, attr[0], dimensions=(attr[1]), fill_value=attr[2], zlib=zlib,
+                                                 chunksizes=chunksizes)
         else:
             variable = ncFile.createVariable(key, attr[0], fill_value=attr[1], zlib=zlib, chunksizes=chunksizes)
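Note: `create_variables` drives `createVariable` from a table of per-variable attributes, keeping the CF metadata in one place instead of one code block per variable. A trimmed, hypothetical illustration of that pattern (names and values abbreviated from the `coordinates` dict above):

```python
import numpy as np
from netCDF4 import Dataset

nc = Dataset('coords.nc', 'w', format='NETCDF4_CLASSIC')
nc.createDimension('time', None)

# fields: dtype, dimension, fill value, standard_name, units
row = (np.float64, 'time', -999.0, 'time', 'seconds since 1970-01-01 00:00:00Z')
var = nc.createVariable('time_offset', row[0], dimensions=(row[1],),
                        fill_value=row[2], zlib=True)
var.standard_name = row[3]
var.units = row[4]
nc.close()
```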
@@ -105,20 +101,20 @@ def createVariables(ncFile, firstStamp, chunksizes, zlib, database=parser.database):
             variable.positive = 'up'
             variable.axis = 'Z'
-        if(attr[3]):
+        if attr[3]:
             variable.valid_min = attr[3]
             variable.valid_max = attr[7]
-        if(attr[4]):
+        if attr[4]:
             variable.standard_name = attr[4]
-        if(attr[5]):
+        if attr[5]:
             variable.long_name = attr[5]
-        if(attr[6]):
+        if attr[6]:
             variable.units = attr[6]
-        if(attr[8]):
+        if attr[8]:
             variable.cf_role = attr[8]
         if key == 'base_time':
@@ -128,19 +124,19 @@ def createVariables(ncFile, firstStamp, chunksizes, zlib, database=parser.database):
             variable.calendar = 'gregorian'
     for entry in database:
-        if(entry == 'stamp'):
+        if entry == 'stamp':
             continue
         varTup = database[entry]
         variable = ncFile.createVariable(entry, np.float32,
-                                         dimensions=('time'), fill_value=float(-99999), zlib=zlib, chunksizes=chunksizes)
+                                         dimensions=('time'), fill_value=float(-99999), zlib=zlib,
+                                         chunksizes=chunksizes)
         variable.standard_name = varTup[1]
         variable.description = varTup[3]
         variable.units = varTup[4]
-        if(varTup[5] != ''):
+        if varTup[5] != '':
             variable.valid_min = float(varTup[5])
             variable.valid_max = float(varTup[6])
@@ -176,43 +172,37 @@ def createVariables(ncFile, firstStamp, chunksizes, zlib, database=parser.database):
     return ncFile
-def getGust(rollingAvg, speeds):
+def get_gust(rollingAvg, speeds):
     averages = rollingAvg.tolist()
     maxSpeed = speeds['wind_speed'].tolist()
     gust = []
     for idx, average in enumerate(averages):
         if not average:
             gust.append(np.nan)
             continue
         elif average >= 4.63 and maxSpeed[idx] > average + 2.573:
             gust.append(maxSpeed[idx])
         else:
             gust.append(np.nan)
             continue
     return gust
-#gets the rolling mean closest to the nearest minute
-def getRolling(series, minutes):
-    returnSeries = series.rolling(25, win_type='boxcar').mean()
+def get_rolling(series, minutes):
+    """Get the rolling mean closest to the nearest minute"""
+    returnSeries = series.rolling(25, win_type='boxcar').mean()
     data = {}
     for minute in minutes:
         # doesn't go past the minute
-        closestStamp = returnSeries.index.asof(minute)
         data[minute] = returnSeries[returnSeries.index.asof(minute)]
-    returnSeries = pd.Series(data)
-    return returnSeries
+    return pd.Series(data)
-def getNewWindDirection(wind_dir, wind_speed, stamps):
+def get_new_wind_direction(wind_dir, wind_speed, stamps):
    newWindDir = {}
    for stamp in stamps:
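Note: `get_gust` above reports a gust only when the rolling average is at least 4.63 m/s and the per-minute maximum exceeds that average by more than 2.573 m/s. The loop could also be written with pandas masks; a sketch of an equivalent form, not what this commit does:

```python
import numpy as np
import pandas as pd

def gusts_vectorized(rolling_avg: pd.Series, max_speed: pd.Series) -> pd.Series:
    """Mask-based equivalent of get_gust: NaN wherever the gust test fails."""
    is_gust = (rolling_avg >= 4.63) & (max_speed > rolling_avg + 2.573)
    return max_speed.where(is_gust, np.nan)
```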
@@ -220,7 +210,6 @@ def getNewWindDirection(wind_dir, wind_speed, stamps):
         if before not in wind_speed.index:
             newWindDir[stamp] = None
         else:
             speed = wind_speed[before: stamp].tolist()
             dire = wind_dir[before: stamp].tolist()
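Note: `get_new_wind_direction` collects the speeds and directions inside each window before combining them. Wind direction cannot be averaged arithmetically (the mean of 359° and 1° is not 180°), so the standard approach, and presumably what the `calc` helpers in `aosstower.level_b1` implement, is a speed-weighted vector mean. A generic sketch of that technique, not code from this commit:

```python
import numpy as np

def mean_wind_direction(speeds, directions_deg):
    """Speed-weighted vector mean of wind direction, returned in degrees [0, 360)."""
    rad = np.deg2rad(np.asarray(directions_deg))
    spd = np.asarray(speeds)
    u = -spd * np.sin(rad)   # meteorological u component (toward east)
    v = -spd * np.cos(rad)   # meteorological v component (toward north)
    return np.rad2deg(np.arctan2(-u.mean(), -v.mean())) % 360
```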
@@ -231,7 +220,8 @@ def getNewWindDirection(wind_dir, wind_speed, stamps):
     return pd.Series(newWindDir)
-def minuteAverages(frame):
+def minute_averages(frame):
     frame['minute'] = [(ts + delta(minutes=1)).replace(second=0) for ts in frame.index]
     newFrame = frame.groupby('minute').mean()
     newFrame.index.names = ['']
@@ -241,63 +231,52 @@ def minuteAverages(frame):
         del newFrame['wind_speed']
         windSeries = frame['wind_speed']
-        windSeries = getRolling(windSeries, list(newFrame.index))
+        windSeries = get_rolling(windSeries, list(newFrame.index))
         newFrame['wind_speed'] = windSeries
         rollingAvg = newFrame['wind_speed']
         maxSpeed = pd.DataFrame()
         maxSpeed['minute'] = frame['minute']
         maxSpeed['speed'] = frame['wind_speed']
         maxSpeed = frame.groupby('minute').max()
-        gust = getGust(rollingAvg, maxSpeed)
+        gust = get_gust(rollingAvg, maxSpeed)
         newFrame['gust'] = gust
     if 'wind_dir' in columns:
         del newFrame['wind_dir']
         dupFrame = frame.set_index('minute')
         stamps = newFrame.index
         windDirSeries = dupFrame['wind_dir']
         windSeries = dupFrame['wind_speed']
-        windDirSeries = getNewWindDirection(windDirSeries, windSeries, stamps)
+        windDirSeries = get_new_wind_direction(windDirSeries, windSeries, stamps)
         newFrame['wind_dir'] = windDirSeries
     del frame['minute']
     return newFrame.fillna(-99999)
-def averageOverInterval(frame,interval_width):
+def average_over_interval(frame, interval_width):
     """takes a frame and an interval to average it over, and returns a minimum,
     maximum, and average dataframe for that interval"""
     ts = frame.index
     # round each timestamp to the nearest n minutes
     frame['interval'] = (ts.astype(int) - ts.astype(int) % (interval_width * 60e9)).astype('datetime64[ns]')
-    outFrames = {}
-    outFrames['low'] = frame.groupby('interval').min()
-    outFrames['high'] = frame.groupby('interval').max()
-    outFrames['mean'] = frame.groupby('interval').mean()
+    out_frames = {
+        'low': frame.groupby('interval').min(),
+        'high': frame.groupby('interval').max(),
+        'mean': frame.groupby('interval').mean(),
+    }
     del frame['interval']
-    for key in outFrames:
+    for key in out_frames:
         # append the appropriate suffix to each column
-        columns = outFrames[key].columns
-        outFrames[key].columns = ['_'.join([col,key]) for col in columns]
-    outFrames = pd.concat(outFrames.values(),axis=1)
-    return outFrames
+        columns = out_frames[key].columns
+        out_frames[key].columns = ['_'.join([col, key]) for col in columns]
+    out_frames = pd.concat(out_frames.values(), axis=1)
+    return out_frames
-def getData(inputFiles):
+def get_data(inputFiles):
     dictData = {}
     for filename in inputFiles:
         getFrames = list(parser.read_frames(filename))
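Note: `average_over_interval` buckets timestamps by integer arithmetic on nanoseconds (`ts.astype(int) % (interval_width * 60e9)`). The same low/high/mean aggregation could be expressed with `DataFrame.resample`; a sketch assuming `frame` has a `DatetimeIndex` and `interval_width` is a whole number of minutes:

```python
import pandas as pd

def average_over_interval_resample(frame: pd.DataFrame, interval_width: int) -> pd.DataFrame:
    resampled = frame.resample(f'{interval_width}min')
    out = pd.concat({'low': resampled.min(),
                     'high': resampled.max(),
                     'mean': resampled.mean()}, axis=1)
    # flatten the (stat, column) MultiIndex into the 'column_stat' names used above
    out.columns = [f'{col}_{stat}' for stat, col in out.columns]
    return out
```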
@@ -312,7 +291,8 @@ def getData(inputFiles):
     return pd.DataFrame(dictData).transpose().replace(-99999, np.nan)
-def writeVars(ncFile, frame, database=parser.database):
+def write_vars(ncFile, frame, database=parser.database):
     stamps = list(frame.index)
     baseDTObj = dt.strptime(str(stamps[0]).split(' ')[0], '%Y-%m-%d')
@@ -370,7 +350,7 @@ def writeVars(ncFile, frame, database=parser.database):
         valid_min = database[varName][5]
         valid_max = database[varName][6]
-        fileVar['qc_' + varName][:] = filterArray(dataArray, valid_min, valid_max)
+        fileVar['qc_' + varName][:] = filter_array(dataArray, valid_min, valid_max)
     coordinates = ['lon', 'lat', 'alt', 'base_time', 'time_offset', 'station_name', 'time']
@@ -389,6 +369,7 @@ def writeVars(ncFile, frame, database=parser.database):
     return ncFile
 # The purpose of this method is to take a begin date, and end date
 # input filenames and output filename and create a netCDF file
 # based upon that
@@ -397,7 +378,7 @@ def writeVars(ncFile, frame, database=parser.database):
 # @param input filenames - list of filenames
 # @param output filename - filename of the netcdf file
-def createGiantNetCDF(start, end, inputFiles, outputName, zlib, chunkSize,
+def create_giant_netCDF(start, end, inputFiles, outputName, zlib, chunkSize,
                       interval_width=None, database=parser.database):
     default = False
@@ -407,16 +388,16 @@ def createGiantNetCDF(start, end, inputFiles, outputName, zlib, chunkSize,
     else:
         default = True
-    frame = getData(inputFiles)
+    frame = get_data(inputFiles)
     if (frame.empty):
         return False
     else:
-        frame = minuteAverages(frame)
+        frame = minute_averages(frame)
         if interval_width:
-            frame = averageOverInterval(frame,interval_width)
+            frame = average_over_interval(frame, interval_width)
     if (start and end):
         frame = frame[start.strftime('%Y-%m-%d %H:%M:%S'): end.strftime('%Y-%m-%d %H:%M:%S')]
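Note: the `frame[start_str:end_str]` line relies on pandas label-based slicing of a `DatetimeIndex`, which is inclusive of both endpoints. A minimal illustration:

```python
import pandas as pd

idx = pd.date_range('2016-01-01', periods=4, freq='h')
frame = pd.DataFrame({'wind_speed': [3.0, 4.0, 5.0, 6.0]}, index=idx)

# label-based slice: both endpoints are included
subset = frame['2016-01-01 01:00:00':'2016-01-01 02:00:00']
assert len(subset) == 2
```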
@@ -428,37 +409,32 @@ def createGiantNetCDF(start, end, inputFiles, outputName, zlib, chunkSize,
     ncFile = Dataset(outputName, 'w', format='NETCDF4_CLASSIC')
-    ncFile = writeDimensions(ncFile)
-    ncFile = createVariables(ncFile, firstStamp, chunksizes, zlib,database)
+    ncFile = write_dimensions(ncFile)
+    ncFile = create_variables(ncFile, firstStamp, chunksizes, zlib, database)
     ncFile.inputFiles = ', '.join(inputFiles)
-    ncFile = writeVars(ncFile, frame,database)
+    ncFile = write_vars(ncFile, frame, database)
     ncFile.close()
     return True
-def createMultiple(filenames, outputFilenames, zlib, chunkSize):
-    if(outputFilenames and len(filenames) != len(outputFilenames)):
-        print('USAGE: number of output filenames must equal number of input filenames when start and end times are not specified')
-        exit(0)
+def create_multiple(filenames, outputFilenames, zlib, chunkSize):
+    if outputFilenames and len(filenames) != len(outputFilenames):
+        raise ValueError(
+            'Number of output filenames must equal number of input filenames when start and end times are not specified')
     results = []
     for idx, filename in enumerate(filenames):
-        results.append(createGiantNetCDF(None, None, [filename], outputFilenames[idx], zlib, chunkSize))
-    allFalse = True
-    for result in results:
-        if result == True:
-            allFalse = False
-    if allFalse == True:
+        results.append(create_giant_netCDF(None, None, [filename], outputFilenames[idx], zlib, chunkSize))
+    if not any(results):
         raise IOError('All ASCII files were empty')
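Note: two cleanups in `create_multiple`: the `print` + `exit(0)` usage error becomes a `ValueError`, so callers see a real exception and a nonzero exit status instead of a silent success, and the manual `allFalse` flag loop collapses to a single `any()` call. The equivalence:

```python
results = [False, False, False]

# before: manual flag loop
all_false = True
for result in results:
    if result:
        all_false = False

# after: any() expresses the same check directly
assert all_false == (not any(results))
```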
 # The purpose of this method is to take a string in the format
 # YYYY-mm-ddTHH:MM:SS and convert that to a datetime object
 # used in coordination with argparse -s and -e params
@@ -472,23 +448,21 @@ def _dt_convert(datetime_str):
     except:
         return dt.strptime(datetime_str, '%Y-%m-%d')
 def main():
     import argparse
-    #argparse description
     argparser = argparse.ArgumentParser(description="Convert level_00 aoss tower data to level_a0",
                                         fromfile_prefix_chars='@')
-    #argparse verbosity info
     argparser.add_argument('-v', '--verbose', action="count", default=int(os.environ.get("VERBOSITY", 2)),
                            dest='verbosity',
                            help='each occurrence increases verbosity 1 level through ERROR-WARNING-INFO-DEBUG (default INFO)')
-    #argparse start and end times
     argparser.add_argument('-s', '--start-time', type=_dt_convert,
                            help="Start time of massive netcdf file, if only -s is given, a netcdf file for only that day is given" +
                            ". Formats allowed: \'YYYY-MM-DDTHH:MM:SS\', \'YYYY-MM-DD\'")
-    argparser.add_argument('-e', '--end-time', type=_dt_convert, help='End time of massive netcdf file. Formats allowed:' +
+    argparser.add_argument('-e', '--end-time', type=_dt_convert,
+                           help='End time of massive netcdf file. Formats allowed:' +
                            "\'YYYY-MM-DDTHH:MM:SS\', \'YYYY-MM-DD\'")
     argparser.add_argument('-i', '--interval', type=float,
                            help='Width of the interval to average input data over in minutes.' +
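Note: the `-s`/`-e` options parse through `_dt_convert`, whose fallback branch is visible in the hunk above: try the full timestamp format first, then fall back to a plain date. Its assumed shape (the original uses a bare `except:`; `ValueError` is shown here for clarity):

```python
from datetime import datetime as dt

def _dt_convert(datetime_str):
    """Parse 'YYYY-MM-DDTHH:MM:SS', falling back to 'YYYY-MM-DD'."""
    try:
        return dt.strptime(datetime_str, '%Y-%m-%dT%H:%M:%S')
    except ValueError:
        return dt.strptime(datetime_str, '%Y-%m-%d')
```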
@@ -507,26 +481,25 @@ def main():
     level = levels[min(3, args.verbosity)]
     logging.basicConfig(level=level)
     database = mean_database if args.interval else parser.database
-    if(args.start_time and args.end_time):
-        result = createGiantNetCDF(args.start_time, args.end_time, args.input_files, args.output[0], args.zlib, args.chunk_size,
+    if args.start_time and args.end_time:
+        result = create_giant_netCDF(args.start_time, args.end_time, args.input_files, args.output[0], args.zlib,
+                                     args.chunk_size,
                                      args.interval, database)
-        if(result == False):
+        if not result:
             raise IOError('An empty ASCII file was found')
-    elif(args.start_time):
+    elif args.start_time:
         end_time = args.start_time.replace(hour=23, minute=59, second=59)
-        result = createGiantNetCDF(args.start_time, end_time, args.input_files, args.output[0], args.zlib, args.chunk_size,
+        result = create_giant_netCDF(args.start_time, end_time, args.input_files, args.output[0], args.zlib,
+                                     args.chunk_size,
                                      args.interval, database)
-        if(result == False):
+        if not result:
             raise IOError('An empty ASCII file was found')
-    elif(args.end_time):
-        print('USAGE: start time must be specified when end time is specified')
+    elif args.end_time:
+        raise ValueError('start time must be specified when end time is specified')
     else:
-        createMultiple(args.input_files, args.output, args.zlib, args.chunk_size)
+        create_multiple(args.input_files, args.output, args.zlib, args.chunk_size)
 if __name__ == "__main__":
-    main()
+    sys.exit(main())
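Note: wrapping the entry point in `sys.exit(main())` lets `main` signal failure through its return value: a nonzero integer becomes the process exit status, and `None` (as here, where `main` returns nothing) maps to 0. The general idiom:

```python
import sys

def main() -> int:
    # ... do the work ...
    return 0  # a nonzero return would signal failure to the calling shell

if __name__ == "__main__":
    sys.exit(main())
```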
@@ -12,7 +12,7 @@ class ParserV0Tests(unittest.TestCase):
                 "WDIR05305 143.380000\n")
     def _cut(self):
-        from aosstower.l00.parser import ParserV0
+        from aosstower.level_00.parser import ParserV0
         return ParserV0()
     def test_maybe_mine(self):
@@ -38,7 +38,7 @@ class ParserV1V2Tests(unittest.TestCase):
                 "22.669,10.417,145.2,22.665,163.94,0,0,30.015\n")
     def _cut(self):
-        from aosstower.l00.parser import ParserV1V2
+        from aosstower.level_00.parser import ParserV1V2
         return ParserV1V2()
     def test_maybe_mine(self):
...
@@ -4,7 +4,7 @@ from datetime import datetime
 import requests
 from metobs.data import wind_vector_components
-from aosstower.l00.parser import read_frames
+from aosstower.level_00.parser import read_frames
 LOG = logging.getLogger(__name__)
...
@@ -5,8 +5,8 @@ import logging
 from datetime import datetime
 from metobscommon.model import RrdModel, ModelError
-from aosstower.l00.parser import read_records
-from aosstower.l00.rrd import initialize_rrd
+from aosstower.level_00.parser import read_records
+from aosstower.level_00.rrd import initialize_rrd
 LOG = logging
...