Skip to content
Snippets Groups Projects
Unverified Commit e064a2cf authored by David Hoese's avatar David Hoese
Browse files

Simplify file finding logic

parent 855073a4
No related branches found
No related tags found
No related merge requests found
import os import os
from datetime import datetime as dt import logging
from datetime import datetime
from datetime import timedelta as delta from datetime import timedelta as delta
from io import StringIO from io import StringIO
import pandas as pd import pandas as pd
from flask import render_template, jsonify, Response from flask import render_template, jsonify, Response
from metobsapi.util import file_reponses, data_responses from metobsapi.util import file_responses, data_responses, ProductFrequency
LOG = logging.getLogger(__name__)
def dateHandler(date):
try:
return dt.strptime(date, "%Y-%m-%d")
except: def handle_date(date):
return 400 try:
date_len = len(date)
def beginEndHandler(begin, end): if date_len == 10:
if(not begin and not end): return datetime.strptime(date, "%Y-%m-%d")
begin = dt.today() else:
begin = begin.replace(hour=0, minute=0, second=0, microsecond=0) return datetime.strptime(date, "%Y-%m-%dT%H:%M:%S")
end = dt.today() except ValueError:
end = end.replace(hour=0, minute=0, second=0, microsecond=0) LOG.warning("Malformed date string '%s'", date, exc_info=True)
raise ValueError("Malformed date string '%s'" % (date,))
elif(not begin):
begin = dt.today()
begin = begin.replace(hour=0, minute=0, second=0, microsecond=0)
elif(not end): def handle_begin_end(begin, end):
end = dt.today() if not end:
end = datetime.utcnow()
end = end.replace(hour=0, minute=0, second=0, microsecond=0) end = end.replace(hour=0, minute=0, second=0, microsecond=0)
if(begin[0:1] == "-"): else:
try: end = handle_date(end)
begin = end - delta(days=int(begin[1:]))
except:
return 'malformed_string'
if not begin:
begin = datetime.utcnow()
begin = begin.replace(hour=0, minute=0, second=0, microsecond=0)
elif begin[0] == '-':
# begin is now days
begin = int(begin[1:])
# FIXME: Can a relative time be more complex than just days?
begin = end - delta(days=begin)
else: else:
begin = dateHandler(begin) begin = handle_date(begin)
end = dateHandler(end)
if(begin == 400 or end == 400): return begin, end
return 'malformed_string'
return [begin, end]
# The purpose of this method is to return a stream's data def handle_single_stream(cur_dt, site, inst, level, version, stream_pattern):
# only for the aoss tower
# @param cur_dt - the day related to the file
# @param version - the version related to the data stream
# @param pattern - the file pattern for the data stream
# @return dict with filename, url, site, inst, level, size
def handleAossTower(cur_dt, level, version, pattern):
from flask import current_app as app from flask import current_app as app
if site not in file_responses.ARCHIVE_INFO:
return 'missing_site'
info = file_responses.ARCHIVE_INFO[site]
#if level == level_00, get filename if inst not in info:
if level == 'level_00': return 'missing_inst'
if pattern == 'ascii': info = info[inst]
filename = 'rig_tower.%Y-%m-%d.ascii'
else:
#right now, we only have ascii level_00 files
return {}
#if level == level_b1, get filename if level not in info:
elif level == 'level_b1': return 'missing_level'
if pattern == 'nc-1mo-1d': info = info[level]
filename = cur_dt.strftime('aoss_tower.%Y-%m.nc')
elif pattern == 'nc-1d-1m': if stream_pattern not in info:
filename = cur_dt.strftime('/%d') return 'missing_stream_pattern'
filename += cur_dt.strftime('/aoss_tower.%Y-%m-%d.nc') info = info[stream_pattern]
else: if version == '*':
return {} # FIXME: Get version from archive_info
# and use newest one when '*'
version = 'version_00'
else: path = os.path.join(site,
return {} inst,
path = os.path.join('aoss',
'tower',
level, level,
version, version,
'%Y', file_responses.FREQUENCY_DIR_FMT[info['frequency']],
'%m', info['pattern'])
filename)
path = cur_dt.strftime(path) path = cur_dt.strftime(path)
pathname = os.path.join(app.config['ARCHIVE_ROOT'], path) pathname = os.path.join(app.config['ARCHIVE_ROOT'], path)
if os.path.isfile(pathname): if os.path.isfile(pathname):
url = os.path.join(app.config['ARCHIVE_URL'], path) url = os.path.join(app.config['ARCHIVE_URL'], path)
# url = file_reponses.url
# url += '{aoss/tower/' + level + '/'
# url += (version + '/')
# url += cur_dt.strftime('%Y/%m/') + filename + '}'
file_info = {} file_info = {}
file_info['filename'] = os.path.basename(pathname) file_info['filename'] = os.path.basename(pathname)
file_info['url'] = url file_info['url'] = url
file_info['site'] = 'AOSS' file_info['site'] = site
file_info['inst'] = 'Tower' file_info['inst'] = inst
file_info['level'] = level file_info['level'] = level
file_info['size'] = os.stat(pathname).st_size file_info['size'] = os.stat(pathname).st_size
return file_info return file_info
else:
return {}
# The purpose of this method is to return a stream's data
# only for the aoss tower
# @param cur_dt - the day related to the file
# @param version - the version related to the data stream
# @param pattern - the file pattern for the data stream
# @return dict with filename, url, site, inst, level, size
def handleAossAeri(cur_dt, level, version, pattern):
    """Return file information for a single AOSS AERI data stream.

    :param cur_dt: datetime for the day related to the file
    :param level: processing level (only 'level_00' exists for the AERI)
    :param version: version string for the data stream
    :param pattern: file pattern identifier for the data stream
    :return: dict with filename, url, site, inst, level, size keys, or an
             empty dict if the level/pattern is unknown or the file is missing
    """
    from flask import current_app as app
    path = app.config['ARCHIVE_ROOT']
    path += 'aoss/aeri/' + level + '/'
    path += (version + '/')
    # for now, every AERI level uses the same %Y/%m/%d directory pattern
    path += cur_dt.strftime('%Y/%m/%d/')
    fileInfo = {}
    if level == 'level_00':
        # DPDPP - dates plus dot plus pattern: filenames are %y%m%d.pattern
        DPDPP = ['PAR', 'QC', 'SUM']
        # DPP - dates plus pattern: filenames are %y%m%d + pattern (no dot)
        DPP = ['B1.CXS', 'B2.CXS', 'B2.UVS',
               'C1.RNC', 'C2.RNC', 'F1.CSV', 'F1.CXS', 'F1.UVS',
               'F2.CSV', 'F2.CXS', 'F2.UVS']
        if pattern in DPDPP:
            filename = cur_dt.strftime('%y%m%d.') + pattern
        elif pattern in DPP:
            filename = cur_dt.strftime('%y%m%d') + pattern
        elif pattern == 'R.SCR':
            # FIX: was 'AESITTER' with no extension; the archived file
            # is named 'AESITTER.SCR'
            filename = 'AESITTER.SCR'
        elif pattern == 'E.SCR':
            filename = 'RADIANCE.SCR'
        elif pattern == 'Y.SCR':
            filename = 'SUMMARY.SCR'
        else:
            # unknown level_00 pattern
            return {}
    else:
        # right now we only have level_00 AERI files
        return {}
    path += filename
    if os.path.isfile(path):
        url = file_reponses.url
        url += '{aoss/aeri/' + level + '/'
        url += (version + '/')
        # FIX: include the %d directory so the URL matches the on-disk
        # path built above (was '%Y/%m/' while path used '%Y/%m/%d/')
        url += cur_dt.strftime('%Y/%m/%d/') + filename + '}'
        fileInfo['filename'] = filename
        fileInfo['url'] = url
        fileInfo['site'] = 'AOSS'
        # FIX: was 'Tower' — copy/paste from the tower handler
        fileInfo['inst'] = 'AERI'
        fileInfo['level'] = level
        fileInfo['size'] = os.stat(path).st_size
        return fileInfo
    else:
        return {}
return {} return {}
def handleMendotaBuoy(cur_dt, level, version, pattern):
    """Return file information for a single Mendota buoy data stream.

    Not implemented yet: no buoy files are archived, so every request
    resolves to "no file found".

    :param cur_dt: datetime for the day related to the file
    :param level: processing level of the data stream
    :param version: version related to the data stream
    :param pattern: file pattern for the data stream
    :return: empty dict (no buoy data is available)
    """
    return {}
def handleSingleStream(cur_dt, site, inst, level, version, pattern):
    """Dispatch to the proper handler for one (site, instrument) stream.

    :param cur_dt: datetime for the day related to the file
    :param site: site related to the data stream
    :param inst: instrument related to the data stream
    :param level: processing level of the data stream
    :param version: version related to the data stream
    :param pattern: file pattern for the data stream
    :return: dict with filename, url, site, inst, level, size keys,
             or an empty dict for an unknown site/instrument pair
    """
    key = (site, inst)
    if key == ('aoss', 'tower'):
        return handleAossTower(cur_dt, level, version, pattern)
    if key == ('aoss', 'aeri'):
        return handleAossAeri(cur_dt, level, version, pattern)
    if key == ('mendota', 'buoy'):
        return handleMendotaBuoy(cur_dt, level, version, pattern)
    return {}
def getCurrDTData(cur_dt, sites, insts, levels, versions, patterns): def get_data_for_dt(cur_dt, sites, insts, levels, versions, patterns):
data = [] data = []
for idx, site in enumerate(sites): for idx, site in enumerate(sites):
...@@ -226,42 +99,50 @@ def getCurrDTData(cur_dt, sites, insts, levels, versions, patterns): ...@@ -226,42 +99,50 @@ def getCurrDTData(cur_dt, sites, insts, levels, versions, patterns):
version = versions[idx] version = versions[idx]
pattern = patterns[idx] pattern = patterns[idx]
fileInfo = handleSingleStream(cur_dt, site, inst, level, version, pattern) file_info = handle_single_stream(cur_dt, site, inst, level, version, pattern)
#if file info is empty, file does not exist #if file info is empty, file does not exist
#should not be appended to return frame #should not be appended to return frame
if not fileInfo: if not file_info:
continue continue
elif isinstance(file_info, str):
# FIXME: Gather stream info and then fill in for a specific date
# FIXME: Actually provide the stream name so they know what is wrong
return file_info
#append to list - later to be created into frame #append to list - later to be created into frame
data.append(fileInfo) data.append(file_info)
return data return data
def getData(start, end, sites, insts, levels, versions, patterns):
def get_data(start, end, sites, insts, levels, versions, patterns):
cur_dt = start cur_dt = start
data = [] data = []
for day in range((end - start).days + 1): for day in range((end - start).days + 1):
data = data + getCurrDTData(cur_dt, sites, insts, levels, versions, patterns) tmp_data = get_data_for_dt(cur_dt, sites, insts, levels, versions, patterns)
if isinstance(tmp_data, str):
return tmp_data
data += tmp_data
cur_dt += delta(days=1) cur_dt += delta(days=1)
return pd.DataFrame(data) return pd.DataFrame(data)
def testResources(sites, insts):
if(len(sites) == 0 or len(insts) == 0):
return False
if(len(sites) != len(insts)): def test_resources(sites, insts):
if not sites or not insts:
return False return False
for idx, site in enumerate(sites): if len(sites) != len(insts):
resource = site + ' ' + insts[idx] return False
if(resource not in file_reponses.resources): for site, inst in zip(sites, insts):
if site not in file_responses.ARCHIVE_INFO:
return False return False
if inst not in file_responses.ARCHIVE_INFO[site]:
return True return False
return True
def handleCSV(frame): def handleCSV(frame):
body = StringIO() body = StringIO()
...@@ -278,12 +159,6 @@ def handleCSV(frame): ...@@ -278,12 +159,6 @@ def handleCSV(frame):
if frame.empty: if frame.empty:
return output.getvalue() return output.getvalue()
urls = frame['url']
urls = urls.str.replace('{', '')
urls = urls.str.replace('}', '')
frame['url'] = urls
frame['size'] = frame['size'].apply(str) frame['size'] = frame['size'].apply(str)
allRows = frame.values.tolist() allRows = frame.values.tolist()
...@@ -293,6 +168,7 @@ def handleCSV(frame): ...@@ -293,6 +168,7 @@ def handleCSV(frame):
return output.getvalue() return output.getvalue()
def handleSH(frame): def handleSH(frame):
output = StringIO() output = StringIO()
output.write('# !/usr/bin/env bash\n') output.write('# !/usr/bin/env bash\n')
...@@ -322,6 +198,7 @@ def handleSH(frame): ...@@ -322,6 +198,7 @@ def handleSH(frame):
return output.getvalue() return output.getvalue()
def handleBAT(frame): def handleBAT(frame):
output = StringIO() output = StringIO()
...@@ -338,8 +215,6 @@ def handleBAT(frame): ...@@ -338,8 +215,6 @@ def handleBAT(frame):
return output.getvalue() return output.getvalue()
urls = frame['url'] urls = frame['url']
urls = urls.str.replace('}', '')
names = frame['filename'] names = frame['filename']
directories = [] directories = []
...@@ -391,12 +266,6 @@ def handleJSON(frame): ...@@ -391,12 +266,6 @@ def handleJSON(frame):
if frame.empty: if frame.empty:
return jsonify(**output) return jsonify(**output)
urls = frame['url']
urls = urls.str.replace('{', '')
urls = urls.str.replace('}', '')
frame['url'] = urls
allRows = frame.values.tolist() allRows = frame.values.tolist()
body = [] body = []
...@@ -415,6 +284,7 @@ def handleJSON(frame): ...@@ -415,6 +284,7 @@ def handleJSON(frame):
return jsonify(**output) return jsonify(**output)
def throwLevel(fmt, level): def throwLevel(fmt, level):
if(fmt == 'csv' or fmt == 'sh'): if(fmt == 'csv' or fmt == 'sh'):
return data_responses.createCSV(400, 'unidentified level \'' + level + '\' detected'), 400 return data_responses.createCSV(400, 'unidentified level \'' + level + '\' detected'), 400
...@@ -423,11 +293,12 @@ def throwLevel(fmt, level): ...@@ -423,11 +293,12 @@ def throwLevel(fmt, level):
return jsonify(**data_responses.createJSON(400, 'unidentified level \'' + level + '\' detected')), 400 return jsonify(**data_responses.createJSON(400, 'unidentified level \'' + level + '\' detected')), 400
elif(fmt == 'bat'): elif(fmt == 'bat'):
return file_reponses.createBAT(400, 'unidentified level \'' + level + '\' detected'), 400 return file_responses.createBAT(400, 'unidentified level \'' + level + '\' detected'), 400
else: else:
return render_template('400.html', format=fmt), 400 return render_template('400.html', format=fmt), 400
def throwVersion(fmt, version): def throwVersion(fmt, version):
if(fmt == 'csv' or fmt == 'sh'): if(fmt == 'csv' or fmt == 'sh'):
return data_responses.createCSV(400, 'unidentified version \'' + version + '\' detected'), 400 return data_responses.createCSV(400, 'unidentified version \'' + version + '\' detected'), 400
...@@ -436,11 +307,12 @@ def throwVersion(fmt, version): ...@@ -436,11 +307,12 @@ def throwVersion(fmt, version):
return jsonify(**data_responses.createJSON(400, 'unidentified version \'' + version + '\' detected')), 400 return jsonify(**data_responses.createJSON(400, 'unidentified version \'' + version + '\' detected')), 400
elif(fmt == 'bat'): elif(fmt == 'bat'):
return file_reponses.createBAT(400, 'unidentified version \'' + version + '\' detected'), 400 return file_responses.createBAT(400, 'unidentified version \'' + version + '\' detected'), 400
else: else:
return render_template('400.html', format=fmt), 400 return render_template('400.html', format=fmt), 400
def throwStream(fmt): def throwStream(fmt):
if(fmt == 'csv' or fmt == 'sh'): if(fmt == 'csv' or fmt == 'sh'):
return data_responses.createCSV(400, 'Stream has an invalid number of parameters in its format'), 400 return data_responses.createCSV(400, 'Stream has an invalid number of parameters in its format'), 400
...@@ -449,95 +321,62 @@ def throwStream(fmt): ...@@ -449,95 +321,62 @@ def throwStream(fmt):
return jsonify(**data_responses.createJSON(400, 'Stream has an invalid number of parameters in its format')), 400 return jsonify(**data_responses.createJSON(400, 'Stream has an invalid number of parameters in its format')), 400
elif(fmt == 'bat'): elif(fmt == 'bat'):
return file_reponses.createBAT(400, 'Stream has an invalid number of parameters in its format'), 400 return file_responses.createBAT(400, 'Stream has an invalid number of parameters in its format'), 400
else: else:
return render_template('400.html', format=fmt), 400 return render_template('400.html', format=fmt), 400
def returnFiles(fmt, beginTime, endTime, sites, insts, levels, patterns, versions):
times = beginEndHandler(beginTime, endTime)
#handle time errors
if(times == 'begin_end_error'):
if(fmt == 'csv' or fmt == 'sh'):
return file_reponses.csv_time, 400
elif(fmt == 'json'): def handle_error(fmt, error_str):
return jsonify(**file_reponses.json_time), 400 try:
handler = file_responses.ERROR_HANDLERS[fmt]
elif(fmt == 'bat'): except KeyError:
return file_reponses.bat_time, 400 return render_template('400.html', format=fmt), 400
else:
return render_template('400.html', format=fmt), 400
elif(times == 'malformed_string'):
if(fmt == 'csv' or fmt == 'sh'):
return file_reponses.csv_mal, 400
elif(fmt == 'json'):
return jsonify(**file_reponses.json_mal), 400
elif(fmt == 'bat'):
return file_reponses.bat_time, 400
else:
return render_template('400.html', format=fmt), 400
else:
beginDate = times[0]
endDate = times[1]
#handle resources err_code, err_msg = file_responses.ERROR_MESSAGES[error_str]
if(not sites or not insts): res = handler(err_code, err_msg)
if(fmt == 'csv' or fmt == 'sh'): if fmt == 'json':
return file_reponses.csv_resource, 400 return jsonify(**res), err_code
return res, err_code
elif(fmt == 'json'):
return jsonify(**file_reponses.json_resource), 400
elif(fmt == 'bat'): RESPONSE_HANDLERS = {
return file_reponses.bat_resource, 400 'csv': handleCSV,
'sh': handleSH,
'bat': handleBAT,
'json': handleJSON,
}
else:
return render_template('400.html', format=fmt), 400
if(testResources(sites, insts) == False): def find_stream_files(fmt, beginTime, endTime, sites, insts, levels, patterns, versions):
if(fmt == 'csv' or fmt == 'sh'): try:
return file_reponses.csv_404, 404 times = handle_begin_end(beginTime, endTime)
except TypeError:
return handle_error(fmt, 'malformed_string')
elif(fmt == 'json'): begin_date = times[0]
return jsonify(**file_reponses.json_404), 404 end_date = times[1]
elif(fmt == 'bat'): if not test_resources(sites, insts):
return file_reponses.bat_404, 400 return handle_error(fmt, 'missing_inst')
else: frame = get_data(begin_date, end_date, sites, insts, levels, versions, patterns)
return render_template('400.html', format=fmt), 400
frame = getData(beginDate, endDate, sites, insts, levels, versions, patterns) if isinstance(frame, str):
return handle_error(fmt, frame)
# Normalize the frame that was given so we only have expected information
# XXX: What if we want more information in the JSON or other format?
if not frame.empty: if not frame.empty:
FILES = ['filename', 'url', 'site', 'inst', 'level', 'size'] FILES = ['filename', 'url', 'site', 'inst', 'level', 'size']
frame = frame[FILES]
frame = frame[FILES] try:
handler = RESPONSE_HANDLERS[fmt]
if(fmt == 'csv'): except KeyError:
getCSV = handleCSV(frame) return render_template('400.html', format=fmt), 400
return Response(getCSV, mimetype='text/plain')
elif(fmt == 'json'):
getJSON = handleJSON(frame)
return getJSON
elif(fmt == 'sh'):
getSH = handleSH(frame)
return getSH
elif(fmt == 'bat'):
getBat = handleBAT(frame)
return getBat
else: res = handler(frame)
return render_template('400.html', format=fmt), 400 if fmt == 'json':
return res
return Response(res, mimetype='text/plain')
...@@ -8,7 +8,7 @@ from flask_cors import CORS ...@@ -8,7 +8,7 @@ from flask_cors import CORS
from flask_json import FlaskJSON from flask_json import FlaskJSON
from metobsapi import modifyData, multiData, files from metobsapi import modifyData, multiData, files
from metobsapi.util import file_reponses, data_responses from metobsapi.util import file_responses, data_responses
#create application #create application
...@@ -410,7 +410,6 @@ def getFiles(fmt): ...@@ -410,7 +410,6 @@ def getFiles(fmt):
if len(getData) != 4 and len(getData) != 5: if len(getData) != 4 and len(getData) != 5:
return files.throwStream(fmt) return files.throwStream(fmt)
else: else:
sites.append(getData[0]) sites.append(getData[0])
insts.append(getData[1]) insts.append(getData[1])
...@@ -418,29 +417,14 @@ def getFiles(fmt): ...@@ -418,29 +417,14 @@ def getFiles(fmt):
patterns.append(getData[2].replace('_', '.')) patterns.append(getData[2].replace('_', '.'))
level = getData[3].replace('l', 'level_') level = getData[3].replace('l', 'level_')
levels.append(level)
if(getData[1] in file_reponses.inst_levels): if len(getData) == 4:
if(level in file_reponses.inst_levels[getData[1]]): versions.append('*')
levels.append(level)
else:
return files.throwLevel(fmt, getData[3])
if(len(getData) == 4):
if(getData[1] in file_reponses.inst_versions):
versions.append(file_reponses.inst_versions[getData[1]])
else: else:
version = getData[4].replace('v', 'version_') versions.append(getData[4].replace('v', 'version_'))
if(getData[1] in file_reponses.inst_versions):
if version in file_reponses.inst_versions[getData[1]]:
versions.append(getData[4].replace('v', 'version_'))
else:
return files.throwVersion(fmt, getData[4])
return files.returnFiles(fmt, beginTime, endTime, sites, insts, levels, patterns, versions) return files.find_stream_files(fmt, beginTime, endTime, sites, insts, levels, patterns, versions)
if __name__ == '__main__': if __name__ == '__main__':
app.debug = True app.debug = True
......
import unittest
if __name__ == "__main__":
unittest.main()
\ No newline at end of file
...@@ -8,7 +8,7 @@ import shutil ...@@ -8,7 +8,7 @@ import shutil
ARCHIVE_INFO = { ARCHIVE_INFO = {
'aoss': { 'aoss': {
'tower': { 'tower': {
'00': { 'level_00': {
'ascii': { 'ascii': {
'frequency': ProductFrequency.DAILY_FILE, 'frequency': ProductFrequency.DAILY_FILE,
'pattern': 'rig_tower.%Y-%m-%d.ascii', 'pattern': 'rig_tower.%Y-%m-%d.ascii',
......
...@@ -35,8 +35,7 @@ def create_fake_archive(archive_info, root=FAKE_ARCHIVE_PATH, datetimes=None, ve ...@@ -35,8 +35,7 @@ def create_fake_archive(archive_info, root=FAKE_ARCHIVE_PATH, datetimes=None, ve
for inst, level_info in inst_info.items(): for inst, level_info in inst_info.items():
os.makedirs(inst, exist_ok=True) os.makedirs(inst, exist_ok=True)
os.chdir(inst) os.chdir(inst)
for level, level_info in level_info.items(): for level_name, level_info in level_info.items():
level_name = 'level_{}'.format(level)
os.makedirs(level_name, exist_ok=True) os.makedirs(level_name, exist_ok=True)
os.chdir(level_name) os.chdir(level_name)
for version in versions: for version in versions:
......
from metobsapi.util.data_responses import createCSV, createJSON
def createBAT(code, message):
    """Build the plain-text body of a .bat-format error response.

    :param code: numeric status code for the error
    :param message: human-readable error message
    :return: single string with '<br>'-separated status lines
    """
    return (':: status: error<br>'
            ':: code: ' + str(code) + '<br>'
            ':: num_results: 0<br># message: ' + message)
# Levels and versions available per instrument (legacy lookup tables).
inst_levels = {'aeri': ['level_00'], 'tower': ['level_00', 'level_b1'], 'buoy': []}
inst_versions = {'aeri': 'version_00', 'tower': 'version_00', 'buoy': ''}
# On-disk archive root and its download URL
# (presumably the URL serves the same tree as `path` — confirm).
path = '/mnt/inst-data/cache/'
url = 'http://metobs.ssec.wisc.edu/pub/cache/'
# Supported 'site inst' resource pairs and their human-readable listing.
resources = ['aoss tower', 'aoss aeri', 'mendota buoy']
resourceString = 'aoss aeri, aoss tower, and the mendota buoy.'
# Pre-built error response bodies, one per output format per error type.
json_time = createJSON(400, 'missing begin or end time parameters')
csv_time = createCSV(400, 'missing begin or end time parameters')
bat_time = createBAT(400, 'missing begin or end time parameters')
csv_mal = createCSV(400, 'could not parse timestamp, check format')
json_mal = createJSON(400, 'could not parse timestamp, check format')
bat_mal = createBAT(400, 'could not parse timestamp, check format')
json_404 = createJSON(404, 'the resource could not be found - The current resources supported are the ' + resourceString)
csv_404 = createCSV(404, 'the resource could not be found - The current resources supported are the ' + resourceString)
bat_404 = createBAT(404, 'the resource could not be found - The current resources supported are the ' + resourceString)
json_resource = createJSON(400, 'missing site or inst parameters')
csv_resource = createCSV(400, 'missing site or inst parameters')
bat_resource = createBAT(400, 'missing site or inst parameters')
\ No newline at end of file
import os
from metobsapi.util.data_responses import createCSV, createJSON
from metobsapi.util import ProductFrequency
def createBAT(code, message):
    """Build the plain-text body of a .bat-format error response.

    :param code: numeric status code for the error
    :param message: human-readable error message
    :return: single string with '<br>'-separated status lines
    """
    pieces = (
        ':: status: error<br>',
        ':: code: ', str(code), '<br>',
        ':: num_results: 0<br># message: ', message,
    )
    return ''.join(pieces)
# TODO: Load from config file
# FIXME: Versions?
# Nested mapping describing every data stream available in the archive:
#   site -> instrument -> level -> stream pattern -> stream info dict
# Each stream info dict has:
#   'frequency': a ProductFrequency value selecting the directory layout
#                (see FREQUENCY_DIR_FMT)
#   'pattern':   strftime pattern of the file name itself
ARCHIVE_INFO = {
    'aoss': {
        'tower': {
            'level_00': {
                'ascii': {
                    'frequency': ProductFrequency.DAILY_FILE,
                    'pattern': 'rig_tower.%Y-%m-%d.ascii',
                },
            },
            'level_b1': {
                # one NetCDF file per month, 1-day resolution
                'nc-1mo-1d': {
                    'frequency': ProductFrequency.MONTHLY_DIR,
                    'pattern': 'aoss_tower.%Y-%m.nc',
                },
                # one NetCDF file per day, 1-minute resolution
                'nc-1d-1m': {
                    'frequency': ProductFrequency.DAILY_DIR,
                    'pattern': 'aoss_tower.%Y-%m-%d.nc',
                },
            },
        },
        'aeri': {
            'level_00': {
                'par': {
                    'frequency': ProductFrequency.DAILY_DIR,
                    'pattern': '%y%m%d.par',
                },
                'qc': {
                    'frequency': ProductFrequency.DAILY_DIR,
                    'pattern': '%y%m%d.qc',
                },
                'sum': {
                    'frequency': ProductFrequency.DAILY_DIR,
                    'pattern': '%y%m%d.sum',
                },
                # the .SCR files have fixed names (no date in the filename)
                'scr-aesitter': {
                    'frequency': ProductFrequency.DAILY_DIR,
                    'pattern': 'AESITTER.SCR',
                },
                'scr-radiance': {
                    'frequency': ProductFrequency.DAILY_DIR,
                    'pattern': 'RADIANCE.SCR',
                },
                'scr-summary': {
                    'frequency': ProductFrequency.DAILY_DIR,
                    'pattern': 'SUMMARY.SCR',
                },
            },
        },
    },
    # no Mendota streams are archived yet
    'mendota': {
    },
}
# Add the other AERI file types.
# Each suffix like 'B1.CXS' becomes a stream key 'cxs-b1' (extension first,
# lowercased) whose archived filename is the date plus the ORIGINAL suffix,
# e.g. '170214B1.CXS'.
for file_suffix in ('B1.CXS',
                    'B2.CXS',
                    'B2.UVS',
                    'C1.RNC',
                    'C2.RNC',
                    'F1.CSV',
                    'F1.CXS',
                    'F1.UVS',
                    'F2.CSV',
                    'F2.CXS',
                    'F2.UVS'):
    parts = file_suffix.split('.')
    stream_pat = parts[1].lower() + '-' + parts[0].lower()
    nfo = {
        'frequency': ProductFrequency.DAILY_DIR,
        # FIX: was '%y%m%d{}'.format(stream_pat), which produced patterns
        # like '%y%m%dcxs-b1' — on disk the files keep the original suffix
        # ('%y%m%d' + 'B1.CXS'), so the real suffix must be used here
        'pattern': '%y%m%d{}'.format(file_suffix),
    }
    ARCHIVE_INFO['aoss']['aeri']['level_00'][stream_pat] = nfo
# Directory format for the type of data file frequency:
# maps a ProductFrequency value to the strftime directory layout used
# under <site>/<inst>/<level>/<version>/ in the archive.
FREQUENCY_DIR_FMT = {
    ProductFrequency.DAILY_DIR: os.path.join('%Y', '%m', '%d'),
    ProductFrequency.DAILY_FILE: os.path.join('%Y', '%m'),
    ProductFrequency.MONTHLY_DIR: os.path.join('%Y', '%m'),
    ProductFrequency.MONTHLY_FILE: '%Y',
}

# NOTE(review): the lookup tables below predate ARCHIVE_INFO and look
# legacy — confirm they are still referenced before relying on them.
inst_levels = {'aeri': ['level_00'], 'tower': ['level_00', 'level_b1'], 'buoy': []}
inst_versions = {'aeri': 'version_00', 'tower': 'version_00', 'buoy': ''}

# On-disk archive root and its download URL
# (presumably the URL serves the same tree as `path` — confirm).
path = '/mnt/inst-data/cache/'
url = 'http://metobs.ssec.wisc.edu/pub/cache/'

# Supported 'site inst' resource pairs and their human-readable listing.
resources = ['aoss tower', 'aoss aeri', 'mendota buoy']
resourceString = 'aoss aeri, aoss tower, and the mendota buoy.'

# Error response body builder per output format; 'sh' shares the CSV builder.
ERROR_HANDLERS = {
    'csv': createCSV,
    'sh': createCSV,
    'bat': createBAT,
    'json': createJSON,
}

# Error key -> (HTTP status code, human-readable message).
ERROR_MESSAGES = {
    'datetime_error': (400, 'missing begin or end time parameters'),
    'malformed_string': (400, 'could not parse timestamp, check format'),
    'missing_resource': (404, 'the resource could not be found - The current resources supported are the ' + resourceString),
    'missing_inst': (400, 'missing or unknown inst parameter'),
    'missing_site': (400, 'missing or unknown site parameter'),
    'missing_level': (400, 'missing or unknown level parameter'),
    'missing_stream_pattern': (400, 'missing or unknown stream pattern parameter'),
}
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment