#!/usr/bin/env python
# encoding: utf-8
"""
Top-level routines to compare two files.
Created by rayg Apr 2009.
Copyright (c) 2009 University of Wisconsin SSEC. All rights reserved.
"""
import os, sys, logging, re, subprocess, datetime
import imp
from pprint import pprint, pformat
from numpy import *

import pkg_resources
from pycdf import CDFError
from subprocess import check_call as sh
import glance.io as io
import glance.delta as delta
import glance.plot as plot
import glance.report as report
import glance.stats as statistics
import glance.plotcreatefns as plotcreate
import glance.collocation as collocation
import glance.data as dataobj # provides the DataObject / DiffInfoObject used below

LOG = logging.getLogger(__name__)
# these are the built in defaults for the settings
glance_setting_defaults = {'shouldIncludeReport': True,
'shouldIncludeImages': False,
'doFork': False,
'useThreadsToControlMemory': False,
'useSharedRangeForOriginal': False,
'noLonLatVars': False}
# these are the built in longitude/latitude defaults
glance_lon_lat_defaults = {'longitude': 'pixel_longitude',
'latitude': 'pixel_latitude',
'lon_lat_epsilon': 0.0,
'data_filter_function_lon_in_a': None,
'data_filter_function_lat_in_a': None,
'data_filter_function_lon_in_b': None,
'data_filter_function_lat_in_b': None
}
# these are the built in default settings for the variable analysis
glance_analysis_defaults = {'epsilon': 0.0,
'epsilon_percent': None,
'missing_value': None,
'epsilon_failure_tolerance': 0.0,
'nonfinite_data_tolerance': 0.0,
'total_data_failure_tolerance': None,
'minimum_acceptable_squared_correlation_coefficient': None
}
def _clean_path(string_path) :
"""
Return a clean form of the path without any '.', '..', or '~'
"""
clean_path = None
if string_path is not None :
clean_path = os.path.abspath(os.path.expanduser(string_path))
return clean_path
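
# as an illustration, _clean_path("~/data/../file.hdf") would expand the user's
# home directory and collapse the relative pieces, giving something like
# "/home/<user>/file.hdf" (the exact result depends on the local environment)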
def _parse_varnames(names, terms, epsilon=0.0, missing=None):
"""filter variable names and substitute default epsilon and missing settings if none provided
returns (variable name, epsilon, missing) triples
>>> _parse_varnames( ['foo','bar', 'baz', 'zoom', 'cat'], ['f..:0.5:-999', 'ba.*:0.001', 'c.t::-9999'], 1e-7 )
set([('foo', 0.5, -999.0), ('cat', 9.9999999999999995e-08, -9999.0), ('bar', 0.001, None), ('baz', 0.001, None)])
    names - all the variable names in the file (i.e. names that should be considered valid)
terms - variable selection terms given from the command line
epsilon - a default epsilon to be used for all variables that do not have a specific epsilon given
missing - a default fill value to be used for all variables that do not have a specific fill value given
"""
terms = [x.split(':') for x in terms]
terms = [(re.compile(x[0]).match,x[1:]) for x in terms]
def _cvt_em(eps=None, mis=None):
eps = float(eps) if eps else epsilon
mis = float(mis) if mis else missing
return eps, mis
sel = [ ((x,)+_cvt_em(*em)) for x in names for (t,em) in terms if t(x) ]
return set(sel)
def _setup_file(fileNameAndPath, prefixText='', allowWrite=False) :
    """
    open the provided file name/path and extract information on the md5sum and last modification time

    optional prefix text may be passed in for informational output formatting
    """
# some info to return
fileInfo = {'path': fileNameAndPath}
# check to see if the path exists to be opened
if not (os.path.exists(fileNameAndPath)) :
LOG.warn("Requested file " + fileNameAndPath + " could not be opened because it does not exist.")
return None, fileInfo
# open the file
    LOG.info(prefixText + " opening " + fileNameAndPath)
    fileNameAndPath = os.path.abspath(os.path.expanduser(fileNameAndPath))
    LOG.debug("User provided path after normalization and user expansion: " + fileNameAndPath)
    fileObject = io.open(fileNameAndPath, allowWrite=allowWrite)

    # get the file md5sum; passing the arguments as a list avoids shell quoting problems
    tempSubProcess = subprocess.Popen(['md5sum', fileNameAndPath], stdout=subprocess.PIPE)
    fileInfo['md5sum'] = tempSubProcess.communicate()[0].split()[0]
    LOG.info(prefixText + " file md5sum: " + str(fileInfo['md5sum']))
# get the last modified stamp
statsForFile = os.stat(fileNameAndPath)
    fileInfo['lastModifiedTime'] = datetime.datetime.fromtimestamp(statsForFile.st_mtime).ctime() # should the time zone be forced?
    LOG.info (prefixText + " file was last modified: " + fileInfo['lastModifiedTime'])
return fileObject, fileInfo
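
# the fileInfo dictionary returned above takes the form (illustrative values only):
#     {'path':             '/abs/path/to/file.hdf',
#      'md5sum':           'd41d8cd98f00b204e9800998ecf8427e',
#      'lastModifiedTime': 'Mon Jun  1 12:00:00 2009'}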
# TODO, make this generic for any number of file objects
def _check_file_names(fileAObject, fileBObject) :
"""
get information about the names in the two files and how they compare to each other
"""
# get information about the variables stored in the files
aNames = set(fileAObject())
bNames = set(fileBObject())
# get the variable names they have in common
commonNames = aNames.intersection(bNames)
# which names are unique to only one of the two files?
uniqueToANames = aNames - commonNames
uniqueToBNames = bNames - commonNames
return {'sharedVars': commonNames, 'uniqueToAVars': uniqueToANames, 'uniqueToBVars': uniqueToBNames}
def _resolve_names(fileAObject, fileBObject, defaultValues,
requestedNames, usingConfigFileFormat=False) :
"""
figure out which names the two files share and which are unique to each file, as well as which names
were requested and are in both sets
usingConfigFileFormat signals whether the requestedNames parameter will be in the form of the inputed
names from the command line or a more complex dictionary holding information about the names read in
from a configuration file
Note: if we ever need a variable with different names in file A and B to be comparable, this logic
will need to be changed.
"""
# look at the names present in the two files and compare them
nameComparison = _check_file_names(fileAObject, fileBObject)
# figure out which set should be selected based on the user requested names
fileCommonNames = nameComparison['sharedVars']
finalNames = {}
if (usingConfigFileFormat) :
# if the user didn't ask for any, try everything
        if (len(requestedNames) == 0) :
finalFromCommandLine = _parse_varnames(fileCommonNames, ['.*'],
defaultValues['epsilon'], defaultValues['missing_value'])
for name, epsilon, missing in finalFromCommandLine :
# we'll use the variable's name as the display name for the time being
finalNames[name] = {}
# make sure we pick up any other controlling defaults
finalNames[name].update(defaultValues)
# but override the values that would have been determined by _parse_varnames
finalNames[name]['variable_name'] = name
finalNames[name]['epsilon'] = epsilon
# load the missing value if it was not provided
missing, missing_b = _get_missing_values_if_needed((fileAObject, fileBObject), name,
missing_value_A=missing, missing_value_B=missing)
finalNames[name]['missing_value'] = missing
finalNames[name]['missing_value_alt_in_b'] = missing_b
# otherwise just do the ones the user asked for
else :
# check each of the names the user asked for to see if it is either in the list of common names
# or, if the user asked for an alternate name mapping in file B, if the two mapped names are in
# files A and B respectively
            for dispName in requestedNames :

                # hang on to info on the current variable
                currNameInfo = requestedNames[dispName]
# get the variable name
if 'variable_name' in currNameInfo :
name = currNameInfo['variable_name']
name_b = name
if ('alternate_name_in_B' in currNameInfo) :
name_b = currNameInfo['alternate_name_in_B']
                    if ( ((name in fileCommonNames) and ('alternate_name_in_B' not in currNameInfo)) or
                         (('alternate_name_in_B' in currNameInfo) and
                          ((name   in nameComparison['uniqueToAVars']) or (name   in fileCommonNames)) and
                          ((name_b in nameComparison['uniqueToBVars']) or (name_b in fileCommonNames))) ) :
finalNames[dispName] = defaultValues.copy()
finalNames[dispName]['display_name'] = dispName
finalNames[dispName].update(currNameInfo)
# load the missing value if it was not provided
missing = finalNames[dispName]['missing_value']
if ('missing_value_alt_in_b' in finalNames[dispName]) :
missing_b = finalNames[dispName]['missing_value_alt_in_b']
else :
missing_b = missing
finalNames[dispName]['missing_value'], finalNames[dispName]['missing_value_alt_in_b'] = \
_get_missing_values_if_needed((fileAObject, fileBObject), name, name_b,
missing, missing_b)
else :
LOG.warn('No technical variable name was given for the entry described as "' + dispName + '". ' +
'Skipping this variable.')
    else:
        # format command line input similarly to the stuff from the config file
        LOG.debug("Variable names requested on the command line: " + str(requestedNames))
finalFromCommandLine = _parse_varnames(fileCommonNames, requestedNames,
defaultValues['epsilon'], defaultValues['missing_value'])
for name, epsilon, missing in finalFromCommandLine :
            # we'll use the variable's name as the display name for the time being
finalNames[name] = {}
# make sure we pick up any other controlling defaults
finalNames[name].update(defaultValues)
# but override the values that would have been determined by _parse_varnames
finalNames[name]['variable_name'] = name
finalNames[name]['epsilon'] = epsilon
# load the missing value if it was not provided
missing, missing_b = _get_missing_values_if_needed((fileAObject, fileBObject), name,
missing_value_A=missing, missing_value_B=missing)
finalNames[name]['missing_value'] = missing
finalNames[name]['missing_value_alt_in_b'] = missing_b
LOG.debug("Final selected set of variables to analyze:")
LOG.debug(str(finalNames))
return finalNames, nameComparison
def _get_missing_values_if_needed(file_objects,
                                  var_name, alt_var_name=None,
                                  missing_value_A=None, missing_value_B=None) :
"""
get the missing values for two files based on the variable name(s)
if the alternate variable name is passed it will be used for the
second file in place of the primary variable name
"""
    fileA, fileB = file_objects

    # if we don't have an alternate variable name, use the existing one
    if alt_var_name is None :
        alt_var_name = var_name
if missing_value_A is None :
missing_value_A = fileA.missing_value(var_name)
if missing_value_B is None :
missing_value_B = fileB.missing_value(alt_var_name)
return missing_value_A, missing_value_B
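
# for example, _get_missing_values_if_needed((aFile, bFile), 'bt_ch1',
# missing_value_A=-999.0) would keep -999.0 for file A and ask file B for its
# fill value via bFile.missing_value('bt_ch1') ('bt_ch1' is a hypothetical name)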
def _load_config_or_options(aPath, bPath, optionsSet, requestedVars = [ ]) :
"""
load information on how the user wants to run the command from a dictionary of options
and info on the files and variables to compare
note: the options may include a configuration file, which will override many of the
settings in the options
"""
# basic defaults for stuff we will need to return
runInfo = {}
runInfo.update(glance_setting_defaults) # get the default settings
if ('noLonLatVars' not in optionsSet) or (not optionsSet['noLonLatVars']):
runInfo.update(glance_lon_lat_defaults) # get the default lon/lat info
    # by default, we don't have any particular variables to analyze
    desiredVariables = { }
# use the built in default values, to start with
defaultsToUse = glance_analysis_defaults.copy()
requestedNames = None
# set up the paths, they can only come from the command line
paths = {}
paths['a'] = aPath
paths['b'] = bPath
paths['out'] = optionsSet['outputpath']
# the colocation selection can only come from the command line options
# TODO since this is really only coming from the user's selection of the call,
# this is ok for the moment, may want to reconsider later
runInfo['doColocate'] = ('doColocate' in optionsSet) and (optionsSet['doColocate'])
# check to see if the user wants to use a config file and if the path exists
requestedConfigFile = optionsSet['configFile']
    usedConfigFile = False
if (requestedConfigFile is not None) and (requestedConfigFile != "") :
if not os.path.exists(requestedConfigFile) :
LOG.warn("Could not open config file: \"" + requestedConfigFile + "\"")
LOG.warn("Unable to continue analysis without selected configuration file.")
sys.exit(1)
else :
LOG.info ("Using Config File Settings")
# this will handle relative paths
requestedConfigFile = os.path.abspath(os.path.expanduser(requestedConfigFile))
            # split out the file base name and the file path
            (filePath, fileName) = os.path.split(requestedConfigFile)
            fileBaseName = os.path.splitext(fileName)[0] # remove the '.py' from the end
# hang onto info about the config file for later
runInfo['config_file_name'] = fileName
runInfo['config_file_path'] = requestedConfigFile
# load the file
LOG.debug ('loading config file: ' + str(requestedConfigFile))
            glanceRunConfig = imp.load_module(fileBaseName, open(requestedConfigFile, 'U'),
                                              filePath, ('.py', 'U', imp.PY_SOURCE))
            # these are an exception: since they are not advertised to the user, we don't expect
            # them to be in the config file (at least not at the moment; they could be added later,
            # and if the user did put them in the config file, those values would override these lines)
            runInfo['shouldIncludeReport'] = (not optionsSet['imagesOnly']) if ('imagesOnly' in optionsSet) else False
            runInfo['noLonLatVars'] = optionsSet['noLonLatVars'] if ('noLonLatVars' in optionsSet) else False
# get everything from the config file
runInfo.update(glanceRunConfig.settings)
if ('noLonLatVars' not in runInfo) or (not runInfo['noLonLatVars']) :
runInfo.update(glanceRunConfig.lat_lon_info) # get info on the lat/lon variables
# get any requested names
requestedNames = glanceRunConfig.setOfVariables.copy()
# user selected defaults, if they omit any we'll still be using the program defaults
defaultsToUse.update(glanceRunConfig.defaultValues)
usedConfigFile = True
# if we didn't get the info from the config file for some reason
# (the user didn't want to, we couldn't, etc...) get it from the command line options
if not usedConfigFile:
LOG.info ('Using Command Line Settings')
# so get everything from the options directly
runInfo['shouldIncludeReport'] = not optionsSet['imagesOnly']
runInfo['shouldIncludeImages'] = not optionsSet['htmlOnly']
runInfo['doFork'] = optionsSet['doFork']
# only record these if we are using lon/lat
runInfo['noLonLatVars'] = optionsSet['noLonLatVars']
if not runInfo['noLonLatVars'] :
runInfo['latitude'] = optionsSet['latitudeVar'] or runInfo['latitude']
runInfo['longitude'] = optionsSet['longitudeVar'] or runInfo['longitude']
runInfo['lon_lat_epsilon'] = optionsSet['lonlatepsilon']
# get any requested names from the command line
requestedNames = requestedVars or ['.*']
# user selected defaults
defaultsToUse['epsilon'] = optionsSet['epsilon']
defaultsToUse['missing_value'] = optionsSet['missing']
    # note: there is no way to set the tolerances from the command line
return paths, runInfo, defaultsToUse, requestedNames, usedConfigFile
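
# for reference, the config file loaded above is just a python module defining the
# four attributes read from glanceRunConfig; a minimal, purely hypothetical example:
#
#     settings       = {'shouldIncludeImages': True}
#     lat_lon_info   = {'longitude': 'pixel_longitude', 'latitude': 'pixel_latitude',
#                       'lon_lat_epsilon': 0.0001}
#     defaultValues  = {'epsilon': 0.01, 'missing_value': -999}
#     setOfVariables = {'Brightness Temp': {'variable_name': 'bt_channel_1',
#                                           'epsilon': 0.5}}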
def _get_variable_can_end_program(fileObject, variableName, dataType, canEndProgram=True) :
"""
load a variable, exiting the program if there is an error
and canEndProgram is passed as True
TODO, instead of exiting, throw an exception
"""
dataToReturn = None
# get the data from the file
try :
dataToReturn = array(fileObject[variableName], dtype=dataType)
except CDFError :
        LOG.warn ('Unable to retrieve ' + variableName + ' data. The variable name ' +
                  'may not exist in this file or an error may have occurred while attempting to ' +
                  'access the data.')
if canEndProgram :
LOG.warn ('Unable to continue analysis without ' + variableName + ' data. Aborting analysis.')
sys.exit(1)
return dataToReturn
def _get_and_analyze_lon_lat (fileObject,
latitudeVariableName, longitudeVariableName,
latitudeDataFilterFn=None, longitudeDataFilterFn=None) :
"""
get the longitude and latitude data from the given file, assuming they are in the given variable names
and analyze them to identify spacially invalid data (ie. data that would fall off the earth)
"""
(no author)
committed
# get the data from the file TODO, handle these exits out in the calling method?
# get the longitude
LOG.info ('longitude name: ' + longitudeVariableName)
# TODO, should this dtype be a float?
longitudeData = _get_variable_can_end_program(fileObject, longitudeVariableName, float)
# get the latitude
LOG.info ('latitude name: ' + latitudeVariableName)
# TODO, should this dtype be a float?
latitudeData = _get_variable_can_end_program(fileObject, latitudeVariableName, float)
# if we have filters, use them
if not (latitudeDataFilterFn is None) :
latitudeData = latitudeDataFilterFn(latitudeData)
LOG.debug ('latitude size after application of filter: ' + str(latitudeData.shape))
if not (longitudeDataFilterFn is None) :
longitudeData = longitudeDataFilterFn(longitudeData)
LOG.debug ('longitude size after application of filter: ' + str(longitudeData.shape))
    # we are going to have issues with our comparison if they aren't the same shape
LOG.debug('latitude shape: ' + str(latitudeData.shape))
LOG.debug('longitude shape: ' + str(longitudeData.shape))
assert (latitudeData.shape == longitudeData.shape)
    # build a mask of our spatially invalid data TODO, load actual valid range attributes?
invalidLatitude = (latitudeData < -90) | (latitudeData > 90) | ~isfinite(latitudeData)
invalidLongitude = (longitudeData < -180) | (longitudeData > 360) | ~isfinite(longitudeData)
spaciallyInvalidMask = invalidLatitude | invalidLongitude
    # analyze our spatially invalid data
percentageOfSpaciallyInvalidPts, numberOfSpaciallyInvalidPts = _get_percentage_from_mask(spaciallyInvalidMask)
return longitudeData, latitudeData, spaciallyInvalidMask, {
'totNumInvPts': numberOfSpaciallyInvalidPts,
'perInvPts': percentageOfSpaciallyInvalidPts
}
def _get_percentage_from_mask(dataMask) :
"""
given a mask that marks the elements we want the percentage of as True (and is the size of our original data),
figure out what percentage of the whole they are
"""
numMarkedDataPts = sum(dataMask)
totalDataPts = dataMask.size
# avoid dividing by 0
    if totalDataPts == 0 :
return 0.0, 0
percentage = 100.0 * float(numMarkedDataPts) / float(totalDataPts)
return percentage, numMarkedDataPts
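
# for example, a mask with 3 of its 12 elements set to True would give back
# (25.0, 3) from the function above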
def _check_lon_lat_equality(longitudeA, latitudeA,
longitudeB, latitudeB,
ignoreMaskA, ignoreMaskB,
llepsilon, doMakeImages,
outputPath) :
    """
    check to make sure the longitude and latitude are equal everywhere that's not in the ignore masks
    if they are not and doMakeImages was passed as True, generate appropriate figures showing where they differ

    return the number of points where they are not equal (0 would mean they're the same)
    """
# first of all, if the latitude and longitude are not the same shape, then things can't ever be "equal"
    if (longitudeA.shape != longitudeB.shape) or (latitudeA.shape != latitudeB.shape) :
return None
lon_lat_not_equal_points_count = 0
lon_lat_not_equal_points_percent = 0.0
# get information about how the latitude and longitude differ
aDataObject = dataobj.DataObject(longitudeA, ignoreMask=ignoreMaskA)
bDataObject = dataobj.DataObject(longitudeB, ignoreMask=ignoreMaskB)
diffInfo = dataobj.DiffInfoObject(aDataObject, bDataObject, epsilonValue=llepsilon) #TODO, needs epsilon percent
#TODO, for the moment, unpack these values into local variables
    longitudeDiff = diffInfo.diff_data_object.data
    finiteLongitudeMask = diffInfo.diff_data_object.masks.valid_mask
    lon_not_equal_mask = diffInfo.diff_data_object.masks.trouble_mask
aDataObject = dataobj.DataObject(latitudeA, ignoreMask=ignoreMaskA)
bDataObject = dataobj.DataObject(latitudeB, ignoreMask=ignoreMaskB)
diffInfo = dataobj.DiffInfoObject(aDataObject, bDataObject, epsilonValue=llepsilon) #TODO, needs epsilon percent
#TODO, for the moment, unpack these values into local variables
    latitudeDiff = diffInfo.diff_data_object.data
finiteLatitudeMask = diffInfo.diff_data_object.masks.valid_mask
lat_not_equal_mask = diffInfo.diff_data_object.masks.trouble_mask
lon_lat_not_equal_mask = lon_not_equal_mask | lat_not_equal_mask
lon_lat_not_equal_points_count = sum(lon_lat_not_equal_mask)
lon_lat_not_equal_points_percent = (float(lon_lat_not_equal_points_count) / float(lon_lat_not_equal_mask.size)) * 100.0
# if we have unequal points, create user legible info about the problem
if (lon_lat_not_equal_points_count > 0) :
        LOG.warn("Possible mismatch in values stored in file a and file b longitude and latitude values."
                 + " Depending on the degree of mismatch, some data value comparisons may be "
                 + "distorted or spatially nonsensical.")
# if we are making images, make two showing the invalid lons/lats
if (doMakeImages) :
if (len(longitudeA[~ignoreMaskA]) > 0) and (len(latitudeA[~ignoreMaskA]) > 0) :
plot.plot_and_save_spacial_trouble(longitudeA, latitudeA,
lon_lat_not_equal_mask,
ignoreMaskA,
"A", "Lon./Lat. Points Mismatched between A and B\n" +
"(Shown in A)",
"LonLatMismatch",
outputPath, True)
if (len(longitudeB[~ignoreMaskB]) > 0) and (len(latitudeB[~ignoreMaskB]) > 0) :
plot.plot_and_save_spacial_trouble(longitudeB, latitudeB,
lon_lat_not_equal_mask,
ignoreMaskB,
"B", "Lon./Lat. Points Mismatched between A and B\n" +
"(Shown in B)",
"LonLatMismatch",
outputPath, True)
# setup our return data
returnInfo = {}
returnInfo['lon_lat_not_equal_points_count'] = lon_lat_not_equal_points_count
returnInfo['lon_lat_not_equal_points_percent'] = lon_lat_not_equal_points_percent
return returnInfo
def _compare_spatial_invalidity(invalid_in_a_mask, invalid_in_b_mask, spatial_info,
longitude_a, longitude_b, latitude_a, latitude_b,
do_include_images, output_path) :
"""
Given information about where the two files are spatially invalid, figure
out what invalidity they share and save information or plots for later use
also build a shared longitude/latitude based on A but also including valid
points in B
"""
    # for convenience, make a combined mask
    invalid_in_common_mask = invalid_in_a_mask | invalid_in_b_mask

    # make a "common" longitude/latitude based on A; copy so that patching in
    # values from B below does not modify the caller's arrays
    longitude_common = longitude_a.copy()
    latitude_common  = latitude_a.copy()

    # compare our spatially invalid info
spatial_info['perInvPtsInBoth'] = spatial_info['file A']['perInvPts']
# a default that will hold if the two files have the same spatially invalid pts
if not all(invalid_in_a_mask.ravel() == invalid_in_b_mask.ravel()) :
LOG.info("Mismatch in number of spatially invalid points. " +
"Files may not have corresponding data where expected.")
# figure out which points are only valid in one of the two files
valid_only_in_mask_a = (~invalid_in_a_mask) & invalid_in_b_mask
spatial_info['file A']['numInvPts'] = sum(valid_only_in_mask_a.ravel())
valid_only_in_mask_b = (~invalid_in_b_mask) & invalid_in_a_mask
spatial_info['file B']['numInvPts'] = sum(valid_only_in_mask_b.ravel())
# so how many do they have together?
spatial_info['perInvPtsInBoth'] = _get_percentage_from_mask(invalid_in_common_mask)[0]
# make a "clean" version of the lon/lat
longitude_common[valid_only_in_mask_a] = longitude_a[valid_only_in_mask_a]
longitude_common[valid_only_in_mask_b] = longitude_b[valid_only_in_mask_b]
latitude_common [valid_only_in_mask_a] = latitude_a [valid_only_in_mask_a]
latitude_common [valid_only_in_mask_b] = latitude_b [valid_only_in_mask_b]
    # plot the points that are only valid in one file and not the other
if ((spatial_info['file A']['numInvPts'] > 0) and (do_include_images) and
(len(longitude_a[~invalid_in_a_mask]) > 0) and (len(latitude_a[~invalid_in_a_mask]) > 0)) :
plot.plot_and_save_spacial_trouble(longitude_a, latitude_a,
valid_only_in_mask_a,
invalid_in_a_mask,
"A", "Points only valid in\nFile A\'s longitude & latitude",
"SpatialMismatch",
output_path, True)
if ((spatial_info['file B']['numInvPts'] > 0) and (do_include_images) and
(len(longitude_b[~invalid_in_b_mask]) > 0) and (len(latitude_b[~invalid_in_b_mask]) > 0)
) :
plot.plot_and_save_spacial_trouble(longitude_b, latitude_b,
valid_only_in_mask_b,
invalid_in_b_mask,
"B", "Points only valid in\nFile B\'s longitude & latitude",
"SpatialMismatch",
output_path, True)
return invalid_in_common_mask, spatial_info, longitude_common, latitude_common
def _handle_lon_lat_info (lon_lat_settings, a_file_object, b_file_object, output_path,
should_make_images=False, should_check_equality=True) :
"""
Manage loading and comparing longitude and latitude information for two files
    Note: if the error message is returned as anything but None, something unrecoverable
    occurred while trying to get the lon/lat info. TODO, replace this with a proper thrown exception
"""
# a place to save some general stats about our lon/lat data
spatialInfo = { }
# a place to put possible error messages TODO remove this in favor of an exception
error_msg = None
    # if there is no lon/lat specified, stop now
    if ( ('longitude' not in lon_lat_settings) or ('latitude' not in lon_lat_settings)
         or (('noLonLatVars' in lon_lat_settings) and lon_lat_settings['noLonLatVars']) ) :
return { }, spatialInfo, error_msg
    LOG.debug('lon_lat_settings: ' + str(lon_lat_settings))
# figure out the names to be used for the longitude and latitude variables
a_longitude_name = lon_lat_settings['longitude']
a_latitude_name = lon_lat_settings['latitude']
b_longitude_name = a_longitude_name
b_latitude_name = a_latitude_name
# if we have alternate b names, use those for b instead
if ('longitude_alt_name_in_b' in lon_lat_settings) :
b_longitude_name = lon_lat_settings['longitude_alt_name_in_b']
if ( 'latitude_alt_name_in_b' in lon_lat_settings):
b_latitude_name = lon_lat_settings['latitude_alt_name_in_b']
# if we need to load our lon/lat from different files, open those files
# for the a file, do we have an alternate?
file_for_a_lon_lat = a_file_object
if ('a_lon_lat_from_alt_file' in lon_lat_settings) :
LOG.info("Loading alternate file (" + lon_lat_settings['a_lon_lat_from_alt_file'] + ") for file a longitude/latitude.")
file_for_a_lon_lat, _ = _setup_file(lon_lat_settings['a_lon_lat_from_alt_file'], "\t")
# for the b file, do we have an alternate?
file_for_b_lon_lat = b_file_object
if ('b_lon_lat_from_alt_file' in lon_lat_settings) :
LOG.info("Loading alternate file (" + lon_lat_settings['b_lon_lat_from_alt_file'] + ") for file b longitude/latitude.")
file_for_b_lon_lat, _ = _setup_file(lon_lat_settings['b_lon_lat_from_alt_file'], "\t")
# load our longitude and latitude and do some analysis on them
longitude_a, latitude_a, spaciallyInvalidMaskA, spatialInfo['file A'] = \
_get_and_analyze_lon_lat (file_for_a_lon_lat, a_latitude_name, a_longitude_name,
lon_lat_settings['data_filter_function_lat_in_a'], lon_lat_settings['data_filter_function_lon_in_a'])
longitude_b, latitude_b, spaciallyInvalidMaskB, spatialInfo['file B'] = \
_get_and_analyze_lon_lat (file_for_b_lon_lat, b_latitude_name, b_longitude_name,
lon_lat_settings['data_filter_function_lat_in_b'], lon_lat_settings['data_filter_function_lon_in_b'])
# if we need to, test the level of equality of the "valid" values in our lon/lat
if should_check_equality :
moreSpatialInfo = _check_lon_lat_equality(longitude_a, latitude_a, longitude_b, latitude_b,
spaciallyInvalidMaskA, spaciallyInvalidMaskB,
lon_lat_settings['lon_lat_epsilon'],
should_make_images, output_path)
# if we got the worst type of error result from the comparison this data is too dissimilar to continue
if moreSpatialInfo is None :
error_msg = ("Unable to reconcile sizes of longitude and latitude for variables "
+ str(lon_lat_settings['longitude']) + str(longitude_a.shape) + "/"
+ str(lon_lat_settings['latitude']) + str(latitude_a.shape) + " in file A and variables "
+ str(b_longitude_name) + str(longitude_b.shape) + "/"
+ str(b_latitude_name) + str(latitude_b.shape) + " in file B. Aborting attempt to compare files.")
return { }, { }, error_msg # things have gone wrong
# update our existing spatial information
spatialInfo.update(moreSpatialInfo)
# compare our spatially invalid info to see if the two files have invalid longitudes and latitudes in the same places
spaciallyInvalidMask, spatialInfo, longitude_common, latitude_common = \
_compare_spatial_invalidity(spaciallyInvalidMaskA, spaciallyInvalidMaskB, spatialInfo,
longitude_a, longitude_b, latitude_a, latitude_b,
should_make_images, output_path)
else:
spaciallyInvalidMask = None
longitude_common = None
latitude_common = None
return {'a': {"lon": longitude_a, "lat": latitude_a, "inv_mask": spaciallyInvalidMaskA},
'b': {"lon": longitude_b, "lat": latitude_b, "inv_mask": spaciallyInvalidMaskB},
'common': {"lon": longitude_common, "lat": latitude_common, "inv_mask": spaciallyInvalidMask} }, \
spatialInfo, error_msg
def _open_and_process_files (args, numFilesExpected):
"""
open files listed in the args and get information about the variables in them
"""
# get all the file names
fileNames = args[:numFilesExpected]
# open all the files & get their variable names
files = {}
commonNames = None
for fileName in fileNames:
LOG.info("opening %s" % fileName)
files[fileName] = {}
tempFileObject = (io.open(fileName))
files[fileName]['fileObject'] = tempFileObject
tempNames = set(tempFileObject())
LOG.debug ('variable names for ' + fileName + ': ' + str(tempNames))
files[fileName]['varNames'] = tempNames
if commonNames is None :
commonNames = tempNames
else :
commonNames = commonNames.intersection(tempNames)
files['commonVarNames'] = commonNames
return files
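
# the dictionary returned above maps each file name to an entry of the form
#     {'fileObject': <open io object>, 'varNames': set([...])}
# plus a top level 'commonVarNames' entry holding the intersection of all the
# files' variable name sets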
def _check_pass_or_fail(varRunInfo, variableStats, defaultValues) :
"""
Check whether the variable passed analysis, failed analysis, or
did not need to be quantitatively tested
also returns information about the fractions of failure
"""
passValues = [ ]
# test the epsilon value tolerance
# get the tolerance for failures compared to epsilon
epsilonTolerance = None
if ('epsilon_failure_tolerance' in varRunInfo) :
epsilonTolerance = varRunInfo['epsilon_failure_tolerance']
else :
epsilonTolerance = defaultValues['epsilon_failure_tolerance']
# did we fail based on the epsilon?
failed_fraction = variableStats['Numerical Comparison Statistics']['diff_outside_epsilon_fraction']
passed_epsilon = None
if epsilonTolerance is not None :
passed_epsilon = failed_fraction <= epsilonTolerance
passValues.append(passed_epsilon)
# test the nonfinite tolerance
# get the tolerance for failures in amount of nonfinite data (in spatially valid areas)
nonfiniteTolerance = None
if ('nonfinite_data_tolerance' in varRunInfo) :
nonfiniteTolerance = varRunInfo['nonfinite_data_tolerance']
else :
nonfiniteTolerance = defaultValues['nonfinite_data_tolerance']
# did we fail based on nonfinite data
non_finite_diff_fraction = variableStats['Finite Data Statistics']['finite_in_only_one_fraction']
passed_nonfinite = None
if nonfiniteTolerance is not None :
passed_nonfinite = non_finite_diff_fraction <= nonfiniteTolerance
passValues.append(passed_nonfinite)
# test if the total failed percentage is acceptable
# get the total percentage of failed data that is acceptable
totalFailTolerance = None
if ('total_data_failure_tolerance' in varRunInfo) :
totalFailTolerance = varRunInfo['total_data_failure_tolerance']
# did we fail based on all data failures?
passed_all_percentage = None
if totalFailTolerance is not None :
passed_all_percentage = (non_finite_diff_fraction + failed_fraction) <= totalFailTolerance
passValues.append(passed_all_percentage)
    # test the r-squared correlation coefficient
# get the minimum acceptable r-squared correlation coefficient
min_r_squared = None
if ('minimum_acceptable_squared_correlation_coefficient' in varRunInfo) :
min_r_squared = varRunInfo['minimum_acceptable_squared_correlation_coefficient']
else :
min_r_squared = defaultValues['minimum_acceptable_squared_correlation_coefficient']
# did we fail based on the r-squared correlation coefficient?
r_squared_value = None
passed_r_squared = None
if min_r_squared is not None :
r_squared_value = variableStats['Numerical Comparison Statistics']['r-squared correlation']
passed_r_squared = r_squared_value >= min_r_squared
passValues.append(passed_r_squared)
# figure out the overall pass/fail result
didPass = None
for passValue in passValues :
# if passValue isn't none, we need to update didPass
if passValue is not None :
if didPass is not None :
didPass = passValue and didPass
else :
didPass = passValue
return didPass, failed_fraction, non_finite_diff_fraction, r_squared_value
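
# note on the built in defaults: with epsilon_failure_tolerance and
# nonfinite_data_tolerance both 0.0, a variable passes only when no points
# differ by more than epsilon and no points are finite in just one of the two
# files; any tolerance that is None simply isn't tested and contributes nothing
# to the overall pass/fail result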
def _get_run_identification_info( ) :
"""
get info about what user/machine/version of glance is being used
"""
info_to_return = { }
# get info on who's doing the run and where
info_to_return['machine'] = os.uname()[1] # the name of the machine running the report
info_to_return['user'] = os.getenv("LOGNAME") #os.getlogin() # the name of the user running the report
info_to_return['version'] = _get_glance_version_string()
return info_to_return
def _get_glance_version_string() :
version_num = pkg_resources.require('glance')[0].version
return "glance, version " + str(version_num)
def _get_name_info_for_variable(original_display_name, variable_run_info) :
"""
based on the variable run info, figure out the various names for
the variable and return them
the various names are:
technical_name - the name the variable is listed under in the file
b_variable_technical_name - the name the variable is listed under in the b file (may be the same as technical_name)
explanation_name - the more verbose name that will be shown to the user to identify the variable
original_display_name - the display name given by the user to describe the variable
"""
# figure out the various name related info
technical_name = variable_run_info['variable_name']
explanation_name = technical_name # for now, will add to this later
# if B has an alternate variable name, figure that out
b_variable_technical_name = technical_name
if 'alternate_name_in_B' in variable_run_info :
b_variable_technical_name = variable_run_info['alternate_name_in_B']
# put both names in our explanation
explanation_name = explanation_name + " / " + b_variable_technical_name
# show both the display and current explanation names if they differ
if not (original_display_name == explanation_name) :
explanation_name = original_display_name + ' (' + explanation_name + ')'
return technical_name, b_variable_technical_name, explanation_name
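
# as a concrete (hypothetical) example: for original_display_name "SST" and
# variable_run_info {'variable_name': 'sea_sfc_temp', 'alternate_name_in_B': 'sst_b'},
# the function above would return
#     ('sea_sfc_temp', 'sst_b', 'SST (sea_sfc_temp / sst_b)')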
def _load_variable_data(fileObject, variableNameInFile,
dataFilter=None,
variableToFilterOn=None,
variableBasedFilter=None,
fileDescriptionForDisplay="file") :
"""
load data for a variable from a file
optionally filter the variable data based on a data filter or another variable
    dataFilter must be in the form of (lambda data: some manipulation returning the new data)
    variableBasedFilter must be in the form of (lambda data, filterData: some manipulation returning the new data)
"""
# get the data for the variable
LOG.debug("loading basic data for variable " + variableNameInFile + " from " + fileDescriptionForDisplay)
variableData = fileObject[variableNameInFile]
# apply the basic filter if there is one
if dataFilter is not None :
LOG.debug ("applying filter function to data from " + fileDescriptionForDisplay + " for variable " + variableNameInFile)
variableData = dataFilter(variableData)
# if we've got another variable to filter on, do that
if (variableToFilterOn is not None) and (variableBasedFilter is not None) :
LOG.debug ("filtering data from " + fileDescriptionForDisplay + " for variable " + variableNameInFile
+ " based on additional data from variable " + variableToFilterOn)
dataToFilterOn = fileObject[variableToFilterOn]
variableData = variableBasedFilter(variableData, dataToFilterOn)
return variableData
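
# hypothetical example filters for the function above: a basic filter might
# rescale the data,
#     dataFilter = lambda data: data * 0.01
# while a variable based filter might screen by a quality flag variable,
#     variableBasedFilter = lambda data, flags: where(flags == 0, data, nan)
# (where and nan come from the wildcard numpy import at the top of this module)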
def _uri_needs_rsync(uri_to_check) :
"""
check if the uri requires an rsync in order to access the data
    this will return some false positives if you phrase local URIs with the machine name,
    for ex. if you are on the machine "lotus" and you use the path "rsync://lotus/data/"
"""
return not os.path.exists(uri_to_check)
def _get_UV_info_from_magnitude_direction_info(fileObject, magnitudeName, directionName, invalidMask=None) :
"""
If there are magnitude and direction names, load that information and calculate the u and v that correspond to it
"""
# if we don't have magnitude and direction, we can't calculate the U and V values
if (magnitudeName is None) or (directionName is None) :
return None, None
# load the magnitude and direction data sets
magnitude = _load_variable_data(fileObject, magnitudeName)
direction = _load_variable_data(fileObject, directionName)
# convert the magnitude and direction data into u and v vectors
uData, vData = delta.convert_mag_dir_to_U_V_vector(magnitude, direction, invalidMask=invalidMask)
return uData, vData
def rsync_or_copy_files (list_of_files, target_directory='.', additionalFileNameSuffix='') :
    """
    If the files in the list are remote, rsync them, otherwise, just copy
    them to the target directory
    """
    newPaths = [ ]

    for file_uri in list_of_files :
fileName = os.path.split(file_uri)[1]
baseFile, ext = os.path.splitext(fileName)
newPath = os.path.join(target_directory, baseFile + additionalFileNameSuffix + ext)
newPaths.append(newPath)
        if _uri_needs_rsync(file_uri) :
            cmd = ['rsync', '-Cuav', file_uri, newPath]
        else :
            cmd = ['cp', os.path.abspath(file_uri), newPath]

        LOG.debug('running ' + ' '.join(cmd))
        sh(cmd)
return newPaths
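
# for instance (illustrative paths only), rsync_or_copy_files(['rsync://host/data/a.hdf'],
# target_directory='/tmp', additionalFileNameSuffix='-collocated') would rsync the
# remote file and return ['/tmp/a-collocated.hdf']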
def colocateToFile_library_call(a_path, b_path, var_list=[ ],
options_set={ },
# todo, this doesn't yet do anything
do_document=False,
# todo, the output channel does nothing at the moment
output_channel=sys.stdout) :
"""
this method handles the actual work of the colocateData command line tool
and can be used as a library routine.
TODO, properly document the options
"""
# load the user settings from either the command line or a user defined config file
pathsTemp, runInfo, defaultValues, requestedNames, usedConfigFile = _load_config_or_options(a_path, b_path,
options_set,
requestedVars = var_list)
# deal with the input and output files
if not (os.path.isdir(pathsTemp['out'])) :
LOG.info("Specified output directory (" + pathsTemp['out'] + ") does not exist.")
LOG.info("Creating output directory.")
os.makedirs(pathsTemp['out'])
# make copies of the input files for colocation TODO, fix paths
[pathsTemp['a'], pathsTemp['b']] = rsync_or_copy_files ([pathsTemp['a'], pathsTemp['b']],
target_directory=pathsTemp['out'],
additionalFileNameSuffix='-collocated')
# open the files
LOG.info("Processing File A:")
aFile, _ = _setup_file(pathsTemp['a'], "\t", allowWrite = True)
if aFile is None:
LOG.warn("Unable to continue with comparison because file a (" + pathsTemp['a'] + ") could not be opened.")
sys.exit(1)
LOG.info("Processing File B:")
bFile, _ = _setup_file(pathsTemp['b'], "\t", allowWrite = True)
if bFile is None:
LOG.warn("Unable to continue with comparison because file b (" + pathsTemp['b'] + ") could not be opened.")
sys.exit(1)
# get information about the names the user requested
finalNames, nameStats = _resolve_names(aFile, bFile,
defaultValues,
requestedNames, usedConfigFile)
# return for lon_lat_data variables will be in the form
#{"lon": longitude_data, "lat": latitude_data, "inv_mask": spaciallyInvalidMaskData}
# or { } if there is no lon/lat info
lon_lat_data, _, fatalErrorMsg = _handle_lon_lat_info (runInfo, aFile, bFile, pathsTemp['out'], should_check_equality=False)
if fatalErrorMsg is not None :
LOG.warn(fatalErrorMsg)
sys.exit(1)
# handle the longitude and latitude colocation
LOG.info("Colocating raw longitude and latitude information")
    aColocationInformation, bColocationInformation, totalNumberOfMatchedPoints = \
collocation.create_colocation_mapping_within_epsilon((lon_lat_data['a']['lon'], lon_lat_data['a']['lat']),
(lon_lat_data['b']['lon'], lon_lat_data['b']['lat']),
runInfo['lon_lat_epsilon'],
invalidAMask=lon_lat_data['a']['inv_mask'],
invalidBMask=lon_lat_data['b']['inv_mask'])
(colocatedLongitude, colocatedLatitude, (numMultipleMatchesInA, numMultipleMatchesInB)), \
(unmatchedALongitude, unmatchedALatitude), \
(unmatchedBLongitude, unmatchedBLatitude) = \
        collocation.create_colocated_lonlat_with_lon_lat_colocation(aColocationInformation, bColocationInformation,
totalNumberOfMatchedPoints,
lon_lat_data['a']['lon'], lon_lat_data['a']['lat'],
lon_lat_data['b']['lon'], lon_lat_data['b']['lat'])
# TODO, based on unmatched, issue warnings and record info in the file?
LOG.debug("colocated shape of the longitude: " + str(colocatedLongitude.shape))
LOG.debug("colocated shape of the latitude: " + str(colocatedLatitude.shape))
LOG.debug(str(numMultipleMatchesInA) + " lon/lat pairs contain A points used for multiple matches.")
LOG.debug(str(numMultipleMatchesInB) + " lon/lat pairs contain B points used for multiple matches.")
LOG.debug(str(len(unmatchedALatitude)) + " A lon/lat points could not be matched.")
LOG.debug(str(len(unmatchedBLatitude)) + " B lon/lat points could not be matched.")
# go through each of the possible variables in our files
# and do our colocation for whichever ones we can
for displayName in finalNames:
# pull out the information for this variable analysis run
varRunInfo = finalNames[displayName].copy()
# get the various names
technical_name, b_variable_technical_name, \
explanationName = _get_name_info_for_variable(displayName, varRunInfo)
        LOG.info('analyzing: ' + explanationName)
# load the variable data