#!/usr/bin/env python
# encoding: utf-8
"""
Top-level routines to compare two files.
Created by rayg Apr 2009.
Copyright (c) 2009 University of Wisconsin SSEC. All rights reserved.
"""
#from pprint import pprint, pformat
import os, sys, logging, datetime, glob, re
from urllib.parse import quote # quote() is used below when building report-relative paths
from numpy import *
import numpy
import locale
locale.setlocale(locale.LC_ALL,'') # Initialize our locale
import matplotlib
# this is a hack to keep glance from needing pyqt unless you run the gui
if "gui" in sys.argv[1:] :
try :
import glance.gui_controller as gui_control
except ImportError :
print ("*** Unable to import PyQt5. Please install PyQt5 and add it to your PYTHONPATH in order to use the Glance GUI. ***")
raise
else :
matplotlib.use('Agg')
import glance.report as reportModule
import glance.stats as statistics
import glance.plot as plot
import glance.plotcreatefns as plotcreate
import glance.collocation as collocation
import glance.config_organizer as config_organizer
import glance.data as dataobj # dataobj.FileInfo is used throughout the routines below
import glance.io as io        # io.get_acceptable_file_extensions is used when scanning directories
from glance.util import clean_path, rsync_or_copy_files, get_glance_version_string, get_run_identification_info, setup_dir_if_needed
from glance.load import get_UV_info_from_magnitude_direction_info, load_variable_data, open_and_process_files, handle_lon_lat_info, handle_lon_lat_info_for_one_file, ValueErrorStringToFloat
from glance.lonlat_util import VariableComparisonError
from glance.constants import *
from glance.gui_constants import A_CONST, B_CONST
LOG = logging.getLogger(__name__)
def _get_all_commands_help_string (commands_dict, ) :
"""
    given the dictionary of commands, compose a string with brief information about each of them
"""
to_return = "Available commands in Glance:\n"
for command_name in commands_dict :
short_desc = commands_dict[command_name].__doc__.split('\n')[0]
to_return += "\t%-16s %s\n" % (command_name, short_desc)
return to_return
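# Illustrative sketch (not part of the original module): the helper above assumes
# each command function's docstring starts with a one-line summary. For a
# hypothetical dictionary {"stats": stats_fn, "reportGen": report_fn} it would
# return text along the lines of:
#
#   Available commands in Glance:
#       stats            create statistical summaries of the variables in two files
#       reportGen        generate a comparison report for two files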
def _get_possible_files_from_dir (dir_path) :
"""given a path to a directory, return all the paths to files we think we can open in that directory
"""
# find all the appropriate files in a_path
possible_extensions = io.get_acceptable_file_extensions()
found_files = set()
for filepath in glob.iglob(os.path.join(dir_path, "**"), recursive=True, ):
ext_txt = filepath.split(".")[-1]
if ext_txt in possible_extensions:
found_files.add(filepath)
return found_files
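# Illustrative use (a sketch; the directory path is hypothetical and the accepted
# extensions are whatever io.get_acceptable_file_extensions() reports at run time):
#
#   candidate_paths = _get_possible_files_from_dir("/data/test_case_a")
#   for possible_path in sorted(candidate_paths) :
#       LOG.debug("will consider: " + possible_path)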
def _match_files_from_dirs (a_path, b_path, strip_expressions=None, ) :
"""given two paths to directories, try to match up the files we can analyze in them
"""
if strip_expressions is None :
strip_expressions = [ ]
# find all the files in the a path we might be able to open
found_a_files = _get_possible_files_from_dir(a_path)
LOG.debug("Found " + str(len(found_a_files)) + " possible file(s) in the A directory: ")
for filepath in found_a_files :
LOG.debug(filepath)
"""
# TODO, when we get to python 3.9, we can use str.removeprefix but until then
def _remove_prefix(text, prefix):
if text.startswith(prefix):
return text[len(prefix):]
return None
# test to see if there is a matching file in the b_path for each a_path file
file_pairs = set()
for a_filepath in found_a_files :
inner_path = _remove_prefix(a_filepath, a_path)[1:] # for some reason this leaves a prefix / on the inner_path, so we need to remove that
b_filepath = os.path.join(b_path, inner_path)
if os.path.exists(b_filepath) :
file_pairs.add((a_filepath, b_filepath,))
"""
# find all the files in the b path we might be able to open
found_b_files = _get_possible_files_from_dir(b_path)
LOG.debug("Found " + str(len(found_b_files)) + " possible file(s) in the B directory: ")
    for filepath in found_b_files:
LOG.debug(filepath)
def strip_expressions_from_base (file_path, expressions,) :
clean_name = os.path.basename(file_path)
for expr in expressions :
clean_name = re.sub(expr, '', clean_name)
return clean_name
# try to pair up our files if possible
file_pairs = set()
for a_filepath in found_a_files :
clean_a = strip_expressions_from_base(a_filepath, strip_expressions,)
for b_filepath in found_b_files :
clean_b = strip_expressions_from_base(b_filepath, strip_expressions,)
if clean_a == clean_b :
file_pairs.add((a_filepath, b_filepath,))
return file_pairs
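# Illustrative use (a sketch with hypothetical paths and patterns): the strip
# expressions are regular expressions removed from the base file names (via re.sub)
# before comparison, so files that differ only by, say, a timestamp can still be
# paired up, e.g.
#
#   pairs = _match_files_from_dirs("/data/old_run", "/data/new_run",
#                                  strip_expressions=[r"_t\d{6}", ])
#   # "granule_t123456.nc" and "granule_t654321.nc" both reduce to "granule.nc",
#   # so those two files would be returned together as an (a, b) pair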
# TODO, I'd like to move this into a different file at some point
def _get_name_info_for_variable (original_display_name, variable_run_info) :
"""
based on the variable run info, figure out the various names for
the variable and return them
the various names are:
technical_name - the name the variable is listed under in the file
b_variable_technical_name - the name the variable is listed under in the b file (may be the same as technical_name)
explanation_name - the more verbose name that will be shown to the user to identify the variable
original_display_name - the display name given by the user to describe the variable
"""
# figure out the various name related info
technical_name = variable_run_info[VARIABLE_TECH_NAME_KEY]
explanation_name = technical_name # for now, will add to this later
# if B has an alternate variable name, figure that out
b_variable_technical_name = technical_name
if VARIABLE_B_TECH_NAME_KEY in variable_run_info :
b_variable_technical_name = variable_run_info[VARIABLE_B_TECH_NAME_KEY]
# put both names in our explanation
explanation_name = explanation_name + " / " + b_variable_technical_name
# show both the display and current explanation names if they differ
if not (original_display_name == explanation_name) :
explanation_name = original_display_name + ' (' + explanation_name + ')'
return technical_name, b_variable_technical_name, explanation_name
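# Illustrative example of the helper above (hypothetical names): for a variable
# run info dictionary where VARIABLE_TECH_NAME_KEY is "brightness_temp" and
# VARIABLE_B_TECH_NAME_KEY is "bt_11um", with a user display name of "11um BT",
# it would return:
#
#   technical_name            -> "brightness_temp"
#   b_variable_technical_name -> "bt_11um"
#   explanation_name          -> "11um BT (brightness_temp / bt_11um)"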
def colocateToFile_library_call(a_path, b_path, var_list=None,
options_set=None,
# todo, this doesn't yet do anything
do_document=False,
# todo, the output channel does nothing at the moment
output_channel=sys.stdout) :
"""
this method handles the actual work of the colocateData command line tool
and can be used as a library routine.
TODO, properly document the options
"""
# set some values for defaults
var_list = [ ] if var_list is None else var_list
options_set = { } if options_set is None else options_set
# load the user settings from either the command line or a user defined config file
pathsTemp, runInfo, defaultValues, requestedNames, usedConfigFile = config_organizer.load_config_or_options(a_path, b_path,
options_set,
requestedVars = var_list)
# deal with the input and output files
setup_dir_if_needed(pathsTemp[OUT_FILE_KEY], "output")
# make copies of the input files for colocation TODO, fix paths
[pathsTemp[A_FILE_KEY], pathsTemp[B_FILE_KEY]] = rsync_or_copy_files ([pathsTemp[A_FILE_KEY], pathsTemp[B_FILE_KEY]],
target_directory=pathsTemp[OUT_FILE_KEY],
additionalFileNameSuffix='-collocated')
# open the files
LOG.info("Processing File A:")
aFile = dataobj.FileInfo(pathsTemp[A_FILE_KEY], allowWrite=True)
if aFile is None:
LOG.error("Unable to continue with comparison because file a (" + pathsTemp[A_FILE_KEY] + ") could not be opened.")
sys.exit(1)
LOG.info("Processing File B:")
bFile = dataobj.FileInfo(pathsTemp[B_FILE_KEY], allowWrite=True)
if bFile is None:
LOG.error("Unable to continue with comparison because file b (" + pathsTemp[B_FILE_KEY] + ") could not be opened.")
sys.exit(1)
# get information about the names the user requested
finalNames, nameStats = config_organizer.resolve_names(aFile.file_object,
bFile.file_object,
defaultValues,
requestedNames,
usedConfigFile)
# return for lon_lat_data variables will be in the form
#{LON_KEY: longitude_data, LAT_KEY: latitude_data, INVALID_MASK_KEY: spaciallyInvalidMaskData}
# or { } if there is no lon/lat info
lon_lat_data = { }
try :
lon_lat_data, _ = handle_lon_lat_info (runInfo, aFile, bFile, pathsTemp[OUT_FILE_KEY], should_check_equality=False,
fullDPI=runInfo[DETAIL_DPI_KEY], thumbDPI=runInfo[THUMBNAIL_DPI_KEY])
LOG.error("Error while loading longitude or latitude: ")
LOG.error(str(vle))
exit(1)
except VariableComparisonError as vce :
LOG.error("Error while comparing longitude or latitude: ")
LOG.error(str(vce))
exit(1)
# handle the longitude and latitude colocation
LOG.info("Colocating raw longitude and latitude information")
aColocationInfomation, bColocationInformation, totalNumberOfMatchedPoints = \
collocation.create_colocation_mapping_within_epsilon(lon_lat_data[A_FILE_KEY][LON_KEY], lon_lat_data[A_FILE_KEY][LAT_KEY],
lon_lat_data[B_FILE_KEY][LON_KEY], lon_lat_data[B_FILE_KEY][LAT_KEY],
runInfo[LON_LAT_EPSILON_KEY],
invalidAMask=lon_lat_data[A_FILE_KEY][INVALID_MASK_KEY],
invalidBMask=lon_lat_data[B_FILE_KEY][INVALID_MASK_KEY])
(colocatedLongitude, colocatedLatitude, (numMultipleMatchesInA, numMultipleMatchesInB)), \
(unmatchedALongitude, unmatchedALatitude), \
(unmatchedBLongitude, unmatchedBLatitude) = \
collocation.create_colocated_lonlat_with_lon_lat_colocation(aColocationInfomation, bColocationInformation,
totalNumberOfMatchedPoints,
lon_lat_data[A_FILE_KEY][LON_KEY], lon_lat_data[A_FILE_KEY][LAT_KEY],
lon_lat_data[B_FILE_KEY][LON_KEY], lon_lat_data[B_FILE_KEY][LAT_KEY])
# TODO, based on unmatched, issue warnings and record info in the file?
LOG.debug("colocated shape of the longitude: " + str(colocatedLongitude.shape))
LOG.debug("colocated shape of the latitude: " + str(colocatedLatitude.shape))
LOG.debug(str(numMultipleMatchesInA) + " lon/lat pairs contain A points used for multiple matches.")
LOG.debug(str(numMultipleMatchesInB) + " lon/lat pairs contain B points used for multiple matches.")
LOG.debug(str(len(unmatchedALatitude)) + " A lon/lat points could not be matched.")
LOG.debug(str(len(unmatchedBLatitude)) + " B lon/lat points could not be matched.")
# go through each of the possible variables in our files
# and do our colocation for whichever ones we can
for displayName in finalNames:
# pull out the information for this variable analysis run
varRunInfo = finalNames[displayName].copy()
# get the various names
technical_name, b_variable_technical_name, \
explanationName = _get_name_info_for_variable(displayName, varRunInfo)
        LOG.info('analyzing: ' + explanationName)
# load the variable data
aData = load_variable_data(aFile.file_object, technical_name,
dataFilter = varRunInfo[FILTER_FUNCTION_A_KEY] if FILTER_FUNCTION_A_KEY in varRunInfo else None,
variableToFilterOn = varRunInfo[VAR_FILTER_NAME_A_KEY] if VAR_FILTER_NAME_A_KEY in varRunInfo else None,
variableBasedFilter = varRunInfo[VAR_FILTER_FUNCTION_A_KEY] if VAR_FILTER_FUNCTION_A_KEY in varRunInfo else None,
altVariableFileObject = dataobj.FileInfo(varRunInfo[VAR_FILTER_ALT_FILE_A_KEY]).file_object if VAR_FILTER_ALT_FILE_A_KEY in varRunInfo else None,
fileDescriptionForDisplay = "file A")
bData = load_variable_data(bFile.file_object, b_variable_technical_name,
dataFilter = varRunInfo[FILTER_FUNCTION_B_KEY] if FILTER_FUNCTION_B_KEY in varRunInfo else None,
variableToFilterOn = varRunInfo[VAR_FILTER_NAME_B_KEY] if VAR_FILTER_NAME_B_KEY in varRunInfo else None,
variableBasedFilter = varRunInfo[VAR_FILTER_FUNCTION_B_KEY] if VAR_FILTER_FUNCTION_B_KEY in varRunInfo else None,
altVariableFileObject = dataobj.FileInfo(varRunInfo[VAR_FILTER_ALT_FILE_B_KEY]).file_object if VAR_FILTER_ALT_FILE_B_KEY in varRunInfo else None,
fileDescriptionForDisplay = "file B")
# colocate the data for this variable if we have longitude/latitude data
if (len(lon_lat_data) > 0) and runInfo[DO_COLOCATION_KEY] :
# figure out the invalid masks
invalidA = lon_lat_data[A_FILE_KEY][INVALID_MASK_KEY] | (aData == varRunInfo[FILL_VALUE_KEY])
invalidB = lon_lat_data[B_FILE_KEY][INVALID_MASK_KEY] | (bData == varRunInfo[FILL_VALUE_ALT_IN_B_KEY])
# match up our points in A and B
(aData, bData, (numberOfMultipleMatchesInA, numberOfMultipleMatchesInB)), \
(aUnmatchedData, unmatchedALongitude, unmatchedALatitude), \
(bUnmatchedData, unmatchedBLongitude, unmatchedBLatitude) = \
collocation.create_colocated_data_with_lon_lat_colocation(aColocationInfomation, bColocationInformation,
colocatedLongitude, colocatedLatitude,
aData, bData,
missingData=varRunInfo[FILL_VALUE_KEY],
altMissingDataInB=varRunInfo[FILL_VALUE_ALT_IN_B_KEY],
invalidAMask=invalidA,
invalidBMask=invalidB)
LOG.debug(str(numberOfMultipleMatchesInA) + " data pairs contain A data points used for multiple matches.")
LOG.debug(str(numberOfMultipleMatchesInB) + " data pairs contain B data points used for multiple matches.")
LOG.debug(str(len(aUnmatchedData)) + " A data points could not be matched.")
LOG.debug(str(len(bUnmatchedData)) + " B data points could not be matched.")
# save the colocated data information in the output files
# all the a file information
variableObjTemp = aFile.file_object.create_new_variable( technical_name + '-colocated', # TODO, how should this suffix be handled?
missingvalue = varRunInfo[FILL_VALUE_KEY] if FILL_VALUE_KEY in varRunInfo else None,
data = aData,
variabletocopyattributesfrom = technical_name)
aFile.file_object.add_attribute_data_to_variable(technical_name + '-colocated', 'number of multiple matches',
numberOfMultipleMatchesInA, variableObject=variableObjTemp,)
aFile.file_object.add_attribute_data_to_variable(technical_name + '-colocated', 'number of unmatched points',
len(aUnmatchedData), variableObject=variableObjTemp,)
# all the b file information
variableObjTemp = bFile.file_object.create_new_variable( b_variable_technical_name + '-colocated', # TODO, how should this suffix be handled?
missingvalue = varRunInfo[FILL_VALUE_ALT_IN_B_KEY] if FILL_VALUE_ALT_IN_B_KEY in varRunInfo else None,
data = bData,
variabletocopyattributesfrom = b_variable_technical_name)
bFile.file_object.add_attribute_data_to_variable(b_variable_technical_name + '-colocated', 'number of multiple matches',
numberOfMultipleMatchesInB, variableObject=variableObjTemp,)
bFile.file_object.add_attribute_data_to_variable(b_variable_technical_name + '-colocated', 'number of unmatched points',
len(bUnmatchedData), variableObject=variableObjTemp,)
            # Future, do we want any additional statistics?
else :
LOG.debug(explanationName + " was not selected for colocation and will be ignored.")
# the end of the loop to examine all the variables
# we're done with the files, so close them up
aFile.file_object.close()
bFile.file_object.close()
return
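# Illustrative library-style call of the colocation routine above (a sketch; the
# paths and variable names are hypothetical, and options_set would normally be a
# full options dictionary assembled by config_organizer from the command line or
# a config file):
#
#   colocateToFile_library_call("/data/a/granule.nc", "/data/b/granule.nc",
#                               var_list=["brightness_temp"],
#                               options_set=my_prefilled_options)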
def reportGen_raw_data_simple_call (aData, bData, variableDisplayName,
epsilon=0.0, missingValue=None,
useThreads=True, includeImages=True,
outputDirectory="./") :
"""
Generate a report for a single variable given raw data and
some minimal control settings. This method will also generate
images for the report if includeImages is True.
"""
LOG.info("Setting up basic information")
aData = array(aData)
bData = array(bData)
# set up the run info
runInfo = config_organizer.get_simple_options_dict( )
runInfo[DO_MAKE_IMAGES_KEY] = True
runInfo[DO_MAKE_REPORT_KEY] = True
runInfo[DO_MAKE_FORKS_KEY] = False
runInfo[DO_CLEAR_MEM_THREADED_KEY] = useThreads
# set up the variable specific info
variableSettings = config_organizer.get_simple_variable_defaults( )
variableSettings[EPSILON_KEY] = epsilon
variableSettings[FILL_VALUE_KEY] = missingValue
variableSettings[FILL_VALUE_ALT_IN_B_KEY] = missingValue
variableSettings[VARIABLE_TECH_NAME_KEY] = variableDisplayName
# hang onto identification info
runInfo[MACHINE_INFO_KEY], runInfo[USER_INFO_KEY], runInfo[GLANCE_VERSION_INFO_KEY] = get_run_identification_info()
# deal with the output directories
outputDirectory = clean_path(outputDirectory)
setup_dir_if_needed(outputDirectory, "output")
LOG.info("Analyzing " + variableDisplayName)
# if things are the same shape, analyze them and make our images
if aData.shape == bData.shape :
# setup some values in the variable settings for use in the report
variableSettings[VARIABLE_DIRECTORY_KEY] = outputDirectory
variableSettings[VAR_REPORT_PATH_KEY] = quote(os.path.join(variableDisplayName, 'index.html'))
variableSettings[DOCUMENTATION_PATH_KEY] = quote(os.path.join(outputDirectory, './' + 'doc.html'))
# calculate the variable statistics
variable_stats = statistics.StatisticalAnalysis.withSimpleData(aData, bData,
missingValue, missingValue,
None, None,
epsilon, None)
# add a little additional info
variableSettings[TIME_INFO_KEY] = datetime.datetime.ctime(datetime.datetime.now()) # TODO, move this to util?
didPass, epsilon_failed_fraction, \
non_finite_fail_fraction, r_squared_value \
= variable_stats.check_pass_or_fail(epsilon_failure_tolerance=variableSettings[EPSILON_FAIL_TOLERANCE_KEY] if EPSILON_FAIL_TOLERANCE_KEY in variableSettings else numpy.nan,
epsilon_failure_tolerance_default=runInfo[EPSILON_FAIL_TOLERANCE_KEY],
non_finite_data_tolerance=variableSettings[NONFINITE_TOLERANCE_KEY] if NONFINITE_TOLERANCE_KEY in variableSettings else numpy.nan,
non_finite_data_tolerance_default=runInfo[NONFINITE_TOLERANCE_KEY],
total_data_failure_tolerance=variableSettings[TOTAL_FAIL_TOLERANCE_KEY] if TOTAL_FAIL_TOLERANCE_KEY in variableSettings else numpy.nan,
total_data_failure_tolerance_default=runInfo[TOTAL_FAIL_TOLERANCE_KEY],
min_acceptable_r_squared=variableSettings[MIN_OK_R_SQUARED_COEFF_KEY] if MIN_OK_R_SQUARED_COEFF_KEY in variableSettings else numpy.nan,
min_acceptable_r_squared_default=runInfo[MIN_OK_R_SQUARED_COEFF_KEY],
)
variableSettings[DID_VARIABLE_PASS_KEY] = didPass
# to hold the names of any images created
image_names = {
ORIGINAL_IMAGES_KEY: [ ],
COMPARED_IMAGES_KEY: [ ]
}
# if we need the images, make them now
if includeImages :
LOG.info("Plotting images for " + variableDisplayName)
# the various functions that will create our plots
plotFunctionGenerationObjects = [plotcreate.BasicComparisonPlotsFunctionFactory(), # the function to make the histogram and scatter plot
plotcreate.IMShowPlotFunctionFactory(), ] # the function to do basic imshow images
            # make the plots for this variable (no lon/lat data is available here)
image_names[ORIGINAL_IMAGES_KEY], image_names[COMPARED_IMAGES_KEY] = \
plot.plot_and_save_comparison_figures \
(aData, bData,
plotFunctionGenerationObjects,
outputDirectory,
variableDisplayName,
epsilon,
missingValue,
lonLatDataDict=None,
doFork=False,
shouldClearMemoryWithThreads=useThreads,
shouldUseSharedRangeForOriginal=True)
LOG.info("\tfinished creating figures for: " + variableDisplayName)
# create a temporary files object
files = {
A_FILE_TITLE_KEY: {
PATH_KEY: "raw data input",
LAST_MODIFIED_KEY: "unknown",
MD5SUM_KEY: "n/a"
},
B_FILE_TITLE_KEY: {
PATH_KEY: "raw data input",
LAST_MODIFIED_KEY: "unknown",
MD5SUM_KEY: "n/a"
}
}
# create our report
LOG.info ('Generating report for: ' + variableDisplayName)
reportModule.generate_and_save_variable_report(files,
variableSettings, runInfo,
variable_stats.dictionary_form(),
{ },
image_names,
outputDirectory, "index.html")
# make the glossary page
reportModule.generate_and_save_doc_page(statistics.StatisticalAnalysis.doc_strings(), outputDirectory)
else :
message = (variableDisplayName + ' ' +
'could not be compared. This may be because the data for this variable does not match in shape ' +
'between the two files (file A data shape: ' + str(aData.shape) + '; file B data shape: '
+ str(bData.shape) + ').')
LOG.warn(message)
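# Illustrative library usage of the raw-data report routine above (a sketch; the
# data values and output directory are hypothetical):
#
#   import numpy as np
#   a = np.array([[1.0, 2.0], [3.0, 4.0]])
#   b = np.array([[1.0, 2.1], [3.0, 3.9]])
#   reportGen_raw_data_simple_call(a, b, "example_variable",
#                                  epsilon=0.2, missingValue=-999.0,
#                                  includeImages=False,
#                                  outputDirectory="./example_report")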
def inspect_library_call (a_path, var_list=None,
options_set=None,
# todo, this doesn't yet do anything
do_document=False,
# todo, the output channel does nothing at the moment
output_channel=sys.stdout) :
"""
    this method handles the actual work of the inspectReport command line tool
    and can also be used as a library routine; pass in the slightly parsed
    command line input, or call it directly as a library function, being sure
    to fill out the options.
    TODO, at the moment the options are very brittle and need to be fully filled
    in or this method will fail badly (note: the addition of some glance defaults
    has minimized the problem, but you still need to be careful when dealing with
    optional boolean values; this needs more work)
"""
# set some values for defaults
var_list = [ ] if var_list is None else var_list
options_set = { } if options_set is None else options_set
# load the user settings from either the command line or a user defined config file
pathsTemp, runInfo, defaultValues, requestedNames, usedConfigFile = config_organizer.load_config_or_options(a_path, None, # there is no B path
options_set,
requestedVars = var_list)
# information for debugging purposes
LOG.debug('paths: ' + str(pathsTemp))
LOG.debug('defaults: ' + str(defaultValues))
LOG.debug('run information: ' + str(runInfo))
# if we wouldn't generate anything, just stop now
if (not runInfo[DO_MAKE_IMAGES_KEY]) and (not runInfo[DO_MAKE_REPORT_KEY]) :
LOG.warn("User selection of no image generation and no report generation will result in no " +
"content being generated. Aborting generation function.")
return
# hang onto info to identify who/what/when/where/etc. the report is being run by/for
runInfo[MACHINE_INFO_KEY], runInfo[USER_INFO_KEY], runInfo[GLANCE_VERSION_INFO_KEY] = get_run_identification_info()
# deal with the input and output files
setup_dir_if_needed(pathsTemp[OUT_FILE_KEY], "output")
# open the file
files = {}
LOG.info("Processing File A:")
aFile = dataobj.FileInfo(pathsTemp[A_FILE_KEY])
files[A_FILE_TITLE_KEY] = aFile.get_old_info_dictionary() # FUTURE move to actually using the file object to generate the report
if aFile.file_object is None:
LOG.error("Unable to continue with examination because file (" + pathsTemp[A_FILE_KEY] + ") could not be opened.")
sys.exit(1)
# get information about the names the user requested
    nameStats = { } # nameStats must exist before the possible names are stored in it
    finalNames, nameStats[POSSIBLE_NAMES_KEY] = config_organizer.resolve_names_one_file(aFile.file_object,
defaultValues, # TODO, might need a different default set
requestedNames,
usedConfigFile)
# get info on the global attributes
globalAttrInfo = {}
globalAttrInfo[A_FILE_TITLE_KEY] = aFile.file_object.get_global_attributes()
LOG.debug("output dir: " + str(pathsTemp[OUT_FILE_KEY]))
# return for lon_lat_data variables will be in the form
#{LON_KEY: longitude_data, LAT_KEY: latitude_data, INVALID_MASK_KEY: spaciallyInvalidMaskData}
# or { } if there is no lon/lat info
lon_lat_data = { }
spatialInfo = { }
try :
lon_lat_data, spatialInfo = handle_lon_lat_info_for_one_file (runInfo, aFile)
LOG.error("Error while loading longitude or latitude: ")
LOG.error(str(vle))
exit(1)
# if there is an approved lon/lat shape, hang on to that for future variable data shape checks
    good_shape_from_lon_lat = None
    if len(lon_lat_data) > 0 :
        good_shape_from_lon_lat = lon_lat_data[LON_KEY].shape
# go through each of the possible variables in our files
# and make a report section with images for whichever ones we can
variableInspections = { }
for displayName in finalNames:
# pull out the information for this variable analysis run
varRunInfo = finalNames[displayName].copy()
# get the various names
technical_name, _, explanationName = _get_name_info_for_variable(displayName, varRunInfo)
# make sure that it's possible to load this variable
if not(aFile.file_object.is_loadable_type(technical_name)) :
LOG.warn(displayName + " is of a type that cannot be loaded using current file handling libraries included with Glance." +
" Skipping " + displayName + ".")
continue
LOG.info('analyzing: ' + explanationName)
# load the variable data if we can
try :
aData = load_variable_data(aFile.file_object, technical_name,
dataFilter = varRunInfo[FILTER_FUNCTION_A_KEY] if FILTER_FUNCTION_A_KEY in varRunInfo else None,
variableToFilterOn = varRunInfo[VAR_FILTER_NAME_A_KEY] if VAR_FILTER_NAME_A_KEY in varRunInfo else None,
variableBasedFilter = varRunInfo[VAR_FILTER_FUNCTION_A_KEY] if VAR_FILTER_FUNCTION_A_KEY in varRunInfo else None,
altVariableFileObject = dataobj.FileInfo(varRunInfo[VAR_FILTER_ALT_FILE_A_KEY]).file_object if VAR_FILTER_ALT_FILE_A_KEY in varRunInfo else None,
fileDescriptionForDisplay = "file A")
except Exception as e :
LOG.warn(displayName + " data could not be loaded. This variable will not be included in the output report. " +
"The following error was encountered while trying to load this variable:\n" + str(e))
continue
# get variable attribute information for this variable
attributeInfo = { }
attributeInfo[A_FILE_TITLE_KEY] = aFile.file_object.get_variable_attributes(technical_name)
# pre-check if this data should be plotted and if it should be compared to the longitude and latitude
include_images_for_this_variable = ((not(DO_MAKE_IMAGES_KEY in runInfo)) or (runInfo[DO_MAKE_IMAGES_KEY]))
if DO_MAKE_IMAGES_KEY in varRunInfo :
include_images_for_this_variable = varRunInfo[DO_MAKE_IMAGES_KEY]
do_not_test_with_lon_lat = (not include_images_for_this_variable) or (len(lon_lat_data) <= 0)
# handle vector data
isVectorData = (MAGNITUDE_VAR_NAME_KEY in varRunInfo) and (DIRECTION_VAR_NAME_KEY in varRunInfo)
# check if this data can be examined
# (don't compare lon/lat sizes if we won't be plotting)
if ( do_not_test_with_lon_lat or (aData.shape == good_shape_from_lon_lat) ) :
# check to see if there is a directory to put information about this variable in,
# if not then create it
variableDir = os.path.join(pathsTemp[OUT_FILE_KEY], './' + displayName)
varRunInfo[VARIABLE_DIRECTORY_KEY] = variableDir
varRunInfo[VAR_REPORT_PATH_KEY] = quote(os.path.join(displayName, 'index.html'))
LOG.debug ("Directory selected for variable information: " + varRunInfo[VAR_REPORT_PATH_KEY])
setup_dir_if_needed(variableDir, "variable")
# form the doc and config paths relative to where the variable is
upwardPath = './'
for number in range(len(displayName.split('/'))) : # TODO this is not general to windows
upwardPath = os.path.join(upwardPath, '../')
varRunInfo[DOCUMENTATION_PATH_KEY] = quote(os.path.join(upwardPath, 'doc.html'))
if CONFIG_FILE_NAME_KEY in runInfo :
varRunInfo[CONFIG_FILE_PATH_KEY] = quote(os.path.join(upwardPath, runInfo[CONFIG_FILE_NAME_KEY]))
# figure out the masks we want, and then do our statistical analysis
mask_a_to_use = None if do_not_test_with_lon_lat else lon_lat_data[INVALID_MASK_KEY]
variable_stats = statistics.StatisticalInspectionAnalysis.withSimpleData(aData,
missingValue=varRunInfo[FILL_VALUE_KEY],
ignoreMask=mask_a_to_use).dictionary_form()
# add a little additional info to our variable run info before we squirrel it away
varRunInfo[TIME_INFO_KEY] = datetime.datetime.ctime(datetime.datetime.now()) # todo is this needed?
# to hold the names of any images created
image_names = {
ORIGINAL_IMAGES_KEY: [ ],
COMPARED_IMAGES_KEY: [ ]
}
# create the images for this variable
if (include_images_for_this_variable) :
plotFunctionGenerationObjects = [ ]
# we are always going to want to draw a basic histogram of the data values to tell which
# occur most frequently
plotFunctionGenerationObjects.append(plotcreate.DataHistogramPlotFunctionFactory())
# if it's vector data with longitude and latitude, quiver plot it on the Earth
if isVectorData and (not do_not_test_with_lon_lat) :
plotFunctionGenerationObjects.append(plotcreate.InspectMappedQuiverPlotFunctionFactory())
# if the data is one dimensional we can plot it as lines
plotFunctionGenerationObjects.append(plotcreate.InspectLinePlotsFunctionFactory())
# if the data is 2D we have some options based on the type of data
# if the data is not mapped to a longitude and latitude, just show it as an image
if (do_not_test_with_lon_lat) :
plotFunctionGenerationObjects.append(plotcreate.InspectIMShowPlotFunctionFactory())
# if it's 2D and mapped to the Earth, contour plot it on the earth
else :
plotFunctionGenerationObjects.append(plotcreate.InspectMappedContourPlotFunctionFactory())
# if there's magnitude and direction data, figure out the u and v, otherwise these will be None
aUData, aVData = get_UV_info_from_magnitude_direction_info (aFile.file_object,
varRunInfo[MAGNITUDE_VAR_NAME_KEY] if (MAGNITUDE_VAR_NAME_KEY in varRunInfo) else None,
varRunInfo[DIRECTION_VAR_NAME_KEY] if (DIRECTION_VAR_NAME_KEY in varRunInfo) else None,
lon_lat_data[INVALID_MASK_KEY] if (INVALID_MASK_KEY in lon_lat_data) else None )
image_names[ORIGINAL_IMAGES_KEY], image_names[COMPARED_IMAGES_KEY] = \
plot.plot_and_save_comparison_figures \
(aData, None, # there is no b data
plotFunctionGenerationObjects,
varRunInfo[VARIABLE_DIRECTORY_KEY],
displayName,
None, # there is no epsilon
varRunInfo[FILL_VALUE_KEY],
lonLatDataDict=lon_lat_data,
dataRanges = varRunInfo[DISPLAY_RANGES_KEY] if DISPLAY_RANGES_KEY in varRunInfo else None,
dataRangeNames = varRunInfo[DISPLAY_RANGE_NAMES_KEY] if DISPLAY_RANGE_NAMES_KEY in varRunInfo else None,
dataColors = varRunInfo[DISPLAY_RANGE_COLORS_KEY] if DISPLAY_RANGE_COLORS_KEY in varRunInfo else None,
doFork=runInfo[DO_MAKE_FORKS_KEY],
shouldClearMemoryWithThreads=runInfo[DO_CLEAR_MEM_THREADED_KEY],
shouldUseSharedRangeForOriginal=runInfo[USE_SHARED_ORIG_RANGE_KEY],
doPlotSettingsDict = varRunInfo,
aUData=aUData, aVData=aVData,
fullDPI= runInfo[DETAIL_DPI_KEY],
thumbDPI= runInfo[THUMBNAIL_DPI_KEY],
units_a= varRunInfo[VAR_UNITS_A_KEY] if VAR_UNITS_A_KEY in varRunInfo else None,
useBData=False,
histRange=varRunInfo[HISTOGRAM_RANGE_KEY] if HISTOGRAM_RANGE_KEY in varRunInfo else None)
LOG.info("\tfinished creating figures for: " + explanationName)
# create the report page for this variable
if (runInfo[DO_MAKE_REPORT_KEY]) :
# hang on to some info on our variable
variableInspections[displayName] = {
VARIABLE_RUN_INFO_KEY: varRunInfo
}
LOG.info ('\tgenerating report for: ' + explanationName)
reportModule.generate_and_save_inspect_variable_report(files, varRunInfo, runInfo,
variable_stats, spatialInfo, image_names,
varRunInfo[VARIABLE_DIRECTORY_KEY], "index.html",
variableAttrs=attributeInfo,)
# if we can't do anything with the variable, we should tell the user
else :
message = (explanationName + ' could not be examined. '
+ 'This may be because the data for this variable (data shape: '
+ str(aData.shape) + ') does not match the shape of the selected '
+ 'longitude ' + str(good_shape_from_lon_lat) + ' and '
+ 'latitude ' + str(good_shape_from_lon_lat) + ' variables.')
LOG.warn(message)
# the end of the loop to examine all the variables
# generate our general report pages once we've analyzed all the variables
if (runInfo[DO_MAKE_REPORT_KEY]) :
# get the current time
runInfo[TIME_INFO_KEY] = datetime.datetime.ctime(datetime.datetime.now())
# TODO, create a new report generation function here
# make the main summary report
LOG.info ('generating summary report')
reportModule.generate_and_save_inspection_summary_report (files,
pathsTemp[OUT_FILE_KEY], 'index.html',
runInfo,
variableInspections,
spatialInfo,
nameStats,
globalAttrs=globalAttrInfo,)
# make the glossary
LOG.info ('generating glossary')
reportModule.generate_and_save_doc_page(statistics.StatisticalInspectionAnalysis.doc_strings(), pathsTemp[OUT_FILE_KEY])
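# Illustrative library usage of the inspection routine above (a sketch; the path
# and variable list are hypothetical, and options_set is a pre-filled options
# dictionary of the kind config_organizer builds from the command line or a
# config file):
#
#   inspect_library_call("/data/a/granule.nc",
#                        var_list=["brightness_temp"],
#                        options_set=my_prefilled_options)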
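# Illustrative library usage of the comparison report routine defined below (a
# sketch; the paths and variable names are hypothetical, and options_set must at
# minimum carry the pass/fail flag that is read near the top of the function):
#
#   reportGen_library_call("/data/a/granule.nc", "/data/b/granule.nc",
#                          var_list=["brightness_temp"],
#                          options_set={DO_TEST_PASSFAIL_KEY: False})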
def reportGen_library_call (a_path, b_path, var_list=None,
options_set=None,
# todo, this doesn't yet do anything
do_document=False,
# todo, the output channel does nothing at the moment
output_channel=sys.stdout,
do_return_summary_info=False,) :
"""
this method handles the actual work of the reportGen command line tool
    and can also be used as a library routine; pass in the slightly parsed
    command line input, or call it directly as a library function, being sure
    to fill out the options.
    TODO, at the moment the options are very brittle and need to be fully filled
    in or this method will fail badly (note: the addition of some glance defaults
    has minimized the problem, but you still need to be careful when dealing with
    optional boolean values; this needs more work)
do_return_summary_info tells us if we also need to return the info needed to make
the concise summary page with our return code
"""
# set some values for defaults
var_list = [ ] if var_list is None else var_list
options_set = { } if options_set is None else options_set
# have all the variables passed test criteria set for them?
# if no criteria were set then this will be true
didPassAll = True
do_pass_fail = options_set[DO_TEST_PASSFAIL_KEY] # todo, this is a temporary hack, should be loaded with other options
# load the user settings from either the command line or a user defined config file
pathsTemp, runInfo, defaultValues, requestedNames, usedConfigFile = config_organizer.load_config_or_options(a_path, b_path,
options_set,
requestedVars = var_list)
# note some of this information for debugging purposes
LOG.debug('paths: ' + str(pathsTemp))
LOG.debug('defaults: ' + str(defaultValues))
LOG.debug('run information: ' + str(runInfo))
# if we wouldn't generate anything, just stop now
if (not runInfo[DO_MAKE_IMAGES_KEY]) and (not runInfo[DO_MAKE_REPORT_KEY]) :
LOG.warn("User selection of no image generation and no report generation will result in no " +
"content being generated. Aborting generation function.")
if do_pass_fail :
return 0 # nothing went wrong, we just had nothing to do!
        else :
            return
# hang onto info to identify who/what/when/where/etc. the report is being run by/for
runInfo[MACHINE_INFO_KEY], runInfo[USER_INFO_KEY], runInfo[GLANCE_VERSION_INFO_KEY] = get_run_identification_info()
# deal with the input and output files
setup_dir_if_needed(pathsTemp[OUT_FILE_KEY], "output")
# open the files
files = {}
LOG.info("Processing File A:")
aFile = dataobj.FileInfo(pathsTemp[A_FILE_KEY])
files[A_FILE_TITLE_KEY] = aFile.get_old_info_dictionary() # FUTURE move to actually using the file object to generate the report
if aFile.file_object is None:
LOG.error("Unable to continue with comparison because file a (" + pathsTemp[A_FILE_KEY] + ") could not be opened.")
sys.exit(1)
LOG.info("Processing File B:")
bFile = dataobj.FileInfo(pathsTemp[B_FILE_KEY])
files[B_FILE_TITLE_KEY] = bFile.get_old_info_dictionary() # FUTURE move to actually using the file object to generate the report
if bFile.file_object is None:
LOG.error("Unable to continue with comparison because file b (" + pathsTemp[B_FILE_KEY] + ") could not be opened.")
sys.exit(1)
# get information about the names the user requested
finalNames, nameStats = config_organizer.resolve_names(aFile.file_object,
bFile.file_object,
defaultValues,
requestedNames,
usedConfigFile)
# get info on the global attributes
globalAttrInfo = {}
globalAttrInfo[A_FILE_TITLE_KEY] = aFile.file_object.get_global_attributes()
globalAttrInfo[B_FILE_TITLE_KEY] = bFile.file_object.get_global_attributes()
LOG.debug("output dir: " + str(pathsTemp[OUT_FILE_KEY]))
# return for lon_lat_data variables will be in the form
#{LON_KEY: longitude_data, LAT_KEY: latitude_data, INVALID_MASK_KEY: spaciallyInvalidMaskData}
# or { } if there is no lon/lat info
lon_lat_data = { }
spatialInfo = { }
try :
lon_lat_data, spatialInfo = handle_lon_lat_info (runInfo, aFile, bFile, pathsTemp[OUT_FILE_KEY],
should_make_images = runInfo[DO_MAKE_IMAGES_KEY],
fullDPI=runInfo[DETAIL_DPI_KEY], thumbDPI=runInfo[THUMBNAIL_DPI_KEY])
LOG.error("Error while loading longitude or latitude: ")
LOG.error(str(vle))
exit(1)
except VariableComparisonError as vce :
LOG.error("Error while comparing longitude or latitude: ")
LOG.error(str(vce))
exit(1)
# if there is an approved lon/lat shape, hang on to that for future checks
    good_shape_from_lon_lat = None
    if len(lon_lat_data) > 0 :
        good_shape_from_lon_lat = lon_lat_data[COMMON_KEY][LON_KEY].shape
# this will hold information for the summary report
# it will be in the form
# [displayName] = {
# PASSED_EPSILON_PERCENT_KEY: percent ok with this epsilon,
# FINITE_SIMILAR_PERCENT_KEY: percent with the same finiteness,
# R_SQUARED_COEFF_VALUE_KEY: the r squared correlation coefficient,
# VARIABLE_RUN_INFO_KEY: the detailed variable run information
# }
variableComparisons = { }
# we will also be hanging on to some variable stats for the concise reports
variableStatsCollection = { }
# go through each of the possible variables in our files
# and make a report section with images for whichever ones we can
for displayName in finalNames:
try:
# pull out the information for this variable analysis run
varRunInfo = finalNames[displayName].copy()
# get the various names
technical_name, b_variable_technical_name, \
explanationName = _get_name_info_for_variable(displayName, varRunInfo)
# make sure that it's possible to load this variable
if not(aFile.file_object.is_loadable_type(technical_name)) or not(bFile.file_object.is_loadable_type(b_variable_technical_name)) :
LOG.warn(displayName + " is of a type that cannot be loaded using current file handling libraries included with Glance." +
" Skipping " + displayName + ".")
continue
LOG.info('analyzing: ' + explanationName)
# load the variable data
try:
aData = load_variable_data(aFile.file_object, technical_name,
dataFilter = varRunInfo[FILTER_FUNCTION_A_KEY] if FILTER_FUNCTION_A_KEY in varRunInfo else None,
variableToFilterOn = varRunInfo[VAR_FILTER_NAME_A_KEY] if VAR_FILTER_NAME_A_KEY in varRunInfo else None,
variableBasedFilter = varRunInfo[VAR_FILTER_FUNCTION_A_KEY] if VAR_FILTER_FUNCTION_A_KEY in varRunInfo else None,
altVariableFileObject = dataobj.FileInfo(varRunInfo[VAR_FILTER_ALT_FILE_A_KEY]).file_object if VAR_FILTER_ALT_FILE_A_KEY in varRunInfo else None,
fileDescriptionForDisplay = "file A")
bData = load_variable_data(bFile.file_object, b_variable_technical_name,
dataFilter = varRunInfo[FILTER_FUNCTION_B_KEY] if FILTER_FUNCTION_B_KEY in varRunInfo else None,
variableToFilterOn = varRunInfo[VAR_FILTER_NAME_B_KEY] if VAR_FILTER_NAME_B_KEY in varRunInfo else None,
variableBasedFilter = varRunInfo[VAR_FILTER_FUNCTION_B_KEY] if VAR_FILTER_FUNCTION_B_KEY in varRunInfo else None,
altVariableFileObject = dataobj.FileInfo(varRunInfo[VAR_FILTER_ALT_FILE_B_KEY]).file_object if VAR_FILTER_ALT_FILE_B_KEY in varRunInfo else None,
fileDescriptionForDisplay = "file B")
except Exception as e:
LOG.warn(
displayName + " data could not be loaded. This variable will not be included in the output report. " +
"The following error was encountered while trying to load this variable:\n" + str(e))
continue
# get variable attribute information for this variable
attributeInfo = {}
attributeInfo[A_FILE_TITLE_KEY] = aFile.file_object.get_variable_attributes(technical_name)
attributeInfo[B_FILE_TITLE_KEY] = bFile.file_object.get_variable_attributes(b_variable_technical_name)
# pre-check if this data should be plotted and if it should be compared to the longitude and latitude
include_images_for_this_variable = ((DO_MAKE_IMAGES_KEY not in runInfo) or (runInfo[DO_MAKE_IMAGES_KEY]))
if DO_MAKE_IMAGES_KEY in varRunInfo :
include_images_for_this_variable = varRunInfo[DO_MAKE_IMAGES_KEY]
do_not_test_with_lon_lat = (not include_images_for_this_variable) or (len(lon_lat_data) <= 0)
# handle vector data
isVectorData = (MAGNITUDE_VAR_NAME_KEY in varRunInfo) and (DIRECTION_VAR_NAME_KEY in varRunInfo)
# check if this data can be displayed but
# don't compare lon/lat sizes if we won't be plotting
if ( (aData.shape == bData.shape)
and
( do_not_test_with_lon_lat
or
((aData.shape == good_shape_from_lon_lat) and (bData.shape == good_shape_from_lon_lat)) ) ) :
# check to see if there is a directory to put information about this variable in,
# if not then create it
variableDir = os.path.join(pathsTemp[OUT_FILE_KEY], './' + displayName)
varRunInfo[VARIABLE_DIRECTORY_KEY] = variableDir
varRunInfo[VAR_REPORT_PATH_KEY] = quote(os.path.join(displayName, 'index.html'))
LOG.debug ("Directory selected for variable information: " + varRunInfo[VAR_REPORT_PATH_KEY])
setup_dir_if_needed(variableDir, "variable")
# form the doc and config paths relative to where the variable is
upwardPath = './'
for number in range(len(displayName.split('/'))) : # TODO this is not general to windows
upwardPath = os.path.join(upwardPath, '../')
varRunInfo[DOCUMENTATION_PATH_KEY] = quote(os.path.join(upwardPath, 'doc.html'))
if CONFIG_FILE_NAME_KEY in runInfo :
varRunInfo[CONFIG_FILE_PATH_KEY] = quote(os.path.join(upwardPath, runInfo[CONFIG_FILE_NAME_KEY]))
# figure out the masks we want, and then do our statistical analysis
mask_a_to_use = None if do_not_test_with_lon_lat else lon_lat_data[A_FILE_KEY][INVALID_MASK_KEY]
mask_b_to_use = None if do_not_test_with_lon_lat else lon_lat_data[B_FILE_KEY][INVALID_MASK_KEY]
LOG.debug("Analyzing " + displayName + " statistically.")
variable_stats = statistics.StatisticalAnalysis.withSimpleData(aData, bData,
varRunInfo[FILL_VALUE_KEY], varRunInfo[FILL_VALUE_ALT_IN_B_KEY],
mask_a_to_use, mask_b_to_use,
varRunInfo[EPSILON_KEY], varRunInfo[EPSILON_PERCENT_KEY])
# add a little additional info to our variable run info before we squirrel it away
varRunInfo[TIME_INFO_KEY] = datetime.datetime.ctime(datetime.datetime.now()) # todo is this needed?
didPass, epsilon_failed_fraction, \
non_finite_fail_fraction, \
r_squared_value = variable_stats.check_pass_or_fail(epsilon_failure_tolerance=varRunInfo[EPSILON_FAIL_TOLERANCE_KEY] if EPSILON_FAIL_TOLERANCE_KEY in varRunInfo else numpy.nan,
epsilon_failure_tolerance_default=defaultValues[EPSILON_FAIL_TOLERANCE_KEY],
non_finite_data_tolerance=varRunInfo[NONFINITE_TOLERANCE_KEY] if NONFINITE_TOLERANCE_KEY in varRunInfo else numpy.nan,
non_finite_data_tolerance_default=defaultValues[NONFINITE_TOLERANCE_KEY],
total_data_failure_tolerance=varRunInfo[TOTAL_FAIL_TOLERANCE_KEY] if TOTAL_FAIL_TOLERANCE_KEY in varRunInfo else numpy.nan,
total_data_failure_tolerance_default=defaultValues[TOTAL_FAIL_TOLERANCE_KEY],
min_acceptable_r_squared=varRunInfo[MIN_OK_R_SQUARED_COEFF_KEY] if MIN_OK_R_SQUARED_COEFF_KEY in varRunInfo else numpy.nan,
min_acceptable_r_squared_default=defaultValues[MIN_OK_R_SQUARED_COEFF_KEY],
)
varRunInfo[DID_VARIABLE_PASS_KEY] = didPass
# update the overall pass status
if didPass is not None :
didPassAll = didPassAll and didPass
# based on the settings and whether the variable passed or failed,
# should we include images for this variable?
temp_images_only_on_fail = runInfo[DO_IMAGES_ONLY_ON_FAIL_KEY] if DO_IMAGES_ONLY_ON_FAIL_KEY in runInfo else False
temp_images_only_on_fail = varRunInfo[DO_IMAGES_ONLY_ON_FAIL_KEY] if DO_IMAGES_ONLY_ON_FAIL_KEY in varRunInfo else temp_images_only_on_fail
if temp_images_only_on_fail :
include_images_for_this_variable = include_images_for_this_variable and (not didPass)
varRunInfo[DO_MAKE_IMAGES_KEY] = include_images_for_this_variable
# to hold the names of any images created
image_names = {
ORIGINAL_IMAGES_KEY: [ ],
COMPARED_IMAGES_KEY: [ ]
}
# create the images for this variable
if (include_images_for_this_variable) :
plotFunctionGenerationObjects = [ ]
# if there's magnitude and direction data, figure out the u and v, otherwise these will be None
aUData, aVData = get_UV_info_from_magnitude_direction_info (aFile.file_object,
varRunInfo[MAGNITUDE_VAR_NAME_KEY] if (MAGNITUDE_VAR_NAME_KEY) in varRunInfo else None,
varRunInfo[DIRECTION_VAR_NAME_KEY] if (DIRECTION_VAR_NAME_KEY) in varRunInfo else None,
lon_lat_data[A_FILE_KEY][INVALID_MASK_KEY]
if (A_FILE_KEY in lon_lat_data) and (INVALID_MASK_KEY in lon_lat_data[A_FILE_KEY]) else None)
bUData, bVData = get_UV_info_from_magnitude_direction_info (bFile.file_object,
varRunInfo[MAGNITUDE_B_VAR_NAME_KEY] if (MAGNITUDE_B_VAR_NAME_KEY) in varRunInfo else varRunInfo[MAGNITUDE_VAR_NAME_KEY] if (MAGNITUDE_VAR_NAME_KEY) in varRunInfo else None,
varRunInfo[DIRECTION_B_VAR_NAME_KEY] if (DIRECTION_B_VAR_NAME_KEY) in varRunInfo else varRunInfo[DIRECTION_VAR_NAME_KEY] if (DIRECTION_VAR_NAME_KEY) in varRunInfo else None,
lon_lat_data[B_FILE_KEY][INVALID_MASK_KEY]
if (B_FILE_KEY in lon_lat_data) and (INVALID_MASK_KEY in lon_lat_data[B_FILE_KEY]) else None)
# if the data is the same size, we can always make our basic statistical comparison plots
if (aData.shape == bData.shape) :