Commit 3ced4322 authored by (no author)

adding pass fail control in reportGen; adding _delta statistics and renaming some others to be consistent; fixed calculation of the standard deviation; comment out some print statements in the io module

git-svn-id: https://svn.ssec.wisc.edu/repos/glance/trunk@149 8a9318a1-56ba-4d59-b755-99d26321be01
parent f53f0c37
@@ -1268,6 +1268,7 @@ def reportGen_library_call (a_path, b_path, var_list=[ ],
# have all the variables passed test criteria set for them?
# if no criteria were set then this will be true
didPassAll = True
+do_pass_fail = options_set['usePassFail'] # todo, this is a temporary hack, should be loaded with other options
# load the user settings from either the command line or a user defined config file
pathsTemp, runInfo, defaultValues, requestedNames, usedConfigFile = _load_config_or_options(a_path, b_path,
@@ -1283,7 +1284,10 @@ def reportGen_library_call (a_path, b_path, var_list=[ ],
if (not runInfo['shouldIncludeImages']) and (not runInfo['shouldIncludeReport']) :
LOG.warn("User selection of no image generation and no report generation will result in no " +
"content being generated. Aborting generation function.")
-return 0 # nothing went wrong, we just had nothing to do!
+if do_pass_fail :
+    return 0 # nothing went wrong, we just had nothing to do!
+else :
+    return
# hang onto info to identify who/what/when/where/etc. the report is being run by/for
runInfo.update(_get_run_identification_info( ))
@@ -1577,8 +1581,10 @@ def reportGen_library_call (a_path, b_path, var_list=[ ],
returnCode = 0 if didPassAll else 2 # return 2 only if some of the variables failed
LOG.debug("Pass/Fail return code: " + str(returnCode))
return returnCode
# if we are reporting the pass / fail, return an appropriate status code
if do_pass_fail :
LOG.debug("Pass/Fail return code: " + str(returnCode))
return returnCode
def stats_library_call(afn, bfn, var_list=[ ],
options_set={ },
@@ -1854,6 +1860,7 @@ python -m glance
tempOptions['lonlatepsilon'] = options.lonlatepsilon
tempOptions['epsilon'] = options.epsilon
tempOptions['missing'] = options.missing
+tempOptions['usePassFail'] = options.usePassFail
a_path = _clean_path(args[0])
b_path = _clean_path(args[1])
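
For context, a minimal sketch of how a caller might drive the new pass/fail control through the library interface; the module path glance.compare and the 'usePassFail' key come from this commit, while the file names and the surrounding script are hypothetical:

    from glance import compare

    # hypothetical input files; any pair the comparison supports would do
    options = {'usePassFail': True}
    status = compare.reportGen_library_call('a_file.hdf', 'b_file.hdf', options_set=options)

    # per this change: 0 means all criteria passed (or there was nothing to do),
    # 2 means at least one variable failed its test criteria
    if status == 2:
        print("one or more variables failed their criteria")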
@@ -375,9 +375,9 @@ def create_histogram(data, bins, title, xLabel, yLabel, displayStats=False, unit
# info on the basic stats
tempMask = ones(data.shape, dtype=bool)
tempStats = statistics.NumericalComparisonStatistics.basic_analysis(data, tempMask)
-medianVal = tempStats['median_diff']
-meanVal = tempStats['mean_diff']
-stdVal = tempStats['std_diff']
+medianVal = tempStats['median_delta']
+meanVal = tempStats['mean_delta']
+stdVal = tempStats['std_val']
numPts = data.size
# info on the display of our statistics
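
The three renamed keys read here reduce, per the stats changes later in this commit, to plain NumPy reductions over the signed difference data; a small stand-alone sketch with hypothetical values:

    import numpy as np

    data = np.array([-0.2, 0.0, 0.1, 0.4])   # hypothetical signed differences
    tempMask = np.ones(data.shape, dtype=bool)

    medianVal = np.median(data[tempMask])    # 'median_delta'
    meanVal   = np.mean(data[tempMask])      # 'mean_delta'
    stdVal    = np.std(data[tempMask])       # 'std_val'
    print(medianVal, meanVal, stdVal)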
@@ -97,13 +97,13 @@ class hdf(SD):
data_type = None
scaling_method = None
print ("***** getting " + name + " from file")
#print ("***** getting " + name + " from file")
# get the variable object and use it to
# get our raw data and scaling info
variable_object = self.get_variable_object(name)
raw_data_copy = variable_object[:]
print ("****** raw data loaded")
#print ("****** raw data loaded")
try :
# TODO, this currently won't work with geocat data, work around it for now
scale_factor, scale_factor_error, add_offset, add_offset_error, data_type = SDS.getcal(variable_object)
@@ -120,7 +120,7 @@ class hdf(SD):
scaling_method = temp_attributes['scaling_method']
SDS.endaccess(variable_object)
print ("***** scaling information loaded")
#print ("***** scaling information loaded")
# don't do lots of work if we don't need to scale things
if (scale_factor == 1.0) and (add_offset == 0.0) :
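
The commit silences these prints by commenting them out. An alternative (not what this commit does) would be to route the same diagnostics through the logging module so they remain available at debug level; a minimal sketch with a hypothetical variable name:

    import logging

    logging.basicConfig(level=logging.DEBUG)
    LOG = logging.getLogger(__name__)

    name = "example_variable"  # hypothetical
    LOG.debug("getting %s from file", name)
    LOG.debug("raw data loaded")
    LOG.debug("scaling information loaded")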
@@ -417,11 +417,15 @@ class NumericalComparisonStatistics (StatisticalData) :
It may also contain additional statistics. This is indicated by the does_include_simple boolean.
The possible additional statistics include:
-rms_diff - the root mean squared of the absolute difference between the two data sets
-std_diff - the standard deviation of the absolute difference between the two data sets
-mean_diff - the mean of the absolute difference between the two data sets
-median_diff - the median of the absolute difference between the two data sets
-max_diff - the maximum of the absolute difference between the two data sets
+rms_val - the root mean squared of the difference between the two data sets
+std_val - the standard deviation of the difference between the two data sets
+mean_diff - the mean of the absolute difference between the two data sets
+median_diff - the median of the absolute difference between the two data sets
+max_diff - the maximum of the absolute difference between the two data sets
+mean_delta - the mean of the difference between the two data sets
+median_delta - the median of the difference between the two data sets
+max_delta - the maximum of the difference between the two data sets
+min_delta - the minimum of the difference between the two data sets
These statistics can also be generated separately in dictionary form by calling the
basic_analysis method on this class.
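
To make the naming concrete: the *_diff statistics are taken over the absolute difference and the new *_delta statistics over the signed difference (B - A). A small worked example with hypothetical values:

    import numpy as np

    a = np.array([1.0, 2.0, 3.0])   # hypothetical values from file A
    b = np.array([3.0, 1.0, 3.0])   # hypothetical values from file B

    delta = b - a                   # signed difference: [ 2., -1.,  0.]
    abs_diff = np.abs(delta)        # absolute difference: [ 2.,  1.,  0.]

    # *_delta statistics: mean 0.333..., median 0.0, max 2.0, min -1.0
    print(np.mean(delta), np.median(delta), np.max(delta), np.min(delta))
    # *_diff statistics: mean 1.0, median 1.0, max 2.0
    print(np.mean(abs_diff), np.median(abs_diff), np.max(abs_diff))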
@@ -434,14 +438,20 @@ class NumericalComparisonStatistics (StatisticalData) :
"both have been defined",
'diff_outside_epsilon_fraction': "fraction of finite differences falling outside acceptable epsilon " +
"definitions (out of common_finite_count)",
'max_diff': "Maximum difference of finite values",
'mean_diff': "mean difference of finite values",
'median_diff': "median difference of finite values",
'max_diff': "maximum absolute valued difference of the finite values",
'mean_diff': "mean of the absolute value difference of the finite values",
'median_diff': "median of the absolute value difference of the finite values",
'mean_delta': "mean of the subtractive difference of the finite values",
'median_delta': "median of the subtractive difference of the finite values",
'max_delta': "maximum finite data value from the data set of B file - A file",
'min_delta': "minimum finite data value from the data set of B file - A file",
'perfect_match_count': "number of perfectly matched finite data points between A and B",
'perfect_match_fraction': "fraction of finite values perfectly matching between A and B (out of common_finite_count)",
'rms_diff': "root mean square (RMS) difference of finite values",
'rms_val': "root mean square (RMS) difference of finite values",
'r-squared correlation': "the square of the r correlation (see correlation)",
'std_diff': "standard deviation of difference of finite values",
'std_val': "standard deviation of difference of finite values",
'mismatch_points_count': 'number of points that differ in finite/missing status between the input data sets A and B,' +
' or are unacceptable when compared according to the current epsilon definitions',
'mismatch_points_fraction': 'fraction of points that differ in finite/missing status between the input data sets A and B,' +
@@ -497,17 +507,27 @@ class NumericalComparisonStatistics (StatisticalData) :
basic_dict = NumericalComparisonStatistics.basic_analysis(diffInfoObject.diff_data_object.data,
valid_in_both)
if len(basic_dict) > 0 :
-self.rms_diff = basic_dict['rms_diff']
-self.std_diff = basic_dict['std_diff']
+self.rms_val = basic_dict['rms_val']
+self.std_val = basic_dict['std_val']
self.mean_diff = basic_dict['mean_diff']
self.median_diff = basic_dict['median_diff']
self.max_diff = basic_dict['max_diff']
+self.mean_delta = basic_dict['mean_delta']
+self.median_delta = basic_dict['median_delta']
+self.max_delta = basic_dict['max_delta']
+self.min_delta = basic_dict['min_delta']
else :
-self.rms_diff = np.nan
-self.std_diff = np.nan
+self.rms_val = np.nan
+self.std_val = np.nan
self.mean_diff = np.nan
self.median_diff = np.nan
self.max_diff = np.nan
+self.mean_delta = np.nan
+self.median_delta = np.nan
+self.max_delta = np.nan
+self.min_delta = np.nan
self.temp_analysis = basic_dict
def dictionary_form(self) :
@@ -550,13 +570,20 @@ class NumericalComparisonStatistics (StatisticalData) :
return { }
# calculate our statistics
-absDiffData = abs(diffData)
root_mean_square_value = delta.calculate_root_mean_square(diffData, valid_mask)
-return { 'rms_diff': root_mean_square_value,
-'std_diff': np.std(absDiffData[valid_mask]),
-'mean_diff': np.mean(absDiffData[valid_mask]),
-'median_diff': np.median(absDiffData[valid_mask]),
-'max_diff': np.max(absDiffData[valid_mask])
+tempDiffData = diffData[valid_mask]
+absDiffData = np.abs(tempDiffData)
+return { 'rms_val': root_mean_square_value,
+'std_val': np.std(tempDiffData),
+'mean_diff': np.mean(absDiffData),
+'median_diff': np.median(absDiffData),
+'max_diff': np.max(absDiffData),
+'mean_delta': np.mean(tempDiffData),
+'median_delta': np.median(tempDiffData),
+'max_delta': np.max(tempDiffData),
+'min_delta': np.min(tempDiffData)
}
@staticmethod
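
This hunk is the "fixed calculation of the standard deviation" from the commit message: the deviation is now taken over the signed differences rather than their absolute values, which generally gives a different (and, per the author, the intended) result. A minimal illustration with hypothetical values:

    import numpy as np

    diff = np.array([2.0, -1.0, 0.0, -1.0])   # hypothetical signed differences (B - A)

    old_std = np.std(np.abs(diff))            # former 'std_diff': std of |diff| ~= 0.707
    new_std = np.std(diff)                    # new 'std_val': std of diff       ~= 1.225
    print(old_std, new_std)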
@@ -22,7 +22,7 @@ easy_install -d $HOME/Library/Python -vi http://larch.ssec.wisc.edu/eggs/repos g
from setuptools import setup, find_packages
setup( name="glance",
version="0.2.6.28",
version="0.2.6.29",
zip_safe = True,
entry_points = { 'console_scripts': [ 'glance = glance.compare:main' ] },
packages = find_packages('.'),