Commit cf0177d1 authored by Geoff Cureton


Latest changes to griddingAndGranulation.c brought in from the CSPP VIIRS EDR project. From the same project we add LandWaterMask.py and PrecipWater.py as examples of the use of grid2gran_nearest() and grid2gran_bilinearInterp() respectively.
parent e4149d51
#!/usr/bin/env python
### System libraries
import time, string, sys
from os import path
import numpy as np
from bisect import bisect,bisect_right,bisect_left
def binary_search_SP(cpsval,size,cpsArr):
status = 0
if ((cpsval > cpsArr[0]) and (cpsval < cpsArr[-1])):
#-- This is for unit offset
#l_index = 0
#u_index = size+1
#-- This is for zero offset
l_index = -1
u_index = size
while ((u_index - l_index) > 1):
search_index = (u_index + l_index)/2
if (cpsval <= cpsArr[search_index]):
u_index = search_index
else :
l_index = search_index
elif (cpsval <= cpsArr[0]):
print "cpsval <= minimum in LUT"
l_index = 0
u_index = 1
status = 1
elif (cpsval >= cpsArr[-1]):
print "cpsval >= maximum in LUT"
l_index = size-2
u_index = size-1
status = 1
else :
print "Some situation not captured, passing!!!"
pass
return [u_index, l_index, status]
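# A minimal alternative sketch (illustrative only, not called below): the same
# bracketing search written with the stdlib bisect module imported above.
def binary_search_bisect(cpsval, cpsArr):
    # First index whose value is >= cpsval, clamped to [1, size-1]
    u_index = min(max(bisect_left(cpsArr, cpsval), 1), len(cpsArr)-1)
    l_index = u_index - 1
    status = 0 if (cpsArr[0] < cpsval < cpsArr[-1]) else 1
    return [u_index, l_index, status]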
def COTconvert(nCOT,COTlut,CPS,COTvis):
cpsArr = COTlut[:,0]
visIrEfficRatio = COTlut[:,1]
# If CPS exceeds the largest value in the LUT, use the last
# VIS/IR efficiency ratio in the LUT
if ( CPS > cpsArr[-1]) :
s = visIrEfficRatio[-1]
else :
u_index,l_index,status = binary_search_SP(CPS, nCOT, cpsArr)
print "Finished binary search..."
print "l_index = ",l_index
print "u_index = ",u_index
m = (visIrEfficRatio[u_index]-visIrEfficRatio[l_index])/(cpsArr[u_index]-cpsArr[l_index])
b = visIrEfficRatio[u_index]-m*cpsArr[u_index]
s = m*CPS+b
print "cpsArr[l_index] = ",cpsArr[l_index]
print "cpsArr[u_index] = ",cpsArr[u_index]
print "visIrEfficRatio[l_index] = ",visIrEfficRatio[l_index]
print "visIrEfficRatio[u_index] = ",visIrEfficRatio[u_index]
print "s = ",s
# Compute the IR COT
COTir = COTvis/s
return COTir
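# For comparison (illustrative only): np.interp does the same piecewise linear
# lookup, except that below cpsArr[0] it clamps to the first LUT value rather
# than extrapolating from the first LUT segment as COTconvert does.
def COTconvert_interp(COTlut, CPS, COTvis):
    s = np.interp(CPS, COTlut[:, 0], COTlut[:, 1])
    return COTvis / s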
def readCOTfile(file):
results = []
file=str(file)
f = open(file,'r')
lines = f.readlines()[1:]
f.close()
for l in lines:
fields = l.split()
CPS = float(fields[0])
COT = float(fields[1])
all = [CPS,COT]
results.append(all)
results = np.array(results)
nCOT = np.shape(results)[0]
return results,nCOT
def main(cot,cps):
file = '/data/geoffc/MODIS_NPOESS/trunk/LEOCAT/data/VIIRS/LUT/CTP/COTlut.db'
    COTlut,nCOT = readCOTfile(file)
COTir = COTconvert(nCOT,COTlut,cps,cot)
print "COTir = ",COTir
if __name__=='__main__':
cot = float(sys.argv[1])
cps = float(sys.argv[2])
print "Input cloud optical thickness: ",cot
print "Input cloud particle size: ",cps,"microns"
main(cot,cps)
#!/usr/bin/env python
# encoding: utf-8
"""
PrecipWater.py
* DESCRIPTION: Class to granulate the ViirsAncPrecipWater data product
Created by Geoff Cureton on 2013-02-25.
Copyright (c) 2011 University of Wisconsin SSEC. All rights reserved.
"""
file_Date = '$Date: 2014-09-19 15:13:47 -0500 (Fri, 19 Sep 2014) $'
file_Revision = '$Revision: 2235 $'
file_Author = '$Author: geoffc $'
file_HeadURL = '$HeadURL: https://svn.ssec.wisc.edu/repos/jpss_adl/trunk/scripts/edr/ANC/PrecipWater.py $'
file_Id = '$Id: PrecipWater.py 2235 2014-09-19 20:13:47Z geoffc $'
__author__ = 'G.P. Cureton <geoff.cureton@ssec.wisc.edu>'
__version__ = '$Id: PrecipWater.py 2235 2014-09-19 20:13:47Z geoffc $'
__docformat__ = 'Epytext'
import os, sys, logging, traceback
from os import path,uname,environ
import string
import re
import uuid
from glob import glob
from time import time
from datetime import datetime,timedelta
from scipy import round_
import numpy as np
from numpy import ma
import copy
from bisect import bisect_left,bisect_right
import ctypes
from numpy.ctypeslib import ndpointer
import ViirsData
from NCEPtoBlob import NCEPclass
# skim and convert routines for reading .asc metadata fields of interest
import adl_blob2 as adl_blob
import adl_asc
from adl_asc import skim_dir, contiguous_granule_groups, granule_groups_contain, \
effective_anc_contains,eliminate_duplicates,_is_contiguous, \
RDR_REQUIRED_KEYS, POLARWANDER_REQUIRED_KEYS
from adl_common import ADL_HOME, CSPP_RT_HOME, CSPP_RT_ANC_PATH, \
CSPP_RT_ANC_CACHE_DIR, COMMON_LOG_CHECK_TABLE
# every module should have a LOG object
try :
sourcename= file_Id.split(" ")
LOG = logging.getLogger(sourcename[1])
except :
LOG = logging.getLogger('PrecipWater')
from Utils import getURID, getAscLine, getAscStructs, findDatelineCrossings, \
shipOutToFile
class PrecipWater() :
def __init__(self,inDir=None, sdrEndian=None, ancEndian=None):
self.collectionShortName = 'VIIRS-ANC-Preci-Wtr-Mod-Gran'
self.xmlName = 'VIIRS_ANC_PRECI_WTR_MOD_GRAN.xml'
self.blobDatasetName = 'totalPrecipitableWater'
self.dataType = 'float32'
self.sourceType = 'NCEP_ANC_Int'
self.sourceList = ['']
self.trimObj = ViirsData.ViirsTrimTable()
if inDir is None :
self.inDir = path.abspath(path.curdir)
else :
self.inDir = inDir
if sdrEndian is None :
self.sdrEndian = adl_blob.LITTLE_ENDIAN
else :
self.sdrEndian = sdrEndian
if ancEndian is None :
self.ancEndian = adl_blob.LITTLE_ENDIAN
else :
self.ancEndian = ancEndian
def ingest(self,ancBlob=None):
'''
Ingest the ancillary dataset.
'''
dates = []
ncepBlobFiles = []
for gridBlobStruct in ancBlob:
timeObj = gridBlobStruct[0]
ncepBlobFile = gridBlobStruct[1]
LOG.debug("VIIRS-ANC-Preci-Wtr-Mod-Gran %s --> %s" % \
(ncepBlobFile,timeObj.strftime("%Y-%m-%d %H:%M:%S:%f")))
dates.append(timeObj)
ncepBlobFiles.append(ncepBlobFile)
self.date_0 = dates[0]
self.date_1 = dates[1]
LOG.debug("Minimum NCEP date is: %s" %(self.date_0.strftime("%Y-%m-%d %H:%M:%S:%f")))
LOG.debug("Maximum NCEP date is: %s" %(self.date_1.strftime("%Y-%m-%d %H:%M:%S:%f")))
ncepBlobFile_0 = ncepBlobFiles[0]
ncepBlobFile_1 = ncepBlobFiles[1]
self.gridData_0 = getattr(ncepBlobFile_0,self.blobDatasetName).astype(self.dataType)
self.gridData_1 = getattr(ncepBlobFile_1,self.blobDatasetName).astype(self.dataType)
def setGeolocationInfo(self,dicts):
'''
Populate this class instance with the geolocation data for a single granule
'''
# Set some environment variables and paths
ANC_SCRIPTS_PATH = path.join(CSPP_RT_HOME,'viirs')
ADL_ASC_TEMPLATES = path.join(ANC_SCRIPTS_PATH,'asc_templates')
# Collect some data from the geolocation dictionary
self.geoDict = dicts
URID = dicts['URID']
geo_Collection_ShortName = dicts['N_Collection_Short_Name']
N_Granule_ID = dicts['N_Granule_ID']
ObservedStartTimeObj = dicts['ObservedStartTime']
geoFiles = glob('%s/%s*' % (self.inDir,URID))
geoFiles.sort()
LOG.debug("###########################")
LOG.debug(" Geolocation Information ")
LOG.debug("###########################")
LOG.debug("N_Granule_ID : %r" % (N_Granule_ID))
LOG.debug("ObservedStartTime : %s" % (ObservedStartTimeObj.__str__()))
LOG.debug("N_Collection_Short_Name : %s" %(geo_Collection_ShortName))
LOG.debug("URID : %r" % (URID))
LOG.debug("geoFiles : %r" % (geoFiles))
LOG.debug("###########################")
timeDelta = (self.date_1 - self.date_0).total_seconds()
LOG.debug("timeDelta is %r seconds" %(timeDelta))
timePrime = (ObservedStartTimeObj - self.date_0).total_seconds()
LOG.debug("timePrime is %r seconds (%f percent along time interval)" % \
(timePrime,(timePrime/timeDelta)*100.))
delta_gridData = self.gridData_1 - self.gridData_0
self.gridData = (delta_gridData/timeDelta) * timePrime + self.gridData_0
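        # i.e. a linear blend in time: with w = timePrime/timeDelta this is
        # (1-w)*gridData_0 + w*gridData_1, evaluated at the granule start time.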
gridData_0_avg = np.average(self.gridData_0)
gridData_1_avg = np.average(self.gridData_1)
gridData_avg = np.average(self.gridData)
LOG.debug("average(gridData_0) = %f" %(np.average(self.gridData_0)))
LOG.debug("average(gridData_1) = %f" %(np.average(self.gridData_1)))
LOG.debug("average(gridData) = %f" %(np.average(self.gridData)))
# Do we have terrain corrected geolocation?
terrainCorrectedGeo = True if 'GEO-TC' in geo_Collection_ShortName else False
# Do we have long or short style geolocation field names?
if (geo_Collection_ShortName=='VIIRS-MOD-GEO-TC' or geo_Collection_ShortName=='VIIRS-MOD-RGEO') :
longFormGeoNames = True
LOG.debug("We have long form geolocation names")
elif (geo_Collection_ShortName=='VIIRS-MOD-GEO' or geo_Collection_ShortName=='VIIRS-MOD-RGEO-TC') :
LOG.debug("We have short form geolocation names")
longFormGeoNames = False
else :
LOG.error("Invalid geolocation shortname: %s" %(geo_Collection_ShortName))
return -1
# Get the geolocation xml file
geoXmlFile = "%s.xml" % (string.replace(geo_Collection_ShortName,'-','_'))
geoXmlFile = path.join(ADL_HOME,'xml/VIIRS',geoXmlFile)
if path.exists(geoXmlFile):
LOG.debug("We are using for %s: %s,%s" %(geo_Collection_ShortName,geoXmlFile,geoFiles[0]))
# Open the geolocation blob and get the latitude and longitude
endian = self.sdrEndian
geoBlobObj = adl_blob.map(geoXmlFile,geoFiles[0], endian=endian)
# Get scan_mode to find any bad scans
scanMode = geoBlobObj.scan_mode[:]
badScanIdx = np.where(scanMode==254)[0]
LOG.debug("Bad Scans: %r" % (badScanIdx))
            # Determine the min, max and range of the latitude and longitude,
# taking care to exclude any fill values.
if longFormGeoNames :
if endian==adl_blob.BIG_ENDIAN:
latitude = getattr(geoBlobObj,'latitude').byteswap()
longitude = getattr(geoBlobObj,'longitude').byteswap()
latitude = latitude.astype('float')
longitude = longitude.astype('float')
else:
latitude = getattr(geoBlobObj,'latitude').astype('float')
longitude = getattr(geoBlobObj,'longitude').astype('float')
else :
latitude = getattr(geoBlobObj,'lat').astype('float')
longitude = getattr(geoBlobObj,'lon').astype('float')
latitude = ma.masked_less(latitude,-800.)
latMin,latMax = np.min(latitude),np.max(latitude)
latRange = latMax-latMin
longitude = ma.masked_less(longitude,-800.)
lonMin,lonMax = np.min(longitude),np.max(longitude)
lonRange = lonMax-lonMin
LOG.debug("min,max,range of latitide: %f %f %f" % (latMin,latMax,latRange))
LOG.debug("min,max,range of longitude: %f %f %f" % (lonMin,lonMax,lonRange))
# Determine the latitude and longitude fill masks, so we can restore the
# fill values after we have scaled...
latMask = latitude.mask
lonMask = longitude.mask
# Check if the geolocation is in radians, convert to degrees
if 'RGEO' in geo_Collection_ShortName :
LOG.debug("Geolocation is in radians, convert to degrees...")
latitude = np.degrees(latitude)
longitude = np.degrees(longitude)
latMin,latMax = np.min(latitude),np.max(latitude)
latRange = latMax-latMin
lonMin,lonMax = np.min(longitude),np.max(longitude)
lonRange = lonMax-lonMin
LOG.debug("New min,max,range of latitude: %f %f %f" % (latMin,latMax,latRange))
LOG.debug("New min,max,range of longitude: %f %f %f" % (lonMin,lonMax,lonRange))
# Restore fill values to masked pixels in geolocation
geoFillValue = self.trimObj.sdrTypeFill['VDNE_FLOAT64_FILL'][latitude.dtype.name]
latitude = ma.array(latitude,mask=latMask,fill_value=geoFillValue)
self.latitude = latitude.filled()
geoFillValue = self.trimObj.sdrTypeFill['VDNE_FLOAT64_FILL'][longitude.dtype.name]
longitude = ma.array(longitude,mask=lonMask,fill_value=geoFillValue)
self.longitude = longitude.filled()
# Record the corners, taking care to exclude any bad scans...
nDetectors = 16
firstGoodScan = np.where(scanMode<=2)[0][0]
lastGoodScan = np.where(scanMode<=2)[0][-1]
firstGoodRow = firstGoodScan * nDetectors
lastGoodRow = lastGoodScan * nDetectors + nDetectors - 1
latCrnList = [latitude[firstGoodRow,0],latitude[firstGoodRow,-1],latitude[lastGoodRow,0],latitude[lastGoodRow,-1]]
lonCrnList = [longitude[firstGoodRow,0],longitude[firstGoodRow,-1],longitude[lastGoodRow,0],longitude[lastGoodRow,-1]]
# Check for dateline/pole crossings
num180Crossings = findDatelineCrossings(latCrnList,lonCrnList)
LOG.debug("We have %d dateline crossings."%(num180Crossings))
# Copy the geolocation information to the class object
self.latMin = latMin
self.latMax = latMax
self.latRange = latRange
self.lonMin = lonMin
self.lonMax = lonMax
self.lonRange = lonRange
self.scanMode = scanMode
self.latitude = latitude
self.longitude = longitude
self.latCrnList = latCrnList
self.lonCrnList = lonCrnList
self.num180Crossings = num180Crossings
# Parse the geolocation asc file to get struct information which will be
# written to the ancillary asc files
geoAscFileName = path.join(self.inDir,URID+".asc")
LOG.debug("\nOpening %s..." % (geoAscFileName))
geoAscFile = open(geoAscFileName,'rt')
self.RangeDateTimeStr = getAscLine(geoAscFile,"ObservedDateTime")
self.RangeDateTimeStr = string.replace(self.RangeDateTimeStr,"ObservedDateTime","RangeDateTime")
self.GRingLatitudeStr = getAscStructs(geoAscFile,"GRingLatitude",12)
self.GRingLongitudeStr = getAscStructs(geoAscFile,"GRingLongitude",12)
geoAscFile.close()
def _grid2Gran_bilinearInterp(self,dataLat, dataLon, gridData, gridLat, gridLon):
'''Granulates a gridded dataset using an input geolocation'''
nData = np.int64(dataLat.size)
gridRows = np.int32(gridLat.shape[0])
gridCols = np.int32(gridLat.shape[1])
data = np.ones(np.shape(dataLat),dtype=np.float64)* -999.9
dataIdx = np.ones(np.shape(dataLat),dtype=np.int64) * -99999
ANC_SCRIPTS_PATH = path.join(CSPP_RT_HOME,'viirs')
libFile = path.join(ANC_SCRIPTS_PATH,'libgriddingAndGranulation.so.1.0.1')
LOG.debug("Gridding and granulation library file: %s" % (libFile))
lib = ctypes.cdll.LoadLibrary(libFile)
grid2gran_bilinearInterp = lib.grid2gran_bilinearInterp
grid2gran_bilinearInterp.restype = None
grid2gran_bilinearInterp.argtypes = [
ndpointer(ctypes.c_double,ndim=1,shape=(nData),flags='C_CONTIGUOUS'),
ndpointer(ctypes.c_double,ndim=1,shape=(nData),flags='C_CONTIGUOUS'),
ndpointer(ctypes.c_double,ndim=1,shape=(nData),flags='C_CONTIGUOUS'),
ctypes.c_int64,
ndpointer(ctypes.c_double,ndim=2,shape=(gridRows,gridCols),flags='C_CONTIGUOUS'),
ndpointer(ctypes.c_double,ndim=2,shape=(gridRows,gridCols),flags='C_CONTIGUOUS'),
ndpointer(ctypes.c_double,ndim=2,shape=(gridRows,gridCols),flags='C_CONTIGUOUS'),
ndpointer(ctypes.c_int64,ndim=1,shape=(nData),flags='C_CONTIGUOUS'),
ctypes.c_int32,
ctypes.c_int32
]
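        # Each ndpointer entry makes ctypes verify the dtype, dimensionality,
        # shape and C-contiguity of the corresponding numpy array argument
        # before it is handed to the C routine.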
        '''
        The C prototype being wrapped (grid2gran_bilinearInterp):

        int grid2gran_bilinearInterp(double *lat,
                                     double *lon,
                                     double *data,
                                     long nData,
                                     double *gridLat,
                                     double *gridLon,
                                     double *gridData,
                                     long *gridDataIdx,
                                     int nGridRows,
                                     int nGridCols
                                     )
        '''
LOG.debug("Calling C routine grid2gran_bilinearInterp()...")
retVal = grid2gran_bilinearInterp(dataLat,
dataLon,
data,
nData,
gridLat,
gridLon,
gridData,
dataIdx,
gridRows,
gridCols)
LOG.debug("Returning from C routine grid2gran_bilinearInterp()")
return data,dataIdx
def granulate(self,ANC_objects):
'''
Granulate the ancillary dataset.
'''
LOG.info("Granulating %s ..." % (self.collectionShortName))
degInc = 0.5
lats = np.arange(361.)*degInc - 90.
lons = np.arange(720.)*degInc - 180.
latitude = self.latitude
longitude = self.longitude
gridData = self.gridData[:,:]
if self.num180Crossings != 2 :
#gridData = np.roll(gridData,360) # old
gridData = np.roll(gridData,360,axis=1) # new
gridLon,gridLat = np.meshgrid(lons,lats)
LOG.debug("start,end NCEP Grid Latitude values : %f,%f"%(gridLat[0,0],gridLat[-1,0]))
LOG.debug("start,end NCEP Grid Longitude values : %f,%f"%(gridLon[0,0],gridLon[0,-1]))
else :
negLonIdx = np.where(lons<0)
lons[negLonIdx] += 360.
lons = np.roll(lons,360)
gridLon,gridLat = np.meshgrid(lons,lats)
longitudeNegIdx = np.where(longitude < 0.)
longitude[longitudeNegIdx] += 360.
LOG.debug("start,end NCEP Grid Latitude values : %f,%f"%(gridLat[0,0],gridLat[-1,0]))
LOG.debug("start,end NCEP Grid Longitude values : %f,%f"%(gridLon[0,0],gridLon[0,-1]))
LOG.debug("min of gridData = %r"%(np.min(gridData)))
LOG.debug("max of gridData = %r"%(np.max(gridData)))
t1 = time()
data,dataIdx = self._grid2Gran_bilinearInterp(np.ravel(latitude),
np.ravel(longitude),
gridData.astype(np.float64),
gridLat,
gridLon)
t2 = time()
elapsedTime = t2-t1
LOG.info("Granulation took %f seconds for %d points" % (elapsedTime,latitude.size))
data = data.reshape(latitude.shape)
dataIdx = dataIdx.reshape(latitude.shape)
LOG.debug("Shape of granulated %s data is %s" % (self.collectionShortName,np.shape(data)))
LOG.debug("Shape of granulated %s dataIdx is %s" % (self.collectionShortName,np.shape(dataIdx)))
# Moderate resolution trim table arrays. These are
# bool arrays, and the trim pixels are set to True.
modTrimMask = self.trimObj.createModTrimArray(nscans=48,trimType=bool)
# Fill the required pixel trim rows in the granulated NCEP data with
# the ONBOARD_PT_FILL value for the correct data type
fillValue = self.trimObj.sdrTypeFill['ONBOARD_PT_FILL'][self.dataType]
data = ma.array(data,mask=modTrimMask,fill_value=fillValue)
self.data = data.filled()
def shipOutToFile(self):
''' Pass the current class instance to this Utils method to generate
a blob/asc file pair from the input ancillary data object.'''
return shipOutToFile(self)
#!/usr/bin/env python
import numpy as np
from matplotlib.colors import Colormap, normalize, LinearSegmentedColormap
from types import IntType, FloatType, ListType
from scipy import interpolate
def cmap_discretize(cmap, N):
"""Return a discrete colormap from the continuous colormap cmap.
cmap: colormap instance, eg. cm.jet.
N: Number of colors.
Example
x = resize(arange(100), (5,100))
djet = cmap_discretize(cm.jet, 5)
imshow(x, cmap=djet)
"""
cdict = cmap._segmentdata.copy()
# N colors
colors_i = np.linspace(0,1.,N)
# N+1 indices
indices = np.linspace(0,1.,N+1)
for key in ('red','green','blue'):
# Find the N colors
D = np.array(cdict[key])
I = interpolate.interp1d(D[:,0], D[:,1])
#I = np.interp(indices,D[:,0], D[:,1])
colors = I(colors_i)
# Place these colors at the correct indices.
A = np.zeros((N+1,3), float)
A[:,0] = indices
A[1:,1] = colors
A[:-1,2] = colors
# Create a tuple for the dictionary.
L = []
for l in A:
L.append(tuple(l))
cdict[key] = tuple(L)
# Return colormap object.
return LinearSegmentedColormap('colormap',cdict,1024)
def cmap_reverse(my_cmap):
my_newdict = {}
for items in my_cmap._segmentdata :
item_tuple = my_cmap._segmentdata[items]
new_item_tuple = []
item_tuple_revIter = reversed(item_tuple)
for segments in item_tuple :
dummyTuple = item_tuple_revIter.next()
new_item_tuple.append(tuple((1.-dummyTuple[0],dummyTuple[2],dummyTuple[1])))
my_newdict[items] = tuple(new_item_tuple)
return LinearSegmentedColormap('my_newcolormap',my_newdict,256)
####################################################################
### Inserts black segment into colourmap up to splitVal, ###
### compressing rest of colourmap towards high end. Colours ###
### in colourmap are preserved. ###
####################################################################
def cmap_preTruncate(my_cmap,splitVal):
my_newdict = {}
for items in my_cmap._segmentdata :
item_tuple = my_cmap._segmentdata[items]
new_item_tuple = []
new_item_tuple.append(tuple((0.0,0.0,0.0)))
new_item_tuple.append(tuple((splitVal,0.0,item_tuple[0][2])))
item_tuple_Iter = iter(item_tuple)
item_tuple_Iter.next()
while 1 :
try :
dummyTuple = item_tuple_Iter.next()
newVal = splitVal + dummyTuple[0]*(1.0 - splitVal)
new_item_tuple.append(tuple((newVal,dummyTuple[2],dummyTuple[1])))
except StopIteration :
break
my_newdict[items] = tuple(new_item_tuple)
return LinearSegmentedColormap('my_newcolormap',my_newdict,256)
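# Example usage (illustrative): render the bottom 20% of the colourbar black,
# compressing cm.jet into the remaining range.
#   import matplotlib.cm as cm
#   black_jet = cmap_preTruncate(cm.jet, 0.2)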
####################################################################
### Replaces colourmap upto splitVal with black, and rescales ###
### colourmap so that the transition occurs at startVal. ###
### Colours in original colourmap up to splitval are lost. ###
####################################################################
def cmap_preTruncate2(my_cmap,startVal,splitVal):
my_newdict = {}
for items in my_cmap._segmentdata :
item_tuple = my_cmap._segmentdata[items]
colourTuple = []
colourTupleNew = []
# Create tuple containing tuples with colorbar coord > splitVal
item_tuple_Iter = iter(item_tuple)
while 1 :
try :
dummyTuple = item_tuple_Iter.next()
if (dummyTuple[0] > splitVal) :
colourTuple.append([dummyTuple[0],dummyTuple[1],dummyTuple[2]])
except StopIteration :
break
# Rescale colorbar coord to range [ startVal-->1.0 ]
oldRange = colourTuple[-1][0] - colourTuple[0][0]
newRange = 1.0 - startVal
for tup in np.arange(np.shape(colourTuple)[0]) :
newCbarCoord = newRange*(colourTuple[tup][0]-colourTuple[0][0])/oldRange + startVal
colourTupleNew.append([newCbarCoord,colourTuple[tup][1],colourTuple[tup][2]])
colourTupleNew[0][1] = 0.0
new_item_tuple = []
new_item_tuple.append((0.0,0.0,0.0))
colourTupleNew_iter = iter(colourTupleNew)
while 1 :
try :
new_item_tuple.append(colourTupleNew_iter.next())
except StopIteration :
break
my_newdict[items] = tuple(new_item_tuple)
return LinearSegmentedColormap('my_newcolormap',my_newdict,256)
-/* file_Date = $Date$
- * file_Revision = $Revision$
- * file_Author = $Author$
- * file_HeadURL = $HeadURL$
- * file_Id = $Id$
+/* file_Date = $Date: 2014-09-08 17:09:53 -0500 (Mon, 08 Sep 2014) $
+ * file_Revision = $Revision: 2226 $
+ * file_Author = $Author: geoffc $
+ * file_HeadURL = $HeadURL: https://svn.ssec.wisc.edu/repos/jpss_adl/trunk/scripts/edr/griddingAndGranulation.c $
+ * file_Id = $Id: griddingAndGranulation.c 2226 2014-09-08 22:09:53Z geoffc $
*
* ________________________
* griddingAndGranulation.c
@@ -385,7 +385,7 @@ int gran2grid_moments(double *lat,
/*
* _________
- * grid2gran
+ * grid2gran_nearest
* _________
*
* This routine takes the regular grid gridData[nGridRows*nGridCols],
@@ -395,8 +395,8 @@ int gran2grid_moments(double *lat,
*
* Inputs:
*
- * gridLat : Non-regular latitude grid (nGridRows*nGridCols)
- * gridLon : Non-regular longitude grid (nGridRows*nGridCols)
+ * gridLat : Regular latitude grid (nGridRows*nGridCols)
+ * gridLon : Regular longitude grid (nGridRows*nGridCols)
* gridData : Data array on grid defined by gridLat and gridLon
* grid (nGridRows*nGridCols)
* lat : Non-regular latitude grid (nData)
@@ -413,17 +413,17 @@ int gran2grid_moments(double *lat,
*
*
*/
-int grid2gran(double *lat,
-              double *lon,
-              double *data,
-              long nData,
-              double *gridLat,
-              double *gridLon,
-              double *gridData,
-              long *gridDataIdx,
-              int nGridRows,
-              int nGridCols
-              )
+int grid2gran_nearest(double *lat,
+                      double *lon,
+                      double *data,
+                      long nData,
+                      double *gridLat,
+                      double *gridLon,
+                      double *gridData,
+                      long *gridDataIdx,
+                      int nGridRows,
+                      int nGridCols
+                      )
{
long idx;
@@ -439,30 +439,30 @@ int grid2gran(double *lat,
int crnrPt,snapCrnrPt;
int rowInBounds,colInBounds;
printf("Shape of data is (%ld)\n",nData);
printf("Shape of gridData is (%d, %d)\n", nGridRows,nGridCols);
printf("TRUE = %d\n", TRUE);
printf("FALSE = %d\n", FALSE);
/*printf("Shape of data is (%ld)\n",nData);*/
/*printf("Shape of gridData is (%d, %d)\n", nGridRows,nGridCols);*/
/*printf("TRUE = %d\n", TRUE);*/
/*printf("FALSE = %d\n", FALSE);*/
int numShow = 10;
int i;
printf("\nDisplaying lat,lon,data...\n");
for (i=0;i<numShow;i++){
printf("%6.1f %6.1f %6.1f\n",lat[i],lon[i],data[i]);
}
/*printf("\nDisplaying lat,lon,data...\n");*/
/*for (i=0;i<numShow;i++){*/
/*printf("%6.1f %6.1f %6.1f\n",lat[i],lon[i],data[i]);*/
/*}*/
// Determine the lat and lon grid spacings
printf("nGridRows = %d\n", nGridRows);
printf("nGridCols = %d\n", nGridCols);
printf("gridLat[nGridCols] = %f\n", gridLat[nGridCols]);
printf("gridLat[0] = %f\n", gridLat[0]);
printf("gridLon[1] = %f\n", gridLon[1]);
printf("gridLon[0] = %f\n", gridLon[0]);
/*printf("nGridRows = %d\n", nGridRows);*/
/*printf("nGridCols = %d\n", nGridCols);*/
/*printf("gridLat[nGridCols] = %f\n", gridLat[nGridCols]);*/
/*printf("gridLat[0] = %f\n", gridLat[0]);*/
/*printf("gridLon[1] = %f\n", gridLon[1]);*/
/*printf("gridLon[0] = %f\n", gridLon[0]);*/
gridLatInc = fabs(gridLat[nGridCols]-gridLat[0]);
gridLonInc = fabs(gridLon[1]-gridLon[0]);
printf("gridLatInc,gridLonInc = (%6.1f %6.1f)\n",gridLatInc,gridLonInc);
/*printf("gridLatInc,gridLonInc = (%8.3f %8.3f)\n",gridLatInc,gridLonInc);*/
// Loop through non-gridded data points, find matching gridpoint, and assign
// gridpoint data value to that non-gridded data point.
@@ -553,3 +553,341 @@ int grid2gran(double *lat,
return(0);
}
/*
* _________
* grid2gran_weightedAvg
* _________
*
* This routine takes the regular grid gridData[nGridRows*nGridCols],
 * defined by gridLat[nGridRows*nGridCols] and gridLon[nGridRows*nGridCols],
* and regrids to the data[nData] array, defined on the non-regular grid
* defined by lat[ndata] and lon[nData].
*
* Inputs:
*
 * gridLat : Regular latitude grid (nGridRows*nGridCols)
 * gridLon : Regular longitude grid (nGridRows*nGridCols)
* gridData : Data array on grid defined by gridLat and gridLon
* grid (nGridRows*nGridCols)
* lat : Non-regular latitude grid (nData)
* lon : Non-regular longitude grid (nData)
* nData : Length of non-regular grid arrays
* nGridRows : Number of grid rows of the regular grid
* nGridCols : Number of grid columns of the regular grid
*
* Outputs:
*
* data : Data array on non-regular grid defined by lat and lon (nData)
* gridDataIdx : Array of indices into gridData, defined by lat and lon
* non-regular grid (nData)
*
*
*/
int grid2gran_weightedAvg(double *lat,
double *lon,
double *data,
long nData,
double *gridLat,
double *gridLon,
double *gridData,
long *gridDataIdx,
int nGridRows,
int nGridCols
)
{
long idx;
int latGridIdxLo, latGridIdxHi, lonGridIdxLo, lonGridIdxHi;
double latVal,lonVal,dataVal;
double gridLatInc,gridLonInc;
int gridLatPt,gridLonPt;
int gridLatPoints[4], gridLonPoints[4];
double weight, weights[4], weightSum, weightedSum, eps;
double wAvgData;
double minDist,dist,latDist,lonDist;
double gridLatVal,gridLonVal;
int crnrPt,snapCrnrPt;
int rowInBounds,colInBounds;
/*printf("Shape of data is (%ld)\n",nData);*/
/*printf("Shape of gridData is (%d, %d)\n", nGridRows,nGridCols);*/
/*printf("TRUE = %d\n", TRUE);*/
/*printf("FALSE = %d\n", FALSE);*/
int numShow = 10;
int i;
/*printf("\nDisplaying lat,lon,data...\n");*/
/*for (i=0;i<numShow;i++){*/
/*printf("%6.1f %6.1f %6.1f\n",lat[i],lon[i],data[i]);*/
/*}*/
// Determine the lat and lon grid spacings
/*printf("nGridRows = %d\n", nGridRows);*/
/*printf("nGridCols = %d\n", nGridCols);*/
/*printf("gridLat[nGridCols] = %f\n", gridLat[nGridCols]);*/
/*printf("gridLat[0] = %f\n", gridLat[0]);*/
/*printf("gridLon[1] = %f\n", gridLon[1]);*/
/*printf("gridLon[0] = %f\n", gridLon[0]);*/
gridLatInc = fabs(gridLat[nGridCols]-gridLat[0]);
gridLonInc = fabs(gridLon[1]-gridLon[0]);
/*printf("gridLatInc,gridLonInc = (%6.1f %6.1f)\n",gridLatInc,gridLonInc);*/
// Loop through non-gridded data points, find matching gridpoint, and assign
// gridpoint data value to that non-gridded data point.
eps = 1.0e-12;
for (idx=0;idx<nData;idx++){
/*for (idx=0;idx<numShow;idx++){*/
latVal = lat[idx];
lonVal = lon[idx];
// Determine lat/lon grid indices which bound the non-gridded point
latGridIdxLo = (int) floor((latVal-gridLat[0])/gridLatInc);
latGridIdxHi = latGridIdxLo + 1;
lonGridIdxLo = (int) floor((lonVal-gridLon[0])/gridLonInc);
lonGridIdxHi = lonGridIdxLo + 1;
rowInBounds = TRUE;
colInBounds = TRUE;
// If the grid indices bounding the non-gridded point are off the
// grid, mark this non-gridded point as out-of-bounds.
if ((latGridIdxLo<0) || (latGridIdxHi>=nGridRows)){
rowInBounds = FALSE;
/*printf("Row idx out of bounds...\n");*/
}
if ((lonGridIdxLo<0) || (lonGridIdxHi>=nGridCols)){
colInBounds = FALSE;
/*printf("Column idx out of bounds...\n");*/
}
if (rowInBounds==FALSE){
continue;
}else if (colInBounds==FALSE){
continue;
}else{
gridLatPoints[0] = latGridIdxLo;
gridLatPoints[1] = latGridIdxLo;
gridLatPoints[2] = latGridIdxHi;
gridLatPoints[3] = latGridIdxHi;
gridLonPoints[0] = lonGridIdxLo;
gridLonPoints[1] = lonGridIdxHi;
gridLonPoints[2] = lonGridIdxLo;
gridLonPoints[3] = lonGridIdxHi;
minDist = 1.e+30;
snapCrnrPt = 0;
weightSum = 0.;
weightedSum = 0.;
wAvgData = 0.;
// Loop through the corners bounding the non-gridded point,
// to calculate the inverse-distance weights.
for (crnrPt=0;crnrPt<4;crnrPt++){
gridLatPt = (int) gridLatPoints[crnrPt];
gridLonPt = (int) gridLonPoints[crnrPt];
// Get the lat and lon values at this corner
gridLatVal = gridLat[nGridCols * gridLatPt + gridLonPt];
gridLonVal = gridLon[nGridCols * gridLatPt + gridLonPt];
// The Pythagorean distance of this corner from the non-gridded
// data point...
latDist = latVal-gridLatVal;
lonDist = lonVal-gridLonVal;
dist = sqrt(latDist*latDist + lonDist*lonDist);
                weight = 1./(dist + eps);
                weightedSum += weight * gridData[nGridCols * gridLatPt + gridLonPt];
                weightSum += weight;
                // Keep track of which corner is closest to the
                // non-gridded point.
                if (dist < minDist){
                    minDist = dist;
                    snapCrnrPt = crnrPt;
                }
            }
            // Assign the inverse-distance weighted average of the four
            // corner values to this non-gridded data point.
            data[idx] = weightedSum / weightSum;
            // Save the index of the grid point closest to the non-gridded
            // data point.
            gridDataIdx[idx] = nGridCols * gridLatPoints[snapCrnrPt] + gridLonPoints[snapCrnrPt];
}
}
return(0);
}
/*
* _________
* grid2gran_bilinearInterp
* _________
*
* This routine takes the regular grid gridData[nGridRows*nGridCols],
 * defined by gridLat[nGridRows*nGridCols] and gridLon[nGridRows*nGridCols],
* and regrids to the data[nData] array, defined on the non-regular grid
* defined by lat[ndata] and lon[nData].
*
* Inputs:
*
 * gridLat : Regular latitude grid (nGridRows*nGridCols)
 * gridLon : Regular longitude grid (nGridRows*nGridCols)
* gridData : Data array on grid defined by gridLat and gridLon
* grid (nGridRows*nGridCols)
* lat : Non-regular latitude grid (nData)
* lon : Non-regular longitude grid (nData)
* nData : Length of non-regular grid arrays
* nGridRows : Number of grid rows of the regular grid
* nGridCols : Number of grid columns of the regular grid
*
* Outputs:
*
* data : Data array on non-regular grid defined by lat and lon (nData)
* gridDataIdx : Array of indices into gridData, defined by lat and lon
* non-regular grid (nData)
*
*
* The notation below is based on the description at
* http://en.wikipedia.org/wiki/Bilinear_interpolation
*
* | | |
* |Q_12 |R_2 |Q_22
* lat_hi ------o----------o----------o----- \ \
* | | | | |
* | | | |-- dlat_plus |
* | | | | |
* latVal --|--------- P ---------|----- / |-- dlat
* | | | \ |
* | | | | |
* | | | |-- dlat_plus |
* |Q_11 |R_1 |Q_21 | |
* lat_lo ------o----------o----------o----- / /
* | | |
* | | |
* | lonVal |
* lon_lo lon_hi
*
* \__________/\_________/
* | |
* dlon_minus dlon_plus
*
* \_____________________/
* |
* dlon
*
*
*/
int grid2gran_bilinearInterp(double *lat,
double *lon,
double *data,
long nData,
double *gridLat,
double *gridLon,
double *gridData,
long *gridDataIdx,
int nGridRows,
int nGridCols
)
{
long idx;
int latGridIdxLo, latGridIdxHi, lonGridIdxLo, lonGridIdxHi;
double latVal,lonVal,dataVal;
double gridLatInc,gridLonInc;
double lat_lo, lat_hi, dlat, dlat_minus, dlat_plus;
double lon_lo, lon_hi, dlon, dlon_minus, dlon_plus;
double Q_11, Q_12, Q_21, Q_22;
double f_R1, f_R2, f_P;
int gridLatPt,gridLonPt;
double eps;
double gridLatVal,gridLonVal;
int rowInBounds,colInBounds;
int numShow = 10;
int i;
// Calculate the grid lat and lon increments
gridLatInc = fabs(gridLat[nGridCols]-gridLat[0]);
gridLonInc = fabs(gridLon[1]-gridLon[0]);
// Loop through non-gridded data points, find the bounding gridpoints,
// bilinearly interpolate.
eps = 1.0e-12;
for (idx=0;idx<nData;idx++){
latVal = lat[idx];
lonVal = lon[idx];
// Determine lat/lon grid indices which bound the non-gridded point
latGridIdxLo = (int) floor((latVal-gridLat[0])/gridLatInc);
latGridIdxHi = latGridIdxLo + 1;
lonGridIdxLo = (int) floor((lonVal-gridLon[0])/gridLonInc);
lonGridIdxHi = lonGridIdxLo + 1;
rowInBounds = TRUE;
colInBounds = TRUE;
// If the grid indices bounding the non-gridded point are off the
// grid, mark this non-gridded point as out-of-bounds.
if ((latGridIdxLo<0) || (latGridIdxHi>=nGridRows)){
rowInBounds = FALSE;
}
if ((lonGridIdxLo<0) || (lonGridIdxHi>=nGridCols)){
colInBounds = FALSE;
}
if (rowInBounds==FALSE){
continue;
}else if (colInBounds==FALSE){
continue;
}else{
lat_lo = gridLat[nGridCols * latGridIdxLo + lonGridIdxLo];
lat_hi = gridLat[nGridCols * latGridIdxHi + lonGridIdxLo];
dlat_minus = latVal - lat_lo;
dlat_plus = lat_hi - latVal;
dlat = lat_hi - lat_lo;
lon_lo = gridLon[nGridCols * latGridIdxLo + lonGridIdxLo];
lon_hi = gridLon[nGridCols * latGridIdxLo + lonGridIdxHi];
dlon_minus = lonVal - lon_lo;
dlon_plus = lon_hi - lonVal;
dlon = lon_hi - lon_lo;
Q_11 = gridData[nGridCols * latGridIdxLo + lonGridIdxLo];
Q_12 = gridData[nGridCols * latGridIdxHi + lonGridIdxLo];
Q_21 = gridData[nGridCols * latGridIdxLo + lonGridIdxHi];
Q_22 = gridData[nGridCols * latGridIdxHi + lonGridIdxHi];
f_R1 = (dlon_plus * Q_11 + dlon_minus * Q_21) / dlon;
f_R2 = (dlon_plus * Q_12 + dlon_minus * Q_22) / dlon;
f_P = (dlat_plus * f_R1 + dlat_minus * f_R2) / dlat;
            // Assign the bilinearly interpolated value to this
            // non-gridded data point.
            data[idx] = f_P;
            // Save the index of the lower-left corner of the bounding
            // grid cell for this non-gridded data point.
            gridDataIdx[idx] = nGridCols * latGridIdxLo + lonGridIdxLo;
}
}
return(0);
}
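# For reference, a minimal NumPy sketch of the same bilinear scheme as
# grid2gran_bilinearInterp() above (illustrative only: it assumes a regular
# grid with uniform spacings dlat/dlon starting at lat0/lon0 and, unlike the
# C routine above, does no bounds checking).
import numpy as np

def bilinear_interp(lat, lon, lat0, lon0, dlat, dlon, gridData):
    # Row/column indices of the lower-left (Q_11) corner of the bounding cell
    i = np.floor((lat - lat0) / dlat).astype(int)
    j = np.floor((lon - lon0) / dlon).astype(int)
    # Fractional offsets within the cell (t along lat, u along lon)
    t = (lat - (lat0 + i * dlat)) / dlat
    u = (lon - (lon0 + j * dlon)) / dlon
    # The four corner values Q_11, Q_21, Q_12, Q_22 of the diagram above
    Q11 = gridData[i, j]
    Q21 = gridData[i, j + 1]
    Q12 = gridData[i + 1, j]
    Q22 = gridData[i + 1, j + 1]
    # f_P = (1-t)*f_R1 + t*f_R2, with f_R1, f_R2 interpolated along longitude
    return ((1 - t) * ((1 - u) * Q11 + u * Q21)
            + t * ((1 - u) * Q12 + u * Q22))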
#!/usr/bin/env python
# encoding: utf-8
"""
netCDF2bin_imager.py
Purpose: Read band arrays from a netcdf4/cf created by pytroll/mpop, and write them to flat
binary files.
Input:
* netCDF file containing band arrays
Output:
* flat binary files for each band
Details:
*
Preconditions:
* netCDF4 python module
Optional:
*
Minimum commandline:
python netCDF2bin_imager.py --input_file=INPUTFILE
where...
INPUTFILE: The fully qualified path to the input netCDF file.
Created by Geoff Cureton <geoff.cureton@ssec.wisc.edu> on 2014-03-25.
Copyright (c) 2014 University of Wisconsin Regents. All rights reserved.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
file_Date = '$Date: 2014-01-23 09:02:22 -0600 (Thu, 23 Jan 2014) $'
file_Revision = '$Revision: 1885 $'
file_Author = '$Author: kathys $'
file_HeadURL = '$HeadURL: https://svn.ssec.wisc.edu/repos/jpss_adl/trunk/scripts/edr/adl_viirs_edr.py $'
file_Id = '$Id: adl_viirs_edr.py 1885 2014-01-23 15:02:22Z kathys $'
__author__ = 'Geoff Cureton <geoff.cureton@ssec.wisc.edu>'
__version__ = '$Id: adl_viirs_edr.py 1885 2014-01-23 15:02:22Z kathys $'
__docformat__ = 'Epytext'
import os, sys, logging, traceback
from os import path,uname,environ
import string
import re
import uuid
from shutil import rmtree,copyfile
from glob import glob
from time import time
from datetime import datetime,timedelta
import numpy as np
from numpy import ma
import copy
import matplotlib
import matplotlib.cm as cm
from matplotlib.colors import ListedColormap
from matplotlib.figure import Figure
matplotlib.use('Agg')
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
# This must come *after* the backend is specified.
import matplotlib.pyplot as ppl
from mpl_toolkits.basemap import Basemap
from netCDF4 import Dataset
from netCDF4 import num2date
def listData(file_attrs,file_dicts):
'''
List various file and data attributes
'''
print "\n### File Attributes ###\n"
for keys in file_attrs.keys():
print "{} : {}".format(keys,file_attrs[keys])
print "\n### File Dimensions ###\n"
print "Keys = {}".format(file_dicts['dimensions'].keys())
for keys in file_dicts['dimensions'].keys():
print "{} : {}\n".format(keys,file_dicts['dimensions'][keys])
print "\n### File Variables ###\n"
var_attrs = ['_FillValue','scale_factor','add_offset','coordinates','long_name','resolution','units','ndim','size','dtype','shape','dimensions']
print "Keys = {}".format(file_dicts['variables'].keys())
for var_key in file_dicts['variables'].keys():
print "\nvar = {}".format(var_key)
var = file_dicts['variables'][var_key]
for attr in var_attrs:
try:
var_attr = getattr(var,attr)
print "{}.{} = {}".format(var_key,attr,var_attr)
except:
pass
def plotData(data,data_mask,units,pngName,stride=1,dpi=200):
'''
Plot the input dataset in swath projection
'''
# General Setup
dataShape = data.shape
aspectRatio = float(dataShape[0])/float(dataShape[1])
figWidth,figHeight = 4.,aspectRatio*4.
ax_rect = [0.05, 0.15, 0.90, 0.80 ] # [left,bottom,width,height]
fig = Figure(figsize=(figWidth,figHeight))
canvas = FigureCanvas(fig)
ax = fig.add_axes(ax_rect)
data = ma.array(data,mask=data_mask)
im = ax.imshow(data,interpolation='nearest')
if units == '%':
txt = ax.set_title('Reflectance')
elif units == 'K':
txt = ax.set_title('Brightness Temperature')
else:
txt = ax.set_title('scaled data')
ppl.setp(ax.get_xticklines(),visible=False)
ppl.setp(ax.get_yticklines(),visible=False)
ppl.setp(ax.get_xticklabels(), visible=False)
ppl.setp(ax.get_yticklabels(), visible=False)
ax.set_aspect('equal')
cax_rect = [0.05 , 0.05, 0.90 , 0.05 ] # [left,bottom,width,height]
cax = fig.add_axes(cax_rect,frameon=False) # setup colorbar axes
cb = fig.colorbar(im, cax=cax, orientation='horizontal')
if units == '%':
txt = cax.set_title('Reflectance (%)')
elif units == 'K':
txt = cax.set_title('Brightness Temperature (K)')
else:
txt = cax.set_title('scaled units')
# Redraw the figure
canvas.draw()
canvas.print_figure(pngName,dpi=dpi)
def plotMapData(lat,lon,data,data_mask,units,pngName,stride=1,pointSize=1.,scatterPlot=False, \
lat_0=None,lon_0=None,latMin=None,lonMin=None,latMax=None,lonMax=None,dpi=200):
'''
Plot the input dataset in mapped to particular projection
'''
# General Setup
figWidth,figHeight = 8.,8.
ax_rect = [0.05, 0.15, 0.90, 0.80 ] # [left,bottom,width,height]
fig = Figure(figsize=(figWidth,figHeight))
canvas = FigureCanvas(fig)
ax = fig.add_axes(ax_rect)
lat_0,lon_0 = 0.,0.
#print "lonMin = {} degrees".format(lonMin)
#print "lonMax = {} degrees".format(lonMax)
#print "latMin = {} degrees".format(latMin)
#print "latMax = {} degrees".format(latMax)
m = Basemap(projection='cyl',lon_0=lon_0,lat_0=lat_0,lat_ts=lat_0,ax=ax,
llcrnrlon=lonMin,
llcrnrlat=latMin,
urcrnrlon=lonMax,
urcrnrlat=latMax
)
x,y=m(lon[::stride,::stride],lat[::stride,::stride])
m.drawcoastlines()
#m.drawstates()
m.drawcountries()
m.fillcontinents(color='0.85',zorder=0)
m.drawparallels(np.arange( -90, 91,30), color = '0.25', linewidth = 0.5,labels=[1,0,1,0])
m.drawmeridians(np.arange(-180,180,30), color = '0.25', linewidth = 0.5,labels=[1,0,1,0])
data = ma.array(data[::stride,::stride],mask=data_mask[::stride,::stride])
if scatterPlot:
cs = m.scatter(x,y,s=pointSize,c=data,axes=ax,edgecolors='none')
else:
cs = m.pcolor(x,y,data,axes=ax,edgecolors='none',antialiased=False)
if units == '%':
txt = ax.set_title('Reflectance')
elif units == 'K':
txt = ax.set_title('Brightness Temperature')
else:
txt = ax.set_title('scaled data')
ppl.setp(ax.get_xticklines(),visible=False)
ppl.setp(ax.get_yticklines(),visible=False)
ppl.setp(ax.get_xticklabels(), visible=False)
ppl.setp(ax.get_yticklabels(), visible=False)
#ax.set_aspect('equal')
cax_rect = [0.05 , 0.05, 0.90 , 0.05 ] # [left,bottom,width,height]
cax = fig.add_axes(cax_rect,frameon=False) # setup colorbar axes
#cb = fig.colorbar(im, cax=cax, orientation='horizontal')
cb = fig.colorbar(cs, cax=cax, orientation='horizontal')
if units == '%':
txt = cax.set_title('Reflectance (%)')
elif units == 'K':
txt = cax.set_title('Brightness Temperature (K)')
else:
txt = cax.set_title('scaled units')
# Redraw the figure
canvas.draw()
canvas.print_figure(pngName,dpi=dpi)
class dataset():
'''
Define a "dataset" datatpe, which contains useful attributes for a particular
dataset.
'''
def __init__(self):
        self.bandNum = None
self.units = None
self.dpi = 200
self.arrayName = None
self.mapRes = None
def _argparse():
'''
Method to encapsulate the option parsing and various setup tasks.
'''
import argparse
endianChoices = ['little','big']
dataChoices=['REF1','REF2','REF3','BT1','BT2','REF','BT','ALL']
mapRes = ['c','l','i']
defaults = {
'input_file':None,
'dataset':'ALL',
'stride':1,
'unscaled':False,
'scatter_plot':False,
'dpi':200,
'list_attrs':False,
'pointSize':1,
'make_plots':False,
'map_plot':False,
'no_binaries':False
}
description = '''Read band arrays from a netcdf4/cf created by pytroll/mpop, and write them to flat
binary files.'''
usage = "usage: %prog [mandatory args] [options]"
version = __version__
parser = argparse.ArgumentParser()
# Mandatory arguments
parser.add_argument('-i','--input_file',
action='store',
dest='input_file',
type=str,
required=True,
help="The fully qualified path to the input file."
)
# Optional arguments
parser.add_argument('--dset',
action="store",
dest="dataset",
default=defaults["dataset"],
type=str,
choices=dataChoices,
                        help='''The imager dataset(s) to process.\n\n
Possible values are...
{}.
'''.format(dataChoices.__str__()[1:-1])
)
parser.add_argument('--unscaled',
action="store_true",
dest="doUnscale",
default=defaults["unscaled"],
help="Attempt to unscale the band arrays before output to flat binary files. [default: {}]".format(defaults["unscaled"])
)
parser.add_argument('--make_plots',
action="store_true",
dest="doPlots",
default=defaults["make_plots"],
help="Plot the band arrays to png files. [default: {}]".format(defaults["make_plots"])
)
parser.add_argument('--map_plot',
action="store_true",
dest="doMapPlots",
default=defaults["map_plot"],
help="Plot the band arrays to png files, using a map projection (has no effect if --make_plots is not set). [default: {}]".format(defaults["map_plot"])
)
parser.add_argument('--no_binaries',
action="store_true",
dest="noBinaries",
default=defaults["no_binaries"],
help="Plot the band arrays to png files. [default: {}]".format(defaults["make_plots"])
)
parser.add_argument('--plotMin',
action="store",
dest="plotMin",
type=float,
help="Minimum value to plot."
)
parser.add_argument('--plotMax',
action="store",
dest="plotMax",
type=float,
help="Maximum value to plot."
)
parser.add_argument('-d','--dpi',
action="store",
dest="dpi",
                        default=defaults["dpi"],
type=float,
help="The resolution in dots per inch of the output png file. [default: {}]".format(defaults["dpi"])
)
parser.add_argument('-S','--stride',
action="store",
dest="stride",
default=defaults["stride"],
type=int,
help="Sample every STRIDE pixels in the band data. [default: {}]".format(defaults["stride"])
)
parser.add_argument('--scatter_plot',
action="store_true",
dest="doScatterPlot",
default=defaults["scatter_plot"],
help="Generate the plot using a scatterplot approach."
)
parser.add_argument('--list_attrs',
action="store_true",
dest="doListAttrs",
default=defaults["list_attrs"],
help="List some file and dataset attributes."
)
parser.add_argument('-P','--pointSize',
action="store",
dest="pointSize",
default=defaults["pointSize"],
type=float,
help="Size of the plot point used to represent each pixel. [default: {}]".format(defaults["pointSize"])
)
parser.add_argument('--lat_0',
action="store",
dest="lat_0",
type=float,
help="Center latitude of plot."
)
parser.add_argument('--lon_0',
action="store",
dest="lon_0",
type=float,
help="Center longitude of plot."
)
parser.add_argument('--latMin',
action="store",
dest="latMin",
type=float,
help="Minimum latitude to plot."
)
parser.add_argument('--latMax',
action="store",
dest="latMax",
type=float,
help="Maximum latitude to plot."
)
parser.add_argument('--lonMin',
action="store",
dest="lonMin",
type=float,
help="Minimum longitude to plot."
)
parser.add_argument('--lonMax',
action="store",
dest="lonMax",
type=float,
help="Maximum longitude to plot."
)
args = parser.parse_args()
return args
def main():
options = _argparse()
stride = options.stride
lat_0 = options.lat_0
lon_0 = options.lon_0
latMin = options.latMin
lonMin = options.lonMin
latMax = options.latMax
lonMax = options.lonMax
doScatterPlot = options.doScatterPlot
pointSize = options.pointSize
dpi = options.dpi
# Create the dataset dictionaries
#dataDict = {}
#if options.dataset == "REF":
#if options.dataset == "BT":
#if options.dataset == "ALL":
dataDict = {
'REF1',
'REF2',
'REF3',
'BT1',
'BT2',
'REF',
'BT',
'ALL'
}
# Open the file object
print "Opening {} ...".format(options.input_file)
fileObj = Dataset(options.input_file)
file_attrs = {}
file_attrs['history'] = fileObj.history
file_attrs['orbit'] = fileObj.orbit
file_attrs['platform'] = fileObj.platform
file_dicts = {}
file_dicts['dimensions'] = fileObj.dimensions
file_dicts['variables'] = fileObj.variables
# List various file and data attributes
if options.doListAttrs:
listData(file_attrs,file_dicts)
if not (options.noBinaries and not options.doPlots):
print "We are reading the file Datasets"
# Output the lats and lons
lats = file_dicts['variables']['lat0'][:,:].astype(np.float32)
lons = file_dicts['variables']['lon0'][:,:].astype(np.float32)
if not options.noBinaries:
geo_fileName = string.replace(options.input_file,'.nc','_latitude.bin')
print "Writing latitude to {}...".format(geo_fileName)
lats.tofile(geo_fileName)
geo_fileName = string.replace(options.input_file,'.nc','_longitude.bin')
print "Writing longitude to {}...".format(geo_fileName)
lons.tofile(geo_fileName)
# Output the bands
for band_var in ['Image0','Image1']:
#for band_var in ['Image0']:
nBands = file_dicts['variables'][band_var].shape[2]
dtype = file_dicts['variables'][band_var].dtype.name
units = ''
for band in range(nBands):
#for band in [0]:
var = file_dicts['variables'][band_var][:,:,band]
fillValue = file_dicts['variables'][band_var]._FillValue
print "fill value is {}".format(fillValue)
if band_var == 'Image1':
var_mask = ma.masked_inside(var,32720,32768).mask
else:
var_mask = ma.masked_equal(var,fillValue).mask
scale_factor = file_dicts['variables'][band_var].scale_factor[band]
offset = file_dicts['variables'][band_var].add_offset[band]
print "{} band {} (scale,offset) = ({},{})".format(band_var,band,scale_factor,offset)
if options.doUnscale :
dtype = 'float32'
units = file_dicts['variables'][band_var].units
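                # CF-style unpacking: physical = packed*scale_factor + add_offset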
var = var * scale_factor + offset
var = ma.array(var,mask=var_mask,fill_value=-999.9).filled()
print "{} band {} fill value = {}".format(band_var,band,np.min(var))
print "{} band {} min value = {}".format(band_var,band,np.min(ma.array(var,mask=var_mask)))
print "{} band {} max value = {}".format(band_var,band,np.max(ma.array(var,mask=var_mask)))
bin_fileName = string.replace(options.input_file,'.nc','')
bin_fileName = "{}_{}_band{}_{}.bin".format(bin_fileName,band_var,band,dtype)
if not options.noBinaries:
print "Writing {} band {} to {}...".format(band_var,band,bin_fileName)
var.astype(dtype).tofile(bin_fileName)
if options.doPlots:
png_fileName = string.replace(bin_fileName,'_{}.bin'.format(dtype),'.png')
print "Plotting {} band {} to {}...".format(band_var,band,png_fileName)
if options.doMapPlots:
plotMapData(lats,lons,var,var_mask,units,png_fileName,stride=stride,lat_0=lat_0,lon_0=lon_0,latMin=latMin,lonMin=lonMin,latMax=latMax,lonMax=lonMax,scatterPlot=doScatterPlot,pointSize=pointSize,dpi=dpi)
else:
plotData(var,var_mask,units,png_fileName,stride=stride,dpi=dpi)
print "Closing {} ...".format(options.input_file)
fileObj.close()
sys.exit(0)
if __name__=='__main__':
sys.exit(main())
import numpy as np
import scipy.interpolate
import scipy.ndimage
def congrid(a, newdims, method='linear', centre=False, minusone=False):
'''Arbitrary resampling of source array to new dimension sizes.
Currently only supports maintaining the same number of dimensions.
To use 1-D arrays, first promote them to shape (x,1).
Uses the same parameters and creates the same co-ordinate lookup points
as IDL''s congrid routine, which apparently originally came from a VAX/VMS
routine of the same name.
method:
neighbour - closest value from original data
nearest and linear - uses n x 1-D interpolations using
scipy.interpolate.interp1d
(see Numerical Recipes for validity of use of n 1-D interpolations)
spline - uses ndimage.map_coordinates
centre:
True - interpolation points are at the centres of the bins
False - points are at the front edge of the bin
minusone:
For example- inarray.shape = (i,j) & new dimensions = (x,y)
False - inarray is resampled by factors of (i/x) * (j/y)
True - inarray is resampled by(i-1)/(x-1) * (j-1)/(y-1)
This prevents extrapolation one element beyond bounds of input array.
'''
if not a.dtype in [np.float64, np.float32]:
a = np.cast[float](a)
m1 = np.cast[int](minusone)
ofs = np.cast[int](centre) * 0.5
old = np.array( a.shape )
ndims = len( a.shape )
if len( newdims ) != ndims:
print "[congrid] dimensions error. " \
"This routine currently only support " \
"rebinning to the same number of dimensions."
return None
newdims = np.asarray( newdims, dtype=float )
dimlist = []
if method == 'neighbour':
for i in range( ndims ):
base = np.indices(newdims)[i]
dimlist.append( (old[i] - m1) / (newdims[i] - m1) \
* (base + ofs) - ofs )
cd = np.array( dimlist ).round().astype(int)
newa = a[list( cd )]
return newa
elif method in ['nearest','linear']:
# calculate new dims
for i in range( ndims ):
base = np.arange( newdims[i] )
dimlist.append( (old[i] - m1) / (newdims[i] - m1) \
* (base + ofs) - ofs )
# specify old dims
olddims = [np.arange(i, dtype = np.float) for i in list( a.shape )]
# first interpolation - for ndims = any
mint = scipy.interpolate.interp1d( olddims[-1], a, kind=method )
newa = mint( dimlist[-1] )
trorder = [ndims - 1] + range( ndims - 1 )
for i in range( ndims - 2, -1, -1 ):
newa = newa.transpose( trorder )
mint = scipy.interpolate.interp1d( olddims[i], newa, kind=method )
newa = mint( dimlist[i] )
if ndims > 1 :
# need one more transpose to return to original dimensions
newa = newa.transpose( trorder )
return newa
elif method in ['spline']:
oslices = [ slice(0,j) for j in old ]
oldcoords = np.ogrid[oslices]
nslices = [ slice(0,j) for j in list(newdims) ]
newcoords = np.mgrid[nslices]
newcoords_dims = range(np.rank(newcoords))
#make first index last
newcoords_dims.append(newcoords_dims.pop(0))
newcoords_tr = newcoords.transpose(newcoords_dims)
# makes a view that affects newcoords
newcoords_tr += ofs
deltas = (np.asarray(old) - m1) / (newdims - m1)
newcoords_tr *= deltas
newcoords_tr -= ofs
newa = scipy.ndimage.map_coordinates(a, newcoords)
return newa
else :
print "Congrid error: Unrecognized interpolation type.\n", \
"Currently only \'neighbour\', \'nearest\',\'linear\',", \
"and \'spline\' are supported."
return None
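# Example usage (illustrative): downsample a 100x100 array to 25x25 with
# linear interpolation, lookup points at bin centres.
#   a = np.random.rand(100, 100)
#   b = congrid(a, (25, 25), method='linear', centre=True)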
function grid = snapgrid(spacing, geodata, param, first,grid)
% input:
% spacing - lat/lon grid spacing, e.g. 0.25, 0.5, 1.0, etc.
% geodata.lat, geodata.lon, geodata.satzen, geodata.sunzen
% param - the parameter you want to snap to grid, 'rad' for radiance or
% 'cld' for cloud
% first - 1=first; 0=not
% grid - regrid output from a previous run.
% type - 1=day; 0=night
% output:
% regrid - data structure with regridded fields
%
% Nadia Smith, April 2010
% Variation on Nick Bearson's code
% =======================================================
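% Example usage (illustrative):
%   grid = snapgrid(0.5, geodata, rad, 1, []);    % first granule
%   grid = snapgrid(0.5, geodata, rad, 0, grid);  % subsequent granules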
% if this is the first granule/file, then create new grids
if first==1
% day time
grid.d_dist = ones(180/spacing, 360/spacing)*NaN;
grid.d_satzen = ones(180/spacing, 360/spacing)*NaN;
grid.d_sunzen = ones(180/spacing, 360/spacing)*NaN;
grid.d_param = ones(180/spacing, 360/spacing)*NaN;
grid.d_type = ones(180/spacing, 360/spacing)*NaN;
% night time
grid.n_dist = ones(180/spacing, 360/spacing)*NaN;
grid.n_satzen = ones(180/spacing, 360/spacing)*NaN;
grid.n_sunzen = ones(180/spacing, 360/spacing)*NaN;
grid.n_param = ones(180/spacing, 360/spacing)*NaN;
grid.n_type = ones(180/spacing, 360/spacing)*NaN;
end
for i=1:length(geodata.lat(:,1))
for j=1:length(geodata.lat(1,:))
% get the lat/lon of the data point
lat = geodata.lat(i, j);
lon = geodata.lon(i, j);
if isnan(lat) == 0 && isnan(lon) == 0
% adjust the lon/lat to be 0-360 and 0-180, and then
% adjust them to our spacing grid for easier calculation
adjlat = ((lat + 90) / spacing);
adjlon = ((lon + 180) / spacing);
% now find the closest grid crossing
latbin = round(adjlat);
lonbin = round(adjlon);
% We don't want to reference the 0 spot, so we'll use the 180/360 instead (lat/lon)
if latbin == 0
latbin = 180 / spacing;
end
if lonbin == 0
lonbin = 360 / spacing;
end
% determine our point's distance from the grid crossing
% note that we're finding the hypotenuse distance, as that differs from just adding the two
dist = sqrt(((latbin - adjlat)^2) + ((lonbin - adjlon)^2));
if geodata.sunzen(i, j) < 84 % day time grid
% now we're ready to get the current distance in that bin and see if ours is closer
if (dist < (1/spacing)/4) && (isnan(grid.d_satzen(latbin, lonbin)) == 1 ||...
abs(geodata.satzen(i, j)) < abs(grid.d_satzen(latbin, lonbin)))
grid.d_dist(latbin, lonbin) = dist;
grid.d_satzen(latbin, lonbin) = geodata.satzen(i, j);
grid.d_sunzen(latbin, lonbin) = geodata.sunzen(i, j);
grid.d_param(latbin, lonbin) = param(i, j);
end
else % night time grid
% now we're ready to get the current distance in that bin and see if ours is closer
if (dist < (1/spacing)/4) && (isnan(grid.n_satzen(latbin, lonbin)) == 1 ||...
abs(geodata.satzen(i, j)) < abs(grid.n_satzen(latbin, lonbin)))
grid.n_dist(latbin, lonbin) = dist;
grid.n_satzen(latbin, lonbin) = geodata.satzen(i, j);
grid.n_sunzen(latbin, lonbin) = geodata.sunzen(i, j);
grid.n_param(latbin, lonbin) = param(i, j);
end
end
end
end
end