sentence1 (string, lengths 52-3.87M) | sentence2 (string, lengths 1-47.2k) | label (1 class: entailment)
---|---|---|
def get_aux_files(basename):
"""
Look for and return all the aux files that are associated with this filename.
Will look for:
background (_bkg.fits)
rms (_rms.fits)
mask (.mim)
catalogue (_comp.fits)
psf map (_psf.fits)
will return filenames if they exist, or None where they do not.
Parameters
----------
basename : str
The name/path of the input image.
Returns
-------
aux : dict
Dict of filenames or None with keys (bkg, rms, mask, cat, psf)
"""
base = os.path.splitext(basename)[0]
files = {"bkg": base + "_bkg.fits",
"rms": base + "_rms.fits",
"mask": base + ".mim",
"cat": base + "_comp.fits",
"psf": base + "_psf.fits"}
for k in files.keys():
if not os.path.exists(files[k]):
files[k] = None
return files | Look for and return all the aux files that are associated with this filename.
Will look for:
background (_bkg.fits)
rms (_rms.fits)
mask (.mim)
catalogue (_comp.fits)
psf map (_psf.fits)
will return filenames if they exist, or None where they do not.
Parameters
----------
basename : str
The name/path of the input image.
Returns
-------
aux : dict
Dict of filenames or None with keys (bkg, rms, mask, cat, psf) | entailment |
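A brief usage sketch of the helper above; the image name is hypothetical, and any file not found next to the image comes back as None:

```python
import os  # needed by get_aux_files above

aux = get_aux_files("1904-66_SIN.fits")   # hypothetical image name
for key in ("bkg", "rms", "mask", "cat", "psf"):
    print(key, "->", aux[key] if aux[key] else "not found")
```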
def _gen_flood_wrap(self, data, rmsimg, innerclip, outerclip=None, domask=False):
"""
Generator function.
Segment an image into islands and return one island at a time.
Needs to work for entire image, and also for components within an island.
Parameters
----------
data : 2d-array
Image array.
rmsimg : 2d-array
Noise image.
innerclip, outerclip : float
Seed (inner) and flood (outer) clipping values.
domask : bool
If True then look for a region mask in globals, only return islands that are within the region.
Default = False.
Yields
------
data_box : 2d-array
An island of sources with subthreshold values masked.
xmin, xmax, ymin, ymax : int
The corners of the data_box within the initial data array.
"""
if outerclip is None:
outerclip = innerclip
# compute SNR image (data has already been background subtracted)
snr = abs(data) / rmsimg
# mask of pixels that are above the outerclip
a = snr >= outerclip
# segmentation a la scipy
l, n = label(a)
f = find_objects(l)
if n == 0:
self.log.debug("There are no pixels above the clipping limit")
return
self.log.debug("{1} Found {0} islands total above flood limit".format(n, data.shape))
# Yield values as before, though they are not sorted by flux
for i in range(n):
xmin, xmax = f[i][0].start, f[i][0].stop
ymin, ymax = f[i][1].start, f[i][1].stop
if np.any(snr[xmin:xmax, ymin:ymax] > innerclip): # obey inner clip constraint
# self.log.info("{1} Island {0} is above the inner clip limit".format(i, data.shape))
data_box = copy.copy(data[xmin:xmax, ymin:ymax]) # copy so that we don't blank the master data
data_box[np.where(
snr[xmin:xmax, ymin:ymax] < outerclip)] = np.nan # blank pixels that are outside the outerclip
data_box[np.where(l[xmin:xmax, ymin:ymax] != i + 1)] = np.nan # blank out other summits
# check if there are any pixels left unmasked
if not np.any(np.isfinite(data_box)):
# self.log.info("{1} Island {0} has no non-masked pixels".format(i,data.shape))
continue
if domask and (self.global_data.region is not None):
y, x = np.where(snr[xmin:xmax, ymin:ymax] >= outerclip)
# convert indices of this sub region to indices in the greater image
yx = list(zip(y + ymin, x + xmin))
ra, dec = self.global_data.wcshelper.wcs.wcs_pix2world(yx, 1).transpose()
mask = self.global_data.region.sky_within(ra, dec, degin=True)
# if there are no un-masked pixels within the region then we skip this island.
if not np.any(mask):
continue
self.log.debug("Mask {0}".format(mask))
# self.log.info("{1} Island {0} will be fit".format(i, data.shape))
yield data_box, xmin, xmax, ymin, ymax | Generator function.
Segment an image into islands and return one island at a time.
Needs to work for entire image, and also for components within an island.
Parameters
----------
data : 2d-array
Image array.
rmsimg : 2d-array
Noise image.
innerclip, outerclip : float
Seed (inner) and flood (outer) clipping values.
domask : bool
If True then look for a region mask in globals, only return islands that are within the region.
Default = False.
Yields
------
data_box : 2d-array
An island of sources with subthreshold values masked.
xmin, xmax, ymin, ymax : int
The corners of the data_box within the initial data array. | entailment |
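As a standalone illustration of the segmentation step used above (threshold the SNR map, label connected pixels with scipy, then cut out each island's bounding box), here is a minimal sketch on synthetic data; it is not the class method itself:

```python
import numpy as np
from scipy.ndimage import label, find_objects

rng = np.random.default_rng(42)
data = rng.normal(0, 1.0, (64, 64))          # background-subtracted image
data[20:24, 30:34] += 10.0                    # inject a bright blob
rms = np.ones_like(data)                      # flat noise map

outerclip, innerclip = 4.0, 5.0
snr = np.abs(data) / rms
labels, n = label(snr >= outerclip)           # flood (outer) threshold
for sl in find_objects(labels):
    # keep only islands that contain at least one seed (inner) pixel
    if np.any(snr[sl] >= innerclip):
        box = data[sl].copy()
        box[snr[sl] < outerclip] = np.nan     # mask sub-threshold pixels
        print("island at", sl, "peak", np.nanmax(box))
```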
def estimate_lmfit_parinfo(self, data, rmsimg, curve, beam, innerclip, outerclip=None, offsets=(0, 0),
max_summits=None):
"""
Estimates the number of sources in an island and returns initial parameters for the fit as well as
limits on those parameters.
Parameters
----------
data : 2d-array
(sub) image of flux values. Background should be subtracted.
rmsimg : 2d-array
Image of 1sigma values
curve : 2d-array
Image of curvature values [-1,0,+1]
beam : :class:`AegeanTools.fits_image.Beam`
The beam information for the image.
innerclip, outerclip : float
Inner and outer levels for clipping (sigmas).
offsets : (int, int)
The (x,y) offset of data within its parent image
max_summits : int
If not None, only this many summits/components will be fit. More components may be
present in the island, but subsequent components will not have free parameters.
Returns
-------
model : lmfit.Parameters
The initial estimate of parameters for the components within this island.
"""
debug_on = self.log.isEnabledFor(logging.DEBUG)
is_flag = 0
global_data = self.global_data
# check to see if this island is a negative peak since we need to treat such cases slightly differently
isnegative = max(data[np.where(np.isfinite(data))]) < 0
if isnegative:
self.log.debug("[is a negative island]")
if outerclip is None:
outerclip = innerclip
self.log.debug(" - shape {0}".format(data.shape))
if not data.shape == curve.shape:
self.log.error("data and curvature are mismatched")
self.log.error("data:{0} curve:{1}".format(data.shape, curve.shape))
raise AssertionError()
# For small islands we can't do a 6 param fit
# Don't count the NaN values as part of the island
non_nan_pix = len(data[np.where(np.isfinite(data))].ravel())
if 4 <= non_nan_pix <= 6:
self.log.debug("FIXED2PSF")
is_flag |= flags.FIXED2PSF
elif non_nan_pix < 4:
self.log.debug("FITERRSMALL!")
is_flag |= flags.FITERRSMALL
else:
is_flag = 0
if debug_on:
self.log.debug(" - size {0}".format(len(data.ravel())))
if min(data.shape) <= 2 or (is_flag & flags.FITERRSMALL) or (is_flag & flags.FIXED2PSF):
# 1d islands or small islands only get one source
if debug_on:
self.log.debug("Tiny summit detected")
self.log.debug("{0}".format(data))
summits = [[data, 0, data.shape[0], 0, data.shape[1]]]
# and are constrained to be point sources
is_flag |= flags.FIXED2PSF
else:
if isnegative:
# the summit should be able to include all pixels within the island not just those above innerclip
kappa_sigma = np.where(curve > 0.5, np.where(data + outerclip * rmsimg < 0, data, np.nan), np.nan)
else:
kappa_sigma = np.where(-1 * curve > 0.5, np.where(data - outerclip * rmsimg > 0, data, np.nan), np.nan)
summits = list(self._gen_flood_wrap(kappa_sigma, np.ones(kappa_sigma.shape), 0, domask=False))
params = lmfit.Parameters()
i = 0
summits_considered = 0
# This can happen when the image contains regions of nans
# the data/noise indicate an island, but the curvature doesn't back it up.
if len(summits) < 1:
self.log.debug("Island has {0} summits".format(len(summits)))
return None
# add summits in reverse order of peak SNR - ie brightest first
for summit, xmin, xmax, ymin, ymax in sorted(summits, key=lambda x: np.nanmax(-1. * abs(x[0]))):
summits_considered += 1
summit_flag = is_flag
if debug_on:
self.log.debug(
"Summit({5}) - shape:{0} x:[{1}-{2}] y:[{3}-{4}]".format(summit.shape, ymin, ymax, xmin, xmax, i))
try:
if isnegative:
amp = np.nanmin(summit)
xpeak, ypeak = np.unravel_index(np.nanargmin(summit), summit.shape)
else:
amp = np.nanmax(summit)
xpeak, ypeak = np.unravel_index(np.nanargmax(summit), summit.shape)
except ValueError as e:
if "All-NaN" in e.message:
self.log.warn("Summit of nan's detected - this shouldn't happen")
continue
else:
raise e
if debug_on:
self.log.debug(" - max is {0:f}".format(amp))
self.log.debug(" - peak at {0},{1}".format(xpeak, ypeak))
yo = ypeak + ymin
xo = xpeak + xmin
# Summits are allowed to include pixels that are between the outer and inner clip
# This means that sometimes we get a summit that has all its pixels below the inner clip
# So we test for that here.
snr = np.nanmax(abs(data[xmin:xmax + 1, ymin:ymax + 1] / rmsimg[xmin:xmax + 1, ymin:ymax + 1]))
if snr < innerclip:
self.log.debug("Summit has SNR {0} < innerclip {1}: skipping".format(snr, innerclip))
continue
# allow amp to be 5% or (innerclip) sigma higher
# TODO: the 5% should depend on the beam sampling
# note: when innerclip is 400 this becomes rather stupid
if amp > 0:
amp_min, amp_max = 0.95 * min(outerclip * rmsimg[xo, yo], amp), amp * 1.05 + innerclip * rmsimg[xo, yo]
else:
amp_max, amp_min = 0.95 * max(-outerclip * rmsimg[xo, yo], amp), amp * 1.05 - innerclip * rmsimg[xo, yo]
if debug_on:
self.log.debug("a_min {0}, a_max {1}".format(amp_min, amp_max))
pixbeam = global_data.psfhelper.get_pixbeam_pixel(yo + offsets[0], xo + offsets[1])
if pixbeam is None:
self.log.debug(" Summit has invalid WCS/Beam - Skipping.")
continue
# set a square limit based on the size of the pixbeam
xo_lim = 0.5 * np.hypot(pixbeam.a, pixbeam.b)
yo_lim = xo_lim
yo_min, yo_max = yo - yo_lim, yo + yo_lim
# if yo_min == yo_max: # if we have a 1d summit then allow the position to vary by +/-0.5pix
# yo_min, yo_max = yo_min - 0.5, yo_max + 0.5
xo_min, xo_max = xo - xo_lim, xo + xo_lim
# if xo_min == xo_max: # if we have a 1d summit then allow the position to vary by +/-0.5pix
# xo_min, xo_max = xo_min - 0.5, xo_max + 0.5
# the size of the island
xsize = data.shape[0]
ysize = data.shape[1]
# initial shape is the psf
sx = pixbeam.a * FWHM2CC
sy = pixbeam.b * FWHM2CC
# lmfit does silly things if we start with these two parameters being equal
sx = max(sx, sy * 1.01)
# constraints are based on the shape of the island
# sx,sy can become flipped so we set the min/max account for this
sx_min, sx_max = sy * 0.8, max((max(xsize, ysize) + 1) * math.sqrt(2) * FWHM2CC, sx * 1.1)
sy_min, sy_max = sy * 0.8, max((max(xsize, ysize) + 1) * math.sqrt(2) * FWHM2CC, sx * 1.1)
theta = pixbeam.pa # Degrees
flag = summit_flag
# check to see if we are going to fit this component
if max_summits is not None:
maxxed = i >= max_summits
else:
maxxed = False
# components that are not fit need appropriate flags
if maxxed:
summit_flag |= flags.NOTFIT
summit_flag |= flags.FIXED2PSF
if debug_on:
self.log.debug(" - var val min max | min max")
self.log.debug(" - amp {0} {1} {2} ".format(amp, amp_min, amp_max))
self.log.debug(" - xo {0} {1} {2} ".format(xo, xo_min, xo_max))
self.log.debug(" - yo {0} {1} {2} ".format(yo, yo_min, yo_max))
self.log.debug(" - sx {0} {1} {2} | {3} {4}".format(sx, sx_min, sx_max, sx_min * CC2FHWM,
sx_max * CC2FHWM))
self.log.debug(" - sy {0} {1} {2} | {3} {4}".format(sy, sy_min, sy_max, sy_min * CC2FHWM,
sy_max * CC2FHWM))
self.log.debug(" - theta {0} {1} {2}".format(theta, -180, 180))
self.log.debug(" - flags {0}".format(flag))
self.log.debug(" - fit? {0}".format(not maxxed))
# TODO: figure out how incorporate the circular constraint on sx/sy
prefix = "c{0}_".format(i)
params.add(prefix + 'amp', value=amp, min=amp_min, max=amp_max, vary=not maxxed)
params.add(prefix + 'xo', value=xo, min=float(xo_min), max=float(xo_max), vary=not maxxed)
params.add(prefix + 'yo', value=yo, min=float(yo_min), max=float(yo_max), vary=not maxxed)
if summit_flag & flags.FIXED2PSF > 0:
psf_vary = False
else:
psf_vary = not maxxed
params.add(prefix + 'sx', value=sx, min=sx_min, max=sx_max, vary=psf_vary)
params.add(prefix + 'sy', value=sy, min=sy_min, max=sy_max, vary=psf_vary)
params.add(prefix + 'theta', value=theta, vary=psf_vary)
params.add(prefix + 'flags', value=summit_flag, vary=False)
# starting at zero allows the maj/min axes to be fit better.
# if params[prefix + 'theta'].vary:
# params[prefix + 'theta'].value = 0
i += 1
if debug_on:
self.log.debug("Estimated sources: {0}".format(i))
# remember how many components are fit.
params.add('components', value=i, vary=False)
# params.components=i
if params['components'].value < 1:
self.log.debug("Considered {0} summits, accepted {1}".format(summits_considered, i))
return params | Estimates the number of sources in an island and returns initial parameters for the fit as well as
limits on those parameters.
Parameters
----------
data : 2d-array
(sub) image of flux values. Background should be subtracted.
rmsimg : 2d-array
Image of 1sigma values
curve : 2d-array
Image of curvature values [-1,0,+1]
beam : :class:`AegeanTools.fits_image.Beam`
The beam information for the image.
innerclip, outerclip : float
Inner and outer levels for clipping (sigmas).
offsets : (int, int)
The (x,y) offset of data within its parent image
max_summits : int
If not None, only this many summits/components will be fit. More components may be
present in the island, but subsequent components will not have free parameters.
Returns
-------
model : lmfit.Parameters
The initial estimate of parameters for the components within this island. | entailment |
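The routine above encodes each component as a prefixed group of lmfit parameters (c0_amp, c0_xo, ..., plus a 'components' counter). A minimal sketch of that naming convention, detached from the class and with made-up values; FWHM2CC is assumed to be the usual FWHM-to-sigma factor:

```python
import numpy as np
import lmfit

FWHM2CC = 1 / (2 * np.sqrt(2 * np.log(2)))   # assumed: FWHM -> gaussian sigma

params = lmfit.Parameters()
for i, (amp, xo, yo) in enumerate([(5.0, 10.2, 7.8), (3.1, 15.0, 9.5)]):
    prefix = "c{0}_".format(i)
    params.add(prefix + "amp", value=amp, min=0.0, max=2 * amp)
    params.add(prefix + "xo", value=xo, min=xo - 2, max=xo + 2)
    params.add(prefix + "yo", value=yo, min=yo - 2, max=yo + 2)
    params.add(prefix + "sx", value=3.0 * FWHM2CC, min=0.5, max=10.0)
    params.add(prefix + "sy", value=2.5 * FWHM2CC, min=0.5, max=10.0)
    params.add(prefix + "theta", value=0.0)
    params.add(prefix + "flags", value=0, vary=False)
params.add("components", value=2, vary=False)
params.pretty_print()
```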
def result_to_components(self, result, model, island_data, isflags):
"""
Convert fitting results into a set of components
Parameters
----------
result : lmfit.MinimizerResult
The fitting results.
model : lmfit.Parameters
The model that was fit.
island_data : :class:`AegeanTools.models.IslandFittingData`
Data about the island that was fit.
isflags : int
Flags that should be added to this island (in addition to those within the model)
Returns
-------
sources : list
A list of components, and islands if requested.
"""
global_data = self.global_data
# island data
isle_num = island_data.isle_num
idata = island_data.i
xmin, xmax, ymin, ymax = island_data.offsets
box = slice(int(xmin), int(xmax)), slice(int(ymin), int(ymax))
rms = global_data.rmsimg[box]
bkg = global_data.bkgimg[box]
residual = np.median(result.residual), np.std(result.residual)
is_flag = isflags
sources = []
j = 0
for j in range(model['components'].value):
src_flags = is_flag
source = OutputSource()
source.island = isle_num
source.source = j
self.log.debug(" component {0}".format(j))
prefix = "c{0}_".format(j)
xo = model[prefix + 'xo'].value
yo = model[prefix + 'yo'].value
sx = model[prefix + 'sx'].value
sy = model[prefix + 'sy'].value
theta = model[prefix + 'theta'].value
amp = model[prefix + 'amp'].value
src_flags |= model[prefix + 'flags'].value
# these are goodness of fit statistics for the entire island.
source.residual_mean = residual[0]
source.residual_std = residual[1]
# set the flags
source.flags = src_flags
# #pixel pos within island +
# island offset within region +
# region offset within image +
# 1 for luck
# (pyfits->fits conversion = luck)
x_pix = xo + xmin + 1
y_pix = yo + ymin + 1
# update the source xo/yo so the error calculations can be done correctly
# Note that you have to update the max or the value you set will be clipped at the max allowed value
model[prefix + 'xo'].set(value=x_pix, max=np.inf)
model[prefix + 'yo'].set(value=y_pix, max=np.inf)
# ------ extract source parameters ------
# fluxes
# the background is taken from background map
# Clamp the pixel location to the edge of the background map
y = max(min(int(round(y_pix - ymin)), bkg.shape[1] - 1), 0)
x = max(min(int(round(x_pix - xmin)), bkg.shape[0] - 1), 0)
source.background = bkg[x, y]
source.local_rms = rms[x, y]
source.peak_flux = amp
# all params are in degrees
source.ra, source.dec, source.a, source.b, source.pa = global_data.wcshelper.pix2sky_ellipse((x_pix, y_pix),
sx * CC2FHWM,
sy * CC2FHWM,
theta)
source.a *= 3600 # arcseconds
source.b *= 3600
# force a>=b
fix_shape(source)
# limit the pa to be in (-90,90]
source.pa = pa_limit(source.pa)
# if one of these values are nan then there has been some problem with the WCS handling
if not all(np.isfinite((source.ra, source.dec, source.a, source.b, source.pa))):
src_flags |= flags.WCSERR
# negative degrees is valid for RA, but I don't want them.
if source.ra < 0:
source.ra += 360
source.ra_str = dec2hms(source.ra)
source.dec_str = dec2dms(source.dec)
# calculate integrated flux
source.int_flux = source.peak_flux * sx * sy * CC2FHWM ** 2 * np.pi
# scale Jy/beam -> Jy using the area of the beam
source.int_flux /= global_data.psfhelper.get_beamarea_pix(source.ra, source.dec)
# Calculate errors for params that were fit (as well as int_flux)
errors(source, model, global_data.wcshelper)
source.flags = src_flags
# add psf info
local_beam = global_data.psfhelper.get_beam(source.ra, source.dec)
if local_beam is not None:
source.psf_a = local_beam.a * 3600
source.psf_b = local_beam.b * 3600
source.psf_pa = local_beam.pa
else:
source.psf_a = 0
source.psf_b = 0
source.psf_pa = 0
sources.append(source)
self.log.debug(source)
if global_data.blank:
outerclip = island_data.scalars[1]
idx, idy = np.where(abs(idata) - outerclip * rms > 0)
idx += xmin
idy += ymin
self.global_data.img._pixels[[idx, idy]] = np.nan
# calculate the integrated island flux if required
if island_data.doislandflux:
_, outerclip, _ = island_data.scalars
self.log.debug("Integrated flux for island {0}".format(isle_num))
kappa_sigma = np.where(abs(idata) - outerclip * rms > 0, idata, np.NaN)
self.log.debug("- island shape is {0}".format(kappa_sigma.shape))
source = IslandSource()
source.flags = 0
source.island = isle_num
source.components = j + 1
source.peak_flux = np.nanmax(kappa_sigma)
# check for negative islands
if source.peak_flux < 0:
source.peak_flux = np.nanmin(kappa_sigma)
self.log.debug("- peak flux {0}".format(source.peak_flux))
# positions and background
if np.isfinite(source.peak_flux):
positions = np.where(kappa_sigma == source.peak_flux)
else: # if a component has been refit then it might have flux = np.nan
positions = [[kappa_sigma.shape[0] // 2], [kappa_sigma.shape[1] // 2]]
xy = positions[0][0] + xmin, positions[1][0] + ymin
radec = global_data.wcshelper.pix2sky(xy)
source.ra = radec[0]
# convert negative ra's to positive ones
if source.ra < 0:
source.ra += 360
source.dec = radec[1]
source.ra_str = dec2hms(source.ra)
source.dec_str = dec2dms(source.dec)
source.background = bkg[positions[0][0], positions[1][0]]
source.local_rms = rms[positions[0][0], positions[1][0]]
source.x_width, source.y_width = idata.shape
source.pixels = int(sum(np.isfinite(kappa_sigma).ravel() * 1.0))
source.extent = [xmin, xmax, ymin, ymax]
# TODO: investigate what happens when the sky coords are skewed w.r.t the pixel coords
# calculate the area of the island as a fraction of the area of the bounding box
bl = global_data.wcshelper.pix2sky([xmax, ymin])
tl = global_data.wcshelper.pix2sky([xmax, ymax])
tr = global_data.wcshelper.pix2sky([xmin, ymax])
height = gcd(tl[0], tl[1], bl[0], bl[1])
width = gcd(tl[0], tl[1], tr[0], tr[1])
area = height * width
source.area = area * source.pixels / source.x_width / source.y_width # area is in deg^2
# create contours
msq = MarchingSquares(idata)
source.contour = [(a[0] + xmin, a[1] + ymin) for a in msq.perimeter]
# calculate the maximum angular size of this island, brute force method
source.max_angular_size = 0
for i, pos1 in enumerate(source.contour):
radec1 = global_data.wcshelper.pix2sky(pos1)
for j, pos2 in enumerate(source.contour[i:]):
radec2 = global_data.wcshelper.pix2sky(pos2)
dist = gcd(radec1[0], radec1[1], radec2[0], radec2[1])
if dist > source.max_angular_size:
source.max_angular_size = dist
source.pa = bear(radec1[0], radec1[1], radec2[0], radec2[1])
source.max_angular_size_anchors = [pos1[0], pos1[1], pos2[0], pos2[1]]
self.log.debug("- peak position {0}, {1} [{2},{3}]".format(source.ra_str, source.dec_str, positions[0][0],
positions[1][0]))
# integrated flux
beam_area = global_data.psfhelper.get_beamarea_deg2(source.ra, source.dec) # beam in deg^2
# get_beamarea_pix(source.ra, source.dec) # beam is in pix^2
isize = source.pixels # number of non zero pixels
self.log.debug("- pixels used {0}".format(isize))
source.int_flux = np.nansum(kappa_sigma) # total flux Jy/beam
self.log.debug("- sum of pixles {0}".format(source.int_flux))
source.int_flux *= beam_area # total flux in Jy
self.log.debug("- integrated flux {0}".format(source.int_flux))
eta = erf(np.sqrt(-1 * np.log(abs(source.local_rms * outerclip / source.peak_flux)))) ** 2
self.log.debug("- eta {0}".format(eta))
source.eta = eta
source.beam_area = beam_area
# I don't know how to calculate this error so we'll set it to nan
source.err_int_flux = np.nan
sources.append(source)
return sources | Convert fitting results into a set of components
Parameters
----------
result : lmfit.MinimizerResult
The fitting results.
model : lmfit.Parameters
The model that was fit.
island_data : :class:`AegeanTools.models.IslandFittingData`
Data about the island that was fit.
isflags : int
Flags that should be added to this island (in addition to those within the model)
Returns
-------
sources : list
A list of components, and islands if requested. | entailment |
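The peak-to-integrated flux step above mirrors the code's expression (peak * sx * sy * CC2FHWM**2 * pi, then divide by the beam area in pixels to go from Jy/beam to Jy). A toy numeric sketch with made-up values, assuming CC2FHWM is the sigma-to-FWHM conversion factor:

```python
import numpy as np

CC2FHWM = 2 * np.sqrt(2 * np.log(2))   # assumed: gaussian sigma -> FWHM

peak_flux = 1.2            # Jy/beam, fitted peak
sx, sy = 2.0, 1.5          # fitted gaussian widths (sigma, in pixels)
beam_area_pix = 12.3       # hypothetical beam area in pixels

int_flux = peak_flux * sx * sy * CC2FHWM ** 2 * np.pi   # same expression as above
int_flux /= beam_area_pix                                # Jy/beam -> Jy
print("integrated flux ~ {0:.3f} Jy".format(int_flux))
```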
def load_globals(self, filename, hdu_index=0, bkgin=None, rmsin=None, beam=None, verb=False, rms=None, bkg=None, cores=1,
do_curve=True, mask=None, lat=None, psf=None, blank=False, docov=True, cube_index=None):
"""
Populate the global_data object by loading or calculating the various components
Parameters
----------
filename : str or HDUList
Main image which source finding is run on
hdu_index : int
HDU index of the image within the fits file, default is 0 (first)
bkgin, rmsin : str or HDUList
background and noise image filename or HDUList
beam : :class:`AegeanTools.fits_image.Beam`
Beam object representing the synthesised beam. Will replace what is in the FITS header.
verb : bool
Verbose. Write extra lines to INFO level log.
rms, bkg : float
A float that represents a constant rms/bkg levels for the entire image.
Default = None, which causes the rms/bkg to be loaded or calculated.
cores : int
Number of cores to use if different from what is autodetected.
do_curve : bool
If True a curvature map will be created, default=True.
mask : str or :class:`AegeanTools.regions.Region`
filename or Region object
lat : float
Latitude of the observing telescope (declination of zenith)
psf : str or HDUList
Filename or HDUList of a psf image
blank : bool
True = blank output image where islands are found.
Default = False.
docov : bool
True = use covariance matrix in fitting.
Default = True.
cube_index : int
For an image cube, which slice to use.
"""
# don't reload already loaded data
if self.global_data.img is not None:
return
img = FitsImage(filename, hdu_index=hdu_index, beam=beam, cube_index=cube_index)
beam = img.beam
debug = logging.getLogger('Aegean').isEnabledFor(logging.DEBUG)
if mask is None:
self.global_data.region = None
else:
# allow users to supply an object instead of a filename
if isinstance(mask, Region):
self.global_data.region = mask
elif os.path.exists(mask):
self.log.info("Loading mask from {0}".format(mask))
self.global_data.region = Region.load(mask)
else:
self.log.error("File {0} not found for loading".format(mask))
self.global_data.region = None
self.global_data.wcshelper = WCSHelper.from_header(img.get_hdu_header(), beam, lat)
self.global_data.psfhelper = PSFHelper(psf, self.global_data.wcshelper)
self.global_data.beam = self.global_data.wcshelper.beam
self.global_data.img = img
self.global_data.data_pix = img.get_pixels()
self.global_data.dtype = type(self.global_data.data_pix[0][0])
self.global_data.bkgimg = np.zeros(self.global_data.data_pix.shape, dtype=self.global_data.dtype)
self.global_data.rmsimg = np.zeros(self.global_data.data_pix.shape, dtype=self.global_data.dtype)
self.global_data.pixarea = img.pixarea
self.global_data.dcurve = None
if do_curve:
self.log.info("Calculating curvature")
# calculate curvature but store it as -1,0,+1
dcurve = np.zeros(self.global_data.data_pix.shape, dtype=np.int8)
peaks = scipy.ndimage.filters.maximum_filter(self.global_data.data_pix, size=3)
troughs = scipy.ndimage.filters.minimum_filter(self.global_data.data_pix, size=3)
pmask = np.where(self.global_data.data_pix == peaks)
tmask = np.where(self.global_data.data_pix == troughs)
dcurve[pmask] = -1
dcurve[tmask] = 1
self.global_data.dcurve = dcurve
# if either of rms or bkg images are not supplied then calculate them both
if not (rmsin and bkgin):
if verb:
self.log.info("Calculating background and rms data")
self._make_bkg_rms(mesh_size=20, forced_rms=rms, forced_bkg=bkg, cores=cores)
# replace the calculated images with input versions, if the user has supplied them.
if bkgin:
if verb:
self.log.info("Loading background data from file {0}".format(bkgin))
self.global_data.bkgimg = self._load_aux_image(img, bkgin)
if rmsin:
if verb:
self.log.info("Loading rms data from file {0}".format(rmsin))
self.global_data.rmsimg = self._load_aux_image(img, rmsin)
# subtract the background image from the data image and save
if verb and debug:
self.log.debug("Data max is {0}".format(img.get_pixels()[np.isfinite(img.get_pixels())].max()))
self.log.debug("Doing background subtraction")
img.set_pixels(img.get_pixels() - self.global_data.bkgimg)
self.global_data.data_pix = img.get_pixels()
if verb and debug:
self.log.debug("Data max is {0}".format(img.get_pixels()[np.isfinite(img.get_pixels())].max()))
self.global_data.blank = blank
self.global_data.docov = docov
# Default to false until I can verify that this is working
self.global_data.dobias = False
# check if the WCS is galactic
if 'lon' in self.global_data.img._header['CTYPE1'].lower():
self.log.info("Galactic coordinates detected and noted")
SimpleSource.galactic = True
return | Populate the global_data object by loading or calculating the various components
Parameters
----------
filename : str or HDUList
Main image which source finding is run on
hdu_index : int
HDU index of the image within the fits file, default is 0 (first)
bkgin, rmsin : str or HDUList
background and noise image filename or HDUList
beam : :class:`AegeanTools.fits_image.Beam`
Beam object representing the synthesised beam. Will replace what is in the FITS header.
verb : bool
Verbose. Write extra lines to INFO level log.
rms, bkg : float
A float that represents a constant rms/bkg levels for the entire image.
Default = None, which causes the rms/bkg to be loaded or calculated.
cores : int
Number of cores to use if different from what is autodetected.
do_curve : bool
If True a curvature map will be created, default=True.
mask : str or :class:`AegeanTools.regions.Region`
filename or Region object
lat : float
Latitude of the observing telescope (declination of zenith)
psf : str or HDUList
Filename or HDUList of a psf image
blank : bool
True = blank output image where islands are found.
Default = False.
docov : bool
True = use covariance matrix in fitting.
Default = True.
cube_index : int
For an image cube, which slice to use. | entailment |
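The curvature map built above is a simple -1/0/+1 classification: pixels that equal a 3x3 maximum filter (local maxima) are marked -1, and pixels that equal the minimum filter (local minima) are marked +1. A standalone sketch of that step, assuming nothing beyond numpy and scipy:

```python
import numpy as np
from scipy.ndimage import maximum_filter, minimum_filter

rng = np.random.default_rng(1)
img = rng.normal(size=(32, 32))
img[10, 10] += 5.0                       # a peak

dcurve = np.zeros(img.shape, dtype=np.int8)
peaks = maximum_filter(img, size=3)
troughs = minimum_filter(img, size=3)
dcurve[img == peaks] = -1                # local maxima
dcurve[img == troughs] = 1               # local minima
print("peaks:", np.sum(dcurve == -1), "troughs:", np.sum(dcurve == 1))
```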
def save_background_files(self, image_filename, hdu_index=0, bkgin=None, rmsin=None, beam=None, rms=None, bkg=None, cores=1,
outbase=None):
"""
Generate and save the background and RMS maps as FITS files.
They are saved in the current directory as <outbase>_bkg.fits and <outbase>_rms.fits (along with curvature and SNR maps).
Parameters
----------
image_filename : str or HDUList
Input image.
hdu_index : int
If fits file has more than one hdu, it can be specified here.
Default = 0.
bkgin, rmsin : str or HDUList
Background and noise image filename or HDUList
beam : :class:`AegeanTools.fits_image.Beam`
Beam object representing the synthesised beam. Will replace what is in the FITS header.
rms, bkg : float
A float that represents a constant rms/bkg level for the entire image.
Default = None, which causes the rms/bkg to be loaded or calculated.
cores : int
Number of cores to use if different from what is autodetected.
outbase : str
Basename for output files.
"""
self.log.info("Saving background / RMS maps")
# load image, and load/create background/rms images
self.load_globals(image_filename, hdu_index=hdu_index, bkgin=bkgin, rmsin=rmsin, beam=beam, verb=True, rms=rms, bkg=bkg,
cores=cores, do_curve=True)
img = self.global_data.img
bkgimg, rmsimg = self.global_data.bkgimg, self.global_data.rmsimg
curve = np.array(self.global_data.dcurve, dtype=bkgimg.dtype)
# mask these arrays so that they have the same mask as the data
mask = np.where(np.isnan(self.global_data.data_pix))
bkgimg[mask] = np.NaN
rmsimg[mask] = np.NaN
curve[mask] = np.NaN
# Generate the new FITS files by copying the existing HDU and assigning new data.
# This gives the new files the same WCS projection and other header fields.
new_hdu = img.hdu
# Set the ORIGIN to indicate Aegean made this file
new_hdu.header["ORIGIN"] = "Aegean {0}-({1})".format(__version__, __date__)
for c in ['CRPIX3', 'CRPIX4', 'CDELT3', 'CDELT4', 'CRVAL3', 'CRVAL4', 'CTYPE3', 'CTYPE4']:
if c in new_hdu.header:
del new_hdu.header[c]
if outbase is None:
outbase, _ = os.path.splitext(os.path.basename(image_filename))
noise_out = outbase + '_rms.fits'
background_out = outbase + '_bkg.fits'
curve_out = outbase + '_crv.fits'
snr_out = outbase + '_snr.fits'
new_hdu.data = bkgimg
new_hdu.writeto(background_out, overwrite=True)
self.log.info("Wrote {0}".format(background_out))
new_hdu.data = rmsimg
new_hdu.writeto(noise_out, overwrite=True)
self.log.info("Wrote {0}".format(noise_out))
new_hdu.data = curve
new_hdu.writeto(curve_out, overwrite=True)
self.log.info("Wrote {0}".format(curve_out))
new_hdu.data = self.global_data.data_pix / rmsimg
new_hdu.writeto(snr_out, overwrite=True)
self.log.info("Wrote {0}".format(snr_out))
return | Generate and save the background and RMS maps as FITS files.
They are saved in the current directory as <outbase>_bkg.fits and <outbase>_rms.fits (along with curvature and SNR maps).
Parameters
----------
image_filename : str or HDUList
Input image.
hdu_index : int
If fits file has more than one hdu, it can be specified here.
Default = 0.
bkgin, rmsin : str or HDUList
Background and noise image filename or HDUList
beam : :class:`AegeanTools.fits_image.Beam`
Beam object representing the synthesised beam. Will replace what is in the FITS header.
rms, bkg : float
A float that represents a constant rms/bkg level for the entire image.
Default = None, which causes the rms/bkg to be loaded or calculated.
cores : int
Number of cores to use if different from what is autodetected.
outbase : str
Basename for output files. | entailment |
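A minimal, standalone sketch of the "reuse the input HDU, swap the data, write out" pattern used above, with hypothetical filenames, a stand-in rms estimate, and astropy.io.fits (the method above also strips the degenerate 3rd/4th axis keywords, mimicked here):

```python
import numpy as np
from astropy.io import fits

# hypothetical input file
with fits.open("image.fits") as hdulist:
    hdu = hdulist[0]
    data = hdu.data.astype(np.float32)

    rms_map = np.full_like(data, np.nanstd(data))   # stand-in for the real rms estimate
    hdu.data = rms_map
    hdu.header["ORIGIN"] = "example"                # annotate provenance
    # drop degenerate frequency/stokes axes if present
    for key in ("CRPIX3", "CRPIX4", "CDELT3", "CDELT4",
                "CRVAL3", "CRVAL4", "CTYPE3", "CTYPE4"):
        hdu.header.pop(key, None)
    hdu.writeto("image_rms.fits", overwrite=True)
```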
def save_image(self, outname):
"""
Save the image data.
This is probably only useful if the image data has been blanked.
Parameters
----------
outname : str
Name for the output file.
"""
hdu = self.global_data.img.hdu
hdu.data = self.global_data.img._pixels
hdu.header["ORIGIN"] = "Aegean {0}-({1})".format(__version__, __date__)
# delete some axes that we aren't going to need
for c in ['CRPIX3', 'CRPIX4', 'CDELT3', 'CDELT4', 'CRVAL3', 'CRVAL4', 'CTYPE3', 'CTYPE4']:
if c in hdu.header:
del hdu.header[c]
hdu.writeto(outname, overwrite=True)
self.log.info("Wrote {0}".format(outname))
return | Save the image data.
This is probably only useful if the image data has been blanked.
Parameters
----------
outname : str
Name for the output file. | entailment |
def _make_bkg_rms(self, mesh_size=20, forced_rms=None, forced_bkg=None, cores=None):
"""
Calculate an rms image and a bkg image.
Parameters
----------
mesh_size : int
Number of beams per box. Default = 20.
forced_rms : float
The rms of the image.
If None: calculate the rms level (default).
Otherwise assume a constant rms.
forced_bkg : float
The background level of the image.
If None: calculate the background level (default).
Otherwise assume a constant background.
cores: int
Number of cores to use if different from what is autodetected.
"""
if (forced_rms is not None):
self.log.info("Forcing rms = {0}".format(forced_rms))
self.global_data.rmsimg[:] = forced_rms
if (forced_bkg is not None):
self.log.info("Forcing bkg = {0}".format(forced_bkg))
self.global_data.bkgimg[:] = forced_bkg
# If we know both the rms and the bkg then there is nothing to compute
if (forced_rms is not None) and (forced_bkg is not None):
return
data = self.global_data.data_pix
beam = self.global_data.beam
img_x, img_y = data.shape
xcen = int(img_x / 2)
ycen = int(img_y / 2)
# calculate a local beam from the center of the data
pixbeam = self.global_data.psfhelper.get_pixbeam_pixel(xcen, ycen)
if pixbeam is None:
self.log.error("Cannot determine the beam shape at the image center")
sys.exit(1)
width_x = mesh_size * max(abs(math.cos(np.radians(pixbeam.pa)) * pixbeam.a),
abs(math.sin(np.radians(pixbeam.pa)) * pixbeam.b))
width_x = int(width_x)
width_y = mesh_size * max(abs(math.sin(np.radians(pixbeam.pa)) * pixbeam.a),
abs(math.cos(np.radians(pixbeam.pa)) * pixbeam.b))
width_y = int(width_y)
self.log.debug("image size x,y:{0},{1}".format(img_x, img_y))
self.log.debug("beam: {0}".format(beam))
self.log.debug("mesh width (pix) x,y: {0},{1}".format(width_x, width_y))
# boxes are centered at the image center and tile outwards
xstart = int(xcen - width_x / 2) % width_x # the starting point of the first "full" box
ystart = int(ycen - width_y / 2) % width_y
xend = img_x - int(img_x - xstart) % width_x # the end point of the last "full" box
yend = img_y - int(img_y - ystart) % width_y
xmins = [0]
xmins.extend(list(range(xstart, xend, width_x)))
xmins.append(xend)
xmaxs = [xstart]
xmaxs.extend(list(range(xstart + width_x, xend + 1, width_x)))
xmaxs.append(img_x)
ymins = [0]
ymins.extend(list(range(ystart, yend, width_y)))
ymins.append(yend)
ymaxs = [ystart]
ymaxs.extend(list(range(ystart + width_y, yend + 1, width_y)))
ymaxs.append(img_y)
# if the image is smaller than our ideal mesh size, just use the whole image instead
if width_x >= img_x:
xmins = [0]
xmaxs = [img_x]
if width_y >= img_y:
ymins = [0]
ymaxs = [img_y]
if cores > 1:
# set up the queue
queue = pprocess.Queue(limit=cores, reuse=1)
estimate = queue.manage(pprocess.MakeReusable(self._estimate_bkg_rms))
# populate the queue
for xmin, xmax in zip(xmins, xmaxs):
for ymin, ymax in zip(ymins, ymaxs):
estimate(ymin, ymax, xmin, xmax)
else:
queue = []
for xmin, xmax in zip(xmins, xmaxs):
for ymin, ymax in zip(ymins, ymaxs):
queue.append(self._estimate_bkg_rms(xmin, xmax, ymin, ymax))
# only copy across the bkg/rms if they are not already set
# queue can only be traversed once so we have to put the if inside the loop
for ymin, ymax, xmin, xmax, bkg, rms in queue:
if (forced_rms is None):
self.global_data.rmsimg[ymin:ymax, xmin:xmax] = rms
if (forced_bkg is None):
self.global_data.bkgimg[ymin:ymax, xmin:xmax] = bkg
return | Calculate an rms image and a bkg image.
Parameters
----------
mesh_size : int
Number of beams per box. Default = 20.
forced_rms : float
The rms of the image.
If None: calculate the rms level (default).
Otherwise assume a constant rms.
forced_bkg : float
The background level of the image.
If None: calculate the background level (default).
Otherwise assume a constant background.
cores: int
Number of cores to use if different from what is autodetected. | entailment |
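The box edges above tile outward from a box centred on the image centre, with partial boxes at each edge. A small standalone sketch of that tiling for one axis, using a toy image width and box width:

```python
img_x = 100          # image size along one axis (toy value)
width_x = 30         # box width along that axis (toy value)
xcen = img_x // 2

xstart = int(xcen - width_x / 2) % width_x         # start of the first full box
xend = img_x - int(img_x - xstart) % width_x       # end of the last full box

xmins = [0] + list(range(xstart, xend, width_x)) + [xend]
xmaxs = [xstart] + list(range(xstart + width_x, xend + 1, width_x)) + [img_x]
print(list(zip(xmins, xmaxs)))
# [(0, 5), (5, 35), (35, 65), (65, 95), (95, 100)]
```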
def _estimate_bkg_rms(self, xmin, xmax, ymin, ymax):
"""
Estimate the background noise mean and RMS.
The mean is estimated as the median of data.
The RMS is estimated as the IQR of data / 1.34896.
Parameters
----------
xmin, xmax, ymin, ymax : int
The bounding region over which the bkg/rms will be calculated.
Returns
-------
ymin, ymax, xmin, xmax : int
A copy of the input parameters
bkg, rms : float
The calculated background and noise.
"""
data = self.global_data.data_pix[ymin:ymax, xmin:xmax]
pixels = np.extract(np.isfinite(data), data).ravel()
if len(pixels) < 4:
bkg, rms = np.NaN, np.NaN
else:
pixels.sort()
p25 = pixels[int(pixels.size / 4)]
p50 = pixels[int(pixels.size / 2)]
p75 = pixels[int(pixels.size / 4 * 3)]
iqr = p75 - p25
bkg, rms = p50, iqr / 1.34896
# return the input and output data so we know what we are doing
# when compiling the results of multiple processes
return ymin, ymax, xmin, xmax, bkg, rms | Estimate the background noise mean and RMS.
The mean is estimated as the median of data.
The RMS is estimated as the IQR of data / 1.34896.
Parameters
----------
xmin, xmax, ymin, ymax : int
The bounding region over which the bkg/rms will be calculated.
Returns
-------
ymin, ymax, xmin, xmax : int
A copy of the input parameters
bkg, rms : float
The calculated background and noise. | entailment |
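The background/noise estimator above is just the median and the interquartile range scaled to a gaussian sigma (IQR / 1.34896). A standalone numpy sketch of the same estimate, using np.percentile in place of the explicit sort used above:

```python
import numpy as np

rng = np.random.default_rng(0)
pixels = rng.normal(loc=0.05, scale=2.0, size=10_000)   # toy data: bkg=0.05, rms=2.0

p25, p50, p75 = np.percentile(pixels, [25, 50, 75])
bkg = p50
rms = (p75 - p25) / 1.34896          # IQR of a gaussian = 1.34896 sigma
print("bkg ~ {0:.3f}, rms ~ {1:.3f}".format(bkg, rms))
```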
def _load_aux_image(self, image, auxfile):
"""
Load a fits file (bkg/rms/curve) and make sure that
it is the same shape as the main image.
Parameters
----------
image : :class:`AegeanTools.fits_image.FitsImage`
The main image that has already been loaded.
auxfile : str or HDUList
The auxiliary file to be loaded.
Returns
-------
aux : :class:`AegeanTools.fits_image.FitsImage`
The loaded image.
"""
auximg = FitsImage(auxfile, beam=self.global_data.beam).get_pixels()
if auximg.shape != image.get_pixels().shape:
self.log.error("file {0} is not the same size as the image map".format(auxfile))
self.log.error("{0}= {1}, image = {2}".format(auxfile, auximg.shape, image.get_pixels().shape))
sys.exit(1)
return auximg | Load a fits file (bkg/rms/curve) and make sure that
it is the same shape as the main image.
Parameters
----------
image : :class:`AegeanTools.fits_image.FitsImage`
The main image that has already been loaded.
auxfile : str or HDUList
The auxiliary file to be loaded.
Returns
-------
aux : :class:`AegeanTools.fits_image.FitsImage`
The loaded image. | entailment |
def _refit_islands(self, group, stage, outerclip=None, istart=0):
"""
Do island refitting (priorized fitting) on a group of islands.
Parameters
----------
group : list
A list of components grouped by island.
stage : int
Refitting stage.
outerclip : float
Ignored, placeholder for future development.
istart : int
The starting island number.
Returns
-------
sources : list
List of sources (and islands).
"""
global_data = self.global_data
sources = []
data = global_data.data_pix
rmsimg = global_data.rmsimg
for inum, isle in enumerate(group, start=istart):
self.log.debug("-=-")
self.log.debug("input island = {0}, {1} components".format(isle[0].island, len(isle)))
# set up the parameters for each of the sources within the island
i = 0
params = lmfit.Parameters()
shape = data.shape
xmin, ymin = shape
xmax = ymax = 0
# island_mask = []
src_valid_psf = None
# keep track of the sources that are actually being refit
# this may be a subset of all sources in the island
included_sources = []
for src in isle:
pixbeam = global_data.psfhelper.get_pixbeam(src.ra, src.dec)
# find the right pixels from the ra/dec
source_x, source_y = global_data.wcshelper.sky2pix([src.ra, src.dec])
source_x -= 1
source_y -= 1
x = int(round(source_x))
y = int(round(source_y))
self.log.debug("pixel location ({0:5.2f},{1:5.2f})".format(source_x, source_y))
# reject sources that are outside the image bounds, or which have nan data/rms values
if not 0 <= x < shape[0] or not 0 <= y < shape[1] or \
not np.isfinite(data[x, y]) or \
not np.isfinite(rmsimg[x, y]) or \
pixbeam is None:
self.log.debug("Source ({0},{1}) not within usable region: skipping".format(src.island, src.source))
continue
else:
# Keep track of the last source to have a valid psf so that we can use it later on
src_valid_psf = src
# determine the shape parameters in pixel values
_, _, sx, sy, theta = global_data.wcshelper.sky2pix_ellipse([src.ra, src.dec], src.a / 3600,
src.b / 3600, src.pa)
sx *= FWHM2CC
sy *= FWHM2CC
self.log.debug("Source shape [sky coords] {0:5.2f}x{1:5.2f}@{2:05.2f}".format(src.a, src.b, src.pa))
self.log.debug("Source shape [pixel coords] {0:4.2f}x{1:4.2f}@{2:05.2f}".format(sx, sy, theta))
# choose a region that is 2x the major axis of the source, 4x semimajor axis a
width = 4 * sx
ywidth = int(round(width)) + 1
xwidth = int(round(width)) + 1
# adjust the size of the island to include this source
xmin = min(xmin, max(0, x - xwidth / 2))
ymin = min(ymin, max(0, y - ywidth / 2))
xmax = max(xmax, min(shape[0], x + xwidth / 2 + 1))
ymax = max(ymax, min(shape[1], y + ywidth / 2 + 1))
s_lims = [0.8 * min(sx, pixbeam.b * FWHM2CC), max(sy, sx) * 1.25]
# Set up the parameters for the fit, including constraints
prefix = "c{0}_".format(i)
params.add(prefix + 'amp', value=src.peak_flux, vary=True)
# for now the xo/yo are locations within the main image, we correct this later
params.add(prefix + 'xo', value=source_x, min=source_x - sx / 2., max=source_x + sx / 2.,
vary=stage >= 2)
params.add(prefix + 'yo', value=source_y, min=source_y - sy / 2., max=source_y + sy / 2.,
vary=stage >= 2)
params.add(prefix + 'sx', value=sx, min=s_lims[0], max=s_lims[1], vary=stage >= 3)
params.add(prefix + 'sy', value=sy, min=s_lims[0], max=s_lims[1], vary=stage >= 3)
params.add(prefix + 'theta', value=theta, vary=stage >= 3)
params.add(prefix + 'flags', value=0, vary=False)
# this source is being refit so add it to the list
included_sources.append(src)
i += 1
# TODO: Allow this mask to be used in conjunction with the FWHM mask that is defined further on
# # Use pixels above outerclip sigmas..
# if outerclip>=0:
# mask = np.where(data[xmin:xmax,ymin:ymax]-outerclip*rmsimg[xmin:xmax,ymin:ymax]>0)
# else: # negative outer clip means use all the pixels
# mask = np.where(data[xmin:xmax,ymin:ymax])
#
# # convert the pixel indices to be pixels within the parent data set
# xmask = mask[0] + xmin
# ymask = mask[1] + ymin
# island_mask.extend(zip(xmask,ymask))
if i == 0:
self.log.debug("No sources found in island {0}".format(src.island))
continue
params.add('components', value=i, vary=False)
# params.components = i
self.log.debug(" {0} components being fit".format(i))
# now we correct the xo/yo positions to be relative to the sub-image
self.log.debug("xmxxymyx {0} {1} {2} {3}".format(xmin, xmax, ymin, ymax))
for i in range(params['components'].value):
prefix = "c{0}_".format(i)
params[prefix + 'xo'].value -= xmin
params[prefix + 'xo'].min -= xmin
params[prefix + 'xo'].max -= xmin
params[prefix + 'yo'].value -= ymin
params[prefix + 'yo'].min -= ymin
params[prefix + 'yo'].max -= ymin
# self.log.debug(params)
# don't fit if there are no sources
if params['components'].value < 1:
self.log.info("Island {0} has no components".format(src.island))
continue
# this .copy() will stop us from modifying the parent region when we later apply our mask.
idata = data[int(xmin):int(xmax), int(ymin):int(ymax)].copy()
# now convert these back to indices within the idata region
# island_mask = np.array([(x-xmin, y-ymin) for x, y in island_mask])
allx, ally = np.indices(idata.shape)
# mask to include pixels that are within the FWHM of the sources being fit
mask_params = copy.deepcopy(params)
for i in range(mask_params['components'].value):
prefix = 'c{0}_'.format(i)
mask_params[prefix + 'amp'].value = 1
mask_model = ntwodgaussian_lmfit(mask_params)
mask = np.where(mask_model(allx.ravel(), ally.ravel()) <= 0.1)
mask = allx.ravel()[mask], ally.ravel()[mask]
del mask_params
idata[mask] = np.nan
mx, my = np.where(np.isfinite(idata))
non_nan_pix = len(mx)
total_pix = len(allx.ravel())
self.log.debug("island extracted:")
self.log.debug(" x[{0}:{1}] y[{2}:{3}]".format(xmin, xmax, ymin, ymax))
self.log.debug(" max = {0}".format(np.nanmax(idata)))
self.log.debug(
" total {0}, masked {1}, not masked {2}".format(total_pix, total_pix - non_nan_pix, non_nan_pix))
# Check to see that each component has some data within the central 3x3 pixels of it's location
# If not then we don't fit that component
for i in range(params['components'].value):
prefix = "c{0}_".format(i)
# figure out a box around the center of this
cx, cy = params[prefix + 'xo'].value, params[prefix + 'yo'].value # central pixel coords
self.log.debug(" comp {0}".format(i))
self.log.debug(" x0, y0 {0} {1}".format(cx, cy))
xmx = int(round(np.clip(cx + 2, 0, idata.shape[0])))
xmn = int(round(np.clip(cx - 1, 0, idata.shape[0])))
ymx = int(round(np.clip(cy + 2, 0, idata.shape[1])))
ymn = int(round(np.clip(cy - 1, 0, idata.shape[1])))
square = idata[xmn:xmx, ymn:ymx]
# if there are no not-nan pixels in this region then don't vary any parameters
if not np.any(np.isfinite(square)):
self.log.debug(" not fitting component {0}".format(i))
params[prefix + 'amp'].value = np.nan
for p in ['amp', 'xo', 'yo', 'sx', 'sy', 'theta']:
params[prefix + p].vary = False
params[prefix + p].stderr = np.nan # this results in an error of -1 later on
params[prefix + 'flags'].value |= flags.NOTFIT
# determine the number of free parameters and if we have enough data for a fit
nfree = np.count_nonzero([params[p].vary for p in params.keys()])
self.log.debug(params)
if nfree < 1:
self.log.debug(" Island has no components to fit")
result = DummyLM()
model = params
else:
if non_nan_pix < nfree:
self.log.debug("More free parameters {0} than available pixels {1}".format(nfree, non_nan_pix))
if non_nan_pix >= params['components'].value:
self.log.debug("Fixing all parameters except amplitudes")
for p in params.keys():
if 'amp' not in p:
params[p].vary = False
else:
self.log.debug(" no not-masked pixels, skipping")
continue
# do the fit
# if the pixel beam is not valid, then recalculate using the location of the last source to have a valid psf
if pixbeam is None:
if src_valid_psf is not None:
pixbeam = global_data.psfhelper.get_pixbeam(src_valid_psf.ra, src_valid_psf.dec)
else:
self.log.critical("Cannot determine pixel beam")
fac = 1 / np.sqrt(2)
if self.global_data.docov:
C = Cmatrix(mx, my, pixbeam.a * FWHM2CC * fac, pixbeam.b * FWHM2CC * fac, pixbeam.pa)
B = Bmatrix(C)
else:
C = B = None
errs = np.nanmax(rmsimg[int(xmin):int(xmax), int(ymin):int(ymax)])
result, _ = do_lmfit(idata, params, B=B)
model = covar_errors(result.params, idata, errs=errs, B=B, C=C)
# convert the results to a source object
offsets = (xmin, xmax, ymin, ymax)
# TODO allow for island fluxes in the refitting.
island_data = IslandFittingData(inum, i=idata, offsets=offsets, doislandflux=False, scalars=(4, 4, None))
new_src = self.result_to_components(result, model, island_data, src.flags)
for ns, s in zip(new_src, included_sources):
# preserve the uuid so we can do exact matching between catalogs
ns.uuid = s.uuid
# flag the sources as having been priorized
ns.flags |= flags.PRIORIZED
# if the position wasn't fit then copy the errors from the input catalog
if stage < 2:
ns.err_ra = s.err_ra
ns.err_dec = s.err_dec
ns.flags |= flags.FIXED2PSF
# if the shape wasn't fit then copy the errors from the input catalog
if stage < 3:
ns.err_a = s.err_a
ns.err_b = s.err_b
ns.err_pa = s.err_pa
sources.extend(new_src)
return sources | Do island refitting (priorized fitting) on a group of islands.
Parameters
----------
group : list
A list of components grouped by island.
stage : int
Refitting stage.
outerclip : float
Ignored, placed holder for future development.
istart : int
The starting island number.
Returns
-------
sources : list
List of sources (and islands). | entailment |
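The masking step above keeps only pixels where a unit-amplitude copy of the model is above 0.1 (roughly out to the FWHM and a little beyond). A self-contained sketch of the idea with a single circular gaussian (the real code builds the model from the lmfit parameters via ntwodgaussian_lmfit):

```python
import numpy as np

def gauss2d(x, y, xo, yo, sigma):
    """Unit-amplitude circular gaussian."""
    return np.exp(-0.5 * ((x - xo) ** 2 + (y - yo) ** 2) / sigma ** 2)

idata = np.random.default_rng(3).normal(size=(21, 21))
allx, ally = np.indices(idata.shape)

model = gauss2d(allx, ally, xo=10, yo=10, sigma=2.5)
masked = idata.copy()
masked[model <= 0.1] = np.nan            # blank pixels far from the component
print("pixels kept:", np.isfinite(masked).sum(), "of", masked.size)
```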
def _fit_island(self, island_data):
"""
Take an Island, do all the parameter estimation and fitting.
Parameters
----------
island_data : :class:`AegeanTools.models.IslandFittingData`
The island to be fit.
Returns
-------
sources : list
The sources that were fit.
"""
global_data = self.global_data
# global data
dcurve = global_data.dcurve
rmsimg = global_data.rmsimg
# island data
isle_num = island_data.isle_num
idata = island_data.i
innerclip, outerclip, max_summits = island_data.scalars
xmin, xmax, ymin, ymax = island_data.offsets
# get the beam parameters at the center of this island
midra, middec = global_data.wcshelper.pix2sky([0.5 * (xmax + xmin), 0.5 * (ymax + ymin)])
beam = global_data.psfhelper.get_psf_pix(midra, middec)
del middec, midra
icurve = dcurve[xmin:xmax, ymin:ymax]
rms = rmsimg[xmin:xmax, ymin:ymax]
is_flag = 0
pixbeam = global_data.psfhelper.get_pixbeam_pixel((xmin + xmax) / 2., (ymin + ymax) / 2.)
if pixbeam is None:
# This island is not 'on' the sky, ignore it
return []
self.log.debug("=====")
self.log.debug("Island ({0})".format(isle_num))
params = self.estimate_lmfit_parinfo(idata, rms, icurve, beam, innerclip, outerclip, offsets=[xmin, ymin],
max_summits=max_summits)
# islands at the edge of a region of nans
# result in no components
if params is None or params['components'].value < 1:
return []
self.log.debug("Rms is {0}".format(np.shape(rms)))
self.log.debug("Isle is {0}".format(np.shape(idata)))
self.log.debug(" of which {0} are masked".format(sum(np.isnan(idata).ravel() * 1)))
# Check that there is enough data to do the fit
mx, my = np.where(np.isfinite(idata))
non_blank_pix = len(mx)
free_vars = len([1 for a in params.keys() if params[a].vary])
if non_blank_pix < free_vars or free_vars == 0:
self.log.debug("Island {0} doesn't have enough pixels to fit the given model".format(isle_num))
self.log.debug("non_blank_pix {0}, free_vars {1}".format(non_blank_pix, free_vars))
result = DummyLM()
model = params
is_flag |= flags.NOTFIT
else:
# Model is the fitted parameters
fac = 1 / np.sqrt(2)
if self.global_data.docov:
C = Cmatrix(mx, my, pixbeam.a * FWHM2CC * fac, pixbeam.b * FWHM2CC * fac, pixbeam.pa)
B = Bmatrix(C)
else:
C = B = None
self.log.debug(
"C({0},{1},{2},{3},{4})".format(len(mx), len(my), pixbeam.a * FWHM2CC, pixbeam.b * FWHM2CC, pixbeam.pa))
errs = np.nanmax(rms)
self.log.debug("Initial params")
self.log.debug(params)
result, _ = do_lmfit(idata, params, B=B)
if not result.errorbars:
is_flag |= flags.FITERR
# get the real (sky) parameter errors
model = covar_errors(result.params, idata, errs=errs, B=B, C=C)
if self.global_data.dobias and self.global_data.docov:
x, y = np.indices(idata.shape)
acf = elliptical_gaussian(x, y, 1, 0, 0, pixbeam.a * FWHM2CC * fac, pixbeam.b * FWHM2CC * fac,
pixbeam.pa)
bias_correct(model, idata, acf=acf * errs ** 2)
if not result.success:
is_flag |= flags.FITERR
self.log.debug("Final params")
self.log.debug(model)
# convert the fitting results to a list of sources [and islands]
sources = self.result_to_components(result, model, island_data, is_flag)
return sources | Take an Island, do all the parameter estimation and fitting.
Parameters
----------
island_data : :class:`AegeanTools.models.IslandFittingData`
The island to be fit.
Returns
-------
sources : list
The sources that were fit. | entailment |
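do_lmfit, Cmatrix and Bmatrix above are AegeanTools helpers. As a rough standalone stand-in for the fitting step (ignoring the covariance weighting, and axis-aligned for brevity), a plain lmfit minimisation of a 2D gaussian on synthetic island data looks like this:

```python
import numpy as np
import lmfit

def model(params, x, y):
    v = params.valuesdict()
    return v["c0_amp"] * np.exp(-0.5 * (((x - v["c0_xo"]) / v["c0_sx"]) ** 2 +
                                        ((y - v["c0_yo"]) / v["c0_sy"]) ** 2))

def residual(params, x, y, data):
    return (model(params, x, y) - data).ravel()

x, y = np.indices((25, 25))
truth = lmfit.Parameters()
for name, val in [("c0_amp", 4.0), ("c0_xo", 12.0), ("c0_yo", 13.0),
                  ("c0_sx", 2.0), ("c0_sy", 1.5)]:
    truth.add(name, value=val)
data = model(truth, x, y) + np.random.default_rng(7).normal(0, 0.1, x.shape)

guess = lmfit.Parameters()
guess.add("c0_amp", value=3.0, min=0.0)
guess.add("c0_xo", value=11.0, min=0.0, max=24.0)
guess.add("c0_yo", value=12.0, min=0.0, max=24.0)
guess.add("c0_sx", value=1.5, min=0.5, max=10.0)
guess.add("c0_sy", value=1.5, min=0.5, max=10.0)
result = lmfit.minimize(residual, guess, args=(x, y, data))
print(lmfit.fit_report(result))
```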
def _fit_islands(self, islands):
"""
Execute fitting on a list of islands
This function just wraps around fit_island, so that when we do multiprocessing
a single process will fit multiple islands before returning results.
Parameters
----------
islands : list of :class:`AegeanTools.models.IslandFittingData`
The islands to be fit.
Returns
-------
sources : list
The sources that were fit.
"""
self.log.debug("Fitting group of {0} islands".format(len(islands)))
sources = []
for island in islands:
res = self._fit_island(island)
sources.extend(res)
return sources | Execute fitting on a list of islands
This function just wraps around fit_island, so that when we do multiprocessing
a single process will fit multiple islands before returning results.
Parameters
----------
islands : list of :class:`AegeanTools.models.IslandFittingData`
The islands to be fit.
Returns
-------
sources : list
The sources that were fit. | entailment |
def find_sources_in_image(self, filename, hdu_index=0, outfile=None, rms=None, bkg=None, max_summits=None, innerclip=5,
outerclip=4, cores=None, rmsin=None, bkgin=None, beam=None, doislandflux=False,
nopositive=False, nonegative=False, mask=None, lat=None, imgpsf=None, blank=False,
docov=True, cube_index=None):
"""
Run the Aegean source finder.
Parameters
----------
filename : str or HDUList
Image filename or HDUList.
hdu_index : int
The index of the FITS HDU (extension).
outfile : str
file for printing catalog (NOT a table, just a text file of my own design)
rms : float
Use this rms for the entire image (will also assume that background is 0)
max_summits : int
Fit up to this many components to each island (extras are included but not fit)
innerclip, outerclip : float
The seed (inner) and flood (outer) clipping level (sigmas).
cores : int
Number of CPU cores to use. None means all cores.
rmsin, bkgin : str or HDUList
Filename or HDUList for the noise and background images.
If either are None, then it will be calculated internally.
beam : (major, minor, pa)
Floats representing the synthesised beam (degrees).
Replaces whatever is given in the FITS header.
If the FITS header has no BMAJ/BMIN then this is required.
doislandflux : bool
If True then each island will also be characterized.
nopositive, nonegative : bool
Whether to return positive or negative sources.
Default nopositive=False, nonegative=False.
mask : str
The filename of a region file created by MIMAS.
Islands outside of this region will be ignored.
lat : float
The latitude of the telescope (declination of zenith).
imgpsf : str or HDUList
Filename or HDUList for a psf image.
blank : bool
Cause the output image to be blanked where islands are found.
docov : bool
If True then include covariance matrix in the fitting process. (default=True)
cube_index : int
For image cubes, cube_index determines which slice is used.
Returns
-------
sources : list
List of sources found.
"""
# Tell numpy to be quiet
np.seterr(invalid='ignore')
if cores is not None:
if not (cores >= 1): raise AssertionError("cores must be one or more")
self.load_globals(filename, hdu_index=hdu_index, bkgin=bkgin, rmsin=rmsin, beam=beam, rms=rms, bkg=bkg, cores=cores,
verb=True, mask=mask, lat=lat, psf=imgpsf, blank=blank, docov=docov, cube_index=cube_index)
global_data = self.global_data
rmsimg = global_data.rmsimg
data = global_data.data_pix
self.log.info("beam = {0:5.2f}'' x {1:5.2f}'' at {2:5.2f}deg".format(
global_data.beam.a * 3600, global_data.beam.b * 3600, global_data.beam.pa))
# stop people from doing silly things.
if outerclip > innerclip:
outerclip = innerclip
self.log.info("seedclip={0}".format(innerclip))
self.log.info("floodclip={0}".format(outerclip))
isle_num = 0
if cores == 1: # single-threaded, no parallel processing
queue = []
else:
queue = pprocess.Queue(limit=cores, reuse=1)
fit_parallel = queue.manage(pprocess.MakeReusable(self._fit_islands))
island_group = []
group_size = 20
for i, xmin, xmax, ymin, ymax in self._gen_flood_wrap(data, rmsimg, innerclip, outerclip, domask=True):
# ignore empty islands
# This should now be impossible to trigger
if np.size(i) < 1:
self.log.warn("Empty island detected, this should be imposisble.")
continue
isle_num += 1
scalars = (innerclip, outerclip, max_summits)
offsets = (xmin, xmax, ymin, ymax)
island_data = IslandFittingData(isle_num, i, scalars, offsets, doislandflux)
# If cores==1 run fitting in main process. Otherwise build up groups of islands
# and submit to queue for subprocesses. Passing a group of islands is more
# efficient than passing single islands to the subprocesses.
if cores == 1:
res = self._fit_island(island_data)
queue.append(res)
else:
island_group.append(island_data)
# If the island group is full queue it for the subprocesses to fit
if len(island_group) >= group_size:
fit_parallel(island_group)
island_group = []
# The last partially-filled island group also needs to be queued for fitting
if len(island_group) > 0:
fit_parallel(island_group)
# Write the output to the output file
if outfile:
print(header.format("{0}-({1})".format(__version__, __date__), filename), file=outfile)
print(OutputSource.header, file=outfile)
sources = []
for srcs in queue:
if srcs: # ignore empty lists
for src in srcs:
# ignore sources that we have been told to ignore
if (src.peak_flux > 0 and nopositive) or (src.peak_flux < 0 and nonegative):
continue
sources.append(src)
if outfile:
print(str(src), file=outfile)
self.sources.extend(sources)
return sources | Run the Aegean source finder.
Parameters
----------
filename : str or HDUList
Image filename or HDUList.
hdu_index : int
The index of the FITS HDU (extension).
outfile : str
file for printing catalog (NOT a table, just a text file of my own design)
rms : float
Use this rms for the entire image (will also assume that background is 0)
max_summits : int
Fit up to this many components to each island (extras are included but not fit)
innerclip, outerclip : float
The seed (inner) and flood (outer) clipping level (sigmas).
cores : int
Number of CPU cores to use. None means all cores.
rmsin, bkgin : str or HDUList
Filename or HDUList for the noise and background images.
If either are None, then it will be calculated internally.
beam : (major, minor, pa)
Floats representing the synthesised beam (degrees).
Replaces whatever is given in the FITS header.
If the FITS header has no BMAJ/BMIN then this is required.
doislandflux : bool
If True then each island will also be characterized.
nopositive, nonegative : bool
Whether to return positive or negative sources.
Default nopositive=False, nonegative=False.
mask : str
The filename of a region file created by MIMAS.
Islands outside of this region will be ignored.
lat : float
The latitude of the telescope (declination of zenith).
imgpsf : str or HDUList
Filename or HDUList for a psf image.
blank : bool
Cause the output image to be blanked where islands are found.
docov : bool
If True then include covariance matrix in the fitting process. (default=True)
cube_index : int
For image cubes, cube_index determines which slice is used.
Returns
-------
sources : list
List of sources found. | entailment |
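A minimal usage sketch for the method documented above. The class name and import path are assumptions here (the method lives on Aegean's source-finder class, assumed below to be SourceFinder in AegeanTools.source_finder); the keyword arguments follow the parameter list above.

# Hedged sketch: the class/module names are assumptions, not a confirmed API.
from AegeanTools.source_finder import SourceFinder

sf = SourceFinder()
with open('image_comp.txt', 'w') as out:
    found = sf.find_sources_in_image('image.fits', outfile=out,
                                     innerclip=5, outerclip=4, cores=1)
print("found {0} components".format(len(found)))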
def priorized_fit_islands(self, filename, catalogue, hdu_index=0, outfile=None, bkgin=None, rmsin=None, cores=1,
rms=None, bkg=None, beam=None, lat=None, imgpsf=None, catpsf=None, stage=3, ratio=None, outerclip=3,
doregroup=True, docov=True, cube_index=None):
"""
Take an input catalog, and image, and optional background/noise images
        then fit the flux and ra/dec for each of the given sources, keeping the morphology fixed.
        If doregroup is True, the groups will be recreated based on a matching radius/probability.
        If doregroup is False, the islands of the input catalog will be preserved.
Multiple cores can be specified, and will be used.
Parameters
----------
filename : str or HDUList
Image filename or HDUList.
catalogue : str or list
Input catalogue file name or list of OutputSource objects.
hdu_index : int
The index of the FITS HDU (extension).
outfile : str
file for printing catalog (NOT a table, just a text file of my own design)
rmsin, bkgin : str or HDUList
Filename or HDUList for the noise and background images.
If either are None, then it will be calculated internally.
cores : int
Number of CPU cores to use. None means all cores.
rms : float
Use this rms for the entire image (will also assume that background is 0)
beam : (major, minor, pa)
Floats representing the synthesised beam (degrees).
Replaces whatever is given in the FITS header.
If the FITS header has no BMAJ/BMIN then this is required.
lat : float
The latitude of the telescope (declination of zenith).
imgpsf : str or HDUList
Filename or HDUList for a psf image.
catpsf : str or HDUList
Filename or HDUList for the catalogue psf image.
stage : int
Refitting stage
ratio : float
If not None - ratio of image psf to catalog psf, otherwise interpret from catalogue or image if possible
innerclip, outerclip : float
The seed (inner) and flood (outer) clipping level (sigmas).
docov : bool
If True then include covariance matrix in the fitting process. (default=True)
cube_index : int
            For image cubes, cube_index determines which slice is used.
Returns
-------
sources : list
List of sources measured.
"""
from AegeanTools.cluster import regroup
self.load_globals(filename, hdu_index=hdu_index, bkgin=bkgin, rmsin=rmsin, rms=rms, bkg=bkg, cores=cores, verb=True,
do_curve=False, beam=beam, lat=lat, psf=imgpsf, docov=docov, cube_index=cube_index)
global_data = self.global_data
far = 10 * global_data.beam.a # degrees
# load the table and convert to an input source list
if isinstance(catalogue, six.string_types):
input_table = load_table(catalogue)
input_sources = np.array(table_to_source_list(input_table))
else:
input_sources = np.array(catalogue)
if len(input_sources) < 1:
self.log.debug("No input sources for priorized fitting")
return []
# reject sources with missing params
ok = True
for param in ['ra', 'dec', 'peak_flux', 'a', 'b', 'pa']:
if np.isnan(getattr(input_sources[0], param)):
self.log.info("Source 0, is missing param '{0}'".format(param))
ok = False
if not ok:
self.log.error("Missing parameters! Not fitting.")
self.log.error("Maybe your table is missing or mis-labeled columns?")
return []
del ok
src_mask = np.ones(len(input_sources), dtype=bool)
# check to see if the input catalog contains psf information
has_psf = getattr(input_sources[0], 'psf_a', None) is not None
# the input sources are the initial conditions for our fits.
# Expand each source size if needed.
        # If ratio is provided we just scale the psf by this amount
if ratio is not None:
self.log.info("Using ratio of {0} to scale input source shapes".format(ratio))
far *= ratio
for i, src in enumerate(input_sources):
# Sources with an unknown psf are rejected as they are either outside the image
# or outside the region covered by the psf
skybeam = global_data.psfhelper.get_beam(src.ra, src.dec)
if skybeam is None:
src_mask[i] = False
self.log.info("Excluding source ({0.island},{0.source}) due to lack of psf knowledge".format(src))
continue
# the new source size is the previous size, convolved with the expanded psf
src.a = np.sqrt(src.a ** 2 + (skybeam.a * 3600) ** 2 * (1 - 1 / ratio ** 2))
src.b = np.sqrt(src.b ** 2 + (skybeam.b * 3600) ** 2 * (1 - 1 / ratio ** 2))
# source with funky a/b are also rejected
if not np.all(np.isfinite((src.a, src.b))):
self.log.info("Excluding source ({0.island},{0.source}) due to funky psf ({0.a},{0.b},{0.pa})".format(src))
src_mask[i] = False
# if we know the psf from the input catalogue (has_psf), or if it was provided via a psf map
# then we use that psf.
elif catpsf is not None or has_psf:
if catpsf is not None:
self.log.info("Using catalog PSF from {0}".format(catpsf))
psf_helper = PSFHelper(catpsf, None) # might need to set the WCSHelper to be not None
else:
self.log.info("Using catalog PSF from input catalog")
psf_helper = None
for i, src in enumerate(input_sources):
if (src.psf_a <=0) or (src.psf_b <=0):
src_mask[i] = False
self.log.info("Excluding source ({0.island},{0.source}) due to psf_a/b <=0".format(src))
continue
if has_psf:
catbeam = Beam(src.psf_a / 3600, src.psf_b / 3600, src.psf_pa)
else:
catbeam = psf_helper.get_beam(src.ra, src.dec)
imbeam = global_data.psfhelper.get_beam(src.ra, src.dec)
# If either of the above are None then we skip this source.
if catbeam is None or imbeam is None:
src_mask[i] = False
self.log.info("Excluding source ({0.island},{0.source}) due to lack of psf knowledge".format(src))
continue
# TODO: The following assumes that the various psf's are scaled versions of each other
# and makes no account for differing position angles. This needs to be checked and/or addressed.
# deconvolve the source shape from the catalogue psf
src.a = (src.a / 3600) ** 2 - catbeam.a ** 2 + imbeam.a ** 2 # degrees
# clip the minimum source shape to be the image psf
if src.a < 0:
src.a = imbeam.a * 3600 # arcsec
else:
src.a = np.sqrt(src.a) * 3600 # arcsec
src.b = (src.b / 3600) ** 2 - catbeam.b ** 2 + imbeam.b ** 2
if src.b < 0:
src.b = imbeam.b * 3600 # arcsec
else:
src.b = np.sqrt(src.b) * 3600 # arcsec
else:
self.log.info("Not scaling input source sizes")
self.log.info("{0} sources in catalog".format(len(input_sources)))
self.log.info("{0} sources accepted".format(sum(src_mask)))
        if sum(src_mask) < 1:
self.log.debug("No sources accepted for priorized fitting")
return []
input_sources = input_sources[src_mask]
# redo the grouping if required
if doregroup:
groups = regroup(input_sources, eps=np.sqrt(2), far=far)
else:
groups = list(island_itergen(input_sources))
if cores == 1: # single-threaded, no parallel processing
queue = []
else:
queue = pprocess.Queue(limit=cores, reuse=1)
fit_parallel = queue.manage(pprocess.MakeReusable(self._refit_islands))
sources = []
island_group = []
group_size = 20
for i, island in enumerate(groups):
island_group.append(island)
# If the island group is full queue it for the subprocesses to fit
if len(island_group) >= group_size:
if cores > 1:
fit_parallel(island_group, stage, outerclip, istart=i)
else:
res = self._refit_islands(island_group, stage, outerclip, istart=i)
queue.append(res)
island_group = []
# The last partially-filled island group also needs to be queued for fitting
if len(island_group) > 0:
if cores > 1:
fit_parallel(island_group, stage, outerclip, istart=i)
else:
res = self._refit_islands(island_group, stage, outerclip, istart=i)
queue.append(res)
# now unpack the fitting results in to a list of sources
for s in queue:
sources.extend(s)
sources = sorted(sources)
# Write the output to the output file
if outfile:
print(header.format("{0}-({1})".format(__version__, __date__), filename), file=outfile)
print(OutputSource.header, file=outfile)
components = 0
for source in sources:
if isinstance(source, OutputSource):
components += 1
if outfile:
print(str(source), file=outfile)
self.log.info("fit {0} components".format(components))
self.sources.extend(sources)
return sources | Take an input catalog, and image, and optional background/noise images
then fit the flux and ra/dec for each of the given sources, keeping the morphology fixed.
If doregroup is True, the groups will be recreated based on a matching radius/probability.
If doregroup is False, the islands of the input catalog will be preserved.
Multiple cores can be specified, and will be used.
Parameters
----------
filename : str or HDUList
Image filename or HDUList.
catalogue : str or list
Input catalogue file name or list of OutputSource objects.
hdu_index : int
The index of the FITS HDU (extension).
outfile : str
file for printing catalog (NOT a table, just a text file of my own design)
rmsin, bkgin : str or HDUList
Filename or HDUList for the noise and background images.
If either are None, then it will be calculated internally.
cores : int
Number of CPU cores to use. None means all cores.
rms : float
Use this rms for the entire image (will also assume that background is 0)
beam : (major, minor, pa)
Floats representing the synthesised beam (degrees).
Replaces whatever is given in the FITS header.
If the FITS header has no BMAJ/BMIN then this is required.
lat : float
The latitude of the telescope (declination of zenith).
imgpsf : str or HDUList
Filename or HDUList for a psf image.
catpsf : str or HDUList
Filename or HDUList for the catalogue psf image.
stage : int
Refitting stage
ratio : float
If not None - ratio of image psf to catalog psf, otherwise interpret from catalogue or image if possible
innerclip, outerclip : float
The seed (inner) and flood (outer) clipping level (sigmas).
docov : bool
If True then include covariance matrix in the fitting process. (default=True)
cube_index : int
    For image cubes, cube_index determines which slice is used.
Returns
-------
sources : list
List of sources measured. | entailment |
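A matching sketch for a priorized-fitting call, using only the parameters documented above; the SourceFinder class name, the import path, and the catalogue file name are assumptions.

# Hedged sketch: 'reference_comp.fits' stands in for a catalogue from an earlier blind-finding run.
from AegeanTools.source_finder import SourceFinder

sf = SourceFinder()
measured = sf.priorized_fit_islands('image.fits', catalogue='reference_comp.fits',
                                    stage=3, doregroup=True, outerclip=3, cores=1)
print("measured {0} sources".format(len(measured)))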
def check_table_formats(files):
"""
Determine whether a list of files are of a recognizable output type.
Parameters
----------
    files : str
        A comma-separated list of file names
Returns
-------
result : bool
True if *all* the file names are supported
"""
cont = True
formats = get_table_formats()
for t in files.split(','):
_, ext = os.path.splitext(t)
ext = ext[1:].lower()
if ext not in formats:
cont = False
log.warn("Format not supported for {0} ({1})".format(t, ext))
if not cont:
log.error("Invalid table format specified.")
return cont | Determine whether a list of files are of a recognizable output type.
Parameters
----------
files : str
    A comma-separated list of file names
Returns
-------
result : bool
True if *all* the file names are supported | entailment |
def show_formats():
"""
Print a list of all the file formats that are supported for writing.
The file formats are determined by their extensions.
Returns
-------
None
"""
    fmts = {
        "ann": "Kvis annotation",
        "reg": "DS9 regions file",
        "fits": "FITS Binary Table",
        "csv": "Comma separated values",
        "tab": "Tab separated values",
        "tex": "LaTeX table format",
        "html": "HTML table",
        "vot": "VO-Table",
        "xml": "VO-Table",
        "db": "Sqlite3 database",
        "sqlite": "Sqlite3 database"}
supported = get_table_formats()
print("Extension | Description | Supported?")
for k in sorted(fmts.keys()):
print("{0:10s} {1:24s} {2}".format(k, fmts[k], k in supported))
return | Print a list of all the file formats that are supported for writing.
The file formats are determined by their extensions.
Returns
-------
None | entailment |
def update_meta_data(meta=None):
"""
Modify the metadata dictionary.
DATE, PROGRAM, and PROGVER are added/modified.
Parameters
----------
meta : dict
The dictionary to be modified, default = None (empty)
Returns
-------
An updated dictionary.
"""
if meta is None:
meta = {}
if 'DATE' not in meta:
meta['DATE'] = strftime("%Y-%m-%d %H:%M:%S", gmtime())
if 'PROGRAM' not in meta:
meta['PROGRAM'] = "AegeanTools.catalogs"
meta['PROGVER'] = "{0}-({1})".format(__version__, __date__)
return meta | Modify the metadata dictionary.
DATE, PROGRAM, and PROGVER are added/modified.
Parameters
----------
meta : dict
The dictionary to be modified, default = None (empty)
Returns
-------
An updated dictionary. | entailment |
def save_catalog(filename, catalog, meta=None, prefix=None):
"""
Save a catalogue of sources using filename as a model. Meta data can be written to some file types
(fits, votable).
Each type of source will be in a separate file:
- base_comp.ext :class:`AegeanTools.models.OutputSource`
- base_isle.ext :class:`AegeanTools.models.IslandSource`
- base_simp.ext :class:`AegeanTools.models.SimpleSource`
Where filename = `base.ext`
Parameters
----------
filename : str
Name of file to write, format is determined by extension.
catalog : list
A list of sources to write. Sources must be of type :class:`AegeanTools.models.OutputSource`,
:class:`AegeanTools.models.SimpleSource`, or :class:`AegeanTools.models.IslandSource`.
prefix : str
Prepend each column name with "prefix_". Default is to prepend nothing.
meta : dict
Meta data to be written to the output file. Support for metadata depends on file type.
Returns
-------
None
"""
ascii_table_formats = {'csv': 'csv', 'tab': 'tab', 'tex': 'latex', 'html': 'html'}
# .ann and .reg are handled by me
meta = update_meta_data(meta)
extension = os.path.splitext(filename)[1][1:].lower()
if extension in ['ann', 'reg']:
writeAnn(filename, catalog, extension)
elif extension in ['db', 'sqlite']:
writeDB(filename, catalog, meta)
elif extension in ['hdf5', 'fits', 'vo', 'vot', 'xml']:
write_catalog(filename, catalog, extension, meta, prefix=prefix)
elif extension in ascii_table_formats.keys():
write_catalog(filename, catalog, fmt=ascii_table_formats[extension], meta=meta, prefix=prefix)
else:
log.warning("extension not recognised {0}".format(extension))
log.warning("You get tab format")
write_catalog(filename, catalog, fmt='tab', prefix=prefix)
return | Save a catalogue of sources using filename as a model. Meta data can be written to some file types
(fits, votable).
Each type of source will be in a separate file:
- base_comp.ext :class:`AegeanTools.models.OutputSource`
- base_isle.ext :class:`AegeanTools.models.IslandSource`
- base_simp.ext :class:`AegeanTools.models.SimpleSource`
Where filename = `base.ext`
Parameters
----------
filename : str
Name of file to write, format is determined by extension.
catalog : list
A list of sources to write. Sources must be of type :class:`AegeanTools.models.OutputSource`,
:class:`AegeanTools.models.SimpleSource`, or :class:`AegeanTools.models.IslandSource`.
prefix : str
Prepend each column name with "prefix_". Default is to prepend nothing.
meta : dict
Meta data to be written to the output file. Support for metadata depends on file type.
Returns
-------
None | entailment |
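A usage sketch, assuming `sources` is a list returned by one of the fitting routines: the extension selects the writer, and component/island/simple sources are split into separate files as described above.

from AegeanTools.catalogs import save_catalog

# 'sources' is assumed to come from a previous fitting call; file names are illustrative.
save_catalog('mycat.fits', sources, meta={'IMAGE': 'image.fits'})  # writes mycat_comp.fits (and mycat_isle.fits if islands are present)
save_catalog('mycat.reg', sources)                                 # DS9 annotations via writeAnn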
def load_catalog(filename):
"""
Load a catalogue and extract the source positions (only)
Parameters
----------
filename : str
Filename to read. Supported types are csv, tab, tex, vo, vot, and xml.
Returns
-------
catalogue : list
A list of [ (ra, dec), ...]
"""
supported = get_table_formats()
fmt = os.path.splitext(filename)[-1][1:].lower() # extension sans '.'
if fmt in ['csv', 'tab', 'tex'] and fmt in supported:
log.info("Reading file {0}".format(filename))
t = ascii.read(filename)
catalog = list(zip(t.columns['ra'], t.columns['dec']))
elif fmt in ['vo', 'vot', 'xml'] and fmt in supported:
log.info("Reading file {0}".format(filename))
t = parse_single_table(filename)
catalog = list(zip(t.array['ra'].tolist(), t.array['dec'].tolist()))
else:
log.info("Assuming ascii format, reading first two columns")
lines = [a.strip().split() for a in open(filename, 'r').readlines() if not a.startswith('#')]
try:
catalog = [(float(a[0]), float(a[1])) for a in lines]
except:
log.error("Expecting two columns of floats but failed to parse")
log.error("Catalog file {0} not loaded".format(filename))
raise Exception("Could not determine file format")
return catalog | Load a catalogue and extract the source positions (only)
Parameters
----------
filename : str
Filename to read. Supported types are csv, tab, tex, vo, vot, and xml.
Returns
-------
catalogue : list
A list of [ (ra, dec), ...] | entailment |
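For example, positions can also be read from a plain two-column text file (a sketch; the file name and contents are illustrative, and the import path is assumed to be AegeanTools.catalogs):

from AegeanTools.catalogs import load_catalog

# positions.txt holds lines like '123.456 -45.678'; lines starting with '#' are skipped.
positions = load_catalog('positions.txt')
for ra, dec in positions[:5]:
    print(ra, dec)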
def load_table(filename):
"""
Load a table from a given file.
Supports csv, tab, tex, vo, vot, xml, fits, and hdf5.
Parameters
----------
filename : str
File to read
Returns
-------
table : Table
Table of data.
"""
supported = get_table_formats()
fmt = os.path.splitext(filename)[-1][1:].lower() # extension sans '.'
if fmt in ['csv', 'tab', 'tex'] and fmt in supported:
log.info("Reading file {0}".format(filename))
t = ascii.read(filename)
elif fmt in ['vo', 'vot', 'xml', 'fits', 'hdf5'] and fmt in supported:
log.info("Reading file {0}".format(filename))
t = Table.read(filename)
else:
log.error("Table format not recognized or supported")
log.error("{0} [{1}]".format(filename, fmt))
raise Exception("Table format not recognized or supported")
return t | Load a table from a given file.
Supports csv, tab, tex, vo, vot, xml, fits, and hdf5.
Parameters
----------
filename : str
File to read
Returns
-------
table : Table
Table of data. | entailment |
def write_table(table, filename):
"""
Write a table to a file.
Parameters
----------
table : Table
Table to be written
filename : str
Destination for saving table.
Returns
-------
None
"""
try:
if os.path.exists(filename):
os.remove(filename)
table.write(filename)
log.info("Wrote {0}".format(filename))
    except Exception as e:
        if "Format could not be identified" not in str(e):
raise e
else:
fmt = os.path.splitext(filename)[-1][1:].lower() # extension sans '.'
raise Exception("Cannot auto-determine format for {0}".format(fmt))
return | Write a table to a file.
Parameters
----------
table : Table
Table to be written
filename : str
Destination for saving table.
Returns
-------
None | entailment |
def table_to_source_list(table, src_type=OutputSource):
"""
Convert a table of data into a list of sources.
A single table must have consistent source types given by src_type. src_type should be one of
:class:`AegeanTools.models.OutputSource`, :class:`AegeanTools.models.SimpleSource`,
or :class:`AegeanTools.models.IslandSource`.
Parameters
----------
table : Table
Table of sources
src_type : class
Sources must be of type :class:`AegeanTools.models.OutputSource`,
:class:`AegeanTools.models.SimpleSource`, or :class:`AegeanTools.models.IslandSource`.
Returns
-------
sources : list
A list of objects of the given type.
"""
source_list = []
if table is None:
return source_list
for row in table:
# Initialise our object
src = src_type()
# look for the columns required by our source object
for param in src_type.names:
if param in table.colnames:
# copy the value to our object
val = row[param]
# hack around float32's broken-ness
if isinstance(val, np.float32):
val = np.float64(val)
setattr(src, param, val)
# save this object to our list of sources
source_list.append(src)
return source_list | Convert a table of data into a list of sources.
A single table must have consistent source types given by src_type. src_type should be one of
:class:`AegeanTools.models.OutputSource`, :class:`AegeanTools.models.SimpleSource`,
or :class:`AegeanTools.models.IslandSource`.
Parameters
----------
table : Table
Table of sources
src_type : class
Sources must be of type :class:`AegeanTools.models.OutputSource`,
:class:`AegeanTools.models.SimpleSource`, or :class:`AegeanTools.models.IslandSource`.
Returns
-------
sources : list
A list of objects of the given type. | entailment |
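These two helpers are typically chained: read a component table from disk, then turn its rows into source objects (a sketch; the file name is illustrative and the columns are assumed to match the OutputSource attribute names):

from AegeanTools.catalogs import load_table, table_to_source_list

tab = load_table('mycat_comp.fits')
srcs = table_to_source_list(tab)  # OutputSource objects by default
print(srcs[0].island, srcs[0].source, srcs[0].peak_flux)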
def write_catalog(filename, catalog, fmt=None, meta=None, prefix=None):
"""
Write a catalog (list of sources) to a file with format determined by extension.
Sources must be of type :class:`AegeanTools.models.OutputSource`,
:class:`AegeanTools.models.SimpleSource`, or :class:`AegeanTools.models.IslandSource`.
Parameters
----------
filename : str
Base name for file to write. `_simp`, `_comp`, or `_isle` will be added to differentiate
the different types of sources that are being written.
catalog : list
A list of source objects. Sources must be of type :class:`AegeanTools.models.OutputSource`,
:class:`AegeanTools.models.SimpleSource`, or :class:`AegeanTools.models.IslandSource`.
fmt : str
The file format extension.
prefix : str
Prepend each column name with "prefix_". Default is to prepend nothing.
meta : dict
A dictionary to be used as metadata for some file types (fits, VOTable).
Returns
-------
None
"""
if meta is None:
meta = {}
if prefix is None:
pre=''
else:
pre = prefix + '_'
def writer(filename, catalog, fmt=None):
"""
construct a dict of the data
this method preserves the data types in the VOTable
"""
tab_dict = {}
name_list = []
for name in catalog[0].names:
col_name = name
if catalog[0].galactic:
if name.startswith('ra'):
col_name = 'lon'+name[2:]
elif name.endswith('ra'):
col_name = name[:-2] + 'lon'
elif name.startswith('dec'):
col_name = 'lat'+name[3:]
elif name.endswith('dec'):
col_name = name[:-3] + 'lat'
col_name = pre + col_name
tab_dict[col_name] = [getattr(c, name, None) for c in catalog]
name_list.append(col_name)
t = Table(tab_dict, meta=meta)
# re-order the columns
t = t[[n for n in name_list]]
if fmt is not None:
if fmt in ["vot", "vo", "xml"]:
vot = from_table(t)
# description of this votable
vot.description = repr(meta)
writetoVO(vot, filename)
elif fmt in ['hdf5']:
t.write(filename, path='data', overwrite=True)
elif fmt in ['fits']:
writeFITSTable(filename, t)
else:
ascii.write(t, filename, fmt, overwrite=True)
else:
ascii.write(t, filename, overwrite=True)
return
# sort the sources into types and then write them out individually
components, islands, simples = classify_catalog(catalog)
if len(components) > 0:
new_name = "{1}{0}{2}".format('_comp', *os.path.splitext(filename))
writer(new_name, components, fmt)
log.info("wrote {0}".format(new_name))
if len(islands) > 0:
new_name = "{1}{0}{2}".format('_isle', *os.path.splitext(filename))
writer(new_name, islands, fmt)
log.info("wrote {0}".format(new_name))
if len(simples) > 0:
new_name = "{1}{0}{2}".format('_simp', *os.path.splitext(filename))
writer(new_name, simples, fmt)
log.info("wrote {0}".format(new_name))
return | Write a catalog (list of sources) to a file with format determined by extension.
Sources must be of type :class:`AegeanTools.models.OutputSource`,
:class:`AegeanTools.models.SimpleSource`, or :class:`AegeanTools.models.IslandSource`.
Parameters
----------
filename : str
Base name for file to write. `_simp`, `_comp`, or `_isle` will be added to differentiate
the different types of sources that are being written.
catalog : list
A list of source objects. Sources must be of type :class:`AegeanTools.models.OutputSource`,
:class:`AegeanTools.models.SimpleSource`, or :class:`AegeanTools.models.IslandSource`.
fmt : str
The file format extension.
prefix : str
Prepend each column name with "prefix_". Default is to prepend nothing.
meta : dict
A dictionary to be used as metadata for some file types (fits, VOTable).
Returns
-------
None | entailment |
def writeFITSTable(filename, table):
"""
Convert a table into a FITSTable and then write to disk.
Parameters
----------
filename : str
Filename to write.
table : Table
Table to write.
Returns
-------
None
Notes
-----
Due to a bug in numpy, `int32` and `float32` are converted to `int64` and `float64` before writing.
"""
def FITSTableType(val):
"""
Return the FITSTable type corresponding to each named parameter in obj
"""
if isinstance(val, bool):
types = "L"
elif isinstance(val, (int, np.int64, np.int32)):
types = "J"
elif isinstance(val, (float, np.float64, np.float32)):
types = "E"
elif isinstance(val, six.string_types):
types = "{0}A".format(len(val))
else:
log.warning("Column {0} is of unknown type {1}".format(val, type(val)))
log.warning("Using 5A")
types = "5A"
return types
cols = []
for name in table.colnames:
cols.append(fits.Column(name=name, format=FITSTableType(table[name][0]), array=table[name]))
cols = fits.ColDefs(cols)
tbhdu = fits.BinTableHDU.from_columns(cols)
for k in table.meta:
tbhdu.header['HISTORY'] = ':'.join((k, table.meta[k]))
tbhdu.writeto(filename, overwrite=True) | Convert a table into a FITSTable and then write to disk.
Parameters
----------
filename : str
Filename to write.
table : Table
Table to write.
Returns
-------
None
Notes
-----
Due to a bug in numpy, `int32` and `float32` are converted to `int64` and `float64` before writing. | entailment |
def writeIslandContours(filename, catalog, fmt='reg'):
"""
Write an output file in ds9 .reg format that outlines the boundaries of each island.
Parameters
----------
filename : str
Filename to write.
catalog : list
List of sources. Only those of type :class:`AegeanTools.models.IslandSource` will have contours drawn.
fmt : str
Output format type. Currently only 'reg' is supported (default)
Returns
-------
None
See Also
--------
:func:`AegeanTools.catalogs.writeIslandBoxes`
"""
if fmt != 'reg':
log.warning("Format {0} not yet supported".format(fmt))
log.warning("not writing anything")
return
out = open(filename, 'w')
print("#Aegean island contours", file=out)
print("#AegeanTools.catalogs version {0}-({1})".format(__version__, __date__), file=out)
line_fmt = 'image;line({0},{1},{2},{3})'
text_fmt = 'fk5; text({0},{1}) # text={{{2}}}'
mas_fmt = 'image; line({1},{0},{3},{2}) #color = yellow'
x_fmt = 'image; point({1},{0}) # point=x'
for c in catalog:
contour = c.contour
if len(contour) > 1:
for p1, p2 in zip(contour[:-1], contour[1:]):
print(line_fmt.format(p1[1] + 0.5, p1[0] + 0.5, p2[1] + 0.5, p2[0] + 0.5), file=out)
print(line_fmt.format(contour[-1][1] + 0.5, contour[-1][0] + 0.5, contour[0][1] + 0.5,
contour[0][0] + 0.5), file=out)
# comment out lines that have invalid ra/dec (WCS problems)
if np.nan in [c.ra, c.dec]:
print('#', end=' ', file=out)
# some islands may not have anchors because they don't have any contours
if len(c.max_angular_size_anchors) == 4:
print(text_fmt.format(c.ra, c.dec, c.island), file=out)
print(mas_fmt.format(*[a + 0.5 for a in c.max_angular_size_anchors]), file=out)
for p1, p2 in c.pix_mask:
# DS9 uses 1-based instead of 0-based indexing
print(x_fmt.format(p1 + 1, p2 + 1), file=out)
out.close()
return | Write an output file in ds9 .reg format that outlines the boundaries of each island.
Parameters
----------
filename : str
Filename to write.
catalog : list
List of sources. Only those of type :class:`AegeanTools.models.IslandSource` will have contours drawn.
fmt : str
Output format type. Currently only 'reg' is supported (default)
Returns
-------
None
See Also
--------
:func:`AegeanTools.catalogs.writeIslandBoxes` | entailment |
def writeIslandBoxes(filename, catalog, fmt):
"""
Write an output file in ds9 .reg, or kvis .ann format that contains bounding boxes for all the islands.
Parameters
----------
filename : str
Filename to write.
catalog : list
List of sources. Only those of type :class:`AegeanTools.models.IslandSource` will have contours drawn.
fmt : str
Output format type. Currently only 'reg' and 'ann' are supported. Default = 'reg'.
Returns
-------
None
See Also
--------
:func:`AegeanTools.catalogs.writeIslandContours`
"""
if fmt not in ['reg', 'ann']:
        log.warning("Format not supported for island boxes: {0}".format(fmt))
return # fmt not supported
out = open(filename, 'w')
print("#Aegean Islands", file=out)
print("#Aegean version {0}-({1})".format(__version__, __date__), file=out)
if fmt == 'reg':
print("IMAGE", file=out)
box_fmt = 'box({0},{1},{2},{3}) #{4}'
else:
print("COORD P", file=out)
box_fmt = 'box P {0} {1} {2} {3} #{4}'
for c in catalog:
# x/y swap for pyfits/numpy translation
ymin, ymax, xmin, xmax = c.extent
# +1 for array/image offset
xcen = (xmin + xmax) / 2.0 + 1
# + 0.5 in each direction to make lines run 'between' DS9 pixels
xwidth = xmax - xmin + 1
ycen = (ymin + ymax) / 2.0 + 1
ywidth = ymax - ymin + 1
print(box_fmt.format(xcen, ycen, xwidth, ywidth, c.island), file=out)
out.close()
return | Write an output file in ds9 .reg, or kvis .ann format that contains bounding boxes for all the islands.
Parameters
----------
filename : str
Filename to write.
catalog : list
List of sources. Only those of type :class:`AegeanTools.models.IslandSource` will have contours drawn.
fmt : str
Output format type. Currently only 'reg' and 'ann' are supported. Default = 'reg'.
Returns
-------
None
See Also
--------
:func:`AegeanTools.catalogs.writeIslandContours` | entailment |
def writeAnn(filename, catalog, fmt):
"""
Write an annotation file that can be read by Kvis (.ann) or DS9 (.reg).
Uses ra/dec from catalog.
Draws ellipses if bmaj/bmin/pa are in catalog. Draws 30" circles otherwise.
Only :class:`AegeanTools.models.OutputSource` will appear in the annotation file
unless there are none, in which case :class:`AegeanTools.models.SimpleSource` (if present)
will be written. If any :class:`AegeanTools.models.IslandSource` objects are present then
an island contours file will be written.
Parameters
----------
filename : str
Output filename base.
catalog : list
List of sources.
fmt : ['ann', 'reg']
Output file type.
Returns
-------
None
See Also
--------
AegeanTools.catalogs.writeIslandContours
"""
if fmt not in ['reg', 'ann']:
        log.warning("Format not supported for annotations: {0}".format(fmt))
return # fmt not supported
components, islands, simples = classify_catalog(catalog)
if len(components) > 0:
cat = sorted(components)
suffix = "comp"
elif len(simples) > 0:
cat = simples
suffix = "simp"
else:
cat = []
if len(cat) > 0:
ras = [a.ra for a in cat]
decs = [a.dec for a in cat]
if not hasattr(cat[0], 'a'): # a being the variable that I used for bmaj.
bmajs = [30 / 3600.0 for a in cat]
bmins = bmajs
pas = [0 for a in cat]
else:
bmajs = [a.a / 3600.0 for a in cat]
bmins = [a.b / 3600.0 for a in cat]
pas = [a.pa for a in cat]
names = [a.__repr__() for a in cat]
if fmt == 'ann':
new_file = re.sub('.ann$', '_{0}.ann'.format(suffix), filename)
out = open(new_file, 'w')
print("#Aegean version {0}-({1})".format(__version__, __date__), file=out)
print('PA SKY', file=out)
print('FONT hershey12', file=out)
print('COORD W', file=out)
formatter = "ELLIPSE W {0} {1} {2} {3} {4:+07.3f} #{5}\nTEXT W {0} {1} {5}"
else: # reg
new_file = re.sub('.reg$', '_{0}.reg'.format(suffix), filename)
out = open(new_file, 'w')
print("#Aegean version {0}-({1})".format(__version__, __date__), file=out)
print("fk5", file=out)
formatter = 'ellipse {0} {1} {2:.9f}d {3:.9f}d {4:+07.3f}d # text="{5}"'
# DS9 has some strange ideas about position angle
pas = [a - 90 for a in pas]
for ra, dec, bmaj, bmin, pa, name in zip(ras, decs, bmajs, bmins, pas, names):
# comment out lines that have invalid or stupid entries
if np.nan in [ra, dec, bmaj, bmin, pa] or bmaj >= 180:
print('#', end=' ', file=out)
print(formatter.format(ra, dec, bmaj, bmin, pa, name), file=out)
out.close()
log.info("wrote {0}".format(new_file))
if len(islands) > 0:
if fmt == 'reg':
new_file = re.sub('.reg$', '_isle.reg', filename)
elif fmt == 'ann':
log.warning('kvis islands are currently not working')
return
else:
log.warning('format {0} not supported for island annotations'.format(fmt))
return
writeIslandContours(new_file, islands, fmt)
log.info("wrote {0}".format(new_file))
return | Write an annotation file that can be read by Kvis (.ann) or DS9 (.reg).
Uses ra/dec from catalog.
Draws ellipses if bmaj/bmin/pa are in catalog. Draws 30" circles otherwise.
Only :class:`AegeanTools.models.OutputSource` will appear in the annotation file
unless there are none, in which case :class:`AegeanTools.models.SimpleSource` (if present)
will be written. If any :class:`AegeanTools.models.IslandSource` objects are present then
an island contours file will be written.
Parameters
----------
filename : str
Output filename base.
catalog : list
List of sources.
fmt : ['ann', 'reg']
Output file type.
Returns
-------
None
See Also
--------
AegeanTools.catalogs.writeIslandContours | entailment |
def writeDB(filename, catalog, meta=None):
"""
Output an sqlite3 database containing one table for each source type
Parameters
----------
filename : str
Output filename
catalog : list
List of sources of type :class:`AegeanTools.models.OutputSource`,
:class:`AegeanTools.models.SimpleSource`, or :class:`AegeanTools.models.IslandSource`.
meta : dict
Meta data to be written to table `meta`
Returns
-------
None
"""
def sqlTypes(obj, names):
"""
Return the sql type corresponding to each named parameter in obj
"""
types = []
for n in names:
val = getattr(obj, n)
if isinstance(val, bool):
types.append("BOOL")
elif isinstance(val, (int, np.int64, np.int32)):
types.append("INT")
elif isinstance(val, (float, np.float64, np.float32)): # float32 is bugged and claims not to be a float
types.append("FLOAT")
elif isinstance(val, six.string_types):
types.append("VARCHAR")
else:
                log.warning("Column {0} is of unknown type {1}".format(n, type(val)))
log.warning("Using VARCHAR")
types.append("VARCHAR")
return types
if os.path.exists(filename):
log.warning("overwriting {0}".format(filename))
os.remove(filename)
conn = sqlite3.connect(filename)
db = conn.cursor()
# determine the column names by inspecting the catalog class
for t, tn in zip(classify_catalog(catalog), ["components", "islands", "simples"]):
if len(t) < 1:
continue #don't write empty tables
col_names = t[0].names
col_types = sqlTypes(t[0], col_names)
stmnt = ','.join(["{0} {1}".format(a, b) for a, b in zip(col_names, col_types)])
db.execute('CREATE TABLE {0} ({1})'.format(tn, stmnt))
stmnt = 'INSERT INTO {0} ({1}) VALUES ({2})'.format(tn, ','.join(col_names), ','.join(['?' for i in col_names]))
        # expand the iterators that are created by python 3+
data = list(map(nulls, list(r.as_list() for r in t)))
db.executemany(stmnt, data)
log.info("Created table {0}".format(tn))
    # add some meta data to the database
db.execute("CREATE TABLE meta (key VARCHAR, val VARCHAR)")
for k in meta:
db.execute("INSERT INTO meta (key, val) VALUES (?,?)", (k, meta[k]))
conn.commit()
log.info(db.execute("SELECT name FROM sqlite_master WHERE type='table';").fetchall())
conn.close()
log.info("Wrote file {0}".format(filename))
return | Output an sqlite3 database containing one table for each source type
Parameters
----------
filename : str
Output filename
catalog : list
List of sources of type :class:`AegeanTools.models.OutputSource`,
:class:`AegeanTools.models.SimpleSource`, or :class:`AegeanTools.models.IslandSource`.
meta : dict
Meta data to be written to table `meta`
Returns
-------
None | entailment |
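A sketch of reading the resulting database back with the standard library; the table name follows the function above, and the column names are assumed to match the OutputSource attributes:

import sqlite3

conn = sqlite3.connect('mycat.db')
cur = conn.cursor()
cur.execute("SELECT ra, dec, peak_flux FROM components LIMIT 5")
for row in cur.fetchall():
    print(row)
conn.close()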
def norm_dist(src1, src2):
"""
Calculate the normalised distance between two sources.
Sources are elliptical Gaussians.
The normalised distance is calculated as the GCD distance between the centers,
    divided by the quadrature sum of the radius of each ellipse along a line joining the two ellipses.
For ellipses that touch at a single point, the normalized distance will be 1/sqrt(2).
Parameters
----------
src1, src2 : object
The two positions to compare. Objects must have the following parameters: (ra, dec, a, b, pa).
Returns
-------
dist: float
The normalised distance.
"""
if np.all(src1 == src2):
return 0
dist = gcd(src1.ra, src1.dec, src2.ra, src2.dec) # degrees
# the angle between the ellipse centers
phi = bear(src1.ra, src1.dec, src2.ra, src2.dec) # Degrees
# Calculate the radius of each ellipse along a line that joins their centers.
r1 = src1.a*src1.b / np.hypot(src1.a * np.sin(np.radians(phi - src1.pa)),
src1.b * np.cos(np.radians(phi - src1.pa)))
r2 = src2.a*src2.b / np.hypot(src2.a * np.sin(np.radians(180 + phi - src2.pa)),
src2.b * np.cos(np.radians(180 + phi - src2.pa)))
R = dist / (np.hypot(r1, r2) / 3600)
return R | Calculate the normalised distance between two sources.
Sources are elliptical Gaussians.
The normalised distance is calculated as the GCD distance between the centers,
divided by the quadrature sum of the radius of each ellipse along a line joining the two ellipses.
For ellipses that touch at a single point, the normalized distance will be 1/sqrt(2).
Parameters
----------
src1, src2 : object
The two positions to compare. Objects must have the following parameters: (ra, dec, a, b, pa).
Returns
-------
dist: float
The normalised distance. | entailment |
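Because norm_dist only needs attribute access to (ra, dec, a, b, pa), it can be exercised with stand-in objects (a sketch with illustrative values; a and b are in arcsec as in the catalogue convention):

from types import SimpleNamespace
from AegeanTools.cluster import norm_dist

s1 = SimpleNamespace(ra=180.0, dec=-30.0, a=30.0, b=20.0, pa=0.0)
s2 = SimpleNamespace(ra=180.01, dec=-30.0, a=30.0, b=20.0, pa=0.0)
print(norm_dist(s1, s2))  # smaller values indicate more strongly overlapping ellipses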
def sky_dist(src1, src2):
"""
Great circle distance between two sources.
A check is made to determine if the two sources are the same object, in this case
the distance is zero.
Parameters
----------
src1, src2 : object
Two sources to check. Objects must have parameters (ra,dec) in degrees.
Returns
-------
distance : float
The distance between the two sources.
See Also
--------
:func:`AegeanTools.angle_tools.gcd`
"""
if np.all(src1 == src2):
return 0
return gcd(src1.ra, src1.dec, src2.ra, src2.dec) | Great circle distance between two sources.
A check is made to determine if the two sources are the same object, in this case
the distance is zero.
Parameters
----------
src1, src2 : object
Two sources to check. Objects must have parameters (ra,dec) in degrees.
Returns
-------
distance : float
The distance between the two sources.
See Also
--------
:func:`AegeanTools.angle_tools.gcd` | entailment |
def pairwise_ellpitical_binary(sources, eps, far=None):
"""
Do a pairwise comparison of all sources and determine if they have a normalized distance within
eps.
Form this into a matrix of shape NxN.
Parameters
----------
sources : list
A list of sources (objects with parameters: ra,dec,a,b,pa)
eps : float
Normalised distance constraint.
far : float
If sources have a dec that differs by more than this amount then they are considered to be not matched.
This is a short-cut around performing GCD calculations.
Returns
-------
prob : numpy.ndarray
A 2d array of True/False.
See Also
--------
:func:`AegeanTools.cluster.norm_dist`
"""
if far is None:
far = max(a.a/3600 for a in sources)
l = len(sources)
distances = np.zeros((l, l), dtype=bool)
for i in range(l):
for j in range(i, l):
if i == j:
distances[i, j] = False
continue
src1 = sources[i]
src2 = sources[j]
if src2.dec - src1.dec > far:
break
if abs(src2.ra - src1.ra)*np.cos(np.radians(src1.dec)) > far:
continue
distances[i, j] = norm_dist(src1, src2) > eps
distances[j, i] = distances[i, j]
return distances | Do a pairwise comparison of all sources and determine if they have a normalized distance within
eps.
Form this into a matrix of shape NxN.
Parameters
----------
sources : list
A list of sources (objects with parameters: ra,dec,a,b,pa)
eps : float
Normalised distance constraint.
far : float
If sources have a dec that differs by more than this amount then they are considered to be not matched.
This is a short-cut around performing GCD calculations.
Returns
-------
prob : numpy.ndarray
A 2d array of True/False.
See Also
--------
:func:`AegeanTools.cluster.norm_dist` | entailment |
def regroup_vectorized(srccat, eps, far=None, dist=norm_dist):
"""
Regroup the islands of a catalog according to their normalised distance.
Assumes srccat is recarray-like for efficiency.
Return a list of island groups.
Parameters
----------
    srccat : np.rec.array or pd.DataFrame
Should have the following fields[units]:
ra[deg],dec[deg], a[arcsec],b[arcsec],pa[deg], peak_flux[any]
eps : float
maximum normalised distance within which sources are considered to be
grouped
far : float
(degrees) sources that are further than this distance apart will not
be grouped, and will not be tested.
Default = 0.5.
dist : func
a function that calculates the distance between a source and each
element of an array of sources.
Default = :func:`AegeanTools.cluster.norm_dist`
Returns
-------
islands : list of lists
        Each island contains integer indices for members from srccat
(in descending dec order).
"""
if far is None:
far = 0.5 # 10*max(a.a/3600 for a in srccat)
# most negative declination first
# XXX: kind='mergesort' ensures stable sorting for determinism.
# Do we need this?
order = np.argsort(srccat.dec, kind='mergesort')[::-1]
# TODO: is it better to store groups as arrays even if appends are more
# costly?
groups = [[order[0]]]
for idx in order[1:]:
rec = srccat[idx]
# TODO: Find out if groups are big enough for this to give us a speed
# gain. If not, get distance to all entries in groups above
# decmin simultaneously.
decmin = rec.dec - far
for group in reversed(groups):
# when an island's largest (last) declination is smaller than
# decmin, we don't need to look at any more islands
if srccat.dec[group[-1]] < decmin:
# new group
                groups.append([idx])
                break
rafar = far / np.cos(np.radians(rec.dec))
group_recs = np.take(srccat, group, mode='clip')
group_recs = group_recs[abs(rec.ra - group_recs.ra) <= rafar]
if len(group_recs) and dist(rec, group_recs).min() < eps:
group.append(idx)
break
else:
# new group
groups.append([idx])
# TODO?: a more numpy-like interface would return only an array providing
# the mapping:
# group_idx = np.empty(len(srccat), dtype=int)
# for i, group in enumerate(groups):
# group_idx[group] = i
# return group_idx
return groups | Regroup the islands of a catalog according to their normalised distance.
Assumes srccat is recarray-like for efficiency.
Return a list of island groups.
Parameters
----------
srccat : np.rec.array or pd.DataFrame
Should have the following fields[units]:
ra[deg],dec[deg], a[arcsec],b[arcsec],pa[deg], peak_flux[any]
eps : float
maximum normalised distance within which sources are considered to be
grouped
far : float
(degrees) sources that are further than this distance apart will not
be grouped, and will not be tested.
Default = 0.5.
dist : func
a function that calculates the distance between a source and each
element of an array of sources.
Default = :func:`AegeanTools.cluster.norm_dist`
Returns
-------
islands : list of lists
    Each island contains integer indices for members from srccat
(in descending dec order). | entailment |
def regroup(catalog, eps, far=None, dist=norm_dist):
"""
Regroup the islands of a catalog according to their normalised distance.
Return a list of island groups. Sources have their (island,source) parameters relabeled.
Parameters
----------
catalog : str or object
Either a filename to read into a source list, or a list of objects with the following properties[units]:
ra[deg],dec[deg], a[arcsec],b[arcsec],pa[deg], peak_flux[any]
eps : float
maximum normalised distance within which sources are considered to be grouped
far : float
        (degrees) sources that are further than this distance apart will not be grouped, and will not be tested.
Default = None.
dist : func
a function that calculates the distance between two sources must accept two SimpleSource objects.
Default = :func:`AegeanTools.cluster.norm_dist`
Returns
-------
islands : list
A list of islands. Each island is a list of sources.
See Also
--------
:func:`AegeanTools.cluster.norm_dist`
"""
if isinstance(catalog, str):
table = load_table(catalog)
srccat = table_to_source_list(table)
else:
try:
srccat = catalog
_ = catalog[0].ra, catalog[0].dec, catalog[0].a, catalog[0].b, catalog[0].pa, catalog[0].peak_flux
except AttributeError as e:
log.error("catalog is not understood.")
log.error("catalog: Should be a list of objects with the following properties[units]:\n" +
"ra[deg],dec[deg], a[arcsec],b[arcsec],pa[deg], peak_flux[any]")
raise e
log.info("Regrouping islands within catalog")
log.debug("Calculating distances")
if far is None:
far = 0.5 # 10*max(a.a/3600 for a in srccat)
srccat_array = np.rec.fromrecords(
[(s.ra, s.dec, s.a, s.b, s.pa, s.peak_flux)
for s in srccat],
names=['ra', 'dec', 'a', 'b', 'pa', 'peak_flux'])
groups = regroup_vectorized(srccat_array, eps=eps, far=far, dist=dist)
groups = [[srccat[idx] for idx in group]
for group in groups]
islands = []
# now that we have the groups, we relabel the sources to have (island,component) in flux order
# note that the order of sources within an island list is not changed - just their labels
for isle, group in enumerate(groups):
for comp, src in enumerate(sorted(group, key=lambda x: -1*x.peak_flux)):
src.island = isle
src.source = comp
islands.append(group)
return islands | Regroup the islands of a catalog according to their normalised distance.
Return a list of island groups. Sources have their (island,source) parameters relabeled.
Parameters
----------
catalog : str or object
Either a filename to read into a source list, or a list of objects with the following properties[units]:
ra[deg],dec[deg], a[arcsec],b[arcsec],pa[deg], peak_flux[any]
eps : float
maximum normalised distance within which sources are considered to be grouped
far : float
    (degrees) sources that are further than this distance apart will not be grouped, and will not be tested.
Default = None.
dist : func
a function that calculates the distance between two sources must accept two SimpleSource objects.
Default = :func:`AegeanTools.cluster.norm_dist`
Returns
-------
islands : list
A list of islands. Each island is a list of sources.
See Also
--------
:func:`AegeanTools.cluster.norm_dist` | entailment |
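A sketch of regrouping a component catalogue so that nearby Gaussians share an island label; eps=sqrt(2) matches the value used by the priorized-fitting code above, and the file name is illustrative:

import numpy as np
from AegeanTools.cluster import regroup

islands = regroup('mycat_comp.fits', eps=np.sqrt(2), far=0.5)
print("{0} islands from {1} components".format(
    len(islands), sum(len(group) for group in islands)))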
def load_file_or_hdu(filename):
"""
Load a file from disk and return an HDUList
If filename is already an HDUList return that instead
Parameters
----------
filename : str or HDUList
File or HDU to be loaded
Returns
-------
hdulist : HDUList
"""
if isinstance(filename, fits.HDUList):
hdulist = filename
else:
hdulist = fits.open(filename, ignore_missing_end=True)
return hdulist | Load a file from disk and return an HDUList
If filename is already an HDUList return that instead
Parameters
----------
filename : str or HDUList
File or HDU to be loaded
Returns
-------
hdulist : HDUList | entailment |
def compress(datafile, factor, outfile=None):
"""
Compress a file using decimation.
Parameters
----------
datafile : str or HDUList
Input data to be loaded. (HDUList will be modified if passed).
factor : int
Decimation factor.
outfile : str
File to be written. Default = None, which means don't write a file.
Returns
-------
hdulist : HDUList
A decimated HDUList
See Also
--------
:func:`AegeanTools.fits_interp.expand`
"""
if not (factor > 0 and isinstance(factor, int)):
logging.error("factor must be a positive integer")
return None
hdulist = load_file_or_hdu(datafile)
header = hdulist[0].header
data = np.squeeze(hdulist[0].data)
cx, cy = data.shape[0], data.shape[1]
nx = cx // factor
ny = cy // factor
# check to see if we will have some residual data points
lcx = cx % factor
lcy = cy % factor
if lcx > 0:
nx += 1
if lcy > 0:
ny += 1
# decimate the data
new_data = np.empty((nx + 1, ny + 1))
new_data[:nx, :ny] = data[::factor, ::factor]
# copy the last row/col across
new_data[-1, :ny] = data[-1, ::factor]
new_data[:nx, -1] = data[::factor, -1]
new_data[-1, -1] = data[-1, -1]
# TODO: Figure out what to do when CD2_1 and CD1_2 are non-zero
if 'CDELT1' in header:
header['CDELT1'] *= factor
elif 'CD1_1' in header:
header['CD1_1'] *= factor
else:
logging.error("Error: Can't find CDELT1 or CD1_1")
return None
if 'CDELT2' in header:
header['CDELT2'] *= factor
elif "CD2_2" in header:
header['CD2_2'] *= factor
else:
logging.error("Error: Can't find CDELT2 or CD2_2")
return None
# Move the reference pixel so that the WCS is correct
header['CRPIX1'] = (header['CRPIX1'] + factor - 1) / factor
header['CRPIX2'] = (header['CRPIX2'] + factor - 1) / factor
# Update the header so that we can do the correct interpolation later on
header['BN_CFAC'] = (factor, "Compression factor (grid size) used by BANE")
header['BN_NPX1'] = (header['NAXIS1'], 'original NAXIS1 value')
header['BN_NPX2'] = (header['NAXIS2'], 'original NAXIS2 value')
header['BN_RPX1'] = (lcx, 'Residual on axis 1')
header['BN_RPX2'] = (lcy, 'Residual on axis 2')
header['HISTORY'] = "Compressed by a factor of {0}".format(factor)
# save the changes
hdulist[0].data = np.array(new_data, dtype=np.float32)
hdulist[0].header = header
if outfile is not None:
hdulist.writeto(outfile, overwrite=True)
logging.info("Wrote: {0}".format(outfile))
return hdulist | Compress a file using decimation.
Parameters
----------
datafile : str or HDUList
Input data to be loaded. (HDUList will be modified if passed).
factor : int
Decimation factor.
outfile : str
File to be written. Default = None, which means don't write a file.
Returns
-------
hdulist : HDUList
A decimated HDUList
See Also
--------
:func:`AegeanTools.fits_interp.expand` | entailment |
def expand(datafile, outfile=None):
"""
Expand and interpolate the given data file using the given method.
Datafile can be a filename or an HDUList
It is assumed that the file has been compressed and that there are `BN_?` keywords in the
fits header that describe how the compression was done.
Parameters
----------
datafile : str or HDUList
filename or HDUList of file to work on
outfile : str
filename to write to (default = None)
Returns
-------
hdulist : HDUList
HDUList of the expanded data.
See Also
--------
:func:`AegeanTools.fits_interp.compress`
"""
hdulist = load_file_or_hdu(datafile)
header = hdulist[0].header
data = hdulist[0].data
# Check for the required key words, only expand if they exist
if not all(a in header for a in ['BN_CFAC', 'BN_NPX1', 'BN_NPX2', 'BN_RPX1', 'BN_RPX2']):
return hdulist
factor = header['BN_CFAC']
(gx, gy) = np.mgrid[0:header['BN_NPX2'], 0:header['BN_NPX1']]
# fix the last column of the grid to account for residuals
lcx = header['BN_RPX2']
lcy = header['BN_RPX1']
rows = (np.arange(data.shape[0]) + int(lcx/factor))*factor
cols = (np.arange(data.shape[1]) + int(lcy/factor))*factor
# Do the interpolation
hdulist[0].data = np.array(RegularGridInterpolator((rows,cols), data)((gx, gy)), dtype=np.float32)
# update the fits keywords so that the WCS is correct
header['CRPIX1'] = (header['CRPIX1'] - 1) * factor + 1
header['CRPIX2'] = (header['CRPIX2'] - 1) * factor + 1
if 'CDELT1' in header:
header['CDELT1'] /= factor
elif 'CD1_1' in header:
header['CD1_1'] /= factor
else:
logging.error("Error: Can't find CD1_1 or CDELT1")
return None
if 'CDELT2' in header:
header['CDELT2'] /= factor
elif "CD2_2" in header:
header['CD2_2'] /= factor
else:
logging.error("Error: Can't find CDELT2 or CD2_2")
return None
header['HISTORY'] = 'Expanded by factor {0}'.format(factor)
# don't need these any more so delete them.
del header['BN_CFAC'], header['BN_NPX1'], header['BN_NPX2'], header['BN_RPX1'], header['BN_RPX2']
hdulist[0].header = header
if outfile is not None:
hdulist.writeto(outfile, overwrite=True)
logging.info("Wrote: {0}".format(outfile))
return hdulist | Expand and interpolate the given data file using the given method.
Datafile can be a filename or an HDUList
It is assumed that the file has been compressed and that there are `BN_?` keywords in the
fits header that describe how the compression was done.
Parameters
----------
datafile : str or HDUList
filename or HDUList of file to work on
outfile : str
filename to write to (default = None)
Returns
-------
hdulist : HDUList
HDUList of the expanded data.
See Also
--------
:func:`AegeanTools.fits_interp.compress` | entailment |
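The two functions are designed to round-trip: compress records the BN_* keywords that expand later uses to interpolate back onto the original pixel grid (a sketch; file names are illustrative):

from AegeanTools.fits_interp import compress, expand

small = compress('image_bkg.fits', factor=10, outfile='image_bkg_small.fits')
full = expand(small, outfile='image_bkg_full.fits')
print(full[0].data.shape)  # matches the original NAXIS2 x NAXIS1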
def change_autocommit_mode(self, switch):
"""
Strip and make a string case insensitive and ensure it is either 'true' or 'false'.
If neither, prompt user for either value.
When 'true', return True, and when 'false' return False.
"""
parsed_switch = switch.strip().lower()
        if parsed_switch not in ['true', 'false']:
self.send_response(
self.iopub_socket, 'stream', {
'name': 'stderr',
'text': 'autocommit must be true or false.\n\n'
}
)
switch_bool = (parsed_switch == 'true')
committed = self.switch_autocommit(switch_bool)
        message = (
            ('committed current transaction & ' if committed else '') +
            'switched autocommit mode to ' +
            str(self._autocommit)
        )
self.send_response(
self.iopub_socket, 'stream', {
'name': 'stderr',
'text': message,
}
) | Strip and make a string case insensitive and ensure it is either 'true' or 'false'.
If neither, prompt user for either value.
When 'true', return True, and when 'false' return False. | entailment |
def armor(data, versioned=True):
"""
Returns a string in ASCII Armor format, for the given binary data. The
    output of this is compatible with pgcrypto's armor/dearmor functions.
"""
template = '-----BEGIN PGP MESSAGE-----\n%(headers)s%(body)s\n=%(crc)s\n-----END PGP MESSAGE-----'
body = base64.b64encode(data)
# The 24-bit CRC should be in big-endian, strip off the first byte (it's already masked in crc24).
crc = base64.b64encode(struct.pack('>L', crc24(data))[1:])
return template % {
'headers': 'Version: django-pgcrypto %s\n\n' % __version__ if versioned else '\n',
'body': body.decode('ascii'),
'crc': crc.decode('ascii'),
} | Returns a string in ASCII Armor format, for the given binary data. The
output of this is compatiple with pgcrypto's armor/dearmor functions. | entailment |
def dearmor(text, verify=True):
"""
Given a string in ASCII Armor format, returns the decoded binary data.
If verify=True (the default), the CRC is decoded and checked against that
of the decoded data, otherwise it is ignored. If the checksum does not
match, a BadChecksumError exception is raised.
"""
lines = text.strip().split('\n')
data_lines = []
check_data = None
started = False
in_body = False
for line in lines:
if line.startswith('-----BEGIN'):
started = True
elif line.startswith('-----END'):
break
elif started:
if in_body:
if line.startswith('='):
# Once we get the checksum data, we're done.
check_data = line[1:5].encode('ascii')
break
else:
# This is part of the base64-encoded data.
data_lines.append(line)
else:
if line.strip():
# This is a header line, which we basically ignore for now.
pass
else:
# The data starts after an empty line.
in_body = True
b64_str = ''.join(data_lines)
# Python 3's b64decode expects bytes, not a string. We know base64 is ASCII, though.
data = base64.b64decode(b64_str.encode('ascii'))
if verify and check_data:
# The 24-bit CRC is in big-endian, so we add a null byte to the beginning.
crc = struct.unpack('>L', b'\0' + base64.b64decode(check_data))[0]
if crc != crc24(data):
raise BadChecksumError()
return data | Given a string in ASCII Armor format, returns the decoded binary data.
If verify=True (the default), the CRC is decoded and checked against that
of the decoded data, otherwise it is ignored. If the checksum does not
match, a BadChecksumError exception is raised. | entailment |
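A round-trip sketch for the two helpers above (assuming armor and dearmor are in scope, e.g. imported from the pgcrypto package this code belongs to):

blob = b'some secret bytes'
text = armor(blob)             # ASCII-armored, pgcrypto-compatible output
assert dearmor(text) == blob   # raises BadChecksumError if the CRC does not match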
def unpad(text, block_size):
"""
Takes the last character of the text, and if it is less than the block_size,
assumes the text is padded, and removes any trailing zeros or bytes with the
value of the pad character. See http://www.di-mgt.com.au/cryptopad.html for
more information (methods 1, 3, and 4).
"""
end = len(text)
if end == 0:
return text
padch = ord_safe(text[end - 1])
if padch > block_size:
# If the last byte value is larger than the block size, it's not padded.
return text
while end > 0 and ord_safe(text[end - 1]) in (0, padch):
end -= 1
return text[:end] | Takes the last character of the text, and if it is less than the block_size,
assumes the text is padded, and removes any trailing zeros or bytes with the
value of the pad character. See http://www.di-mgt.com.au/cryptopad.html for
more information (methods 1, 3, and 4). | entailment |
def pad(text, block_size, zero=False):
"""
Given a text string and a block size, pads the text with bytes of the same value
as the number of padding bytes. This is the recommended method, and the one used
by pgcrypto. See http://www.di-mgt.com.au/cryptopad.html for more information.
"""
num = block_size - (len(text) % block_size)
ch = b'\0' if zero else chr(num).encode('latin-1')
return text + (ch * num) | Given a text string and a block size, pads the text with bytes of the same value
as the number of padding bytes. This is the recommended method, and the one used
by pgcrypto. See http://www.di-mgt.com.au/cryptopad.html for more information. | entailment |
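The padding helpers pair up as follows (a small sketch with block size 16, as used for AES):

padded = pad(b'hello', 16)            # appends 11 bytes, each of value 0x0b
assert len(padded) == 16
assert unpad(padded, 16) == b'hello'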
def aes_pad_key(key):
"""
AES keys must be either 16, 24, or 32 bytes long. If a key is provided that is not
one of these lengths, pad it with zeroes (this is what pgcrypto does).
"""
if len(key) in (16, 24, 32):
return key
if len(key) < 16:
return pad(key, 16, zero=True)
elif len(key) < 24:
return pad(key, 24, zero=True)
else:
return pad(key[:32], 32, zero=True) | AES keys must be either 16, 24, or 32 bytes long. If a key is provided that is not
one of these lengths, pad it with zeroes (this is what pgcrypto does). | entailment |
def deconstruct(self):
"""
Deconstruct the field for Django 1.7+ migrations.
"""
name, path, args, kwargs = super(BaseEncryptedField, self).deconstruct()
kwargs.update({
#'key': self.cipher_key,
'cipher': self.cipher_name,
'charset': self.charset,
'check_armor': self.check_armor,
'versioned': self.versioned,
})
return name, path, args, kwargs | Deconstruct the field for Django 1.7+ migrations. | entailment |
def get_cipher(self):
"""
Return a new Cipher object for each time we want to encrypt/decrypt. This is because
pgcrypto expects a zeroed block for IV (initial value), but the IV on the cipher
object is cumulatively updated each time encrypt/decrypt is called.
"""
return self.cipher_class.new(self.cipher_key, self.cipher_class.MODE_CBC, b'\0' * self.cipher_class.block_size) | Return a new Cipher object for each time we want to encrypt/decrypt. This is because
pgcrypto expects a zeroed block for IV (initial value), but the IV on the cipher
object is cumulatively updated each time encrypt/decrypt is called. | entailment |
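A minimal sketch of why a fresh cipher object is needed, using a PyCrypto-compatible AES module directly rather than the field's configured cipher_class; key and plaintext are illustrative:
from Crypto.Cipher import AES
key = b'0' * 16
enc = AES.new(key, AES.MODE_CBC, b'\0' * AES.block_size)
ct1 = enc.encrypt(b'A' * 16)
ct2 = enc.encrypt(b'A' * 16)   # same plaintext, different ciphertext: the IV state advanced
assert ct1 != ct2
dec = AES.new(key, AES.MODE_CBC, b'\0' * AES.block_size)
assert dec.decrypt(ct1) == b'A' * 16   # only a fresh zero-IV cipher reproduces the plaintext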
def find_packages_by_root_package(where):
"""Better than excluding everything that is not needed,
collect only what is needed.
"""
root_package = os.path.basename(where)
packages = [ "%s.%s" % (root_package, sub_package)
for sub_package in find_packages(where)]
packages.insert(0, root_package)
return packages | Better than excluding everything that is not needed,
collect only what is needed. | entailment |
def make_long_description(marker=None, intro=None):
"""
click_ is a framework to simplify writing composable commands for
command-line tools. This package extends the click_ functionality
by adding support for commands that use configuration files.
.. _click: https://click.pocoo.org/
EXAMPLE:
A configuration file, like:
.. code-block:: INI
# -- FILE: foo.ini
[foo]
flag = yes
name = Alice and Bob
numbers = 1 4 9 16 25
filenames = foo/xxx.txt
bar/baz/zzz.txt
[person.alice]
name = Alice
birthyear = 1995
[person.bob]
name = Bob
birthyear = 2001
can be processed with:
.. code-block:: python
# EXAMPLE:
"""
if intro is None:
intro = inspect.getdoc(make_long_description)
with open("README.rst", "r") as infile:
line = infile.readline()
while not line.strip().startswith(marker):
line = infile.readline()
# -- COLLECT REMAINING: Usage example
contents = infile.read()
text = intro +"\n" + contents
return text | click_ is a framework to simplify writing composable commands for
command-line tools. This package extends the click_ functionality
by adding support for commands that use configuration files.
.. _click: https://click.pocoo.org/
EXAMPLE:
A configuration file, like:
.. code-block:: INI
# -- FILE: foo.ini
[foo]
flag = yes
name = Alice and Bob
numbers = 1 4 9 16 25
filenames = foo/xxx.txt
bar/baz/zzz.txt
[person.alice]
name = Alice
birthyear = 1995
[person.bob]
name = Bob
birthyear = 2001
can be processed with:
.. code-block:: python
# EXAMPLE: | entailment |
def pubsub_pop_message(self, deadline=None):
"""Pops a message for a subscribed client.
Args:
deadline (int): max number of seconds to wait (None => no timeout)
Returns:
Future with the popped message as result (or None if timeout
or ConnectionError object in case of connection errors
or ClientError object if you are not subscribed)
"""
if not self.subscribed:
excep = ClientError("you must subscribe before using "
"pubsub_pop_message")
raise tornado.gen.Return(excep)
reply = None
try:
reply = self._reply_list.pop(0)
raise tornado.gen.Return(reply)
except IndexError:
pass
if deadline is not None:
td = timedelta(seconds=deadline)
yield self._condition.wait(timeout=td)
else:
yield self._condition.wait()
try:
reply = self._reply_list.pop(0)
except IndexError:
pass
raise tornado.gen.Return(reply) | Pops a message for a subscribed client.
Args:
deadline (int): max number of seconds to wait (None => no timeout)
Returns:
Future with the popped message as result (or None if timeout
or ConnectionError object in case of connection errors
or ClientError object if you are not subscribed) | entailment |
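A hypothetical caller written as a tornado coroutine; it assumes tornado.gen is importable as in the module above, that the client has already subscribed elsewhere, and the names are illustrative only:
@tornado.gen.coroutine
def pop_one(client):
    # Result may be None (timeout), an error object, or a message.
    msg = yield client.pubsub_pop_message(deadline=5)
    if msg is None or isinstance(msg, (ClientError, ConnectionError)):
        raise tornado.gen.Return(None)
    raise tornado.gen.Return(msg)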
def _get_flat_ids(assigned):
"""
This is a helper function to recover the coordinates of regions that have
been labeled within an image. This function efficiently computes the
coordinate of all regions and returns the information in a memory-efficient
manner.
Parameters
-----------
assigned : ndarray[ndim=2, dtype=int]
The labeled image. For example, the result of calling
scipy.ndimage.label on a binary image
Returns
--------
I : ndarray[ndim=1, dtype=int]
Array of 1d coordinate indices of all regions in the image
region_ids : ndarray[shape=[n_features + 1], dtype=int]
Indexing array used to separate the coordinates of the different
regions. For example, region k has xy coordinates of
xy[region_ids[k]:region_ids[k+1], :]
labels : ndarray[ndim=1, dtype=int]
The labels of the regions in the image corresponding to the coordinates
For example, assigned.ravel()[I[k]] == labels[k]
"""
# MPU optimization:
# Let's segment the regions and store in a sparse format
# First, let's use where once to find all the information we want
    ids_labels = np.arange(len(assigned.ravel()), dtype='int64')
I = ids_labels[assigned.ravel().astype(bool)]
labels = assigned.ravel()[I]
# Now sort these arrays by the label to figure out where to segment
sort_id = np.argsort(labels)
labels = labels[sort_id]
I = I[sort_id]
# this should be of size n_features-1
region_ids = np.where(labels[1:] - labels[:-1] > 0)[0] + 1
# This should be of size n_features + 1
region_ids = np.concatenate(([0], region_ids, [len(labels)]))
return [I, region_ids, labels] | This is a helper function to recover the coordinates of regions that have
been labeled within an image. This function efficiently computes the
coordinate of all regions and returns the information in a memory-efficient
manner.
Parameters
-----------
assigned : ndarray[ndim=2, dtype=int]
The labeled image. For example, the result of calling
scipy.ndimage.label on a binary image
Returns
--------
I : ndarray[ndim=1, dtype=int]
Array of 1d coordinate indices of all regions in the image
region_ids : ndarray[shape=[n_features + 1], dtype=int]
Indexing array used to separate the coordinates of the different
regions. For example, region k has xy coordinates of
xy[region_ids[k]:region_ids[k+1], :]
labels : ndarray[ndim=1, dtype=int]
The labels of the regions in the image corresponding to the coordinates
For example, assigned.ravel()[I[k]] == labels[k] | entailment |
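A minimal usage sketch with an illustrative binary image, assuming _get_flat_ids is importable from this module:
import numpy as np
from scipy.ndimage import label
binary = np.array([[0, 1, 1, 0],
                   [0, 0, 0, 1],
                   [1, 0, 0, 1]])
assigned, n_features = label(binary)
I, region_ids, labels = _get_flat_ids(assigned)
for k in range(n_features):
    # 1d indices (into assigned.ravel()) of the k'th labeled region
    flat_coords = I[region_ids[k]:region_ids[k + 1]]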
def _tarboton_slopes_directions(data, dX, dY, facets, ang_adj):
"""
Calculate the slopes and directions based on the 8 sections from
Tarboton http://www.neng.usu.edu/cee/faculty/dtarb/96wr03137.pdf
"""
shp = np.array(data.shape) - 1
direction = np.full(data.shape, FLAT_ID_INT, 'float64')
mag = np.full(data.shape, FLAT_ID_INT, 'float64')
slc0 = [slice(1, -1), slice(1, -1)]
for ind in xrange(8):
e1 = facets[ind][1]
e2 = facets[ind][2]
ang = ang_adj[ind]
slc1 = [slice(1 + e1[0], shp[0] + e1[0]),
slice(1 + e1[1], shp[1] + e1[1])]
slc2 = [slice(1 + e2[0], shp[0] + e2[0]),
slice(1 + e2[1], shp[1] + e2[1])]
d1, d2, theta = _get_d1_d2(dX, dY, ind, e1, e2, shp)
mag, direction = _calc_direction(data, mag, direction, ang, d1, d2,
theta, slc0, slc1, slc2)
# %%Now do the edges
# if the edge is lower than the interior, we need to copy the value
# from the interior (as an approximation)
ids1 = (direction[:, 1] > np.pi / 2) \
& (direction[:, 1] < 3 * np.pi / 2)
direction[ids1, 0] = direction[ids1, 1]
mag[ids1, 0] = mag[ids1, 1]
ids1 = (direction[:, -2] < np.pi / 2) \
| (direction[:, -2] > 3 * np.pi / 2)
direction[ids1, -1] = direction[ids1, -2]
mag[ids1, -1] = mag[ids1, -2]
ids1 = (direction[1, :] > 0) & (direction[1, :] < np.pi)
direction[0, ids1] = direction[1, ids1]
mag[0, ids1] = mag[1, ids1]
ids1 = (direction[-2, :] > np.pi) & (direction[-2, :] < 2 * np.pi)
direction[-1, ids1] = direction[-2, ids1]
mag[-1, ids1] = mag[-2, ids1]
# Now update the edges in case they are higher than the interior (i.e.
# look at the downstream angle)
# left edge
slc0 = [slice(1, -1), slice(0, 1)]
for ind in [0, 1, 6, 7]:
e1 = facets[ind][1]
e2 = facets[ind][2]
ang = ang_adj[ind]
slc1 = [slice(1 + e1[0], shp[0] + e1[0]), slice(e1[1], 1 + e1[1])]
slc2 = [slice(1 + e2[0], shp[0] + e2[0]), slice(e2[1], 1 + e2[1])]
d1, d2, theta = _get_d1_d2(dX, dY, ind, e1, e2, shp)
mag, direction = _calc_direction(data, mag, direction, ang, d1, d2,
theta, slc0, slc1, slc2)
# right edge
slc0 = [slice(1, -1), slice(-1, None)]
for ind in [2, 3, 4, 5]:
e1 = facets[ind][1]
e2 = facets[ind][2]
ang = ang_adj[ind]
slc1 = [slice(1 + e1[0], shp[0] + e1[0]),
slice(shp[1] + e1[1], shp[1] + 1 + e1[1])]
slc2 = [slice(1 + e2[0], shp[0] + e2[0]),
slice(shp[1] + e2[1], shp[1] + 1 + e2[1])]
d1, d2, theta = _get_d1_d2(dX, dY, ind, e1, e2, shp)
mag, direction = _calc_direction(data, mag, direction, ang, d1, d2,
theta, slc0, slc1, slc2)
# top edge
slc0 = [slice(0, 1), slice(1, -1)]
for ind in [4, 5, 6, 7]:
e1 = facets[ind][1]
e2 = facets[ind][2]
ang = ang_adj[ind]
slc1 = [slice(e1[0], 1 + e1[0]), slice(1 + e1[1], shp[1] + e1[1])]
slc2 = [slice(e2[0], 1 + e2[0]), slice(1 + e2[1], shp[1] + e2[1])]
d1, d2, theta = _get_d1_d2(dX, dY, ind, e1, e2, shp, 'top')
mag, direction = _calc_direction(data, mag, direction, ang, d1, d2,
theta, slc0, slc1, slc2)
# bottom edge
slc0 = [slice(-1, None), slice(1, -1)]
for ind in [0, 1, 2, 3]:
e1 = facets[ind][1]
e2 = facets[ind][2]
ang = ang_adj[ind]
slc1 = [slice(shp[0] + e1[0], shp[0] + 1 + e1[0]),
slice(1 + e1[1], shp[1] + e1[1])]
slc2 = [slice(shp[0] + e2[0], shp[0] + 1 + e2[0]),
slice(1 + e2[1], shp[1] + e2[1])]
d1, d2, theta = _get_d1_d2(dX, dY, ind, e1, e2, shp, 'bot')
mag, direction = _calc_direction(data, mag, direction, ang, d1, d2,
theta, slc0, slc1, slc2)
# top-left corner
slc0 = [slice(0, 1), slice(0, 1)]
for ind in [6, 7]:
e1 = facets[ind][1]
e2 = facets[ind][2]
ang = ang_adj[ind]
slc1 = [slice(e1[0], 1 + e1[0]), slice(e1[1], 1 + e1[1])]
slc2 = [slice(e2[0], 1 + e2[0]), slice(e2[1], 1 + e2[1])]
d1, d2, theta = _get_d1_d2(dX, dY, ind, e1, e2, shp, 'top')
mag, direction = _calc_direction(data, mag, direction, ang, d1, d2,
theta, slc0, slc1, slc2)
# top-right corner
slc0 = [slice(0, 1), slice(-1, None)]
for ind in [4, 5]:
e1 = facets[ind][1]
e2 = facets[ind][2]
ang = ang_adj[ind]
slc1 = [slice(e1[0], 1 + e1[0]),
slice(shp[1] + e1[1], shp[1] + 1 + e1[1])]
slc2 = [slice(e2[0], 1 + e2[0]),
slice(shp[1] + e2[1], shp[1] + 1 + e2[1])]
d1, d2, theta = _get_d1_d2(dX, dY, ind, e1, e2, shp, 'top')
mag, direction = _calc_direction(data, mag, direction, ang, d1, d2,
theta, slc0, slc1, slc2)
# bottom-left corner
slc0 = [slice(-1, None), slice(0, 1)]
for ind in [0, 1]:
e1 = facets[ind][1]
e2 = facets[ind][2]
ang = ang_adj[ind]
slc1 = [slice(shp[0] + e1[0], shp[0] + 1 + e1[0]),
slice(e1[1], 1 + e1[1])]
slc2 = [slice(shp[0] + e2[0], shp[0] + 1 + e2[0]),
slice(e2[1], 1 + e2[1])]
d1, d2, theta = _get_d1_d2(dX, dY, ind, e1, e2, shp, 'bot')
mag, direction = _calc_direction(data, mag, direction, ang, d1, d2,
theta, slc0, slc1, slc2)
# bottom-right corner
slc0 = [slice(-1, None), slice(-1, None)]
for ind in [3, 4]:
e1 = facets[ind][1]
e2 = facets[ind][2]
ang = ang_adj[ind]
slc1 = [slice(shp[0] + e1[0], shp[0] + 1 + e1[0]),
slice(shp[1] + e1[1], shp[1] + 1 + e1[1])]
slc2 = [slice(shp[0] + e2[0], shp[0] + 1 + e2[0]),
slice(shp[1] + e2[1], shp[1] + 1 + e2[1])]
d1, d2, theta = _get_d1_d2(dX, dY, ind, e1, e2, shp, 'bot')
mag, direction = _calc_direction(data, mag, direction, ang, d1, d2,
theta, slc0, slc1, slc2)
mag[mag > 0] = np.sqrt(mag[mag > 0])
return mag, direction | Calculate the slopes and directions based on the 8 sections from
Tarboton http://www.neng.usu.edu/cee/faculty/dtarb/96wr03137.pdf | entailment |
def _get_d1_d2(dX, dY, ind, e1, e2, shp, topbot=None):
"""
This finds the distances along the patch (within the eight neighboring
pixels around a central pixel) given the difference in x and y coordinates
of the real image. This is the function that allows real coordinates to be
used when calculating the magnitude and directions of slopes.
"""
    if topbot is None:
if ind in [0, 3, 4, 7]:
d1 = dX[slice((e2[0] + 1) / 2, shp[0] + (e2[0] - 1) / 2)]
d2 = dY[slice((e2[0] + 1) / 2, shp[0] + (e2[0] - 1) / 2)]
if d1.size == 0:
d1 = np.array([dX[0]])
d2 = np.array([dY[0]])
else:
d2 = dX[slice((e1[0] + 1) / 2, shp[0] + (e1[0] - 1) / 2)]
d1 = dY[slice((e1[0] + 1) / 2, shp[0] + (e1[0] - 1) / 2)]
if d1.size == 0:
d2 = dX[0]
d1 = dY[0]
elif topbot == 'top':
if ind in [0, 3, 4, 7]:
d1, d2 = dX[0], dY[0]
else:
d2, d1 = dX[0], dY[0]
elif topbot == 'bot':
if ind in [0, 3, 4, 7]:
d1, d2 = dX[-1], dY[-1]
else:
d2, d1 = dX[-1], dY[-1]
theta = np.arctan2(d2, d1)
return d1.reshape(d1.size, 1), d2.reshape(d2.size, 1), theta.reshape(theta.size, 1) | This finds the distances along the patch (within the eight neighboring
pixels around a central pixel) given the difference in x and y coordinates
of the real image. This is the function that allows real coordinates to be
used when calculating the magnitude and directions of slopes. | entailment |
def _calc_direction(data, mag, direction, ang, d1, d2, theta,
slc0, slc1, slc2):
"""
This function gives the magnitude and direction of the slope based on
Tarboton's D_\infty method. This is a helper-function to
_tarboton_slopes_directions
"""
data0 = data[slc0]
data1 = data[slc1]
data2 = data[slc2]
s1 = (data0 - data1) / d1
s2 = (data1 - data2) / d2
s1_2 = s1**2
sd = (data0 - data2) / np.sqrt(d1**2 + d2**2)
r = np.arctan2(s2, s1)
rad2 = s1_2 + s2**2
# Handle special cases
# should be on diagonal
b_s1_lte0 = s1 <= 0
b_s2_lte0 = s2 <= 0
b_s1_gt0 = s1 > 0
b_s2_gt0 = s2 > 0
I1 = (b_s1_lte0 & b_s2_gt0) | (r > theta)
if I1.any():
rad2[I1] = sd[I1] ** 2
r[I1] = theta.repeat(I1.shape[1], 1)[I1]
I2 = (b_s1_gt0 & b_s2_lte0) | (r < 0) # should be on straight section
if I2.any():
rad2[I2] = s1_2[I2]
r[I2] = 0
I3 = b_s1_lte0 & (b_s2_lte0 | (b_s2_gt0 & (sd <= 0))) # upslope or flat
rad2[I3] = -1
I4 = rad2 > mag[slc0]
if I4.any():
mag[slc0][I4] = rad2[I4]
direction[slc0][I4] = r[I4] * ang[1] + ang[0] * np.pi/2
return mag, direction | This function gives the magnitude and direction of the slope based on
Tarboton's D_\infty method. This is a helper-function to
_tarboton_slopes_directions | entailment |
def get(self, key, side):
"""
Returns an edge given a particular key
        Parameters
----------
key : tuple
(te, be, le, re) tuple that identifies a tile
side : str
top, bottom, left, or right, which edge to return
"""
return getattr(self, side).ravel()[self.keys[key]] | Returns an edge given a particular key
Parameters
----------
key : tuple
(te, be, le, re) tuple that identifies a tile
side : str
top, bottom, left, or right, which edge to return | entailment |
def set_i(self, i, data, field, side):
""" Assigns data on the i'th tile to the data 'field' of the 'side'
edge of that tile
"""
edge = self.get_i(i, side)
setattr(edge, field, data[edge.slice]) | Assigns data on the i'th tile to the data 'field' of the 'side'
edge of that tile | entailment |
def set_sides(self, key, data, field, local=False):
"""
Assign data on the 'key' tile to all the edges
"""
for side in ['left', 'right', 'top', 'bottom']:
self.set(key, data, field, side, local) | Assign data on the 'key' tile to all the edges | entailment |
def set_neighbor_data(self, neighbor_side, data, key, field):
"""
Assign data from the 'key' tile to the edge on the
neighboring tile which is on the 'neighbor_side' of the 'key' tile.
        The data is assigned to the 'field' attribute of the neighboring tile's
edge.
"""
i = self.keys[key]
found = False
sides = []
if 'left' in neighbor_side:
if i % self.n_cols == 0:
return None
i -= 1
sides.append('right')
found = True
if 'right' in neighbor_side:
if i % self.n_cols == self.n_cols - 1:
return None
i += 1
sides.append('left')
found = True
if 'top' in neighbor_side:
sides.append('bottom')
i -= self.n_cols
found = True
if 'bottom' in neighbor_side:
sides.append('top')
i += self.n_cols
found = True
if not found:
print "Side '%s' not found" % neighbor_side
# Check if i is in range
if i < 0 or i >= self.n_chunks:
return None
# Otherwise, set the data
for side in sides:
self.set_i(i, data, field, side) | Assign data from the 'key' tile to the edge on the
neighboring tile which is on the 'neighbor_side' of the 'key' tile.
The data is assigned to the 'field' attribute of the neighboring tile's
edge. | entailment |
def set_all_neighbors_data(self, data, done, key):
"""
        Given the 'key' tile's data, assigns this information to all
neighboring tiles
"""
# The order of this for loop is important because the topleft gets
# it's data from the left neighbor, which should have already been
# updated...
for side in ['left', 'right', 'top', 'bottom', 'topleft',
'topright', 'bottomleft', 'bottomright']:
self.set_neighbor_data(side, data, key, 'data')
# self.set_neighbor_data(side, todo, key, 'todo')
self.set_neighbor_data(side, done, key, 'done') | Given they 'key' tile's data, assigns this information to all
neighboring tiles | entailment |
def fill_n_todo(self):
"""
Calculate and record the number of edge pixels left to do on each tile
"""
left = self.left
right = self.right
top = self.top
bottom = self.bottom
for i in xrange(self.n_chunks):
self.n_todo.ravel()[i] = np.sum([left.ravel()[i].n_todo,
right.ravel()[i].n_todo,
top.ravel()[i].n_todo,
bottom.ravel()[i].n_todo]) | Calculate and record the number of edge pixels left to do on each tile | entailment |
def fill_n_done(self):
"""
        Calculate and record the number of edge pixels that are done on each
tile.
"""
left = self.left
right = self.right
top = self.top
bottom = self.bottom
for i in xrange(self.n_chunks):
self.n_done.ravel()[i] = np.sum([left.ravel()[i].n_done,
right.ravel()[i].n_done,
top.ravel()[i].n_done,
bottom.ravel()[i].n_done]) | Calculate and record the number of edge pixels that are done one each
tile. | entailment |
def fill_percent_done(self):
"""
Calculate the percentage of edge pixels that would be done if the tile
was reprocessed. This is done for each tile.
"""
left = self.left
right = self.right
top = self.top
bottom = self.bottom
for i in xrange(self.n_chunks):
self.percent_done.ravel()[i] = \
np.sum([left.ravel()[i].percent_done,
right.ravel()[i].percent_done,
top.ravel()[i].percent_done,
bottom.ravel()[i].percent_done])
self.percent_done.ravel()[i] /= \
np.sum([left.ravel()[i].percent_done > 0,
right.ravel()[i].percent_done > 0,
top.ravel()[i].percent_done > 0,
bottom.ravel()[i].percent_done > 0, 1e-16]) | Calculate the percentage of edge pixels that would be done if the tile
was reprocessed. This is done for each tile. | entailment |
def fill_array(self, array, field, add=False, maximize=False):
"""
        Given a full array (for the whole image), fill it with the data on
the edges.
"""
self.fix_shapes()
for i in xrange(self.n_chunks):
for side in ['left', 'right', 'top', 'bottom']:
edge = getattr(self, side).ravel()[i]
if add:
array[edge.slice] += getattr(edge, field)
elif maximize:
array[edge.slice] = np.maximum(array[edge.slice],
getattr(edge, field))
else:
array[edge.slice] = getattr(edge, field)
        return array | Given a full array (for the whole image), fill it with the data on
the edges. | entailment |
def fix_shapes(self):
"""
Fixes the shape of the data fields on edges. Left edges should be
column vectors, and top edges should be row vectors, for example.
"""
for i in xrange(self.n_chunks):
for side in ['left', 'right', 'top', 'bottom']:
edge = getattr(self, side).ravel()[i]
if side in ['left', 'right']:
shp = [edge.todo.size, 1]
else:
shp = [1, edge.todo.size]
edge.done = edge.done.reshape(shp)
edge.data = edge.data.reshape(shp)
edge.todo = edge.todo.reshape(shp) | Fixes the shape of the data fields on edges. Left edges should be
column vectors, and top edges should be row vectors, for example. | entailment |
def find_best_candidate(self):
"""
Determine which tile, when processed, would complete the largest
percentage of unresolved edge pixels. This is a heuristic function
and does not give the optimal tile.
"""
self.fill_percent_done()
i_b = np.argmax(self.percent_done.ravel())
if self.percent_done.ravel()[i_b] <= 0:
return None
# check for ties
I = self.percent_done.ravel() == self.percent_done.ravel()[i_b]
if I.sum() == 1:
return i_b
else:
I2 = np.argmax(self.max_elev.ravel()[I])
return I.nonzero()[0][I2] | Determine which tile, when processed, would complete the largest
percentage of unresolved edge pixels. This is a heuristic function
and does not give the optimal tile. | entailment |
def save_array(self, array, name=None, partname=None, rootpath='.',
raw=False, as_int=True):
"""
Standard array saving routine
Parameters
-----------
array : array
Array to save to file
name : str, optional
Default 'array.tif'. Filename of array to save. Over-writes
partname.
partname : str, optional
Part of the filename to save (with the coordinates appended)
rootpath : str, optional
Default '.'. Which directory to save file
raw : bool, optional
Default False. If true will save a .npz of the array. If false,
will save a geotiff
as_int : bool, optional
Default True. If true will save array as an integer array (
excellent compression). If false will save as float array.
"""
if name is None and partname is not None:
fnl_file = self.get_full_fn(partname, rootpath)
tmp_file = os.path.join(rootpath, partname,
self.get_fn(partname + '_tmp'))
elif name is not None:
fnl_file = name
tmp_file = fnl_file + '_tmp.tiff'
        else:
            fnl_file = 'array.tif'
            tmp_file = fnl_file + '_tmp.tiff'
if not raw:
s_file = self.elev.clone_traits()
s_file.raster_data = np.ma.masked_array(array)
count = 10
while count > 0 and (s_file.raster_data.mask.sum() > 0 \
or np.isnan(s_file.raster_data).sum() > 0):
s_file.inpaint()
count -= 1
s_file.export_to_geotiff(tmp_file)
if as_int:
cmd = "gdalwarp -multi -wm 2000 -co BIGTIFF=YES -of GTiff -co compress=lzw -ot Int16 -co TILED=YES -wo OPTIMIZE_SIZE=YES -r near -t_srs %s %s %s" \
% (self.save_projection, tmp_file, fnl_file)
else:
cmd = "gdalwarp -multi -wm 2000 -co BIGTIFF=YES -of GTiff -co compress=lzw -co TILED=YES -wo OPTIMIZE_SIZE=YES -r near -t_srs %s %s %s" \
% (self.save_projection, tmp_file, fnl_file)
print "<<"*4, cmd, ">>"*4
            subprocess.call(cmd, shell=True)
os.remove(tmp_file)
else:
np.savez_compressed(fnl_file, array) | Standard array saving routine
Parameters
-----------
array : array
Array to save to file
name : str, optional
Default 'array.tif'. Filename of array to save. Over-writes
partname.
partname : str, optional
Part of the filename to save (with the coordinates appended)
rootpath : str, optional
Default '.'. Which directory to save file
raw : bool, optional
Default False. If true will save a .npz of the array. If false,
will save a geotiff
as_int : bool, optional
Default True. If true will save array as an integer array (
excellent compression). If false will save as float array. | entailment |
def save_uca(self, rootpath, raw=False, as_int=False):
""" Saves the upstream contributing area to a file
"""
self.save_array(self.uca, None, 'uca', rootpath, raw, as_int=as_int) | Saves the upstream contributing area to a file | entailment |
def save_twi(self, rootpath, raw=False, as_int=True):
""" Saves the topographic wetness index to a file
"""
self.twi = np.ma.masked_array(self.twi, mask=self.twi <= 0,
fill_value=-9999)
# self.twi = self.twi.filled()
self.twi[self.flats] = 0
self.twi.mask[self.flats] = True
# self.twi = self.flats
self.save_array(self.twi, None, 'twi', rootpath, raw, as_int=as_int) | Saves the topographic wetness index to a file | entailment |
def save_slope(self, rootpath, raw=False, as_int=False):
""" Saves the magnitude of the slope to a file
"""
self.save_array(self.mag, None, 'mag', rootpath, raw, as_int=as_int) | Saves the magnitude of the slope to a file | entailment |
def save_direction(self, rootpath, raw=False, as_int=False):
""" Saves the direction of the slope to a file
"""
self.save_array(self.direction, None, 'ang', rootpath, raw, as_int=as_int) | Saves the direction of the slope to a file | entailment |
def save_outputs(self, rootpath='.', raw=False):
"""Saves TWI, UCA, magnitude and direction of slope to files.
"""
self.save_twi(rootpath, raw)
self.save_uca(rootpath, raw)
self.save_slope(rootpath, raw)
self.save_direction(rootpath, raw) | Saves TWI, UCA, magnitude and direction of slope to files. | entailment |
def load_array(self, fn, name):
"""
Can only load files that were saved in the 'raw' format.
Loads previously computed field 'name' from file
Valid names are 'mag', 'direction', 'uca', 'twi'
"""
if os.path.exists(fn + '.npz'):
array = np.load(fn + '.npz')
try:
setattr(self, name, array['arr_0'])
except Exception, e:
print e
finally:
array.close()
else:
raise RuntimeError("File %s does not exist." % (fn + '.npz')) | Can only load files that were saved in the 'raw' format.
Loads previously computed field 'name' from file
Valid names are 'mag', 'direction', 'uca', 'twi' | entailment |
def _get_chunk_edges(self, NN, chunk_size, chunk_overlap):
"""
        Given the size of the array, calculate an array that gives the
edges of chunks of nominal size, with specified overlap
Parameters
----------
NN : int
Size of array
chunk_size : int
Nominal size of chunks (chunk_size < NN)
chunk_overlap : int
Number of pixels chunks will overlap
Returns
-------
start_id : array
The starting id of a chunk. start_id[i] gives the starting id of
the i'th chunk
end_id : array
The ending id of a chunk. end_id[i] gives the ending id of
the i'th chunk
"""
left_edge = np.arange(0, NN - chunk_overlap, chunk_size)
left_edge[1:] -= chunk_overlap
right_edge = np.arange(0, NN - chunk_overlap, chunk_size)
right_edge[:-1] = right_edge[1:] + chunk_overlap
right_edge[-1] = NN
right_edge = np.minimum(right_edge, NN)
        return left_edge, right_edge | Given the size of the array, calculate an array that gives the
edges of chunks of nominal size, with specified overlap
Parameters
----------
NN : int
Size of array
chunk_size : int
Nominal size of chunks (chunk_size < NN)
chunk_overlap : int
Number of pixels chunks will overlap
Returns
-------
start_id : array
The starting id of a chunk. start_id[i] gives the starting id of
the i'th chunk
end_id : array
The ending id of a chunk. end_id[i] gives the ending id of
the i'th chunk | entailment |
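A worked example of the chunk-edge arithmetic with illustrative numbers, re-derived standalone rather than called through the class:
import numpy as np
NN, chunk_size, chunk_overlap = 10, 4, 1
left = np.arange(0, NN - chunk_overlap, chunk_size)
left[1:] -= chunk_overlap                 # -> [0, 3, 7]
right = np.arange(0, NN - chunk_overlap, chunk_size)
right[:-1] = right[1:] + chunk_overlap
right[-1] = NN
right = np.minimum(right, NN)             # -> [5, 9, 10]
# chunks span [0:5], [3:9] and [7:10]; interior boundaries share overlapping pixels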
def _assign_chunk(self, data, arr1, arr2, te, be, le, re, ovr, add=False):
"""
Assign data from a chunk to the full array. The data in overlap regions
will not be assigned to the full array
Parameters
-----------
data : array
Unused array (except for shape) that has size of full tile
arr1 : array
Full size array to which data will be assigned
arr2 : array
Chunk-sized array from which data will be assigned
te : int
Top edge id
be : int
Bottom edge id
le : int
Left edge id
re : int
Right edge id
ovr : int
The number of pixels in the overlap
add : bool, optional
Default False. If true, the data in arr2 will be added to arr1,
otherwise data in arr2 will overwrite data in arr1
"""
if te == 0:
i1 = 0
else:
i1 = ovr
if be == data.shape[0]:
i2 = 0
i2b = None
else:
i2 = -ovr
i2b = -ovr
if le == 0:
j1 = 0
else:
j1 = ovr
if re == data.shape[1]:
j2 = 0
j2b = None
else:
j2 = -ovr
j2b = -ovr
if add:
arr1[te+i1:be+i2, le+j1:re+j2] += arr2[i1:i2b, j1:j2b]
else:
arr1[te+i1:be+i2, le+j1:re+j2] = arr2[i1:i2b, j1:j2b] | Assign data from a chunk to the full array. The data in overlap regions
will not be assigned to the full array
Parameters
-----------
data : array
Unused array (except for shape) that has size of full tile
arr1 : array
Full size array to which data will be assigned
arr2 : array
Chunk-sized array from which data will be assigned
te : int
Top edge id
be : int
Bottom edge id
le : int
Left edge id
re : int
Right edge id
ovr : int
The number of pixels in the overlap
add : bool, optional
Default False. If true, the data in arr2 will be added to arr1,
otherwise data in arr2 will overwrite data in arr1 | entailment |
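A standalone sketch of the interior case of the overlap trimming above (a chunk that touches no domain edge), with illustrative sizes:
import numpy as np
full = np.zeros((10, 10))
chunk = np.ones((6, 6))                    # covers rows/cols 3:9 of the full array
te, be, le, re, ovr = 3, 9, 3, 9, 1
# Only the part of the chunk inside the overlap ring is written back.
full[te + ovr:be - ovr, le + ovr:re - ovr] = chunk[ovr:-ovr, ovr:-ovr]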
def calc_slopes_directions(self, plotflag=False):
"""
Calculates the magnitude and direction of slopes and fills
self.mag, self.direction
"""
# TODO minimum filter behavior with nans?
# fill/interpolate flats first
if self.fill_flats:
data = np.ma.filled(self.data.astype('float64'), np.nan)
filled = data.copy()
edge = np.ones_like(data, bool)
edge[1:-1, 1:-1] = False
            if self.fill_flats_below_sea:
                sea_mask = data != 0
            else:
                sea_mask = data > 0
flat = (spndi.minimum_filter(data, (3, 3)) >= data) & sea_mask
flats, n = spndi.label(flat, structure=FLATS_KERNEL3)
objs = spndi.find_objects(flats)
for i, _obj in enumerate(objs):
obj = grow_obj(_obj, data.shape)
self._fill_flat(data[obj], filled[obj], flats[obj]==i+1, edge[obj])
self.data = np.ma.masked_array(filled, mask=np.isnan(filled)).astype(self.data.dtype)
# %% Calculate the slopes and directions based on the 8 sections from
# Tarboton http://www.neng.usu.edu/cee/faculty/dtarb/96wr03137.pdf
if self.data.shape[0] <= self.chunk_size_slp_dir and \
self.data.shape[1] <= self.chunk_size_slp_dir:
print "starting slope/direction calculation"
self.mag, self.direction = self._slopes_directions(
self.data, self.dX, self.dY, 'tarboton')
# Find the flat regions. This is mostly simple (look for mag < 0),
# but the downstream pixel at the edge of a flat will have a
            # calculable angle which will not be accurate. We have to also find
# these edges and set their magnitude to -1 (that is, the flat_id)
self.find_flats()
else:
self.direction = np.full(self.data.shape, FLAT_ID_INT, 'float64')
self.mag = np.full(self.data.shape, FLAT_ID_INT, 'float64')
self.flats = np.zeros(self.data.shape, bool)
top_edge, bottom_edge = \
self._get_chunk_edges(self.data.shape[0],
self.chunk_size_slp_dir,
self.chunk_overlap_slp_dir)
left_edge, right_edge = \
self._get_chunk_edges(self.data.shape[1],
self.chunk_size_slp_dir,
self.chunk_overlap_slp_dir)
ovr = self.chunk_overlap_slp_dir
count = 1
for te, be in zip(top_edge, bottom_edge):
for le, re in zip(left_edge, right_edge):
print "starting slope/direction calculation for chunk", \
count, "[%d:%d, %d:%d]" % (te, be, le, re)
count += 1
mag, direction = \
self._slopes_directions(self.data[te:be, le:re],
self.dX[te:be-1],
self.dY[te:be-1])
flats = self._find_flats_edges(self.data[te:be, le:re],
mag, direction)
direction[flats] = FLAT_ID_INT
mag[flats] = FLAT_ID_INT
self._assign_chunk(self.data, self.mag, mag,
te, be, le, re, ovr)
self._assign_chunk(self.data, self.direction, direction,
te, be, le, re, ovr)
self._assign_chunk(self.data, self.flats, flats,
te, be, le, re, ovr)
if plotflag:
self._plot_debug_slopes_directions()
gc.collect() # Just in case
return self.mag, self.direction | Calculates the magnitude and direction of slopes and fills
self.mag, self.direction | entailment |
def _slopes_directions(self, data, dX, dY, method='tarboton'):
""" Wrapper to pick between various algorithms
"""
# %%
if method == 'tarboton':
return self._tarboton_slopes_directions(data, dX, dY)
elif method == 'central':
return self._central_slopes_directions(data, dX, dY) | Wrapper to pick between various algorithms | entailment |
def _tarboton_slopes_directions(self, data, dX, dY):
"""
Calculate the slopes and directions based on the 8 sections from
Tarboton http://www.neng.usu.edu/cee/faculty/dtarb/96wr03137.pdf
"""
return _tarboton_slopes_directions(data, dX, dY,
self.facets, self.ang_adj) | Calculate the slopes and directions based on the 8 sections from
Tarboton http://www.neng.usu.edu/cee/faculty/dtarb/96wr03137.pdf | entailment |
def _central_slopes_directions(self, data, dX, dY):
"""
Calculates magnitude/direction of slopes using central difference
"""
shp = np.array(data.shape) - 1
direction = np.full(data.shape, FLAT_ID_INT, 'float64')
        mag = np.full(data.shape, FLAT_ID_INT, 'float64')
ind = 0
d1, d2, theta = _get_d1_d2(dX, dY, ind, [0, 1], [1, 1], shp)
s2 = (data[0:-2, 1:-1] - data[2:, 1:-1]) / d2
s1 = -(data[1:-1, 0:-2] - data[1:-1, 2:]) / d1
direction[1:-1, 1:-1] = np.arctan2(s2, s1) + np.pi
mag = np.sqrt(s1**2 + s2**2)
return mag, direction | Calculates magnitude/direction of slopes using central difference | entailment |
def _find_flats_edges(self, data, mag, direction):
"""
Extend flats 1 square downstream
Flats on the downstream side of the flat might find a valid angle,
but that doesn't mean that it's a correct angle. We have to find
these and then set them equal to a flat
"""
i12 = np.arange(data.size).reshape(data.shape)
flat = mag == FLAT_ID_INT
flats, n = spndi.label(flat, structure=FLATS_KERNEL3)
objs = spndi.find_objects(flats)
f = flat.ravel()
d = data.ravel()
for i, _obj in enumerate(objs):
region = flats[_obj] == i+1
I = i12[_obj][region]
J = get_adjacent_index(I, data.shape, data.size)
f[J] = d[J] == d[I[0]]
flat = f.reshape(data.shape)
return flat | Extend flats 1 square downstream
Flats on the downstream side of the flat might find a valid angle,
but that doesn't mean that it's a correct angle. We have to find
these and then set them equal to a flat | entailment |
def calc_uca(self, plotflag=False, edge_init_data=None, uca_init=None):
"""Calculates the upstream contributing area.
Parameters
----------
plotflag : bool, optional
Default False. If true will plot debugging plots. For large files,
this will be very slow
edge_init_data : list, optional
edge_init_data = [uca_data, done_data, todo_data]
uca_data : dict
Dictionary with 'left', 'right', 'top', 'bottom' keys that
gives the arrays filled with uca data on the edge corresponding
to the key
done_data : dict
As uca_data, but bool array indicating if neighboring tiles
have computed a finished value for that edge pixel
todo_data : dict
As uca_data, but bool array indicating if edges on tile still
have to be computed
uca_init : array, optional
Array with pre-computed upstream contributing area
(without edge contributions)
Notes
-------
if edge_init_data is given, then the initialized area will be modified
such that the edges are equal to the edge_init_data.
If uca_init is given, then the interior of the upstream area will not
be calculated. Only the information from the edges will be updated.
Unless the tile is too large so that the calculation is chunked. In
that case, the whole tile is re-computed.
"""
if self.direction is None:
self.calc_slopes_directions()
# Initialize the upstream area
uca_edge_init = np.zeros(self.data.shape, 'float64')
uca_edge_done = np.zeros(self.data.shape, bool)
uca_edge_todo = np.zeros(self.data.shape, bool)
edge_init_done, edge_init_todo = None, None
if edge_init_data is not None:
edge_init_data, edge_init_done, edge_init_todo = edge_init_data
slices = {'left': [slice(None), slice(0, 1)],
'right': [slice(None), slice(-1, None)],
'top': [slice(0, 1), slice(None)],
'bottom': [slice(-1, None), slice(None)]}
for key, val in slices.iteritems():
# To initialize and edge it needs to have data and be finished
uca_edge_done[val] += \
edge_init_done[key].reshape(uca_edge_init[val].shape)
uca_edge_init[val] = \
edge_init_data[key].reshape(uca_edge_init[val].shape)
uca_edge_init[val][~uca_edge_done[val]] = 0
uca_edge_todo[val] += \
edge_init_todo[key].reshape(uca_edge_init[val].shape)
if uca_init is None:
self.uca = np.full(self.data.shape, FLAT_ID_INT, 'float64')
else:
self.uca = uca_init.astype('float64')
if self.data.shape[0] <= self.chunk_size_uca and \
self.data.shape[1] <= self.chunk_size_uca:
if uca_init is None:
print "Starting uca calculation"
res = self._calc_uca_chunk(self.data, self.dX, self.dY,
self.direction, self.mag,
self.flats,
area_edges=uca_edge_init,
plotflag=plotflag)
self.edge_todo = res[1]
self.edge_done = res[2]
self.uca = res[0]
else:
print "Starting edge resolution round: ",
# last return value will be None: edge_
area, e2doi, edone, _ = \
self._calc_uca_chunk_update(self.data, self.dX, self.dY,
self.direction, self.mag,
self.flats,
area_edges=uca_edge_init,
edge_todo=uca_edge_todo,
edge_done=uca_edge_done)
self.uca += area
self.edge_todo = e2doi
self.edge_done = edone
else:
top_edge, bottom_edge = \
self._get_chunk_edges(self.data.shape[0], self.chunk_size_uca,
self.chunk_overlap_uca)
left_edge, right_edge = \
self._get_chunk_edges(self.data.shape[1], self.chunk_size_uca,
self.chunk_overlap_uca)
ovr = self.chunk_overlap_uca
# Initialize the edge_todo and done arrays
edge_todo = np.zeros(self.data.shape, bool)
edge_todo_tile = np.zeros(self.data.shape, bool)
edge_not_done_tile = np.zeros(self.data.shape, bool)
edge_done = np.zeros(self.data.shape, bool)
tile_edge = TileEdge(top_edge, bottom_edge, left_edge,
right_edge, ovr,
self.elev.grid_coordinates.x_axis,
self.elev.grid_coordinates.y_axis, self.data)
count = 1
# Mask out the edges because we're just trying to resolve the
# internal edge conflicts
self.data.mask[:, 0] = True
self.data.mask[:, -1] = True
self.data.mask[0, :] = True
self.data.mask[-1, :] = True
# if 1: # uca_init == None:
print "Starting uca calculation for chunk: ",
# %%
for te, be in zip(top_edge, bottom_edge):
for le, re in zip(left_edge, right_edge):
print count, "[%d:%d, %d:%d]" % (te, be, le, re),
count += 1
area, e2doi, edone, e2doi_no_mask, e2o_no_mask = \
self._calc_uca_chunk(self.data[te:be, le:re],
self.dX[te:be-1],
self.dY[te:be-1],
self.direction[te:be, le:re],
self.mag[te:be, le:re],
self.flats[te:be, le:re],
area_edges=uca_edge_init[te:be, le:re],
plotflag=plotflag,
edge_todo_i_no_mask=uca_edge_todo[te:be, le:re])
self._assign_chunk(self.data, self.uca, area,
te, be, le, re, ovr)
edge_todo[te:be, le:re] += e2doi
edge_not_done_tile[te:be, le:re] += e2o_no_mask
# if this tile is on the edge of the domain, we actually
# want to keep the edge information
# UPDATE: I don't think we actually need this here as it
# will be handled by chunk update ???
self._assign_chunk(self.data, edge_todo_tile, e2doi_no_mask,
te, be, le, re, ovr)
# if te == top_edge[0] or be == bottom_edge[-1] \
# or le == left_edge[0] or re == right_edge[-1]:
# edge_todo_tile[te:be, le:re] = e2doi
self._assign_chunk(self.data, edge_done, edone,
te, be, le, re, ovr)
tile_edge.set_all_neighbors_data(self.uca,
edge_done,
(te, be, le, re))
tile_edge.set_sides((te, be, le, re), e2doi, 'todo',
local=True)
# %%
print '..Done'
# This needs to be much more sophisticated because we have to
# follow the tile's edge value through the interior.
# Since we have to do that anyway, we might as well recompute
# the UCA from scratch. So the above branch does that. The branch
# below would be more efficient if we can get it working.
if 0: # else:
# need to populate tile_edge somehow
edge_todo_tile = uca_edge_todo & ~uca_edge_done
edge_not_done_tile = edge_todo_tile.copy()
for te, be in zip(top_edge, bottom_edge):
for le, re in zip(
[left_edge[0], left_edge[-1]],
[right_edge[0], right_edge[-1]]):
e2doi = uca_edge_todo[te:be, le:re]
tiledata = uca_edge_init[te:be, le:re]
tile_edge.set_sides((te, be, le, re), tiledata, 'data',
local=True)
tiledone = uca_edge_done[te:be, le:re]
tile_edge.set_sides((te, be, le, re), tiledone, 'done',
local=True)
for te, be in zip([top_edge[0], top_edge[-1]],
[bottom_edge[0], bottom_edge[-1]]):
for le, re in zip(left_edge, right_edge):
e2doi = uca_edge_todo[te:be, le:re]
tiledata = uca_edge_init[te:be, le:re]
tile_edge.set_sides((te, be, le, re), tiledata, 'data',
local=True)
tiledone = uca_edge_done[te:be, le:re]
tile_edge.set_sides((te, be, le, re), tiledone, 'done',
local=True)
if not self.resolve_edges:
# This branch is probably horribly broken (but it might have
# always been that way)
self.tile_edge = tile_edge
self.edge_todo = edge_todo
self.edge_done = edge_done
return self.uca
# ## RESOLVING EDGES ## #
# Get a good starting tile for the iteration
i = tile_edge.find_best_candidate()
tile_edge.fix_shapes()
# dbug = np.zeros_like(self.uca)
print "Starting edge resolution round: ",
count = 0
i_old = -1
while i is not None and i != i_old:
count += 1
print count, '(%d) .' % i,
# %%
te, be, le, re = tile_edge.coords[i]
data, dX, dY, direction, mag, flats = \
[self.data[te:be, le:re],
self.dX[te:be-1], self.dY[te:be-1],
self.direction[te:be, le:re],
self.mag[te:be, le:re], self.flats[te:be, le:re]]
area, e2doi, edone, e2doi_tile = self._calc_uca_chunk_update(
data, dX, dY, direction, mag, flats, tile_edge, i,
edge_todo=edge_not_done_tile[te:be, le:re])
self._assign_chunk(self.data, self.uca, area,
te, be, le, re, ovr, add=True)
self._assign_chunk(self.data, edge_done, edone,
te, be, le, re, ovr)
tile_edge.set_all_neighbors_data(self.uca,
edge_done, (te, be, le, re))
try:
edge_not_done_tile[te:be, le:re] += e2doi_tile
except:
import ipdb; ipdb.set_trace() # BREAKPOINT
tile_edge.set_sides((te, be, le, re), e2doi, 'todo',
local=True)
i_old = i
i = tile_edge.find_best_candidate()
# Debugging plots below. Feel free to uncomment for debugging
# def drawgrid():
# ax = gca();
# ax.set_xticks(np.linspace(-0.5, 63.5, 9))
# ax.set_yticks(np.linspace(-0.5, 63.5, 9))
# grid(lw=2, ls='-', c=(0.5, 0.5, 0.5))
# figure(1);clf();imshow((self.uca), interpolation='none');colorbar(); title("uca" + str(i_old) + " " + str(i));drawgrid()
# figure(2);clf();imshow(area, interpolation='none');colorbar(); title("local area" + str(i_old) + " " + str(i))
## edge_todo[:] = 0
## edge_todo = tile_edge.fill_array(edge_todo, 'todo', add=True)
# figure(3);clf();imshow(edge_todo*1.0 + edge_done*2.0, interpolation='none');colorbar(); title("todo" + str(i_old) + " " + str(i));clim(0, 3)
# edge_todo[:] = 0
# edge_todo = tile_edge.fill_array(edge_todo, 'coulddo', add=True)
# figure(3);clf();imshow(edge_todo*1.0 + edge_done*2.0, interpolation='none');colorbar(); title("todo" + str(i_old) + " " + str(i));clim(0, 3);drawgrid()
# figure(4);clf();imshow(tile_edge.percent_done, interpolation='none');colorbar(); title("percent done" + str(i_old) + " " + str(i));clim(0, 1)
# dbug[:] = 0
# dbug = tile_edge.fill_array(dbug, 'coulddo', maximize=False)
# dbug[dbug > 0] -= (self.uca - ref_area)[dbug > 0]
# figure(5);clf();imshow(dbug, interpolation='none');colorbar(); title("data diff" + str(i_old) + " " + str(i));drawgrid()
# dbug = (self.uca - area1)
# figure(6);clf();imshow(np.log10(np.abs(dbug)), interpolation='none');colorbar(); title("uca diff" + str(i_old) + " " + str(i));drawgrid()
# %%
self.tile_edge = tile_edge
self.edge_todo = edge_todo_tile
self.edge_done = ~edge_not_done_tile
print '..Done'
# Fix the very last pixel on the edges
self.fix_edge_pixels(edge_init_data, edge_init_done, edge_init_todo)
gc.collect() # Just in case
return self.uca | Calculates the upstream contributing area.
Parameters
----------
plotflag : bool, optional
Default False. If true will plot debugging plots. For large files,
this will be very slow
edge_init_data : list, optional
edge_init_data = [uca_data, done_data, todo_data]
uca_data : dict
Dictionary with 'left', 'right', 'top', 'bottom' keys that
gives the arrays filled with uca data on the edge corresponding
to the key
done_data : dict
As uca_data, but bool array indicating if neighboring tiles
have computed a finished value for that edge pixel
todo_data : dict
As uca_data, but bool array indicating if edges on tile still
have to be computed
uca_init : array, optional
Array with pre-computed upstream contributing area
(without edge contributions)
Notes
-------
if edge_init_data is given, then the initialized area will be modified
such that the edges are equal to the edge_init_data.
If uca_init is given, then the interior of the upstream area will not
be calculated. Only the information from the edges will be updated.
Unless the tile is too large so that the calculation is chunked. In
that case, the whole tile is re-computed. | entailment |
def fix_edge_pixels(self, edge_init_data, edge_init_done, edge_init_todo):
"""
This function fixes the pixels on the very edge of the tile.
Drainage is calculated if the edge is downstream from the interior.
If there is data available on the edge (from edge_init_data, for eg)
then this data is used.
        This is a bit of a hack to take care of the edge-values. It could
possibly be handled through the main algorithm, but at least here
the treatment is explicit.
"""
data, dX, dY, direction, flats = \
self.data, self.dX, self.dY, self.direction, self.flats
sides = ['left', 'right', 'top', 'bottom']
slices_o = [[slice(None), slice(1, 2)], [slice(None), slice(-2, -1)],
[slice(1, 2), slice(None)], [slice(-2, -1), slice(None)]]
slices_d = [[slice(None), slice(0, 1)], [slice(None), slice(-1, None)],
[slice(0, 1), slice(None)], [slice(-1, None), slice(None)]]
# The first set of edges will have contributions from two nodes whereas
        # the second set of edges will only have contributions from one node
indices = {'left': [[3, 4], [2, 5]], 'right': [[0, 7], [1, 6]],
'top': [[1, 2], [0, 3]], 'bottom': [[5, 6], [4, 7]]}
# Figure out which section the drainage goes towards, and what
# proportion goes to the straight-sided (as opposed to diagonal) node.
for side, slice_o, slice_d in zip(sides, slices_o, slices_d):
section, proportion = \
self._calc_uca_section_proportion(data[slice_o],
dX[slice_o[0]],
dY[slice_o[0]],
direction[slice_o],
flats[slice_o])
# self-initialize:
if side in ['left', 'right']:
self.uca[slice_d] = \
np.concatenate(([dX[slice_d[0]][0] * dY[slice_d[0]][0]],
dX[slice_d[0]] * dY[slice_d[0]]))\
.reshape(self.uca[slice_d].shape)
else:
self.uca[slice_d] = dX[slice_d[0]][0] * dY[slice_d[0]][0]
for e in range(2):
for i in indices[side][e]:
ed = self.facets[i][2]
ids = section == i
if e == 0:
self.uca[slice_d][ids] += self.uca[slice_o][ids] \
* proportion[ids]
self.uca[slice_d][ids] += \
np.roll(np.roll(self.uca[slice_o] * (1 - proportion),
ed[0], 0),
ed[1], 1)[ids]
if e == 1:
self.uca[slice_d][ids] += \
np.roll(np.roll(self.uca[slice_o] * (proportion),
ed[0], 0),
ed[1], 1)[ids]
# Finally, add the edge data from adjacent tiles
if edge_init_done is not None:
ids = edge_init_done[side] # > 0
if side in ['left', 'right']:
self.uca[slice_d][ids, :] = \
edge_init_data[side][ids][:, None]
else:
self.uca[slice_d][:, ids] = edge_init_data[side][ids] | This function fixes the pixels on the very edge of the tile.
Drainage is calculated if the edge is downstream from the interior.
If there is data available on the edge (from edge_init_data, for eg)
then this data is used.
This is a bit of a hack to take care of the edge-values. It could
possibly be handled through the main algorithm, but at least here
the treatment is explicit. | entailment |
def _calc_uca_chunk_update(self, data, dX, dY, direction, mag, flats,
tile_edge=None, i=None,
area_edges=None, edge_todo=None, edge_done=None,
plotflag=False):
"""
Calculates the upstream contributing area due to contributions from
the edges only.
"""
# %%
sides = ['left', 'right', 'top', 'bottom']
slices = [[slice(None), slice(0, 1)], [slice(None), slice(-1, None)],
[slice(0, 1), slice(None)], [slice(-1, None), slice(None)]]
# Figure out which section the drainage goes towards, and what
# proportion goes to the straight-sided (as opposed to diagonal) node.
section, proportion = self._calc_uca_section_proportion(
data, dX, dY, direction, flats)
# Build the drainage or adjacency matrix
A = self._mk_adjacency_matrix(section, proportion, flats, data, mag, dX, dY)
if CYTHON:
B = A
C = A.tocsr()
if not CYTHON:
A = A.tocoo()
ids = np.zeros(data.shape, bool)
area = np.zeros(data.shape, 'float64')
# Set the ids to the edges that are now done, and initialize the
# edge area
if tile_edge is not None:
if edge_todo is not None:
edge_todo_tile = edge_todo
else:
edge_todo_tile = None
edge_todo = np.zeros(data.shape, bool)
for side, slice0 in zip(sides, slices):
edge = getattr(tile_edge, side).ravel()[i]
ids[slice0] = edge.done & edge.coulddo
# only add area from the finished edges
area[slice0] = edge.data * edge.done * edge.coulddo
edge_todo[slice0] = edge.todo & ~edge.done
elif area_edges is not None and edge_todo is not None \
and edge_done is not None:
area[:, 0] = area_edges[:, 0]
area[:, -1] = area_edges[:, -1]
area[-1, :] = area_edges[-1, :]
area[0, :] = area_edges[0, :]
# Initialize starting ids
ids = edge_done & edge_todo
edge_todo = edge_todo & ~edge_done
edge_todo_tile = None
else:
raise RuntimeError("Need to specify either tile_edge or area_edges"
"in _calc_uca_chunk_update")
ids = ids.ravel()
ids0 = ids.copy()
area[flats] = np.nan
edge_done = ~edge_todo
edge_todo_i = edge_todo.copy()
ids_old = np.zeros_like(ids)
# I need this to keep track of when I have to add the area, and when
# I have to replace the area.
ids_i = np.arange(ids.size)
done = np.ones(data.shape, bool)
done.ravel()[ids] = False
# Now we have to advance done through the mesh to figure out which
# contributions matter (i.e. what's done already)
def drain_pixels_done(ids, arr, rows_A, cols_A):
ids_old = ids.copy()
ids_old[:] = False
# If I use ids.sum() > 0 then I might get stuck in circular
# references.
while (ids - ids_old).sum() > 0:
# %%
print "x",
ids_old = ids.copy()
ids_todo = ids_i[ids.ravel()]
ids[:] = False
for id_todo in ids_todo:
rows = cols_A == id_todo
rows_id = rows_A[rows]
                    ids[rows_id] += arr.ravel()[rows_id] == True
arr.ravel()[rows_id] = False # Set second arrival new id
return arr
if CYTHON:
a = cyutils.drain_connections(
done.ravel(), ids, B.indptr, B.indices, set_to=False)
done = a.reshape(done.shape).astype(bool)
else:
done = drain_pixels_done(ids, done, A.row, A.col)
done[data.mask] = True # deal with no-data values
#
ids = ids0.copy()
# Set all the edges to "done" for ids0. This ensures that no edges
# will ever be updated, whether they are done or not.
ids0 = ids0.reshape(data.shape)
ids0[:, 0] = True
ids0[:, -1] = True
ids0[0, :] = True
ids0[-1, :] = True
ids0 = ids0.ravel()
ids_old[:] = 0
# %%
def drain_area(ids, area, done, rows_A, cols_A, data_A,
edge_todo_tile):
ids_old = ids.copy()
ids_old[:] = False
# If I use ids.sum() > 0 then I might get stuck in
# circular references.
while (ids - ids_old).sum() > 0:
# %%
print "o",
ids_old = ids.copy()
done.ravel()[ids] = True
ids_todo = ids_i[ids.ravel()]
ids[:] = False
for id_todo in ids_todo:
rows = cols_A == id_todo
rows_id = rows_A[rows]
factor = data_A[rows]
# not allowed to modify edge values
edge_filter_ids = ~ids0[rows_id]
factor = factor[edge_filter_ids]
rows_id = rows_id[edge_filter_ids]
area.ravel()[rows_id] += area.ravel()[id_todo] * factor
if edge_todo_tile is not None:
edge_todo_tile.ravel()[rows_id] += \
edge_todo_tile.ravel()[id_todo] * factor
# Figure out of this cell that just received a contribution
# should give its contribution to what's next... i.e. make
# sure all inputs have been added together
for row_id in rows_id: # this is the 'waiting' part
cols = cols_A[rows_A == row_id]
ids[row_id] += (~(done.ravel()[cols])).sum() == 0
# for col in cols:
# print 'row', row_id, 'col', col, 'done', done.ravel()[col]
# Follow the drainage along. New candidates are cells that
# just changed
#ids = (track_id_old.ravel() == -1) \
# & (track_id_old.ravel() != track_id.ravel())
# done.ravel()[ids] = True
# figure(7);clf();imshow(ids.reshape(mag.shape), interpolation='none')
# figure(8);clf();imshow(area, interpolation='none');colorbar()
# figure(9);clf();imshow(done, interpolation='none');colorbar()
# figure(10);clf();imshow(a + area - b, interpolation='none');colorbar()
#%%
# self._plot_connectivity(A, data=data)
return area, done, edge_todo_tile
if CYTHON:
if edge_todo_tile is not None:
a, b, c, d = cyutils.drain_area(area.ravel(),
done.ravel(),
ids,
B.indptr, B.indices, B.data,
C.indptr, C.indices,
area.shape[0], area.shape[1],
edge_todo_tile.astype('float64').ravel(),
skip_edge=True)
edge_todo_tile = c.reshape(edge_todo_tile.shape)
else:
a, b, c, d = cyutils.drain_area(area.ravel(),
done.ravel(),
ids,
B.indptr, B.indices, B.data,
C.indptr, C.indices,
area.shape[0], area.shape[1],
skip_edge=True)
area = a.reshape(area.shape)
done = b.reshape(done.shape)
else:
area, done, edge_todo_tile = \
drain_area(ids, area, done, A.row, A.col, A.data,
edge_todo_tile)
# Rather unfortunately, we still have to follow through the boolean
# edge_todo matrix...
ids = edge_todo.copy().ravel()
# %%
def drain_pixels_todo(ids, arr, rows_A, cols_A):
ids_old = ids.copy()
ids_old[:] = False
# If I use ids.sum() > 0 then I might get stuck in
# circular references.
while (ids - ids_old).sum() > 0:
# %%
print "x",
ids_old = ids.copy()
# edge_todo_old = arr.copy()
ids_todo = ids_i[ids.ravel()]
ids[:] = False
for id_todo in ids_todo:
rows = cols_A == id_todo
rows_id = rows_A[rows]
ids[rows_id] += arr.ravel()[rows_id] == False
arr.ravel()[rows_id] = True # Set new id of second arrival
# #Follow the drainage along. New candidates are cells that just changed
# ids = (edge_todo_old.ravel() != arr.ravel())
return arr
if CYTHON:
a = cyutils.drain_connections(edge_todo.ravel(),
ids, B.indptr, B.indices,
set_to=True)
edge_todo = a.reshape(edge_todo.shape).astype(bool)
else:
edge_todo = drain_pixels_todo(ids, edge_todo, A.row, A.col)
area[flats] = np.nan
edge_done = ~edge_todo
return area, edge_todo_i, edge_done, edge_todo_tile | Calculates the upstream contributing area due to contributions from
the edges only. | entailment |
def _calc_uca_chunk(self, data, dX, dY, direction, mag, flats,
area_edges, plotflag=False, edge_todo_i_no_mask=True):
"""
Calculates the upstream contributing area for the interior, and
includes edge contributions if they are provided through area_edges.
"""
# %%
# Figure out which section the drainage goes towards, and what
# proportion goes to the straight-sided (as opposed to diagonal) node.
section, proportion = self._calc_uca_section_proportion(
data, dX, dY, direction, flats)
# Build the drainage or adjacency matrix
A = self._mk_adjacency_matrix(section, proportion, flats, data, mag, dX, dY)
if CYTHON:
B = A.tocsr()
colsum = np.array(A.sum(1)).ravel()
ids = colsum == 0 # If no one drains into me
area = (dX * dY)
# Record minimum area
min_area = np.nanmin(area)
self.twi_min_area = min(self.twi_min_area, min_area)
area = np.concatenate((area[0:1], area)).reshape(area.size+1, 1)
area = area.repeat(data.shape[1], 1)
# Set the edge areas to zero, will add those contributions later
area[:, 0] = area_edges[:, 0]
area[:, -1] = area_edges[:, -1]
area[-1, :] = area_edges[-1, :]
area[0, :] = area_edges[0, :]
# These edges are done, they have been drained already
ids[area_edges.ravel() > 0] = True
done = np.zeros(data.shape, bool)
done.ravel()[ids] = True
# deal with no-data values
done[1:-1, 1:-1] = done[1:-1, 1:-1] | data.mask[1:-1, 1:-1]
# Check the inlet edges
edge_todo = np.zeros_like(done)
ids_ed = np.arange(data.size).reshape(data.shape)
# left
edge_todo[:, 0] = (A[:, ids_ed[:, 0]].sum(0) > 0) \
& (area_edges[:, 0] == 0)
edge_todo[:, -1] = (A[:, ids_ed[:, -1]].sum(0) > 0) \
& (area_edges[:, -1] == 0)
edge_todo[0, :] = (A[:, ids_ed[0, :]].sum(0) > 0) \
& (area_edges[0, :] == 0)
edge_todo[-1, :] = (A[:, ids_ed[-1, :]].sum(0) > 0) \
& (area_edges[-1, :] == 0)
# Will do the tile-level doneness
edge_todo_i_no_mask = edge_todo.copy() & edge_todo_i_no_mask
edge_todo_no_mask = edge_todo_i_no_mask.copy() # tile-level doneness
edge_todo[data.mask] = False # Don't do masked areas
# Initialize done edges
edge_todo_i = edge_todo.copy()
ids_old = np.zeros_like(ids)
# %%
count = 1
if CYTHON:
area_ = area.ravel()
done_ = done.ravel()
edge_todo_ = edge_todo.astype('float64').ravel()
edge_todo_no_mask_ = edge_todo_no_mask.astype('float64').ravel()
data_ = data.ravel()
while (np.any(~done) and count < self.circular_ref_maxcount):
print ".",
count += 1
if CYTHON:
area_, done_, edge_todo_, edge_todo_no_mask_ = cyutils.drain_area(area_,
done_, ids,
A.indptr, A.indices, A.data, B.indptr, B.indices,
area.shape[0], area.shape[1],
edge_todo_, edge_todo_no_mask_)
else:
# If I use ids.sum() > 0 then I might get stuck in
# circular references.
while (ids - ids_old).sum() > 0:
# %%
ids_old = ids.copy()
ids, area, done, edge_todo = \
self._drain_step(A, ids, area, done, edge_todo)
# figure(1);clf();imshow(area, interpolation='none');colorbar()
# figure(2);clf();imshow(ids.reshape(area.shape), interpolation='none');colorbar()
# figure(3);clf();imshow(done, interpolation='none');colorbar()
done_ = done.ravel()
#%%
ids[:] = False
max_elev = (data_ * (~done_)).max()
ids[((data_ * (~done_) - max_elev) / max_elev > -0.01)] = True
if CYTHON:
area = area_.reshape(area.shape)
done = done_.reshape(done.shape)
edge_todo = edge_todo_.reshape(edge_todo.shape).astype(bool)
edge_todo_no_mask = edge_todo_no_mask_.reshape(edge_todo_no_mask.shape).astype(bool)
area[flats] = np.nan
edge_done = ~edge_todo
edge_done[data.mask] = True # Don't do masked areas
if self.apply_uca_limit_edges:
# 2x because of bifurcations (maybe should be more than 2x, but
# should be ok
edge_done[area > self.uca_saturation_limit * 2 * min_area] = True
# %%
if plotflag:
# TODO DTYPE
            self._plot_connectivity(A, (~done).astype('float64')
+ flats.astype('float64') * 2, [0, 3])
return area, edge_todo_i, edge_done, edge_todo_i_no_mask, edge_todo_no_mask | Calculates the upstream contributing area for the interior, and
includes edge contributions if they are provided through area_edges. | entailment |
def _drain_step(self, A, ids, area, done, edge_todo):
"""
Does a single step of the upstream contributing area calculation.
Here the pixels in ids are drained downstream, the areas are updated
and the next set of pixels to drain are determined for the next round.
"""
# Only drain to cells that have a contribution
A_todo = A[:, ids.ravel()]
colsum = np.array(A_todo.sum(1)).ravel()
# Only touch cells that actually receive a contribution
# during this stage
ids_new = colsum != 0
# Is it possible that I may drain twice from my own cell?
# -- No, I don't think so...
# Is it possible that other cells may drain into me in
# multiple iterations -- yes
# Then say I check for when I'm done ensures that I don't drain until
# everyone has drained into me
area.ravel()[ids_new] += (A_todo[ids_new, :]
* (area.ravel()[ids].ravel()))
edge_todo.ravel()[ids_new] += (A_todo[ids_new, :]
* (edge_todo.ravel()[ids].ravel()))
# Figure out what's left to do.
done.ravel()[ids] = True
colsum = A * (~done.ravel())
ids = colsum == 0
# Figure out the new-undrained ids
ids = ids & (~done.ravel())
return ids, area, done, edge_todo | Does a single step of the upstream contributing area calculation.
Here the pixels in ids are drained downstream, the areas are updated
and the next set of pixels to drain are determined for the next round. | entailment |
def _calc_uca_section_proportion(self, data, dX, dY, direction, flats):
"""
Given the direction, figure out which nodes the drainage will go
toward, and what proportion of the drainage goes to which node
"""
shp = np.array(data.shape) - 1
facets = self.facets
adjust = self.ang_adj[:, 1]
d1, d2, theta = _get_d1_d2(dX, dY, 0, facets[0][1], facets[0][2], shp)
if dX.size > 1:
theta = np.row_stack((theta[0, :], theta, theta[-1, :]))
# Which quadrant am I in?
section = ((direction / np.pi * 2.0) // 1).astype('int8') # TODO DTYPE
# Gets me in the quadrant
quadrant = (direction - np.pi / 2.0 * section)
proportion = np.full_like(quadrant, np.nan)
# Now which section within the quadrant
section = section * 2 \
+ (quadrant > theta.repeat(data.shape[1], 1)) * (section % 2 == 0) \
+ (quadrant > (np.pi/2 - theta.repeat(data.shape[1], 1))) \
* (section % 2 == 1) # greater than because of ties resolution b4
# %% Calculate proportion
# Note the differing index conventions:
#   _get_d1_d2 uses indices 0, 3, 4, 7,
#   section uses even/odd (i.e. % 2),
#   proportion uses indices (0, 1, 4, 5) -- all of them different.
I1 = (section == 0) | (section == 1) | (section == 4) | (section == 5)
# I1 = section % 2 == 0
I = I1 & (quadrant <= theta.repeat(data.shape[1], 1))
proportion[I] = quadrant[I] / theta.repeat(data.shape[1], 1)[I]
I = I1 & (quadrant > theta.repeat(data.shape[1], 1))
proportion[I] = (quadrant[I] - theta.repeat(data.shape[1], 1)[I]) \
/ (np.pi / 2 - theta.repeat(data.shape[1], 1)[I])
I = (~I1) & (quadrant <= (np.pi / 2 - theta.repeat(data.shape[1], 1)))
proportion[I] = (quadrant[I]) \
/ (np.pi / 2 - theta.repeat(data.shape[1], 1)[I])
I = (~I1) & (quadrant > (np.pi / 2 - theta.repeat(data.shape[1], 1)))
proportion[I] = (quadrant[I] - (np.pi / 2 - theta.repeat(data.shape[1], 1)[I])) \
/ (theta.repeat(data.shape[1], 1)[I])
# %%Finish Proportion Calculation
section[flats] = FLAT_ID_INT
proportion[flats] = FLAT_ID
section[section == 8] = 0 # Fence-post error correction
proportion = (1 + adjust[section]) / 2.0 - adjust[section] * proportion
return section, proportion | Given the direction, figure out which nodes the drainage will go
toward, and what proportion of the drainage goes to which node | entailment |
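A hedged toy of the section/proportion idea above: with square cells, each 45-degree facet splits the flow between its two bounding neighbours in proportion to the angular distance from each edge. The real method uses dX/dY-dependent facet angles and the ang_adj bookkeeping, which are not reproduced here.

import numpy as np

def split_flow(direction_rad):
    """Return (facet, share kept by the facet's near edge) for square cells."""
    facet = int(direction_rad // (np.pi / 4.0))       # which 45-degree sector
    local = direction_rad - facet * np.pi / 4.0       # angle inside that sector
    return facet % 8, 1.0 - local / (np.pi / 4.0)

print(split_flow(np.deg2rad(30.0)))  # facet 0: 1/3 to the 0-degree edge, 2/3 to the diagonal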
def _mk_adjacency_matrix(self, section, proportion, flats, elev, mag, dX, dY):
"""
Calculates the adjacency (connectivity) matrix. This matrix tells
which pixels drain to which.
For example, pixel i will receive area from np.nonzero(A[i, :])
in the proportions given in A[i, :]. So the row gives the pixel
drained to, and the columns give the pixels drained from.
"""
shp = section.shape
mat_data = np.row_stack((proportion, 1 - proportion))
NN = np.prod(shp)
i12 = np.arange(NN).reshape(shp)
j1 = - np.ones_like(i12)
j2 = - np.ones_like(i12)
# make the connectivity for the non-flats/pits
j1, j2 = self._mk_connectivity(section, i12, j1, j2)
j = np.row_stack((j1, j2))
i = np.row_stack((i12, i12))
# connectivity for flats/pits
if self.drain_pits:
pit_i, pit_j, pit_prop, flats, mag = \
self._mk_connectivity_pits(i12, flats, elev, mag, dX, dY)
j = np.concatenate([j.ravel(), pit_j]).astype('int64')
i = np.concatenate([i.ravel(), pit_i]).astype('int64')
mat_data = np.concatenate([mat_data.ravel(), pit_prop])
elif self.drain_flats:
j1, j2, mat_data, flat_i, flat_j, flat_prop = \
self._mk_connectivity_flats(
i12, j1, j2, mat_data, flats, elev, mag)
j = np.concatenate([j.ravel(), flat_j]).astype('int64')
i = np.concatenate([i.ravel(), flat_i]).astype('int64')
mat_data = np.concatenate([mat_data.ravel(), flat_prop])
# This prevents no-data values, remove connections when not present,
# and makes sure that floating point precision errors do not
# create circular references where a lower elevation cell drains
# to a higher elevation cell
I = ~np.isnan(mat_data) & (j != -1) & (mat_data > 1e-8) \
& (elev.ravel()[j] <= elev.ravel()[i])
mat_data = mat_data[I]
j = j[I]
i = i[I]
# %%Make the matrix and initialize
# What is A? The row i area receives area contributions from the
# entries in its columns. If all the entries in my columns have
# drained, then I can drain.
A = sps.csc_matrix((mat_data.ravel(),
np.row_stack((j.ravel(), i.ravel()))),
shape=(NN, NN))
normalize = np.array(A.sum(0) + 1e-16).squeeze()
A = A.dot(sps.diags(1.0 / normalize, 0))
return A | Calculates the adjacency (connectivity) matrix. This matrix tells
which pixels drain to which.
For example, pixel i will receive area from np.nonzero(A[i, :])
in the proportions given in A[i, :]. So the row gives the pixel
drained to, and the columns give the pixels drained from. | entailment |
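A minimal sketch of the final column normalization above: every column of A is scaled so a cell never hands out more than 100% of its area. The numbers are made up.

import numpy as np
import scipy.sparse as sps

rows = np.array([1, 2, 2])           # receiving cells
cols = np.array([0, 0, 1])           # contributing cells; column 0 sums to 1.2
vals = np.array([0.7, 0.5, 1.0])
A = sps.csc_matrix((vals, (rows, cols)), shape=(3, 3))

normalize = np.array(A.sum(0) + 1e-16).squeeze()
A = A.dot(sps.diags(1.0 / normalize, 0))
print(np.asarray(A.sum(0)).ravel())  # non-empty columns now sum to ~1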
def _mk_connectivity(self, section, i12, j1, j2):
"""
Helper function for _mk_adjacency_matrix. Calculates the drainage
neighbors and proportions based on the direction. This deals with
non-flat regions in the image. In this case, each pixel can only
drain to one or two neighbors.
"""
shp = np.array(section.shape) - 1
facets = self.facets
for ii, facet in enumerate(facets):
e1 = facet[1]
e2 = facet[2]
I = section[1:-1, 1:-1] == ii
j1[1:-1, 1:-1][I] = i12[1 + e1[0]:shp[0] + e1[0],
1 + e1[1]:shp[1] + e1[1]][I]
j2[1:-1, 1:-1][I] = i12[1 + e2[0]:shp[0] + e2[0],
1 + e2[1]:shp[1] + e2[1]][I]
# Now do the edges
# left edge
slc0 = [slice(1, -1), slice(0, 1)]
for ind in [0, 1, 6, 7]:
e1 = facets[ind][1]
e2 = facets[ind][2]
I = section[slc0] == ind
j1[slc0][I] = i12[1 + e1[0]:shp[0] + e1[0], e1[1]][I.ravel()]
j2[slc0][I] = i12[1 + e2[0]:shp[0] + e2[0], e2[1]][I.ravel()]
# right edge
slc0 = [slice(1, -1), slice(-1, None)]
for ind in [2, 3, 4, 5]:
e1 = facets[ind][1]
e2 = facets[ind][2]
I = section[slc0] == ind
j1[slc0][I] = i12[1 + e1[0]:shp[0] + e1[0],
shp[1] + e1[1]][I.ravel()]
j2[slc0][I] = i12[1 + e2[0]:shp[0] + e2[0],
shp[1] + e2[1]][I.ravel()]
# top edge
slc0 = [slice(0, 1), slice(1, -1)]
for ind in [4, 5, 6, 7]:
e1 = facets[ind][1]
e2 = facets[ind][2]
I = section[slc0] == ind
j1[slc0][I] = i12[e1[0], 1 + e1[1]:shp[1] + e1[1]][I.ravel()]
j2[slc0][I] = i12[e2[0], 1 + e2[1]:shp[1] + e2[1]][I.ravel()]
# bottom edge
slc0 = [slice(-1, None), slice(1, -1)]
for ind in [0, 1, 2, 3]:
e1 = facets[ind][1]
e2 = facets[ind][2]
I = section[slc0] == ind
j1[slc0][I] = i12[shp[0] + e1[0],
1 + e1[1]:shp[1] + e1[1]][I.ravel()]
j2[slc0][I] = i12[shp[0] + e2[0],
1 + e2[1]:shp[1] + e2[1]][I.ravel()]
# top-left corner
slc0 = [slice(0, 1), slice(0, 1)]
for ind in [6, 7]:
e1 = facets[ind][1]
e2 = facets[ind][2]
if section[slc0] == ind:
j1[slc0] = i12[e1[0], e1[1]]
j2[slc0] = i12[e2[0], e2[1]]
# top-right corner
slc0 = [slice(0, 1), slice(-1, None)]
for ind in [4, 5]:
e1 = facets[ind][1]
e2 = facets[ind][2]
if section[slc0] == ind:
j1[slc0] = i12[e1[0], shp[1] + e1[1]]
j2[slc0] = i12[e2[0], shp[1] + e2[1]]
# bottom-left corner
slc0 = [slice(-1, None), slice(0, 1)]
for ind in [0, 1]:
e1 = facets[ind][1]
e2 = facets[ind][2]
if section[slc0] == ind:
j1[slc0] = i12[shp[0] + e1[0], e1[1]]
j2[slc0] = i12[shp[0] + e2[0], e2[1]]
# bottom-right corner
slc0 = [slice(-1, None), slice(-1, None)]
for ind in [2, 3]:
e1 = facets[ind][1]
e2 = facets[ind][2]
if section[slc0] == ind:
j1[slc0] = i12[e1[0] + shp[0], shp[1] + e1[1]]
j2[slc0] = i12[e2[0] + shp[0], shp[1] + e2[1]]
return j1, j2 | Helper function for _mk_adjacency_matrix. Calculates the drainage
neighbors and proportions based on the direction. This deals with
non-flat regions in the image. In this case, each pixel can only
drain to one or two neighbors. | entailment |
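A tiny illustration of the neighbour-index trick used above: shifting a grid of linear indices by a facet's (row, col) offset gives, for every interior cell, the linear index of its neighbour in that direction. The offset below is a hypothetical facet edge, not one taken from self.facets.

import numpy as np

shape = (4, 5)
i12 = np.arange(np.prod(shape)).reshape(shape)
offset = (-1, 1)                                 # neighbour one row up, one column right
interior = i12[1:-1, 1:-1]
neigh = i12[1 + offset[0]:shape[0] - 1 + offset[0],
            1 + offset[1]:shape[1] - 1 + offset[1]]
print(interior[0, 0], neigh[0, 0])               # cell 6 points at cell 2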
def _mk_connectivity_pits(self, i12, flats, elev, mag, dX, dY):
"""
Helper function for _mk_adjacency_matrix. This is a more general
version of _mk_connectivity_flats which drains pits and flats to nearby
but non-adjacent pixels. The slope magnitude (and flats mask) is
updated for these pits and flats so that the TWI can be computed.
"""
e = elev.data.ravel()
pit_i = []
pit_j = []
pit_prop = []
warn_pits = []
pits = i12[flats & (elev > 0)]
I = np.argsort(e[pits])
for pit in pits[I]:
# find drains
pit_area = np.array([pit], 'int64')
drain = None
epit = e[pit]
for it in range(self.drain_pits_max_iter):
border = get_border_index(pit_area, elev.shape, elev.size)
eborder = e[border]
emin = eborder.min()
if emin < epit:
drain = border[eborder < epit]
break
pit_area = np.concatenate([pit_area, border[eborder == emin]])
if drain is None:
warn_pits.append(pit)
continue
ipit, jpit = np.unravel_index(pit, elev.shape)
Idrain, Jdrain = np.unravel_index(drain, elev.shape)
# filter by drain distance in coordinate space
if self.drain_pits_max_dist:
dij = np.sqrt((ipit - Idrain)**2 + (jpit-Jdrain)**2)
b = dij <= self.drain_pits_max_dist
if not b.any():
warn_pits.append(pit)
continue
drain = drain[b]
Idrain = Idrain[b]
Jdrain = Jdrain[b]
# calculate real distances
dx = [_get_dX_mean(dX, ipit, idrain) * (jpit - jdrain)
for idrain, jdrain in zip(Idrain, Jdrain)]
dy = [dY[make_slice(ipit, idrain)].sum() for idrain in Idrain]
dxy = np.sqrt(np.array(dx)**2 + np.array(dy)**2)
# filter by drain distance in real space
if self.drain_pits_max_dist_XY:
b = dxy <= self.drain_pits_max_dist_XY
if not b.any():
warn_pits.append(pit)
continue
drain = drain[b]
dxy = dxy[b]
# calculate magnitudes
s = (e[pit]-e[drain]) / dxy
# connectivity info
# TODO proportion calculation (_mk_connectivity_flats used elev?)
pit_i += [pit for i in drain]
pit_j += drain.tolist()
pit_prop += s.tolist()
# update pit magnitude and flats mask
mag[ipit, jpit] = np.mean(s)
flats[ipit, jpit] = False
if warn_pits:
warnings.warn("Warning %d pits had no place to drain to in this "
"chunk" % len(warn_pits))
# Note: returning flats and mag here is not strictly necessary
return (np.array(pit_i, 'int64'),
np.array(pit_j, 'int64'),
np.array(pit_prop, 'float64'),
flats,
mag) | Helper function for _mk_adjacency_matrix. This is a more general
version of _mk_connectivity_flats which drains pits and flats to nearby
but non-adjacent pixels. The slope magnitude (and flats mask) is
updated for these pits and flats so that the TWI can be computed. | entailment |
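A simplified sketch of the pit-draining search described above, with scipy's binary_dilation standing in for the project's get_border_index helper and square cells assumed: grow the pit region ring by ring until some border cell sits lower than the pit.

import numpy as np
from scipy.ndimage import binary_dilation

elev = np.array([[9., 9., 9., 9., 9.],
                 [9., 3., 3., 3., 9.],
                 [9., 3., 2., 3., 1.],
                 [9., 3., 3., 3., 9.],
                 [9., 9., 9., 9., 9.]])
region = np.zeros_like(elev, bool)
region[2, 2] = True                  # the pit cell (elevation 2)
epit = elev[2, 2]

drain = None
for _ in range(10):                  # plays the role of drain_pits_max_iter
    border = binary_dilation(region) & ~region
    if (elev[border] < epit).any():
        drain = np.argwhere(border & (elev < epit))
        break
    # otherwise absorb the lowest border cells and keep growing
    region |= border & (elev == elev[border].min())
print(drain)                         # [[2 4]] -- the cell with elevation 1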
def _mk_connectivity_flats(self, i12, j1, j2, mat_data, flats, elev, mag):
"""
Helper function for _mk_adjacency_matrix. This calculates the
connectivity for flat regions. Every pixel in the flat will drain
to a random pixel in the flat. This accumulates all the area in the
flat region to a single pixel. All that area is then drained from
that pixel to the surroundings on the flat. If the border of the
flat has a single pixel with a much lower elevation, all the area will
go towards that pixel. If the border has pixels with similar elevation,
then the area will be distributed amongst all the border pixels
proportional to their elevation.
"""
nn, mm = flats.shape
NN = np.prod(flats.shape)
# Label the flats
assigned, n_flats = spndi.label(flats, FLATS_KERNEL3)
flat_ids, flat_coords, flat_labelsf = _get_flat_ids(assigned)
flat_j = [None] * n_flats
flat_prop = [None] * n_flats
flat_i = [None] * n_flats
# Temporary array to find the flats
edges = np.zeros_like(flats)
# %% Calculate the flat drainage
warn_flats = []
for ii in xrange(n_flats):
ids_flats = flat_ids[flat_coords[ii]:flat_coords[ii+1]]
edges[:] = 0
j = ids_flats % mm
i = ids_flats // mm
for iii in [-1, 0, 1]:
for jjj in [-1, 0, 1]:
i_2 = i + iii
j_2 = j + jjj
ids_tmp = (i_2 >= 0) & (j_2 >= 0) & (i_2 < nn) & (j_2 < mm)
edges[i_2[ids_tmp], j_2[ids_tmp]] += \
FLATS_KERNEL3[iii+1, jjj+1]
edges.ravel()[ids_flats] = 0
ids_edge = np.argwhere(edges.ravel()).squeeze()
flat_elev_loc = elev.ravel()[ids_flats]
# It is possible for the edges to merge 2 flats, so we need to
# take the lower elevation to avoid large circular regions
flat_elev = flat_elev_loc.min()
loc_elev = elev.ravel()[ids_edge]
# Filter out any elevations larger than the flat elevation
# TODO: Figure out if this should be <= or <
I_filt = loc_elev < flat_elev
try:
loc_elev = loc_elev[I_filt]
loc_slope = mag.ravel()[ids_edge][I_filt]
except Exception:  # If this is fully masked out (i.e. inside a no-data area)
loc_elev = np.array([])
loc_slope = np.array([])
loc_dx = self.dX.mean()
# Now I have to figure out if I should just use the minimum or
# distribute amongst many pixels on the flat boundary
n = len(loc_slope)
if n == 0: # Flat does not have anywhere to drain
# Let's see if the flat goes to the edge. If yes, we'll just
# distribute the area along the edge.
ids_flat_on_edge = ((ids_flats % mag.shape[1]) == 0) | \
((ids_flats % mag.shape[1]) == (mag.shape[1] - 1)) | \
(ids_flats <= mag.shape[1]) | \
(ids_flats >= (mag.shape[1] * (mag.shape[0] - 1)))
if ids_flat_on_edge.sum() == 0:
warn_flats.append(ii)
continue
drain_ids = ids_flats[ids_flat_on_edge]
loc_proportions = mag.ravel()[ids_flats[ids_flat_on_edge]]
loc_proportions /= loc_proportions.sum()
ids_flats = ids_flats[~ids_flat_on_edge]
# This flat is entirely on the edge of the image
if len(ids_flats) == 0:
# therefore, whatever drains into it is done.
continue
flat_elev_loc = flat_elev_loc[~ids_flat_on_edge]
else: # Flat has a place to drain to
min_edges = np.zeros(loc_slope.shape, bool)
min_edges[np.argmin(loc_slope)] = True
# Add to the min edges any edge whose slope is within an error
# tolerance of the minimum
min_edges = (loc_slope + loc_slope * loc_dx / 2) \
>= loc_slope[min_edges]
drain_ids = ids_edge[I_filt][min_edges]
loc_proportions = loc_slope[min_edges]
loc_proportions /= loc_proportions.sum()
# Now distribute the connectivity amongst the chosen elevations
# proportional to their slopes
# First, let all the ids in the flats drain to 1
# flat id (for ease)
one_id = np.zeros(ids_flats.size, bool)
one_id[np.argmin(flat_elev_loc)] = True
j1.ravel()[ids_flats[~one_id]] = ids_flats[one_id]
mat_data.ravel()[ids_flats[~one_id]] = 1
# Negative indices will be eliminated before making the matrix
j2.ravel()[ids_flats[~one_id]] = -1
mat_data.ravel()[ids_flats[~one_id] + NN] = 0
# Now drain the 1 flat to the drains
j1.ravel()[ids_flats[one_id]] = drain_ids[0]
mat_data.ravel()[ids_flats[one_id]] = loc_proportions[0]
if len(drain_ids) > 1:
j2.ravel()[ids_flats[one_id]] = drain_ids[1]
mat_data.ravel()[ids_flats[one_id] + NN] = loc_proportions[1]
if len(loc_proportions) > 2:
flat_j[ii] = drain_ids[2:]
flat_prop[ii] = loc_proportions[2:]
flat_i[ii] = np.ones(drain_ids[2:].size, 'int64') * ids_flats[one_id]
try:
flat_j = np.concatenate([fj for fj in flat_j if fj is not None])
flat_prop = \
np.concatenate([fp for fp in flat_prop if fp is not None])
flat_i = np.concatenate([fi for fi in flat_i if fi is not None])
except ValueError:  # every entry in the flat lists was None
flat_j = np.array([], 'int64')
flat_prop = np.array([], 'float64')
flat_i = np.array([], 'int64')
if len(warn_flats) > 0:
warnings.warn("Warning %d flats had no place" % len(warn_flats) +
" to drain to --> these are pits (check pit-remove"
"algorithm).")
return j1, j2, mat_data, flat_i, flat_j, flat_prop | Helper function for _mk_adjacency_matrix. This calculates the
connectivity for flat regions. Every pixel in the flat will drain
to a random pixel in the flat. This accumulates all the area in the
flat region to a single pixel. All that area is then drained from
that pixel to the surroundings on the flat. If the border of the
flat has a single pixel with a much lower elevation, all the area will
go towards that pixel. If the border has pixels with similar elevation,
then the area will be distributed amongst all the border pixels
proportional to their elevation. | entailment |
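A small sketch of how the flat regions can be labelled with full 8-connectivity, mirroring the spndi.label call above; FLATS_KERNEL3 is assumed here to be np.ones((3, 3)).

import numpy as np
import scipy.ndimage as spndi

flats = np.array([[1, 1, 0, 0],
                  [0, 1, 0, 1],
                  [0, 0, 0, 1],
                  [1, 0, 0, 0]], bool)
assigned, n_flats = spndi.label(flats, structure=np.ones((3, 3)))
print(n_flats)    # 3 separate flat regions
print(assigned)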
def calc_twi(self):
"""
Calculates the topographic wetness index and saves the result in
self.twi.
Returns
-------
twi : array
Array giving the topographic wetness index at each pixel
"""
if self.uca is None:
self.calc_uca()
gc.collect() # Just in case
min_area = self.twi_min_area
min_slope = self.twi_min_slope
twi = self.uca.copy()
if self.apply_twi_limits_on_uca:
twi[twi > self.uca_saturation_limit * min_area] = \
self.uca_saturation_limit * min_area
gc.collect() # Just in case
twi = np.log((twi) / (self.mag + min_slope))
# apply the cap
if self.apply_twi_limits:
twi_sat_value = \
np.log(self.uca_saturation_limit * min_area / min_slope)
twi[twi > twi_sat_value] = twi_sat_value
# multiply by 10 for better integer resolution when storing
self.twi = twi * 10
gc.collect() # Just in case
return twi | Calculates the topographic wetness index and saves the result in
self.twi.
Returns
-------
twi : array
Array giving the topographic wetness index at each pixel | entailment |
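A toy illustration of the formula used in calc_twi, twi = ln(uca / (mag + min_slope)), with the optional cap at the saturation value. All numbers here are made up.

import numpy as np

uca = np.array([10.0, 200.0, 4000.0])    # upstream contributing area
mag = np.array([0.30, 0.05, 0.001])      # slope magnitude
min_slope, min_area, sat_limit = 1e-3, 100.0, 32.0

twi = np.log(uca / (mag + min_slope))
twi_sat = np.log(sat_limit * min_area / min_slope)
twi[twi > twi_sat] = twi_sat
print(twi * 10)                          # scaled by 10, as in calc_twi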
def _plot_connectivity(self, A, data=None, lims=[None, None]):
"""
A debug function used to plot the adjacency/connectivity matrix.
This is really just a light wrapper around _plot_connectivity_helper
"""
if data is None:
data = self.data
B = A.tocoo()
self._plot_connectivity_helper(B.col, B.row, B.data, data, lims) | A debug function used to plot the adjacency/connectivity matrix.
This is really just a light wrapper around _plot_connectivity_helper | entailment |
def _plot_connectivity_helper(self, ii, ji, mat_datai, data, lims=[1, 8]):
"""
A debug function used to plot the adjacency/connectivity matrix.
"""
from matplotlib.pyplot import quiver, colorbar, clim, matshow
I = ~np.isnan(mat_datai) & (ji != -1) & (mat_datai >= 0)
mat_data = mat_datai[I]
j = ji[I]
i = ii[I]
x = i.astype(float) % data.shape[1]
y = i.astype(float) // data.shape[1]
x1 = (j.astype(float) % data.shape[1]).ravel()
y1 = (j.astype(float) // data.shape[1]).ravel()
nx = (x1 - x)
ny = (y1 - y)
matshow(data, cmap='gist_rainbow'); colorbar(); clim(lims)
quiver(x, y, nx, ny, mat_data.ravel(), angles='xy', scale_units='xy',
scale=1, cmap='bone')
colorbar(); clim([0, 1]) | A debug function used to plot the adjacency/connectivity matrix. | entailment |
def _plot_debug_slopes_directions(self):
"""
A debug function to plot the direction calculated in various ways.
"""
# %%
from matplotlib.pyplot import matshow, colorbar, clim, title
matshow(self.direction / np.pi * 180); colorbar(); clim(0, 360)
title('Direction')
mag2, direction2 = self._central_slopes_directions()
matshow(direction2 / np.pi * 180.0); colorbar(); clim(0, 360)
title('Direction (central difference)')
matshow(self.mag); colorbar()
title('Magnitude')
matshow(mag2); colorbar(); title("Magnitude (Central difference)")
# %%
# Compare to Taudem
filename = self.file_name
os.chdir('testtiff')
try:
os.remove('test_ang.tif')
os.remove('test_slp.tif')
except OSError:  # the files may not exist yet
pass
cmd = ('dinfflowdir -fel "%s" -ang "%s" -slp "%s"' %
(os.path.split(filename)[-1], 'test_ang.tif', 'test_slp.tif'))
taudem._run(cmd)
td_file = GdalReader(file_name='test_ang.tif')
td_ang, = td_file.raster_layers
td_file2 = GdalReader(file_name='test_slp.tif')
td_mag, = td_file2.raster_layers
os.chdir('..')
matshow(td_ang.raster_data / np.pi*180); clim(0, 360); colorbar()
title('Taudem direction')
matshow(td_mag.raster_data); colorbar()
title('Taudem magnitude')
matshow(self.data); colorbar()
title('The test data (elevation)')
diff = (td_ang.raster_data - self.direction) / np.pi * 180.0
diff[np.abs(diff) > 300] = np.nan
matshow(diff); colorbar(); clim([-1, 1])
title('Taudem direction - calculated Direction')
# normalize magnitudes
mag2 = td_mag.raster_data
mag2 /= np.nanmax(mag2)
mag = self.mag.copy()
mag /= np.nanmax(mag)
matshow(mag - mag2); colorbar()
title('Taudem magnitude - calculated magnitude')
del td_file
del td_file2
del td_ang
del td_mag | A debug function to plot the direction calculated in various ways. | entailment |
def clean(ctx, dry_run=False):
"""Cleanup generated document artifacts."""
basedir = ctx.sphinx.destdir or "build/docs"
cleanup_dirs([basedir], dry_run=dry_run) | Cleanup generated document artifacts. | entailment |
def build(ctx, builder="html", options=""):
"""Build docs with sphinx-build"""
sourcedir = ctx.config.sphinx.sourcedir
destdir = Path(ctx.config.sphinx.destdir or "build")/builder
destdir = destdir.abspath()
with cd(sourcedir):
destdir_relative = Path(".").relpathto(destdir)
command = "sphinx-build {opts} -b {builder} {sourcedir} {destdir}" \
.format(builder=builder, sourcedir=".",
destdir=destdir_relative, opts=options)
ctx.run(command) | Build docs with sphinx-build | entailment |
def browse(ctx):
"""Open documentation in web browser."""
page_html = Path(ctx.config.sphinx.destdir)/"html"/"index.html"
if not page_html.exists():
build(ctx, builder="html")
assert page_html.exists()
open_cmd = "open" # -- WORKS ON: MACOSX
if sys.platform.startswith("win"):
open_cmd = "start"
ctx.run("{open} {page_html}".format(open=open_cmd, page_html=page_html)) | Open documentation in web browser. | entailment |
def save(ctx, dest="docs.html", format="html"):
"""Save/update docs under destination directory."""
print("STEP: Generate docs in HTML format")
build(ctx, builder=format)
print("STEP: Save docs under %s/" % dest)
source_dir = Path(ctx.config.sphinx.destdir)/format
Path(dest).rmtree_p()
source_dir.copytree(dest)
# -- POST-PROCESSING: Polish up.
for part in [ ".buildinfo", ".doctrees" ]:
partpath = Path(dest)/part
if partpath.isdir():
partpath.rmtree_p()
elif partpath.exists():
partpath.remove_p() | Save/update docs under destination directory. | entailment |
def find_neighbors(neighbors, coords, I, source_files, f, sides):
"""Find the tile neighbors based on filenames
Parameters
-----------
neighbors : dict
Dictionary that stores the neighbors. Format is
neighbors["source_file_name"]["side"] = "neighbor_source_file_name"
coords : list
List of coordinates determined from the filename.
See :py:func:`utils.parse_fn`
I : array
Sort index. Different sorting schemes will speed up when neighbors
are found
source_files : list
List of strings of source file names
f : callable
Function that determines if two tiles are neighbors based on their
coordinates. f(c1, c2) returns True if tiles are neighbors
sides : list
List of 2 strings that give the "side" where tiles are neighbors.
Returns
-------
neighbors : dict
Dictionary of neighbors
Notes
-------
For example, if Tile1 is to the left of Tile2, then
neighbors['Tile1']['right'] = 'Tile2'
neighbors['Tile2']['left'] = 'Tile1'
"""
for i, c1 in enumerate(coords):
me = source_files[I[i]]
# If the neighbor on this side has already been found...
if neighbors[me][sides[0]] != '':
continue
# could try coords[i:] (+ fixes) for speed if it becomes a problem
for j, c2 in enumerate(coords):
if f(c1, c2):
# then the tiles are neighbors
neigh = source_files[I[j]]
neighbors[me][sides[0]] = neigh
neighbors[neigh][sides[1]] = me
break
return neighbors | Find the tile neighbors based on filenames
Parameters
-----------
neighbors : dict
Dictionary that stores the neighbors. Format is
neighbors["source_file_name"]["side"] = "neighbor_source_file_name"
coords : list
List of coordinates determined from the filename.
See :py:func:`utils.parse_fn`
I : array
Sort index. Different sorting schemes will speed up when neighbors
are found
source_files : list
List of strings of source file names
f : callable
Function that determines if two tiles are neighbors based on their
coordinates. f(c1, c2) returns True if tiles are neighbors
sides : list
List of 2 strings that give the "side" where tiles are neighbors.
Returns
-------
neighbors : dict
Dictionary of neighbors
Notes
-------
For example, if Tile1 is to the left of Tile2, then
neighbors['Tile1']['right'] = 'Tile2'
neighbors['Tile2']['left'] = 'Tile1' | entailment |
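A hypothetical usage sketch for find_neighbors with two 1-degree tiles whose names encode their lower-left corner. The filenames, coordinates, and the is_left_of test are all made up for illustration.

source_files = ['N30W100.tif', 'N30W099.tif']
coords = [(30, -100), (30, -99)]              # (lat, lon) parsed from the names
I = [0, 1]                                    # identity sort order
neighbors = {fn: {'left': '', 'right': '', 'top': '', 'bottom': ''}
             for fn in source_files}

def is_left_of(c1, c2):
    # True when c1 sits immediately to the left (west) of c2
    return c1[0] == c2[0] and c2[1] - c1[1] == 1

neighbors = find_neighbors(neighbors, coords, I, source_files,
                           is_left_of, ['right', 'left'])
print(neighbors['N30W100.tif']['right'])      # 'N30W099.tif'
print(neighbors['N30W099.tif']['left'])       # 'N30W100.tif'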
def set_neighbor_data(self, elev_fn, dem_proc, interp=None):
"""
From the elevation filename, we can figure out and load the data and
done arrays.
"""
if interp is None:
interp = self.build_interpolator(dem_proc)
opp = {'top': 'bottom', 'left': 'right'}
for key in self.neighbors[elev_fn].keys():
tile = self.neighbors[elev_fn][key]
if tile == '':
continue
oppkey = key
for me, neigh in opp.iteritems():
if me in key:
oppkey = oppkey.replace(me, neigh)
else:
oppkey = oppkey.replace(neigh, me)
opp_edge = self.neighbors[tile][oppkey]
if opp_edge == '':
continue
interp.values = dem_proc.uca[::-1, :]
# interp.values[:, 0] = np.ravel(dem_proc.uca) # for other interp.
# for the top-left tile we have to set the bottom and right edges
# of that tile, so two edges for those tiles
for key_ed in oppkey.split('-'):
self.edges[tile][key_ed].set_data('data', interp)
interp.values = dem_proc.edge_done[::-1, :].astype(float)
# interp.values[:, 0] = np.ravel(dem_proc.edge_done)
for key_ed in oppkey.split('-'):
self.edges[tile][key_ed].set_data('done', interp) | From the elevation filename, we can figure out and load the data and
done arrays. | entailment |
def update_edge_todo(self, elev_fn, dem_proc):
"""
Can figure out how to update the todo based on the elev filename
"""
for key in self.edges[elev_fn].keys():
self.edges[elev_fn][key].set_data('todo', data=dem_proc.edge_todo) | Can figure out how to update the todo based on the elev filename | entailment |
def update_edges(self, elev_fn, dem_proc):
"""
After finishing a calculation, this will update the neighbors and the
todo for that tile
"""
interp = self.build_interpolator(dem_proc)
self.update_edge_todo(elev_fn, dem_proc)
self.set_neighbor_data(elev_fn, dem_proc, interp) | After finishing a calculation, this will update the neighbors and the
todo for that tile | entailment |
def get_edge_init_data(self, fn, save_path=None):
"""
Creates the initialization data from the edge structure
"""
edge_init_data = {key: self.edges[fn][key].get('data') for key in
self.edges[fn].keys()}
edge_init_done = {key: self.edges[fn][key].get('done') for key in
self.edges[fn].keys()}
edge_init_todo = {key: self.edges[fn][key].get('todo') for key in
self.edges[fn].keys()}
return edge_init_data, edge_init_done, edge_init_todo | Creates the initialization data from the edge structure | entailment |