repo (string, 2–99 chars) | file (string, 13–225 chars) | code (string, 0–18.3M chars) | file_length (int64, 0–18.3M) | avg_line_length (float64, 0–1.36M) | max_line_length (int64, 0–4.26M) | extension_type (string, 1 class)
---|---|---|---|---|---|---
PyBDSF | PyBDSF-master/bdsf/collapse.py |
"""Module collapse
Defines operation Op_collapse which collapses 3D image. Calculates and
stores mean and rms (normal and clipped) per channel anyway for further
use, even if weights are unity.
"""
from __future__ import absolute_import
import numpy as N
from .image import *
from . import _cbdsm
#_cbdsm.init_numpy()
from . import mylogger
from . import functions as func
class Op_collapse(Op):
"""Collapse 3D image"""
def __call__(self, img):
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Collapse")
if img.opts.polarisation_do:
pols = ['I', 'Q', 'U', 'V'] # make sure I is done first
else:
pols = ['I'] # assume I is always present
img.ch0_Q_arr = None
img.ch0_U_arr = None
img.ch0_V_arr = None
if img.shape[1] > 1:
c_mode = img.opts.collapse_mode
chan0 = img.opts.collapse_ch0
c_list = img.opts.collapse_av
c_wts = img.opts.collapse_wt
if c_list == []: c_list = N.arange(img.shape[1])
if len(c_list) == 1 and c_mode=='average':
c_mode = 'single'
chan0 = c_list[0]
img.collapse_ch0 = chan0
ch0sh = img.image_arr.shape[2:]
if img.opts.polarisation_do:
ch0images = ['ch0_arr', 'ch0_Q_arr', 'ch0_U_arr', 'ch0_V_arr']
else:
ch0images = ['ch0_arr']
# assume all Stokes images have the same blank pixels as I:
blank = N.isnan(img.image_arr[0])
hasblanks = blank.any()
if img.opts.kappa_clip is None:
kappa = -img.pixel_beamarea()
else:
kappa = img.opts.kappa_clip
mean, rms, cmean, crms = chan_stats(img, kappa)
img.channel_mean = mean; img.channel_rms = rms
img.channel_clippedmean = cmean; img.channel_clippedrms = crms
for ipol, pol in enumerate(pols):
if c_mode == 'single':
if pol == 'I':
ch0 = img.image_arr[0, chan0]
img.ch0_arr = ch0
mylogger.userinfo(mylog, 'Source extraction will be ' \
'done on channel', '%i (%.3f MHz)' % \
(chan0, img.frequency/1e6))
else:
ch0[:] = img.image_arr[ipol, chan0][:]
img.__setattr__(ch0images[ipol][:], ch0)
elif c_mode == 'average':
if not hasblanks:
if pol == 'I':
ch0, wtarr = avspc_direct(c_list, img.image_arr[0], img.channel_clippedrms, c_wts)
else:
# use wtarr from the I image, which is always collapsed first
ch0, wtarr = avspc_direct(c_list, img.image_arr[ipol], img.channel_clippedrms, c_wts, wtarr=wtarr)
else:
if pol == 'I':
ch0, wtarr = avspc_blanks(c_list, img.image_arr[0], img.channel_clippedrms, c_wts)
else:
# use wtarr from the I image, which is always collapsed first
ch0, wtarr = avspc_blanks(c_list, img.image_arr[ipol], img.channel_clippedrms, c_wts, wtarr=wtarr)
img.__setattr__(ch0images[ipol][:], ch0)
if pol == 'I':
img.avspc_wtarr = wtarr
init_freq_collapse(img, wtarr)
if c_wts == 'unity':
mylogger.userinfo(mylog, 'Channels averaged with '\
'uniform weights')
else:
mylogger.userinfo(mylog, 'Channels averaged with '\
'weights=(1/rms)^2')
mylogger.userinfo(mylog, 'Source extraction will be '\
'done on averaged ("ch0") image')
mylogger.userinfo(mylog, 'Frequency of averaged '\
'image', '%.3f MHz' % \
(img.frequency/1e6,))
str1 = " ".join(str(n) for n in c_list)
mylog.debug('%s %s' % ('Channels averaged : ', str1))
str1 = " ".join(["%9.4e" % n for n in wtarr])
mylog.debug('%s %s %s' % ('Channel weights : ', str1, '; unity=zero if c_wts="rms"'))
elif c_mode=='file':
mylogger.userinfo(mylog, 'Reading ch0 image from file %s' % (img.opts.collapse_file))
image,hdr=func.read_image_from_file(img.opts.collapse_file, img, None, quiet=False)
if pol == 'I':
ch0 = image[0,0]
img.ch0_arr = ch0
else:
raise NotImplementedError('Polarization cubes not allowed in file mode')
else:
raise NotImplementedError('Mode supplied not implemented') # should never happen!
if img.opts.output_all:
func.write_image_to_file(img.use_io, img.imagename+'.ch0_'+pol+'.fits', ch0,
img, outdir=img.basedir)
mylog.debug('%s %s ' % ('Writing file ', img.imagename+'.ch0_'+pol+'.fits'))
else:
# Only one channel in image
image = img.image_arr
img.ch0_arr = image[0, 0]
mylogger.userinfo(mylog, 'Frequency of image',
'%.3f MHz' % (img.frequency/1e6,))
if img.opts.polarisation_do:
for pol in pols[1:]:
if pol == 'Q':
img.ch0_Q_arr = image[1, 0][:]
if pol == 'U':
img.ch0_U_arr = image[2, 0][:]
if pol == 'V':
img.ch0_V_arr = image[3, 0][:]
# create mask if needed (assume all pols have the same mask as I)
image = img.ch0_arr
mask = N.isnan(image)
img.blankpix = N.sum(mask)
frac_blank = round(
float(img.blankpix) / float(image.shape[0] * image.shape[1]),
3)
mylogger.userinfo(mylog, "Number of blank pixels", str(img.blankpix)
+ ' (' + str(frac_blank * 100.0) + '%)')
if img.opts.blank_limit is not None:
import scipy.signal
import sys
threshold = img.opts.blank_limit
mylogger.userinfo(mylog, "Blanking pixels with values "
"below %.1e Jy/beam" % (threshold,))
bad = (abs(image) < threshold)
original_stdout = sys.stdout # keep a reference to STDOUT
sys.stdout = func.NullDevice() # redirect the real STDOUT
count = scipy.signal.convolve2d(bad, N.ones((3, 3)), mode='same')
sys.stdout = original_stdout # turn STDOUT back on
mask_low = (count >= 5)
image[N.where(mask_low)] = N.nan
mask = N.isnan(image)
img.blankpix = N.sum(mask)
frac_blank = round(
float(img.blankpix) / float(image.shape[0] *
image.shape[1]), 3)
mylogger.userinfo(mylog, "Total number of blanked pixels",
str(img.blankpix) + ' (' + str(frac_blank * 100.0) + '%)')
masked = mask.any()
img.masked = masked
if masked:
img.mask_arr = mask
else:
img.mask_arr = None
if img.blankpix == image.shape[0] * image.shape[1]:
# ALL pixels are blanked!
raise RuntimeError('All pixels in the image are blanked.')
img.completed_Ops.append('collapse')
########################################################################################
def chan_stats(img, kappa):
bstat = func.bstat #_cbdsm.bstat
nchan = img.shape[1]
mean = []; rms = []; cmean = []; crms = []
for ichan in range(nchan):
if isinstance(img, Image): # check if img is an Image or just an ndarray
im = img.image_arr[0, ichan]
else:
im = img[0, ichan]
if N.any(im):
immask = N.isnan(im)
if immask.all():
m, r, cm, cr = 0, 0, 0, 0
else:
if immask.any():
m, r, cm, cr, cnt = bstat(im, immask, kappa)
else:
m, r, cm, cr, cnt = bstat(im, None, kappa)
else:
m, r, cm, cr = 0, 0, 0, 0
mean.append(m); rms.append(r); cmean.append(cm); crms.append(cr)
return N.array(mean), N.array(rms), N.array(cmean), N.array(crms)
########################################################################################
def avspc_direct(c_list, image, rmsarr, c_wts, wtarr=None):
shape2 = image.shape[1:]
ch0 = N.zeros(shape2, dtype=N.float32)
sumwts = 0.0
if wtarr is None:
wtarr = N.zeros(len(c_list))
for i, ch in enumerate(c_list):
im = image[ch]
r = rmsarr[ch]
if c_wts == 'unity': wt = 1.0
if c_wts == 'rms': wt = r
if r != 0:
wt = 1.0/(wt*wt)
else:
wt = 0
sumwts += wt
ch0 += im*wt
wtarr[i] = wt
else:
for i, ch in enumerate(c_list):
im = image[ch]
sumwts += wtarr[i]
ch0 += im*wtarr[i]
ch0 = ch0/sumwts
return ch0, wtarr
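# Illustrative sketch (not part of PyBDSF): when c_wts == 'rms', avspc_direct
# above uses inverse-variance weights, wt = 1/rms**2, so noisier channels
# contribute less to the collapsed ch0 image. The hypothetical helper below
# shows the same weighting applied to a plain (nchan, y, x) array with no
# blanked pixels.
def _example_inverse_variance_average(cube, rms):
    """Hypothetical illustration only: inverse-variance average over channels."""
    wts = 1.0 / N.asarray(rms, dtype=N.float64) ** 2
    return N.tensordot(wts, cube, axes=1) / wts.sum()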
########################################################################################
def avspc_blanks(c_list, image, rmsarr, c_wts, wtarr=None):
shape2 = image.shape[1:]
ch0 = N.zeros(shape2, dtype=N.float32)
sumwtim = N.zeros(shape2, dtype=N.float32)
if wtarr is None:
wtarr = N.zeros(len(c_list))
for i, ch in enumerate(c_list):
im = image[ch]
r = rmsarr[ch]
if c_wts == 'unity': wt = 1.0
if c_wts == 'rms': wt = r
if r > 1e-18 and r < 1e18:
# Set reasonable limits to avoid overflow of float32
wt = 1.0/(wt*wt)
else:
wt = 0
wtim = N.ones(shape2, dtype=N.float32)*wt*(~N.isnan(im))
sumwtim += wtim
ch0 += N.nan_to_num(im)*wtim
wtarr[i] = wt
else:
for i, ch in enumerate(c_list):
im = image[ch]
wtim = N.ones(shape2)*wtarr[i]*(~N.isnan(im))
sumwtim += wtim
ch0 += N.nan_to_num(im)*wtim
ch0 = ch0/sumwtim
return ch0, wtarr
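# Illustrative sketch (not part of PyBDSF): avspc_blanks above handles blanked
# (NaN) pixels by building a per-channel weight image that is zeroed wherever
# that channel is NaN, so the normalization varies from pixel to pixel. A
# hypothetical standalone equivalent:
def _example_nanaware_average(cube, rms):
    """Hypothetical illustration only: NaN-aware inverse-variance average."""
    wts = (1.0 / N.asarray(rms, dtype=N.float64) ** 2)[:, None, None]
    valid = ~N.isnan(cube)
    return N.nansum(cube * wts, axis=0) / N.sum(wts * valid, axis=0)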
########################################################################################
def init_freq_collapse(img, wtarr):
# Place appropriate, post-collapse frequency info in img
# Calculate weighted average frequency
if img.opts.frequency_sp is not None:
c_list = img.opts.collapse_av
if c_list == []: c_list = N.arange(img.image_arr.shape[1])
freqs = img.opts.frequency_sp
if len(freqs) != len(c_list):
raise RuntimeError("Number of channels and number of frequencies specified "\
"by user do not match")
sumwts = 0.0
sumfrq = 0.0
for i, ch in enumerate(c_list):
sumwts += wtarr[i]
sumfrq += freqs[ch]*wtarr[i]
img.frequency = sumfrq / sumwts
img.freq_pars = (img.frequency, 0.0, 0.0)
else:
# Calculate from header info
c_list = img.opts.collapse_av
if c_list == []: c_list = N.arange(img.image_arr.shape[1])
sumwts = 0.0
sumfrq = 0.0
spec_indx = img.wcs_obj.wcs.spec
if spec_indx == -1 and img.opts.frequency_sp is None:
raise RuntimeError("Frequency information not found in header and frequencies "\
"not specified by user")
else:
for i, ch in enumerate(c_list):
sumwts += wtarr[i]
freq = img.wcs_obj.p2f(ch)
sumfrq += freq*wtarr[i]
img.frequency = sumfrq / sumwts
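# Worked example (comments only, hypothetical numbers): with collapse weights
# wtarr = [1.0, 1.0, 0.25] and channel frequencies of 140, 150 and 160 MHz,
# the reference frequency computed above is
#   (1.0*140 + 1.0*150 + 0.25*160) / (1.0 + 1.0 + 0.25) = 330 / 2.25 ~= 146.7 MHz,
# i.e. a weighted mean that leans towards the channels with larger weights.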
| 11,493 | 36.562092 | 114 | py |
PyBDSF | PyBDSF-master/bdsf/const.py |
"""Constants
Some universal constants
"""
import math
pi=math.pi
fwsig=2.35482
rad=180.0/pi
c=2.99792458e8
bolt=1.3806505e-23
sq2=math.sqrt(2)
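# Notes added for clarity (not part of the original module): fwsig is the
# FWHM-to-sigma ratio of a Gaussian, 2*sqrt(2*ln 2):
#   >>> import math
#   >>> 2.0 * math.sqrt(2.0 * math.log(2.0))
#   2.3548200450309493
# rad converts radians to degrees (180/pi), c is the speed of light in m/s,
# and bolt is the Boltzmann constant in J/K.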
| 147 | 8.866667 | 24 | py |
PyBDSF | PyBDSF-master/bdsf/readimage.py |
"""Module readimage.
Defines operation Op_readimage which initializes image and WCS
The current implementation tries to reduce input file to 2D if
possible, as this makes more sense atm. One more important thing
to note -- in its default configuration pyfits will read data
in non-native format, so we have to convert it before usage. See
the read_image_from_file in functions.py for details.
Lastly, wcs and spectal information are stored in the PyWCS
object img.wcs_obj.
"""
from __future__ import absolute_import
import numpy as N
from .image import *
from . import functions as func
from . import mylogger
import sys
import shutil
import tempfile
import time
import os
class Op_readimage(Op):
"""Image file loader
Loads image and configures wcslib machinery for it.
"""
def __call__(self, img):
mylog = mylogger.logging.getLogger("PyBDSM." + img.log + "Readimage")
if img.opts.filename == '':
raise RuntimeError('Image file name not specified.')
# Check for trailing "/" in file name (since CASA images are directories).
# Although the general rule is to not alter the values in opts (only the
# user should be able to alter these), in this case there is no harm in
# replacing the file name in opts with the '/' trimmed off.
if img.opts.filename[-1] == '/':
img.opts.filename = img.opts.filename[:-1]
img.filename = img.opts.filename
# Determine indir if not explicitly given by user (in img.opts.indir)
if img.opts.indir is None:
indir = os.path.dirname(img.filename)
if indir == '':
indir = './'
img.indir = indir
else:
img.indir = img.opts.indir
# Set up output paths, etc.
parentname, basedir = func.set_up_output_paths(img.opts)
img.parentname = parentname # root name for constructing output files
img.imagename = img.parentname + '.pybdsf' # root name of output images (e.g., rms image)
img.outdir = basedir # path of parent output directory
img.basedir = os.path.join(basedir, img.parentname+'_pybdsf') # used for opts.output_all
if img.opts.solnname is not None:
# Add solname (if any) to basedir
img.basedir += img.opts.solnname
# Read in data and header
img.use_io = ''
image_file = os.path.basename(img.opts.filename)
result = func.read_image_from_file(image_file, img, img.indir)
if result is None:
raise RuntimeError("Cannot open file " + repr(image_file) + ". " + img._reason)
else:
data, hdr = result
# Check whether caching is to be used. If it is, set up a
# temporary directory. The temporary directory will be
# removed automatically upon exit.
if img.opts.do_cache:
img.do_cache = True
else:
img.do_cache = False
if img.do_cache:
mylog.info('Using disk caching.')
tmpdir = os.path.join(img.outdir, img.parentname+'_tmp')
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
img._tempdir_parent = TempDir(tmpdir)
img.tempdir = TempDir(tempfile.mkdtemp(dir=tmpdir))
import atexit, shutil
atexit.register(shutil.rmtree, img._tempdir_parent, ignore_errors=True)
else:
img.tempdir = None
# Store data and header in img. If polarisation_do = False, only store pol == 'I'
img.nchan = data.shape[1]
img.nstokes = data.shape[0]
mylogger.userinfo(mylog, 'Image size',
str(data.shape[-2:]) + ' pixels')
mylogger.userinfo(mylog, 'Number of channels',
'%i' % data.shape[1])
mylogger.userinfo(mylog, 'Number of Stokes parameters',
'%i' % data.shape[0])
if img.opts.polarisation_do and data.shape[0] == 1:
img.opts.polarisation_do = False
mylog.warning('Image has Stokes I only. Polarisation module disabled.')
if img.opts.polarisation_do or data.shape[0] == 1:
img.image_arr = data
else:
img.image_arr = data[0, :].reshape(1, data.shape[1], data.shape[2], data.shape[3])
img.header = hdr
img.shape = data.shape
img.j = 0
### initialize wcs conversion routines
self.init_wcs(img)
self.init_beam(img)
self.init_freq(img)
year, code = self.get_equinox(img)
if year is None:
mylog.info('Equinox not found in image header. Assuming J2000.')
img.equinox = 2000.0
else:
mylog.info('Equinox of image is %f.' % year)
img.equinox = year
if img.opts.output_all:
# Set up directory to write output to
opdir = img.opts.opdir_overwrite
if opdir not in ['overwrite', 'append']:
img.opts.opdir_overwrite = 'append'
if opdir == 'append':
mylog.info('Appending output files to directory ' + img.basedir)
img.basedir = os.path.join(img.basedir, time.strftime("%d%b%Y_%H.%M.%S"))
else:
mylog.info('Overwriting output files (if any) in directory ' + img.basedir)
if os.path.isdir(img.basedir):
os.system("rm -fr " + img.basedir + '/*')
# Make the final output directory
if not os.path.exists(img.basedir):
os.makedirs(img.basedir)
del data
img.completed_Ops.append('readimage')
return img
def init_wcs(self, img):
"""Initialize wcs pixel <=> sky conversion routines.
"""
from math import pi
import warnings
hdr = img.header
try:
from astropy.wcs import WCS
from astropy.wcs import FITSFixedWarning
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=DeprecationWarning)
warnings.filterwarnings("ignore",category=FITSFixedWarning)
t = WCS(hdr)
t.wcs.fix()
except ImportError as err:
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=DeprecationWarning)
from pywcs import WCS
t = WCS(hdr)
t.wcs.fix()
acdelt = [abs(hdr['cdelt1']), abs(hdr['cdelt2'])]
# Here we define p2s and s2p to allow celestial coordinate
# transformations. Transformations for other axes (e.g.,
# spectral) are stripped out.
def p2s(self, xy):
xy = list(xy)
for i in range(self.naxis-2):
xy.append(0)
if hasattr(self, 'wcs_pix2world'):
try:
xy_arr = N.array([xy[0:2]])
sky = self.wcs_pix2world(xy_arr, 0)
except:
xy_arr = N.array([xy])
sky = self.wcs_pix2world(xy_arr, 0)
else:
xy_arr = N.array([xy])
sky = self.wcs_pix2sky(xy_arr, 0)
return sky.tolist()[0][0:2]
def s2p(self, rd):
rd = list(rd)
for i in range(self.naxis-2):
rd.append(1) # For some reason, 0 gives nans with astropy in some situations
if hasattr(self, 'wcs_world2pix'):
try:
rd_arr = N.array([rd[0:2]])
pix = self.wcs_world2pix(rd_arr, 0)
except:
rd_arr = N.array([rd])
pix = self.wcs_world2pix(rd_arr, 0)
else:
rd_arr = N.array([rd])
pix = self.wcs_sky2pix(rd_arr, 0)
return pix.tolist()[0][0:2]
# Here we define functions to transform Gaussian parameters (major axis,
# minor axis, pos. angle) from the image plane to the celestial sphere.
# These transforms are valid only at the Gaussian's center and ignore
# any change across the extent of the Gaussian.
def gaus2pix(x, location=None, use_wcs=True):
""" Converts Gaussian parameters in deg to pixels.
x - (maj [deg], min [deg], pa [deg])
location - specifies the location in pixels (x, y) for which
transform is desired
Input beam angle should be degrees CCW from North.
The output beam angle is degrees CCW from the +y axis of the image.
"""
if use_wcs:
bmaj, bmin, bpa = x
brot = self.get_rot(img, location) # rotation delta CCW (in degrees) between N and +y axis of image
s1 = self.angdist2pixdist(img, bmaj, bpa, location=location)
s2 = self.angdist2pixdist(img, bmin, bpa + 90.0, location=location)
th = bpa + brot
s1, s2, th = func.fix_gaussian_axes(s1, s2, th)
return (s1, s2, th)
else:
return img.beam2pix(x)
def pix2gaus(x, location=None, use_wcs=True, is_error=False):
""" Converts Gaussian parameters in pixels to deg.
x - (maj [pix], min [pix], pa [deg])
location - specifies the location in pixels (x, y) for which
transform is desired
Input beam angle should be degrees CCW from the +y axis of the image.
The output beam angle is degrees CCW from North.
Set is_error = True when x contains the errors on the parameters instead of
the parameters themselves.
"""
if use_wcs:
s1, s2, th = x
if s1 == 0.0 and s2 == 0.0:
return (0.0, 0.0, 0.0)
th_rad = th / 180.0 * N.pi
bmaj = self.pixdist2angdist(img, s1, th, location=location)
bmin = self.pixdist2angdist(img, s2, th + 90.0, location=location)
bpa = th
if not is_error:
# Adjust the PA by the rotation delta and fix cases where
# major and minor axes are swapped
brot = self.get_rot(img, location) # rotation delta CCW (in degrees) between N and +y axis of image
bpa = th - brot
bmaj, bmin, bpa = func.fix_gaussian_axes(bmaj, bmin, bpa)
return (bmaj, bmin, bpa)
else:
return img.pix2beam(x, is_error=is_error)
def pix2coord(pix, location=None, use_wcs=True):
"""Converts size along x and y (in pixels) to size in RA and Dec (in degrees)
Currently, this function is only used to convert errors on x, y position
to errors in RA and Dec.
"""
if use_wcs:
# Account for projection effects
x, y = pix
brot = self.get_rot(img, location) # rotation delta CCW (in degrees) between N and +y axis of image
ra_dist_pix = N.sqrt( (x * N.cos(brot * N.pi / 180.0))**2 + (y * N.sin(brot * N.pi / 180.0))**2 )
dec_dist_pix = N.sqrt( (x * N.sin(brot * N.pi / 180.0))**2 + (y * N.cos(brot * N.pi / 180.0))**2 )
s1 = self.pixdist2angdist(img, ra_dist_pix, 90.0 - brot, location=location)
s2 = self.pixdist2angdist(img, dec_dist_pix, 0.0 - brot, location=location)
else:
x, y = pix
s1 = abs(x * cdelt1)
s2 = abs(y * cdelt2)
return (s1, s2)
if hasattr(t, 'wcs_pix2world'):
instancemethod = type(t.wcs_pix2world)
else:
instancemethod = type(t.wcs_pix2sky)
if sys.version_info[0] > 2:
t.p2s = instancemethod(p2s, t)
else:
t.p2s = instancemethod(p2s, t, WCS)
if hasattr(t, 'wcs_world2pix'):
instancemethod = type(t.wcs_world2pix)
else:
instancemethod = type(t.wcs_sky2pix)
if sys.version_info[0] > 2:
t.s2p = instancemethod(s2p, t)
else:
t.s2p = instancemethod(s2p, t, WCS)
img.wcs_obj = t
img.wcs_obj.acdelt = acdelt
img.pix2sky = t.p2s
img.sky2pix = t.s2p
img.gaus2pix = gaus2pix
img.pix2gaus = pix2gaus
img.pix2coord = pix2coord
def init_beam(self, img):
"""Initialize beam parameters, and conversion routines
to convert beam to/from pixel coordinates"""
from .const import fwsig
mylog = mylogger.logging.getLogger("PyBDSM.InitBeam")
hdr = img.header
cdelt1, cdelt2 = img.wcs_obj.acdelt[0:2]
### define beam conversion routines:
def beam2pix(x):
""" Converts beam in deg to pixels. Use when no dependence on
position is appropriate.
Input beam angle should be degrees CCW from North at image center.
The output beam angle is degrees CCW from the +y axis of the image.
"""
bmaj, bmin, bpa = x
s1 = abs(bmaj / cdelt1)
s2 = abs(bmin / cdelt2)
th = bpa
return (s1, s2, th)
def pix2beam(x, is_error=False):
""" Converts beam in pixels to deg. Use when no dependence on
position is appropriate.
Input beam angle should be degrees CCW from the +y axis of the image.
The output beam angle is degrees CCW from North at image center.
Set is_error = True when x contains the errors on the parameters instead of
the parameters themselves.
"""
s1, s2, th = x
bmaj = abs(s1 * cdelt1)
bmin = abs(s2 * cdelt2)
bpa = th
if not is_error:
bmaj, bmin, bpa = func.fix_gaussian_axes(bmaj, bmin, bpa)
return [bmaj, bmin, bpa]
def pixel_beam():
"""Returns the beam in sigma units in pixels"""
pbeam = beam2pix(img.beam)
return (pbeam[0]/fwsig, pbeam[1]/fwsig, pbeam[2])
def pixel_beamarea():
"""Returns the beam area in pixels"""
pbeam = beam2pix(img.beam)
return 1.1331 * pbeam[0] * pbeam[1]
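# Note added for clarity: 1.1331 ~= pi / (4 * ln 2), the area of an elliptical
# Gaussian beam in pixels when pbeam[0] and pbeam[1] are the FWHM axes in
# pixels, as returned by beam2pix above.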
### Get the beam information from the header
found = False
if img.opts.beam is not None:
beam = img.opts.beam
else:
try:
beam = (hdr['BMAJ'], hdr['BMIN'], hdr['BPA'])
found = True
except:
### try to see if AIPS has put the beam in HISTORY as usual
for h in hdr['HISTORY']:
# Check if h is a string or a FITS Card object (long headers are
# split into Cards as of PyFITS 3.0.4)
if not isinstance(h, str):
hstr = h.value
else:
hstr = h
if N.all(['BMAJ' in hstr, 'BMIN' in hstr, 'BPA' in hstr, 'CLEAN' in hstr]):
try:
dum, dum, dum, bmaj, dum, bmin, dum, bpa = hstr.split()
except ValueError:
try:
dum, dum, bmaj, dum, bmin, dum, bpa, dum, dum = hstr.split()
except ValueError:
break
beam = (float(bmaj), float(bmin), float(bpa))
found = True
if not found: raise RuntimeError("No beam information found in image header.")
### convert beam into pixels (at image center)
pbeam = beam2pix(beam)
pbeam = (pbeam[0] / fwsig, pbeam[1] / fwsig, pbeam[2]) # IN SIGMA UNITS
### and store it
img.pix2beam = pix2beam
img.beam2pix = beam2pix
img.beam = beam # FWHM size in degrees
img.pixel_beam = pixel_beam # IN SIGMA UNITS in pixels
img.pixel_beamarea = pixel_beamarea
mylogger.userinfo(mylog, 'Beam shape (major, minor, pos angle)',
'(%.5e, %.5e, %s) degrees' % (beam[0], beam[1],
round(beam[2], 1)))
def init_freq(self, img):
"""Initialize frequency parameters and store them.
Basically, PyBDSM uses two frequency parameters:
img.frequency - the reference frequency in Hz of the ch0 image
img.freq_pars - the crval, crpix, and cdelt values for the
frequency axis in Hz
If the input frequency info (in the WCS) is not in Hz, it is
converted.
"""
try:
from astropy.wcs import WCS
except ImportError as err:
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
from pywcs import WCS
mylog = mylogger.logging.getLogger("PyBDSM.InitFreq")
if img.opts.frequency_sp is not None and img.image_arr.shape[1] > 1:
# If user specifies multiple frequencies, then let
# collapse.py do the initialization
img.frequency = img.opts.frequency_sp[0]
img.freq_pars = (0.0, 0.0, 0.0)
mylog.info('Using user-specified frequencies.')
elif img.opts.frequency is not None and img.image_arr.shape[1] == 1:
img.frequency = img.opts.frequency
img.freq_pars = (img.frequency, 0.0, 0.0)
mylog.info('Using user-specified frequency.')
else:
spec_indx = img.wcs_obj.wcs.spec
if spec_indx == -1:
# No frequency axis; check header instead
hdr = img.header
if 'RESTFREQ' in hdr:
img.frequency = hdr['RESTFREQ']
img.freq_pars = (img.frequency, 0.0, 0.0)
elif 'FREQ' in hdr:
img.frequency = hdr['FREQ']
img.freq_pars = (img.frequency, 0.0, 0.0)
else:
raise RuntimeError('No frequency information found in image header.')
else:
# Here we define p2f and f2p to allow pixel to frequency
# transformations. Transformations for other axes (e.g.,
# celestial) are stripped out.
#
# First, convert frequency to Hz if needed:
img.wcs_obj.wcs.sptr('FREQ-???')
def p2f(self, spec_pix):
spec_list = [0] * self.naxis
spec_list[spec_indx] = spec_pix
spec_pix_arr = N.array([spec_list])
if hasattr(self, 'wcs_pix2world'):
freq = self.wcs_pix2world(spec_pix_arr, 0)
else:
freq = self.wcs_pix2sky(spec_pix_arr, 0)
return freq.tolist()[0][spec_indx]
def f2p(self, freq):
freq_list = [0] * self.naxis
freq_list[spec_indx] = freq
freq_arr = N.array([freq_list])
if hasattr(self, 'wcs_world2pix'):
pix = self.wcs_world2pix(freq_arr, 0)
else:
pix = self.wcs_sky2pix(freq_arr, 0)
return pix.tolist()[0][spec_indx]
if hasattr(img.wcs_obj, 'wcs_pix2world'):
instancemethod = type(img.wcs_obj.wcs_pix2world)
else:
instancemethod = type(img.wcs_obj.wcs_pix2sky)
if sys.version_info[0] > 2:
img.wcs_obj.p2f = instancemethod(p2f, img.wcs_obj)
else:
img.wcs_obj.p2f = instancemethod(p2f, img.wcs_obj, WCS)
if hasattr(img.wcs_obj, 'wcs_world2pix'):
instancemethod = type(img.wcs_obj.wcs_world2pix)
else:
instancemethod = type(img.wcs_obj.wcs_sky2pix)
if sys.version_info[0] > 2:
img.wcs_obj.f2p = instancemethod(f2p, img.wcs_obj)
else:
img.wcs_obj.f2p = instancemethod(f2p, img.wcs_obj, WCS)
if img.opts.frequency is not None:
img.frequency = img.opts.frequency
else:
img.frequency = img.wcs_obj.p2f(0)
def get_equinox(self, img):
"""Gets the equinox from the header.
Returns float year with code, where code is:
-1 - EQUINOX, EPOCH or RADECSYS keyword not found in header
0 - EQUINOX found as a numeric value
1 - EPOCH keyword used for equinox (not recommended)
2 - EQUINOX found as 'B1950'
3 - EQUINOX found as 'J2000'
4 - EQUINOX derived from value of RADECSYS keyword
'ICRS', 'FK5' ==> 2000, 'FK4' ==> 1950
"""
code = -1
year = None
hdr = img.header
if 'EQUINOX' in hdr:
year = hdr['EQUINOX']
if isinstance(year, str): # Check for 'J2000' or 'B1950' values
tst = year[:1]
if (tst == 'J') or (tst == 'B'):
year = float(year[1:])
if tst == 'J': code = 3
if tst == 'B': code = 2
else:
code = 0
else:
if 'EPOCH' in hdr: # Check EPOCH if EQUINOX not found
year = float(hdr['EPOCH'])
code = 1
else:
if 'RADECSYS' in hdr:
sys = hdr['RADECSYS']
code = 4
if sys[:3] == 'ICR': year = 2000.0
if sys[:3] == 'FK5': year = 2000.0
if sys[:3] == 'FK4': year = 1950.0
return year, code
def get_rot(self, img, location=None):
"""Returns CCW rotation angle (in degrees) between N and +y axis of image
location specifies the location in pixels (x, y) for which angle is desired
"""
if location is None:
x1 = img.image_arr.shape[2] / 2.0
y1 = img.image_arr.shape[3] / 2.0
else:
x1, y1 = location
ra, dec = img.pix2sky([x1, y1])
delta_dec = self.pixdist2angdist(img, 1.0, 0.0, location=[x1, y1]) # approx. size in degrees of 1 pixel
if dec + delta_dec > 90.0:
# shift towards south instead
delta_dec *= -1.0
x2, y2 = img.sky2pix([ra, dec + delta_dec])
try:
rot_ang_rad = N.arctan2(y2-y1, x2-x1) - N.pi / 2.0
if delta_dec < 0.0:
rot_ang_rad -= N.pi
except:
rot_ang_rad = 0.0
return rot_ang_rad * 180.0 / N.pi
def angdist2pixdist(self, img, angdist, pa, location=None):
"""Returns the distance in pixels for a given angular distance in degrees
pa - position angle in degrees east of north
location - x and y location of center
"""
if location is None:
x1 = int(img.image_arr.shape[2] / 2.0)
y1 = int(img.image_arr.shape[3] / 2.0)
else:
x1, y1 = location
pa_pix = self.get_rot(img, location)
x0 = x1 - 10.0 * N.sin( (pa + pa_pix) * N.pi / 180.0 )
y0 = y1 - 10.0 * N.cos( (pa + pa_pix) * N.pi / 180.0 )
ra0, dec0 = img.pix2sky([x0, y0])
x2 = x1 + 10.0 * N.sin( (pa + pa_pix) * N.pi / 180.0 )
y2 = y1 + 10.0 * N.cos( (pa + pa_pix) * N.pi / 180.0 )
ra2, dec2 = img.pix2sky([x2, y2])
angdist12 = func.angsep(ra0, dec0, ra2, dec2) # degrees
pixdist12 = N.sqrt( (x0 - x2)**2 + (y0 - y2)**2 ) # pixels
if angdist12 > 0.0:
result = angdist * pixdist12 / angdist12
if N.isnan(result) or result <= 0.0:
result = N.mean(img.wcs_obj.acdelt[0:2])
else:
result = N.mean(img.wcs_obj.acdelt[0:2])
return result
def pixdist2angdist(self, img, pixdist, pa, location=None):
"""Returns the angular distance in degrees for a given distance in pixels
pa - position angle in degrees CCW from +y axis
location - x and y location of center
"""
if location is None:
x1 = int(img.image_arr.shape[2] / 2.0)
y1 = int(img.image_arr.shape[3] / 2.0)
else:
x1, y1 = location
x0 = x1 - pixdist / 2.0 * N.sin(pa * N.pi / 180.0)
y0 = y1 - pixdist / 2.0 * N.cos(pa * N.pi / 180.0)
ra0, dec0 = img.pix2sky([x0, y0])
x2 = x1 + pixdist / 2.0 * N.sin(pa * N.pi / 180.0)
y2 = y1 + pixdist / 2.0 * N.cos(pa * N.pi / 180.0)
ra2, dec2 = img.pix2sky([x2, y2])
angdist12 = func.angsep(ra0, dec0, ra2, dec2) # degrees
return angdist12
class TempDir(str):
"""Container for temporary directory for image caching.
Directory is deleted when garbage collected/zero references """
def __del__(self):
import os
if os.path.exists(self.__str__()):
shutil.rmtree(self.__str__())
| 25,341 | 40.13961 | 119 | py |
PyBDSF | PyBDSF-master/bdsf/spectralindex.py |
"""Module Spectral index.
This module calculates spectral indices for Gaussians and sources for a multichannel cube.
"""
from __future__ import print_function
from __future__ import absolute_import
import numpy as N
from .image import Op
from . import mylogger
from copy import deepcopy as cp
from . import functions as func
from . import statusbar
class Op_spectralindex(Op):
"""Computes spectral index of every gaussian and every source.
First do a quick fit to all channels to determine whether averaging over
frequency is needed to obtain desired SNR (set by img.opts.specind_snr).
This averaging should be done separately for both Gaussians and
sources. For S and C sources, averaging only needs to be done once
(as the sources have only one Gaussian).
For M sources, averaging is needed twice: once to obtain the desired
SNR for the faintest Gaussian in the source, and once to obtain the
desired SNR for the source as a whole.
If averaging is needed for a given source, don't let the
number of resulting channels fall below 2. If it is not possible
to obtain the desired SNR in 2 or more channels, set spec_indx of
Gaussian/source to NaN.
"""
def __call__(self, img):
global bar1
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"SpectIndex")
img.mylog = mylog
if img.opts.spectralindex_do:
mylogger.userinfo(mylog, '\nExtracting spectral indices for all ch0 sources')
shp = img.image_arr.shape
if shp[1] > 1:
# calc freq, beam_spectrum for nchan channels
self.freq_beamsp_unav(img)
sbeam = img.beam_spectrum
freqin = img.freq
# calc initial channel flags if needed
iniflags = self.iniflag(img)
img.specind_iniflags = iniflags
good_chans = N.where(iniflags == False)
unav_image = img.image_arr[0][good_chans]
unav_freqs = freqin[good_chans]
nmax_to_avg = img.opts.specind_maxchan
nchan = unav_image.shape[0]
mylogger.userinfo(mylog, 'Number of channels remaining after initial flagging', str(nchan))
if nmax_to_avg == 0:
nmax_to_avg = nchan
# calculate the rms map of each unflagged channel
bar1 = statusbar.StatusBar('Determining rms for channels in image ... : ', 0, nchan)
if img.opts.quiet == False:
bar1.start()
rms_spec = self.rms_spectrum(img, unav_image) # bar1 updated here
bar2 = statusbar.StatusBar('Calculating spectral indices for sources : ', 0, img.nsrc)
c_wts = img.opts.collapse_wt
snr_desired = img.opts.specind_snr
if img.opts.quiet == False and img.opts.verbose_fitting == False:
bar2.start()
for src in img.sources:
isl = img.islands[src.island_id]
isl_bbox = isl.bbox
# Fit each channel with ch0 Gaussian(s) of the source,
# allowing only the normalization to vary.
chan_images = unav_image[:, isl_bbox[0], isl_bbox[1]]
chan_rms = rms_spec[:, isl_bbox[0], isl_bbox[1]]
beamlist = img.beam_spectrum
unavg_total_flux, e_unavg_total_flux = self.fit_channels(img, chan_images, chan_rms, src, beamlist)
# Check for upper limits and mask. gaus_mask is array of (N_channels x N_gaussians)
# and is True if measured flux is upper limit. n_good_chan_per_gaus is array of N_gaussians
# that gives number of unmasked channels for each Gaussian.
gaus_mask, n_good_chan_per_gaus = self.mask_upper_limits(unavg_total_flux, e_unavg_total_flux, snr_desired)
# Average if needed and fit again
# First find flux of faintest Gaussian of source and use it to estimate rms_desired
gflux = []
for g in src.gaussians:
gflux.append(g.peak_flux)
rms_desired = min(gflux)/snr_desired
total_flux = unavg_total_flux
e_total_flux = e_unavg_total_flux
freq_av = unav_freqs
nchan = chan_images.shape[0]
nchan_prev = nchan
while min(n_good_chan_per_gaus) < 2 and nchan > 2:
avimages, beamlist, freq_av, crms_av = self.windowaverage_cube(chan_images, rms_desired, chan_rms,
c_wts, sbeam, freqin, nmax_to_avg=nmax_to_avg)
total_flux, e_total_flux = self.fit_channels(img, avimages, crms_av, src, beamlist)
gaus_mask, n_good_chan_per_gaus = self.mask_upper_limits(total_flux, e_total_flux, snr_desired)
nchan = avimages.shape[0]
if nchan == nchan_prev:
break
nchan_prev = nchan
rms_desired *= 0.8
# Now fit Gaussian fluxes to obtain spectral indices.
# Only fit if there are detections (at specified sigma threshold)
# in at least two bands. If not, don't fit and set spec_indx
# and error to NaN.
for ig, gaussian in enumerate(src.gaussians):
npos = len(N.where(total_flux[:, ig] > 0.0)[0])
if img.opts.verbose_fitting:
if img.opts.flagchan_snr:
print('Gaussian #%i : averaged to %i channels, of which %i meet SNR criterion' % (gaussian.gaus_num,
len(total_flux[:, ig]), n_good_chan_per_gaus[ig]))
else:
print('Gaussian #%i : averaged to %i channels, all of which will be used' % (gaussian.gaus_num,
len(total_flux[:, ig])))
if (img.opts.flagchan_snr and n_good_chan_per_gaus[ig] < 2) or npos < 2:
gaussian.spec_indx = N.NaN
gaussian.e_spec_indx = N.NaN
gaussian.spec_norm = N.NaN
gaussian.specin_flux = [N.NaN]
gaussian.specin_fluxE = [N.NaN]
gaussian.specin_freq = [N.NaN]
gaussian.specin_freq0 = N.NaN
else:
if img.opts.flagchan_snr:
good_fluxes_ind = N.where(gaus_mask[:, ig] == False)
else:
good_fluxes_ind = range(len(freq_av))
fluxes_to_fit = total_flux[:, ig][good_fluxes_ind]
e_fluxes_to_fit = e_total_flux[:, ig][good_fluxes_ind]
freqs_to_fit = freq_av[good_fluxes_ind]
fit_res = self.fit_specindex(freqs_to_fit, fluxes_to_fit, e_fluxes_to_fit)
gaussian.spec_norm, gaussian.spec_indx, gaussian.e_spec_indx = fit_res
gaussian.specin_flux = fluxes_to_fit.tolist()
gaussian.specin_fluxE = e_fluxes_to_fit.tolist()
gaussian.specin_freq = freqs_to_fit.tolist()
gaussian.specin_freq0 = N.median(freqs_to_fit)
# Next fit total source fluxes for spectral index.
if len(src.gaussians) > 1:
# First, check unaveraged SNRs for total source.
src_total_flux = N.zeros((chan_images.shape[0], 1))
src_e_total_flux = N.zeros((chan_images.shape[0], 1))
src_total_flux[:,0] = N.sum(unavg_total_flux, 1) # sum over all Gaussians in source to obtain total fluxes in each channel
src_e_total_flux[:,0] = N.sqrt(N.sum(N.power(e_unavg_total_flux, 2.0), 1))
src_mask, n_good_chan = self.mask_upper_limits(src_total_flux, src_e_total_flux, snr_desired)
# Average if needed and fit again
rms_desired = src.peak_flux_max/snr_desired
total_flux = unavg_total_flux
e_total_flux = e_unavg_total_flux
freq_av = unav_freqs
nchan = chan_images.shape[0]
nchan_prev = nchan
while n_good_chan < 2 and nchan > 2:
avimages, beamlist, freq_av, crms_av = self.windowaverage_cube(chan_images, rms_desired, chan_rms,
c_wts, sbeam, freqin, nmax_to_avg=nmax_to_avg)
total_flux, e_total_flux = self.fit_channels(img, avimages, crms_av, src, beamlist)
src_total_flux = N.sum(total_flux, 1) # sum over all Gaussians in source to obtain total fluxes in each channel
src_e_total_flux = N.sqrt(N.sum(N.power(e_total_flux, 2.0), 1))
src_mask, n_good_chan = self.mask_upper_limits(src_total_flux, src_e_total_flux, snr_desired)
nchan = avimages.shape[0]
if nchan == nchan_prev:
break
nchan_prev = nchan
rms_desired *= 0.8
# Now fit source for spectral index.
src_total_flux = src_total_flux.reshape((src_total_flux.shape[0],))
src_e_total_flux = src_e_total_flux.reshape((src_e_total_flux.shape[0],))
src_mask = src_mask.reshape((src_mask.shape[0],))
if img.opts.verbose_fitting:
if img.opts.flagchan_snr:
print('Source #%i : averaged to %i channels, of which %i meet SNR criterion' % (src.source_id,
len(src_total_flux), nchan))
else:
print('Source #%i : averaged to %i channels, all of which will be used' % (src.source_id,
len(src_total_flux)))
npos = len(N.where(src_total_flux > 0.0)[0])
if isinstance(n_good_chan, int):
n_good_chan = [n_good_chan]
if (img.opts.flagchan_snr and n_good_chan[0] < 2) or npos < 2:
src.spec_indx = N.NaN
src.e_spec_indx = N.NaN
src.spec_norm = N.NaN
src.specin_flux = [N.NaN]
src.specin_fluxE = [N.NaN]
src.specin_freq = [N.NaN]
src.specin_freq0 = N.NaN
else:
if img.opts.flagchan_snr:
good_fluxes_ind = N.where(src_mask == False)
else:
good_fluxes_ind = range(len(freq_av))
fluxes_to_fit = src_total_flux[good_fluxes_ind]
e_fluxes_to_fit = src_e_total_flux[good_fluxes_ind]
freqs_to_fit = freq_av[good_fluxes_ind]
# if len(freqs_to_fit.shape) == 2:
# freqs_to_fit = freqs_to_fit.reshape((freqs_to_fit.shape[0],))
# if len(fluxes_to_fit.shape) == 2:
# fluxes_to_fit = fluxes_to_fit.reshape((fluxes_to_fit.shape[0],))
# if len(e_fluxes_to_fit.shape) == 2:
# e_fluxes_to_fit = e_fluxes_to_fit.reshape((e_fluxes_to_fit.shape[0],))
fit_res = self.fit_specindex(freqs_to_fit, fluxes_to_fit, e_fluxes_to_fit)
src.spec_norm, src.spec_indx, src.e_spec_indx = fit_res
src.specin_flux = fluxes_to_fit.tolist()
src.specin_fluxE = e_fluxes_to_fit.tolist()
src.specin_freq = freqs_to_fit.tolist()
src.specin_freq0 = N.median(freqs_to_fit)
else:
src.spec_norm = src.gaussians[0].spec_norm
src.spec_indx = src.gaussians[0].spec_indx
src.e_spec_indx = src.gaussians[0].e_spec_indx
src.specin_flux = src.gaussians[0].specin_flux
src.specin_fluxE = src.gaussians[0].specin_fluxE
src.specin_freq = src.gaussians[0].specin_freq
src.specin_freq0 = src.gaussians[0].specin_freq0
if bar2.started:
bar2.increment()
if bar2.started:
bar2.stop()
img.completed_Ops.append('spectralindex')
else:
mylog.warning('Image has only one channel. Spectral index module disabled.')
img.opts.spectralindex_do = False
####################################################################################
def flagchans_rmschan(self, crms, zeroflags, iniflags, cutoff):
""" Calculate clipped rms (r1) of the rms as fn of channel, crms, with zeroflags
applied and kappa=cutoff. Then exclude crms=0 (for NaN mages etc) and get ch.s
which are more than cutoff*r1 away from median of rms. If this is less than 10 %
of all channels, flag them.
"""
# crms_rms and median dont include rms=0 channels
nchan = len(crms)
mean, rms, cmean, crms_rms, cnt = func.bstat(crms, zeroflags, cutoff)
zeroind = N.where(crms==0)[0]
median = N.median(N.delete(crms, zeroind))
badind = N.where(N.abs(N.delete(crms, zeroind) - median)/crms_rms >=cutoff)[0]
frac = len(badind)/(nchan - len(zeroind))
if frac <= 0.1:
badind = N.where(N.abs(crms - median)/crms_rms >=cutoff)[0]
iniflags[badind] = True
return iniflags
####################################################################################
def iniflag(self, img):
""" Calculate clipped rms of every channel, and then median and clipped rms of this rms distribution.
Exclude channels where rms=0 (all pixels 0 or blanked) and of the remaining, if outliers beyond 5 sigma
are less then 10 % of number of channels, flag them. This is done only when flagchan_rms = True.
If False, only rms=0 (meaning, entire channel image is zero or blanked) is flagged."""
image = img.image_arr
nchan = image.shape[1]
iniflags = N.zeros(nchan, bool)
zeroflags = N.zeros(nchan, bool)
crms = img.channel_clippedrms
# First, check whether user has specified any channels to flag
if img.opts.flagchan_list is not None:
for chan in img.opts.flagchan_list:
zeroflags[chan] = True
# Next, flag channels with rms = 0
for ichan in range(nchan):
if crms[ichan] == 0: zeroflags[ichan] = True
iniflags = cp(zeroflags)
# Lastly, flag outliers
if img.opts.flagchan_rms:
iniflags = self.flagchans_rmschan(crms, zeroflags, iniflags, 4.0)
return iniflags
####################################################################################
def freq_beamsp_unav(self, img):
""" Defines img.beam_spectrum and img.freq for the unaveraged cube. """
# Find the channel frequencies
shp = img.image_arr.shape
img.freq = N.zeros(shp[1])
crval, cdelt, crpix = img.freq_pars
if img.wcs_obj.wcs.spec == -1 and \
img.opts.frequency_sp is None:
raise RuntimeError("Frequency info not found in header "\
"and frequencies not specified by user")
else:
if img.opts.frequency_sp is None:
for ichan in range(shp[1]):
img.freq[ichan] = img.wcs_obj.p2f(ichan)
else:
if len(img.opts.frequency_sp) != shp[1]:
raise RuntimeError("Number of channels does not match number "\
"of frequencies specified by user")
for ichan in range(shp[1]):
img.freq[ichan] = img.opts.frequency_sp[ichan]
# Find the channel beam shapes
sbeam = img.opts.beam_spectrum
if sbeam is not None and len(sbeam) != shp[1]:
sbeam = None # sanity check
if sbeam is None:
sbeam = []
hdr = img.header
try:
# search for channel beams in the image header
for ichan in range(shp[1]):
sbeam.append((hdr['BMAJ{}'.format(ichan+1)],
hdr['BMIN{}'.format(ichan+1)],
hdr['BPA{}'.format(ichan+1)]))
except KeyError:
# Channel beam info not found. Use constant beam or one scaled with
# frequency
if img.opts.beam_sp_derive:
# Adjust channel beam sizes assuming that the beam scales as 1/nu
# Note: beam is (major, minor, pos. angle)
for ichan in range(shp[1]):
sbeam.append((img.beam[0] * img.freq[0] / img.freq[ichan],
img.beam[1] * img.freq[0] / img.freq[ichan],
img.beam[2]))
else:
sbeam = [img.beam] * shp[1]
img.beam_spectrum = sbeam
####################################################################################
def rms_spectrum(self, img, image):
from .rmsimage import Op_rmsimage
global bar1
mylog = img.mylog
nchan = image.shape[0]
rms_map = img.use_rms_map
if img.opts.kappa_clip is None:
kappa = -img.pixel_beamarea()
else:
kappa = img.opts.kappa_clip
map_opts = (kappa, img.rms_box, img.opts.spline_rank)
if rms_map:
rms_spec = N.zeros(image.shape, dtype=N.float32)
mean = N.zeros(image.shape[1:], dtype=N.float32)
rms = N.zeros(image.shape[1:], dtype=N.float32)
median_rms = N.zeros(nchan)
for ichan in range(nchan):
if bar1.started:
bar1.increment()
dumi = Op_rmsimage()
mask = N.isnan(image[ichan])
Op_rmsimage.map_2d(dumi, image[ichan], mean, rms, mask, *map_opts)
rms_spec[ichan,:,:] = rms
median_rms[ichan] = N.median(rms)
else:
rms_spec = N.zeros(image.shape, dtype=N.float32)
for ichan in range(nchan):
if bar1.started:
bar1.increment()
rms_spec[ichan,:,:] = img.channel_clippedrms[ichan]
median_rms = rms_spec
if bar1.started:
bar1.stop()
str1 = " ".join(["%9.4e" % n for n in img.channel_clippedrms])
if rms_map:
mylog.debug('%s %s ' % ('Median rms of channels : ', str1))
mylog.info('RMS image made for each channel')
else:
mylog.debug('%s %s ' % ('RMS of channels : ', str1))
mylog.info('Clipped rms calculated for each channel')
return rms_spec
####################################################################################
def fit_specindex(self, freqarr, fluxarr, efluxarr, do_log=False):
""" Fits spectral index to data.
do_log is True/False implies you fit spectral index in logFlux vs logFreq space or not."""
from . import functions as func
import math
x = freqarr
flux = fluxarr
eflux = efluxarr
f0 = N.median(x)
mask = N.zeros(len(fluxarr), dtype=bool)
nan_errors = N.isnan(efluxarr)
mask[nan_errors] = 1
if do_log:
x = N.log10(x/f0); y = N.log10(flux); sig = N.abs(eflux/flux)/2.303
funct = func.poly
else:
x = x/f0; y = flux; sig = eflux
funct = func.sp_in
spin, espin = func.fit_mask_1d(x, y, sig, mask, funct, do_err=True, order=1)
if do_log:
spin[0] = math.pow(10.0, spin[0])
espin[0] = spin[0]*math.log(10.0)*espin[0]
return spin[0], spin[1], espin[1]
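# Illustrative sketch (not part of PyBDSF): the fitted model corresponds to a
# power law S(nu) = S0 * (nu/nu0)**alpha with nu0 = median(freqarr). A minimal,
# hypothetical unweighted log-log fit with plain numpy (ignoring the error
# weighting and masking handled by func.fit_mask_1d above) would look like:
#
#     def example_specindex(freqs, fluxes):
#         f0 = N.median(freqs)
#         alpha, log_s0 = N.polyfit(N.log10(N.asarray(freqs) / f0),
#                                   N.log10(fluxes), 1)
#         return 10.0 ** log_s0, alpha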
########################################################################################
def windowaverage_cube(self, imagein, rms_desired, chanrms, c_wts, sbeam,
freqin, n_min=2, nmax_to_avg=10):
"""Average neighboring channels of cube to obtain desired rms in at least n_min channels
The clipped rms of each channel is compared to the desired rms. If the
clipped rms is too high, the channel is averaged with as many neighboring
channels as necessary to obtain at least the desired rms. This is done
until the number of OK channels is 2. The averaging is done first at
the frequency extremes, as the frequency range of the resulting averaged
flux array will be maximized.
For example, if the desired rms is 0.1 and the list of rms's is:
[0.2, 0.2, 0.3, 0.2, 0.2]
the resulting channels that will be averaged are:
[[0, 1], [2], [3, 4]]
"""
from math import sqrt
from .collapse import avspc_blanks
# chan_list is a list of lists of channels to average. E.g., if we have
# 5 channels and we want to average only the first 2:
# chan_list = [[0,1], [2], [3], [4]]
if len(chanrms.shape) ==3:
crms = N.mean(N.nanmean(chanrms, axis=1), axis=1)
else:
crms = chanrms
chan_list = self.get_avg_chan_list(rms_desired, crms, nmax_to_avg)
n_new = len(chan_list)
beamlist = []
crms_av = N.zeros(n_new)
freq_av = N.zeros(n_new)
imageout = N.zeros((n_new, imagein.shape[1], imagein.shape[2]), dtype=N.float32)
for ichan, avg_list in enumerate(chan_list):
if len(avg_list) > 1:
imageout[ichan], dum = avspc_blanks(avg_list, imagein, crms, c_wts)
chan_slice = slice(avg_list[0], avg_list[1]+1)
beamlist.append(tuple(N.mean(sbeam[chan_slice], axis=0)))
freq_av[ichan] = N.mean(freqin[chan_slice])
crms_av[ichan] = 1.0/sqrt(N.sum(1.0/crms[chan_slice]**2))
else:
imageout[ichan] = imagein[avg_list[0]]
beamlist.append(sbeam[avg_list[0]])
freq_av[ichan] = N.mean(freqin[avg_list[0]])
crms_av[ichan] = 1.0/sqrt(N.sum(1.0/crms[avg_list[0]]**2))
return imageout, beamlist, freq_av, crms_av
def get_avg_chan_list(self, rms_desired, chanrms, nmax_to_avg):
"""Returns a list of channels to average to obtain given rms_desired
in at least 2 channels"""
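# Worked example (comments only, hypothetical numbers): with rms_desired = 0.1
# and chanrms = [0.12, 0.12, 0.12, 0.12, 0.12, 0.12], no single channel meets
# the target, so pairs are formed at each end of the band (combined rms
# 0.12/sqrt(2) ~= 0.085 < 0.1) and the middle channels are left unaveraged,
# giving chan_list = [[0, 1], [2], [3], [4, 5]].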
end = 0
chan_list = []
nchan = len(chanrms)
good_ind = N.where(N.array(chanrms)/rms_desired < 1.0)[0]
num_good = len(good_ind)
if num_good < 2:
# Average channels at start of list
rms_avg = chanrms[0]
while rms_avg > rms_desired:
end += 1
chan_slice = slice(0, end)
rms_avg = 1.0/N.sqrt(N.sum(1.0/N.array(chanrms)[chan_slice]**2))
if end == nchan or end == nmax_to_avg:
break
if end == 0:
end = 1
chan_list.append(range(end))
if end == nchan:
# This means all channels are averaged into one. If this happens,
# instead average first half and second half to get two channels
# and return.
chan_list = [range(0, int(float(nchan)/2.0)), range(int(float(nchan)/2.0), nchan)]
return chan_list
# Average channels at end of list
rms_avg = chanrms[-1]
end = nchan
start = nchan
while rms_avg > rms_desired:
start -= 1
chan_slice = slice(start, end)
rms_avg = 1.0/N.sqrt(N.sum(1.0/chanrms[chan_slice]/chanrms[chan_slice]))
if end-start == nmax_to_avg:
break
if start <= max(chan_list[0]):
# This means we cannot get two averaged channels with desired rms,
# so just average remaining channels
chan_list.append(range(max(chan_list[0]), nchan))
else:
# First append any channels between those averaged at the start
# and those at the end
for i in range(max(chan_list[0])+1, start):
chan_list.append([i])
if start < end:
chan_list.append(range(start, end))
else:
# No averaging needed
for i in range(nchan):
chan_list.append([i])
return chan_list
def fit_channels(self, img, chan_images, clip_rms, src, beamlist):
"""Fits normalizations of Gaussians in source to multiple channels
If unresolved, the size of the Gaussians are adjusted to match the
channel's beam size (given by beamlist) before fitting.
Returns array of total fluxes (N_channels x N_Gaussians) and array
of errors (N_channels x N_Gaussians).
"""
from . import functions as func
from .const import fwsig
isl = img.islands[src.island_id]
isl_bbox = isl.bbox
nchan = chan_images.shape[0]
x, y = N.mgrid[isl_bbox]
gg = src.gaussians
fitfix = N.ones(len(gg)) # fit only normalization
srcmask = isl.mask_active
total_flux = N.zeros((nchan, len(fitfix))) # array of fluxes: N_channels x N_Gaussians
errors = N.zeros((nchan, len(fitfix))) # array of errors: N_channels x N_Gaussians
for cind in range(nchan):
image = chan_images[cind]
gg_adj = self.adjust_size_by_freq(img.beam, beamlist[cind], gg)
p, ep = func.fit_mulgaus2d(image, gg_adj, x, y, srcmask, fitfix, adj=True)
pbeam = img.beam2pix(beamlist[cind])
bm_pix = (pbeam[0]/fwsig, pbeam[1]/fwsig, pbeam[2]) # IN SIGMA UNITS
for ig in range(len(fitfix)):
total_flux[cind, ig] = p[ig*6]*p[ig*6+3]*p[ig*6+4]/(bm_pix[0]*bm_pix[1])
p = N.insert(p, N.arange(len(fitfix))*6+6, total_flux[cind])
rms_isl = N.nanmean(clip_rms[cind])
if N.isnan(rms_isl):
# If the channel rms is all NaNs, use the average rms value over all
# channels instead
rms_isl = N.nanmean(clip_rms)
if not N.isnan(rms_isl):
errors[cind] = func.get_errors(img, p, rms_isl, bm_pix=(bm_pix[0]*fwsig, bm_pix[1]*fwsig, bm_pix[2]))[6]
self.reset_size(gg)
return total_flux, errors
def adjust_size_by_freq(self, beam_ch0, beam, gg):
"""Adjust size of unresolved Gaussians to match the channel's beam size"""
gg_adj = []
for g in gg:
g.size_pix_adj = g.size_pix[:]
if g.deconv_size_sky[0] == 0.0:
g.size_pix_adj[0] *= beam[0] / beam_ch0[0]
if g.deconv_size_sky[1] == 0.0:
g.size_pix_adj[1] *= beam[1] / beam_ch0[1]
gg_adj.append(g)
return gg_adj
def reset_size(self, gg):
"""Reset size of unresolved Gaussians to match the ch0 beam size"""
for g in gg:
if hasattr(g, 'size_pix_adj'): del g.size_pix_adj
def mask_upper_limits(self, total_flux, e_total_flux, threshold):
"""Returns mask of upper limits"""
mask = N.zeros(total_flux.shape, dtype=bool)
if len(total_flux.shape) == 1:
is_src = True
ndet = 0
ncomp = 1
else:
is_src = False
ndet = N.zeros((total_flux.shape[1]), dtype=int)
ncomp = len(ndet)
for ig in range(ncomp):
for ichan in range(total_flux.shape[0]):
if is_src:
meas_flux = total_flux[ichan]
e_meas_flux = e_total_flux[ichan]
else:
meas_flux = total_flux[ichan, ig]
e_meas_flux = e_total_flux[ichan, ig]
if meas_flux < threshold * e_meas_flux:
# Upper limit
if is_src:
mask[ichan] = True
else:
mask[ichan, ig] = True
else:
# Detection
if is_src:
ndet += 1
mask[ichan] = False
else:
ndet[ig] += 1
mask[ichan, ig] = False
return mask, ndet
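# Worked example (comments only, hypothetical numbers): with threshold = 3, a
# channel measurement of 2.0 +/- 0.5 mJy (SNR = 4) counts as a detection, while
# 1.0 +/- 0.5 mJy (SNR = 2 < 3) is masked as an upper limit.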
| 29,798 | 46.602236 | 146 | py |
PyBDSF | PyBDSF-master/bdsf/image.py |
"""Module image.
Instances of class Image are a primary data-holders for all PyBDSF
operations. They store the image itself together with some meta-information
(such as headers), options for processing modules and all data generated during
processing. A few convenience methods are also defined here for interactive
use: to allow viewing and output of the most important data, to allow listing
and setting of options, and to allow re-processing of Images (these methods are
used by the interactive IPython shell made by pybdsf).
This module also defines class Op, which is used as a base class for all PyBDSF
operations.
"""
from __future__ import print_function
from __future__ import absolute_import
import numpy as N
from .opts import *
class Image(object):
"""Image is a primary data container for PyBDSF.
All the run-time data (such as image data, mask, etc.)
is stored here. A number of type-checked properties
are defined for the most basic image attributes, such
as image data, mask, header, user options.
To allow transparent caching of large image data to disk,
the image data must be stored in attributes ending in
"_arr". Additionally, setting subarrays does not work
using the attributes directly (e.g., img.ch0_arr[0:100,0:100]
= 0.0 will not work). Instead, set the subarray values then set
the attribute (e.g., ch0[0:100,0:100] = 0.0; img.ch0_arr = ch0).
There is little sense in declaring all possible attributes
right here, as that would introduce unneeded dependencies
between modules; thus, most other attributes (island lists,
Gaussian lists, etc.) are inserted at run time by the specific
PyBDSF modules.
"""
def __init__(self, opts):
self._prev_opts = None
self.extraparams = {}
self.masked = False
self.completed_Ops = []
self.waveletimage = False
self._pi = False
self.do_cache = False
self.bbspatchnum = 0
self.blankpix = 0
self.use_io = ''
self.j = 0
self.freq_pars = [0.0, 0.0, 0.0]
self.filename = ''
self.logfilename = ''
self.resid_gaus_arr = None
self._is_interactive_shell = False
self.opts = Opts(opts)
def __setstate__(self, state):
"""Needed for multiprocessing"""
self.thresh_pix = state['thresh_pix']
self.minpix_isl = state['minpix_isl']
self.clipped_mean = state['clipped_mean']
def __getstate__(self):
"""Needed for multiprocessing"""
state = {}
state['thresh_pix'] = self.thresh_pix
state['minpix_isl'] = self.minpix_isl
state['clipped_mean'] = self.clipped_mean
return state
def __getattribute__(self, name):
from . import functions as func
if name.endswith("_arr"):
if self.do_cache:
map_data = func.retrieve_map(self, name)
if map_data is not None:
return map_data
else:
return object.__getattribute__(self, name)
else:
return object.__getattribute__(self, name)
else:
return object.__getattribute__(self, name)
def __setattr__(self, name, value):
from . import functions as func
if hasattr(self, 'do_cache'):
if self.do_cache and name.endswith("_arr") and isinstance(value, N.ndarray):
func.store_map(self, name, value)
else:
super(Image, self).__setattr__(name, value)
else:
super(Image, self).__setattr__(name, value)
def __delattr__(self, name):
from . import functions as func
if self.do_cache and name.endswith("_arr"):
func.del_map(self, name)
else:
super(Image, self).__delattr__(name)
def get_map(self, map_name):
"""Returns requested map."""
from . import functions as func
if self.do_cache:
map_data = func.retrieve_map(self, map_name)
else:
map_data = getattr(self, map_name)
return map_data
def put_map(self, map_name, map_data):
"""Stores requested map."""
from . import functions as func
if self.do_cache:
func.store_map(self, map_name, map_data)
else:
setattr(self, map_name, map_data)
def list_pars(self):
"""List parameter values."""
from . import interface
interface.list_pars(self)
def set_pars(self, **kwargs):
"""Set parameter values."""
from . import interface
interface.set_pars(self, **kwargs)
def process(self, **kwargs):
"""Process Image object"""
from . import interface
success = interface.process(self, **kwargs)
return success
def save_pars(self, savefile=None):
"""Save parameter values."""
from . import interface
interface.save_pars(self, savefile)
def load_pars(self, loadfile=None):
"""Load parameter values."""
from . import interface
import os
if loadfile is None or loadfile == '':
loadfile = self.opts.filename + '.pybdsf.sav'
if os.path.exists(loadfile):
timg, err = interface.load_pars(loadfile)
if timg is not None:
orig_filename = self.opts.filename
self.opts = timg.opts
self.opts.filename = orig_filename # reset filename to original
else:
if self._is_interactive_shell:
print("\n\033[31;1mERROR\033[0m: '"+\
loadfile+"' is not a valid parameter save file.")
else:
raise RuntimeError(str(err))
else:
if self._is_interactive_shell:
print("\n\033[31;1mERROR\033[0m: File '"+\
loadfile+"' not found.")
else:
raise RuntimeError('File not found')
def show_fit(self, **kwargs):
"""Show results of the fit."""
from . import plotresults
if not hasattr(self, 'nisl'):
print('Image has not been processed. Please run process_image first.')
return False
plotresults.plotresults(self, **kwargs)
return True
def export_image(self, **kwargs):
"""Export an internal image to a file."""
from . import interface
try:
result = interface.export_image(self, **kwargs)
return result
except RuntimeError as err:
if self._is_interactive_shell:
print("\n\033[31;1mERROR\033[0m: " + str(err))
else:
raise RuntimeError(str(err))
def write_catalog(self, **kwargs):
"""Write the Gaussian, source, or shapelet list to a file"""
from . import interface
try:
result = interface.write_catalog(self, **kwargs)
return result
except RuntimeError as err:
if self._is_interactive_shell:
print("\n\033[31;1mERROR\033[0m: " + str(err))
else:
raise RuntimeError(str(err))
class Op(object):
"""Common base class for all PyBDSF operations.
At the moment this class is empty and only defines placeholder
for method __call__, which should be redefined in all derived
classes.
"""
def __call__(self, img):
raise NotImplementedError("This method should be redefined")
| 7,542 | 34.580189 | 88 | py |
PyBDSF | PyBDSF-master/bdsf/wavelet_atrous.py |
"""Compute a-trous wavelet transform of the gaussian residual image.
Do source extraction on this if asked.
"""
from __future__ import print_function
from __future__ import absolute_import
import numpy as N
from .image import *
from . import mylogger
import os
from . import has_pl
if has_pl:
import matplotlib.pyplot as pl
from math import log, floor, sqrt
from .const import fwsig
from copy import deepcopy as cp
from . import functions as func
import gc
from numpy import array, product
import scipy.signal
from .preprocess import Op_preprocess
from .rmsimage import Op_rmsimage
from .threshold import Op_threshold
from .islands import Op_islands
from .gausfit import Op_gausfit, Gaussian
from .gaul2srl import Op_gaul2srl
from .make_residimage import Op_make_residimage
from .interface import raw_input_no_history
from . import statusbar
try:
import pyfftw.interfaces
pyfftw.interfaces.cache.enable()
N.fft.fftn = pyfftw.interfaces.numpy_fft.fftn
N.fft.ifftn = pyfftw.interfaces.numpy_fft.ifftn
scipy.signal.signaltools.fftn = pyfftw.interfaces.scipy_fftpack.fftn
scipy.signal.signaltools.ifftn = pyfftw.interfaces.scipy_fftpack.ifftn
has_pyfftw = True
except ImportError:
has_pyfftw = False
class Op_wavelet_atrous(Op):
"""Compute a-trous wavelet transform of the gaussian residual image."""
def __call__(self, img):
mylog = mylogger.logging.getLogger("PyBDSF." + img.log + "Wavelet")
if img.opts.atrous_do:
if img.nisl == 0:
mylog.warning("No islands found. Skipping wavelet decomposition.")
img.completed_Ops.append('wavelet_atrous')
return
mylog.info("Decomposing gaussian residual image into a-trous wavelets")
bdir = img.basedir + '/wavelet/'
if img.opts.output_all:
if not os.path.isdir(bdir): os.makedirs(bdir)
if not os.path.isdir(bdir + '/residual/'): os.makedirs(bdir + '/residual/')
if not os.path.isdir(bdir + '/model/'): os.makedirs(bdir + '/model/')
dobdsm = img.opts.atrous_bdsm_do
filter = {'tr':{'size':3, 'vec':[1. / 4, 1. / 2, 1. / 4], 'name':'Triangle'},
'b3':{'size':5, 'vec':[1. / 16, 1. / 4, 3. / 8, 1. / 4, 1. / 16], 'name':'B3 spline'}}
if dobdsm: wchain, wopts = self.setpara_bdsm(img)
n, m = img.ch0_arr.shape
# Calculate residual image that results from normal (non-wavelet) Gaussian fitting
Op_make_residimage()(img)
resid = img.resid_gaus_arr
lpf = img.opts.atrous_lpf
if lpf not in ['b3', 'tr']: lpf = 'b3'
jmax = img.opts.atrous_jmax
            l = len(filter[lpf]['vec']) # 1st 3 is arbitrary and 2nd 3 is what's expected for a-trous
if jmax < 1 or jmax > 15: # determine jmax
# Check if largest island size is
# smaller than 1/3 of image size. If so, use it to determine jmax.
min_size = min(resid.shape)
max_isl_shape = (0, 0)
for isl in img.islands:
if isl.image.shape[0] * isl.image.shape[1] > max_isl_shape[0] * max_isl_shape[1]:
max_isl_shape = isl.image.shape
if max_isl_shape != (0, 0) and min(max_isl_shape) < min(resid.shape) / 3.0:
min_size = min(max_isl_shape) * 4.0
else:
min_size = min(resid.shape)
jmax = int(floor(log((min_size / 3.0 * 3.0 - l) / (l - 1) + 1) / log(2.0) + 1.0)) + 1
if min_size * 0.55 <= (l + (l - 1) * (2 ** (jmax) - 1)): jmax = jmax - 1
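                # Worked numeric example (illustrative): for the 'b3' filter
                # (l = 5) and min_size = 1024, the formula above gives jmax = 9;
                # the support check (5 + 4 * (2**9 - 1) = 2049 >= 0.55 * 1024)
                # then reduces it to jmax = 8.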
img.wavelet_lpf = lpf
img.wavelet_jmax = jmax
mylog.info("Using " + filter[lpf]['name'] + ' filter with J_max = ' + str(jmax))
img.atrous_islands = []
img.atrous_gaussians = []
img.atrous_sources = []
img.atrous_opts = []
img.resid_wavelets_arr = cp(img.resid_gaus_arr)
im_old = img.resid_wavelets_arr
total_flux = 0.0
ntot_wvgaus = 0
stop_wav = False
pix_masked = N.where(N.isnan(resid) == True)
jmin = 1
if img.opts.ncores is None:
numcores = 1
else:
numcores = img.opts.ncores
for j in range(jmin, jmax + 1): # extra +1 is so we can do bdsm on cJ as well
mylogger.userinfo(mylog, "\nWavelet scale #" + str(j))
im_new = self.atrous(im_old, filter[lpf]['vec'], lpf, j, numcores=numcores, use_scipy_fft=img.opts.use_scipy_fft)
                im_new[pix_masked] = N.nan # since fftconvolve won't work with blanked pixels
if img.opts.atrous_sum:
w = im_new
else:
w = im_old - im_new
im_old = im_new
suffix = 'w' + repr(j)
filename = img.imagename + '.atrous.' + suffix + '.fits'
if img.opts.output_all:
func.write_image_to_file('fits', filename, w, img, bdir)
mylog.info('%s %s' % ('Wrote ', img.imagename + '.atrous.' + suffix + '.fits'))
# now do bdsm on each wavelet image.
if dobdsm:
wopts['filename'] = filename
wopts['basedir'] = bdir
box = img.rms_box[0]
y1 = (l + (l - 1) * (2 ** (j - 1) - 1))
bs = max(5 * y1, box) # changed from 10 to 5
if bs > min(n, m) / 2:
wopts['rms_map'] = False
wopts['mean_map'] = 'const'
wopts['rms_box'] = None
else:
wopts['rms_box'] = (bs, bs/3)
if hasattr(img, '_adapt_rms_isl_pos'):
bs_bright = max(5 * y1, img.rms_box_bright[0])
if bs_bright < bs/1.5:
wopts['adaptive_rms_box'] = True
wopts['rms_box_bright'] = (bs_bright, bs_bright/3)
else:
wopts['adaptive_rms_box'] = False
if j <= 3:
wopts['ini_gausfit'] = 'default'
else:
wopts['ini_gausfit'] = 'nobeam'
wid = (l + (l - 1) * (2 ** (j - 1) - 1))# / 3.0
b1, b2 = img.pixel_beam()[0:2]
b1 = b1 * fwsig
b2 = b2 * fwsig
cdelt = img.wcs_obj.acdelt[:2]
wimg = Image(wopts)
wimg.beam = (sqrt(wid * wid + b1 * b1) * cdelt[0] * 2.0, sqrt(wid * wid + b2 * b2) * cdelt[1] * 2.0, 0.0)
wimg.orig_beam = img.beam
wimg.pixel_beam = img.pixel_beam
wimg.pixel_beamarea = img.pixel_beamarea
wimg.log = 'Wavelet.'
wimg.basedir = img.basedir
wimg.extraparams['bbsprefix'] = suffix
wimg.extraparams['bbsname'] = img.imagename + '.wavelet'
wimg.extraparams['bbsappend'] = True
wimg.bbspatchnum = img.bbspatchnum
wimg.waveletimage = True
wimg.j = j
wimg.indir = img.indir
if hasattr(img, '_adapt_rms_isl_pos'):
wimg._adapt_rms_isl_pos = img._adapt_rms_isl_pos
self.init_image_simple(wimg, img, w, '.atrous.' + suffix)
for op in wchain:
op(wimg)
gc.collect()
if isinstance(op, Op_islands) and img.opts.atrous_orig_isl:
if wimg.nisl > 0:
# Find islands that do not share any pixels with
# islands in original ch0 image.
good_isl = []
# Make original rank image boolean; rank counts from 0, with -1 being
# outside any island
orig_rankim_bool = N.array(img.pyrank + 1, dtype = bool)
# Multiply rank images
old_islands = orig_rankim_bool * (wimg.pyrank + 1) - 1
# Exclude islands that don't overlap with a ch0 island.
valid_ids = set(old_islands.flatten())
for idx, wvisl in enumerate(wimg.islands):
if idx in valid_ids:
wvisl.valid = True
good_isl.append(wvisl)
else:
wvisl.valid = False
wimg.islands = good_isl
wimg.nisl = len(good_isl)
mylogger.userinfo(mylog, "Number of islands found", '%i' %
wimg.nisl)
# Renumber islands:
for wvindx, wvisl in enumerate(wimg.islands):
wvisl.island_id = wvindx
if isinstance(op, Op_gausfit):
# If opts.atrous_orig_isl then exclude Gaussians outside of
# the original ch0 islands
nwvgaus = 0
if img.opts.atrous_orig_isl:
gaul = wimg.gaussians
tot_flux = 0.0
if img.ngaus == 0:
gaus_id = -1
else:
gaus_id = img.gaussians[-1].gaus_num
for g in gaul:
if not hasattr(g, 'valid'):
g.valid = False
if not g.valid:
try:
isl_id = img.pyrank[int(g.centre_pix[0] + 1), int(g.centre_pix[1] + 1)]
except IndexError:
isl_id = -1
if isl_id >= 0:
isl = img.islands[isl_id]
gcenter = (int(g.centre_pix[0] - isl.origin[0]),
int(g.centre_pix[1] - isl.origin[1]))
if not isl.mask_active[gcenter]:
gaus_id += 1
gcp = Gaussian(img, g.parameters[:], isl.island_id, gaus_id)
gcp.gaus_num = gaus_id
gcp.wisland_id = g.island_id
gcp.jlevel = j
g.valid = True
isl.gaul.append(gcp)
isl.ngaus += 1
img.gaussians.append(gcp)
nwvgaus += 1
tot_flux += gcp.total_flux
else:
g.valid = False
g.jlevel = 0
else:
g.valid = False
g.jlevel = 0
vg = []
for g in wimg.gaussians:
if g.valid:
vg.append(g)
wimg.gaussians = vg
mylogger.userinfo(mylog, "Number of valid wavelet Gaussians", str(nwvgaus))
else:
# Keep all Gaussians and merge islands that overlap
tot_flux = check_islands_for_overlap(img, wimg)
# Now renumber the islands and adjust the rank image before going to next wavelet image
renumber_islands(img)
total_flux += tot_flux
if img.opts.interactive and has_pl:
dc = '\033[34;1m'
nc = '\033[0m'
print(dc + '--> Displaying islands and rms image...' + nc)
if max(wimg.ch0_arr.shape) > 4096:
print(dc + '--> Image is large. Showing islands only.' + nc)
wimg.show_fit(rms_image=False, mean_image=False, ch0_image=False,
ch0_islands=True, gresid_image=False, sresid_image=False,
gmodel_image=False, smodel_image=False, pyramid_srcs=False)
else:
wimg.show_fit()
prompt = dc + "Press enter to continue or 'q' stop fitting wavelet images : " + nc
answ = raw_input_no_history(prompt)
while answ != '':
if answ == 'q':
img.wavelet_jmax = j
stop_wav = True
break
answ = raw_input_no_history(prompt)
if len(wimg.gaussians) > 0:
img.resid_wavelets_arr = self.subtract_wvgaus(img.opts, img.resid_wavelets_arr, wimg.gaussians, wimg.islands)
if img.opts.atrous_sum:
im_old = self.subtract_wvgaus(img.opts, im_old, wimg.gaussians, wimg.islands)
if stop_wav == True:
break
pyrank = N.zeros(img.pyrank.shape, dtype=N.int32)
for i, isl in enumerate(img.islands):
isl.island_id = i
for g in isl.gaul:
g.island_id = i
for dg in isl.dgaul:
dg.island_id = i
pyrank[tuple(isl.bbox)] += N.invert(isl.mask_active) * (i + 1)
pyrank -= 1 # align pyrank values with island ids and set regions outside of islands to -1
img.pyrank = pyrank
img.ngaus += ntot_wvgaus
img.total_flux_gaus += total_flux
mylogger.userinfo(mylog, "Total flux density in model on all scales" , '%.3f Jy' % img.total_flux_gaus)
if img.opts.output_all:
func.write_image_to_file('fits', img.imagename + '.atrous.cJ.fits',
im_new, img, bdir)
mylog.info('%s %s' % ('Wrote ', img.imagename + '.atrous.cJ.fits'))
func.write_image_to_file('fits', img.imagename + '.resid_wavelets.fits',
(img.ch0_arr - img.resid_gaus_arr + img.resid_wavelets_arr), img, bdir + '/residual/')
mylog.info('%s %s' % ('Wrote ', img.imagename + '.resid_wavelets.fits'))
func.write_image_to_file('fits', img.imagename + '.model_wavelets.fits',
(img.resid_gaus_arr - img.resid_wavelets_arr), img, bdir + '/model/')
mylog.info('%s %s' % ('Wrote ', img.imagename + '.model_wavelets.fits'))
img.completed_Ops.append('wavelet_atrous')
#######################################################################################################
def atrous(self, image, filtvec, lpf, j, numcores=1, use_scipy_fft=True):
ff = filtvec[:]
for i in range(1, len(filtvec)):
ii = 1 + (2 ** (j - 1)) * (i - 1)
ff[ii:ii] = [0] * (2 ** (j - 1) - 1)
kern = N.outer(ff, ff)
unmasked = N.nan_to_num(image)
if use_scipy_fft:
im_new = scipy.signal.fftconvolve(unmasked, kern, mode = 'same')
else:
im_new = fftconvolve(unmasked, kern, mode = 'same', pad_to_power_of_two=False, numcores=numcores)
if im_new.shape != image.shape:
im_new = im_new[0:image.shape[0], 0:image.shape[1]]
return im_new
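        # Worked example (illustrative): for the 'tr' filter [1/4, 1/2, 1/4] and
        # j = 2, one zero is inserted between adjacent taps, giving
        # ff = [1/4, 0, 1/2, 0, 1/4]; the 2-D kernel is then N.outer(ff, ff).
        # In general scale j dilates the filter by inserting 2**(j-1) - 1 zeros.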
#######################################################################################################
def setpara_bdsm(self, img):
chain = [Op_preprocess, Op_rmsimage(), Op_threshold(), Op_islands(),
Op_gausfit(), Op_gaul2srl(), Op_make_residimage()]
opts = {'thresh':'hard'}
opts['thresh_pix'] = img.thresh_pix
opts['kappa_clip'] = 3.0
opts['rms_map'] = img.opts.rms_map
opts['mean_map'] = img.opts.mean_map
opts['thresh_isl'] = img.opts.thresh_isl
opts['minpix_isl'] = 6
opts['savefits_rmsim'] = False
opts['savefits_meanim'] = False
opts['savefits_rankim'] = False
opts['savefits_normim'] = False
opts['polarisation_do'] = False
opts['aperture'] = None
opts['group_by_isl'] = img.opts.group_by_isl
opts['quiet'] = img.opts.quiet
opts['ncores'] = img.opts.ncores
opts['flag_smallsrc'] = False
opts['flag_minsnr'] = 0.2
opts['flag_maxsnr'] = 1.2
opts['flag_maxsize_isl'] = 2.5
opts['flag_bordersize'] = 0
opts['flag_maxsize_bm'] = 50.0
opts['flag_minsize_bm'] = 0.2
opts['flag_maxsize_fwhm'] = 0.5
opts['bbs_patches'] = img.opts.bbs_patches
opts['filename'] = ''
opts['output_all'] = img.opts.output_all
opts['verbose_fitting'] = img.opts.verbose_fitting
opts['split_isl'] = False
opts['peak_fit'] = True
opts['peak_maxsize'] = 30.0
opts['detection_image'] = ''
opts['verbose_fitting'] = img.opts.verbose_fitting
ops = []
for op in chain:
if isinstance(op, type):
ops.append(op())
else:
ops.append(op)
return ops, opts
#######################################################################################################
def init_image_simple(self, wimg, img, w, name):
wimg.ch0_arr = w
wimg.ch0_Q_arr = None
wimg.ch0_U_arr = None
wimg.ch0_V_arr = None
wimg.wcs_obj = img.wcs_obj
wimg.parentname = img.filename
wimg.filename = img.filename + name
wimg.imagename = img.imagename + name + '.pybdsf'
wimg.pix2sky = img.pix2sky
wimg.sky2pix = img.sky2pix
wimg.pix2beam = img.pix2beam
wimg.beam2pix = img.beam2pix
wimg.pix2gaus = img.pix2gaus
wimg.gaus2pix = img.gaus2pix
wimg.pix2coord = img.pix2coord
wimg.masked = img.masked
wimg.mask_arr = img.mask_arr
wimg.use_io = img.use_io
wimg.do_cache = img.do_cache
wimg.tempdir = img.tempdir
wimg.shape = img.shape
wimg.frequency = img.frequency
wimg.equinox = img.equinox
wimg.use_io = 'fits'
######################################################################################################
def subtract_wvgaus(self, opts, residim, gaussians, islands):
from . import functions as func
from .make_residimage import Op_make_residimage as opp
dummy = opp()
shape = residim.shape
thresh = opts.fittedimage_clip
for g in gaussians:
if g.valid:
C1, C2 = g.centre_pix
if hasattr(g, 'wisland_id'):
isl = islands[g.wisland_id]
else:
isl = islands[g.island_id]
b = opp.find_bbox(dummy, thresh * isl.rms, g)
bbox = N.s_[max(0, int(C1 - b)):min(shape[0], int(C1 + b + 1)),
max(0, int(C2 - b)):min(shape[1], int(C2 + b + 1))]
x_ax, y_ax = N.mgrid[bbox]
ffimg = func.gaussian_fcn(g, x_ax, y_ax)
residim[bbox] = residim[bbox] - ffimg
return residim
#######################################################################################################
def morphfilter_pyramid(self, img, bdir):
from math import ceil, floor
jmax = img.wavelet_jmax
ind = [i for i, isl in enumerate(img.atrous_islands) if len(isl) > 0]
ind.reverse()
lpyr = []
img.npyrsrc = -1
if len(ind) > 0 :
for i in ind:
isls = img.atrous_islands[i]
for isl in isls:
if i != ind[0]:
dumr = []
for pyrsrc in lpyr:
belongs = pyrsrc.belongs(img, isl)
if belongs: dumr.append(pyrsrc.pyr_id)
#if len(dumr) > 1:
# raise RuntimeError("Source in lower wavelet level belongs to more than one higher level.")
if len(dumr) == 1:
dumr = dumr[0]
pyrsrc = lpyr[dumr]
pyrsrc.add_level(img, i, isl)
else:
pyrsrc = Pyramid_source(img, isl, i)
lpyr.append(pyrsrc)
else:
pyrsrc = Pyramid_source(img, isl, i)
lpyr.append(pyrsrc)
img.pyrsrcs = lpyr
if img.opts.plot_pyramid and has_pl:
pl.figure()
a = ceil(sqrt(jmax)); b = floor(jmax / a)
if a * b < jmax: b += 1
colours = ['r', 'g', 'b', 'c', 'm', 'y', 'k']
sh = img.ch0_arr.shape
for pyr in img.pyrsrcs:
for iisl, isl in enumerate(pyr.islands):
jj = pyr.jlevels[iisl]
col = colours[pyr.pyr_id % 7]
pl.subplot(a, b, jj)
ind = N.where(~isl.mask_active)
pl.plot(ind[0] + isl.origin[0], ind[1] + isl.origin[1], '.', color = col)
pl.axis([0.0, sh[0], 0.0, sh[1]])
pl.title('J = ' + str(jj))
pl.savefig(bdir + img.imagename + '.pybdsf.atrous.pyramidsrc.png')
#######################################################################################################
class Pyramid_source(object):
""" Pyramid_source is a source constructed out of multiple wavelet transform images. """
def __init__(self, img, island, level0):
img.npyrsrc = img.npyrsrc + 1
self.pyr_id = img.npyrsrc
self.islands = [island]
self.jlevels = [level0]
def belongs(self, img, isl):
from . import functions as func
# get centroid of island (as integer)
mom = func.momanalmask_gaus(isl.image, isl.mask_active, 0, 1.0, False)
cen = N.array(mom[1:3]) + isl.origin
belong = False
# check if lies within any island of self
for i, pyrisl in enumerate(self.islands):
if N.sum([pyrisl.bbox[j].start <= cen[j] < pyrisl.bbox[j].stop for j in range(2)]) == 2:
pix = tuple([cen[j] - pyrisl.origin[j] for j in range(2)])
if not pyrisl.mask_active[pix]:
belong = True
return belong
def add_level(self, img, level, isl):
self.islands.append(isl)
self.jlevels.append(level + 1)
Image.pyrsrcs = List(tInstance(Pyramid_source), doc = "List of Pyramidal sources")
def fftconvolve(in1, in2, mode="full", pad_to_power_of_two=True, numcores=1):
"""Convolve two N-dimensional arrays using FFT. See convolve.
"""
s1 = array(in1.shape)
s2 = array(in2.shape)
    complex_result = (N.issubdtype(in1.dtype, N.complexfloating) or
                      N.issubdtype(in2.dtype, N.complexfloating))
size = s1 + s2 - 1
if pad_to_power_of_two:
# Use 2**n-sized FFT; it might improve performance
        fsize = (2 ** N.ceil(N.log2(size))).astype(int)
else:
# Padding to a power of two might degrade performance, too
fsize = size
if has_pyfftw:
IN1 = N.fft.fftn(in1, fsize, threads=numcores)
IN1 *= N.fft.fftn(in2, fsize, threads=numcores)
fslice = tuple([slice(0, int(sz)) for sz in size])
ret = N.fft.ifftn(IN1, threads=numcores)[fslice].copy()
else:
IN1 = N.fft.fftn(in1, fsize)
IN1 *= N.fft.fftn(in2, fsize)
fslice = tuple([slice(0, int(sz)) for sz in size])
ret = N.fft.ifftn(IN1)[fslice].copy()
del IN1
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
if product(s1, axis=0) > product(s2, axis=0):
osize = s1
else:
osize = s2
return func.centered(ret, osize)
elif mode == "valid":
return func.centered(ret, abs(s2 - s1) + 1)
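# Worked example (illustrative): convolving a 100 x 100 image with a 5 x 5
# kernel gives a full linear-convolution size of 104 x 104. With
# pad_to_power_of_two=True the FFTs are done on a 128 x 128 grid, and with
# mode='same' the result is cropped back to the 100 x 100 shape of the larger
# input.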
def rebase_bbox(box,minv):
# return a new bounding box tuple where minv is subtracted from
# all the co-ordinate values
nbox=[]
for i,sl in enumerate(box):
nbox.append(slice(sl.start-minv[i],sl.stop-minv[i],None))
return tuple(nbox)
def merge_bbox(box1,box2):
# For two bounding box tuples find the minimal n-dimensional space
# that encompasses both structures and make new bounding boxes in
# this co-ordinate system
minv=[]
maxv=[]
for sl1,sl2 in zip(box1,box2):
minv.append(min(sl1.start,sl2.start))
maxv.append(max(sl1.stop,sl2.stop))
nbox1=rebase_bbox(box1,minv)
nbox2=rebase_bbox(box2,minv)
dims=[y-x for x,y in zip(minv,maxv)]
fullbox=[slice(x,y,None) for x,y in zip(minv,maxv)]
return dims,nbox1,nbox2,N.array(minv),fullbox
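# Worked example (illustrative): for box1 = (slice(10, 20), slice(30, 40)) and
# box2 = (slice(15, 25), slice(28, 38)), the common origin is (10, 28), so
# merge_bbox returns dims = [15, 12], nbox1 = (slice(0, 10), slice(2, 12)),
# nbox2 = (slice(5, 15), slice(0, 10)) and fullbox = [slice(10, 25), slice(28, 40)].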
def merge_islands(img, isl1, isl2):
"""Merge two islands into one
    Final island has island_id of isl1. The Gaussians from isl2 are appended to
    those in the isl1 list, with numbering starting from the last number in
img.gaussians (which is also updated with the isl2 Gaussians).
The merged island replaces isl1 in img.
"""
from .islands import Island
import scipy.ndimage as nd
shape,nbox1,nbox2,origin,fullbox=merge_bbox(isl1.bbox,isl2.bbox)
mask1 = N.zeros(shape, dtype=bool)
mask1[nbox1] = ~isl1.mask_active
mask2 = N.zeros(shape, dtype=bool)
mask2[nbox2] = ~isl2.mask_active
overlap_mask = N.logical_and(mask1, mask2)
if N.any(overlap_mask):
full_mask = N.logical_or(mask1, mask2)
image = img.ch0_arr
mask = img.mask_arr
rms = img.rms_arr
mean = img.mean_arr
rank = len(image.shape)
connectivity = nd.generate_binary_structure(rank, rank)
labels, count = nd.label(full_mask, connectivity)
slices = nd.find_objects(labels)
bbox = slices[0]
new_bbox = rebase_bbox(bbox,-origin)
idx = isl1.island_id
# labels array passed to Island must be capable of being
# indexed by new bounding box, so convert. Do the subtraction
# first to avoid an expensive operation over the whole array
labels = labels-1+idx
new_labels = N.zeros(image.shape)
new_labels[tuple(fullbox)]=labels
beamarea = img.pixel_beamarea()
merged_isl = Island(image, mask, mean, rms, new_labels, new_bbox, idx, beamarea)
# Add all the Gaussians to the merged island
merged_isl.gaul = isl1.gaul
merged_isl.dgaul = isl1.dgaul
copy_gaussians(img, merged_isl, isl2)
img.islands[idx] = merged_isl
def copy_gaussians(img, isl1, isl2):
"""Copies Gaussians from isl2 to isl1
img.gaussians is also updated
"""
if img.ngaus == 0:
gaus_id = -1
else:
gaus_id = img.gaussians[-1].gaus_num
for g in isl2.gaul:
gaus_id += 1
gcp = Gaussian(img, g.parameters[:], isl1.island_id, gaus_id)
gcp.gaus_num = gaus_id
gcp.jlevel = g.jlevel
isl1.gaul.append(gcp)
img.ngaus += 1
img.gaussians.append(gcp)
def renumber_islands(img):
"""Renumbers island_ids (after, e.g., removing one)
Also renumbers the pyrank image.
"""
pyrank = N.zeros(img.pyrank.shape, dtype=N.int32)
for i, isl in enumerate(img.islands):
isl.island_id = i
for g in isl.gaul:
g.island_id = i
for dg in isl.dgaul:
dg.island_id = i
pyrank[tuple(isl.bbox)] += N.invert(isl.mask_active) * (i + 1)
pyrank -= 1 # align pyrank values with island ids and set regions outside of islands to -1
img.pyrank = pyrank
gaussian_list = [g for isl in img.islands for g in isl.gaul]
img.gaussians = gaussian_list
def check_islands_for_overlap(img, wimg):
"""Checks for overlaps between img and wimg islands"""
    have_numexpr = True
    try:
        import numexpr as ne
    except ImportError:
        have_numexpr = False
tot_flux = 0.0
bar = statusbar.StatusBar('Checking islands for overlap ............ : ', 0, len(wimg.islands))
# Make masks for regions that have islands
wpp=wimg.pyrank+1 # does not change, store for later
wav_rankim_bool = wpp>0 # boolean
orig_rankim_bool = img.pyrank>-1
    # Make "images" of island ids for overlapping regions
orig_islands = wav_rankim_bool * (img.pyrank + 1) - 1
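    # Worked example (illustrative): at a pixel where the wavelet image has an
    # island (wav_rankim_bool is True) and img.pyrank is 3, orig_islands is 3;
    # at a pixel outside any wavelet island it is -1, whatever img.pyrank is.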
if not img.opts.quiet:
bar.start()
for idx, wvisl in enumerate(wimg.islands):
if len(wvisl.gaul) > 0:
# Get unique island IDs. If an island overlaps with one
# in the original ch0 image, merge them together. If not,
# add the island as a new one.
wav_islands = orig_rankim_bool[tuple(wvisl.bbox)] * wpp[tuple(wvisl.bbox)] - 1
wav_ids = N.unique(wav_islands) # saves conversion to set and back
for wvg in wvisl.gaul:
tot_flux += wvg.total_flux
wvg.valid = True
if idx in wav_ids:
orig_idx=N.unique(orig_islands[tuple(wvisl.bbox)][wav_islands == idx])
if len(orig_idx) == 1:
merge_islands(img, img.islands[orig_idx[0]], wvisl)
else:
merge_islands(img, img.islands[orig_idx[0]], wvisl)
for oidx in orig_idx[1:]:
merge_islands(img, img.islands[orig_idx[0]], img.islands[oidx])
img.islands = [x for x in img.islands if x.island_id not in orig_idx[1:]]
renumber_islands(img)
# Now recalculate the overlap images, since the islands have changed
ipp=img.pyrank+1
if have_numexpr:
orig_islands = ne.evaluate('wav_rankim_bool * ipp - 1')
else:
orig_islands = wav_rankim_bool * ipp - 1
else:
isl_id = img.islands[-1].island_id + 1
new_isl = wvisl.copy(img.pixel_beamarea(), image=img.ch0_arr[tuple(wvisl.bbox)], mean=img.mean_arr[tuple(wvisl.bbox)], rms=img.rms_arr[tuple(wvisl.bbox)])
new_isl.gaul = []
new_isl.dgaul = []
new_isl.island_id = isl_id
img.islands.append(new_isl)
copy_gaussians(img, new_isl, wvisl)
if not img.opts.quiet:
bar.increment()
bar.stop()
return tot_flux
| 30,509 | 40.966988 | 170 |
py
|
PyBDSF
|
PyBDSF-master/bdsf/mylogger.py
|
""" WARNING, ERROR, and CRITICAL are always output to screen and to log file.
INFO and USERINFO always go to the log file. DEBUG goes to log file if debug is
True. USERINFO goes to screen only if quiet is False.
Use as follows:
mylog = mylogger.logging.getLogger("name")
mylog.info('info') --> print to logfile, but not to screen
mylogger.userinfo(mylog, 'info') --> print to screen (if quiet==False)
and to logfile
"""
import logging
from socket import gethostname
import copy
def init_logger(logfilename, quiet=False, debug=False):
logging.USERINFO = logging.INFO + 1
logging.addLevelName(logging.USERINFO, 'USERINFO')
logger = logging.root
logger.setLevel(logging.DEBUG)
    # First remove any existing handlers (in case PyBDSM has been run
    # before in this session but the quiet or debug options have changed)
while len(logger.handlers) > 0:
logger.removeHandler(logger.handlers[0])
# File handlers
fh = ColorStripperHandler(logfilename)
if debug:
# For log file and debug on, print name and levelname
fh.setLevel(logging.DEBUG)
fmt1 = MultiLineFormatter('%(asctime)s %(name)-20s:: %(levelname)-8s: '\
'%(message)s',
datefmt='%a %d-%m-%Y %H:%M:%S')
else:
# For log file and debug off, don't print name and levelname as
# they have no meaning to the user.
fh.setLevel(logging.INFO)
fmt1 = MultiLineFormatter('%(asctime)s:: %(levelname)-8s: %(message)s',
datefmt='%a %d-%m-%Y %H:%M:%S')
fh.setFormatter(fmt1)
logger.addHandler(fh)
# Console handler for warning, error, and critical: format includes levelname
# ANSI colors are used
ch = logging.StreamHandler()
ch.setLevel(logging.WARNING)
fmt2 = logging.Formatter('\033[31;1m%(levelname)s\033[0m: %(message)s')
ch.setFormatter(fmt2)
logger.addHandler(ch)
# Console handler for USERINFO only: format does not include levelname
# (the user does not need to see the levelname, as it has no meaning to them)
# ANSI colors are allowed
chi = logging.StreamHandler()
chi.addFilter(InfoFilter())
if quiet:
# prints nothing, since filter lets only USERINFO through
chi.setLevel(logging.WARNING)
else:
# prints only USERINFO
chi.setLevel(logging.USERINFO)
fmt3 = logging.Formatter('%(message)s')
chi.setFormatter(fmt3)
logger.addHandler(chi)
class InfoFilter(logging.Filter):
# Lets only USERINFO through
def filter(self, rec):
return rec.levelno == logging.USERINFO
class MultiLineFormatter(logging.Formatter):
def format(self, record):
str = logging.Formatter.format(self, record)
header, footer = str.split(record.message)
nocolor_header = strip_color(header)
str = str.replace('\n', '\n' + ' '*len(nocolor_header))
return str
def userinfo(mylog, desc_str, val_str=''):
"""Writes a nicely formatted string to the log file and console
mylog = logger
desc_str = description string / message
val_str = value string
Message is constructed as:
'desc_str .... : val_str'
"""
bc = '\033[1;34m' # Blue
nc = '\033[0m' # Normal text color
if val_str == '':
sep = ''
if desc_str[:1] == '\n':
bc += '\n'
desc_str = desc_str[1:]
desc_str = bc + '--> ' + desc_str + nc
else:
sep = ' : '
if len(desc_str) < 40:
desc_str += ' '
if len(desc_str) < 40:
while len(desc_str) < 41:
desc_str += '.'
else:
while len(desc_str) < 41:
desc_str += ' '
mylog.log(logging.USERINFO, desc_str+sep+val_str)
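# Example (illustrative): userinfo(mylog, 'Number of islands found', '12')
# produces a log line like "Number of islands found ....... : 12"; the
# description is padded with dots to a fixed width of 41 characters before the
# ' : ' separator and the value are appended.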
class ColorStripperHandler(logging.FileHandler):
def emit(self, record):
"""Strips ANSI color codes from file stream"""
myrecord = copy.copy(record)
nocolor_msg = strip_color(myrecord.msg)
myrecord.msg = nocolor_msg
logging.FileHandler.emit(self, myrecord)
def strip_color(msg):
"""Strips specific ANSI color codes from an input string
The color codes are hard-coded to those used above
in userinfo() and in WARNING, ERROR, and CRITICAL.
"""
nocolor_msg = ''
a = msg.split('\033[1;34m')
for b in a:
c = b.split('\033[0m')
for d in c:
e = d.split('\033[31;1m')
for f in e:
nocolor_msg += f
return nocolor_msg
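# Illustrative check (not used elsewhere in the module): strip_color removes
# the specific ANSI sequences added by userinfo() and the console formatters.
def _example_strip_color():
    return strip_color('\033[1;34m--> Reading file\033[0m')  # '--> Reading file'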
| 4,331 | 30.852941 | 79 |
py
|
PyBDSF
|
PyBDSF-master/bdsf/polarisation.py
|
"""Module polarisation.
This module finds the Q, U, and V fluxes, the total, linear, and circular
polarisation fractions and the linear polarisation angle of each source identified
by gaul2srl. The position angle is defined from North, with positive angles
towards East.
"""
from __future__ import absolute_import
from .image import *
from .islands import *
from .gaul2srl import *
from .preprocess import Op_preprocess
from .rmsimage import Op_rmsimage
from .threshold import Op_threshold
from .islands import Op_islands
from .gausfit import Op_gausfit
from .gaul2srl import Op_gaul2srl
from .make_residimage import Op_make_residimage
from .const import fwsig
from . import mylogger
import numpy as N
from . import functions as func
from . import statusbar
class Op_polarisation(Op):
""" Finds the flux in each Stokes and calculates the polarisation fraction
and angle.
Fluxes are calculated by summing all nonmasked pixels assigned to
the Gaussian. If a pixel contains contributions from two or more
Gaussians, its flux is divided between the Gaussians by the ratio of
fluxes that they contribute to the pixel. Errors on the fluxes are
derived by summing the same pixels in the rms maps in quadrature.
The results are stored in the Gaussian and Source structures.
Fits are also done to the polarized intensity (PI) image to
determine if there are any islands of emission that lie outside
those found in the I image. If there are, they are fit and the
process above is done for them too.
For linearly polarised emission, the signal and noise add
vectorially, giving a Rice distribution (Vinokur 1965) instead of a
Gaussian one. To correct for this, a bias is estimated and removed
from the polarisation fraction using the same method used for the
NVSS catalog (see ftp://ftp.cv.nrao.edu/pub/nvss/catalog.ps). Errors
on the linear and total polarisation fractions and polarisation
angle are estimated using the debiased polarised flux and standard
error propagation. See Sparks & Axon (1999) for a more detailed
treatment.
Prerequisites: module gaul2srl should be run first."""
def __call__(self, img):
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Polarisatn")
if img.opts.polarisation_do:
mylog.info('Extracting polarisation properties for all sources')
pols = ['I', 'Q', 'U', 'V']
            # Run gausfit and gaul2srl on PI image to look for polarized sources
# undetected in I
fit_PI = img.opts.pi_fit
n_new = 0
ch0_pi = N.sqrt(img.ch0_Q_arr**2 + img.ch0_U_arr**2)
img.ch0_pi_arr = ch0_pi
if fit_PI:
from . import _run_op_list
mylogger.userinfo(mylog, "\nChecking PI image for new sources")
mask = img.mask_arr
# Set up image object for PI image.
pi_chain, pi_opts = self.setpara_bdsm(img)
pimg = Image(pi_opts)
pimg.beam = img.beam
pimg.pixel_beam = img.pixel_beam
pimg.pixel_beamarea = img.pixel_beamarea
pimg.log = 'PI.'
pimg.basedir = img.basedir
pimg.imagename = img.imagename
pimg.frequency = img.frequency
pimg.equinox = img.equinox
pimg.shape = img.shape
pimg.pix2beam = img.pix2beam
pimg.beam2pix = img.beam2pix
pimg.pix2gaus = img.pix2gaus
pimg.gaus2pix = img.gaus2pix
pimg.pix2sky = img.pix2sky
pimg.sky2pix = img.sky2pix
pimg.pix2coord = img.pix2coord
pimg.wcs_obj = img.wcs_obj
pimg.mask_arr = mask
pimg.masked = img.masked
pimg.ch0_arr = ch0_pi
pimg._pi = True
success = _run_op_list(pimg, pi_chain)
if not success:
return
img.pi_islands = pimg.islands
img.pi_gaussians = pimg.gaussians
img.pi_sources = pimg.sources
# Now check for new sources in the PI image that are not
# found in the Stokes I image. If any new sources are found,
# adjust their IDs to follow after those found in I.
new_isl = []
new_src = []
new_gaus = []
n_new_src = 0
if len(img.islands) == 0:
isl_id = 0
src_id = 0
gaus_id = 0
else:
isl_id = img.islands[-1].island_id
src_id = img.sources[-1].source_id
gaus_id = img.gaussians[-1].gaus_num
for pi_isl in pimg.islands:
new_sources = []
for pi_src in pi_isl.sources:
if img.pyrank[int(img.sky2pix(pi_src.posn_sky_max)[0]),
int(img.sky2pix(pi_src.posn_sky_max)[1])] == -1:
src_id += 1
pi_src._pi = True
pi_src.island_id = isl_id
pi_src.source_id = src_id
pi_src.spec_indx = N.NaN
pi_src.e_spec_indx = N.NaN
pi_src.spec_norm = N.NaN
pi_src.specin_flux = [N.NaN]
pi_src.specin_fluxE = [N.NaN]
pi_src.specin_freq = [N.NaN]
pi_src.specin_freq0 = N.NaN
for gaus in pi_src.gaussians:
gaus.island_id = isl_id
gaus.source_id = src_id
gaus.spec_indx = N.NaN
gaus.e_spec_indx = N.NaN
gaus.spec_norm = N.NaN
gaus.specin_flux = [N.NaN]
gaus.specin_fluxE = [N.NaN]
gaus.specin_freq = [N.NaN]
gaus.specin_freq0 = N.NaN
new_sources.append(pi_src)
new_src.append(pi_src)
n_new_src += 1
for g in pi_src.gaussians:
gaus_id += 1
new_gaus.append(g)
g.gaus_num = gaus_id
if len(new_sources) > 0:
isl_id += 1
pi_isl.sources = new_sources
pi_isl.island_id = isl_id
pi_isl._pi = True
new_isl.append(pi_isl)
n_new = len(new_isl)
mylogger.userinfo(mylog, "New sources found in PI image", '%i (%i total)' %
(n_new, img.nsrc+n_new))
if n_new > 0:
img.islands += new_isl
img.sources += new_src
img.gaussians += new_gaus
img.nsrc += n_new_src
renumber_islands(img)
bar = statusbar.StatusBar('Calculating polarisation properties .... : ', 0, img.nsrc)
if img.opts.quiet == False:
bar.start()
for isl in img.islands:
isl_bbox = isl.bbox
ch0_I = img.ch0_arr[tuple(isl_bbox)]
ch0_Q = img.ch0_Q_arr[tuple(isl_bbox)]
ch0_U = img.ch0_U_arr[tuple(isl_bbox)]
ch0_V = img.ch0_V_arr[tuple(isl_bbox)]
ch0_images = [ch0_I, ch0_Q, ch0_U, ch0_V]
for i, src in enumerate(isl.sources):
# For each source, assume the morphology does not change
# across the Stokes cube. This assumption allows us to fit
# the Gaussians of each source to each Stokes image by
# simply fitting only the overall normalizations of the
# individual Gaussians.
#
# First, fit all source Gaussians to each Stokes image:
x, y = N.mgrid[isl_bbox]
gg = src.gaussians
fitfix = N.ones(len(gg)) # fit only normalization
srcmask = isl.mask_active
total_flux = N.zeros((4, len(fitfix)), dtype=N.float32) # array of fluxes: N_Stokes x N_Gaussians
                    errors = N.zeros((4, len(fitfix)), dtype=N.float32) # array of flux errors: N_Stokes x N_Gaussians
for sind, image in enumerate(ch0_images):
if (sind==0 and hasattr(src, '_pi')) or sind > 0: # Fit I only for PI sources
p, ep = func.fit_mulgaus2d(image, gg, x, y, srcmask, fitfix)
for ig in range(len(fitfix)):
bm_pix = N.array([img.pixel_beam()[0], img.pixel_beam()[1], img.pixel_beam()[2]])
total_flux[sind, ig] = p[ig*6]*p[ig*6+3]*p[ig*6+4]/(bm_pix[0]*bm_pix[1])
p = N.insert(p, N.arange(len(fitfix))*6+6, total_flux[sind])
if sind > 0:
rms_img = img.__getattribute__('rms_'+pols[sind]+'_arr')
else:
rms_img = img.rms_arr
if len(rms_img.shape) > 1:
rms_isl = rms_img[tuple(isl.bbox)].mean()
else:
rms_isl = rms_img
errors[sind] = func.get_errors(img, p, rms_isl)[6]
# Now, assign fluxes to each Gaussian.
src_flux_I = 0.0
src_flux_Q = 0.0
src_flux_U = 0.0
src_flux_V = 0.0
src_flux_I_err_sq = 0.0
src_flux_Q_err_sq = 0.0
src_flux_U_err_sq = 0.0
src_flux_V_err_sq = 0.0
for ig, gaussian in enumerate(src.gaussians):
init_gaus_attr(gaussian)
flux_I = total_flux[0, ig]
flux_I_err = abs(errors[0, ig])
flux_Q = total_flux[1, ig]
flux_Q_err = abs(errors[1, ig])
flux_U = total_flux[2, ig]
flux_U_err = abs(errors[2, ig])
flux_V = total_flux[3, ig]
flux_V_err = abs(errors[3, ig])
if hasattr(src, '_pi'):
gaussian.total_flux = flux_I
gaussian.total_fluxE = flux_I_err
gaussian.total_flux_Q = flux_Q
gaussian.total_flux_U = flux_U
gaussian.total_flux_V = flux_V
gaussian.total_fluxE_Q = flux_Q_err
gaussian.total_fluxE_U = flux_U_err
gaussian.total_fluxE_V = flux_V_err
if hasattr(src, '_pi'):
src_flux_I += flux_I
src_flux_I_err_sq += flux_I_err**2
src_flux_Q += flux_Q
src_flux_U += flux_U
src_flux_V += flux_V
src_flux_Q_err_sq += flux_Q_err**2
src_flux_U_err_sq += flux_U_err**2
src_flux_V_err_sq += flux_V_err**2
# Calculate and store polarisation fractions and angle for each Gaussian in the island
# For this we need the I flux, which we can just take from g.total_flux and src.total_flux
flux_I = gaussian.total_flux
flux_I_err = gaussian.total_fluxE
stokes = [flux_I, flux_Q, flux_U, flux_V]
stokes_err = [flux_I_err, flux_Q_err, flux_U_err, flux_V_err]
lpol_frac, lpol_frac_loerr, lpol_frac_hierr = self.calc_lpol_fraction(stokes, stokes_err) # linear pol fraction
lpol_ang, lpol_ang_err = self.calc_lpol_angle(stokes, stokes_err) # linear pol angle
cpol_frac, cpol_frac_loerr, cpol_frac_hierr = self.calc_cpol_fraction(stokes, stokes_err) # circular pol fraction
tpol_frac, tpol_frac_loerr, tpol_frac_hierr = self.calc_tpol_fraction(stokes, stokes_err) # total pol fraction
gaussian.lpol_fraction = lpol_frac
gaussian.lpol_fraction_loerr = lpol_frac_loerr
gaussian.lpol_fraction_hierr = lpol_frac_hierr
gaussian.cpol_fraction = cpol_frac
gaussian.cpol_fraction_loerr = cpol_frac_loerr
gaussian.cpol_fraction_hierr = cpol_frac_hierr
gaussian.tpol_fraction = tpol_frac
gaussian.tpol_fraction_loerr = tpol_frac_loerr
gaussian.tpol_fraction_hierr = tpol_frac_hierr
gaussian.lpol_angle = lpol_ang
gaussian.lpol_angle_err = lpol_ang_err
# Store fluxes for each source in the island
init_src_attr(src)
if hasattr(src, '_pi'):
src.total_flux = src_flux_I
src.total_fluxE = N.sqrt(src_flux_I_err_sq)
src.total_flux_Q = src_flux_Q
src.total_flux_U = src_flux_U
src.total_flux_V = src_flux_V
src.total_fluxE_Q = N.sqrt(src_flux_Q_err_sq)
src.total_fluxE_U = N.sqrt(src_flux_U_err_sq)
src.total_fluxE_V = N.sqrt(src_flux_V_err_sq)
# Calculate and store polarisation fractions and angle for each source in the island
# For this we need the I flux, which we can just take from g.total_flux and src.total_flux
src_flux_I = src.total_flux
src_flux_I_err = src.total_fluxE
stokes = [src_flux_I, src_flux_Q, src_flux_U, src_flux_V]
stokes_err = [src_flux_I_err, N.sqrt(src_flux_Q_err_sq), N.sqrt(src_flux_U_err_sq), N.sqrt(src_flux_V_err_sq)]
lpol_frac, lpol_frac_loerr, lpol_frac_hierr = self.calc_lpol_fraction(stokes, stokes_err) # linear pol fraction
lpol_ang, lpol_ang_err = self.calc_lpol_angle(stokes, stokes_err) # linear pol angle
cpol_frac, cpol_frac_loerr, cpol_frac_hierr = self.calc_cpol_fraction(stokes, stokes_err) # circular pol fraction
tpol_frac, tpol_frac_loerr, tpol_frac_hierr = self.calc_tpol_fraction(stokes, stokes_err) # total pol fraction
src.lpol_fraction = lpol_frac
src.lpol_fraction_loerr = lpol_frac_loerr
src.lpol_fraction_hierr = lpol_frac_hierr
src.cpol_fraction = cpol_frac
src.cpol_fraction_loerr = cpol_frac_loerr
src.cpol_fraction_hierr = cpol_frac_hierr
src.tpol_fraction = tpol_frac
src.tpol_fraction_loerr = tpol_frac_loerr
src.tpol_fraction_hierr = tpol_frac_hierr
src.lpol_angle = lpol_ang
src.lpol_angle_err = lpol_ang_err
if bar.started:
bar.increment()
bar.stop()
img.completed_Ops.append('polarisation')
####################################################################################
def calc_lpol_fraction(self, stokes, err):
""" Calculate linear polarisation fraction and error from:
stokes = [I, Q, U, V] and err = [Ierr, Qerr, Uerr, Verr]
"""
I, Q, U, V = stokes
Ierr, Qerr, Uerr, Verr = err
QUerr = N.mean([Qerr, Uerr])
stokes_lpol = [I, Q, U, 0.0]
err_lpol = [Ierr, Qerr, Uerr, 0.0]
lfrac, loerr, uperr, Iup, Qup, Uup, Vup = self.estimate_err_frac_with_limits(stokes_lpol, err_lpol)
# If all are detections, debias and use error propagation instead
if not Iup and not Qup and not Uup:
lpol = N.sqrt(Q**2 + U**2)
lpol_debiased = self.debias(lpol, QUerr) # debias (to first order)
if lpol_debiased > 0.0:
lfrac = lpol_debiased / I
dlfrac = lfrac * N.sqrt((Ierr/I)**2 + (Q*Qerr/lpol_debiased**2)**2 + (U*Uerr/lpol_debiased**2)**2)
else:
# if debiased fraction is consistent with zero, estimate a ballpark error with biased value
lfrac = 0.0
lpolsq = Q**2 + U**2
dlfrac = N.sqrt(lpolsq) / I * N.sqrt((Ierr/I)**2 + (Q*Qerr/lpolsq)**2 + (U*Uerr/lpolsq)**2)
loerr = dlfrac
uperr = dlfrac
lfrac, loerr, uperr = self.check_frac(lfrac, loerr, uperr)
return lfrac, loerr, uperr
####################################################################################
def calc_cpol_fraction(self, stokes, err):
""" Calculate circular polarisation fraction and error from:
stokes = [I, Q, U, V] and err = [Ierr, Qerr, Uerr, Verr]
"""
I, Q, U, V = stokes
Ierr, Qerr, Uerr, Verr = err
stokes_cpol = [I, 0.0, 0.0, V]
err_cpol = [Ierr, 0.0, 0.0, Verr]
cfrac, loerr, uperr, Iup, Qup, Uup, Vup = self.estimate_err_frac_with_limits(stokes_cpol, err_cpol)
# If all are detections, debias and use error propagation instead
if not Iup and not Vup:
cfrac = abs(V) / I
dcfrac = cfrac * N.sqrt((Ierr/I)**2 + (Verr/V)**2)
loerr = dcfrac
uperr = dcfrac
cfrac, loerr, uperr = self.check_frac(cfrac, loerr, uperr)
return cfrac, loerr, uperr
####################################################################################
def calc_tpol_fraction(self, stokes, err):
""" Calculate total polarisation fraction and error from:
stokes = [I, Q, U, V] and err = [Ierr, Qerr, Uerr, Verr]
"""
I, Q, U, V = stokes
Ierr, Qerr, Uerr, Verr = err
QUerr = N.mean([Qerr, Uerr])
tfrac, loerr, uperr, Iup, Qup, Uup, Vup = self.estimate_err_frac_with_limits(stokes, err)
# If all are detections, debias and use error propagation instead
if not Iup and not Qup and not Uup and not Vup:
lpol = N.sqrt(Q**2 + U**2)
lpol_debiased = self.debias(lpol, QUerr)
tpol_debiased = N.sqrt(Q**2 + U**2 + V**2) - (lpol - lpol_debiased) # debias (to first order)
if tpol_debiased > 0.0:
tfrac = tpol_debiased / I
dtfrac = tfrac * N.sqrt((Ierr/I)**2 + (Q*Qerr/tpol_debiased**2)**2 + (U*Uerr/tpol_debiased**2)**2 + (V*Verr/tpol_debiased**2)**2)
else:
# if debiased fraction is consistent with zero, estimate a ballpark error with biased value
tfrac = 0.0
tpolsq = Q**2 + U**2 + V**2
dtfrac = N.sqrt(tpolsq) / I * N.sqrt((Ierr/I)**2 + (Q*Qerr/tpolsq)**2 + (U*Uerr/tpolsq)**2 + (V*Verr/tpolsq)**2)
loerr = dtfrac
uperr = dtfrac
tfrac, loerr, uperr = self.check_frac(tfrac, loerr, uperr)
return tfrac, loerr, uperr
####################################################################################
def calc_lpol_angle(self, stokes, err, sig=3.0):
""" Calculate linear polarisation angle and error (in degrees) from:
stokes = [I, Q, U, V] and err = [Ierr, Qerr, Uerr, Verr]
"""
I, Q, U, V = stokes
Ierr, Qerr, Uerr, Verr = err
if abs(Q) < sig*abs(Qerr) and abs(U) < sig*abs(Uerr):
return 0.0, 0.0
ang = 0.5 * N.arctan2(U, Q) * 180.0 / N.pi
dang = 0.5 / (1.0 + (U/Q)**2) * N.sqrt((Uerr/Q)**2 + (U*Qerr/Q**2)**2) * 180.0 / N.pi
return ang, dang
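        # Worked example (illustrative): for well-detected Q = U > 0 the angle
        # is 0.5 * arctan2(U, Q) = 0.5 * 45 deg = 22.5 deg, measured from North
        # towards East.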
####################################################################################
def debias(self, pflux, QUerr):
""" Debiases the linearly polarised flux using the same method
used for the NVSS catalog (see ftp://ftp.cv.nrao.edu/pub/nvss/catalog.ps).
"""
data_table=N.array([[1.253,1.2530], [1.256,1.1560], [1.266,1.0660], [1.281,0.9814],
[1.303,0.9030], [1.330,0.8304], [1.364,0.7636], [1.402,0.7023],
[1.446,0.6462], [1.495,0.5951], [1.549,0.5486], [1.606,0.5064],
[1.668,0.4683], [1.734,0.4339], [1.803,0.4028], [1.875,0.3749],
[1.950,0.3498], [2.027,0.3273], [2.107,0.3070], [2.189,0.2888],
[2.272,0.2724], [2.358,0.2576], [2.444,0.2442], [2.532,0.2321],
[2.621,0.2212], [2.711,0.2112], [2.802,0.2021], [2.894,0.1938],
[2.986,0.1861], [3.079,0.1791], [3.173,0.1726], [3.267,0.1666],
[3.361,0.1610], [3.456,0.1557], [3.551,0.1509], [3.646,0.1463],
[3.742,0.1420], [3.838,0.1380], [3.934,0.1342], [4.031,0.1306]])
pnorm = pflux / QUerr
if pnorm <= data_table[0,0]:
bias = data_table[0,1]
else:
if pnorm >= data_table[-1,0]:
bias = 1.0 / (2.0 * pnorm) + 1.0 / (8.0 * pnorm**3)
pnorm = pnorm - bias
bias = 1.0 / (2.0 * pnorm) + 1.0 / (8.0 * pnorm**3)
else:
bias = N.interp(pnorm, data_table[:,0], data_table[:,1])
pflux_debiased = pflux - bias * QUerr
return pflux_debiased
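        # Worked example (illustrative): for pflux = 10 * QUerr, pnorm = 10 lies
        # beyond the end of the table, so the asymptotic expansion gives a bias
        # of roughly 0.05 * QUerr and a debiased flux of about 9.95 * QUerr,
        # close to the Ricean expectation sqrt(pflux**2 - QUerr**2).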
def check_frac(self, frac, loerr, uperr):
if frac < 0.0:
frac = 0.0
if frac > 1.0:
frac = 1.0
if loerr < 0.0:
loerr = frac
if frac + uperr > 1.0:
uperr = 1.0 - frac
return frac, loerr, uperr
####################################################################################
def setpara_bdsm(self, img):
chain = [Op_preprocess, Op_rmsimage(), Op_threshold(), Op_islands(),
Op_gausfit(), Op_gaul2srl(), Op_make_residimage()]
opts = img.opts.to_dict()
if img.opts.pi_thresh_isl is not None:
opts['thresh_isl'] = img.opts.pi_thresh_isl
if img.opts.pi_thresh_pix is not None:
opts['thresh_pix'] = img.opts.pi_thresh_pix
opts['thresh'] = 'hard'
opts['polarisation_do'] = False
opts['filename'] = ''
opts['detection_image'] = ''
opts['output_all'] = False
ops = []
for op in chain:
if isinstance(op, type):
ops.append(op())
else:
ops.append(op)
return ops, opts
def estimate_err_frac_with_limits(self, stokes, err, sig=3.0):
"""Estimate reasonable errors on polarization fraction when upper
limits are present.
"""
I, Q, U, V = stokes
Ierr, Qerr, Uerr, Verr = err
Iup = False
Qup = False
Uup = False
Vup = False
if abs(I) < sig * abs(Ierr):
Iup = True
if abs(Q) < sig * abs(Qerr):
Q = 0.0
Qup = True
if abs(U) < sig * abs(Uerr):
U = 0.0
Uup = True
if abs(V) < sig * abs(Verr):
V = 0.0
Vup = True
pol = N.sqrt(Q**2 + U**2 + V**2)
frac = pol / I
if frac < 0.0:
frac = 0.0
if frac > 1.0:
frac = 1.0
if Iup:
if Qup and Uup and Vup:
frac = 0.0
loerr = 0.0
uperr = 1.0
else:
loerr = frac - N.sqrt((abs(Q) - Qerr)**2 + (abs(U) - Uerr)**2 + (abs(V) - Verr)**2) / abs(Ierr)
uperr = 1.0 - frac
else:
loerr = frac - N.sqrt((abs(Q) - Qerr)**2 + (abs(U) - Uerr)**2 + (abs(V) - Verr)**2) / (I + Ierr)
uperr = N.sqrt((abs(Q) + Qerr)**2 + (abs(U) + Uerr)**2 + (abs(V) + Verr)**2) / (I - Ierr) - frac
if loerr < 0.0:
loerr = frac
if frac + uperr > 1.0:
uperr = 1.0 - frac
return frac, loerr, uperr, Iup, Qup, Uup, Vup
def double_bbox(self, bbox, shape):
"""Expand bbox of the island by factor of 2
bbox is isl.bbox
shape is img.shape
"""
def expand(bbox, shape):
bbox_width = (bbox.stop - bbox.start)/2.0
return slice(max(0, bbox.start - bbox_width), min(shape, bbox.stop + bbox_width))
return map(expand, bbox, shape)
def renumber_islands(img):
"""Renumbers island_ids (after, e.g., removing one)
Also renumbers the pyrank image.
"""
for i, isl in enumerate(img.islands):
isl.island_id = i
for g in isl.gaul:
g.island_id = i
for dg in isl.dgaul:
dg.island_id = i
if i == 0:
img.pyrank[tuple(isl.bbox)] = N.invert(isl.mask_active) - 1
else:
img.pyrank[tuple(isl.bbox)] = N.invert(isl.mask_active) * isl.island_id - isl.mask_active
gaussian_list = [g for isl in img.islands for g in isl.gaul]
img.gaussians = gaussian_list
def init_gaus_attr(gaussian):
### Insert polarization attributes
gaussian.total_flux_Q_def = Float(doc="Total flux density (Jy), Stokes Q", colname='Total_Q',
units='Jy')
gaussian.total_fluxE_Q_def = Float(doc="Error in total flux density (Jy), Stokes Q", colname='E_Total_Q',
units='Jy')
gaussian.total_flux_U_def = Float(doc="Total flux density (Jy), Stokes U", colname='Total_U',
units='Jy')
gaussian.total_fluxE_U_def = Float(doc="Error in total flux density (Jy), Stokes U", colname='E_Total_U',
units='Jy')
gaussian.total_flux_V_def = Float(doc="Total flux density (Jy), Stokes V", colname='Total_V',
units='Jy')
gaussian.total_fluxE_V_def = Float(doc="Error in total flux density (Jy), Stokes V", colname='E_Total_V',
units='Jy')
gaussian.lpol_fraction_def = Float(doc="Linear polarisation fraction",
colname='Linear_Pol_frac', units=None)
gaussian.lpol_fraction_loerr_def = Float(doc="Linear polarisation fraction low error",
colname='Elow_Linear_Pol_frac', units=None)
gaussian.lpol_fraction_hierr_def = Float(doc="Linear polarisation fraction high error",
colname='Ehigh_Linear_Pol_frac', units=None)
gaussian.cpol_fraction_def = Float(doc="Circular polarisation fraction",
colname='Circ_Pol_Frac', units=None)
gaussian.cpol_fraction_loerr_def = Float(doc="Circular polarisation fraction low error",
colname='Elow_Circ_Pol_Frac', units=None)
gaussian.cpol_fraction_hierr_def = Float(doc="Circular polarisation fraction high error",
colname='Ehigh_Circ_Pol_Frac', units=None)
gaussian.tpol_fraction_def = Float(doc="Total polarisation fraction",
colname='Total_Pol_Frac', units=None)
gaussian.tpol_fraction_loerr_def = Float(doc="Total polarisation fraction low error",
colname='Elow_Total_Pol_Frac', units=None)
gaussian.tpol_fraction_hierr_def = Float(doc="Total polarisation fraction high error",
colname='Ehigh_Total_Pol_Frac', units=None)
gaussian.lpol_angle_def = Float(doc="Polarisation angle (deg from North towards East)",
colname='Linear_Pol_Ang', units='deg')
gaussian.lpol_angle_err_def = Float(doc="Polarisation angle error (deg)",
colname='E_Linear_Pol_Ang', units='deg')
def init_src_attr(source):
### Insert polarization attributes
source.total_flux_Q_def = Float(doc="Total flux density (Jy), Stokes Q", colname='Total_Q',
units='Jy')
source.total_fluxE_Q_def = Float(doc="Error in total flux density (Jy), Stokes Q", colname='E_Total_Q',
units='Jy')
source.total_flux_U_def = Float(doc="Total flux density (Jy), Stokes U", colname='Total_U',
units='Jy')
source.total_fluxE_U_def = Float(doc="Error in total flux density (Jy), Stokes U", colname='E_Total_U',
units='Jy')
source.total_flux_V_def = Float(doc="Total flux density (Jy), Stokes V", colname='Total_V',
units='Jy')
source.total_fluxE_V_def = Float(doc="Error in total flux density (Jy), Stokes V", colname='E_Total_V',
units='Jy')
source.lpol_fraction_def = Float(doc="Linear polarisation fraction",
colname='Linear_Pol_frac', units=None)
source.lpol_fraction_loerr_def = Float(doc="Linear polarisation fraction low error",
colname='Elow_Linear_Pol_frac', units=None)
source.lpol_fraction_hierr_def = Float(doc="Linear polarisation fraction high error",
colname='Ehigh_Linear_Pol_frac', units=None)
source.cpol_fraction_def = Float(doc="Circular polarisation fraction",
colname='Circ_Pol_Frac', units=None)
source.cpol_fraction_loerr_def = Float(doc="Circular polarisation fraction low error",
colname='Elow_Circ_Pol_Frac', units=None)
source.cpol_fraction_hierr_def = Float(doc="Circular polarisation fraction high error",
colname='Ehigh_Circ_Pol_Frac', units=None)
source.tpol_fraction_def = Float(doc="Total polarisation fraction",
colname='Total_Pol_Frac', units=None)
source.tpol_fraction_loerr_def = Float(doc="Total polarisation fraction low error",
colname='Elow_Total_Pol_Frac', units=None)
source.tpol_fraction_hierr_def = Float(doc="Total polarisation fraction high error",
colname='Ehigh_Total_Pol_Frac', units=None)
source.lpol_angle_def = Float(doc="Polarisation angle (deg from North towards East)",
colname='Linear_Pol_Ang', units='deg')
source.lpol_angle_err_def = Float(doc="Polarisation angle error (deg)",
colname='E_Linear_Pol_Ang', units='deg')
| 30,916 | 46.2737 | 145 |
py
|
PyBDSF
|
PyBDSF-master/bdsf/cleanup.py
|
"""
Does miscellaneous jobs at the end, which assumes all other tasks are run.
"""
from __future__ import absolute_import
import numpy as N
import os
from .image import *
from . import mylogger
from . import has_pl
if has_pl:
import matplotlib.pyplot as pl
import matplotlib.cm as cm
from . import functions as func
class Op_cleanup(Op):
""" """
def __call__(self, img):
mylog = mylogger.logging.getLogger("PyBDSM.Cleanup")
### plotresults for all gaussians together
if img.opts.plot_allgaus and has_pl:
pl.figure()
pl.title('All gaussians including wavelet images')
allgaus = img.gaussians
if hasattr(img, 'atrous_gaussians'):
for gg in img.atrous_gaussians:
allgaus += gg
for g in allgaus:
ellx, elly = func.drawellipse(g)
pl.plot(ellx, elly, 'r')
from math import log10
bdir = img.basedir + '/misc/'
if not os.path.isdir(bdir): os.makedirs(bdir)
im_mean = img.clipped_mean
im_rms = img.clipped_rms
low = 1.1*abs(img.min_value)
low1 = 1.1*abs(N.min(im_mean-im_rms*5.0))
if low1 > low: low = low1
vmin = log10(im_mean-im_rms*5.0 + low)
vmax = log10(im_mean+im_rms*15.0 + low)
im = N.log10(img.ch0_arr + low)
pl.imshow(N.transpose(im), origin='lower', interpolation='nearest',vmin=vmin, vmax=vmax, \
cmap=cm.gray); pl.colorbar()
pl.savefig(bdir+'allgaussians.png')
pl.close()
img.completed_Ops.append('cleanup')
| 1,688 | 29.160714 | 102 |
py
|
PyBDSF
|
PyBDSF-master/bdsf/islands.py
|
"""Module islands.
Defines operation Op_islands which does island detection.
Current implementation uses scipy.ndimage operations for island detection.
While it's implemented to work for images of arbitrary dimensionality,
the bug in the current version of scipy (0.6) often causes crashes
(or just wrong results) for 3D inputs.
If this (scipy.ndimage.label) isn't fixed by the time we need 3D source
extraction, one will have to adopt my old pixel-runs algorithm for 3D data.
Check out islands.py rev. 1362 from repository for it.
"""
from __future__ import absolute_import
from __future__ import division
import numpy as N
import scipy.ndimage as nd
from .image import *
from . import mylogger
from . import functions as func
from .output import write_islands
from .readimage import Op_readimage
from .preprocess import Op_preprocess
from .rmsimage import Op_rmsimage
from .threshold import Op_threshold
from .collapse import Op_collapse
import os
class Op_islands(Op):
"""Detect islands of emission in the image
All detected islands are stored in the list img.islands,
where each individual island is represented as an instance
of class Island.
The option to detect islands on a different "detection"
image is also available. This option is useful for example
when a primary beam correction is used -- it is generally
better to detect sources on the uncorrected image, but
to measure them on the corrected image.
Prerequisites: module rmsimage should be run first.
"""
def __call__(self, img):
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Islands")
opts = img.opts
minsize = opts.minpix_isl
if minsize is None:
minsize = int(img.pixel_beamarea()/3.0) # 1/3 of beam area in pixels
if minsize < 6:
minsize = 6 # Need at least 6 pixels to obtain good fits
mylogger.userinfo(mylog, "Minimum number of pixels per island", '%i' %
minsize)
img.minpix_isl = minsize
maxsize = opts.maxpix_isl
if maxsize is None:
maxsize = N.inf
img.maxpix_isl = maxsize
if opts.detection_image != '':
# Use a different image for island detection. The detection
# image and the measurement image must have the same shape
# and be registered. Otherwise, one could reproject the
# detection image using, e.g., the Kapteyn package.
#
# First, set up up an Image object and run a limited
# op_chain.
from . import _run_op_list
mylogger.userinfo(mylog, "\nDetermining islands from detection image")
det_chain, det_opts = self.setpara_bdsm(img, opts.detection_image)
det_img = Image(det_opts)
det_img.log = 'Detection image'
success = _run_op_list(det_img, det_chain)
if not success:
return
# Check that the ch0 images are the same size
ch0_map = img.ch0_arr
det_ch0_map = det_img.ch0_arr
det_shape = det_ch0_map.shape
ch0_shape = ch0_map.shape
if det_shape != ch0_shape:
raise RuntimeError("Detection image shape does not match that of input image.")
# Save the rms and mean maps derived from the detection image
img.detection_mean_arr = det_img.mean_arr
img.detection_rms_arr = det_img.rms_arr
# Run through islands and correct the image and rms, mean and max values
corr_islands = []
mean_map = img.mean_arr
rms_map = img.rms_arr
for i, isl in enumerate(det_img.islands):
islcp = isl.copy(img.pixel_beamarea(), image=ch0_map[tuple(isl.bbox)], mean=mean_map[tuple(isl.bbox)], rms=rms_map[tuple(isl.bbox)])
islcp.island_id = i
corr_islands.append(islcp)
img.islands = corr_islands
img.nisl = len(img.islands)
img.pyrank = det_img.pyrank
img.minpix_isl = det_img.minpix_isl
if opts.output_all:
write_islands(img)
if opts.savefits_rankim or opts.output_all:
func.write_image_to_file(img.use_io, img.imagename + '_pyrank.fits', img.pyrank, img)
if opts.savefits_det_rmsim or opts.output_all:
resdir = img.basedir + '/background/'
os.makedirs(resdir, exist_ok=True)
func.write_image_to_file(img.use_io, img.imagename + '.detection_rmsd_I.fits',
img.detection_rms_arr, img, resdir)
mylog.info('%s %s' % ('Writing ', resdir+img.imagename+'.detection_rmsd_I.fits'))
if opts.savefits_det_meanim or opts.output_all:
resdir = img.basedir + '/background/'
os.makedirs(resdir, exist_ok=True)
func.write_image_to_file(img.use_io, img.imagename + '.detection_mean_I.fits',
img.detection_mean_arr, img, resdir)
mylog.info('%s %s' % ('Writing ', resdir+img.imagename+'.detection_mean_I.fits'))
mylogger.userinfo(mylog, "\nContinuing processing using primary image")
else:
if opts.src_ra_dec is not None:
mylogger.userinfo(mylog, "Constructing islands at user-supplied source locations")
img.islands = self.coords_to_isl(img, opts)
else:
img.islands = self.ndimage_alg(img, opts)
img.nisl = len(img.islands)
mylogger.userinfo(mylog, "Number of islands found", '%i' %
len(img.islands))
ch0_map = img.ch0_arr
ch0_shape = ch0_map.shape
pyrank = N.zeros(ch0_shape, dtype=N.int32)
for i, isl in enumerate(img.islands):
isl.island_id = i
pyrank[tuple(isl.bbox)] += N.invert(isl.mask_active) * (i + 1)
pyrank -= 1 # align pyrank values with island ids and set regions outside of islands to -1
if opts.output_all:
write_islands(img)
if opts.savefits_rankim or opts.output_all:
func.write_image_to_file(img.use_io, img.imagename + '_pyrank.fits', pyrank, img)
img.pyrank = pyrank
img.completed_Ops.append('islands')
return img
def ndimage_alg(self, img, opts):
"""Island detection using scipy.ndimage
Use scipy.ndimage.label to detect islands of emission in the image.
        An island is defined as a group of tightly connected (8-connectivity
        for 2D images) pixels with emission.
The following cuts are applied:
- pixel is considered to have emission if it is 'thresh_isl' times
higher than RMS.
- Island should have at least 'minsize' active pixels
        - There should be at least 1 pixel in the island which is 'thresh_pix'
times higher than noise (peak clip).
Parameters:
image, mask: arrays with image data and mask
mean, rms: arrays with mean & rms maps
thresh_isl: threshold for 'active pixels'
thresh_pix: threshold for peak
minsize: minimal acceptable island size
Function returns a list of Island objects.
"""
### islands detection
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Islands")
image = img.ch0_arr
mask = img.mask_arr
rms = img.rms_arr
mean = img.mean_arr
thresh_isl = opts.thresh_isl
thresh_pix = img.thresh_pix
# act_pixels is true if significant emission
if img.masked:
act_pixels = ~(mask.copy())
act_pixels[~mask] = (image[~mask]-mean[~mask])/thresh_isl >= rms[~mask]
else:
act_pixels = (image-mean)/thresh_isl >= rms
# dimension of image
rank = len(image.shape)
# generates matrix for connectivity, in this case, 8-conn
connectivity = nd.generate_binary_structure(rank, rank)
# labels = matrix with value = (initial) island number
labels, count = nd.label(act_pixels, connectivity)
# slices has limits of bounding box of each such island
slices = nd.find_objects(labels)
img.island_labels = labels
### apply cuts on island size and peak value
pyrank = N.zeros(image.shape, dtype=N.int32)
res = []
for idx, s in enumerate(slices):
idx += 1 # nd.labels indices are counted from 1
# number of pixels inside bounding box which are in island
isl_size = (labels[s] == idx).sum()
isl_peak = nd.maximum(image[s], labels[s], idx)
isl_maxposn = tuple(N.array(N.unravel_index(N.nanargmax(image[s]), image[s].shape))+\
N.array((s[0].start, s[1].start)))
if (isl_size >= img.minpix_isl) and (isl_size <= img.maxpix_isl) and (isl_peak - mean[isl_maxposn])/thresh_pix > rms[isl_maxposn]:
isl = Island(image, mask, mean, rms, labels, s, idx, img.pixel_beamarea())
res.append(isl)
pyrank[tuple(isl.bbox)] += N.invert(isl.mask_active)*idx // idx
return res
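        # Minimal sketch (illustrative, standalone) of the detection step above:
        # pixels with (image - mean) >= thresh_isl * rms are labelled with
        # 8-connectivity, and each label that passes the size and peak cuts
        # becomes an Island, e.g.
        #   act = (image - mean) >= thresh_isl * rms
        #   labels, count = nd.label(act, nd.generate_binary_structure(2, 2))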
def coords_to_isl(self, img, opts):
"""Construct islands around given coordinates with given size.
Returns a list of island objects.
"""
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Islands")
coords = opts.src_ra_dec # list of RA and Dec tuples
isl_radius_pix = opts.src_radius_pix
if isl_radius_pix is None:
isl_radius_pix = img.beam2pix(img.beam)[0] # twice beam major axis radius at half max (= FWHM)
res = []
for idx, coord in enumerate(coords):
idx += 1 # nd.labels indices are counted from 1
isl_posn_pix = img.sky2pix(coord)
image = img.ch0_arr
mask = img.mask_arr
rms = img.rms_arr
mean = img.mean_arr
labels = func.make_src_mask(image.shape,
isl_posn_pix, isl_radius_pix)
if img.masked:
aper_mask = N.where(labels.astype(bool) & ~mask)
else:
aper_mask = N.where(labels.astype(bool))
if N.size(aper_mask) >= img.minpix_isl and N.size(aper_mask) <= img.maxpix_isl:
labels[aper_mask] = idx
s = [slice(max(0, isl_posn_pix[0] - isl_radius_pix - 1),
min(image.shape[0], isl_posn_pix[0] + isl_radius_pix + 1)),
slice(max(0, isl_posn_pix[1] - isl_radius_pix - 1),
min(image.shape[1], isl_posn_pix[1] + isl_radius_pix + 1))]
isl = Island(image, mask, mean, rms, labels, s, idx,
img.pixel_beamarea())
res.append(isl)
return res
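# --- Illustrative sketch (not part of PyBDSF) --------------------------------
# coords_to_isl relies on a circular aperture mask around each requested
# position (func.make_src_mask). A minimal numpy stand-in (details of the real
# helper may differ) looks like this:
import numpy as np
def circular_mask(shape, centre, radius):
    """Boolean mask that is True within `radius` pixels of `centre` = (y, x)."""
    yy, xx = np.indices(shape)
    return (yy - centre[0]) ** 2 + (xx - centre[1]) ** 2 <= radius ** 2
mask = circular_mask((100, 100), centre=(50, 60), radius=10.5)
print(mask.sum())   # number of pixels inside the aperture, roughly pi * radius**2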
def setpara_bdsm(self, img, det_file):
chain=[Op_readimage(), Op_collapse(), Op_preprocess, Op_rmsimage(),
Op_threshold(), Op_islands()]
opts = img.opts.to_dict()
opts['filename'] = det_file
opts['detection_image'] = ''
opts['polarisation_do'] = False
opts['rmsmean_map_filename'] = opts['rmsmean_map_filename_det']
opts['det_rmsmean_map_filename'] = None
ops = []
for op in chain:
if isinstance(op, type):
ops.append(op())
else:
ops.append(op)
return ops, opts
from .image import *
class Island(object):
"""Instances of this class represent islands of emission in the image.
Its primary use is as a container for all kinds of data describing the island.
"""
def __init__(self, img, mask, mean, rms, labels, bbox, idx,
beamarea, origin=None, noise_mask=None, copy=False):
"""Create Island instance.
Parameters:
img, mask, mean, rms: arrays describing image
labels: labels array from scipy.ndimage
bbox: slices
"""
# Add attribute definitions needed for output
self.island_id_def = Int(doc="Island id, starting from 0", colname='Isl_id')
self.shapelet_basis_def = String(doc="Coordinate system for shapelet decomposition (cartesian/polar)",
colname='Basis', units=None)
self.shapelet_beta_def = Float(doc="Value of shapelet scale beta", colname='Beta', units=None)
self.shapelet_nmax_def = Int(doc="Maximum value of shapelet order", colname='NMax', units=None)
self.shapelet_posn_sky_def = List(Float(), doc="Posn (RA, Dec in deg) of shapelet centre",
colname=['RA', 'DEC'], units=['deg', 'deg'])
self.shapelet_posn_skyE_def = List(Float(), doc="Error on sky coordinates of shapelet centre",
colname=['E_RA', 'E_DEC'], units=['deg', 'deg'])
self.shapelet_cf_def = NArray(doc="Coefficient matrix of the shapelet decomposition",
colname='Coeff_matrix', units=None)
if not copy:
### we make bbox slightly bigger
self.oldbbox = bbox
self.oldidx = idx
bbox = self.__expand_bbox(bbox, img.shape)
origin = [b.start for b in bbox] # easier in case ndim > 2
data = img[tuple(bbox)]
bbox_rms_im = rms[tuple(bbox)]
bbox_mean_im = mean[tuple(bbox)]
### create (inverted) masks
# Note that mask_active is the island mask; mask_noisy marks only
# the noisy pixels in the island image. If you want to mask the
# noisy pixels, set the final mask to:
# mask = mask_active + mask_noisy
isl_mask = (labels[tuple(bbox)] == idx)
noise_mask = (labels[tuple(bbox)] == 0)
N.logical_or(noise_mask, isl_mask, noise_mask)
### invert masks
N.logical_not(isl_mask, isl_mask)
N.logical_not(noise_mask, noise_mask)
if isinstance(mask, N.ndarray):
noise_mask[mask[tuple(bbox)]] = True
isl_mask[mask[tuple(bbox)]] = True
else:
if origin is None:
origin = [b.start for b in bbox]
isl_mask = mask
if noise_mask is None:
noise_mask = mask
data = img
bbox_rms_im = rms
bbox_mean_im = mean
self.oldbbox = bbox
self.oldidx = idx
### finish initialization
isl_size = N.sum(~isl_mask)
self.island_id = idx
self.bbox = bbox
self.origin = origin
self.image = data
self.mask_active = isl_mask
self.mask_noisy = noise_mask
self.shape = data.shape
self.size_active = isl_size
self.max_value = N.max(self.image[~self.mask_active])
in_bbox_and_unmasked = N.where(~N.isnan(bbox_rms_im))
self.rms = bbox_rms_im[in_bbox_and_unmasked].mean()
in_bbox_and_unmasked = N.where(~N.isnan(bbox_mean_im))
self.mean = bbox_mean_im[in_bbox_and_unmasked].mean()
self.islmean = bbox_mean_im[in_bbox_and_unmasked].mean()
self.total_flux = N.nansum(self.image[in_bbox_and_unmasked])/beamarea
pixels_in_isl = N.sum(~N.isnan(self.image[self.mask_active])) # number of unmasked pixels assigned to current island
self.total_fluxE = func.nanmean(bbox_rms_im[in_bbox_and_unmasked]) * N.sqrt(pixels_in_isl/beamarea) # Jy
self.border = self.get_border()
self.gaul = []
self.fgaul = []
self.sources = []
self.gresid_mean = 0.0
self.gresid_rms = 0.0
def __setstate__(self, state):
"""Needed for multiprocessing"""
self.mean = state['mean']
self.rms = state['rms']
self.image = state['image']
self.islmean = state['islmean']
self.mask_active = state['mask_active']
self.mask_noisy = state['mask_noisy']
self.size_active = state['size_active']
self.shape = state['shape']
self.origin = state['origin']
self.island_id = state['island_id']
self.oldidx = state['oldidx']
self.bbox = state['bbox']
def __getstate__(self):
"""Needed for multiprocessing"""
state = {}
state['mean'] = self.mean
state['rms'] = self.rms
state['image'] = self.image
state['islmean'] = self.islmean
state['mask_active'] = self.mask_active
state['mask_noisy'] = self.mask_noisy
state['size_active'] = self.size_active
state['shape'] = self.shape
state['origin'] = self.origin
state['island_id'] = self.island_id
state['oldidx'] = self.oldidx
state['bbox'] = self.bbox
return state
### do map etc in case of ndim image
def __expand_bbox(self, bbox, shape):
"""Expand bbox of the image by 1 pixel"""
def __expand(bbox, shape):
return slice(int(max(0, bbox.start - 1)), int(min(shape, bbox.stop + 1)))
ebbox = [__expand(b, shape[i]) for i, b in enumerate(bbox)]
return ebbox
def copy(self, pixel_beamarea, image=None, mean=None, rms=None):
mask = self.mask_active
noise_mask = self.mask_noisy
if image is None:
image = self.image
if mean is None:
mean = N.zeros(mask.shape, dtype=N.float32) + self.mean
if rms is None:
rms = N.zeros(mask.shape, dtype=N.float32) + self.rms
bbox = self.bbox
idx = self.oldidx
origin = self.origin
return Island(image, mask, mean, rms, None, bbox, idx, pixel_beamarea,
origin=origin, noise_mask=noise_mask, copy=True)
def get_border(self):
""" From all valid island pixels, generate the border."""
mask = ~self.mask_active
border = N.transpose(N.asarray(N.where(mask ^ nd.binary_erosion(mask)))) + self.origin
return N.transpose(N.array(border))
| 18,339 | 41.258065 | 148 | py |

PyBDSF
PyBDSF-master/bdsf/opts.py
"""PyBDSF options
Options are essentially user-controllable parameters passed into PyBDSF
operations, and allow for end-users to control the exact details of how
calculations are done.
The doc string should give a short description of the option, followed by a
line break ('\n') then a long, detailed description. The short description can
then be split off using "str(v.doc()).split('\n')[0]".
The group string can be used to group suboptions under a parent option. The
group string should be the name of the parent option, which must be Bool
(except for the "hidden" group, which will suppress listing of the option; the
option can still be set as normal).
In general it's better to specify newly added options directly in this file, so
one can oversee them all. But it's also possible to extend it at run-time, and
under some circumstances (e.g. pybdsf installed system-wide, and there is no
way to modify this file) this might be the only option to do so. An example of
such extension follows:
==== file newmodule.py ====
from image import Op
class Op_new_op(Op):
## do something useful here
## we need to add option my_new_opt
pass
## this will extend Opts class at runtime and ensure that
## type-checking works properly.
Opts.my_new_opt = Float(33, doc="docstring")
"""
from __future__ import absolute_import
from .tc import Int, Float, Bool, String, Tuple, Enum, \
Option, NArray, Instance, tInstance, List, Any, TCInit, tcError
try:
# For Python 2
basestring = basestring
except NameError:
basestring = str
class Opts(object):
"""Class Opts -- user-controllable parameters."""
advanced_opts = Bool(False,
doc = "Show advanced options")
atrous_do = Bool(False,
doc = "Decompose Gaussian residual image "\
"into multiple scales\n"\
"If True, then the Gaussian-subtracted "\
"residual image is decomposed into multiple "\
"scales using an a-trous wavelet transform.\n"\
"This option is most useful when there is "\
"significant extended emission in the image. "\
"If the image contains only point sources, "\
"it is best to set this to Fasle.")
beam = Option(None, Tuple(Float(), Float(), Float()),
doc = "FWHM of restoring beam. Specify as (maj, "\
"min, pos ang E of N) in degrees. "\
"E.g., beam = (0.06, 0.02, 13.3). None => "\
"get from header\n"\
"For more than one channel, use the beam_spectrum "\
"parameter. "\
"If the beam is not given "\
"by the user, then it is looked for in the "\
"image header. If not found, then an error "\
"is raised. PyBDSF will not work without "\
"knowledge of the restoring beam.")
filename = String(doc = "Input image file name\n"\
"The input image can be a FITS or CASA 2-, "\
"3-, or 4-D cube.")
flagging_opts = Bool(False,
doc = "Show options for Gaussian flagging\n"\
"Gaussians which are likely in error "\
"(e.g., very small or very large Gaussians) "\
"are flagged according to a number of criteria, "\
"which the user may control. "\
"Flags are cumulative (i.e., if multiple "\
"flagging criteria are met, the respective "\
"flag values are added to produce the final "\
"flag value). Flag values are defined as follows:\n"\
"If flag_minsnr: flag + 1\n"\
"If flag_maxsnr: flag + 2\n"\
"If flag_bordersize: flag + 4 (x) or 8 (y)\n"\
"If flag_maxsize_isl: flag + 16 (x) or 32 (y)\n"\
"If flag_maxsize_bm: flag + 64\n"\
"If flag_minsize_bm: flag + 128\n"\
"If flag_maxsize_fwhm: flag + 256")
frequency = Option(None, Float(),
doc = "Frequency in Hz of input image. "\
"E.g., frequency = 74e6. None => get from header.\n"\
"For more than one channel, use the frequency_sp "\
"parameter. If the frequency is not given "\
"by the user, then it is looked for in the "\
"image header. If not found, then an error "\
"is raised. PyBDSF will not work without "\
"knowledge of the frequency.")
interactive = Bool(False,
doc = "Use interactive mode\n"\
"In interactive mode, plots are displayed at "\
"various stages of the processing so that "\
"the user may check the progress of the fit.\n"\
"First, plots of the rms and mean background images are "\
"displayed along with the islands found, before "\
"fitting of Gaussians takes place. The user should "\
"verify that the islands and maps are reasonable "\
"before preceding.\n"\
"Next, if atrous_do is True, the fits to each "\
"wavelet scale are shown. The wavelet fitting "\
"may be truncated at the current scale if "\
"desired.\nLastly, the final results are shown.")
mean_map = Enum('default', 'zero', 'const', 'map',
doc = "Background mean map: 'default' => calc whether "\
"to use or not, 'zero' => 0, 'const' => "\
"clipped mean, 'map' => use 2-D map\n"\
"This parameter determines "\
"how the background mean map is computed "\
"and how it is used further.\nIf 'const', then "\
"the value of the clipped "\
"mean of the entire image (set by the kappa_clip "\
"option) is used as the "\
"background mean map.\nIf 'zero', then a value "\
"of zero is used.\nIf 'map', then "\
"the 2-dimensional mean map is computed and used. "\
"The resulting mean map is largely determined by "\
"the value of the rms_box parameter (see the "\
"rms_box parameter for more information).\nIf "\
"'default', then PyBDSF will attempt to "\
"determine automatically whether to use "\
"a 2-dimensional map or a constant one as "\
"follows. First, "\
"the image is assumed to be confused if "\
"bmpersrc_th < 25 or the ratio of the "\
"clipped mean to rms (clipped mean/clipped rms) "\
"is > 0.1, else the image is not confused. "\
"Next, the mean map is checked to "\
"see if its spatial variation is significant. If "\
"so, then a 2-D map is used and, if not, "\
"then the mean map is set to either 0.0 or a "\
"constant depending on whether the image is "\
"thought to be confused or not.\nGenerally, "\
"'default' works well. However, if there is "\
"significant extended emission in the image, "\
"it is often necessary to force the use of a "\
"constant mean map using either 'const' or "\
"'mean'.")
multichan_opts = Bool(False,
doc = "Show options for multi-channel "\
"images")
output_opts = Bool(False,
doc = "Show output options")
polarisation_do = Bool(False,
doc = "Find polarisation properties\n"\
"First, if pi_fit = True, source detection is done on the polarized intensity "\
"(PI) image and sources not detected in "\
"the Stokes I image are identified. The thresholds for island "\
"detection can be controlled using the pi_thresh_isl and "\
"pi_thresh_pix parameters.\n"\
"Next, for any such PI-only sources, "\
"plus all sources detected in the Stokes I image, "\
"the flux densities in each of the other Stokes images are found. "\
"Flux densities are calculated by fitting for the normalization of the Gaussians "\
"found from the Stokes I or PI images."\
"Lastly, the polarisation fraction and angle for each source "\
"are calculated.\n"\
"For linearly polarised emission, the signal and noise "\
"add vectorially, giving a Rice distribution "\
"(Vinokur 1965) instead of a Gaussian one. To correct "\
"for this, a bias is estimated and removed from the "\
"polarisation fraction using the same method used for the "\
"NVSS catalog (see ftp://ftp.cv.nrao.edu/pub/nvss/catalog.ps). "\
"Errors on the linear and total polarisation fractions "\
"and polarisation angle are estimated using the debiased "\
"polarised flux density and standard error propagation. See "\
"Sparks & Axon (1999) for a more detailed treatment.")
psf_vary_do = Bool(False,
doc = "Calculate PSF variation across image")
rm_do = Bool(False,
doc = "Find rotation measure properties",
group = 'hidden')
rms_box = Option(None, Tuple(Int(), Int()),
doc = "Box size, step size for rms/mean map "\
"calculation. Specify as (box, step) in "\
"pixels. E.g., rms_box = (40, 10) => box "\
"of 40x40 pixels, step of 10 pixels. "\
"None => calculate inside program\n"\
"This is a tuple of two integers and is probably the "\
"most important input parameter for PyBDSF. The first "\
"integer, boxsize, is the size of the 2-D sliding box "\
"for calculating the rms and mean over the entire image. "\
"The second, stepsize, is the number of pixels by which "\
"this box is moved for the next measurement. If None, "\
"then suitable values are calculated internally.\n"\
"In general, it is best to choose a box size that "\
"corresponds to the typical scale of artifacts in the "\
"image, such as those that are common around bright "\
"sources. Too small of a box size will effectively "\
"raise the local rms near a source so much that a "\
"source may not be fit at all; too large a box size "\
"can result in underestimates of the rms due to "\
"oversmoothing. A step size of 1/3 "\
"to 1/4 of the box size usually works well.\n"\
"If adaptive_rms_box is True, the rms_box parameter "\
"sets the large-scale box size that is used far "\
"from bright sources.")
rms_map = Enum(None, True, False,
doc = "Background rms map: True => "\
"use 2-D rms map; False => use constant rms; " \
"None => calculate inside program\n"\
"If True, then the 2-D background rms image is "\
"computed and used. If False, then a constant value is "\
"assumed (use rms_value to force the rms to a specific "\
"value). If None, then the 2-D rms image is calculated, and "\
"if the variation is statistically significant then it "\
"is taken, else a constant value is assumed. The rms image "\
"used for each channel in computing the spectral index "\
"follows what was done for the channel-collapsed image.\n"\
"Generally, None works well. However, if there is "\
"significant extended emission in the image, "\
"it is often necessary to force the use of a "\
"constant rms map by setting rms_map = False.")
shapelet_do = Bool(False,
doc = "Decompose islands into shapelets\n"\
"If True, then each island is decomposed using shapelets, "\
"However, at the moment, output of the shapelet parameters "\
"is not supported.")
spectralindex_do = Bool(False,
doc = "Calculate spectral indices (for multi-channel image)\n"\
"If True, then for a multi-channel image, spectral indices "\
"are calculated for all Gaussians and sources which are "\
"detected in the channel-collapsed image.\nFrequencies "\
"can be specified manually using frequency_sp.")
thresh = Enum(None, "hard", "fdr",
doc = "Type of thresholding: " \
"None => calculate inside program, 'fdr' => use "\
"false detection rate algorithm, 'hard' => "\
"use sigma clipping\nIf thresh = 'hard', "\
"then a hard threshold is assumed, given by thresh_pix. "\
"If thresh = 'fdr', then the False Detection Rate algorithm of "\
"Hancock et al. (2002) is used to calculate the value of "\
"thresh_pix. If thresh is None, then the false detection "\
"probability is first calculated, and if the number of false "\
"source pixels is more than fdr_ratio times the estimated "\
"number of true source pixels, then the 'fdr' threshold "\
"option is chosen, else the 'hard' threshold option is "\
"chosen.")
thresh_isl = Float(3,
doc = "Threshold for the island boundary in number of sigma "\
"above the mean. Determines extent of island used for fitting\n"\
"This parameter determines the region to which fitting "\
"is done. A higher value will produce smaller islands, "\
"and hence smaller regions that are considered in the "\
"fits. A lower value will produce larger islands. "\
"Use the thresh_pix parameter to set the detection "
"threshold for sources. Generally, thresh_isl should "\
"be lower than thresh_pix.\n"
"Only regions "\
"above the absolute threshold will be used. "\
"The absolute threshold is calculated as abs_thr = "\
"mean + thresh_isl * rms. Use the mean_map "\
"and rms_map parameters to control the way "\
"the mean and rms are determined.")
thresh_pix = Float(5,
doc = "Source detection threshold: threshold for the "\
"island peak in number of sigma "\
"above the mean. If "\
"false detection rate thresholding is used, "\
"this value is ignored and thresh_pix is "\
"calculated inside the program\n"\
"This parameter sets the overall detection threshold "\
"for islands (i.e. thresh_pix = 5 will find all sources "\
"with peak flux densities per beam of 5-sigma or greater). Use the "\
"thresh_isl parameter to control how much of each island "\
"is used in fitting. Generally, thresh_pix should be larger "\
"than thresh_isl.\n"
"Only islands "\
"with peaks above the absolute threshold will be used. "\
"The absolute threshold is calculated as abs_thr = "\
"mean + thresh_pix * rms. Use the mean_map "\
"and rms_map parameters to control the way "\
"the mean and rms are determined.")
adaptive_rms_box = Bool(False,
doc = "Use adaptive rms_box when determining rms and "\
"mean maps\n"\
"If True, the rms_box is reduced in size near "\
"bright sources and enlarged far from them. "\
"This scaling attempts to account for possible "\
"strong artifacts around bright sources while "\
"still acheiving accurate background rms and "\
"mean values when extended sources are present.\n"\
"This option is generally slower than non-"\
"adaptive scaling.\n"\
"Use the rms_box parameter to set the large-"\
"scale rms_box and the rms_box_bright parameter "\
"to set the small-scale rms_box. The threshold "\
"for bright sources can be set with the "\
"adaptive_thresh parameter.")
#--------------------------------ADVANCED OPTIONS--------------------------------
split_isl = Bool(True,
doc = "Split island if it is too large, has a large "\
"convex deficiency and it opens well.\n"\
"If it doesn't open well, then isl.mean = "\
"isl.clipped_mean, and is taken for fitting. "\
"Splitting, if needed, is always done for "\
"wavelet images",
group = 'advanced_opts')
splitisl_maxsize = Float(50.0,
doc = "If island size in beam area is more than this, "\
"consider splitting island. Min value is 50",
group = 'advanced_opts')
splitisl_size_extra5 = Float(0.1,
doc = "Fraction of island area for 5x5 opening to "\
"be used.\nWhen deciding to split an island, "\
"if the smallest extra sub islands while opening "\
"with a 5x5 footprint add up to at least this "\
"fraction of the island area, and if the largest "\
"sub island is less than 75% the size of the "\
"largest when opened with a 3x3 footprint, a "\
"5x5 opening is taken.",
group = 'hidden')
splitisl_frac_bigisl3 = Float(0.8,
doc = "Fraction of island area for 3x3 opening to "\
"be used.\nWhen deciding to split an island, "\
"if the largest sub island when opened with a "\
"3x3 footprint is less than this fraction of the "\
"island area, then a 3x3 opening is considered.",
group = 'hidden')
peak_fit = Bool(True,
doc = "Find and fit peaks of large islands iteratively\n"\
"When enabled, PyBDSF will identify and "\
"fit peaks of emission in "\
"large islands iteratively (the size of islands for which "\
"peak fitting is done is controlled with the "\
"peak_maxsize option), using a maximum of 10 "\
"Gaussians per iteration. Enabling this option will "\
"generally speed up fitting, but may result in "\
"somewhat higher residuals.",
group = 'advanced_opts')
peak_maxsize = Float(30.0,
doc = "If island size in beam area is more than this, "\
"attempt to fit peaks iteratively (if "\
"peak_fit = True). Min value is 30",
group = 'advanced_opts')
fdr_alpha = Float(0.05,
doc = "Alpha for FDR algorithm for thresholds\n"\
"If thresh is 'fdr', then the estimate of fdr_alpha "\
"(see Hancock et al. 2002 for details) is stored "\
"in this parameter.",
group = "advanced_opts")
fdr_ratio = Float(0.1,
doc = "For thresh = None; " \
"if #false_pix / #source_pix < fdr_ratio, " \
"thresh = 'hard' else thresh = 'fdr'",
group = "advanced_opts")
kappa_clip = Option(None, Float(),
doc = "Kappa for clipped mean and rms. None => calculate "\
"inside program\n"\
"The value of this is the factor used for Kappa-alpha "\
"clipping, as in AIPS. For an image with few source "\
"pixels added on to (Gaussian) noise pixels, the "\
"dispersion of the underlying noise will need to be "\
"determined. This is done iteratively, whereby the actual "\
"dispersion is first computed. Then, all pixels whose "\
"value exceeds kappa clip times this rms are excluded and "\
"the rms is computed again. This process is repeated until "\
"no more pixels are excluded. For well behaved noise "\
"statistics, this process will converge to the true noise "\
"rms with a value for this parameter ~3-5. A large "\
"fraction of source pixels, less number of pixels in total, "\
"or significant non-gaussianity of the underlying noise "\
"will all lead to non-convergence.",
group = "advanced_opts")
bmpersrc_th = Option(None, Float(),
doc = "Theoretical estimate of number of beams " \
"per source. None => calculate inside program\n"\
"Its value is calculated inside the program if its "\
"value is given as None as N/[n*(alpha-1)], where N "\
"is the total number of pixels in the image, n is "\
"the number of pixels in the image whose value is "\
"greater than 5 times the clipped rms, and alpha is "\
"the slope of the differential source counts "\
"distribution, assumed to be 2.5. The value of "\
"bmpersrc_th is used to estimate the average separation "\
"in pixels between two sources, which in turn is used "\
"to estimate the boxsize for calculating the background "\
"rms and mean images. In addition, if the value is below "\
"25 (or the ratio of clipped mean to clipped rms of the "\
"image is greater than 0.1), the image is assumed to be "\
"confused and hence the background mean is put to zero.",
group = "advanced_opts")
spline_rank = Enum(3, 1, 2, 4,
doc = "Rank of the interpolating function for rms/mean map\n"\
"This is an integer and is the order of the interpolating "\
"spline function to interpolate the background rms and "\
"mean map over the entire image.",
group = "advanced_opts")
minpix_isl = Option(None, Int(),
doc = "Minimum number of pixels with emission per island "\
"(minimum is 6 pixels). "\
"None -> calculate inside program\n"\
"This is an integer and is the minimum number of pixels "\
"in an island for "\
"the island to be included. If None, the number of "\
"pixels is set to 1/3 of the area of an unresolved source "\
"using the beam and pixel size information in the "\
"image header. It is set to 6 pixels for all "\
"wavelet images.",
group = "advanced_opts")
maxpix_isl = Option(None, Int(),
doc = "Maximum number of pixels with emission per island. "\
"None -> no limit\n"\
"This is an integer and is the maximum number of pixels "\
"in an island for the island to be included.",
group = "advanced_opts")
rms_value = Option(None, Float(),
doc = "Value of constant rms in "\
"Jy/beam to use if rms_map = False. "\
"None => calculate inside program",
group = "advanced_opts")
aperture = Option(None, Float(),
doc = "Radius of aperture in pixels inside which aperture fluxes are measured "\
"for each source. None => no aperture fluxes measured\n" \
"This is a float and sets the radius (in pixels) inside "\
"which the aperture flux is measured for each source. "\
"Depending on the value of aperture_posn, the aperture is centered either "\
"on the centroid or the peak of the source. Errors are calculated "\
"from the mean of the rms map inside the aperture.",
group = "advanced_opts")
aperture_posn = Enum('centroid', 'peak',
doc = "Position the aperture (if aperture is not None) on: "\
"'centroid' or 'peak' of the source.\n"\
"This parameter determines how the aperture is "\
"positioned relative to the source. If 'centroid', "\
"the aperture is centered on the source centroid. If "\
"'peak', the aperture is centered on the source peak. "\
"If aperture=None (i.e., no aperture radius is specified), "\
"this parameter is ignored.",
group = "advanced_opts")
src_ra_dec = Option(None, List(Tuple(Float(), Float())),
doc = "List of source positions at which fitting is done. "\
"E.g., src_ra_dec = [(197.1932, 47.9188), (196.5573, 42.4852)].\n"\
"This parameter defines the center positions at which "\
"fitting will be done. The size of the region used for "\
"the fit is given by the src_radius_pix parameter. "\
"Positions should be given as a list of RA and Dec, "\
"in degrees, one set per source. These positions will "\
"override the normal island finding module.",
group = "advanced_opts")
src_radius_pix = Option(None, Float(),
doc = "Radius of the island (if src_ra_dec is not None) in pixels. "\
"None => radius is set to the FWHM of the beam major axis.\n"\
"This parameter determines the size of the region used "\
"to fit the source positions specified by the src_ra_dec "\
"parameter.",
group = "advanced_opts")
ini_gausfit = Enum('default', 'simple', 'nobeam',
doc = "Initial guess for Gaussian "\
"parameters: 'default', 'simple', or 'nobeam'\n"\
"These are three different ways of estimating the initial "\
"guess for fitting of Gaussians to an island of emission.\n"\
"If 'default', the number of Gaussians is "\
"estimated from the number of peaks in the island. An initial "\
"guess is made for the parameters of these Gaussians before "\
"final fitting is done. This method should produce the best "\
"results when there are no large sources present.\n"\
"If 'simple', the maximum allowable number of Gaussians per island "\
"is set to 25, and no initial guess for the gaussian parameters "\
"is made.\nLastly, the 'nobeam' method is similar to the "\
"'default' method, but no information about the beam is "\
"used. This method is best used when source sizes are "\
"expected to be very different from the beam and is generally "\
"slower than the other methods.\n"\
"For wavelet images, the value used for the original "\
"image is used for wavelet order j <= 3 and 'nobeam' for "\
"higher orders.",
group = "advanced_opts")
ini_method = Enum('intensity', 'curvature',
doc = "Method by which inital guess for fitting of Gaussians "\
"is chosen: 'intensity' or 'curvature'\n"\
"If 'intensity', the inital guess described in the help for "\
"the ini_gausfit parameter is calculated using the intensity "\
"(ch0) image. If 'curvature', it is done using the curvature "\
"map (see Hancock et al. 2012).",
group = "advanced_opts")
fix_to_beam = Bool(False,
doc = "Fix major and minor axes and PA of Gaussians to beam?\n"\
"If True, then during fitting the major and minor axes "\
"and PA of the Gaussians are fixed to the beam. Only the "\
"amplitude and position are fit. If False, all parameters "\
"are fit.\n"\
"Note that when this option is activated, as a "\
"consequence of using fewer free parameters, the estimated errors on the "\
"peak and total flux densities are a factor of sqrt(2) lower "\
"compared to the case in which all parameters are fit (see "\
"Condon 1997). Additionally, the reported errors on the major "\
"and minor axes and the PA are zero.",
group = "advanced_opts")
fittedimage_clip = Float(0.1,
doc = "Sigma for clipping Gaussians " \
"while creating fitted image\n"\
"When the residual image is being made after Gaussian "\
"decomposition, the model images for each fitted Gaussian "\
"are constructed up to a size 2b, such that the amplitude "\
"of the Gaussian falls to a value of fitted_image_clip times "\
"the local rms, b pixels from the peak.",
group = "advanced_opts")
check_outsideuniv = Bool(False,
doc = "Check for pixels outside the "\
"universe\n"\
"If True, then the coordinate of each pixel is examined "\
"to check if it is outside the universe, which may "\
"happen when, e.g., an all sky image is made with SIN "\
"projection (commonly done at LOFAR earlier). When found, "\
"these pixels are blanked (since imaging software do not "\
"do this on their own). Note that this process takes a "\
"lot of time, as every pixel is checked in case weird "\
"geometries and projections are used",
group = "advanced_opts")
trim_box = Option(None, Tuple(Float(), Float(), Float(), Float()),
doc = "Do source detection on only a part of the image. "\
"Specify as (xmin, xmax, ymin, ymax) in pixels. "\
"E.g., trim_box = (120, 840, 15, 895). None => "\
"use entire image",
group = "advanced_opts")
stop_at = Enum(None, 'isl', 'read',
doc = "Stops after: 'isl' = island finding step or "\
"'read' = image reading step",
group = "advanced_opts")
group_by_isl = Bool(False,
doc = "Group all Gaussians in each island into a single "\
"source\n"\
"If True, all Gaussians in the island belong to a "\
"single source. If False, grouping is controlled "\
"by the group_tol parameter.",
group = "advanced_opts")
group_method = Enum('intensity', 'curvature',
doc = "Group Gaussians into sources using 'intensity' map "\
"or 'curvature' map\n"\
"Gaussians are deemed to be a part of "\
"the same source if: 1. no pixel on the line joining "\
"the centers of any pair of Gaussians has a (Gaussian-"\
"reconstructed) value less than the island threshold, and "\
"2. the centers are separated by a distance less than "\
"half the sum of their FWHMs along the line joining them.\n"\
"If 'curvature', the above comparisons are done on the "\
"curature map (see Hancock et al. 2012). If 'intensity', "\
"the comparisons are done on the intensity map.",
group = "advanced_opts")
group_tol = Float(1.0,
doc = "Tolerance for grouping of Gaussians into sources: "\
"larger values will result in larger sources\n"\
"Sources are created by "\
"grouping nearby Gaussians as follows: (1) If the "\
"difference between the minimum value between two "\
"Gaussians and the lower of the peak flux densities of "\
"the Gaussians in an island is less than "\
"group_tol * thresh_isl * rms_clip, "\
"and (2) if the centres are seperated by a distance less "\
"than 0.5*group_tol of the sum of their fwhms along the "\
"PA of the line joining them, they belong to the "\
"same island.",
group = "advanced_opts")
blank_limit = Option(None, Float(),
doc = "Limit in Jy/beam below which pixels are blanked. "\
"None => no such blanking is done\n"\
"All pixels in the ch0 image with a value less than the "\
"specified limit and with at least 4 neighboring pixels "\
"with values also less than this limit are blanked. "\
"If None, any such pixels are left unblanked. "\
"Pixels with a value of NaN are always blanked.",
group = "advanced_opts")
detection_image = String(doc = "Detection image file name used only for detecting "\
"islands of emission. Source measurement is still done "\
"on the main image\n"\
"The detection image can be a FITS or CASA 2-, "\
"3-, or 4-D cube. The detection image and the main"\
"image must have the same size and be registered.",
group = "advanced_opts")
rmsmean_map_filename = List(None,
doc = "Filenames of FITS files to use as the mean and rms maps, "\
"given as a list [<mean_map.fits>, <rms_map.fits>]\n"\
"If supplied, the internally generated mean and rms maps "\
"are not used.",
group = 'advanced_opts')
rmsmean_map_filename_det = List(None,
doc = "Filenames of FITS files to use as the mean and rms maps "\
"when a detection image is specified, "\
"given as a list [<mean_map.fits>, <rms_map.fits>]\n"\
"If supplied, the internally generated mean and rms maps "\
"are not used.",
group = 'advanced_opts')
do_mc_errors = Bool(False,
doc = "Estimate uncertainties for 'M'-type sources using Monte "\
"Carlo method\n"\
"If True, uncertainties on the sizes and "\
"positions of 'M'-type sources "\
"due to uncertainties in the constituent Gaussians are "\
"estimated using a Monte Carlo technique. These "\
"uncertainties are added in quadrature with those "\
"calculated using Condon (1997). If False, "\
"these uncertainties are ignored, and errors are "\
"calculated using Condon (1997) only.\n"\
"Enabling this option will result in longer run "\
"times if many 'M'-type sources are present, but "\
"should give better estimates of the uncertainites, "
"particularly for complex sources composed of many "\
"Gaussians.",
group = "advanced_opts")
ncores = Option(None, Int(),
doc = "Number of cores to use during fitting, None => "\
"use all\n"\
"Sets the number of cores to use during fitting.",
group = "advanced_opts")
do_cache = Bool(False,
doc = "Cache internally derived images to disk\n" \
"This option controls whether internally "\
"derived images are stored in memory or are "\
"cached to disk. Caching can reduce the amount "\
"of memory used, and is therefore useful when "\
"analyzing large images.",
group = "advanced_opts")
#--------------------------------ADAPTIVE RMS_BOX OPTIONS--------------------------------
rms_box_bright = Option(None, Tuple(Int(), Int()),
doc = "Box size, step size for rms/mean map "\
"calculation near bright sources. Specify as (box, step) in "\
"pixels. None => calculate inside program\n"\
"This parameter sets the box and step sizes "\
"to use near bright sources (determined by the "\
"adaptive_thresh parameter). The large-scale "\
"box size is set with the rms_box parameter.",
group = "adaptive_rms_box")
adaptive_thresh = Option(None, Float(),
doc = "Sources with pixels "\
"above adaptive_thresh*clipped_rms will be considered as "\
"bright sources (i.e., with potential artifacts). "\
"Minimum is 10.0. "\
"None => calculate inside program\n"\
"This parameter sets the SNR above which "\
"sources may be affected by strong artifacts "\
"Sources that meet the SNR threshold will use the "\
"small-scale rms_box (which helps to exclude artifacts) "\
"if their sizes at a threshold of 10.0 is less "\
"than 25 beam areas.\n"
"If None, the threshold is varied from 500 "\
"to 50 to attempt to obtain at least 5 candidate "\
"bright sources.",
group = "adaptive_rms_box")
#--------------------------------A-TROUS OPTIONS--------------------------------
atrous_jmax = Int(0,
doc = 'Max allowed wavelength order, 0 => calculate '\
'inside program\n'\
'This is an integer which is the maximum order of '\
'the a-trous wavelet decomposition. If 0 (or <0 or '\
'>15), then the value is determined within the '\
'program. The value of this parameter is then '\
'estimated as the (lower) rounded off value of '\
'ln[(nm-l)/(l-1) + 1]/ln2 + 1 where nm is the '\
'minimum of the residual image size (n, m) in pixels '\
'and l is the length of the filter a-trous lpf (see '\
'the atrous_lpf parameter for more info).\nA sensible '\
'value of jmax is such that the size of the kernel is '\
'not more than 3-4 times smaller than the smallest image '\
'dimension.',
group = "atrous_do")
atrous_lpf = Enum('b3', 'tr',
doc = "Low pass filter, either 'b3' or "\
"'tr', for B3 spline or Triangle\n"\
"This is the low pass filter, which can be "\
"either the B3 spline or the Triangle function, which "\
"is used to generate the a-trous wavelets. The B3 "\
"spline is [1, 4, 6, 4, 1] and the triangle is "\
"[1, 2, 1], normalised so that the sum is unity. The "\
"lengths of the filters are hence 5 and 3 respectively.",
group = "atrous_do")
atrous_bdsm_do = Bool(True,
doc = "Perform source extraction on each wavelet "\
"scale\n"\
"If True, fitting is done on each wavelet scale "\
"(or sum of scales if atrous_sum is True). If False, "\
"no fitting is done.",
group = "atrous_do")
atrous_orig_isl = Bool(False,
doc = "Restrict wavelet Gaussians to islands found "\
"in original image\n"\
"If True, all wavelet Gaussians must lie within "\
"the boundaries of islands found in the original "\
"image. If False, new islands that are found only in "\
"the wavelet images are included in the final "\
"fit.",
group = "atrous_do")
atrous_sum = Bool(True,
doc = "Fit to the sum of remaining wavelet scales\n"\
"If True, fitting is done on an image that is the sum "\
"of the remaining wavelet scales. Using the sum will "\
"generally result in improved signal. If False, "\
"fitting is done on only the wavelet scale under "\
"consideration.",
group = "atrous_do")
use_scipy_fft = Bool(True,
doc = "Use fast SciPy FFT for convolution\n"\
"If True, the SciPy FFT function will be used instead "\
"of the custom version. The SciPy version is much "\
"faster but also uses much more memory.",
group = "atrous_do")
#--------------------------------FLAGGING OPTIONS--------------------------------
flag_smallsrc = Bool(False,
doc = "Flag sources smaller than "\
"flag_minsize_bm times beam area\n"\
"If True, "\
"then fitted Gaussians whose size is less than "\
"flag_minsize_bm times the synthesized beam area are "\
"flagged. When "\
"combining Gaussians into sources, an "\
"error is raised if a 2x2 box with the peak of "\
"the Gaussian does not have all four pixels "\
"belonging to the source. Usually this means "\
"that the Gaussian is an artifact or has a very "\
"small size. \nIf False, then if either of the sizes "\
"of the fitted Gaussian is zero, then the "\
"Gaussian is flagged.\nIf the image is barely Nyquist "\
"sampled, this flag is best set to False. This "\
"flag is automatically set to False while "\
"decomposing wavelet images into Gaussians. ",
group = "flagging_opts")
flag_minsnr = Float(0.6,
doc = "Flag Gaussian if peak is less than flag_minsnr "\
"times thresh_pix times local rms\n"\
"Any fitted Gaussian whose peak is less than "\
"flag_minsnr times thresh_pix times the local rms "\
"is flagged. The flag value is increased by 1.",
group = "flagging_opts")
flag_maxsnr = Float(1.5,
doc = "Flag Gaussian if peak is greater than "\
"flag_maxsnr times image value at the peak\n"\
"Any fitted Gaussian whose peak is greater than "\
"flag_maxsnr times the image value at the peak "\
"is flagged. The flag value is increased by 2.",
group = "flagging_opts")
flag_maxsize_isl = Float(2.0,
doc = "Flag Gaussian if x, y bounding box "\
"around sigma-contour is factor times island bbox\n"\
"Any fitted Gaussian whose maximum x-dimension is "\
"larger than flag_maxsize_isl times the x-dimension "\
"of the island (and likewise for the y-dimension) is "\
"flagged. The flag value is increased by 16 (for x) "\
"and 32 (for y).",
group = "flagging_opts")
flag_maxsize_fwhm = Float(0.5,
doc = "Flag Gaussian if fwhm-contour times factor extends beyond island\n"\
"Any fitted Gaussian whose contour of flag_maxsize_fwhm times the fwhm "\
"falls outside the island is "\
"flagged. The flag value is increased by 256.",
group = "flagging_opts")
flag_bordersize = Int(0,
doc = "Flag Gaussian if centre is outside border "\
"- flag_bordersize pixels\n"\
"Any fitted Gaussian whose centre is border pixels "\
"outside the island bounding box is flagged. The flag "\
"value is increased by 4 (for x) and 8 (for y).",
group = "flagging_opts")
flag_maxsize_bm = Float(25.0,
doc = "Flag Gaussian if area greater than "\
"flag_maxsize_bm times beam area\n"\
"Any fitted "\
"Gaussian whose size is greater than flag_maxsize_"\
"bm times the synthesized beam is flagged. The "\
"flag value is increased by 64.",
group = "flagging_opts")
flag_minsize_bm = Float(0.7,
doc = "Flag Gaussian if flag_smallsrc = True "\
"and area smaller than flag_minsize_bm times "\
"beam area\n"\
"If flag_smallsrc is "\
"True, then any fitted Gaussian whose size "\
"is less than flag_maxsize_bm times the "\
"synthesized beam is flagged. The Gaussian "\
"flag is increased by 128.",
group = "flagging_opts")
#-----------------------------MULTICHANNEL OPTIONS--------------------------------
beam_spectrum = Option(None, List(Tuple(Float(), Float(), Float())),
doc = "FWHM of synthesized beam per channel. Specify as "\
"[(bmaj_ch1, bmin_ch1, bpa_ch1), (bmaj_ch2, "\
"bmin_ch2, bpa_ch2), etc.] in degrees. E.g., "\
"beam_spectrum = [(0.01, 0.01, 45.0), (0.02, "\
"0.01, 34.0)] for two channels. None => all "\
"equal to beam\n"\
"If None, then the channel-dependent "\
"restoring beam is either assumed to be a constant or "\
"to scale with frequency, depending on whether the "\
"parameter beam_sp_derive is False or True.",
group = "multichan_opts")
frequency_sp = Option(None, List(Float()),
doc = "Frequency in Hz of channels in input image when "\
"more than one channel is present. "\
"E.g., frequency_sp = [74e6, 153e6]. "\
"None => get from header\n"\
"If the frequency is not given "\
"by the user, then it is looked for in the "\
"image header. If not found, then an error "\
"is raised. PyBDSF will not work without the "\
"knowledge of the frequency.",
group = "multichan_opts")
beam_sp_derive = Bool(True,
doc = "If True and beam_spectrum is None, then "\
"assume header beam is for lowest frequency and scales "\
"with frequency for channels\n"\
"If True and the parameter beam_spectrum is None, then "\
"we assume that the beam in the header is for the lowest "\
"frequency of the image cube and scale accordingly to "\
"calculate the beam per channel. If False, then a "\
"constant value of the beam is taken instead.",
group = "multichan_opts")
collapse_mode = Enum('average', 'single', 'file',
doc = "Collapse method: 'average', "\
"'single', or 'file'. If 'file', use a user-provided "\
"file, else either average channels or take single "\
"channel to perform source detection on\n"\
"This parameter determines whether, when multiple "\
"channels are present, the source extraction is "\
"done on a single channel or an average of many "\
"channels.",
group = 'multichan_opts')
collapse_file = String(None,
doc = "If collapse_mode is 'file' then use this file "\
"as the ch0 image. The image supplied can be a FITS or CASA 2-, "\
"3-, or 4-D cube. The detection image and the main "\
"image must have the same size and be registered.",
group = 'multichan_opts')
collapse_ch0 = Int(0,
doc = "Number of the channel for source extraction, "\
"if collapse_mode = 'single', starting from 0",
group = 'multichan_opts')
collapse_av = List(None,
doc = "List of channels to average if collapse_mode "\
"= 'average', starting from 0. E.g., collapse_av "\
"= [0, 1, 5]. [] => all\n"\
"This parameter is a list of channels to be averaged "\
"to produce the continuum image for performing source "\
"extraction, if collapse_mode is 'average'. If the "\
"value is an empty list ([]), then all channels are used. Else, the "\
"value is a Python list of channel numbers, starting "\
"from 0 (i.e., the first channel has number 0, the "\
"second has number 1, etc.).",
group = 'multichan_opts')
collapse_wt = Enum('unity', 'rms',
doc = "Weighting: 'unity' or 'rms'. "\
"Average channels with weights = 1 or 1/rms_clip^2 if " \
"collapse_mode = 'average'\n"\
"When collapse_mode is 'average', then if this value "\
"is 'unity', the channels given by collapse_av are "\
"averaged with unit weights and if 'rms', then they "\
"are averaged with weights which are inverse square "\
"of the clipped rms of each channel image.",
group = 'multichan_opts')
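# --- Illustrative sketch (not part of PyBDSF) --------------------------------
# With collapse_wt = 'rms', channels are combined with weights 1/rms_clip**2,
# i.e. an inverse-variance weighted mean. A minimal numpy stand-in for the
# internal averaging routine:
import numpy as np
def average_channels(cube, channel_rms):
    """cube: array of shape (nchan, ny, nx); channel_rms: clipped rms per channel."""
    w = 1.0 / np.asarray(channel_rms, dtype=float) ** 2
    return np.tensordot(w, cube, axes=1) / w.sum()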
#-----------------------------OUTPUT OPTIONS--------------------------------
plot_islands = Bool(False,
doc = 'Make separate plots of each island during '\
'fitting (for large images, this may take '\
'a long time and a lot of memory)',
group = "output_opts")
plot_allgaus = Bool(False,
doc = 'Make a plot of all Gaussians at the end',
group = "output_opts")
output_all = Bool(False,
doc = "Write out all files automatically to directory "\
"'outdir/filename_pybdsm'",
group = "output_opts")
opdir_overwrite = Enum('overwrite', 'append',
doc = "'overwrite'/'append': If output_all=True, "\
"delete existing "\
"files or append a new directory",
group = "output_opts")
bbs_patches = Enum(None, 'single', 'gaussian', 'source', 'mask',
doc = "For BBS format, type of patch to use: None "\
"=> no patches. "\
"'single' => all Gaussians in one patch. "\
"'gaussian' => each Gaussian gets its own "\
"patch. 'source' => all Gaussians belonging "\
"to a single source are grouped into one patch. "\
"'mask' => use mask file specified by bbs_patches_mask\n"\
"When the Gaussian catalogue is written as a "\
"BBS-readable sky file, this determines whether "\
"all Gaussians are in a single patch, there are "\
"no patches, all Gaussians for a given source "\
"are in a separate patch, each Gaussian gets "\
"its own patch, or a mask image is used to define "\
"the patches.\n"\
"If you wish to have patches defined by island, "\
"then set group_by_isl = True (under advanced_opts) "\
"before fitting to force all Gaussians in an "\
"island to be in a single source. Then set "\
"bbs_patches='source' when writing the catalog.",
group = "output_opts")
bbs_patches_mask = Option(None, String(),
doc = "Name of the mask file (of same size as input "\
"image) that defines the patches if bbs_patches "\
"= 'mask'\nA mask file may be used to define the "\
"patches in the output BBS sky model. The mask "\
"image should be 1 inside the patches and 0 "\
"elsewhere and should be the same size as the "\
"input image (before any trim_box is applied). Any "\
"Gaussians that fall outside of the patches "\
"will be ignored and will not appear in the "\
"output sky model.",
group = "output_opts")
solnname = Option(None, String(),
doc = "Name of the run, to be prepended "\
"to the name of the output directory. E.g., "\
"solname='Run_1'",
group = "output_opts")
indir = Option(None, String(),
doc = "Directory of input FITS files. None => get "\
"from filename",
group = "output_opts")
outdir = Option(None, String(),
doc = "Directory to use for all output files "\
"(including log files). None => parent directory of the "\
"input filename.",
group = "output_opts")
savefits_residim = Bool(False,
doc = "Save residual image as fits file",
group = "output_opts")
savefits_rmsim = Bool(False,
doc = "Save background rms image as fits file",
group = "output_opts")
savefits_meanim = Bool(False,
doc = "Save background mean image as fits file",
group = "output_opts")
savefits_det_rmsim = Bool(False,
doc = "Save detection background rms image as fits file",
group = "output_opts")
savefits_det_meanim = Bool(False,
doc = "Save detection background mean image as fits file",
group = "output_opts")
savefits_rankim = Bool(False,
doc = "Save island rank image as fits file",
group = "output_opts")
savefits_normim = Bool(False,
doc = "Save norm image as fits file",
group = "output_opts")
print_timing = Bool(False,
doc = "Print basic timing information",
group = "output_opts")
verbose_fitting = Bool(False,
doc = "Print out extra information " \
"during fitting",
group = "output_opts")
quiet = Bool(False,
doc = "Suppress text output to screen. Output is "\
"still sent to the log file as usual",
group = "output_opts")
#------------------------POLARISATION OPTIONS------------------------------
pi_fit = Bool(True,
doc = "Check the polarized intesity (PI) image for "\
"sources not found in Stokes I\n"\
"If True, the polarized intensity image is "\
"searched for sources not present in the Stokes "\
"I image. If any such sources are found, they are "\
"added to the the Stokes I source lists. Use the "\
"pi_thresh_pix and pi_thresh_isl parameters to "\
"control island detection in the PI image.",
group = "polarisation_do")
pi_thresh_isl = Option(None, Float(),
doc = "Threshold for PI island boundary in number of sigma "\
"above the mean. None => use thresh_isl\n"\
"This parameter determines the region to which fitting "\
"is done in the polarized intensity (PI) image. "\
"A higher value will produce smaller islands, "\
"and hence smaller regions that are considered in the "\
"fits. A lower value will produce larger islands. "\
"Use the pi_thresh_pix parameter to set the detection "
"threshold for sources. Generally, pi_thresh_isl should "\
"be lower than pi_thresh_pix.",
group = "polarisation_do")
pi_thresh_pix = Option(None, Float(),
doc = "Source detection threshold for PI image: threshold for the "\
"island peak in number of sigma "\
"above the mean. None => use thresh_pix\n"\
"This parameter sets the overall detection threshold "\
"for islands in the polarized intensity (PI) image "\
"(i.e. pi_thresh_pix = 5 will find all sources "\
"with peak flux densities per beam of 5-sigma or greater). Use the "\
"pi_thresh_isl parameter to control how much of each island "\
"is used in fitting. Generally, pi_thresh_pix should be larger "\
"than pi_thresh_isl.",
group = "polarisation_do")
#-----------------------------PSF VARY OPTIONS--------------------------------
psf_generators = Enum('calibrators', 'field',
doc = "PSF generators: 'calibrators' or 'field'\n"\
" If 'calibrator', only one source is taken per "\
"facet, and sources between psf_snrtop and maximum "\
"SNR are primary Voronoi generators. If 'field', "\
"all sources between psf_snrbot and psf_snrtop are "\
"secondary generators to be used in tessellating. "\
"Currently, the 'field' option is not implemented.",
group = "hidden")
psf_nsig = Float(3.0,
doc = "Kappa for clipping within each bin\n"\
"When constructing a set of 'unresolved' sources "\
"for psf estimation, the (clipped) median, rms and "\
"mean of major and minor axis sizes of Gaussians versus "\
"SNR within each bin is calculated using kappa = "\
"psf_nsig.",
group = "psf_vary_do")
psf_over = Int(2,
doc = "Factor of nyquist sample for binning bmaj, "\
"etc. vs SNR",
group = "psf_vary_do")
psf_kappa2 = Float(2.0,
doc = "Kappa for clipping for analytic fit\n"\
"When iteratively arriving at a statistically "\
"probable set of 'unresolved' sources, the fitted "\
"major and minor axis sizes versus SNR are binned "\
"and fitted with analytical functions. Those "\
"Gaussians which are within psf_kappa2 times "\
"the fitted rms from the fitted median are then "\
"considered 'unresolved' and are used further to "\
"estimate the PSFs.",
group = "psf_vary_do")
psf_smooth = Option(None, Float(),
doc = "Size of Gaussian to use for smoothing of "\
"interpolated images in arcsec. None => no "\
"smoothing",
group = "psf_vary_do")
psf_snrcut = Float(10.0,
doc = "Minimum SNR for statistics\n"\
"Only Gaussians with SNR greater than this are "\
"considered for processing. The minimum value is 5.0",
group = "psf_vary_do")
psf_snrtop = Float(0.15,
doc = "Fraction of SNR > snrcut as primary generators\n"\
"If psf_generators is 'calibrator', then the peak "\
"pixels of Gaussians which are the psf_snrtop "\
"fraction of SNR are taken as Voronoi generators. If "\
"psf_generators is 'field', then peak pixels of "\
"Gaussians which are between psf_snrbot and psf_snrtop "\
"fraction of the highest SNR are taken.",
group = "psf_vary_do")
psf_snrbot = Float(0.20,
doc = "Fraction of SNR > snrcut as all generators\n"\
"If psf_generators is 'field', then all sources which "\
"are between a fraction psf_snrbot and a fraction "\
"psf_snrtop of the highest SNR Gaussians are taken as "\
"Voronoi generators. That is, for a value of 0.2, the "\
"top 20% (in terms of SNR) of Gaussians are taken.",
group = "hidden")
psf_snrcutstack = Float(15.0,
doc = "Unresolved sources with higher SNR "\
"taken for stacked psfs\n"\
"Only Gaussians with SNR greater than this are used for "\
"estimating psf images in each tile.",
group = "psf_vary_do")
psf_gencode = Enum('list', 'file',
doc = "'list'/'file': Take primary "\
"gens from Gaussian list or file\n"\
"This is a string which can be either of 'list' or "\
"'file' (default is 'list'; 'file' not implemented "\
"yet). If psf_generators is 'calibrators', then the "\
"generators used for Voronoi tessellation of the "\
"image are either taken from a file if psf gencode is "\
"'file' or are determined from the data if psf gencode "\
"is 'list' (see psf_snrcut and psf_snrtop). The maximum "\
"pixel for each source is used as the generator. For "\
"'file' to be used, a list of good sources whose "\
"psfs are believed to close to theoretical (e.g. strong "\
"calibrators) need to be supplied with the metadata.",
group = "hidden")
psf_primarygen = String('',
doc = "Filename for primary gens if psf_gencode='file'\n"\
"This is the filename with the generators if psf_gencode "\
"is 'file'. This is not yet implemented.",
group = "hidden")
psf_itess_method = Int(0,
doc = "0 = normal, 1 = 0 + round, 2 = LogSNR, "\
"3 = SqrtLogSNR\n"\
"This is an integer which can be 0, 1, 2 or 3 "\
"(default is 0), which corresponds to a tessellation "\
"method. "\
"If 0, 2 or 3, then the weights used for Voronoi "\
"tessellation are unity, log(SNR) and sqrt[log(SNR)] where "\
"SNR is the signal to noise ratio of the generator "\
"in a tile. If 1, then the image is tessellated such "\
"that each tile has smooth boundaries instead of straight "\
"lines, using pixel-dependent weights.",
group = "psf_vary_do")
psf_tess_sc = Enum('s', 'c',
doc = "('s')imple/('c')omplicated - normal "\
"or approximate (fuzzy)\n"\
"If 's', then each pixel can only belong to one Voronoi "\
"tile. If 'c', then we do a fuzzy tessellation where border "\
"pixels can belong to more than one tile. However, we do "\
"not yet process the result of fuzzy tessellation and hence "\
"it is advisable to use 's'.",
group = "hidden")
psf_tess_fuzzy = Float(0.05,
doc = "Fraction of overlap for fuzzy tessellation\n"\
"If psf_tess_sc is 'c', then this determines the fraction "\
"of overlap between adjacent tiles for fuzzy tessellation.",
group = "hidden")
psf_use_shap = Bool(False,
doc = "Use shapelets for PSF variation",
group = "hidden")
psf_high_snr = Option(None, Float(),
doc = "SNR above which all sources are taken to be unresolved. "\
"E.g., psf_high_snr = 20.0. None => no such selection is made\n"\
"Gaussians with SNR greater than this are "\
"used to determine the PSF variation, even if they are deemed "\
"to be resolved. This corrects for the unreliability at high SNRs in the "\
"algorithm used to find unresolved sources. The minimum value is 20.0",
group = "psf_vary_do")
psf_stype_only = Bool(True,
doc = "Restrict sources to "\
"be only of type 'S'",
group = "psf_vary_do")
psf_fwhm = Option(None, Tuple(Float(), Float(), Float()),
doc = "FWHM of the PSF. Specify as (maj, "\
"min, pos ang E of N) in degrees. "\
"E.g., psf_fwhm = (0.06, 0.02, 13.3). None => "\
"estimate from image\n"\
"If the size of the PSF is specified with this option, "\
"the PSF and its variation acrosss the image are "\
"assumed to be constant and are not estimated "\
"from the image. Instead, all sources "\
"are deconvolved with the specified PSF.",
group = "psf_vary_do")
#-----------------------------SHAPELET OPTIONS--------------------------------
shapelet_basis = Enum("cartesian", "polar",
doc = "Basis set for shapelet decomposition: "\
"'cartesian' or 'polar'\n"\
"If shapelet decomposition is done, this determines "\
"the type of shapelet basis used. Currently however, "\
"only cartesian is supported.",
group = "shapelet_do")
shapelet_fitmode = Enum("fit", None,
doc = "Calculate shapelet coeff's by fitting ('fit') "\
"or integrating (None)\n"\
"If shapelet do is True, then this determines the "\
"method of calculating shapelet coefficients. If None, "\
"then these are calculated by integrating (actually, "\
"by summing over pixels, which introduces errors due to "\
"discretisation). If 'fit', then the coefficients are "\
"found by least-squares fitting of the shapelet basis "\
"functions to the image.",
group = "shapelet_do")
shapelet_gresid = Bool(False,
doc = "Use Gaussian residual image for shapelet "\
"decomposition?\n"\
"If True, then the shapelet decomposition is done "\
"on the Gaussian residual image rather that the "\
"ch0 image.",
group = "shapelet_do")
#-------------------------SPECTRAL INDEX OPTIONS--------------------------------
flagchan_rms = Bool(True,
doc = "Flag channels before (averaging and) "\
"extracting spectral index, if their rms is "\
"more than 5 (clipped) sigma outside the median "\
"rms over all channels, but only if <= 10% of "\
"channels\n"\
"If True, then the clipped rms and median (r and m) "\
"of the clipped rms of each channel is calculated. "\
"Those channels whose clipped rms is greater than "\
"4r away from m are flagged prior to averaging and "\
"calculating spectral indices from the image cube. "\
"However, these channels are flagged only if the "\
"total number of these bad channels does not exceed "\
"10% of the total number of channels themselves.",
group = "spectralindex_do")
flagchan_list = List(None,
doc = "List of channels to flag before (averaging and) "\
"extracting spectral index\n"\
"This parameter is a list of channels to be flagged. "\
"Flagged channels will not be used during fitting. If the "\
"value is an empty list ([]), then all channels are used. Else, the "\
"value is a Python list of channel numbers, starting "\
"from 0 (i.e., the first channel has number 0, the "\
"second has number 1, etc.).",
group = 'spectralindex_do')
flagchan_snr = Bool(True,
doc = "Flag channels that do not meet SNR criterion "\
"set by specind_snr\n"\
"If True, then channels (after averaging if needed) "\
"will be flagged and will not be used during fitting.",
group = "spectralindex_do")
specind_maxchan = Int(0,
doc = "Maximum number of channels to average for "\
"a given source when when attempting to meet target SNR. "\
"1 => no averaging; 0 => no maximum\n"\
"If spectralindex_do is True, then for a given source, "\
"if the flux densities in each channel are below a threshold, "\
"then this determines the maximum number of channels to "\
"average.",
group = "spectralindex_do")
specind_snr = Float(3.0,
doc = "Target SNR to use when fitting power law. If "\
"there is insufficient SNR, neighboring channels "\
"are averaged to attempt to obtain the target SNR. "\
"Channels with SNRs below this will be flagged if "\
"flagchan_snr = True\n"\
"The maximum allowable number of channels to average "\
"is determined by the specind_maxchan parameter.",
group = "spectralindex_do")
#-------------------------HIDDEN OPTIONS--------------------------------
debug = Bool(False,
doc = "Print debug info to the logfile",
group = "hidden")
outfile = Option(None, String(),
doc = "Output file name. None => file is named "\
"automatically; 'SAMP' => send to SAMP hub "\
"(e.g., to TOPCAT, ds9, or Aladin)",
group = 'hidden')
broadcast = Bool(False,
doc = "Broadcast Gaussian and source IDs and "\
"coordinates to SAMP hub when a Gaussian is "\
"clicked?\nNote that for the "\
"IDs to be useful, a catalog must have been sent "\
"to the SAMP hub previously using the write_catalog "\
"task (with outfile = 'SAMP').",
group = 'hidden')
clobber = Bool(False,
doc = "Overwrite existing file?",
group = 'hidden')
format = Enum('fits', 'ds9', 'ascii', 'bbs', 'star', 'kvis', 'sagecal', 'csv', 'casabox',
doc = "Format of output catalog: 'bbs', "\
"'ds9', 'fits', 'star', 'kvis', 'ascii', 'csv', 'casabox', or 'sagecal'\n"\
"The following formats are supported:\n"\
"'bbs' - BlackBoard Selfcal sky model format "\
"(Gaussian list only)\n"\
"'ds9' - ds9 region format\n"\
"'fits' - FITS catalog format, readable by many "\
"software packages, including IDL, TOPCAT, Python, "\
"fv, Aladin, etc.\n"\
"'star' - AIPS STAR format (Gaussian list only)\n"\
"'kvis' - kvis format (Gaussian list only)\n"\
"'ascii' - simple text file\n"\
"'sagecal' - SAGECAL format (Gaussian list only)\n"\
"Catalogues with the 'fits' and 'ascii' formats "\
"include all available information (see headers "\
"of the output file for column definitions). The "\
"other formats include only a subset of the full "\
"information.",
group = 'hidden')
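# Illustrative sketch of how the catalog output options above are used
# (file names are placeholders):
#
#   img.write_catalog(outfile='out_srl.fits', format='fits',
#                     catalog_type='srl', clobber=True)
#   img.write_catalog(outfile='out.skymodel', format='bbs',
#                     catalog_type='gaul', srcroot='mysrc')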
srcroot = Option(None, String(),
doc = "Root name for entries in the output catalog "\
"(BBS format only). None => use image file name",
group = 'hidden')
incl_chan = Bool(False,
doc = "Include flux densities from each channel "\
"(if any)?",
group = 'hidden')
incl_empty = Bool(False,
doc = "Include islands without any valid Gaussians "\
"(source list only)?\n"\
"If True, islands for which Gaussian fitting "\
"failed will be included in the output catalog. "\
"In these cases, the source IDs "\
"are negative.",
group = 'hidden')
force_output = Bool(False,
doc = "Force creation of output file, even if the "\
"catalog is empty?\n"\
"If True, the output catalog will be created, "\
"even if there are no sources. In this case, "\
"the catalog will have a header but no entries.",
group = 'hidden')
catalog_type = Enum('srl', 'gaul', 'shap',
doc = "Type of catalog to write: 'gaul' - Gaussian "\
"list, 'srl' - source list (formed "\
"by grouping Gaussians), 'shap' - shapelet "\
"list (FITS format only)",
group = 'hidden')
correct_proj = Bool(True,
doc = "Correct source parameters for image projection (BBS format only)?\n"\
"If True, the source parameters in the output catalog will be "\
"corrected for first-order projection effects. If False, "\
"no correction is done. In this case, the position angle "\
"is relative to the +y axis, NOT true north, and source sizes "\
"are calculated assuming a constant pixel scale (equal to the "
"scale at the image center).\n "\
"If True, the position angle and source size "\
"are corrected using the average pixel size and "
"angle offset (between the +y axis and north) at "\
"the location of the source center.",
group = 'hidden')
img_format = Enum('fits', 'casa',
doc = "Format of output image: 'fits' or 'casa'",
group = 'hidden')
img_type = Enum('gaus_resid', 'shap_resid', 'rms', 'mean', 'gaus_model',
'shap_model', 'ch0', 'pi', 'psf_major', 'psf_minor',
'psf_pa', 'psf_ratio', 'psf_ratio_aper', 'island_mask',
doc = "Type of image to export: 'gaus_resid', "\
"'shap_resid', 'rms', 'mean', 'gaus_model', "\
"'shap_model', 'ch0', 'pi', 'psf_major', "\
"'psf_minor', 'psf_pa', 'psf_ratio', 'psf_ratio_aper', "\
"'island_mask'\nThe following images "\
"can be exported:\n"\
"'ch0' - image used for source detection\n"\
"'rms' - rms map image\n"\
"'mean' - mean map image\n"\
"'pi' - polarized intensity image\n"\
"'gaus_resid' - Gaussian model residual image\n"\
"'gaus_model' - Gaussian model image\n"\
"'shap_resid' - Shapelet model residual image\n"\
"'shap_model' - Shapelet model image\n"\
"'psf_major' - PSF major axis FWHM image (FWHM in arcsec)\n"\
"'psf_minor' - PSF minor axis FWHM image (FWHM in arcsec)\n"\
"'psf_pa' - PSF position angle image (degrees east of north)\n"\
"'psf_ratio' - PSF peak-to-total flux ratio (in units of 1/beam)\n"\
"'psf_ratio_aper' - PSF peak-to-aperture flux ratio (in units of 1/beam)\n"\
"'island_mask' - Island mask image (0 = outside island, 1 = inside island)",
group = 'hidden')
mask_dilation = Int(0,
doc = "Number of iterations to use for island-mask dilation. "\
"0 => no dilation\nThis option determines the number of "\
"dilation iterations to use when making the island mask. "\
"More iterations implies larger masked regions (one iteration "\
"expands the size of features in the mask by one pixel in all "\
"directions). After dilation, a closing operation is performed "\
"(using a structure array the size of the beam) to remove gaps "\
"and holes in the mask that are smaller than the beam.",
group = "hidden")
pad_image = Bool(False,
doc = "Pad image (with zeros) to original size\nIf True, the output "\
"image is padded to be the same size as the original "\
"image (without any trimming defined by the trim_box "\
"parameter). If False, the output image will have the "\
"size specified by the trim_box parameter.",
group = "hidden")
ch0_image = Bool(True,
doc = "Show the ch0 image. This is the image used for "\
"source detection",
group = "hidden")
rms_image = Bool(True,
doc = "Show the background rms image",
group = "hidden")
mean_image = Bool(True,
doc = "Show the background mean image",
group = "hidden")
ch0_islands = Bool(True,
doc = "Show the ch0 image with islands and Gaussians "\
"(if any) overplotted",
group = "hidden")
ch0_flagged = Bool(False,
doc = "Show the ch0 image with flagged Gaussians "\
"(if any) overplotted",
group = "hidden")
gresid_image = Bool(True,
doc = "Show the Gaussian residual image",
group = "hidden")
sresid_image = Bool(False,
doc = "Show the shapelet residual image",
group = "hidden")
gmodel_image = Bool(True,
doc = "Show the Gaussian model image",
group = "hidden")
smodel_image = Bool(False,
doc = "Show the shapelet model image",
group = "hidden")
pi_image = Bool(False,
doc = "Show the polarized intensity image",
group = "hidden")
source_seds = Bool(False,
doc = "Plot the source SEDs and best-fit spectral "\
"indices (if image was processed with "\
"spectralindex_do = True). "\
"Sources may be chosen by ID with the 'c' key "\
"or, if ch0_islands = True, by picking a source with "\
"the mouse",
group = "hidden")
psf_major = Bool(False,
doc = "Show the PSF major axis variation (values are "\
"FWHM in arcsec)",
group = "hidden")
psf_minor = Bool(False,
doc = "Show the FWHM of PSF minor axis variation (values are "\
"FWHM in arcsec)",
group = "hidden")
psf_pa = Bool(False,
doc = "Show the PSF position angle variation (values are "\
"angle E from N in degrees)",
group = "hidden")
def __init__(self, values = None):
"""Build an instance of Opts and (possibly)
initialize some variables.
Parameters:
values: dictionary of key->value for initialization
of variables
"""
TCInit(self)
if values is not None:
self.set_opts(values)
def _parse_string_as_bool(self, bool_string):
"""
'private' function performing parse of a string containing
a bool representation as defined in the parameter set/otdb
implementation
"""
true_chars = ['t', 'T', 'y', 'Y', '1']
false_chars = ['f', 'F', 'n', 'N', '0']
if bool_string[0] in true_chars:
return True
if bool_string[0] in false_chars:
return False
raise tcError(
"Supplied string cannot be parsed as a bool: {0}".format(bool_string))
def set_opts(self, opts):
"""Set multiple variables at once.
opts should be dictionary of name->value
"""
opts = dict(opts)
for k, v in opts.items():
try:
# Fix for lofar parameter set integration:
# If the attribute is a bool, test if it is a string.
# and then try to parse it
if hasattr(self, k):
if isinstance(self.__getattribute__(k), bool):
if isinstance(v, bool) or v is None:
# just enter the bool into the parameter
pass
elif isinstance(v, basestring):
# Try parse it as a parameter set bool string
v = self._parse_string_as_bool(v)
else:
# raise error
raise tcError("unknown type for bool variable")
if v == "none":
v = None
self.__setattr__(k, v)
except tcError as e:
# Catch and re-raise as a RuntimeError
raise RuntimeError(
'Parameter "{0}" is not defined properly. \n {1}'.format(k
, str(e)))
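# Minimal usage sketch for set_opts (illustrative values): options may be set
# in bulk from a dictionary; string booleans (e.g. from a parameter set) are
# parsed by _parse_string_as_bool above, and the string "none" maps to None.
#
#   opts = Opts()
#   opts.set_opts({'clobber': True, 'debug': 'false', 'outfile': 'none'})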
def set_default(self, opt_names = None):
"""Set one or more opts to default value.
opt_names should be a list of opt names as strings, but can be
a string of a single opt name.
If None, set all opts to default values."""
if opt_names is None:
TCInit(self)
else:
if isinstance(opt_names, str):
opt_names = [opt_names]
for k in opt_names:
if isinstance(k, str):
self.__delattr__(k)
def info(self):
"""Pretty-print current values of options"""
## enumerate all options
opts = self.to_list()
res = ""
fmt = "%20s = %5s ## %s\n"
for k, v in opts:
res += fmt % (k, str(self.__getattribute__(k)),
str(v.doc()).split('\n')[0])
return res
def to_list(self, group=None):
"""Returns a sorted list of (name, TC object) tuples for all opts.
If the group name is specified, only opts that belong to that group
are returned.
"""
from . import tc
opts_list = []
for k, v in self.__class__.__dict__.items():
if isinstance(v, tc.TC):
if group is not None:
if v.group() == group:
opts_list.append((k, v))
else:
opts_list.append((k, v))
opts_list = sorted(opts_list)
return opts_list
def to_dict(self):
"""Returns a dictionary of names and values for all opts."""
from . import tc
opts_dict = {}
for k, v in self.__class__.__dict__.items():
if isinstance(v, tc.TC):
opts_dict.update({k: self.__getattribute__(k)})
return opts_dict
def get_names(self, group=None):
"""Returns a sorted list of names for all opts.
If the group name is specified, only opts that belong to that group
are returned.
"""
from . import tc
opts_list = []
for k, v in self.__class__.__dict__.items():
if isinstance(v, tc.TC):
if group is not None:
if v.group() == group:
opts_list.append(k)
else:
opts_list.append(k)
opts_list = sorted(opts_list)
return opts_list
def __setstate__(self, state):
self.set_opts(state)
def __getstate__(self):
from . import tc
state = {}
for k, v in self.__class__.__dict__.items():
if isinstance(v, tc.TC):
state.update({k: self.__getattribute__(k)})
return state
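# Minimal usage sketch for the introspection helpers above (illustrative only):
#
#   opts = Opts({'debug': True})
#   print(opts.info())                       # one line per option with its doc
#   hidden = opts.get_names(group='hidden')  # names of all hidden options
#   state = opts.__getstate__()              # plain dict of current values
#   opts2 = Opts(); opts2.__setstate__(state)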
PyBDSF
PyBDSF-master/bdsf/sourcecounts.py
"""Sourcecounts
s is flux in Jy and n is the number of sources brighter than s per steradian
"""
import numpy as N
s=N.array([ 9.9999997e-05, 0.00010328281, 0.00010667340, 0.00011017529, 0.00011379215, 0.00011752774, 0.00012138595, \
0.00012537083, 0.00012948645, 0.00013373725, 0.00013812761, 0.00014266209, 0.00014734542, 0.00015218249, 0.00015717837, \
0.00016233824, 0.00016766752, 0.00017317173, 0.00017885664, 0.00018472817, 0.00019079246, 0.00019705582, 0.00020352470, \
0.00021020604, 0.00021710672, 0.00022423393, 0.00023159511, 0.00023919797, 0.00024705040, 0.00025516062, 0.00026353705, \
0.00027218851, 0.00028112394, 0.00029035273, 0.00029988447, 0.00030972913, 0.00031989696, 0.00033039862, 0.00034124497, \
0.00035244724, 0.00036401744, 0.00037596744, 0.00038830977, 0.00040105727, 0.00041422324, 0.00042782145, 0.00044186602, \
0.00045637166, 0.00047135353, 0.00048682719, 0.00050280854, 0.00051931484, 0.00053636299, 0.00055397081, 0.00057215663, \
0.00059093948, 0.00061033899, 0.00063037529, 0.00065106933, 0.00067244272, 0.00069451780, 0.00071731757, 0.00074086577, \
0.00076518703, 0.00079030672, 0.00081625103, 0.00084304705, 0.00087072275, 0.00089930650, 0.00092882907, 0.00095932081, \
0.00099081360, 0.0010233402, 0.0010569346, 0.0010916317, 0.0011274681, 0.0011644807, 0.0012027085, 0.0012421905, \
0.0012829694, 0.0013250869, 0.0013685870, 0.0014135153, 0.0014599183, 0.0015078448, 0.0015573446, 0.0016084694, \
0.0016612725, 0.0017158090, 0.0017721358, 0.0018303118, 0.0018903976, 0.0019524558, 0.0020165513, 0.0020827511, \
0.0021511239, 0.0022217415, 0.0022946771, 0.0023700071, 0.0024478103, 0.0025281659, 0.0026111610, 0.0026968806, \
0.0027854142, 0.0028768543, 0.0029712960, 0.0030688383, 0.0031695808, 0.0032736324, 0.0033810998, 0.0034920950, \
0.0036067341, 0.0037251366, 0.0038474260, 0.0039737299, 0.0041041803, 0.0042389128, 0.0043780687, 0.0045217923, \
0.0046702349, 0.0048235501, 0.0049818982, 0.0051454445, 0.0053143604, 0.0054888208, 0.0056690089, 0.0058551119, \
0.0060473247, 0.0062458473, 0.0064508831, 0.0066626542, 0.0068813767, 0.0071072797, 0.0073405989, 0.0075815772, \
0.0078304661, 0.0080875214, 0.0083530201, 0.0086272340, 0.0089104511, 0.0092029646, 0.0095050810, 0.0098171150, \
0.010139393, 0.010472251, 0.010816036, 0.011171106, 0.011537833, 0.011916599, 0.012307799, 0.012711842, 0.013129148, \
0.013560154, 0.014005309, 0.014465077, 0.014939931, 0.015430382, 0.015936933, 0.016460113, 0.017000468, 0.017558562, \
0.018134978, 0.018730316, 0.019345198, 0.019980265, 0.020636180, 0.021313628, 0.022013316, 0.022735972, 0.023482339, \
0.024253221, 0.025049411, 0.025871737, 0.026721058, 0.027598262, 0.028504262, 0.029440004, 0.030406466, 0.031404655, \
0.032435611, 0.033500414, 0.034600168, 0.035736032, 0.036909178, 0.038120817, 0.039372254, 0.040664773, 0.041999724, \
0.043378498, 0.044802535, 0.046273317, 0.047792386, 0.049361322, 0.050981764, 0.052655403, 0.054383982, 0.056169309, \
0.058013245, 0.059917714, 0.061884668, 0.063916229, 0.066014484, 0.068181612, 0.070419893, 0.072731644, 0.075119294, \
0.077585325, 0.080132306, 0.082762904, 0.085479856, 0.088286005, 0.091184273, 0.094177686, 0.097269312, 0.10046248, \
0.10376048, 0.10716674, 0.11068483, 0.11431842, 0.11807128, 0.12194734, 0.12595065, 0.13008538, 0.13435584, 0.13876650, \
0.14332195, 0.14802694, 0.15288639, 0.15790530, 0.16308904, 0.16844295, 0.17397262, 0.17968382, 0.18558250, 0.19167484, \
0.19796717, 0.20446607, 0.21117832, 0.21811092, 0.22527111, 0.23266634, 0.24030435, 0.24819310, 0.25634068, 0.26475587, \
0.27344733, 0.28242409, 0.29169556, 0.30127138, 0.31116158, 0.32137644, 0.33192664, 0.34282318, 0.35407743, 0.36570114, \
0.37770644, 0.39010584, 0.40291208, 0.41613895, 0.42980003, 0.44390959, 0.45848233, 0.47353345, 0.48907870, 0.50513422, \
0.52171689, 0.53884387, 0.55653316, 0.57480311, 0.59367281, 0.61316204, 0.63329101, 0.65408045, 0.67555267, 0.69772983, \
0.72063506, 0.74429214, 0.76872587, 0.79396176, 0.82002604, 0.84694600, 0.87474972, 0.90346611, 0.93312526, 0.96375805, \
0.99539644, 1.0280730, 1.0618227, 1.0966804, 1.1326823, 1.1698662, 1.2082708, 1.2479361, 1.2889036, 1.3312160, 1.3749173, \
1.4200534, 1.4666711, 1.5148191, 1.5645479, 1.6159091, 1.6689565, 1.7237452, 1.7803327, 1.8387777, 1.8991414, 1.9614867, \
2.0258787, 2.0923846, 2.1610713, 2.2320154, 2.3052883, 2.3809667, 2.4591296, 2.5398583, 2.6232371, 2.7093532, 2.7982962, \
2.8901591, 2.9850378, 3.0830312, 3.1842413, 3.2887743, 3.3967385, 3.5082474, 3.6234167, 3.7423668, 3.8652217, 3.9921098, \
4.1231637, 4.2585196, 4.3983188, 4.5427074, 4.6918364, 4.8458605, 5.0049415, 5.1692443, 5.3389411, 5.5142026, 5.6952238, \
5.8821878, 6.0752892, 6.2747297, 6.4807177, 6.6934676, 6.9132018, 7.1401496, 7.3745475, 7.6166406, 7.8666806, 8.1249294, \
8.3916559, 8.6671391, 8.9516649, 9.2455320, 9.5490456, 9.8625231, 10.186292, 10.520689, 10.866064, 11.222776, 11.591200, \
11.971718, 12.364727, 12.770638, 13.189876, 13.622874, 14.070073, 14.531968, 15.009026, 15.501744, 16.010639, 16.536238, \
17.079092, 17.639769, 18.218849, 18.816940, 19.434666, 20.072670, 20.731619, 21.412201, 22.115124, 22.841122, 23.590954, \
24.365402, 25.165274, 25.991404, 26.844654, 27.725914, 28.636105, 29.576176, 30.547108, 31.549913, 32.585640, 33.655365, \
34.760208, 35.901321, 37.079857, 38.297119, 39.554344, 40.852840, 42.193966, 43.579117, 45.009739, 46.487324, 48.013420, \
49.589611, 51.217548, 52.898926, 54.635498, 56.429081, 58.281548, 60.194820, 62.170906, 64.211861, 66.319824, 68.496979, \
70.745613, 73.068062, 75.466751, 77.944183, 80.502945, 83.145714, 85.875237, 88.694359, 91.606033, 94.613190, 97.719162, \
100.92711, 104.24036, 107.66238, 111.19673, 114.84712, 118.61734, 122.51133, 126.53315, 130.68700, 134.97722, 139.40826, \
143.98479, 148.71155, 153.59348, 158.63567, 163.84338, 169.22206, 174.77731, 180.51492, 186.44090, 192.56142, 198.88284, \
205.41180, 212.15511, 219.11977, 226.31306, 233.74251, 241.41557, 249.34081, 257.52621, 265.98032, 274.71198, 283.73026, \
293.04462, 302.66473, 312.60065, 322.86276, 333.46173, 344.40869, 355.71500, 367.39246, 379.45328, 391.91003, 404.77573, \
418.06375, 431.78802, 445.96283, 460.60297, 475.72372, 491.34085, 507.47067, 524.13000, 541.33624, 559.10730, 577.46179, \
596.41876, 615.99811, 636.21954, 657.10541, 678.67700, 700.95673, 723.96783, 747.73438, 772.28113, 797.63373, 823.81854, \
850.86298, 878.79529, 907.64453, 937.44080, 968.21527, 1000.0000])
n=N.array([ 3.7709775e+10, 3.6065767e+10, 3.4493432e+10, 3.2989649e+10, 3.1551425e+10, 3.0175900e+10, \
2.8860342e+10, 2.7602137e+10, \
2.6398808e+10, 2.5247922e+10, 2.4147204e+10, 2.3094475e+10, 2.2087643e+10, 2.1124704e+10, 2.0203747e+10, 1.9322939e+10, \
1.8480527e+10, 1.7674846e+10, 1.6904289e+10, 1.6167328e+10, 1.5462490e+10, 1.4788384e+10, 1.4143675e+10, 1.3527065e+10, \
1.2937335e+10, 1.2373316e+10, 1.1833886e+10, 1.1317971e+10, 1.0824550e+10, 1.0352640e+10, 9.9013028e+09, 9.4696428e+09, \
9.0568028e+09, 8.6619587e+09, 8.2843305e+09, 7.9231647e+09, 7.5777439e+09, 7.2473825e+09, 6.9314243e+09, 6.6292444e+09, \
6.3402342e+09, 6.0638244e+09, 5.7994639e+09, 5.5466291e+09, 5.3048166e+09, 5.0735457e+09, 4.8523587e+09, 4.6408141e+09, \
4.4384916e+09, 4.2449897e+09, 4.0599278e+09, 3.8829297e+09, 3.7136481e+09, 3.5517468e+09, 3.3969042e+09, 3.2488120e+09, \
3.1071754e+09, 2.9717143e+09, 2.8421588e+09, 2.7182515e+09, 2.5997458e+09, 2.4864064e+09, 2.3780086e+09, 2.2743360e+09, \
2.1751834e+09, 2.0803535e+09, 1.9896579e+09, 1.9029162e+09, 1.8199575e+09, 1.7406141e+09, 1.6647299e+09, 1.5921536e+09, \
1.5227420e+09, 1.4563558e+09, 1.3928644e+09, 1.3321405e+09, 1.2740643e+09, 1.2185199e+09, 1.1653979e+09, 1.1145907e+09, \
1.0659987e+09, 1.0195252e+09, 9.7507763e+08, 9.3256806e+08, 8.9191149e+08, 8.5302746e+08, 8.1583853e+08, 7.8027117e+08, \
7.4625421e+08, 7.1372032e+08, 6.8260474e+08, 6.5284576e+08, 6.2438406e+08, 5.9716326e+08, 5.7112922e+08, 5.4623008e+08, \
5.2241651e+08, 4.9964106e+08, 4.7785866e+08, 4.5702573e+08, 4.3710147e+08, 4.1804544e+08, 3.9982026e+08, 3.8238954e+08, \
3.6571878e+08, 3.4977482e+08, 3.3452595e+08, 3.1994208e+08, 3.0599382e+08, 2.9265363e+08, 2.7989501e+08, 2.6769266e+08, \
2.5602224e+08, 2.4486062e+08, 2.3418562e+08, 2.2397598e+08, 2.1421147e+08, 2.0487264e+08, 1.9594099e+08, 1.8739867e+08, \
1.7922877e+08, 1.7141509e+08, 1.6394203e+08, 1.5679477e+08, 1.4995909e+08, 1.4342146e+08, 1.3716880e+08, 1.3118874e+08, \
1.2546940e+08, 1.1999951e+08, 1.1476796e+08, 1.0976452e+08, 1.0497919e+08, 1.0040248e+08, 96025304., 91838968., \
87835200., 84005912., 80343576., 76840880., 73490912., 70286984., 67222736., 64292076., 61489172., 58808476., \
56244648., 53792588., 51447432., 49204512., 47059380., 45007768., 43045600., 41168972., 39374160., 37657620., \
36015888., 34445724., 32944024., 31507790., 30134168., 28820430., 27563966., 26362278., 25212982., 24113790., \
23062518., 22057078., 21095472., 20175804., 19296216., 18454972., 17650402., 16880912., 16144966., 15441105., \
14767931., 14124105., 13508346., 12919433., 12356192., 11817510., 11302309., 10809571., 10338324., 9887611.0, \
9456547.0, 9044277.0, 8649980.0, 8272873.0, 7912207.0, 7567264.5, 7237360.0, 6921837.5, 6620071.0, 6331461.0, \
6055433.0, 5791438.5, 5538953.0, 5297479.5, 5066528.5, 4845647.0, 4634395.5, 4432353.0, 4239119.0, 4054309.2, \
3877556.2, 3708509.5, 3546832.0, 3392203.5, 3244316.0, 3102876.0, 2967602.0, 2838228.0, 2729847.5, 2624870.5, \
2524750.2, 2429229.0, 2338061.0, 2251017.0, 2167880.5, 2088448.4, 2012529.5, 1939942.6, 1870518.1, 1804095.8, \
1740523.8, 1679660.2, 1621370.6, 1565526.9, 1512157.9, 1460823.1, 1411600.0, 1364385.6, 1319083.4, 1275602.0, \
1233855.0, 1193760.2, 1155241.0, 1118223.9, 1082639.1, 1048421.7, 1015509.1, 983842.56, 953365.38, 924024.94, \
895770.81, 868555.00, 842332.44, 817144.38, 792764.06, 769256.56, 746584.44, 724711.62, 703604.50, 683230.62, \
663559.44, 644562.06, 626210.06, 608477.38, 591338.81, 574770.50, 558749.50, 543254.06, 528263.38, 513757.69, \
499717.94, 486126.28, 473019.56, 460262.88, 447906.47, 435935.03, 424334.22, 413089.53, 402187.88, 391616.53, \
381363.44, 371416.84, 361765.66, 352399.28, 343307.47, 334480.50, 325909.12, 317584.28, 309497.50, 301640.47, \
294005.56, 286584.88, 279402.72, 272383.66, 265559.03, 258922.31, 252467.16, 246187.56, 240077.75, 234132.17, \
228345.47, 222712.61, 217228.62, 211888.83, 206688.67, 201623.84, 196690.11, 191883.45, 187200.03, 182636.05, \
178187.92, 173852.23, 169645.80, 165521.64, 161500.73, 157580.05, 153756.70, 150027.80, 146390.59, 142842.50, \
139380.91, 136003.44, 132707.70, 129491.38, 126352.36, 123288.48, 120297.67, 117378.02, 114527.58, 111744.49, \
109027.01, 106373.41, 103781.99, 101262.79, 98789.008, 96373.047, 94013.438, 91708.680, 89457.398, 87258.211, \
85109.805, 83010.930, 80960.391, 78956.891, 76999.320, 75086.586, 73217.594, 71391.312, 69606.703, 67862.789, \
66158.609, 64493.254, 62865.801, 61275.387, 59728.344, 58208.258, 56722.930, 55271.520, 53853.266, 52467.410, \
51113.223, 49789.961, 48496.941, 47233.500, 45998.977, 44792.723, 43614.117, 42462.578, 41337.504, 40238.328, \
39164.488, 38115.469, 37090.699, 36089.668, 35111.887, 34156.848, 33228.004, 32316.406, 31426.256, 30557.111, \
29708.504, 28880.010, 28071.193, 27281.650, 26510.949, 25758.721, 25024.562, 24308.115, 23608.990, 22926.832, \
22261.293, 21612.029, 20978.699, 20360.971, 19758.527, 19171.037, 18598.217, 18039.732, 17495.309, 16966.436, \
16448.930, 15944.685, 15453.382, 14974.762, 14508.550, 14054.481, 13612.296, 13181.744, 12762.577, 12354.543, \
11957.408, 11570.935, 11194.892, 10829.060, 10473.206, 10127.119, 9790.5850, 9463.3916, 9145.3301, 8836.2021, \
8535.8027, 8243.9434, 7961.2437, 7685.7393, 7418.2314, 7158.5264, 6906.4458, 6661.8105, 6424.4482, 6194.1807, \
5970.8477, 5754.2710, 5544.2944, 5340.7573, 5143.5054, 4952.3828, 4767.2373, 4587.9229, 4414.2944, 4246.2085, \
4083.5212, 3926.0977, 3773.8032, 3626.5049, 3484.0715, 3346.3752, 3213.5771, 3084.9297, 2960.6602, 2840.6472, \
2724.7744, 2612.9258, 2504.9900, 2400.8569, 2300.4167, 2203.5654, 2110.1995, 2020.2166, 1933.5188, 1850.0120, \
1769.5944, 1692.1769, 1617.6688, 1545.9810, 1477.0260, 1410.7202, 1346.9801, 1285.7245, 1226.8739, 1170.3518, \
1116.1688, 1064.0614, 1014.0633, 966.10516, 920.11682, 876.03217, 833.78497, 793.31201, 754.55164, 717.44275, \
681.92755, 647.94806, 615.44952, 584.37762, 554.67981, 526.30505, 499.20432, 473.32895, 448.63220, 425.07007, \
402.59656, 381.16980, 360.74893, 341.31854, 322.78470, 305.14084, 288.35059, 272.37881, 257.19098, 242.75432, \
229.03673, 216.00752, 203.63695, 191.89633])
s=s/1000.0
PyBDSF
PyBDSF-master/bdsf/functions.py
# some functions
from __future__ import print_function
from __future__ import absolute_import
try:
# For Python 2
basestring = basestring
except NameError:
basestring = str
def poly(c,x):
""" y = Sum { c(i)*x^i }, i=0,len(c)"""
import numpy as N
y=N.zeros(len(x))
for i in range(len(c)):
y += c[i]*(x**i)
return y
def sp_in(c, x):
""" Spectral index in freq-flux space """
import numpy as N
order = len(c)-1
if order == 1:
y = c[0]*N.power(x, c[1])
else:
if order == 2:
y = c[0]*N.power(x, c[1])*N.power(x, c[2]*N.log(x))
else:
print('Not yet implemented')
y = None
return y
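# Minimal usage sketch (illustrative numbers): for order 1, sp_in evaluates the
# power law S = c[0] * x**c[1]; for order 2 a curvature term is added.
#
#   import numpy as N
#   x = N.array([1.0, 1.5, 2.0])  # frequencies in units of the reference freq
#   flux = sp_in([10.0, -0.7], x) # 10 Jy at x=1 with spectral index -0.7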
def wenss_fit(c,x):
""" sqrt(c0*c0 + c1^2/x^2)"""
import numpy as N
y = N.sqrt(c[0]*c[0]+c[1]*c[1]/(x*x))
return y
def nanmean(x):
""" Mean of array with NaN """
import numpy as N
sum = N.nansum(x)
n = N.sum(~N.isnan(x))
if n > 0:
mean = sum/n
else:
mean = float("NaN")
return mean
def shapeletfit(cf, Bset, cfshape):
""" The function """
import numpy as N
ordermax = Bset.shape[0]
y = (Bset[0,0,::]).flatten()
y = N.zeros(y.shape)
index = [(i,j) for i in range(ordermax) for j in range(ordermax-i)] # i=0->nmax, j=0-nmax-i
for coord in index:
linbasis = (Bset[coord[0], coord[1], ::]).flatten()
y += cf.reshape(cfshape)[coord]*linbasis
return y
def func_poly2d(ord,p,x,y):
""" 2d polynomial.
ord=0 : z=p[0]
ord=1 : z=p[0]+p[1]*x+p[2]*y
ord=2 : z=p[0]+p[1]*x+p[2]*y+p[3]*x*x+p[4]*y*y+p[5]*x*y
ord=3 : z=p[0]+p[1]*x+p[2]*y+p[3]*x*x+p[4]*y*y+p[5]*x*y+
p[6]*x*x*x+p[7]*x*x*y+p[8]*x*y*y+p[9]*y*y*y"""
if ord == 0:
z=p[0]
if ord == 1:
z=p[0]+p[1]*x+p[2]*y
if ord == 2:
z=p[0]+p[1]*x+p[2]*y+p[3]*x*x+p[4]*y*y+p[5]*x*y
if ord == 3:
z=p[0]+p[1]*x+p[2]*y+p[3]*x*x+p[4]*y*y+p[5]*x*y+\
p[6]*x*x*x+p[7]*x*x*y+p[8]*x*y*y+p[9]*y*y*y
if ord > 3:
print(" We do not trust polynomial fits > 3 ")
z = None
return z
def func_poly2d_ini(ord, av):
""" Initial guess -- assume flat plane. """
if ord == 0:
p0 = N.asarray([av])
if ord == 1:
p0 = N.asarray([av] + [0.0]*2)
if ord == 2:
p0 = N.asarray([av] + [0.0]*5)
if ord == 3:
p0 = N.asarray([av] + [0.0]*9)
if ord > 3:
p0 = None
return p0
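# Minimal usage sketch (illustrative values): evaluate a first-order plane with
# func_poly2d and build the matching starting guess with func_poly2d_ini.
#
#   import numpy as N
#   x, y = N.indices((3, 3))
#   z = func_poly2d(1, [1.0, 0.5, -0.2], x, y)  # z = 1 + 0.5*x - 0.2*y
#   p0 = func_poly2d_ini(1, N.mean(z))          # [mean, 0.0, 0.0]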
def ilist(x):
""" integer part of a list of floats. """
fn = lambda x : [int(round(i)) for i in x]
return fn(x)
def cart2polar(cart, cen):
""" convert cartesian coordinates to polar coordinates around cen. theta is
zero for +ve xaxis and goes counter clockwise. cart is a numpy array [x,y] where
x and y are numpy arrays of all the (>0) values of coordinates."""
import math
import numpy as N
polar = N.zeros(cart.shape)
pi = math.pi
rad = 180.0/pi
cc = N.transpose(cart)
cc = (cc-cen)*(cc-cen)
polar[0] = N.sqrt(N.sum(cc,1))
th = N.arctan2(cart[1]-cen[1],cart[0]-cen[0])*rad
polar[1] = N.where(th > 0, th, 360+th)
return polar
def polar2cart(polar, cen):
""" convert polar coordinates around cen to cartesian coordinates. theta is
zero for +ve xaxis and goes counter clockwise. polar is a numpy array of [r], [theta]
and cart is a numpy array [x,y] where x and y are numpy arrays of all the (>0)
values of coordinates."""
import math
import numpy as N
cart = N.zeros(polar.shape)
pi = math.pi
rad = 180.0/pi
cart[0]=polar[0]*N.cos(polar[1]/rad)+cen[0]
cart[1]=polar[0]*N.sin(polar[1]/rad)+cen[1]
return cart
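# Minimal round-trip sketch (illustrative coordinates): cart2polar and
# polar2cart invert each other about the chosen centre.
#
#   import numpy as N
#   cen = N.array([10.0, 10.0])
#   cart = N.array([[13.0, 10.0], [10.0, 14.0]])  # [x-array, y-array]
#   pol = cart2polar(cart, cen)    # r = [3, 4]; theta CCW from the +x axis
#   back = polar2cart(pol, cen)    # recovers cart to within rounding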
def gaus_pixval(g, pix):
""" Calculates the value at a pixel pix due to a gaussian object g. """
from .const import fwsig, pi
from math import sin, cos, exp
cen = g.centre_pix
peak = g.peak_flux
bmaj_p, bmin_p, bpa_p = g.size_pix
a4 = bmaj_p/fwsig; a5 = bmin_p/fwsig
a6 = (bpa_p+90.0)*pi/180.0
spa = sin(a6); cpa = cos(a6)
dr1 = ((pix[0]-cen[0])*cpa + (pix[1]-cen[1])*spa)/a4
dr2 = ((pix[1]-cen[1])*cpa - (pix[0]-cen[0])*spa)/a5
pixval = peak*exp(-0.5*(dr1*dr1+dr2*dr2))
return pixval
def atanproper(dumr, dx, dy):
from math import pi
ysign = (dy >= 0.0)
xsign = (dx >= 0.0)
if ysign and (not xsign): dumr = pi - dumr
if (not ysign) and (not xsign): dumr = pi + dumr
if (not ysign) and xsign: dumr = 2.0*pi - dumr
return dumr
def gdist_pa(pix1, pix2, gsize):
""" Computes FWHM in degrees in the direction towards second source, of an elliptical gaussian. """
from math import atan, pi, sqrt, cos, sin, tan
dx = pix2[0] - pix1[0]
dy = pix2[1] - pix1[1]
if dx == 0.0:
val = pi/2.0
else:
dumr = atan(abs(dy/dx))
val = atanproper(dumr, dx, dy)
psi = val - (gsize[2]+90.0)/180.0*pi
# convert angle to eccentric anomaly
if approx_equal(gsize[1], 0.0):
psi = pi/2.0
else:
psi=atan(gsize[0]/gsize[1]*tan(psi))
dumr2 = gsize[0]*cos(psi)
dumr3 = gsize[1]*sin(psi)
fwhm = sqrt(dumr2*dumr2+dumr3*dumr3)
return fwhm
def gaus_2d(c, x, y):
""" x and y are 2d arrays with the x and y positions. """
import math
import numpy as N
rad = 180.0/math.pi
cs = math.cos(c[5]/rad)
sn = math.sin(c[5]/rad)
f1 = ((x-c[1])*cs+(y-c[2])*sn)/c[3]
f2 = ((y-c[2])*cs-(x-c[1])*sn)/c[4]
val = c[0]*N.exp(-0.5*(f1*f1+f2*f2))
return val
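# Minimal usage sketch (illustrative parameters): evaluate a 2-D Gaussian with
# peak 1.0 at pixel (10, 12), sigmas of 3 and 2 pixels and an angle of 30 deg.
#
#   import numpy as N
#   x, y = N.indices((25, 25))
#   model = gaus_2d([1.0, 10.0, 12.0, 3.0, 2.0, 30.0], x, y)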
def gaus_2d_itscomplicated(c, x, y, p_tofix, ind):
""" x and y are 2d arrays with the x and y positions. c is a list (of lists) of gaussian parameters to fit, p_tofix
are gaussian parameters to fix. ind is a list with 0, 1; 1 = fit; 0 = fix. """
import math
import numpy as N
val = N.zeros(x.shape)
indx = N.array(ind)
if len(indx) % 6 != 0:
print(" Something wrong with the parameters passed - need multiples of 6 !")
else:
ngaus = int(len(indx)/6)
params = N.zeros(6*ngaus)
params[N.where(indx==1)[0]] = c
params[N.where(indx==0)[0]] = p_tofix
for i in range(ngaus):
gau = params[i*6:i*6+6]
val = val + gaus_2d(gau, x, y)
return val
def g2param(g, adj=False):
"""Convert gaussian object g to param list [amp, cenx, ceny, sigx, sigy, theta] """
from .const import fwsig
from math import pi
A = g.peak_flux
if adj and hasattr(g, 'size_pix_adj'):
sigx, sigy, th = g.size_pix_adj
else:
sigx, sigy, th = g.size_pix
cenx, ceny = g.centre_pix
sigx = sigx/fwsig; sigy = sigy/fwsig; th = th+90.0
params = [A, cenx, ceny, sigx, sigy, th]
return params
def g2param_err(g, adj=False):
"""Convert errors on gaussian object g to param list [Eamp, Ecenx, Eceny, Esigx, Esigy, Etheta] """
from .const import fwsig
from math import pi
A = g.peak_fluxE
if adj and hasattr(g, 'size_pix_adj'):
sigx, sigy, th = g.size_pix_adj
else:
sigx, sigy, th = g.size_pixE
cenx, ceny = g.centre_pixE
sigx = sigx/fwsig; sigy = sigy/fwsig
params = [A, cenx, ceny, sigx, sigy, th]
return params
def corrected_size(size):
""" convert major and minor axis from sigma to fwhm and angle from horizontal to P.A. """
from .const import fwsig
csize = [0,0,0]
csize[0] = size[0]*fwsig
csize[1] = size[1]*fwsig
bpa = size[2]
pa = bpa-90.0
pa = pa % 360
if pa < 0.0: pa = pa + 360.0
if pa > 180.0: pa = pa - 180.0
csize[2] = pa
return csize
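# Minimal sketch (illustrative numbers): convert a fitted size from
# (sigma_maj, sigma_min, theta) to (FWHM_maj, FWHM_min, PA), where
# fwsig = sqrt(8*ln 2) ~ 2.3548.
#
#   size_fwhm = corrected_size([2.0, 1.0, 100.0])
#   # -> [~4.71, ~2.35, 10.0]  (FWHMs in the input units, PA in degrees)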
def drawellipse(g):
import numpy as N
from .gausfit import Gaussian
rad = 180.0/N.pi
if isinstance(g, Gaussian):
param = g2param(g)
else:
if isinstance(g, list) and len(g)>=6:
param = g
else:
raise RuntimeError("Input to drawellipse neither Gaussian nor list")
size = [param[3], param[4], param[5]]
size_fwhm = corrected_size(size)
th=N.arange(0, 370, 10)
x1=size_fwhm[0]*N.cos(th/rad)
y1=size_fwhm[1]*N.sin(th/rad)
x2=x1*N.cos(param[5]/rad)-y1*N.sin(param[5]/rad)+param[1]
y2=x1*N.sin(param[5]/rad)+y1*N.cos(param[5]/rad)+param[2]
return x2, y2
def drawsrc(src):
import math
import numpy as N
import matplotlib.path as mpath
Path = mpath.Path
paths = []
xmin = []
xmax = []
ymin = []
ymax = []
ellx = []
elly = []
for indx, g in enumerate(src.gaussians):
gellx, gelly = drawellipse(g)
ellx += gellx.tolist()
elly += gelly.tolist()
yarr = N.array(elly)
minyarr = N.min(yarr)
maxyarr = N.max(yarr)
xarr = N.array(ellx)
for i in range(10):
inblock = N.where(yarr > minyarr + float(i)*(maxyarr-minyarr)/10.0)
yarr = yarr[inblock]
xarr = xarr[inblock]
inblock = N.where(yarr < minyarr + float(i+1)*(maxyarr-minyarr)/10.0)
xmin.append(N.min(xarr[inblock])-1.0)
xmax.append(N.max(xarr[inblock])+1.0)
ymin.append(N.mean(yarr[inblock]))
ymax.append(N.mean(yarr[inblock]))
xmax.reverse()
ymax.reverse()
pathdata = [(Path.MOVETO, (xmin[0], ymin[0]))]
for i in range(10):
pathdata.append((Path.LINETO, (xmin[i], ymin[i])))
pathdata.append((Path.CURVE3, (xmin[i], ymin[i])))
pathdata.append((Path.LINETO, ((xmin[9]+xmax[0])/2.0, (ymin[9]+ymax[0])/2.0+1.0)))
for i in range(10):
pathdata.append((Path.LINETO, (xmax[i], ymax[i])))
pathdata.append((Path.CURVE3, (xmax[i], ymax[i])))
pathdata.append((Path.LINETO, ((xmin[0]+xmax[9])/2.0, (ymin[0]+ymax[9])/2.0-1.0)))
pathdata.append((Path.CLOSEPOLY, (xmin[0], ymin[0])))
codes, verts = zip(*pathdata)
path = Path(verts, codes)
return path
def mask_fwhm(g, fac1, fac2, delc, shap):
""" take gaussian object g and make a mask (as True) for pixels which are outside (less flux)
fac1*FWHM and inside (more flux) fac2*FWHM. Also returns the values as well."""
import math
import numpy as N
from .const import fwsig
x, y = N.indices(shap)
params = g2param(g)
params[1] -= delc[0]; params[2] -= delc[1]
gau = gaus_2d(params, x, y)
dumr1 = 0.5*fac1*fwsig
dumr2 = 0.5*fac2*fwsig
flux1= params[0]*math.exp(-0.5*dumr1*dumr1)
flux2 = params[0]*math.exp(-0.5*dumr2*dumr2)
mask = (gau <= flux1) * (gau > flux2)
gau = gau * mask
return mask, gau
def flatten(x):
"""flatten(sequence) -> list
Taken from http://kogs-www.informatik.uni-hamburg.de/~meine/python_tricks
Returns a single, flat list which contains all elements retrieved
from the sequence and all recursively contained sub-sequences
(iterables).
Examples:
>>> [1, 2, [3,4], (5,6)]
[1, 2, [3, 4], (5, 6)]
>>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, MyVector(8,9,10)])
[1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]"""
result = []
for el in x:
#if isinstance(el, (list, tuple)):
if hasattr(el, "__iter__") and not isinstance(el, basestring):
result.extend(flatten(el))
else:
result.append(el)
return result
def moment(x,mask=None):
"""
Calculates first 3 moments of numpy array x. Only those values of x
for which mask is False are used, if mask is given. Works for any
dimension of x.
"""
import numpy as N
if mask is None:
mask=N.zeros(x.shape, dtype=bool)
m1=N.zeros(1)
m2=N.zeros(x.ndim)
m3=N.zeros(x.ndim)
for i, val in N.ndenumerate(x):
if not mask[i]:
m1 += val
m2 += val*N.array(i)
m3 += val*N.array(i)*N.array(i)
m2 /= m1
if N.all(m3/m1 > m2*m2):
m3 = N.sqrt(m3/m1-m2*m2)
return m1, m2, m3
def fit_mask_1d(x, y, sig, mask, funct, do_err, order=0, p0 = None):
"""
Calls scipy.optimise.leastsq for a 1d function with a mask.
Takes values only where mask=False.
"""
from scipy.optimize import leastsq
from math import sqrt, pow
import numpy as N
import sys
ind=N.where(~N.array(mask))[0]
if len(ind) > 1:
n=sum(mask)
if isinstance(x, list): x = N.array(x)
if isinstance(y, list): y = N.array(y)
if isinstance(sig, list): sig = N.array(sig)
xfit=x[ind]; yfit=y[ind]; sigfit=sig[ind]
if p0 is None:
if funct == poly:
p0=N.array([0]*(order+1))
p0[1]=(yfit[0]-yfit[-1])/(xfit[0]-xfit[-1])
p0[0]=yfit[0]-p0[1]*xfit[0]
if funct == wenss_fit:
p0=N.array([yfit[N.argmax(xfit)]] + [1.])
if funct == sp_in:
ind1 = N.where(yfit > 0.)[0]
if len(ind1) >= 2:
low = ind1[0]; hi = ind1[-1]
sp = N.log(yfit[low]/yfit[hi])/N.log(xfit[low]/xfit[hi])
p0=N.array([yfit[low]/pow(xfit[low], sp), sp] + [0.]*(order-1))
elif len(ind1) == 1:
p0=N.array([ind1[0], -0.8] + [0.]*(order-1))
else:
return [0, 0], [0, 0]
res=lambda p, xfit, yfit, sigfit: (yfit-funct(p, xfit))/sigfit
try:
(p, cov, info, mesg, flag)=leastsq(res, p0, args=(xfit, yfit, sigfit), full_output=True, warning=False)
except TypeError:
# This error means no warning argument is available, so redirect stdout to a null device
# to suppress printing of (unnecessary) warning messages
original_stdout = sys.stdout # keep a reference to STDOUT
sys.stdout = NullDevice() # redirect the real STDOUT
(p, cov, info, mesg, flag)=leastsq(res, p0, args=(xfit, yfit, sigfit), full_output=True)
sys.stdout = original_stdout # turn STDOUT back on
if do_err:
if cov is not None:
if N.sum(sig != 1.) > 0:
err = N.array([sqrt(abs(cov[i,i])) for i in range(len(p))])
else:
chisq=sum(info["fvec"]*info["fvec"])
dof=len(info["fvec"])-len(p)
err = N.array([sqrt(abs(cov[i,i])*chisq/dof) for i in range(len(p))])
else:
p, err = [0, 0], [0, 0]
else: err = [0]
else:
p, err = [0, 0], [0, 0]
return p, err
def dist_2pt(p1, p2):
""" Calculated distance between two points given as tuples p1 and p2. """
from math import sqrt
dx=p1[0]-p2[0]
dy=p1[1]-p2[1]
dist=sqrt(dx*dx + dy*dy)
return dist
def angsep(ra1, dec1, ra2, dec2):
"""Returns angular separation between two coordinates (all in degrees)"""
import math
const = math.pi/180.
ra1 = ra1*const
rb1 = dec1*const
ra2 = ra2*const
rb2 = dec2*const
v1_1 = math.cos(ra1)*math.cos(rb1)
v1_2 = math.sin(ra1)*math.cos(rb1)
v1_3 = math.sin(rb1)
v2_1 = math.cos(ra2)*math.cos(rb2)
v2_2 = math.sin(ra2)*math.cos(rb2)
v2_3 = math.sin(rb2)
w = ( (v1_1-v2_1)**2 + (v1_2-v2_2)**2 + (v1_3-v2_3)**2 )/4.0
x = math.sqrt(w)
y = math.sqrt(max(0.0, 1.0-w))
angle = 2.0*math.atan2(x, y)/const
return angle
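# Minimal sketch: two positions at the same RA but one degree apart in Dec
# (illustrative coordinates) are separated by exactly one degree.
#
#   sep = angsep(150.0, 2.0, 150.0, 3.0)  # -> 1.0 (degrees)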
def std(y):
""" Returns unbiased standard deviation. """
from math import sqrt
import numpy as N
l=len(y)
s=N.std(y)
if l == 1:
return s
else:
return s*sqrt(float(l)/(l-1))
def imageshift(image, shift):
""" Shifts a 2d-image by the tuple (shift). Positive shift is to the right and upwards.
This is done by fourier shifting. """
import scipy.fft
from scipy import ndimage
shape=image.shape
f1=scipy.fft.fft(image, shape[0], axis=0)
f2=scipy.fft.fft(f1, shape[1], axis=1)
s=ndimage.fourier_shift(f2,shift, axis=0)
y1=scipy.fft.ifft(s, shape[1], axis=1)
y2=scipy.fft.ifft(y1, shape[0], axis=0)
return y2.real
def trans_gaul(q):
" transposes a tuple "
y=[]
if len(q) > 0:
for i in range(len(q[0])):
elem=[]
for j in range(len(q)):
elem.append(q[j][i])
y.append(elem)
return y
def momanalmask_gaus(subim, mask, isrc, bmar_p, allpara=True):
""" Compute 2d gaussian parameters from moment analysis, for an island with
multiple gaussians. Compute only for gaussian with index (mask value) isrc.
Returns normalised peak, centroid, fwhm and P.A. assuming North is top.
"""
from math import sqrt, atan, pi
from .const import fwsig
import numpy as N
N.seterr(all='ignore')
m1 = N.zeros(2); m2 = N.zeros(2); m11 = 0.0; tot = 0.0
mompara = N.zeros(6)
n, m = subim.shape[0], subim.shape[1]
index = [(i, j) for i in range(n) for j in range(m) if mask[i,j]==isrc]
for coord in index:
tot += subim[coord]
m1 += N.array(coord)*subim[coord]
mompara[0] = tot/bmar_p
mompara[1:3] = m1/tot
if allpara:
for coord in index:
co = N.array(coord)
m2 += (co - mompara[1:3])*(co - mompara[1:3])*subim[coord]
m11 += N.product(co - mompara[1:3])*subim[coord]
mompara[3] = sqrt((m2[0]+m2[1]+sqrt((m2[0]-m2[1])*(m2[0]-m2[1])+4.0*m11*m11))/(2.0*tot))*fwsig
mompara[4] = sqrt((m2[0]+m2[1]-sqrt((m2[0]-m2[1])*(m2[0]-m2[1])+4.0*m11*m11))/(2.0*tot))*fwsig
dumr = atan(abs(2.0*m11/(m2[0]-m2[1])))
dumr = atanproper(dumr, m2[0]-m2[1], 2.0*m11)
mompara[5] = 0.5*dumr*180.0/pi - 90.0
if mompara[5] < 0.0: mompara[5] += 180.0
return mompara
def fit_gaus2d(data, p_ini, x, y, mask = None, err = None):
""" Fit 2d gaussian to data with x and y also being 2d numpy arrays with x and y positions.
Takes an optional error array and a mask array (True => pixel is masked). """
from scipy.optimize import leastsq
import numpy as N
import sys
if mask is not None and mask.shape != data.shape:
print("Data and mask array don't have the same shape, ignoring mask")
mask = None
if err is not None and err.shape != data.shape:
print("Data and error array don't have the same shape, ignoring error")
err = None
if mask is None: mask = N.zeros(data.shape, bool)
g_ind = N.where(~N.ravel(mask))[0]
if err is None:
errorfunction = lambda p: N.ravel(gaus_2d(p, x, y) - data)[g_ind]
else:
errorfunction = lambda p: N.ravel((gaus_2d(p, x, y) - data)/err)[g_ind]
try:
p, success = leastsq(errorfunction, p_ini, warning=False)
except TypeError:
# This error means no warning argument is available, so redirect stdout to a null device
# to suppress printing of warning messages
original_stdout = sys.stdout # keep a reference to STDOUT
sys.stdout = NullDevice() # redirect the real STDOUT
p, success = leastsq(errorfunction, p_ini)
sys.stdout = original_stdout # turn STDOUT back on
return p, success
def deconv(gaus_bm, gaus_c):
""" Deconvolves gaus_bm from gaus_c to give gaus_dc.
Stolen shamelessly from aips DECONV.FOR.
All PA is in degrees."""
from math import pi, cos, sin, atan, sqrt
rad = 180.0/pi
gaus_d = [0.0, 0.0, 0.0]
phi_c = gaus_c[2]+900.0 % 180
phi_bm = gaus_bm[2]+900.0 % 180
maj2_bm = gaus_bm[0]*gaus_bm[0]; min2_bm = gaus_bm[1]*gaus_bm[1]
maj2_c = gaus_c[0]*gaus_c[0]; min2_c = gaus_c[1]*gaus_c[1]
theta=2.0*(phi_c-phi_bm)/rad
cost = cos(theta)
sint = sin(theta)
rhoc = (maj2_c-min2_c)*cost-(maj2_bm-min2_bm)
if rhoc == 0.0:
sigic = 0.0
rhoa = 0.0
else:
sigic = atan((maj2_c-min2_c)*sint/rhoc) # in radians
rhoa = ((maj2_bm-min2_bm)-(maj2_c-min2_c)*cost)/(2.0*cos(sigic))
gaus_d[2] = sigic*rad/2.0+phi_bm
dumr = ((maj2_c+min2_c)-(maj2_bm+min2_bm))/2.0
gaus_d[0] = dumr-rhoa
gaus_d[1] = dumr+rhoa
error = 0
if gaus_d[0] < 0.0: error += 1
if gaus_d[1] < 0.0: error += 1
gaus_d[0] = max(0.0,gaus_d[0])
gaus_d[1] = max(0.0,gaus_d[1])
gaus_d[0] = sqrt(abs(gaus_d[0]))
gaus_d[1] = sqrt(abs(gaus_d[1]))
if gaus_d[0] < gaus_d[1]:
sint = gaus_d[0]
gaus_d[0] = gaus_d[1]
gaus_d[1] = sint
gaus_d[2] = gaus_d[2]+90.0
gaus_d[2] = gaus_d[2]+900.0 % 180
if gaus_d[0] == 0.0:
gaus_d[2] = 0.0
else:
if gaus_d[1] == 0.0:
if (abs(gaus_d[2]-phi_c) > 45.0) and (abs(gaus_d[2]-phi_c) < 135.0):
gaus_d[2] = gaus_d[2]+450.0 % 180
# errors
#if rhoc == 0.0:
#if gaus_d[0] != 0.0:
# ed_1 = gaus_c[0]/gaus_d[0]*e_1
#else:
# ed_1 = sqrt(2.0*e_1*gaus_c[0])
#if gaus_d[1] != 0.0:
# ed_2 = gaus_c[1]/gaus_d[1]*e_2
#else:
# ed_2 = sqrt(2.0*e_2*gaus_c[1])
#ed_3 =e_3
#else:
# pass
return gaus_d
def deconv2(gaus_bm, gaus_c):
""" Deconvolves gaus_bm from gaus_c to give gaus_dc.
Stolen shamelessly from Miriad gaupar.for.
All PA is in degrees.
Returns deconvolved gaussian parameters and flag:
0 All OK.
1 Result is pretty close to a point source.
2 Illegal result.
"""
from math import pi, cos, sin, atan2, sqrt
rad = 180.0/pi
phi_c = gaus_c[2]+900.0 % 180.0
phi_bm = gaus_bm[2]+900.0 % 180.0
theta1 = phi_c / rad
theta2 = phi_bm / rad
bmaj1 = gaus_c[0]
bmaj2 = gaus_bm[0]
bmin1 = gaus_c[1]
bmin2 = gaus_bm[1]
alpha = ( (bmaj1*cos(theta1))**2 + (bmin1*sin(theta1))**2 -
(bmaj2*cos(theta2))**2 - (bmin2*sin(theta2))**2 )
beta = ( (bmaj1*sin(theta1))**2 + (bmin1*cos(theta1))**2 -
(bmaj2*sin(theta2))**2 - (bmin2*cos(theta2))**2 )
gamma = 2.0 * ( (bmin1**2-bmaj1**2)*sin(theta1)*cos(theta1) -
(bmin2**2-bmaj2**2)*sin(theta2)*cos(theta2) )
s = alpha + beta
t = sqrt((alpha-beta)**2 + gamma**2)
limit = min(bmaj1, bmin1, bmaj2, bmin2)
limit = 0.1*limit*limit
if alpha < 0.0 or beta < 0.0 or s < t:
if alpha < 0.0 or beta < 0.0:
bmaj = 0.0
bpa = 0.0
else:
bmaj = sqrt(0.5*(s+t))
bpa = rad * 0.5 * atan2(-gamma, alpha-beta)
bmin = 0.0
if 0.5*(s-t) < limit and alpha > -limit and beta > -limit:
ifail = 1
else:
ifail = 2
else:
bmaj = sqrt(0.5*(s+t))
bmin = sqrt(0.5*(s-t))
if abs(gamma) + abs(alpha-beta) == 0.0:
bpa = 0.0
else:
bpa = rad * 0.5 * atan2(-gamma, alpha-beta)
ifail = 0
return (bmaj, bmin, bpa), ifail
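# Minimal usage sketch (illustrative sizes; major/minor axes in the same units,
# position angles in degrees): deconvolve a circular 5-unit beam from a fitted
# 6.0 x 5.5 source.
#
#   (dmaj, dmin, dpa), ifail = deconv2((5.0, 5.0, 0.0), (6.0, 5.5, 30.0))
#   # ifail: 0 = OK, 1 = close to a point source, 2 = illegal result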
def get_errors(img, p, stdav, bm_pix=None, fixed_to_beam=False):
""" Returns errors on the fitted Gaussian parameters, using the
equations from Condon 1997 (PASP, 109, 166) and Condon et al.
1998 (ApJ, 115, 1693)
Parameters:
img: Image object (needed for pixel beam info)
p: list of Gaussian parameters: [peak, x0, y0, maj, min, pa, tot]
stdav: estimate of the image noise at the Gaussian's position
bm_pix: optional pixel beam to be used instead of that in img
fixed_to_beam: True if the fits were done with the
size fixed to that of the beam, False otherwise
Returned list includes errors on:
peak flux [Jy/beam]
x_0 [pix]
y_0 [pix]
e_maj [pix]
e_min [pix]
e_pa [deg]
e_tot [Jy]
"""
from .const import fwsig
from math import sqrt, log, pow, pi
from . import mylogger
import numpy as N
mylog = mylogger.logging.getLogger("PyBDSM.Compute")
if len(p) % 7 > 0:
mylog.error("Gaussian parameters passed have to have 7n numbers")
ngaus = int(len(p)/7)
errors = []
for i in range(ngaus):
pp = p[i*7:i*7+7]
### Now do error analysis as in Condon (and fBDSM)
size = pp[3:6]
size = corrected_size(size) # angle is now degrees CCW from +y-axis
if size[0] == 0.0 or size[1] == 0.0:
errors = errors + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
else:
sq2 = sqrt(2.0)
if bm_pix is None:
bm_pix = N.array([img.pixel_beam()[0]*fwsig, img.pixel_beam()[1]*fwsig, img.pixel_beam()[2]])
dumr = sqrt(abs(size[0] * size[1] / (4.0 * bm_pix[0] * bm_pix[1]))) # first term of Eq. 26 of Condon+ (1998)
dumrr1 = 1.0 + bm_pix[0] * bm_pix[1] / (size[0] * size[0]) # second term of Eq. 26 of Condon+ (1998)
dumrr2 = 1.0 + bm_pix[0] * bm_pix[1] / (size[1] * size[1]) # third term of Eq. 26 of Condon+ (1998)
dumrr3 = dumr * pp[0] / stdav # product of first and fourth terms of Eq. 26 of Condon+ (1998)
d1 = sqrt(8.0 * log(2.0))
d2 = (size[0] * size[0] - size[1] * size[1]) / (size[0] * size[1]) # last term of Eq. 30 of Condon+ (1998)
try:
# The following three errors are calculated using Eq. 21 of Condon (1997),
# using Eq. 26 of Condon+ (1998) for rho
e_peak = pp[0] * sq2 / (dumrr3 * pow(dumrr1, 0.75) * pow(dumrr2, 0.75))
e_maj = size[0] * sq2 / (dumrr3 * pow(dumrr1, 1.25) * pow(dumrr2, 0.25))
e_min = size[1] * sq2 / (dumrr3 * pow(dumrr1, 0.25) * pow(dumrr2, 1.25))
# The following two errors are calculated using Eq. 27 of Condon+ (1998)
pa_rad = size[2] * pi / 180.0
e_x0 = sqrt( (e_maj * N.sin(pa_rad))**2 + (e_min * N.cos(pa_rad))**2 ) / d1
e_y0 = sqrt( (e_maj * N.cos(pa_rad))**2 + (e_min * N.sin(pa_rad))**2 ) / d1
# The following error is calculated using Eq. 30 of Condon+ (1998)
e_pa = 2.0 / (d2 * dumrr3 * pow(dumrr1, 0.25) * pow(dumrr2, 1.25))
e_pa = e_pa * 180.0/pi
# The following error is calculated using Eq. 36 of Condon+ (1998)
e_tot = pp[6] * sqrt(e_peak * e_peak / (pp[0] * pp[0]) + (0.25 / dumr / dumr) *
(e_maj * e_maj / (size[0] * size[0]) + e_min * e_min / (size[1] * size[1])))
except:
e_peak = 0.0
e_x0 = 0.0
e_y0 = 0.0
e_maj = 0.0
e_min = 0.0
e_pa = 0.0
e_tot = 0.0
if abs(e_pa) > 180.0:
e_pa = 180.0
if fixed_to_beam:
# When the size was fixed to that of the beam during the fit, set
# uncertainties on the size to zero and reduce the error in the fluxes
# by sqrt(2) (see Eq. 25 of Condon 1997)
e_maj = 0.0
e_min = 0.0
e_pa = 0.0
e_peak /= sq2
e_tot /= sq2
errors = errors + [e_peak, e_x0, e_y0, e_maj, e_min, e_pa, e_tot]
return errors
def fit_chisq(x, p, ep, mask, funct, order):
import numpy as N
ind = N.where(N.array(mask)==False)[0]
if order == 0:
fit = [funct(p)]*len(p)
else:
fitpara, efit = fit_mask_1d(x, p, ep, mask, funct, True, order)
fit = funct(fitpara, x)
dev = (p-fit)*(p-fit)/(ep*ep)
num = order+1
csq = N.sum(dev[ind])/(len(fit)-num-1)
return csq
def calc_chisq(x, y, ey, p, mask, funct, order):
import numpy as N
if order == 0:
fit = [funct(y)]*len(y)
else:
fit = funct(p, x)
dev = (y-fit)*(y-fit)/(ey*ey)
ind = N.where(~N.array(mask))
num = order+1
csq = N.sum(dev[ind])/(len(mask)-num-1)
return csq
def get_windowsize_av(S_i, rms_i, chanmask, K, minchan):
import numpy as N
av_window = N.arange(2, int(len(S_i)/minchan)+1)
win_size = 0
for window in av_window:
fluxes, vars, mask = variance_of_wted_windowedmean(S_i, rms_i, chanmask, window)
minsnr = N.min(fluxes[~mask]/vars[~mask])
if minsnr > K*1.1: ### K*1.1 since fitted peak can be less than wted peak
win_size = window # is the size of averaging window
break
return win_size
def variance_of_wted_windowedmean(S_i, rms_i, chanmask, window_size):
from math import sqrt
import numpy as N
nchan = len(S_i)
nwin = int(nchan/window_size)
wt = 1/rms_i/rms_i
wt = wt/N.median(wt)
fluxes = N.zeros(nwin); vars = N.zeros(nwin); mask = N.zeros(nwin, bool)
for i in range(nwin):
strt = i*window_size; stp = (i+1)*window_size
if i == nwin-1: stp = nchan
ind = N.arange(strt,stp)
m = chanmask[ind]
index = [arg for ii,arg in enumerate(ind) if not m[ii]]
if len(index) > 0:
s = S_i[index]; r = rms_i[index]; w = wt[index]
fluxes[i] = N.sum(s*w)/N.sum(w)
vars[i] = 1.0/sqrt(N.sum(1.0/r/r))
mask[i] = N.product(m)
else:
fluxes[i] = 0
vars[i] = 0
mask[i] = True
return fluxes, vars, mask
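# Minimal sketch (synthetic channel data): find the smallest averaging window
# for which every averaged channel comfortably exceeds the target SNR K.
#
#   import numpy as N
#   S_i = N.full(64, 2.0e-3)      # per-channel flux densities (Jy)
#   rms_i = N.full(64, 1.0e-3)    # per-channel rms (Jy)
#   chanmask = N.zeros(64, bool)  # no channels flagged
#   win = get_windowsize_av(S_i, rms_i, chanmask, 3.0, 4)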
def fit_mulgaus2d(image, gaus, x, y, mask = None, fitfix = None, err = None, adj=False):
""" fitcode : 0=fit all; 1=fit amp; 2=fit amp, posn; 3=fit amp, size """
from scipy.optimize import leastsq
import numpy as N
import sys
if mask is not None and mask.shape != image.shape:
print("Data and mask array don't have the same shape, ignoring mask")
mask = None
if err is not None and err.shape != image.shape:
print("Data and error array don't have the same shape, ignoring error")
err = None
if mask is None: mask = N.zeros(image.shape, bool)
g_ind = N.where(~N.ravel(mask))[0]
ngaus = len(gaus)
if ngaus > 0:
p_ini = []
for g in gaus:
p_ini = p_ini + g2param(g, adj)
p_ini = N.array(p_ini)
if fitfix is None: fitfix = [0]*ngaus
ind = N.ones(6*ngaus) # 1 => fit ; 0 => fix
for i in range(ngaus):
if fitfix[i] == 1: ind[i*6+1:i*6+6] = 0
if fitfix[i] == 2: ind[i*6+3:i*6+6] = 0
if fitfix[i] == 3: ind[i*6+1:i*6+3] = 0
ind = N.array(ind)
p_tofit = p_ini[N.where(ind==1)[0]]
p_tofix = p_ini[N.where(ind==0)[0]]
if err is None: err = N.ones(image.shape)
errorfunction = lambda p, x, y, p_tofix, ind, image, err, g_ind: \
N.ravel((gaus_2d_itscomplicated(p, x, y, p_tofix, ind)-image)/err)[g_ind]
try:
p, success = leastsq(errorfunction, p_tofit, args=(x, y, p_tofix, ind, image, err, g_ind))
except TypeError:
# This error means no warning argument is available, so redirect stdout to a null device
# to suppress printing of warning messages
original_stdout = sys.stdout # keep a reference to STDOUT
sys.stdout = NullDevice() # redirect the real STDOUT
p, success = leastsq(errorfunction, p_tofit, args=(x, y, p_tofix, ind, image, err, g_ind))
sys.stdout = original_stdout # turn STDOUT back on
else:
p, success = None, 1
para = N.zeros(6*ngaus)
para[N.where(ind==1)[0]] = p
para[N.where(ind==0)[0]] = p_tofix
for igaus in range(ngaus):
para[igaus*6+3] = abs(para[igaus*6+3])
para[igaus*6+4] = abs(para[igaus*6+4])
return para, success
def gaussian_fcn(g, x1, x2):
"""Evaluate Gaussian on the given grid.
Parameters:
x1, x2: grid (as produced by numpy.mgrid f.e.)
g: Gaussian object or list of Gaussian paramters
"""
from math import radians, sin, cos
from .const import fwsig
import numpy as N
if isinstance(g, list):
A, C1, C2, S1, S2, Th = g
else:
A = g.peak_flux
C1, C2 = g.centre_pix
S1, S2, Th = g.size_pix
S1 = S1/fwsig; S2 = S2/fwsig; Th = Th + 90.0 # Define theta = 0 on x-axis
th = radians(Th)
cs = cos(th)
sn = sin(th)
f1 = ((x1-C1)*cs + (x2-C2)*sn)/S1
f2 = (-(x1-C1)*sn + (x2-C2)*cs)/S2
return A*N.exp(-(f1*f1 + f2*f2)/2)
def mclean(im1, c, beam):
""" Simple image plane clean of one gaussian at posn c and size=beam """
import numpy as N
amp = im1[c]
b1, b2, b3 = beam
b3 += 90.0
para = [amp, c[0], c[1], b1, b2, b3]
x, y = N.indices(im1.shape)
im = gaus_2d(para, x, y)
im1 = im1-im
return im1
def arrstatmask(im, mask):
""" Basic statistics for a masked array. dont wanna use numpy.ma """
import numpy as N
ind = N.where(~mask)
im1 = im[ind]
av = N.mean(im1)
std = N.std(im1)
maxv = N.max(im1)
x, y = N.where(im == maxv)
xmax = x[0]; ymax = y[0]
minv = N.min(im1)
x, y = N.where(im == minv)
xmin = x[0]; ymin = y[0]
return (av, std, maxv, (xmax, ymax), minv, (xmin, ymin))
def get_maxima(im, mask, thr, shape, beam, im_pos=None):
""" Gets the peaks in an image """
from copy import deepcopy as cp
import numpy as N
if im_pos is None:
im_pos = im
im1 = cp(im)
ind = N.array(N.where(~mask)).transpose()
ind = [tuple(coord) for coord in ind if im_pos[tuple(coord)] > thr]
n, m = shape
iniposn = []
inipeak = []
for c in ind:
goodlist = [im_pos[i,j] for i in range(c[0]-1,c[0]+2) for j in range(c[1]-1,c[1]+2) \
if i>=0 and i<n and j>=0 and j<m and (i,j) != c]
peak = N.sum(im_pos[c] > goodlist) == len(goodlist)
if peak:
iniposn.append(c)
inipeak.append(im[c])
im1 = mclean(im1, c, beam)
return inipeak, iniposn, im1
def watershed(image, mask=None, markers=None, beam=None, thr=None):
import numpy as N
from copy import deepcopy as cp
import scipy.ndimage as nd
#import matplotlib.pyplot as pl
#import pylab as pl
if thr is None: thr = -1e9
if mask is None: mask = N.zeros(image.shape, bool)
if beam is None: beam = (2.0, 2.0, 0.0)
if markers is None:
inipeak, iniposn, im1 = get_maxima(image, mask, thr, image.shape, beam)
ng = len(iniposn); markers = N.zeros(image.shape, int)
for i in range(ng): markers[iniposn[i]] = i+2
markers[N.unravel_index(N.argmin(image), image.shape)] = 1
im1 = cp(image)
if im1.min() < 0.: im1 = im1-im1.min()
im1 = 255 - im1/im1.max()*255
opw = nd.watershed_ift(N.array(im1, N.uint16), markers)
return opw, markers
def get_kwargs(kwargs, key, typ, default):
obj = True
if key in kwargs:
obj = kwargs[key]
if not isinstance(obj, typ):
obj = default
return obj
def read_image_from_file(filename, img, indir, quiet=False):
""" Reads data and header from indir/filename.
We can use either pyfits or python-casacore depending on the value
of img.use_io = 'fits'/'rap'
PyFITS is required, as it is used to standardize the header format. python-casacore
is optional.
"""
from . import mylogger
import os
import numpy as N
from copy import deepcopy as cp
from distutils.version import StrictVersion
import warnings
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Readfile")
if indir is None or indir == './':
prefix = ''
else:
prefix = indir + '/'
image_file = prefix + filename
# Check that file exists
if not os.path.exists(image_file):
img._reason = 'File does not exist'
return None
# If img.use_io is set, then use appropriate io module
if img.use_io != '':
if img.use_io == 'fits':
try:
from astropy.io import fits as pyfits
old_pyfits = False
use_sections = True
except ImportError as err:
import pyfits
if StrictVersion(pyfits.__version__) < StrictVersion('2.2'):
old_pyfits = True
use_sections = False
elif StrictVersion(pyfits.__version__) < StrictVersion('2.4'):
old_pyfits = False
use_sections = False
else:
old_pyfits = False
use_sections = True
try:
if not old_pyfits:
fits = pyfits.open(image_file, mode="readonly", ignore_missing_end=True)
else:
fits = pyfits.open(image_file, mode="readonly")
except IOError as err:
img._reason = 'Problem reading file.\nOriginal error: {0}'.format(str(err))
return None
if img.use_io == 'rap':
import casacore.images as pim
try:
inputimage = pim.image(image_file)
except IOError as err:
img._reason = 'Problem reading file.\nOriginal error: {0}'.format(str(err))
return None
else:
# Simple check of whether casacore and pyfits are available
# We need pyfits version 2.2 or greater to use the
# "ignore_missing_end" argument to pyfits.open().
try:
try:
from astropy.io import fits as pyfits
old_pyfits = False
use_sections = True
except ImportError as err:
import pyfits
if StrictVersion(pyfits.__version__) < StrictVersion('2.2'):
old_pyfits = True
use_sections = False
elif StrictVersion(pyfits.__version__) < StrictVersion('2.4'):
old_pyfits = False
use_sections = False
else:
old_pyfits = False
use_sections = True
has_pyfits = True
except ImportError as err:
raise RuntimeError("Astropy or PyFITS is required.")
try:
import casacore.images as pim
has_casacore = True
except ImportError as err:
has_casacore = False
e_casacore = str(err)
# First assume image is a fits file, and use pyfits to open it (if
# available). If that fails, try to use casacore if available.
failed_read = False
reason = 0
try:
if not old_pyfits:
fits = pyfits.open(image_file, mode="readonly", ignore_missing_end=True)
else:
fits = pyfits.open(image_file, mode="readonly")
img.use_io = 'fits'
except IOError as err:
e_pyfits = str(err)
if has_casacore:
try:
inputimage = pim.image(image_file)
img.use_io = 'rap'
except IOError as err:
e_casacore = str(err)
failed_read = True
img._reason = 'File is not a valid FITS, CASA, or HDF5 image.'
else:
failed_read = True
e_casacore = "Casacore unavailable"
img._reason = 'Problem reading file.'
if failed_read:
img._reason += '\nOriginal error: {0}\n {1}'.format(e_pyfits, e_casacore)
return None
# Now that image has been read in successfully, get header (data is loaded
# later to take advantage of sectioning if trim_box is specified).
if not quiet:
mylogger.userinfo(mylog, "Opened '"+image_file+"'")
if img.use_io == 'rap':
tmpdir = os.path.join(img.outdir, img.parentname+'_tmp')
hdr = convert_casacore_header(inputimage, tmpdir)
coords = inputimage.coordinates()
img.coords_dict = coords.dict()
if 'telescope' in img.coords_dict:
img._telescope = img.coords_dict['telescope']
else:
img._telescope = None
if img.use_io == 'fits':
hdr = fits[0].header
img.coords_dict = None
if 'TELESCOP' in hdr:
img._telescope = hdr['TELESCOP']
else:
img._telescope = None
# Make sure data is in proper order. Final order is [pol, chan, x (RA), y (DEC)],
# so we need to rearrange dimensions if they are not in this order. Use the
# ctype FITS keywords to determine order of dimensions. Note that both PyFITS
# and casacore reverse the order of the axes relative to NAXIS, so we must too.
naxis = hdr['NAXIS']
data_shape = []
for i in range(naxis):
data_shape.append(hdr['NAXIS'+str(i+1)])
data_shape.reverse()
data_shape = tuple(data_shape)
mylog.info("Original data shape of " + image_file +': ' +str(data_shape))
ctype_in = []
for i in range(naxis):
key_val_raw = hdr['CTYPE' + str(i+1)]
key_val = key_val_raw.split('-')[0]
ctype_in.append(key_val.strip())
if 'RA' not in ctype_in or 'DEC' not in ctype_in:
if 'GLON' not in ctype_in or 'GLAT' not in ctype_in:
raise RuntimeError("Image data not found")
else:
lat_lon = True
else:
lat_lon = False
# Check for incorrect spectral units. For example, "M/S" is not
# recognized by PyWCS as velocity ("S" is actually Siemens, not
# seconds). Note that we check CUNIT3 and CUNIT4 even if the
# image has only 2 axes, as the header may still have these
# entries.
for i in range(4):
key_val_raw = hdr.get('CUNIT' + str(i+1))
if key_val_raw is not None:
if 'M/S' in key_val_raw or 'm/S' in key_val_raw or 'M/s' in key_val_raw:
hdr['CUNIT' + str(i+1)] = 'm/s'
if 'HZ' in key_val_raw or 'hZ' in key_val_raw or 'hz' in key_val_raw:
hdr['CUNIT' + str(i+1)] = 'Hz'
if 'DEG' in key_val_raw or 'Deg' in key_val_raw:
hdr['CUNIT' + str(i+1)] = 'deg'
# Make sure that the spectral axis has been identified properly
if len(ctype_in) > 2 and 'FREQ' not in ctype_in:
try:
from astropy.wcs import FITSFixedWarning
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=DeprecationWarning)
warnings.filterwarnings("ignore",category=FITSFixedWarning)
from astropy.wcs import WCS
t = WCS(hdr)
t.wcs.fix()
except ImportError as err:
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=DeprecationWarning)
from pywcs import WCS
t = WCS(hdr)
t.wcs.fix()
spec_indx = t.wcs.spec
if spec_indx != -1:
ctype_in[spec_indx] = 'FREQ'
# Now reverse the axes order to match PyFITS/casacore order and define the
# final desired order (ctype_out) and shape (shape_out).
ctype_in.reverse()
if lat_lon:
ctype_out = ['STOKES', 'FREQ', 'GLON', 'GLAT']
else:
ctype_out = ['STOKES', 'FREQ', 'RA', 'DEC']
indx_out = [-1, -1, -1, -1]
indx_in = range(naxis)
for i in indx_in:
for j in range(4):
if ctype_in[i] == ctype_out[j]:
indx_out[j] = i
shape_out = [1, 1, data_shape[indx_out[2]], data_shape[indx_out[3]]]
if indx_out[0] != -1:
shape_out[0] = data_shape[indx_out[0]]
if indx_out[1] != -1:
shape_out[1] = data_shape[indx_out[1]]
indx_out = [a for a in indx_out if a >= 0] # trim unused axes
# Read in data. If only a subsection of the image is desired (as defined
# by the trim_box option), we can try to use PyFITS to read only that section.
img._original_naxis = data_shape
img._original_shape = (shape_out[2], shape_out[3])
img._xy_hdr_shift = (0, 0)
if img.opts.trim_box is not None:
img.trim_box = [int(b) for b in img.opts.trim_box]
xmin, xmax, ymin, ymax = img.trim_box
if xmin < 0: xmin = 0
if ymin < 0: ymin = 0
if xmax > shape_out[2]: xmax = shape_out[2]
if ymax > shape_out[3]: ymax = shape_out[3]
if xmin >= xmax or ymin >= ymax:
raise RuntimeError("The trim_box option does not specify a valid part of the image.")
shape_out_untrimmed = shape_out[:]
shape_out[2] = xmax-xmin
shape_out[3] = ymax-ymin
if img.use_io == 'fits':
sx = slice(int(xmin),int(xmax))
sy = slice(int(ymin),int(ymax))
sn = slice(None)
s_array = [sx, sy]
for i in range(naxis-2):
s_array.append(sn)
s_array.reverse() # to match ordering of data array returned by PyFITS
if not old_pyfits and use_sections:
if naxis == 2:
data = fits[0].section[s_array[0], s_array[1]]
elif naxis == 3:
data = fits[0].section[s_array[0], s_array[1], s_array[2]]
elif naxis == 4:
data = fits[0].section[s_array[0], s_array[1], s_array[2], s_array[3]]
else:
# If more than 4 axes, just read in the whole image and
# do the trimming after reordering.
data = fits[0].data
else:
data = fits[0].data
fits.close()
data = data.transpose(*indx_out) # transpose axes to final order
data.shape = data.shape[0:4] # trim unused dimensions (if any)
if naxis > 4 or not use_sections:
data = data.reshape(shape_out_untrimmed) # Add axes if needed
data = data[:, :, xmin:xmax, ymin:ymax] # trim to trim_box
else:
data = data.reshape(shape_out) # Add axes if needed
else:
# With casacore, just read in the whole image and then trim
data = inputimage.getdata()
data = data.transpose(*indx_out) # transpose axes to final order
data.shape = data.shape[0:4] # trim unused dimensions (if any)
data = data.reshape(shape_out_untrimmed) # Add axes if needed
data = data[:, :, xmin:xmax, ymin:ymax] # trim to trim_box
# Adjust WCS keywords for trim_box starting x and y.
hdr['crpix1'] -= xmin
hdr['crpix2'] -= ymin
img._xy_hdr_shift = (xmin, ymin)
else:
if img.use_io == 'fits':
data = fits[0].data
fits.close()
else:
data = inputimage.getdata()
data = data.transpose(*indx_out) # transpose axes to final order
data.shape = data.shape[0:4] # trim unused dimensions (if any)
data = data.reshape(shape_out) # Add axes if needed
mylog.info("Final data shape (npol, nchan, x, y): " + str(data.shape))
return data, hdr
def convert_casacore_header(casacore_image, tmpdir):
"""Converts a casacore header to a PyFITS header."""
import tempfile
import os
import atexit
import shutil
try:
from astropy.io import fits as pyfits
except ImportError as err:
import pyfits
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
tfile = tempfile.NamedTemporaryFile(delete=False, dir=tmpdir)
casacore_image.tofits(tfile.name)
hdr = pyfits.getheader(tfile.name)
if os.path.isfile(tfile.name):
os.remove(tfile.name)
# Register deletion of temp directory at exit to be sure it is deleted
atexit.register(shutil.rmtree, tmpdir, ignore_errors=True)
return hdr
def write_image_to_file(use, filename, image, img, outdir=None,
pad_image=False, clobber=True, is_mask=False):
""" Writes image array to outdir/filename"""
import numpy as N
import os
from . import mylogger
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Writefile")
wcs_obj = img.wcs_obj
if pad_image and img.opts.trim_box is not None:
# Pad image to original size
xsize, ysize = img._original_shape
xmin, ymin = img._xy_hdr_shift
image_pad = N.zeros((xsize, ysize), dtype=N.float32)
image_pad[xmin:xmin+image.shape[0], ymin:ymin+image.shape[1]] = image
image = image_pad
else:
xmin = 0
ymin = 0
if not hasattr(img, '_telescope'):
telescope = None
else:
telescope = img._telescope
if filename == 'SAMP':
import tempfile
if not hasattr(img,'samp_client'):
s, private_key = start_samp_proxy()
img.samp_client = s
img.samp_key = private_key
# Broadcast image to SAMP Hub
temp_im = make_fits_image(N.transpose(image), wcs_obj, img.beam,
img.frequency, img.equinox, telescope, xmin=xmin, ymin=ymin,
is_mask=is_mask)
tfile = tempfile.NamedTemporaryFile(delete=False)
try:
temp_im.writeto(tfile.name, overwrite=clobber)
except TypeError:
# The "overwrite" argument was added in astropy v1.3, so fall back to "clobber"
# if it doesn't work
temp_im.writeto(tfile.name, clobber=clobber)
send_fits_image(img.samp_client, img.samp_key, 'PyBDSM image', tfile.name)
else:
# Write image to FITS file
if outdir is None:
outdir = img.indir
if not os.path.exists(outdir) and outdir != '':
os.makedirs(outdir)
outfilename = os.path.join(outdir, filename)
if os.path.isfile(outfilename):
if clobber:
os.remove(outfilename)
else:
return
if os.path.isdir(outfilename):
if clobber:
os.system("rm -rf "+outfilename)
else:
return
temp_im = make_fits_image(N.transpose(image), wcs_obj, img.beam,
img.frequency, img.equinox, telescope, xmin=xmin, ymin=ymin,
is_mask=is_mask, shape=(img.shape[1], img.shape[0], image.shape[1],
image.shape[0]))
if use == 'rap':
outfile = outfilename + '.fits'
else:
outfile = outfilename
try:
temp_im.writeto(outfile, overwrite=clobber)
except TypeError:
# The "overwrite" argument was added in astropy v1.3, so fall back to "clobber"
# if it doesn't work
temp_im.writeto(outfile, clobber=clobber)
temp_im.close()
if use == 'rap':
# For CASA images, read in FITS image and convert
try:
import casacore.images as pim
import casacore.tables as pt
import os
outimage = pim.image(outfile)
outimage.saveas(outfilename, overwrite=clobber)
# For masks, use the coordinates dictionary from the input
# image, as this is needed in order for the
# image to work as a clean mask in CASA.
if is_mask:
if img.coords_dict is None:
mylog.warning('Mask header information may be incomplete.')
else:
outtable = pt.table(outfilename, readonly=False, ack=False)
outtable.putkeywords({'coords': img.coords_dict})
outtable.done()
except ImportError as err:
import os
os.remove(outfile)
raise RuntimeError("Error importing python-casacore. CASA image could not "
"be writen. Use img_format = 'fits' instead.")
def make_fits_image(imagedata, wcsobj, beam, freq, equinox, telescope, xmin=0, ymin=0,
is_mask=False, shape=None):
"""Makes a simple FITS hdulist appropriate for single-channel images"""
from distutils.version import StrictVersion
try:
from astropy.io import fits as pyfits
use_header_update = False
except ImportError as err:
import pyfits
# Due to changes in the way pyfits handles headers from version 3.1 on,
# we need to check for older versions and change the setting of header
# keywords accordingly.
if StrictVersion(pyfits.__version__) < StrictVersion('3.1'):
use_header_update = True
else:
use_header_update = False
import numpy as np
# If mask, expand to all channels and Stokes for compatibility with casa
if is_mask and shape is not None:
shape_out = shape
else:
shape_out = [1, 1, imagedata.shape[0], imagedata.shape[1]]
hdu = pyfits.PrimaryHDU(np.resize(imagedata, shape_out))
hdulist = pyfits.HDUList([hdu])
header = hdulist[0].header
# Add WCS info
if use_header_update:
header.update('CRVAL1', wcsobj.wcs.crval[0])
header.update('CDELT1', wcsobj.wcs.cdelt[0])
header.update('CRPIX1', wcsobj.wcs.crpix[0] + xmin)
header.update('CUNIT1', str(wcsobj.wcs.cunit[0]).strip().lower()) # needed due to bug in pywcs/astropy
header.update('CTYPE1', wcsobj.wcs.ctype[0])
header.update('CRVAL2', wcsobj.wcs.crval[1])
header.update('CDELT2', wcsobj.wcs.cdelt[1])
header.update('CRPIX2', wcsobj.wcs.crpix[1] + ymin)
header.update('CUNIT2', str(wcsobj.wcs.cunit[1]).strip().lower())
header.update('CTYPE2', wcsobj.wcs.ctype[1])
else:
header['CRVAL1'] = wcsobj.wcs.crval[0]
header['CDELT1'] = wcsobj.wcs.cdelt[0]
header['CRPIX1'] = wcsobj.wcs.crpix[0] + xmin
header['CUNIT1'] = str(wcsobj.wcs.cunit[0]).strip().lower() # needed due to bug in pywcs/astropy
header['CTYPE1'] = wcsobj.wcs.ctype[0]
header['CRVAL2'] = wcsobj.wcs.crval[1]
header['CDELT2'] = wcsobj.wcs.cdelt[1]
header['CRPIX2'] = wcsobj.wcs.crpix[1] + ymin
header['CUNIT2'] = str(wcsobj.wcs.cunit[1]).strip().lower()
header['CTYPE2'] = wcsobj.wcs.ctype[1]
# Add STOKES info
if use_header_update:
header.update('CRVAL3', 1.0)
header.update('CDELT3', 1.0)
header.update('CRPIX3', 1.0)
header.update('CUNIT3', ' ')
header.update('CTYPE3', 'STOKES')
else:
header['CRVAL3'] = 1.0
header['CDELT3'] = 1.0
header['CRPIX3'] = 1.0
header['CUNIT3'] = ''
header['CTYPE3'] = 'STOKES'
# Add frequency info
if use_header_update:
header.update('RESTFRQ', freq)
header.update('CRVAL4', freq)
header.update('CDELT4', 3e8)
header.update('CRPIX4', 1.0)
header.update('CUNIT4', 'HZ')
header.update('CTYPE4', 'FREQ')
header.update('SPECSYS', 'TOPOCENT')
else:
header['RESTFRQ'] = freq
header['CRVAL4'] = freq
header['CDELT4'] = 3e8
header['CRPIX4'] = 1.0
header['CUNIT4'] = 'HZ'
header['CTYPE4'] = 'FREQ'
header['SPECSYS'] = 'TOPOCENT'
# Add beam info
if not is_mask:
if use_header_update:
header.update('BMAJ', beam[0])
header.update('BMIN', beam[1])
header.update('BPA', beam[2])
else:
header['BMAJ'] = beam[0]
header['BMIN'] = beam[1]
header['BPA'] = beam[2]
# Add equinox
if use_header_update:
header.update('EQUINOX', equinox)
else:
header['EQUINOX'] = equinox
# Add telescope
if telescope is not None:
if use_header_update:
header.update('TELESCOP', telescope)
else:
header['TELESCOP'] = telescope
hdulist[0].header = header
return hdulist
def retrieve_map(img, map_name):
"""Returns a map cached on disk."""
import numpy as N
import os
filename = get_name(img, map_name)
if not os.path.isfile(filename):
return None
infile = open(filename, 'rb')
data = N.load(infile)
infile.close()
return data
def store_map(img, map_name, map_data):
"""Caches a map to disk."""
import numpy as N
filename = get_name(img, map_name)
outfile = open(filename, 'wb')
N.save(outfile, map_data)
outfile.close()
def del_map(img, map_name):
"""Deletes a cached map."""
import os
filename = get_name(img, map_name)
if os.path.isfile(filename):
os.remove(filename)
def get_name(img, map_name):
"""Returns name of cache file."""
import os
if img._pi:
pi_text = 'pi'
else:
pi_text = 'I'
suffix = '/w%i_%s/' % (img.j, pi_text)
dir = img.tempdir + suffix
if not os.path.exists(dir):
os.makedirs(dir)
return dir + map_name + '.bin'
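# Illustrative sketch (not part of the original module): the cache helpers above
# only need a few attributes on the image object, so a dummy stand-in (purely
# hypothetical, including the temporary directory name) is enough to show the
# round trip through store/retrieve/delete.
def _example_map_cache():
    import numpy as N
    class _DummyImg(object):  # hypothetical stand-in for a bdsf Image
        tempdir = './tmp_pybdsf_cache'
        _pi = False
        j = 0
    img = _DummyImg()
    store_map(img, 'rms', N.ones((4, 4)))  # writes ./tmp_pybdsf_cache/w0_I/rms.bin
    rms_map = retrieve_map(img, 'rms')     # reads it back as a Numpy array
    del_map(img, 'rms')
    return rms_map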
def connect(mask):
""" Find if a mask is singly or multiply connected """
import scipy.ndimage as nd
connectivity = nd.generate_binary_structure(2,2)
labels, count = nd.label(mask, connectivity)
if count > 1 :
connected = 'multiple'
else:
connected = 'single'
return connected, count
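# Illustrative sketch (not part of the original module): connect() labels the
# True pixels of a boolean array with 8-connectivity and reports whether they
# form one region or several. The toy footprint below is hypothetical.
def _example_connect():
    import numpy as N
    footprint = N.zeros((6, 6), dtype=bool)
    footprint[1:3, 1:3] = True   # first blob
    footprint[4:6, 4:6] = True   # second, disjoint blob
    return connect(footprint)    # -> ('multiple', 2)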
def area_polygon(points):
""" Given an ANGLE ORDERED array points of [[x], [y]], find the total area by summing each successsive
triangle with the centre """
import numpy as N
x, y = points
n_tri = len(x)-1
cenx, ceny = N.mean(x), N.mean(y)
area = 0.0
for i in range(n_tri):
p1, p2, p3 = N.array([cenx, ceny]), N.array([x[i], y[i]]), N.array([x[i+1], y[i+1]])
t_area= N.linalg.norm(N.cross((p2 - p1), (p3 - p1)))/2.
area += t_area
return area
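# Illustrative sketch (not part of the original module): area_polygon() sums the
# triangles formed by the polygon centre and successive vertex pairs, so the
# vertex list must be angle-ordered and closed (first vertex repeated at the end),
# as done in convexhull_deficiency() below. A hypothetical square of side 2:
def _example_area_polygon():
    import numpy as N
    x = N.array([0.0, 2.0, 2.0, 0.0, 0.0])
    y = N.array([0.0, 0.0, 2.0, 2.0, 0.0])
    return area_polygon((x, y))  # -> 4.0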
def convexhull_deficiency(isl):
""" Finds the convex hull for the island and returns the deficiency.
Code taken from http://code.google.com/p/milo-lab/source/browse/trunk/src/toolbox/convexhull.py?spec=svn140&r=140
"""
import random
import time
import numpy as N
import scipy.ndimage as nd
def _angle_to_point(point, centre):
"""calculate angle in 2-D between points and x axis"""
delta = point - centre
if delta[0] == 0.0:
res = N.pi/2.0
else:
res = N.arctan(delta[1] / delta[0])
if delta[0] < 0:
res += N.pi
return res
def area_of_triangle(p1, p2, p3):
"""calculate area of any triangle given co-ordinates of the corners"""
return N.linalg.norm(N.cross((p2 - p1), (p3 - p1)))/2.
def convex_hull(points):
"""Calculate subset of points that make a convex hull around points
Recursively eliminates points that lie inside two neighbouring points until only convex hull is remaining.
points : ndarray (2 x m) array of points for which to find hull
Returns: hull_points : ndarray (2 x n), convex hull surrounding points """
n_pts = points.shape[1]
#assert(n_pts > 5)
centre = points.mean(1)
angles = N.apply_along_axis(_angle_to_point, 0, points, centre)
pts_ord = points[:,angles.argsort()]
pts = [x[0] for x in zip(pts_ord.transpose())]
prev_pts = len(pts) + 1
k = 0
while prev_pts > n_pts:
prev_pts = n_pts
n_pts = len(pts)
i = -2
while i < (n_pts - 2):
Aij = area_of_triangle(centre, pts[i], pts[(i + 1) % n_pts])
Ajk = area_of_triangle(centre, pts[(i + 1) % n_pts], \
pts[(i + 2) % n_pts])
Aik = area_of_triangle(centre, pts[i], pts[(i + 2) % n_pts])
if Aij + Ajk < Aik:
del pts[i+1]
i += 1
n_pts = len(pts)
k += 1
return N.asarray(pts)
mask = ~isl.mask_active
points = N.asarray(N.where(mask ^ nd.binary_erosion(mask)))
hull_pts = list(convex_hull(points)) # these are already in angle-sorted order
hull_pts.append(hull_pts[0])
hull_pts = N.transpose(hull_pts)
isl_area = isl.size_active
hull_area = area_polygon(hull_pts)
ratio1 = hull_area/(isl_area - 0.5*len(hull_pts[0]))
return ratio1
def open_isl(mask, index):
""" Do an opening on a mask, divide left over pixels among opened sub islands. Mask = True => masked pixel """
import scipy.ndimage as nd
import numpy as N
connectivity = nd.generate_binary_structure(2,2)
ft = N.ones((index,index), int)
open = nd.binary_opening(~mask, ft)
open = check_1pixcontacts(open) # check if by removing one pixel from labels, you can split a sub-island
labels, n_subisl = nd.label(open, connectivity) # get label/rank image for open. label = 0 for masked pixels
labels, mask = assign_leftovers(mask, open, n_subisl, labels) # add the leftover pixels to some island
if labels is not None:
isl_pixs = [len(N.where(labels==i)[0]) for i in range(1,n_subisl+1)]
isl_pixs = N.array(isl_pixs)/float(N.sum(isl_pixs))
else:
isl_pixs = None
return n_subisl, labels, isl_pixs
def check_1pixcontacts(open):
import scipy.ndimage as nd
import numpy as N
from copy import deepcopy as cp
connectivity = nd.generate_binary_structure(2,2)
ind = N.transpose(N.where(open[1:-1,1:-1] > 0)) + [1,1] # exclude boundary to make it easier
for pixel in ind:
x, y = pixel
grid = cp(open[x-1:x+2, y-1:y+2]); grid[1,1] = 0
grid = N.where(grid == open[tuple(pixel)], 1, 0)
ll, nn = nd.label(grid, connectivity)
if nn > 1:
open[tuple(pixel)] = 0
return open
def assign_leftovers(mask, open, nisl, labels):
"""
Given the original mask, the image of the mask after opening (open), and the number of new
independent sub-islands (nisl), connect the leftover pixels to the new sub-islands: a pixel
group that borders exactly one sub-island is added to it, one that borders several is added
to the smallest of them, and one that borders none is masked.
"""
import scipy.ndimage as nd
import numpy as N
from copy import deepcopy as cp
n, m = mask.shape
leftout = ~mask ^ open
connectivity = nd.generate_binary_structure(2,2)
mlabels, count = nd.label(leftout, connectivity)
npix = [len(N.where(labels==b)[0]) for b in range(1,nisl+1)]
for i_subisl in range(count):
c_list = [] # is list of all bordering pixels of the sub island
ii = i_subisl+1
coords = N.transpose(N.where(mlabels==ii)) # the coordinates of island i of left-out pixels
for co in coords:
co8 = [[x,y] for x in range(co[0]-1,co[0]+2) for y in range(co[1]-1,co[1]+2) if x >=0 and y >=0 and x <n and y<m]
c_list.extend([tuple(cc) for cc in co8 if mlabels[tuple(cc)] == 0])
c_list = list(set(c_list)) # to avoid duplicates
vals = N.array([labels[c] for c in c_list])
belongs = list(set(vals[N.nonzero(vals)]))
if len(belongs) == 0:
# No suitable islands found => mask pixels
for cc in coords:
mask = (mlabels == ii)
# mask[cc] = True
return None, mask
if len(belongs) == 1:
for cc in coords:
labels[tuple(cc)] = belongs[0]
else: # get the border pixels of the islands
nn = [npix[b-1] for b in belongs]
addto = belongs[N.argmin(nn)]
for cc in coords:
labels[tuple(cc)] = addto
return labels, mask
def _float_approx_equal(x, y, tol=1e-18, rel=1e-7):
if tol is rel is None:
raise TypeError('cannot specify both absolute and relative errors as None')
tests = []
if tol is not None: tests.append(tol)
if rel is not None: tests.append(rel*abs(x))
assert tests
return abs(x - y) <= max(tests)
def approx_equal(x, y, *args, **kwargs):
"""approx_equal(float1, float2[, tol=1e-18, rel=1e-7]) -> True|False
approx_equal(obj1, obj2[, *args, **kwargs]) -> True|False
Return True if x and y are approximately equal, otherwise False.
If x and y are floats, return True if y is within either absolute error
tol or relative error rel of x. You can disable either the absolute or
relative check by passing None as tol or rel (but not both).
For any other objects, x and y are checked in that order for a method
__approx_equal__, and the result of that is returned as a bool. Any
optional arguments are passed to the __approx_equal__ method.
__approx_equal__ can return NotImplemented to signal that it doesn't know
how to perform that specific comparison, in which case the other object is
checked instead. If neither object has the method, or both defer by
returning NotImplemented, approx_equal falls back on the same numeric
comparison used for floats.
>>> approx_equal(1.2345678, 1.2345677)
True
>>> approx_equal(1.234, 1.235)
False
"""
if not (type(x) is type(y) is float):
# Skip checking for __approx_equal__ in the common case of two floats.
methodname = '__approx_equal__'
# Allow the objects to specify what they consider "approximately equal",
# giving precedence to x. If either object has the appropriate method, we
# pass on any optional arguments untouched.
for a,b in ((x, y), (y, x)):
try:
method = getattr(a, methodname)
except AttributeError:
continue
else:
result = method(b, *args, **kwargs)
if result is NotImplemented:
continue
return bool(result)
# If we get here without returning, then neither x nor y knows how to do an
# approximate equal comparison (or are both floats). Fall back to a numeric
# comparison.
return _float_approx_equal(x, y, *args, **kwargs)
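# Illustrative sketch (not part of the original module): float comparisons with
# the default tolerances and with an absolute tolerance only.
def _example_approx_equal():
    assert approx_equal(1.2345678, 1.2345677)             # within default rel=1e-7
    assert not approx_equal(1.234, 1.235)
    assert approx_equal(10.0, 10.04, tol=0.05, rel=None)  # absolute tolerance only
    return True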
def isl_tosplit(isl, opts):
""" Splits an island and sends back parameters """
import numpy as N
size_extra5 = opts.splitisl_size_extra5
frac_bigisl3 = opts.splitisl_frac_bigisl3
connected, count = connect(isl.mask_active)
index = 0
n_subisl3, labels3, isl_pixs3 = open_isl(isl.mask_active, 3)
n_subisl5, labels5, isl_pixs5 = open_isl(isl.mask_active, 5)
isl_pixs3, isl_pixs5 = N.array(isl_pixs3), N.array(isl_pixs5)
# take open 3 or 5
open3, open5 = False, False
if n_subisl3 > 0 and isl_pixs3 is not None: # open 3 breaks up island
max_sub3 = N.max(isl_pixs3)
if max_sub3 < frac_bigisl3: open3 = True  # if the biggest sub-island isn't too big
if n_subisl5 > 0 and isl_pixs5 is not None: # open 5 breaks up island
max_sub5 = N.max(isl_pixs5) # if the biggest sub-island isn't too big OR the smallest extra islands add up to 10%
if (max_sub5 < 0.75*max_sub3) or (N.sum(N.sort(isl_pixs5)[:len(isl_pixs5)-n_subisl3]) > size_extra5):
open5 = True
# index=0 => dont split
if open5: index = 5; n_subisl = n_subisl5; labels = labels5
else:
if open3: index = 3; n_subisl = n_subisl3; labels = labels3
else: index = 0
convex_def = convexhull_deficiency(isl)
#print 'CONVEX = ',convex_def
if opts.plot_islands:
try:
import matplotlib.pyplot as pl
pl.figure()
pl.suptitle('Island '+str(isl.island_id))
pl.subplot(2,2,1); pl.imshow(N.transpose(isl.image*~isl.mask_active), origin='lower', interpolation='nearest'); pl.title('Image')
pl.subplot(2,2,2); pl.imshow(N.transpose(labels3), origin='lower', interpolation='nearest'); pl.title('labels3')
pl.subplot(2,2,3); pl.imshow(N.transpose(labels5), origin='lower', interpolation='nearest'); pl.title('labels5')
except ImportError:
print("\033[31;1mWARNING\033[0m: Matplotlib not found. Plotting disabled.")
if index == 0: return [index, n_subisl5, labels5]
else: return [index, n_subisl, labels]
class NullDevice():
"""Null device to suppress stdout, etc."""
def write(self, s):
pass
def ch0_aperture_flux(img, posn_pix, aperture_pix):
"""Measure ch0 flux inside radius aperture_pix pixels centered on posn_pix.
Returns [flux, fluxE]
"""
import numpy as N
if aperture_pix is None:
return [0.0, 0.0]
# Make ch0 and rms subimages
ch0 = img.ch0_arr
shape = ch0.shape
xlo = int(posn_pix[0]) - int(aperture_pix) - 1
if xlo < 0:
xlo = 0
xhi = int(posn_pix[0]) + int(aperture_pix) + 1
if xhi > shape[0]:
xhi = shape[0]
ylo = int(posn_pix[1]) - int(aperture_pix) - 1
if ylo < 0:
ylo = 0
yhi = int(posn_pix[1]) + int(aperture_pix) + 1
if yhi > shape[1]:
yhi = shape[1]
mean = img.mean_arr
rms = img.rms_arr
aper_im = ch0[int(xlo):int(xhi), int(ylo):int(yhi)] - mean[int(xlo):int(xhi), int(ylo):int(yhi)]
aper_rms = rms[int(xlo):int(xhi), int(ylo):int(yhi)]
posn_pix_new = [int(posn_pix[0])-xlo, int(posn_pix[1])-ylo]
pixel_beamarea = img.pixel_beamarea()
aper_flux = aperture_flux(aperture_pix, posn_pix_new, aper_im, aper_rms, pixel_beamarea)
return aper_flux
def aperture_flux(aperture_pix, posn_pix, aper_im, aper_rms, beamarea):
"""Returns aperture flux and error"""
import numpy as N
dist_mask = generate_aperture(aper_im.shape[0], aper_im.shape[1], posn_pix[0], posn_pix[1], aperture_pix)
aper_mask = N.where(dist_mask.astype(bool))
if N.size(aper_mask) == 0:
return [0.0, 0.0]
aper_flux = N.nansum(aper_im[aper_mask])/beamarea # Jy
pixels_in_source = N.sum(~N.isnan(aper_im[aper_mask])) # number of unmasked pixels assigned to current source
aper_fluxE = nanmean(aper_rms[aper_mask]) * N.sqrt(pixels_in_source/beamarea) # Jy
return [aper_flux, aper_fluxE]
def generate_aperture(xsize, ysize, xcenter, ycenter, radius):
"""Makes a mask (1 = inside aperture) for a circular aperture"""
import numpy
x, y = numpy.mgrid[0.5:xsize, 0.5:ysize]
mask = ((x - xcenter)**2 + (y - ycenter)**2 <= radius**2) * 1
return mask
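# Illustrative sketch (not part of the original module): a radius-2 aperture
# centred on a 5 x 5 grid. Pixel centres lie at half-integer positions, so
# (2.5, 2.5) is the middle pixel; 13 pixels fall inside the circle.
def _example_generate_aperture():
    mask = generate_aperture(5, 5, 2.5, 2.5, 2.0)
    return mask.sum()  # -> 13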
def make_src_mask(mask_size, posn_pix, aperture_pix):
"""Makes an island mask (1 = inside aperture) for a given source position.
"""
import numpy as N
xsize, ysize = mask_size
if aperture_pix is None:
return N.zeros((xsize, ysize), dtype=int)
# Make subimages
xlo = int(posn_pix[0]-int(aperture_pix)-1)
if xlo < 0:
xlo = 0
xhi = int(posn_pix[0]+int(aperture_pix)+1)
if xhi > xsize:
xhi = xsize
ylo = int(posn_pix[1]-int(aperture_pix)-1)
if ylo < 0:
ylo = 0
yhi = int(posn_pix[1]+int(aperture_pix)+1)
if yhi > ysize:
yhi = ysize
mask = N.zeros((xsize, ysize), dtype=int)
posn_pix_new = [posn_pix[0]-xlo, posn_pix[1]-ylo]
submask_xsize = xhi - xlo
submask_ysize = yhi - ylo
submask = generate_aperture(submask_xsize, submask_ysize, posn_pix_new[0], posn_pix_new[1], aperture_pix)
submask_slice = [slice(int(xlo), int(xhi)), slice(int(ylo), int(yhi))]
mask[tuple(submask_slice)] = submask
return mask
def getTerminalSize():
"""
returns (lines:int, cols:int)
"""
import os, struct
def ioctl_GWINSZ(fd):
import fcntl, termios
return struct.unpack("hh", fcntl.ioctl(fd, termios.TIOCGWINSZ, "1234"))
# try stdin, stdout, stderr
for fd in (0, 1, 2):
try:
return ioctl_GWINSZ(fd)
except:
pass
# try os.ctermid()
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
try:
return ioctl_GWINSZ(fd)
finally:
os.close(fd)
except:
pass
# try `stty size`
try:
return tuple(int(x) for x in os.popen("stty size", "r").read().split())
except:
pass
# try environment variables
try:
return tuple(int(os.getenv(var)) for var in ("LINES", "COLUMNS"))
except:
pass
# Give up. return 0.
return (0, 0)
def eval_func_tuple(f_args):
"""Takes a tuple of a function and args, evaluates and returns result
This function (in addition to itertools) gets around limitation that
multiple-argument sequences are not supported by multiprocessing.
"""
return f_args[0](*f_args[1:])
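# Illustrative sketch (not part of the original module): packing a callable and
# its arguments into one tuple, as done when feeding multi-argument functions to
# map-style multiprocessing calls elsewhere in PyBDSM.
def _example_eval_func_tuple():
    import itertools
    tasks = zip(itertools.repeat(pow), [2, 3, 4], [10, 2, 0.5])
    return [eval_func_tuple(t) for t in tasks]  # -> [1024, 9, 2.0]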
def start_samp_proxy():
"""Starts (registers) and returns a SAMP proxy"""
import os
try:
# Python 3
from xmlrpc.client import ServerProxy
except ImportError:
# Python 2
from xmlrpclib import ServerProxy
lockfile = os.path.expanduser('~/.samp')
if not os.path.exists(lockfile):
raise RuntimeError("A running SAMP hub was not found.")
else:
HUB_PARAMS = {}
for line in open(lockfile):
if not line.startswith('#'):
key, value = line.split('=', 1)
HUB_PARAMS[key] = value.strip()
# Set up proxy
s = ServerProxy(HUB_PARAMS['samp.hub.xmlrpc.url'])
# Register with Hub
metadata = {"samp.name": 'PyBDSM', "samp.description.text": 'PyBDSM: the Python Blob Detection and Source Measurement software'}
result = s.samp.hub.register(HUB_PARAMS['samp.secret'])
private_key = result['samp.private-key']
s.samp.hub.declareMetadata(private_key, metadata)
return s, private_key
def stop_samp_proxy(img):
"""Stops (unregisters) a SAMP proxy"""
import os
if hasattr(img, 'samp_client'):
lockfile = os.path.expanduser('~/.samp')
if os.path.exists(lockfile):
img.samp_client.samp.hub.unregister(img.samp_key)
def send_fits_image(s, private_key, name, file_path):
"""Send a SAMP notification to load a fits image."""
import os
message = {}
message['samp.mtype'] = "image.load.fits"
message['samp.params'] = {}
message['samp.params']['url'] = 'file://' + os.path.abspath(file_path)
message['samp.params']['name'] = name
lockfile = os.path.expanduser('~/.samp')
if not os.path.exists(lockfile):
raise RuntimeError("A running SAMP hub was not found.")
else:
s.samp.hub.notifyAll(private_key, message)
def send_fits_table(s, private_key, name, file_path):
"""Send a SAMP notification to load a fits table."""
import os
message = {}
message['samp.mtype'] = "table.load.fits"
message['samp.params'] = {}
message['samp.params']['url'] = 'file://' + os.path.abspath(file_path)
message['samp.params']['name'] = name
lockfile = os.path.expanduser('~/.samp')
if not os.path.exists(lockfile):
raise RuntimeError("A running SAMP hub was not found.")
else:
s.samp.hub.notifyAll(private_key, message)
def send_highlight_row(s, private_key, url, row_id):
"""Send a SAMP notification to highlight a row in a table."""
import os
message = {}
message['samp.mtype'] = "table.highlight.row"
message['samp.params'] = {}
message['samp.params']['row'] = str(row_id)
message['samp.params']['url'] = url
lockfile = os.path.expanduser('~/.samp')
if not os.path.exists(lockfile):
raise RuntimeError("A running SAMP hub was not found.")
else:
s.samp.hub.notifyAll(private_key, message)
def send_coords(s, private_key, coords):
"""Send a SAMP notification to point at given coordinates."""
import os
message = {}
message['samp.mtype'] = "coord.pointAt.sky"
message['samp.params'] = {}
message['samp.params']['ra'] = str(coords[0])
message['samp.params']['dec'] = str(coords[1])
lockfile = os.path.expanduser('~/.samp')
if not os.path.exists(lockfile):
raise RuntimeError("A running SAMP hub was not found.")
else:
s.samp.hub.notifyAll(private_key, message)
def make_curvature_map(subim):
"""Makes a curvature map with the Aegean curvature algorithm
(Hancock et al. 2012)
The Aegean algorithm uses a curvature map to identify regions of negative
curvature. These regions then define distinct sources.
"""
import scipy.signal as sg
import numpy as N
import sys
# Make average curvature map:
curv_kernal = N.array([[1, 1, 1],[1, -8, 1],[1, 1, 1]])
# The next step prints meaningless warnings, so suppress them
original_stdout = sys.stdout # keep a reference to STDOUT
sys.stdout = NullDevice() # redirect the real STDOUT
curv_map = sg.convolve2d(subim, curv_kernal)
sys.stdout = original_stdout # turn STDOUT back on
return curv_map
def bstat(indata, mask, kappa_npixbeam):
"""Numpy version of the c++ bstat routine
Uses the PySE method for calculating the clipped mean and rms of an array.
This method is superior to the c++ bstat routine (see section 2.7.3 of
http://dare.uva.nl/document/174052 for details) and, since the Numpy
functions used here are written in c, there should be no big computational
penalty in using Python code.
"""
import numpy
from scipy.special import erf, erfcinv
# Flatten array
skpix = indata.flatten()
if mask is not None:
msk_flat = mask.flatten()
unmasked = numpy.where(~msk_flat)
skpix = skpix[unmasked]
ct = skpix.size
iter = 0
c1 = 1.0
c2 = 0.0
maxiter = 200
converge_num = 1e-6
m_raw = numpy.mean(skpix)
r_raw = numpy.std(skpix, ddof=1)
while (c1 >= c2) and (iter < maxiter):
npix = skpix.size
if kappa_npixbeam > 0.0:
kappa = kappa_npixbeam
else:
npixbeam = abs(kappa_npixbeam)
kappa = numpy.sqrt(2.0)*erfcinv(1.0 / (2.0*npix/npixbeam))
if kappa < 3.0:
kappa = 3.0
lastct = ct
medval = numpy.median(skpix)
sig = numpy.std(skpix)
wsm = numpy.where(abs(skpix-medval) < kappa*sig)
ct = len(wsm[0])
if ct > 0:
skpix = skpix[wsm]
c1 = abs(ct - lastct)
c2 = converge_num * lastct
iter += 1
mean = numpy.mean(skpix)
median = numpy.median(skpix)
sigma = numpy.std(skpix, ddof=1)
mode = 2.5*median - 1.5*mean
if sigma > 0.0:
skew_par = abs(mean - median)/sigma
else:
raise RuntimeError("A region with an unphysical rms value has been found. "
"Please check the input image.")
if skew_par <= 0.3:
m = mode
else:
m = median
r1 = numpy.sqrt(2.0*numpy.pi)*erf(kappa/numpy.sqrt(2.0))
r = numpy.sqrt(sigma**2 * (r1 / (r1 - 2.0*kappa*numpy.exp(-kappa**2/2.0))))
return m_raw, r_raw, m, r, iter
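# Illustrative sketch (not part of the original module): kappa-sigma clipping
# with bstat() on Gaussian noise plus a block of bright "source" pixels. The
# clipped mean and rms should land close to the true values (0, 1), unlike the
# raw ones.
def _example_bstat():
    import numpy
    rng = numpy.random.RandomState(0)
    data = rng.normal(0.0, 1.0, (200, 200))
    data[:5, :5] = 100.0  # hypothetical bright contamination
    m_raw, r_raw, m_clip, r_clip, niter = bstat(data, None, 3.0)
    return m_clip, r_clip  # roughly (0.0, 1.0)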
def centered(arr, newshape):
"""Return the center newshape portion of the array
This function is a copy of the private _centered() function in
scipy.signal.signaltools
"""
import numpy as np
newshape = np.asarray(newshape)
currshape = np.array(arr.shape)
startind = (currshape - newshape) // 2
endind = startind + newshape
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
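# Illustrative sketch (not part of the original module): extracting the central
# 3 x 3 block of a 5 x 5 array.
def _example_centered():
    import numpy as np
    arr = np.arange(25).reshape(5, 5)
    return centered(arr, (3, 3))  # rows/columns 1..3 of arr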
def set_up_output_paths(opts):
"""Returns various paths and filenames related to output
The opts input is either an instance of <class 'bdsf.opts.Opts'> or a
dict generated by that class.
The outputs are:
- parentname: the name of the image, with the path and extension removed
(if it is a common image extension)
- output_basedir: the output directory, where the log file and
other optional outputs of the process_image task are placed
"""
import os
# Get filename and outdir from opts
if type(opts) is dict:
filename = opts['filename']
outdir = opts['outdir']
else:
# opts is type <class 'bdsf.opts.Opts'>, so options are stored
# as attributes
filename = opts.filename
outdir = opts.outdir
# Try to trim common extensions from filename to make the parent filename,
# used for various output purposes
root, ext = os.path.splitext(filename)
if ext in ['.fits', '.FITS', '.image']:
fname = root
elif ext in ['.gz', '.GZ']:
root2, ext2 = os.path.splitext(root)
if ext2 in ['.fits', '.FITS', '.image']:
fname = root2
else:
fname = root
else:
fname = filename
parentname = os.path.basename(fname)
# Determine the base output directory
if outdir is None:
output_basedir = os.path.abspath(os.path.dirname(filename))
else:
output_basedir = os.path.abspath(outdir)
# Make the output directory if needed
if not os.path.exists(output_basedir):
os.makedirs(output_basedir)
# Check that we have write permission to the base directory
if not os.access(output_basedir, os.W_OK):
raise RuntimeError("Cannot write to the output directory '{0}' (permission denied). "
"Please specify an output directory to which you have "
"write permission using the 'outdir' option.".format(output_basedir))
return parentname, output_basedir
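# Illustrative sketch (not part of the original module): a hypothetical image
# name with the outputs directed to the current directory (which must exist and
# be writable, otherwise a RuntimeError is raised as above).
def _example_set_up_output_paths():
    opts = {'filename': 'field1.fits', 'outdir': '.'}
    parentname, output_basedir = set_up_output_paths(opts)
    return parentname  # -> 'field1'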
def fix_gaussian_axes(major, minor, pa):
"""Check a Gaussian for switched axes and fix if found
Returns corrected (major, minor, pa)
"""
if major < minor:
major, minor = minor, major
pa += 90.0
pa = divmod(pa, 180)[1] # restrict to range [0, 180)
return (major, minor, pa)
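# Illustrative sketch (not part of the original module): axes supplied in the
# wrong order are swapped and the position angle is rotated by 90 deg and
# wrapped into [0, 180).
def _example_fix_gaussian_axes():
    return fix_gaussian_axes(3.0, 5.0, 170.0)  # -> (5.0, 3.0, 80.0)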
| 80,495 | 32.950232 | 141 |
py
|
PyBDSF
|
PyBDSF-master/bdsf/psf_vary.py
|
from __future__ import print_function
from __future__ import absolute_import
import numpy as N
from .image import *
from . import mylogger
from copy import deepcopy as cp
from . import has_pl
if has_pl:
import matplotlib.pyplot as pl
import scipy
import scipy.signal as S
from . import _cbdsm
from . import functions as func
from . import _pytesselate as _pytess
from . import shapelets as sh
from scipy.optimize import leastsq
from . import nat
from math import *
from . import statusbar
from .const import fwsig
from . import multi_proc as mp
import itertools
try:
from itertools import izip as zip
except ImportError: # will be 3.x series
pass
class Op_psf_vary(Op):
"""Computes variation of psf across the image """
def __call__(self, img):
if img.opts.psf_vary_do:
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Psf_Vary")
mylogger.userinfo(mylog, '\nEstimating PSF variations')
opts = img.opts
dir = img.basedir + '/misc/'
plot = False # debug figures
image = img.ch0_arr
try:
from astropy.io import fits as pyfits
old_pyfits = False
except ImportError as err:
from distutils.version import StrictVersion
import pyfits
if StrictVersion(pyfits.__version__) < StrictVersion('2.2'):
old_pyfits = True
else:
old_pyfits = False
if old_pyfits:
mylog.warning('PyFITS version is too old: psf_vary module skipped')
return
if opts.psf_fwhm is not None:
# User has specified a constant PSF to use, so skip PSF fitting/etc.
psf_maj = opts.psf_fwhm[0] # FWHM in deg
psf_min = opts.psf_fwhm[1] # FWHM in deg
psf_pa = opts.psf_fwhm[2] # PA in deg
mylogger.userinfo(mylog, 'Using constant PSF (major, minor, pos angle)',
'(%.5e, %.5e, %s) degrees' % (psf_maj, psf_min,
round(psf_pa, 1)))
else:
# User did not specify a constant PSF to use, so estimate it
over = 2
generators = opts.psf_generators; nsig = opts.psf_nsig; kappa2 = opts.psf_kappa2
snrtop = opts.psf_snrtop; snrbot = opts.psf_snrbot; snrcutstack = opts.psf_snrcutstack
gencode = opts.psf_gencode; primarygen = opts.psf_primarygen; itess_method = opts.psf_itess_method
tess_sc = opts.psf_tess_sc; tess_fuzzy= opts.psf_tess_fuzzy
bright_snr_cut = opts.psf_high_snr
s_only = opts.psf_stype_only
if opts.psf_snrcut < 5.0:
mylogger.userinfo(mylog, "Value of psf_snrcut too low; increasing to 5")
snrcut = 5.0
else:
snrcut = opts.psf_snrcut
img.psf_snrcut = snrcut
if opts.psf_high_snr is not None:
if opts.psf_high_snr < 10.0:
mylogger.userinfo(mylog, "Value of psf_high_snr too low; increasing to 10")
high_snrcut = 10.0
else:
high_snrcut = opts.psf_high_snr
else:
high_snrcut = opts.psf_high_snr
img.psf_high_snr = high_snrcut
wtfns=['unity', 'roundness', 'log10', 'sqrtlog10']
if 0 <= itess_method < 4: tess_method=wtfns[itess_method]
else: tess_method='unity'
### now put all relevant gaussian parameters into a list
ngaus = img.ngaus
nsrc = img.nsrc
num = N.zeros(nsrc, dtype=N.int32)
peak = N.zeros(nsrc)
xc = N.zeros(nsrc)
yc = N.zeros(nsrc)
bmaj = N.zeros(nsrc)
bmin = N.zeros(nsrc)
bpa = N.zeros(nsrc)
code = N.array(['']*nsrc);
rms = N.zeros(nsrc)
src_id_list = []
for i, src in enumerate(img.sources):
src_max = 0.0
for gmax in src.gaussians:
# Take only brightest Gaussian per source
if gmax.peak_flux > src_max:
src_max = gmax.peak_flux
g = gmax
num[i] = i
peak[i] = g.peak_flux
xc[i] = g.centre_pix[0]
yc[i] = g.centre_pix[1]
bmaj[i] = g.size_pix[0]
bmin[i] = g.size_pix[1]
bpa[i] = g.size_pix[2]
code[i] = img.sources[g.source_id].code
rms[i] = img.islands[g.island_id].rms
gauls = (num, peak, xc, yc, bmaj, bmin, bpa, code, rms)
tr_gauls = self.trans_gaul(gauls)
# takes gaussians with code=S and snr > snrcut.
if s_only:
tr = [n for n in tr_gauls if n[1]/n[8]>snrcut and n[7] == 'S']
else:
tr = [n for n in tr_gauls if n[1]/n[8]>snrcut]
g_gauls = self.trans_gaul(tr)
# computes statistics of fitted sizes. Same as psfvary_fullstat.f in fBDSM.
bmaj_a, bmaj_r, bmaj_ca, bmaj_cr, ni = _cbdsm.bstat(bmaj, None, nsig)
bmin_a, bmin_r, bmin_ca, bmin_cr, ni = _cbdsm.bstat(bmin, None, nsig)
bpa_a, bpa_r, bpa_ca, bpa_cr, ni = _cbdsm.bstat(bpa, None, nsig)
# get subset of sources deemed to be unresolved. Same as size_ksclip_wenss.f in fBDSM.
flag_unresolved = self.get_unresolved(g_gauls, img.beam, nsig, kappa2, over, img.psf_high_snr, plot)
if len(flag_unresolved) == 0:
mylog.warning('Insufficient number of sources to determine PSF variation.\nTry changing the PSF options or specify a (constant) PSF with the "psf_fwhm" option')
return
# see how much the SNR-weighted sizes of unresolved sources differ from the synthesized beam.
wtsize_beam_snr = self.av_psf(g_gauls, img.beam, flag_unresolved)
# filter out resolved sources
tr_gaul = self.trans_gaul(g_gauls)
tr = [n for i, n in enumerate(tr_gaul) if flag_unresolved[i]]
g_gauls = self.trans_gaul(tr)
mylogger.userinfo(mylog, 'Number of unresolved sources', str(len(g_gauls[0])))
# get a list of voronoi generators. vorogenS has values (and not None) if generators='field'.
vorogenP, vorogenS = self.get_voronoi_generators(g_gauls, generators, gencode, snrcut, snrtop, snrbot, snrcutstack)
mylogger.userinfo(mylog, 'Number of generators for PSF variation', str(len(vorogenP[0])))
if len(vorogenP[0]) < 3:
mylog.warning('Insufficient number of generators')
return
mylogger.userinfo(mylog, 'Tesselating image')
# group generators into tiles
tile_prop = self.edit_vorogenlist(vorogenP, frac=0.9)
# tesselate the image
volrank, vorowts = self.tesselate(vorogenP, vorogenS, tile_prop, tess_method, tess_sc, tess_fuzzy, \
generators, gencode, image.shape)
if opts.output_all:
func.write_image_to_file(img.use_io, img.imagename + '.volrank.fits', volrank, img, dir)
tile_list, tile_coord, tile_snr = tile_prop
ntile = len(tile_list)
bar = statusbar.StatusBar('Determining PSF variation ............... : ', 0, ntile)
mylogger.userinfo(mylog, 'Number of tiles for PSF variation', str(ntile))
# For each tile, calculate the weighted averaged psf image. Also for all the sources in the image.
cdelt = list(img.wcs_obj.acdelt[0:2])
factor=3.
psfimages, psfcoords, totpsfimage, psfratio, psfratio_aper = self.psf_in_tile(image, img.beam, g_gauls, \
cdelt, factor, snrcutstack, volrank, tile_prop, plot, img)
npsf = len(psfimages)
if opts.psf_use_shap:
if opts.psf_fwhm is None:
# use totpsfimage to get beta, centre and nmax for shapelet decomposition. Use nmax=5 or 6
mask=N.zeros(totpsfimage.shape, dtype=bool)
(m1, m2, m3)=func.moment(totpsfimage, mask)
betainit=sqrt(m3[0]*m3[1])*2.0 * 1.4
tshape = totpsfimage.shape
cen = N.array(N.unravel_index(N.argmax(totpsfimage), tshape))+[1,1]
cen = tuple(cen)
nmax = 12
basis = 'cartesian'
betarange = [0.5,sqrt(betainit*max(tshape))]
beta, error = sh.shape_varybeta(totpsfimage, mask, basis, betainit, cen, nmax, betarange, plot)
if error == 1: print(' Unable to find minimum in beta')
# decompose all the psf images using the beta from above
nmax=12; psf_cf=[]
for i in range(npsf):
psfim = psfimages[i]
cf = sh.decompose_shapelets(psfim, mask, basis, beta, cen, nmax, mode='')
psf_cf.append(cf)
if img.opts.quiet == False:
bar.increment()
bar.stop()
# transpose the psf image list
xt, yt = N.transpose(tile_coord)
tr_psf_cf = N.transpose(N.array(psf_cf))
# interpolate the coefficients across the image. Interpolation in scipy for
# irregular grids is unreliable (it does not even pass through some of the
# points), so for now fit a polynomial instead.
compress = 100.0
x, y = N.transpose(psfcoords)
if len(x) < 3:
mylog.warning('Insufficient number of tiles to do interpolation of PSF variation')
return
psf_coeff_interp, xgrid, ygrid = self.interp_shapcoefs(nmax, tr_psf_cf, psfcoords, image.shape, \
compress, plot)
psfshape = psfimages[0].shape
skip = 5
aa = self.create_psf_grid(psf_coeff_interp, image.shape, xgrid, ygrid, skip, nmax, psfshape, \
basis, beta, cen, totpsfimage, plot)
img.psf_images = aa
else:
if opts.psf_fwhm is None:
if ntile < 4:
mylog.warning('Insufficient number of tiles to do interpolation of PSF variation')
return
else:
# Fit stacked PSFs with Gaussians and measure aperture fluxes
bm_pix = N.array([img.pixel_beam()[0]*fwsig, img.pixel_beam()[1]*fwsig, img.pixel_beam()[2]])
psf_maj = N.zeros(npsf)
psf_min = N.zeros(npsf)
psf_pa = N.zeros(npsf)
if img.opts.quiet == False:
bar.start()
for i in range(ntile):
psfim = psfimages[i]
mask = N.zeros(psfim.shape, dtype=bool)
x_ax, y_ax = N.indices(psfim.shape)
maxv = N.max(psfim)
p_ini = [maxv, (psfim.shape[0]-1)/2.0*1.1, (psfim.shape[1]-1)/2.0*1.1, bm_pix[0]/fwsig*1.3,
bm_pix[1]/fwsig*1.1, bm_pix[2]*2]
para, ierr = func.fit_gaus2d(psfim, p_ini, x_ax, y_ax, mask)
### first extent is major
if para[3] < para[4]:
para[3:5] = para[4:2:-1]
para[5] += 90
### clip position angle
para[5] = divmod(para[5], 180)[1]
psf_maj[i] = para[3]
psf_min[i] = para[4]
posang = para[5]
while posang >= 180.0:
posang -= 180.0
psf_pa[i] = posang
if img.opts.quiet == False:
bar.increment()
bar.stop()
# Interpolate Gaussian parameters
if img.aperture is None:
psf_maps = [psf_maj, psf_min, psf_pa, psfratio]
else:
psf_maps = [psf_maj, psf_min, psf_pa, psfratio, psfratio_aper]
nimgs = len(psf_maps)
bar = statusbar.StatusBar('Interpolating PSF images ................ : ', 0, nimgs)
if img.opts.quiet == False:
bar.start()
map_list = mp.parallel_map(func.eval_func_tuple,
zip(itertools.repeat(self.interp_prop),
psf_maps, itertools.repeat(psfcoords),
itertools.repeat(image.shape)), numcores=opts.ncores,
bar=bar)
if img.aperture is None:
psf_maj_int, psf_min_int, psf_pa_int, psf_ratio_int = map_list
else:
psf_maj_int, psf_min_int, psf_pa_int, psf_ratio_int, psf_ratio_aper_int = map_list
# Smooth if desired
if img.opts.psf_smooth is not None:
sm_scale = img.opts.psf_smooth / img.pix2beam([1.0, 1.0, 0.0])[0] / 3600.0 # pixels
if img.opts.aperture is None:
psf_maps = [psf_maj_int, psf_min_int, psf_pa_int, psf_ratio_int]
else:
psf_maps = [psf_maj_int, psf_min_int, psf_pa_int, psf_ratio_int, psf_ratio_aper_int]
nimgs = len(psf_maps)
bar = statusbar.StatusBar('Smoothing PSF images .................... : ', 0, nimgs)
if img.opts.quiet == False:
bar.start()
map_list = mp.parallel_map(func.eval_func_tuple,
zip(itertools.repeat(self.blur_image),
psf_maps, itertools.repeat(sm_scale)), numcores=opts.ncores,
bar=bar)
if img.aperture is None:
psf_maj_int, psf_min_int, psf_pa_int, psf_ratio_int = map_list
else:
psf_maj_int, psf_min_int, psf_pa_int, psf_ratio_int, psf_ratio_aper_int = map_list
# Make sure all smoothed, interpolated images are ndarrays
psf_maj_int = N.array(psf_maj_int)
psf_min_int = N.array(psf_min_int)
psf_pa_int = N.array(psf_pa_int)
psf_ratio_int = N.array(psf_ratio_int)
if img.aperture is None:
psf_ratio_aper_int = N.zeros(psf_maj_int.shape, dtype=N.float32)
else:
psf_ratio_aper_int = N.array(psf_ratio_aper_int, dtype=N.float32)
# Blank with NaNs if needed
mask = img.mask_arr
if isinstance(mask, N.ndarray):
pix_masked = N.where(mask == True)
psf_maj_int[pix_masked] = N.nan
psf_min_int[pix_masked] = N.nan
psf_pa_int[pix_masked] = N.nan
psf_ratio_int[pix_masked] = N.nan
psf_ratio_aper_int[pix_masked] = N.nan
# Store interpolated images. The major and minor axis images are
# the sigma in units of arcsec, the PA image in units of degrees east of
# north, the ratio images in units of 1/beam.
img.psf_vary_maj_arr = psf_maj_int * img.pix2beam([1.0, 1.0, 0.0])[0] * 3600.0 # sigma in arcsec
img.psf_vary_min_arr = psf_min_int * img.pix2beam([1.0, 1.0, 0.0])[0] * 3600.0 # sigma in arcsec
img.psf_vary_pa_arr = psf_pa_int
img.psf_vary_ratio_arr = psf_ratio_int # in 1/beam
img.psf_vary_ratio_aper_arr = psf_ratio_aper_int # in 1/beam
if opts.output_all:
func.write_image_to_file(img.use_io, img.imagename + '.psf_vary_maj.fits', img.psf_vary_maj_arr*fwsig, img, dir)
func.write_image_to_file(img.use_io, img.imagename + '.psf_vary_min.fits', img.psf_vary_min_arr*fwsig, img, dir)
func.write_image_to_file(img.use_io, img.imagename + '.psf_vary_pa.fits', img.psf_vary_pa_arr, img, dir)
func.write_image_to_file(img.use_io, img.imagename + '.psf_vary_ratio.fits', img.psf_vary_ratio_arr, img, dir)
func.write_image_to_file(img.use_io, img.imagename + '.psf_vary_ratio_aper.fits', img.psf_vary_ratio_aper_arr, img, dir)
# Loop through source and Gaussian lists and deconvolve the sizes using appropriate beam
bar2 = statusbar.StatusBar('Correcting deconvolved source sizes ..... : ', 0, img.nsrc)
if img.opts.quiet == False:
bar2.start()
for src in img.sources:
src_pos = img.sky2pix(src.posn_sky_centroid)
src_pos_int = (int(src_pos[0]), int(src_pos[1]))
gaus_c = img.gaus2pix(src.size_sky, src.posn_sky_centroid)
if opts.psf_fwhm is None:
gaus_bm = [psf_maj_int[src_pos_int]*fwsig, psf_min_int[src_pos_int]*fwsig, psf_pa_int[src_pos_int]]
else:
# Use user-specified constant PSF instead
gaus_bm = img.beam2pix(opts.psf_fwhm)
gaus_dc, err = func.deconv2(gaus_bm, gaus_c)
src.deconv_size_sky = img.pix2gaus(gaus_dc, src_pos)
src.deconv_size_skyE = [0.0, 0.0, 0.0]
for g in src.gaussians:
gaus_c = img.gaus2pix(g.size_sky, src.posn_sky_centroid)
gaus_dc, err = func.deconv2(gaus_bm, gaus_c)
g.deconv_size_sky = img.pix2gaus(gaus_dc, g.centre_pix)
g.deconv_size_skyE = [0.0, 0.0, 0.0]
if img.opts.quiet == False:
bar2.spin()
if img.opts.quiet == False:
bar2.increment()
bar2.stop()
img.completed_Ops.append('psf_vary')
##################################################################################################
def trans_gaul(self, q):
" transposes a tuple of .gaul values "
y=[]
for i in range(len(q[0])):
elem=[]
for j in range(len(q)):
elem.append(q[j][i])
y.append(elem)
return y
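# A minimal sketch of what trans_gaul does (hypothetical values; not part of the
# original module): two parameter columns become per-source rows.
#   q = [[1, 2, 3], ['a', 'b', 'c']]
#   self.trans_gaul(q)  ->  [[1, 'a'], [2, 'b'], [3, 'c']]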
##################################################################################################
def bindata(self, over, num): #ptpbin,nbin,ptplastbin, same as get_bins in fBDSM.
if num <= 100: ptpbin=num/5
if num > 100: ptpbin=num/10
if num > 1000: ptpbin=num/20
if ptpbin % 2 == 1: ptpbin=ptpbin+1
if num < 10: ptpbin=num
ptpbin = float(ptpbin) # cast to float to avoid integer division errors
nbin=int((num-ptpbin)/(ptpbin/over)+1)
ptplastbin=int((num-1)-(nbin-1)*ptpbin/over)
nbin=nbin+1
return ptpbin, nbin, ptplastbin
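# A worked example of the binning bookkeeping (hypothetical inputs; not part of
# the original module): for num=200 sources and over=2,
#   ptpbin = 200/10 = 20.0 points per bin,
#   nbin = int((200 - 20)/(20/2) + 1) + 1 = 20 bins,
#   ptplastbin = int(199 - 18*20/2) = 19 points in the second-to-last bin.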
##################################################################################################
def bin_and_stats_ny(self, x,y,over,ptpbin,nbin,ptplastbin,nsig):
import math
n1=N.array(range(nbin))+1 # bin number
n2=N.array([ptpbin]*nbin); n2[nbin-2]=ptplastbin; n2[nbin-1]=ptpbin/over
n3=N.array([ptpbin]*nbin, dtype=float); n3[nbin-1]=float(over)*(len(x)-ptpbin/2)/(nbin-1)
xval=N.zeros(nbin)
meany=N.zeros(nbin); stdy=N.zeros(nbin); mediany=N.zeros(nbin)
for i in range(nbin):
lb=int(round(1+(n1[i]-1)*n3[i]/over+(1-1))-1) # -1 for python indexing
ub=int(round(1+(n1[i]-1)*n3[i]/over+(n2[i]-1))-1) # -1 for python indexing
x1=x[lb:ub+1]; y1=y[lb:ub+1]
# do calcmedianclip2vec.f for code=YYN
if len(x1) > 0 and len(y1) > 0:
nout=100; niter=0
while nout>0 and niter<6:
med1=N.median(y1[:])
med2=10.**(N.median(N.log10(x1[:])))
medstd=0 # calcmedianstd.f
for j in y1: medstd += (j-med1)*(j-med1)
medstd=math.sqrt(medstd/len(y1)) #
av1=N.mean(y1); std1=func.std(y1)
av2=N.mean(x1); std2=func.std(x1)
# get_medianclip_vec2
z=N.transpose([x1, y1])
z1=N.transpose([n for n in z if abs(n[1]-med1)<=nsig*medstd])
nout=len(x1)-len(z1[0])
x1=z1[0]; y1=z1[1];
niter+=1
xval[i]=med2;
meany[i]=av1; stdy[i]=std1; mediany[i]=med1
if stdy[nbin-1]/mediany[nbin-1] > stdy[nbin-2]/mediany[nbin-2]:
stdy[nbin-1]=stdy[nbin-2]/mediany[nbin-2]*mediany[nbin-1]
return xval, meany, stdy, mediany
##################################################################################################
def LM_fit(self, x, y, err, funct, order=0):
if funct == func.poly:
p0=N.array([y[N.argmax(x)]] + [0]*order)
if funct == func.wenss_fit:
p0=N.array([y[N.argmax(x)]] + [1.])
res=lambda p, x, y, err: (y-funct(p, x))/err
(p, flag)=leastsq(res, p0, args=(x, y, err))
return p
##################################################################################################
def fit_bins_func(self, x,y,over,ptpbin,nbin,ptplastbin,nsig): # sub_size_ksclip
import math
(xval,meany,stdy,medy)=self.bin_and_stats_ny(x,y,over,ptpbin,nbin,ptplastbin,nsig)
yfit=stdy/medy
err=N.array([1.]*nbin)
if ptplastbin > 0:
err[nbin-2]=err[0]*math.sqrt(1.0*ptpbin/ptplastbin)
err[nbin-1]=err[0]*math.sqrt(1.0*ptpbin*over/ptplastbin)
i=0
while i<nbin-4 and (N.all(N.sort(yfit[i:i+4])[::-1] == yfit[i:i+4]) == False):
i+=1
if i==nbin-4: sind=0
else: sind=i-1
if sind < 1:
sind = 0
if sind > 0.25*nbin:
sind=int(round(0.25*nbin))-1
s_c=self.LM_fit(xval[sind:],yfit[sind:],err[sind:], func.wenss_fit)
err[:]=1.
s_cm=self.LM_fit(N.log10(xval),medy,err,func.poly, order=1)
if len(xval) >= 3:
s_dm=self.LM_fit(N.log10(xval),medy,err,func.poly, order=2)
else:
s_dm = (N.array([s_cm[0], s_cm[1], 0.0]), 0)
if ptpbin<75: s_dm=N.append(s_cm[:], [0.])
return s_c, s_dm
##################################################################################################
def get_unresolved(self, g_gauls, beam, nsig, kappa2, over, bright_snr_cut=20.0, plot=False):
""""Gets subset of unresolved sources
Also flags as unresolved all sources with SNRs above
bright_cut_snr, since fitting below is unreliable for bright
sources.
"""
num=len(g_gauls[0])
if num < 10:
# Too few sources to do fitting
return []
b1=N.asarray(g_gauls[4])/(beam[0]*3600.)
b2=N.asarray(g_gauls[5])/(beam[1]*3600.)
s1=N.asarray(g_gauls[1])/N.array(g_gauls[8])
snr=N.array(s1)
index=snr.argsort()
snr=snr[index]
nmaj=N.array(b1)[index]
nmin=N.array(b2)[index]
# if plot: pl.figure()
f_sclip=N.zeros((2,num), dtype=bool)
for idx, nbeam in enumerate([nmaj, nmin]):
xarr=N.copy(snr)
yarr=N.copy(nbeam)
niter=0; nout=num; noutold=nout*2
while niter<10 and nout >0.75*num:
(ptpbin, nbin, ptplastbin)=self.bindata(over,nout) # get_bins in fBDSM
(s_c,s_dm) = self.fit_bins_func(xarr,yarr,over,ptpbin,nbin,ptplastbin,nsig) # size_ksclip_wenss in fBDSM
noutold = len(xarr)
z = N.transpose([xarr, yarr, s_dm[0]+s_dm[1]*N.log10(xarr)+s_dm[2]*(N.log10(xarr)**2.), \
N.sqrt(s_c[0]*s_c[0]+s_c[1]*s_c[1]/(xarr*xarr)) ])
z1 = N.transpose([n for n in z if abs(n[1]-n[2])/(n[2]*n[3])<kappa2]) # sub_size_wenss_getnum in fBDSM
if len(z1) == 0:
break
nout = len(z1[0])
niter += 1
xarr = z1[0]; yarr = z1[1]; # end of sub_size_wenss_getnum
if noutold == nout: break
# flag in the 'unresolved' sources. returns flag array, True ==> unresolved
logsnr=N.log10(snr)
dumr = N.sqrt(s_c[0]*s_c[0]+s_c[1]*s_c[1]/(snr*snr))
med = s_dm[0]+s_dm[1]*logsnr+s_dm[2]*(logsnr*logsnr)
f_sclip[idx] = N.abs((nbeam-med)/(med*dumr)) < N.array([kappa2]*num)
f_s = f_sclip[0]*f_sclip[1]
# Add bright sources
if bright_snr_cut is not None:
if bright_snr_cut < 20.0:
bright_snr_cut = 20.0
bright_srcs = N.where(snr >= bright_snr_cut)
if len(bright_srcs[0]) > 0:
f_s[bright_srcs] = True
# now make plots
# if plot:
# bb=[b1, b2]
# pl.subplot(211+idx)
# pl.semilogx(s1, bb[idx], 'og')
# f0=f_sclip[idx][index.argsort()]
# sf=[n for i, n in enumerate(s1) if f0[i]]
# b1f=[n for i, n in enumerate(bb[idx]) if f0[i]]
# pl.semilogx(sf, b1f, 'or')
# pl.semilogx(snr,med,'-')
# pl.semilogx(snr,med+med*dumr*(N.array([kappa2]*num)),'-')
# pl.semilogx(snr,med-med*dumr*(N.array([kappa2]*num)),'-')
# pl.title(' axis ' + str(idx))
#
return f_s[index.argsort()]
##################################################################################################
def av_psf(self, g_gauls, beam, flag):
""" calculate how much the SNR-weighted sizes of unresolved sources differs from the
synthesized beam. Same as av_psf.f in fBDSM."""
from math import sqrt
bmaj = N.asarray(g_gauls[4])
bmin = N.asarray(g_gauls[5])
bpa = N.asarray(g_gauls[6])
wt = N.asarray(g_gauls[1])/N.asarray(g_gauls[8])
flagwt = wt*flag
sumwt = N.sum(flagwt)
w1 = N.sum(flagwt*flagwt)
wtavbm = N.array([N.sum(bmaj*flagwt), N.sum(bmin*flagwt), N.sum(bpa*flagwt)])/sumwt
dumrar = N.array([N.sum(bmaj*bmaj*flagwt), N.sum(bmin*bmin*flagwt), N.sum(bpa*bpa*flagwt)])
dd = sumwt*sumwt-w1
wtstdbm = N.sqrt((dumrar - wtavbm*wtavbm*sumwt)*sumwt/dd)
avpa = N.sum(bpa*flagwt-180.0*flagwt*N.array(bpa >= 90))/sumwt
stdpa = N.sum(bpa*flagwt+(180.0*180.0-360.0*bpa)*flagwt*N.array(bpa >= 90))
stdpa = sqrt(abs((stdpa-avpa*avpa*sumwt)*sumwt/dd))
if stdpa < wtstdbm[2]:
wtstdbm[2] = stdpa
wtavbm[2] = avpa
return (wtavbm - N.array([beam[0]*3600.0, beam[1]*3600.0, beam[2]]))/wtstdbm
##################################################################################################
def get_voronoi_generators(self, g_gauls, generators, gencode, snrcut, snrtop, snrbot, snrcutstack):
"""This gets the list of all voronoi generators. It is either the centres of the brightest
sources, or is imported from metadata (in future)."""
from math import sqrt
num=len(g_gauls[0])
snr=N.asarray(g_gauls[1])/N.asarray(g_gauls[8])
index=snr.argsort()
snr_incr = snr[index]
snr = snr_incr[::-1]
x = N.asarray(g_gauls[2])[index]
y = N.asarray(g_gauls[3])[index]
cutoff = 0
if generators == 'calibrators' or generators == 'field':
if gencode != 'file':
gencode = 'list'
if gencode == 'list':
cutoff = int(round(num*(snrtop)))
if cutoff > len(snr):
cutoff = len(snr)
# Make sure we don't fall below snrcutstack (SNR cut for stacking of PSFs), since
# it makes no sense to make tiles with generators that fall below this cut.
if snr[cutoff-1] < snrcutstack:
cutoff = num - snr_incr.searchsorted(snrcutstack)
if generators == 'calibrators':
if gencode == 'file':
raise NotImplementedError("gencode=file not yet implemented.")
x1 = x.tolist()
y1 = y.tolist()
x1.reverse()
y1.reverse()
snr1 = snr.tolist()
vorogenP = N.asarray([x1[0:cutoff], y1[0:cutoff], snr1[0:cutoff]])
vorogenS = None
return vorogenP, vorogenS
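# Hedged usage sketch (argument values are illustrative): the generators are the pixel
# coordinates and SNRs of the brightest sources, sorted by decreasing SNR:
#   vorogenP, vorogenS = op.get_voronoi_generators(g_gauls, 'calibrators', 'list',
#                                                  snrcut, snrtop, snrbot, snrcutstack)
#   xgen, ygen, snrgen = vorogenP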
##################################################################################################
def edit_vorogenlist(self, vorogenP, frac):
""" Edit primary voronoi generator list. Each tile has a tile centre and can
have more than one generator to be averaged. tile_list is a list of arrays, indexed
by the tile number and each array is an array of numbers in the ngen list which are
the generators in that tile. xtile, ytile and snrtile are arrays of length number_of_tiles
and have x,y,snr of each tile. Group together generators
if closer than a fraction of dist to third closest."""
xgen, ygen, snrgen = vorogenP
flag = N.zeros(len(xgen))
coord = N.array([xgen,ygen]).transpose()
tile_list = []
tile_coord = []; tile_snr = []
for i in range(len(xgen)):
dist = N.array([func.dist_2pt(coord[i], t) for t in coord])
# dist = N.array(map(lambda t: func.dist_2pt(coord[i], t), coord))
indi = N.argsort(dist)
sortdist = dist[indi]
if sortdist[1] < frac * sortdist[2]: # first is the element itself
if flag[indi[1]] + flag[i] == 0: # not already deleted from other pair
tile_list.append([i, indi[1]])
tile_coord.append((coord[i]*snrgen[i]+coord[indi[1]]*snrgen[indi[1]])/(snrgen[i]+snrgen[indi[1]]))
tile_snr.append(snrgen[i]+snrgen[indi[1]])
flag[i] = 1
flag[indi[1]] = 1
else:
if len(dist) > 3:
if sortdist[1]+sortdist[2] < 2.0*frac*sortdist[3]: # for 3 close-by sources
in1=indi[1]
in2=indi[2]
if flag[in1]+flag[in2]+flag[i] == 0: # not already deleted from others
tile_list.append([i, in1, in2])
tile_coord.append((coord[i]*snrgen[i]+coord[in1]*snrgen[in1]+coord[in2]*snrgen[in2]) \
/(snrgen[i]+snrgen[in1]+snrgen[in2]))
tile_snr.append(snrgen[i]+snrgen[in1]+snrgen[in2])
flag[i] = 1
flag[in1] = 1
flag[in2] = 1
else:
tile_list.append([i])
tile_coord.append(coord[i])
tile_snr.append(snrgen[i])
# Assign any leftover generators
for i in range(len(xgen)):
if flag[i] == 0:
tile_list.append([i])
tile_coord.append(coord[i])
tile_snr.append(snrgen[i])
return tile_list, tile_coord, tile_snr
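# Hedged sketch of the grouping step (the value of frac is illustrative): generators
# closer than frac times the distance to their third-nearest neighbour are merged
# into a single SNR-weighted tile centre:
#   tile_list, tile_coord, tile_snr = op.edit_vorogenlist(vorogenP, frac=0.9)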
##################################################################################################
def tess_simple(self, vorogenP, wts, tess_sc, tess_fuzzy, shape):
""" Simple tesselation """
xgen, ygen, snrgen = vorogenP
volrank = _pytess.pytess_simple(shape[0], shape[1], xgen, ygen, snrgen, \
wts, tess_fuzzy, tess_sc)
return volrank
##################################################################################################
def tess_roundness(self, vorogenP, tess_sc, tess_fuzzy, shape):
""" Tesselation, modified to make the tiles more round. """
xgen, ygen, snrgen = vorogenP
volrank = _pytess.pytess_roundness(shape[0], shape[1], xgen, ygen, snrgen, \
tess_fuzzy, tess_sc)
return volrank
##################################################################################################
def pixintile(self, tilecoord, pixel, tess_method, wts, tess_sc, tess_fuzzy):
""" This has routines to find out which tile a given pixel belongs to. """
if tess_method == 'roundness':
#tilenum = pytess_roundness(tilecoord, pixel, wts, tess_sc, tess_fuzzy)
print(" Not yet implemented !!!! ")
return 0
else:
xgen, ygen = tilecoord
xgen = N.asarray(xgen)
ygen = N.asarray(ygen)
ngen = len(xgen)
i,j = pixel
dist = N.sqrt((i-xgen)*(i-xgen)+(j-ygen)*(j-ygen))/wts
minind = dist.argmin()
if tess_sc == 's':
tilenum=minind
else:
print(" Not yet implemented !!!! ")
return tilenum
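# Hedged example (coordinates and weights are illustrative): find which tile the
# pixel (120, 250) belongs to, using the simple SNR-weighted-distance tessellation:
#   tilenum = op.pixintile((xt, yt), (120, 250), 'unity', wts, 's', tess_fuzzy)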
##################################################################################################
def tesselate(self, vorogenP, vorogenS, tile_prop, tess_method, tess_sc, tess_fuzzy, generators, gencode, shape):
""" Various ways of tesselating. If generators='calibrator', no need to tesselate, just get
modified list based on very nearby sources. If generators='field' then tesselate. The image
is tesselated based on tile_prop. """
wtfn={'unity' : lambda x : N.ones(len(x)), \
'log10' : N.log10, \
'sqrtlog10' : lambda x : N.sqrt(N.log10(x)), \
'roundness' : N.array}
tile_list, tile_coord, tile_snr = tile_prop
xt = self.trans_gaul(tile_coord)[0]
yt = self.trans_gaul(tile_coord)[1]
vorogenT = xt, yt, tile_snr
wt_fn = wtfn[tess_method]
wts = wt_fn(tile_snr)
if tess_method == 'roundness':
volrank = self.tess_roundness(vorogenT, tess_sc, tess_fuzzy, shape)
else:
volrank = self.tess_simple(vorogenT, wts, tess_sc, tess_fuzzy, shape)
return volrank, wts
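# Hedged note on the weighting: wtfn maps each tile's SNR to a tessellation weight,
# e.g. 'unity' -> 1 for every tile, 'log10' -> log10(SNR), 'sqrtlog10' -> sqrt(log10(SNR)).
# Illustrative call:
#   volrank, wts = op.tesselate(vorogenP, vorogenS, tile_prop, 'unity', 's',
#                               tess_fuzzy, generators, gencode, image.shape)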
##################################################################################################
def edit_tile(self, ltnum, g_gauls, flag_unresolved, snrcutstack, volrank, tile_prop, tess_sc, \
tess_fuzzy, wts, tess_method, plot):
""" Looks at tiles with no (or one) unresolved source inside it and deletes it and recomputes
the tiling. For now, does not recompute since we wont use the rank for those pixels anyway."""
if ltnum > 1: raise NotImplementedError("NOT YET IMPLEMENTED FOR LTNUM>1")
tile_list, tile_coord, tile_snr = tile_prop
tr_gaul = self.trans_gaul(g_gauls)
tr=[n for i, n in enumerate(tr_gaul) if flag_unresolved[i] and n[1]/n[8] >= snrcutstack]
ntile = len(tile_list)
ngenpertile=N.zeros(ntile)
for itile in range(ntile):
tile_gauls = [n for n in tr if volrank[int(round(n[2])),int(round(n[3]))]-1 \
== itile]
ngenpertile[itile]=len(tile_gauls)
new_n = N.sum(ngenpertile >= ltnum)
# prepare list of good tiles to pass to pixintile
goodtiles = N.array(N.where(ngenpertile >= ltnum)[0])
new_n = len(goodtiles)
tile_coord_n = [n for i,n in enumerate(tile_coord) if i in goodtiles]
wts_n = [n for i,n in enumerate(wts) if i in goodtiles]
r2t = N.zeros(ntile, dtype=int)
entry = -1
for itile in range(ntile):
if ngenpertile[itile] >= ltnum:
r2t[itile] = itile
else:
pixel = tile_coord[itile]
tilenum = self.pixintile(self.trans_gaul(tile_coord_n), pixel, tess_method, wts_n, tess_sc, tess_fuzzy)
r2t[itile] = tilenum
for itile in range(new_n):
num = N.sum(r2t == itile)
if num == 0:
minarr = -999
while minarr != itile:
arr = N.where(r2t > itile)[0]
minarr = r2t[arr].min()-1
for i in arr: r2t[i]=r2t[i]-1
n_tile_list = []; n_tile_coord = []; n_tile_snr = []
for itile in range(new_n):
ind = N.where(r2t == itile)[0]; ind1 = []
for i in ind: ind1 = ind1 + tile_list[i]
n_tile_list.append(ind1)
snrs = N.array([tile_snr[i] for i in ind])
coords = N.array([tile_coord[i] for i in ind])
n_tile_snr.append(N.sum(snrs))
n_tile_coord.append(N.sum([snrs[i]*coords[i] for i in range(len(snrs))], 0)/N.sum(snrs))
ngenpertile=N.zeros(new_n)
for itile in range(new_n):
tile_gauls = [n for n in tr if r2t[volrank[int(round(n[2])),int(round(n[3]))]-1] \
== itile]
ngenpertile[itile]=len(tile_gauls)
tile_prop = n_tile_list, n_tile_coord, n_tile_snr
return ngenpertile, tile_prop, r2t
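# Hedged sketch (argument values are illustrative): tiles with fewer than ltnum usable
# sources are merged into the nearest surviving tile; r2t maps old tile indices to the
# new, renumbered ones:
#   ngenpertile, tile_prop, r2t = op.edit_tile(1, g_gauls, flag_unres, snrcutstack,
#                                              volrank, tile_prop, 's', tess_fuzzy,
#                                              wts, 'unity', plot=False)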
##################################################################################################
def stackpsf(self, image, beam, g_gauls, wts, cdelt, factor):
""" Stacks all the images of sources in the gaussian list gauls from image, out to
a factor times the beam size. Currently the mask is for the whole image but need to
modify it for masks for each gaussian. These gaussians are supposed to be relatively
isolated unresolved sources. Cut out an image a big bigger than facXbeam and imageshift
to nearest half pixel and then add.
Does not handle masks etc well at all. Masks for image for blanks, masks for \
islands, etc."""
gxcens_pix = g_gauls[2]
gycens_pix = g_gauls[3]
peak = g_gauls[1]
psfimsize = int(round(max(beam[0], beam[1])/max(cdelt[0], cdelt[1]) * factor)) # fac X fwhm; fac ~ 2
psfimage = N.zeros((psfimsize, psfimsize), dtype=N.float32)
cs2=cutoutsize2 = int(round(psfimsize*(1. + 2./factor)/2.)) # size/2. factor => to avoid edge effects etc
cc = cutoutcen_ind=[cs2, cs2]
cpsf=cen_psf_ind = N.array([int(int(round(psfimsize))/2)]*2)
wt=0.
num=len(gxcens_pix)
for isrc in range(num): # MASK !!!!!!!!!!!
wt += wts[isrc]
gcp=N.array([gxcens_pix[isrc], gycens_pix[isrc]])
gcen_ind=gcp-1
rc=rcen_ind = N.asarray(N.round(gcen_ind), dtype=int)
shift=cc-(gcen_ind-(rc-cs2))
cutimage = image[rc[0]-cs2:rc[0]+cs2,rc[1]-cs2:rc[1]+cs2]
if len(cutimage.shape) == 3: cutimage=cutimage[:,:,0]
if 0 not in cutimage.shape:
if sum(sum(N.isnan(cutimage))) == 0:
im_shift = func.imageshift(cutimage, shift)
im_shift = im_shift/peak[isrc]*wts[isrc]
subim_shift = im_shift[cc[0]-cpsf[0]:cc[0]-cpsf[0]+psfimsize,cc[1]-cpsf[1]:cc[1]-cpsf[1]+psfimsize]
if subim_shift.shape == psfimage.shape:
# Check shapes, as they can differ if source is near edge of image.
# If they do differ, don't use that source (may be distorted).
psfimage += subim_shift
psfimage = psfimage/wt
return psfimage
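# Hedged sketch (arguments are illustrative): stack the unresolved sources of one tile
# into a single normalised PSF image, weighting each cutout by its SNR:
#   psfim = op.stackpsf(image, img.beam, t_gauls, wts, cdelt, 2.0)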
##################################################################################################
def psf_in_tile(self, image, beam, g_gauls, cdelt, factor, snrcutstack, volrank, \
tile_prop, plot, img):
""" For each tile given by tile_prop, make a list of all gaussians in the constituent tesselations
and pass it to stackpsf with a weight for each gaussian, to calculate the average psf per tile.
Should define weights inside a tile to include closure errors """
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Psf_Vary")
tile_list, tile_coord, tile_snr = tile_prop
tr_gaul = self.trans_gaul(g_gauls)
tr=[n for i, n in enumerate(tr_gaul)]# if n[1]/n[8] >= snrcutstack]
ntile = len(tile_list)
psfimages = []
psfcoords = []
psfratio = [] # ratio of peak flux to total flux
psfratio_aper = [] # ratio of peak flux to aperture flux
srcpertile = N.zeros(ntile)
snrpertile = N.zeros(ntile)
xt, yt = N.transpose(tile_coord)
if plot:
pl.figure(None)
colours=['b','g','r','c','m','y','k']*(len(xt)//7+1) # integer division (Python 3)
pl.axis([0.0, image.shape[0], 0.0, image.shape[1]])
pl.title('Tesselated image with tile centres and unresolved sources')
for i in range(ntile):
pl.plot([xt[i]], [yt[i]], 'D'+colours[i])
pl.text(xt[i], yt[i], str(i))
for itile in range(ntile):
tile_gauls = [n for n in tr if volrank[int(round(n[2])),int(round(n[3]))]-1 \
== itile]
t_gauls = self.trans_gaul(tile_gauls)
srcpertile[itile] = len(tile_gauls)
if plot:
pl.plot(t_gauls[2], t_gauls[3], 'x'+'k', mew=1.3)#colours[itile])
for i, ig in enumerate(t_gauls[2]):
xx=[xt[itile], ig]
yy=[yt[itile], t_gauls[3][i]]
pl.plot(xx,yy,'-'+colours[itile])
wts = N.asarray(t_gauls[1])/N.asarray(t_gauls[8]) # wt is SNR
snrpertile[itile] = sum(wts)
mylog.info('PSF tile #%i (center = %i, %i): %i unresolved sources, SNR = %.1f' %
(itile, xt[itile], yt[itile], srcpertile[itile], snrpertile[itile]))
a = self.stackpsf(image, beam, t_gauls, wts, cdelt, factor)
psfimages.append(a)
psfcoords.append([sum(N.asarray(t_gauls[2])*wts)/sum(wts), sum(N.asarray(t_gauls[3])*wts)/sum(wts)])
# Find peak/total flux ratio for sources in tile. If an aperture is given,
# use the aperture flux as well.
# t_gauls[0] is source_id
src_ratio = []
src_wts = []
src_ratio_aper = []
src_wts_aper = []
for gt in tile_gauls:
src = img.sources[gt[0]]
if img.aperture is not None:
src_ratio_aper.append(src.peak_flux_max / src.aperture_flux)
src_wts_aper.append(src.total_flux / src.aperture_fluxE)
src_ratio.append(src.peak_flux_max / src.total_flux)
src_wts.append(src.total_flux / src.total_fluxE)
if img.aperture is not None:
psfratio_aper.append(sum(N.asarray(src_ratio_aper)*src_wts_aper)/sum(src_wts_aper))
else:
psfratio_aper.append(0.0)
psfratio.append(sum(N.asarray(src_ratio)*src_wts)/sum(src_wts))
totpsfimage = psfimages[0]*snrpertile[0]
for itile in range(1,ntile):
totpsfimage += psfimages[itile]*snrpertile[itile]
totpsfimage = totpsfimage/sum(snrpertile)
if plot:
pl.imshow(N.transpose(volrank), origin='lower', interpolation='nearest'); pl.colorbar()
if plot:
pl.figure(None)
pl.clf()
ax = pl.subplot(1,1,1)
pax = ax.get_position()
start = N.array((pax.xmin, pax.ymin))
stop = N.array((pax.xmax, pax.ymax))
plaxis = pl.axis([0, image.shape[0], 0, image.shape[1]])
pl.title('Stacked psf for each tile')
for itile in range(ntile):
im=psfimages[itile]
sz=0.07
spt = int(round(snrpertile[itile]*10))/10.
titl='n='+str(int(round(srcpertile[itile])))+'; SNR='+str(spt)
posn=[psfcoords[itile][0], psfcoords[itile][1]]
normposn=N.array(stop-start, dtype=float)/N.array(image.shape[0:2])*posn+start
a=pl.axes([normposn[0]-sz/2., normposn[1]-sz/2., sz, sz])
pl.contour(im,15)
pl.title(titl, fontsize='small')
pl.setp(a, xticks=[], yticks=[])
pl.show()
return psfimages, psfcoords, totpsfimage, psfratio, psfratio_aper
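# Summary of the per-tile products returned above:
#   psfimages[i]     - stacked PSF image of tile i
#   psfcoords[i]     - SNR-weighted centre of the sources used in tile i
#   totpsfimage      - SNR-weighted stack over all tiles
#   psfratio[i]      - weighted mean peak/total flux ratio in tile i
#   psfratio_aper[i] - as above, but using aperture fluxes (0.0 if no aperture is set)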
##################################################################################################
def interp_shapcoefs(self, nmax, tr_psf_cf, psfcoords, imshape, compress, plot):
"""Interpolate using natgrid.
Check to see if variation is significant.
"""
x, y = N.transpose(psfcoords)
index = [(i,j) for i in range(nmax+1) for j in range(nmax+1-i)]
xi=x
yi=y
xo=N.arange(0.0,round(imshape[0]), round(compress))
yo=N.arange(0.0,round(imshape[1]), round(compress))
rgrid=nat.Natgrid(xi,yi,xo,yo)
p={}
for coord in index:
z = N.array(tr_psf_cf[coord]) # otherwise natgrid can't deal with non-contiguous memory
p[coord] = rgrid.rgrd(z)
# if plot:
# for i,coord in enumerate(index):
# if i % 36 == 0:
# pl.figure(None)
# pl.clf()
# title = 'Interpolated shapelet coefficients'
# if i>0: title = title+' (cont)'
# pl.suptitle(title)
# pl.subplot(6,6,(i%36)+1)
# pl.title(str(coord))
# pl.plot(xi/compress, yi/compress, 'xk')
# pl.imshow(p[coord], interpolation='nearest')
# pl.colorbar()
return p, xo, yo
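# Hedged usage sketch (requires the optional natgrid package, imported as `nat` in this
# module; the compress value is illustrative): interpolate each shapelet coefficient onto
# a grid covering the image, sampled every `compress` pixels:
#   p, xo, yo = op.interp_shapcoefs(nmax, tr_psf_cf, psfcoords, image.shape,
#                                   compress=100, plot=False)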
##################################################################################################
def interp_prop(self, prop, psfcoords, imshape, compress=1):
"""Interpolate using natgrid.
Should check to see if variation is significant.
"""
x, y = N.transpose(psfcoords)
xi=x
yi=y
xo=N.arange(0.0,round(imshape[0]), round(compress))
yo=N.arange(0.0,round(imshape[1]), round(compress))
rgrid=nat.Natgrid(xi,yi,xo,yo)
prop_int = rgrid.rgrd(prop)
return prop_int
##################################################################################################
def create_psf_grid(self, psf_coeff_interp, imshape, xgrid, ygrid, skip, nmax, psfshape, basis, beta,
cen, totpsfimage, plot):
""" Creates a image with the gridded interpolated psfs. xgrid and ygrid are 1d numpy arrays
with the x and y coordinates of the grids. """
# if plot:
# plnum=N.zeros(2)
# for i in range(2):
# dum=pl.figure(None)
# plnum[i]=dum.number
# pl.clf()
# if i == 0: pl.suptitle('Gridded psfs')
# if i == 1: pl.suptitle('Gridded residual psfs')
# ax = pl.subplot(1,1,1)
# plaxis = pl.axis([0, imshape[0], 0, imshape[1]])
# pax = ax.get_position()
# start = N.array((pax.xmin, pax.ymin))
# stop = N.array((pax.xmax, pax.ymax))
# sz=0.07
mask=N.zeros(psfshape, dtype=bool) # the mask doesn't matter at present
xg=xgrid[::skip+1]
yg=ygrid[::skip+1]
index = [(i,j) for i in range(0,len(xgrid),skip+1) for j in range(0,len(ygrid),skip+1)]
xy = [(i,j) for i in xgrid[::skip+1] for j in ygrid[::skip+1]]
blah=[]
for i, coord in enumerate(index):
maxpsfshape = [0, 0]
for k in psf_coeff_interp:
if k[0]+1 > maxpsfshape[0]:
maxpsfshape[0] = k[0]+1
if k[1]+1 > maxpsfshape[1]:
maxpsfshape[1] = k[1]+1
cf = N.zeros(maxpsfshape)
for k in psf_coeff_interp:
cf[k]=psf_coeff_interp[k][coord]
cf = N.transpose(cf)
psfgridim = sh.reconstruct_shapelets(psfshape, mask, basis, beta, cen, nmax, cf)
blah.append(psfgridim)
# if plot:
# for j in range(2):
# pl.figure(plnum[j])
# posn = [xy[i][0], xy[i][1]]
# normposn =N.array(stop-start, dtype=float)/N.array(imshape[0:2])*posn+start
# a=pl.axes([normposn[0]-sz/2., normposn[1]-sz/2., sz, sz])
# if j == 0: pl.contour(psfgridim,15)
# if j == 1: pl.contour(psfgridim-totpsfimage,15)
# pl.setp(a, xticks=[], yticks=[])
# pl.colorbar()
# if plot:
# pl.figure(plnum[0])
# pl.figure(plnum[1])
#
return blah
##################################################################################################
def blur_image(self, im, n, ny=None) :
""" blurs the image by convolving with a gaussian kernel of typical
size n. The optional keyword argument ny allows for a different
size in the y direction.
"""
from scipy.ndimage import gaussian_filter
sx = n
if ny is not None:
sy = ny
else:
sy = n
improc = gaussian_filter(im, [sy, sx])
return improc
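# Hedged example (sigma values are illustrative): smooth a 2-D array with a Gaussian of
# width 3 pixels in x and 5 pixels in y:
#   smoothed = op.blur_image(psfimage, 3, ny=5)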
| 49,211 | 44.821229 | 176 |
py
|
PyBDSF
|
PyBDSF-master/bdsf/pybdsf.py
|
"""Interactive PyBDSF shell.
This module initializes the interactive PyBDSF shell, which is a customized
IPython environment. It should be called from the terminal prompt using the
command "pybdsf".
"""
from __future__ import print_function
import bdsf
from bdsf.image import Image
import pydoc
import sys
import inspect
###############################################################################
# Functions needed only in the custom IPython shell are defined here. Other
# functions used by both the custom shell and normal Python or IPython
# environments are defined in interface.py.
#
# Before starting the IPython shell, we need to define all the functions and
# variables that we want in the namespace. Note that we adopt the convention
# for this UI of using lines of 72 characters max for doc strings and the
# start-up banner. However, the parameter list will fill the entire available
# terminal width to consume as few vertical lines as possible.
global _img
_img = Image({'filename':''})
_img._is_interactive_shell = True
T = True
F = False
true = True
false = False
def inp(cur_cmd=None):
"""List inputs for current task.
If a task is given as an argument, inp sets the current task
to the given task. If no task is given, inp lists the parameters
of the current task.
"""
global _img
success = _set_pars_from_prompt()
if not success:
return
if cur_cmd is not None:
if not hasattr(cur_cmd, 'arg_list'):
print('\033[31;1mERROR\033[0m: not a valid task')
return
_set_current_cmd(cur_cmd)
else:
if not hasattr(_img, '_current_cmd'):
print('\033[31;1mERROR\033[0m: no task is set')
return
bdsf.interface.list_pars(_img, opts_list=_img._current_cmd_arg_list,
banner=_img._current_cmd_desc,
use_groups=_img._current_cmd_use_groups)
def go(cur_cmd=None):
"""Executes the current task.
If a task is given as an argument, go executes the given task,
even if it is not the current task. The current task is not
changed in this case.
"""
global _img
success = _set_pars_from_prompt()
if not success:
return
if cur_cmd is None:
if not hasattr(_img, '_current_cmd'):
print('\033[31;1mERROR\033[0m: no task is set')
return
cur_cmd = _img._current_cmd
if not hasattr(cur_cmd, 'arg_list'):
print('\033[31;1mERROR\033[0m: not a valid task')
return
cur_cmd()
def default(cur_cmd=None):
"""Resets all parameters for a task to their default values.
If a task name is given (e.g., "default show_fit"), the
parameters for that task are reset. If no task name is
given, the parameters of the current task are reset.
"""
global _img
if cur_cmd is None:
if not hasattr(_img, '_current_cmd'):
print('\033[31;1mERROR\033[0m: no task is set')
return
cur_cmd = _img._current_cmd
if hasattr(cur_cmd, 'arg_list'):
opts_list = cur_cmd.arg_list
else:
print('\033[31;1mERROR\033[0m: not a valid task')
return
_img.opts.set_default(opts_list)
_replace_vals_in_namespace(opt_names=opts_list)
def tget(filename=None):
"""Load processing parameters from a parameter save file.
A file name may be given (e.g., "tget 'savefile.sav'"), in which case the
parameters are loaded from the file specified. If no file name is given,
the parameters are loaded from the file 'pybdsf.last' if it exists.
Normally, the save file is created by the tput command (try "help tput"
for more info).
The save file is a "pickled" python dictionary which can be loaded into
python and edited by hand. See the pickle module for more information.
Below is an example of how to edit a save file by hand:
BDSF [1]: import pickle
BDSF [2]: with open('savefile.sav', 'rb') as savefile:
BDSF [3]:     pars = pickle.load(savefile)
BDSF [4]: pars['rms_box'] = (80, 20) --> change rms_box parameter
BDSF [5]: with open('savefile.sav', 'wb') as savefile:
BDSF [6]:     pickle.dump(pars, savefile) --> save changes
"""
try:
import cPickle as pickle
except ImportError:
import pickle
import os
global _img
# Check whether user has given a task name as input (as done in casapy).
# If so, reset filename to None.
if hasattr(filename, 'arg_list'):
filename = None
if filename is None or filename == '':
if os.path.isfile('pybdsf.last'):
filename = 'pybdsf.last'
else:
print('\033[31;1mERROR\033[0m: No file name given and '\
'"pybdsf.last" not found.\nPlease specify a file to load.')
return
if os.path.isfile(filename):
try:
pkl_file = open(filename, 'rb')
pars = pickle.load(pkl_file)
pkl_file.close()
_img.opts.set_opts(pars)
_replace_vals_in_namespace()
print("--> Loaded parameters from file '" + filename + "'.")
except:
print("\033[31;1mERROR\033[0m: Could not read file '" + \
filename + "'.")
else:
print("\033[31;1mERROR\033[0m: File '" + filename + "' not found.")
def tput(filename=None, quiet=False):
"""Save processing parameters to a file.
A file name may be given (e.g., "tput 'savefile.sav'"), in which case the
parameters are saved to the file specified. If no file name is given, the
parameters are saved to the file 'pybdsf.last'. The saved parameters can
be loaded using the tget command (try "help tget" for more info).
The save file is a "pickled" python dictionary which can be loaded into
python and edited by hand. See the pickle module for more information.
Below is an example of how to edit a save file by hand:
BDSF [1]: import pickle
BDSF [2]: with open('savefile.sav', 'rb') as savefile:
BDSF [3]:     pars = pickle.load(savefile)
BDSF [4]: pars['rms_box'] = (80, 20) --> change rms_box parameter
BDSF [5]: with open('savefile.sav', 'wb') as savefile:
BDSF [6]:     pickle.dump(pars, savefile) --> save changes
"""
try:
import cPickle as pickle
except ImportError:
import pickle
global _img
success = _set_pars_from_prompt()
if not success:
return
if filename is None or filename == '':
filename = 'pybdsf.last'
# convert opts to dictionary
pars = _img.opts.to_dict()
output = open(filename, 'wb')
pickle.dump(pars, output, protocol=0)
output.close()
if not quiet:
print("--> Saved parameters to file '" + filename + "'.")
def _set_pars_from_prompt():
"""Gets parameters and value and stores them in _img.
To do this, we extract all the valid parameter names
and values from the f_globals directory. Then, use
set_pars() to set them all.
Returns True if successful, False if not.
"""
global _img
f = sys._getframe(len(inspect.stack())-2)
f_dict = f.f_globals
# Check through all possible options and
# build options dictionary
opts = _img.opts.to_dict()
user_entered_opts = {}
for k, v in opts.items():
if k in f_dict:
if f_dict[k] == '':
# Set option to default value in _img and namespace
_img.opts.set_default(k)
f_dict[k] = _img.opts.__getattribute__(k)
user_entered_opts.update({k: f_dict[k]})
# Finally, set the options
try:
_img.opts.set_opts(user_entered_opts)
return True
except RuntimeError as err:
# If an opt fails to set, replace its value in the namespace
# with its current value in _img. Then print error so user knows.
err_msg = str(err)
err_msg_trim = err_msg.split('(')[0]
indx1 = err_msg_trim.find('"') + 1
indx2 = err_msg_trim.find('"', indx1)
k = err_msg_trim[indx1:indx2]
orig_opt_val = opts[k]
f_dict[k] = orig_opt_val
print('\033[31;1mERROR\033[0m: ' + err_msg_trim + \
'\nResetting to previous value.')
return False
def _replace_vals_in_namespace(opt_names=None):
"""Replaces opt values in the namespace with the ones in _img.
opt_names - list of option names to replace (can be string if only one)
"""
global _img
f = sys._getframe(len(inspect.stack())-2)
f_dict = f.f_globals
if opt_names is None:
opt_names = _img.opts.get_names()
if isinstance(opt_names, str):
opt_names = [opt_names]
for opt_name in opt_names:
if opt_name in f_dict:
f_dict[opt_name] = _img.opts.__getattribute__(opt_name)
def _set_current_cmd(cmd):
"""Sets information about current command in img.
This function is used to emulate a casapy interface.
"""
global _img
cmd_name = cmd.__name__
doc = cmd.__doc__
_img._current_cmd = cmd
_img._current_cmd_name = cmd_name
_img._current_cmd_desc = cmd_name.upper() + ': ' + doc.split('\n')[0]
_img._current_cmd_arg_list = cmd.arg_list
_img._current_cmd_use_groups = cmd.use_groups
###############################################################################
# Next, we define the tasks such that they may be called directly by
# the user if so desired. These functions simply pass on the user-
# specified arguments to the appropriate Image method. Here we also
# define the detailed doc strings used by help, and, after each task
# definition, we define its list of arguments and whether it should
# use the opts 'group' attribute, both needed when inp is called. If
# a new parameter is added to a task, it needs to be added to opts.py
# and to the list of arguments for the task below (the "arg_list")
# attribute.
def process_image(**kwargs):
"""Find and measure sources in an image.
There are many possible parameters and options for process_image. Use
"inp process_image" to list them. To get more information about a
parameter, use help. E.g.,
> help 'rms_box'
When process_image is executed, PyBDSF performs the following steps in
order:
1. Reads in the image.
2. Calculates basic statistics of the image and stores them in the Image
object. Calculates sensible values of processing parameters and stores
them. First calculates mean and rms, with and without (3-sigma)
clipping, min and max pixel and values, solid angle. Hereafter, rms
indicates the 3-sigma clipped measure. Next, the number of beams per
source is calculated (see help on algorithms for details), using a
sensible estimate of boxsize and stepsize (which can be set using the
rms_box parameter). Finally, the thresholds are set. They can either be
hard-thresholded (by the user or set as 5-sigma for pixel threshold and
3-sigma for island boundaries internally) or can be calculated using the
False Detection Rate (FDR) method using a user-defined value for
alpha. If the user does not specify whether hard thresholding or FDR
should be applied, one or the other is chosen internally based on the
ratio of expected false pixels and true pixels (the choice is written
out in the log file).
3. Calculates rms image. 3-sigma clipped rms and mean are calculated
inside boxes of size boxsize in steps of stepsize. Intermediate values
are calculated using bilinear interpolation (it was seen that bicubic
spline did not yield appreciably better results but is also
available). Depending on the resulting statistics (see help on
algorithms for details), we either adopt the rms image or a constant rms
in the following analysis.
4. Identifies islands of contiguous emission. First all pixels greater
than the pixel threshold are identified (and sorted by descending flux
order). Next, starting from each of these pixels, all contiguous pixels
(defined by 8-connectivity, i.e., the surrounding eight pixels) higher
than the island boundary threshold are identified as belonging to one
island, accounting properly for overlaps of islands.
5. Fit multiple gaussians and/or shapelets to each island. For each
island, the subimages of emission and rms are cut out. The number of
multiple gaussians to be fit can be determined by three different
methods (see help on algorithms for details). With initial guesses
corresponding to these peaks, gaussians are simultaneously fit to the
island using the Levenberg-Marquardt algorithm. Sensible criteria for bad
solutions are defined. If multiple gaussians are fit and one of them is
a bad solution, then the number of gaussians is decreased by one and the fit is
repeated, until all solutions in the island are good (or zero in number, in
which case the island is flagged). After the final fit to the island, the
deconvolved size is computed assuming the theoretical beam and the
statistics in the source area and in the island are computed and
stored. Errors on each of the fitted parameters are computed using the
formulae in Condon (1997). Finally all good solutions are written into
the gaussian catalog as an ascii and binary file. If shapelets are
required, the program calculates optimal nmax, beta and the centre, and
stores these and the shapelet coefficients in a file.
"""
global _img
success = _set_pars_from_prompt()
if not success:
return
# Save current command, as it might be overwritten when process
# is called by the user directly and is not the current command.
cur_cmd = _img._current_cmd
# Run process. Note that process automatically picks up options
# from the Image object, so we don't need to get_task_kwargs as
# we do for the other tasks.
success = _img.process(**kwargs)
# Now restore parameters and save to pybdsf.last
if success:
_set_current_cmd(cur_cmd)
tput(quiet=True)
task_list = _img.opts.get_names()
process_image.arg_list = task_list
process_image.use_groups = True
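# Illustrative interactive session (hedged; the file name is a placeholder):
# BDSF [1]: inp process_image
# BDSF [2]: filename = 'myimage.fits'
# BDSF [3]: go
# Equivalently, as a direct call: process_image(filename='myimage.fits')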
def show_fit(**kwargs):
"""Show results of fit.
Selected plots are displayed to give the user a quick overview of the
results of the fit. The plots may be zoomed, saved to a file, etc. using
the controls at the bottom of the plot window.
In addition, the following commands are available:
Press "i" ........ : Get integrated flux densities and mean rms
values for the visible portion of the image
Press "m" ........ : Change min and max scaling values
Press "n" ........ : Show / hide island IDs
Press "0" ........ : Reset scaling to default
Press "c" ........ : Change source for SED plot
Click Gaussian ... : Print Gaussian and source IDs (zoom_rect mode,
toggled with the "zoom" button and indicated in
the lower right corner, must be off)
The SED plot will also show the chosen source.
Parameters: ch0_image, rms_image, mean_image, ch0_islands,
gresid_image, sresid_image, gmodel_image,
smodel_image, source_seds, ch0_flagged, pi_image,
psf_major, psf_minor, psf_pa, broadcast
For more information about a parameter, use help. E.g.,
> help 'ch0_image'
"""
global _img
success = _set_pars_from_prompt()
if not success:
return
img_kwargs = _get_task_kwargs(show_fit)
for k in kwargs:
# If user enters an argument, use it instead of
# that in _img
img_kwargs[k] = kwargs[k]
try:
success = _img.show_fit(**img_kwargs)
if success:
tput(quiet=True)
except KeyboardInterrupt:
print("\n\033[31;1mAborted\033[0m")
show_fit.arg_list = ['ch0_image', 'rms_image', 'mean_image', 'ch0_islands',
'gresid_image', 'sresid_image', 'gmodel_image',
'smodel_image', 'source_seds', 'ch0_flagged', 'pi_image',
'psf_major', 'psf_minor', 'psf_pa', 'broadcast']
show_fit.use_groups = False
def write_catalog(**kwargs):
"""Write the Gaussian, source, or shapelet list to a file.
The lists can be written in a number of formats. The information
included in the output file varies with the format used. Use
"help 'format'" for more information.
Parameters: outfile, format, srcroot, bbs_patches, incl_chan, clobber,
catalog_type, incl_empty, correct_proj, bbs_patches_mask
For more information about a parameter, use help. E.g.,
> help 'bbs_patches'
"""
global _img
success = _set_pars_from_prompt()
if not success:
return
img_kwargs = _get_task_kwargs(write_catalog)
for k in kwargs:
# If user enters an argument, use it instead of
# that in _img
img_kwargs[k] = kwargs[k]
try:
success = _img.write_catalog(**img_kwargs)
if success:
tput(quiet=True)
except KeyboardInterrupt:
print("\n\033[31;1mAborted\033[0m")
write_catalog.arg_list = ['bbs_patches', 'format', 'outfile', 'srcroot',
'incl_chan', 'clobber', 'catalog_type', 'incl_empty',
'correct_proj', 'bbs_patches_mask']
write_catalog.use_groups = False
def export_image(**kwargs):
"""Write an image to disk.
Parameters: outfile, img_type, img_format, mask_dilation, pad_image, clobber
For more information about a parameter, use help. E.g.,
> help 'img_type'
"""
global _img
success = _set_pars_from_prompt()
if not success:
return
img_kwargs = _get_task_kwargs(export_image)
for k in kwargs:
# If user enters an argument, use it instead of
# that in _img
img_kwargs[k] = kwargs[k]
try:
success = _img.export_image(**img_kwargs)
if success:
tput(quiet=True)
except KeyboardInterrupt:
print("\n\033[31;1mAborted\033[0m")
export_image.arg_list = ['outfile', 'img_type', 'img_format', 'mask_dilation',
'pad_image', 'clobber']
export_image.use_groups = False
def _get_task_kwargs(task):
"""Returns dictionary of keyword arguments from _img for the given task."""
global _img
arg_list = task.arg_list
kwargs = {}
for a in arg_list:
kwargs.update({a: _img.opts.__getattribute__(a)})
return kwargs
###############################################################################
# Customize the help system for PyBDSF. The user can type "help task" to get
# help on a task (it prints the doc string) or "help 'opt'" to get help on
# a option (it prints the doc string defined in opts.py).
class bdsmDocHelper(pydoc.Helper):
def help(self, request):
global _img
topbar = '_' * 72 + '\n' # 72-character divider
if hasattr(request, '__name__'):
pydoc.pager(topbar + 'Help on ' + pydoc.text.bold(request.__name__)
+ ':\n\n' + pydoc.getdoc(request))
else:
opts = _img.opts.__class__.__dict__
try:
opt = opts[request]
desc_list = str(opt.doc()).split('\n')
desc = '\n\n'.join(desc_list)
default_val = opt._default
if isinstance(default_val, str):
valstr = "'" + default_val + "'"
else:
valstr = str(default_val)
default_val_text = 'Default value: ' + valstr
if opt.group() is not None and opt.group() != 'hidden':
group_text = '\nBelongs to group: ' + opt.group()
else:
group_text = ''
desc_text = bdsf.interface.wrap(desc, 72)
desc_text = '\n'.join(desc_text)
pydoc.pager(topbar + 'Help on the ' + pydoc.text.bold(request)
+ ' parameter:\n\n' + default_val_text
+ group_text
+ '\n\n' + desc_text)
except(KeyError):
print("Parameter '" + request + "' not recognized.")
pydoc.help = bdsmDocHelper(sys.stdin, sys.stdout)
###############################################################################
# Now run the IPython shell with this namespace and a customized autocompleter.
# The custom autocompleter is below. It adds task, command, and option names and
# a few common values to ipython's autocompleter. It also adds files in the
# local directory when they might be needed (but only if the user has started
# to enter a string -- this behavior is to help avoid entering filenames as
# non-strings; this is also done for the help autocomplete).
def _opts_completer(self, event):
""" Returns a list of strings with possible completions."""
import os
import glob
from bdsf.image import Image
img = Image({'filename':''})
opts = img.opts.get_names()
# Split the command entered by user when TAB was pressed
# and check for up to three components (from e.g. "par = val",
# which gives cmd1 = "par", cmd2 = "=", and cmd3 = "val")
cmd1 = (event.line).rsplit(None)[0]
if len((event.line).rsplit(None)) > 1:
cmd2 = (event.line).rsplit(None)[1]
else:
cmd2 = ''
if len((event.line).rsplit(None)) > 2:
cmd3 = (event.line).rsplit(None)[2]
else:
cmd3 = ''
# First, check to see if user has entered a parameter name
# and an equals sign. If so, check parameter type. If Enum
# or Option, match only to the allowable values.
# Allowable values are available from v._type.values if v is
# type Enum (v has no attribute _type.values if not).
if "=" in cmd1 or "=" in cmd2:
par_vals = []
if "=" in cmd1:
cmd3 = cmd1.split('=')[1]
cmd1 = cmd1.split('=')[0]
if cmd1 in opts:
from bdsf.tc import tcEnum, tcOption
v = img.opts.__class__.__dict__[cmd1]
partype = v._type
if isinstance(partype, tcOption):
par_vals = ['None']
elif isinstance(partype, tcEnum):
if ('"' in cmd2 or "'" in cmd2 or
'"' in cmd3 or "'" in cmd3):
par_vals = v._type.values
if not isinstance(par_vals, list):
par_vals = list(par_vals)
if None in par_vals:
# Remove None from list
pindx = par_vals.index(None)
par_vals.pop(pindx)
else:
if None in v._type.values:
par_vals.append('None')
if True in v._type.values:
par_vals.append('True')
if False in v._type.values:
par_vals.append('False')
elif v._default == True or v._default == False:
par_vals = ['True', 'False']
if cmd1 == 'filename' or cmd1 == 'outfile':
if ('"' in cmd2 or "'" in cmd2 or
'"' in cmd3 or "'" in cmd3):
# Also add files in current directory
found = [f.replace('\\','/') for f in glob.glob('*')]
if len(found) > 0:
for fnd in found:
par_vals.append(fnd)
return par_vals
elif cmd1 == 'inp' or cmd1 == 'go':
# Match task names only
cmds = ['process_image', 'write_catalog', 'export_image', 'show_fit']
return cmds
elif cmd1 == 'cd' or cmd1 == 'tput' or cmd1 == 'tget' or '!' in cmd1:
# Match to files in current directory (force use of ' or " with
# tput and tget, as filename must be a string).
files = []
found = [f.replace('\\','/') for f in glob.glob('*')]
if len(found) > 0:
for fnd in found:
files.append(fnd)
if (cmd1 == 'tput' or cmd1 == 'tget') and not ('"' in cmd2 or
"'" in cmd2):
# User has not (yet) started to enter a string, so don't
# return filenames
return []
return files
elif cmd1 == 'help':
if '"' in cmd2 or "'" in cmd2:
# User has started to enter a string:
# Match to parameter names, as they must be strings
par_vals = opts
return par_vals
else:
# User has not started to enter a string:
# Match to commands + tasks only
cmds = ['process_image', 'write_catalog', 'export_image',
'show_fit', 'go', 'inp', 'tget', 'tput', 'default',
'changelog']
return cmds
else:
# Match to parameter, task, and command names only
# Add command names
opts.append('inp')
opts.append('go')
opts.append('tget')
opts.append('tput')
opts.append('default')
opts.append('help')
# Add task names
opts.append('process_image')
opts.append('show_fit')
opts.append('write_catalog')
opts.append('export_image')
return opts
def main():
# Define the welcome banner to print on startup. Also check if there is a newer
# version on the STRW ftp server. If there is, print a message to the user
# asking them to update.
from bdsf._version import __version__, changelog
divider1 = '=' * 72 + '\n'
divider2 = '_' * 72 + '\n'
banner = '\nPyBDSF version ' + __version__ + '\n'\
+ divider1 + 'PyBDSF commands\n'\
' inp task ............ : Set current task and list parameters\n'\
" par = val ........... : Set a parameter (par = '' sets it to default)\n"\
' Autocomplete (with TAB) works for par and val\n'\
' go .................. : Run the current task\n'\
' default ............. : Set current task parameters to default values\n'\
" tput ................ : Save parameter values\n"\
" tget ................ : Load parameter values\n"\
'PyBDSF tasks\n'\
' process_image ....... : Process an image: find sources, etc.\n'\
' show_fit ............ : Show the results of a fit\n'\
' write_catalog ....... : Write out list of sources to a file\n'\
' export_image ........ : Write residual/model/rms/mean image to a file\n'\
'PyBDSF help\n'\
' help command/task ... : Get help on a command or task\n'\
' (e.g., help process_image)\n'\
" help 'par' .......... : Get help on a parameter (e.g., help 'rms_box')\n"\
' help changelog ...... : See list of recent changes\n'\
+ divider2
# Go ahead and set the current task to process_image, so that the user does not
# need to enter "inp process_image" as the first step (the first task needed
# after startup will almost always be process_image).
_set_current_cmd(process_image)
# Now start the ipython shell. Due to (non-backward-compatible) changes in
# ipython with version 0.11, we must support both versions until 0.11 or
# greater is in common use.
try:
# IPython >= 0.11
from distutils.version import LooseVersion
from IPython import __version__ as ipython_version
if LooseVersion(ipython_version) < LooseVersion('1.0.0'):
from IPython.frontend.terminal.embed import InteractiveShellEmbed
else:
from IPython.terminal.embed import InteractiveShellEmbed
try:
# Use the traitlets config
from traitlets.config.loader import Config
from IPython.terminal.prompts import Prompts, Token
cfg = Config()
class CustomPrompt(Prompts):
def in_prompt_tokens(self, cli=None):
return [
(Token.Prompt, 'BDSF ['),
(Token.PromptNum, str(self.shell.execution_count)),
(Token.Prompt, ']: '),
]
def out_prompt_tokens(self):
return [
(Token.OutPrompt, ''),
]
cfg.TerminalInteractiveShell.prompts_class = CustomPrompt
except ImportError:
# fall back to old config
from IPython.config.loader import Config
cfg = Config()
prompt_config = cfg.PromptManager
if ipython_version == '0.11':
cfg.InteractiveShellEmbed.prompt_in1 = r"BDSF [\#]: "
else:
prompt_config.in_template = r"BDSF [\#]: "
cfg.InteractiveShellEmbed.autocall = 2
user_ns = globals()
user_ns.update(locals())
ipshell = InteractiveShellEmbed(config=cfg, banner1=banner,
user_ns=user_ns)
ipshell.set_hook('complete_command', _opts_completer, re_key = '.*')
except ImportError:
# IPython < 0.11
from IPython.Shell import IPShellEmbed
argv = ['-prompt_in1', r'BDSF [\#]: ', '-autocall', '2']
# user_ns is defined in the try branch above only if its imports succeed,
# so define it here as well before embedding the shell
user_ns = globals()
user_ns.update(locals())
ipshell = IPShellEmbed(argv=argv, banner=banner, user_ns=user_ns)
ipshell.IP.set_hook('complete_command', _opts_completer, re_key = '.*')
ipshell()
| 29,572 | 38.378162 | 83 |
py
|
PyBDSF
|
PyBDSF-master/bdsf/plotresults.py
|
"""Plotting module
This module is used to display fits results.
"""
from __future__ import print_function
from __future__ import absolute_import
from .image import *
from . import has_pl
if has_pl:
import matplotlib.pyplot as pl
import matplotlib.cm as cm
import matplotlib.patches as mpatches
from matplotlib.widgets import Button
from matplotlib.patches import Ellipse
from matplotlib.lines import Line2D
from matplotlib import collections
from math import log10
from . import functions as func
from .const import fwsig
import os
import warnings
import numpy as N
warnings.simplefilter(action='ignore', category=FutureWarning)
def plotresults(img, ch0_image=True, rms_image=True, mean_image=True,
ch0_islands=True, gresid_image=True, sresid_image=False,
gmodel_image=True, smodel_image=False, pyramid_srcs=False,
source_seds=False, ch0_flagged=False, pi_image=False,
psf_major=False, psf_minor=False, psf_pa=False, broadcast=False):
"""Show the results of a fit."""
global img_ch0, img_rms, img_mean, img_gaus_mod, img_shap_mod
global img_gaus_resid, img_shap_resid, pixels_per_beam, pix2sky
global vmin, vmax, vmin_cur, vmax_cur, ch0min, ch0max, img_pi
global low, fig, images, src_list, srcid_cur, sky2pix, markers
global img_psf_maj, img_psf_min, img_psf_pa, do_broadcast, samp_client
global samp_key, samp_gaul_table_url, samp_srl_table_url
if not has_pl:
print("\033[31;1mWARNING\033[0m: Matplotlib not found. Plotting is disabled.")
return
if hasattr(img, 'samp_client'):
samp_client = img.samp_client
samp_key = img.samp_key
if hasattr(img, 'samp_srl_table_url'):
samp_srl_table_url = img.samp_srl_table_url
else:
samp_srl_table_url = None
if hasattr(img, 'samp_gaul_table_url'):
samp_gaul_table_url = img.samp_gaul_table_url
else:
samp_gaul_table_url = None
else:
samp_client = None
samp_key = None
samp_srl_table_url = None
samp_gaul_table_url = None
do_broadcast = broadcast
# Define the images. The images are used both by imshow and by the
# on_press() and coord_format event handlers
pix2sky = img.pix2sky
sky2pix = img.sky2pix
gfactor = 2.0 * N.sqrt(2.0 * N.log(2.0))
pixels_per_beam = 2.0 * N.pi * (img.beam2pix(img.beam)[0]
* img.beam2pix(img.beam)[1]) / gfactor**2
# Construct lists of images, titles, etc.
images = []
titles = []
names = []
markers = []
img_gaus_mod = None # default needed for key press event
img_shap_mod = None # default needed for key press event
if ch0_image:
img_ch0 = img.ch0_arr
images.append(img_ch0)
titles.append('Original (ch0) Image\n(arbitrary logarithmic scale)')
names.append('ch0')
if ch0_islands:
img_ch0 = img.ch0_arr
images.append(img_ch0)
if hasattr(img, 'ngaus'):
if hasattr(img, 'ch0_pi_arr'):
ch0_str = 'Islands (hatched boundaries; red = PI only) and\nGaussians'
else:
ch0_str = 'Islands (hatched boundaries) and\nGaussians'
if hasattr(img, 'atrous_gaussians'):
ch0_str += ' (red = wavelet)'
titles.append(ch0_str)
else:
titles.append('Islands (hatched boundaries)')
names.append('ch0')
if ch0_flagged:
if not hasattr(img, 'ngaus'):
print('Image was not fit with Gaussians. Skipping display of flagged Gaussians.')
else:
img_ch0 = img.ch0_arr
images.append(img_ch0)
titles.append('Flagged Gaussians')
names.append('ch0')
if pi_image:
if not hasattr(img, 'ch0_pi_arr'):
print('Polarization module not run. Skipping PI image.')
else:
img_pi = img.ch0_pi_arr
images.append(img_pi)
titles.append('Polarized Intensity Image')
names.append('ch0_pi')
if rms_image:
img_rms = img.rms_arr
images.append(img_rms)
titles.append('Background rms Image')
names.append('rms')
if gresid_image:
if not hasattr(img, 'ngaus'):
print('Image was not fit with Gaussians. Skipping residual Gaussian image.')
else:
img_gaus_resid = img.resid_gaus_arr
images.append(img_gaus_resid)
titles.append('Gaussian Residual Image')
names.append('gaus_resid')
if gmodel_image:
if not hasattr(img, 'ngaus'):
print('Image was not fit with Gaussians. Skipping model Gaussian image.')
else:
img_gaus_mod = img.model_gaus_arr
images.append(img_gaus_mod)
titles.append('Gaussian Model Image')
names.append('gaus_mod')
if mean_image:
img_mean = img.mean_arr
images.append(img_mean)
titles.append('Background mean Image')
names.append('mean')
if sresid_image:
if img.opts.shapelet_do == False:
print('Image was not decomposed into shapelets. Skipping residual shapelet image.')
else:
img_shap_resid = img.resid_shap_arr
images.append(img_shap_resid)
titles.append('Shapelet Residual Image')
names.append('shap_resid')
if smodel_image:
if img.opts.shapelet_do == False:
print('Image was not decomposed into shapelets. Skipping model shapelet image.')
else:
img_shap_mod = img.model_shap_arr
images.append(img_shap_mod)
titles.append('Shapelet Model Image')
names.append('shap_mod')
if source_seds:
if img.opts.spectralindex_do == False:
print('Source SEDs were not fit. Skipping source SED plots.')
else:
src_list = img.sources
sed_src = get_src(src_list, 0)
if sed_src is None:
print('No sources found. Skipping source SED plots.')
else:
images.append('seds')
titles.append('')
names.append('seds')
srcid_cur = 0
if pyramid_srcs:
if img.opts.atrous_do == False:
print('Image was not decomposed into wavelets. Skipping wavelet images.')
else:
# Get the unique j levels and store them. Only make subplots for
# occupied j levels
print('Pyramidal source plots not yet supported.')
# j_list = []
# for p in img.pyrsrcs:
# for l in p.jlevels:
# j_list.append(l)
# j_set = set(j_list)
# j_with_gaus = list(j_set)
# index_first_waveplot = len(images)
# for i in range(len(j_with_gaus)):
# images.append('wavelets')
# names.append('pyrsrc'+str(i))
if psf_major or psf_minor or psf_pa:
if img.opts.psf_vary_do == False:
print('PSF variation not calculated. Skipping PSF variation images.')
else:
if psf_major:
img_psf_maj = img.psf_vary_maj_arr*fwsig
images.append(img_psf_maj)
titles.append('PSF Major Axis FWHM (pixels)')
names.append('psf_maj')
if psf_minor:
img_psf_min = img.psf_vary_min_arr*fwsig
images.append(img_psf_min)
titles.append('PSF Minor Axis FWHM (pixels)')
names.append('psf_min')
if psf_pa:
img_psf_pa = img.psf_vary_pa_arr
images.append(img_psf_pa)
titles.append('PSF Pos. Angle FWHM (degrees)')
names.append('psf_pa')
if images == []:
print('No images to display.')
return
im_mean = img.clipped_mean
im_rms = img.clipped_rms
if img.resid_gaus_arr is None:
low = 1.1*abs(img.min_value)
else:
low = N.max([1.1*abs(img.min_value),1.1*abs(N.nanmin(img.resid_gaus_arr))])
if low <= 0.0:
low = 1E-6
vmin_est = im_mean - im_rms*5.0 + low
if vmin_est <= 0.0:
vmin = N.log10(low)
else:
vmin = N.log10(vmin_est)
vmax = N.log10(im_mean + im_rms*30.0 + low)
ch0min = vmin
ch0max = N.log10(img.max_value + low)
vmin_cur = vmin
vmax_cur = vmax
origin = 'lower'
colours = ['m', 'b', 'c', 'g', 'y', 'k'] # reserve red ('r') for wavelets
styles = ['-', '-.', '--']
print('=' * 72)
print('NOTE -- With the mouse pointer in plot window:')
print(' Press "i" ........ : Get integrated flux densities and mean rms')
print(' values for the visible portion of the image')
print(' Press "m" ........ : Change min and max scaling values')
print(' Press "n" ........ : Show / hide island IDs')
print(' Press "0" ........ : Reset scaling to default')
if 'seds' in images:
print(' Press "c" ........ : Change source for SED plot')
if ch0_islands and hasattr(img, 'ngaus'):
print(' Click Gaussian ... : Print Gaussian and source IDs (zoom_rect mode, ')
print(' toggled with the "zoom" button and indicated in ')
print(' the lower right corner, must be off)')
if 'seds' in images:
print(' The SED plot will also show the chosen source.')
print('_' * 72)
if len(images) > 1:
numx = 2
else:
numx = 1
numy = int(N.ceil(float(len(images))/float(numx)))
fig = pl.figure(figsize=(max(15, 10.0*float(numy)/float(numx)), 10.0))
fig.canvas.manager.set_window_title('PyBDSM Fit Results for '+ img.filename)
gray_palette = cm.gray
gray_palette.set_bad('k')
for i, image in enumerate(images):
if image != 'wavelets' and image != 'seds':
if i == 0:
cmd = 'ax' + str(i+1) + ' = pl.subplot(' + str(numx) + \
', ' + str(numy) + ', ' + str(i+1) + ')'
else:
cmd = 'ax' + str(i+1) + ' = pl.subplot(' + str(numx) + \
', ' + str(numy) + ', ' + str(i+1) + ', sharex=ax1' + \
', sharey=ax1)'
exec(cmd)
if 'PSF' in titles[i]:
im = image
else:
im = N.log10(image + low)
if 'Islands' in titles[i]:
island_offsets_x = []
island_offsets_y = []
border_color = []
ax = pl.gca()
for iisl, isl in enumerate(img.islands):
xb, yb = isl.border
if hasattr(isl, '_pi'):
for c in range(len(xb)):
border_color.append('r')
else:
for c in range(len(xb)):
border_color.append('#afeeee')
island_offsets_x += xb.tolist()
island_offsets_y += yb.tolist()
marker = ax.text(N.max(xb)+2, N.max(yb), str(isl.island_id),
color='#afeeee', clip_on=True)
marker.set_visible(not marker.get_visible())
markers.append(marker)
# draw the gaussians with one colour per source or island
# (if gaul2srl was not run)
if hasattr(img, 'nsrc'):
nsrc = len(isl.sources)
for isrc in range(nsrc):
col = colours[int(isrc % 6)]
style = styles[int(isrc/6 % 3)]
src = isl.sources[isrc]
for g in src.gaussians:
if hasattr(g, 'valid'):
valid = g.valid
else:
valid = True
if g.jlevel == 0 and valid and g.gaus_num >= 0:
gidx = g.gaus_num
e = Ellipse(xy=g.centre_pix, width=g.size_pix[0],
height=g.size_pix[1], angle=g.size_pix[2]+90.0)
ax.add_artist(e)
e.set_picker(3)
e.set_clip_box(ax.bbox)
e.set_facecolor(col)
e.set_alpha(0.5)
e.gaus_id = gidx
e.src_id = src.source_id
e.jlevel = g.jlevel
e.isl_id = g.island_id
e.tflux = g.total_flux
e.pflux = g.peak_flux
e.centre_sky = g.centre_sky
if len(img.islands) > 0:
island_offsets = list(zip(N.array(island_offsets_x), N.array(island_offsets_y)))
isl_borders = collections.AsteriskPolygonCollection(4, offsets=island_offsets, color=border_color,
transOffset=ax.transData, sizes=(10.0,))
ax.add_collection(isl_borders)
if hasattr(img, 'gaussians'):
for atrg in img.gaussians:
if atrg.jlevel > 0 and atrg.gaus_num >= 0:
col = 'r'
style = '-'
gidx = atrg.gaus_num
e = Ellipse(xy=atrg.centre_pix, width=atrg.size_pix[0], height=atrg.size_pix[1], angle=atrg.size_pix[2]+90.0)
ax.add_artist(e)
e.set_picker(3)
e.set_clip_box(ax.bbox)
e.set_edgecolor(col)
e.set_facecolor('none')
e.set_alpha(0.8)
e.gaus_id = gidx
e.src_id = atrg.source_id
e.jlevel = atrg.jlevel
e.isl_id = atrg.island_id
e.tflux = atrg.total_flux
e.pflux = atrg.peak_flux
e.centre_sky = atrg.centre_sky
if 'Flagged' in titles[i]:
for iisl, isl in enumerate(img.islands):
ax = pl.gca()
style = '-'
for ig, g in enumerate(isl.fgaul):
col = colours[ig % 6]
ellx, elly = func.drawellipse(g)
gline, = ax.plot(ellx, elly, color = col,
linestyle = style, picker=3)
gline.flag = g.flag
if 'PSF' in titles[i]:
cmd = 'ax' + str(i+1) + ".imshow(N.transpose(im), origin=origin, "\
"interpolation='nearest', cmap=gray_palette)"
else:
cmd = 'ax' + str(i+1) + ".imshow(N.transpose(im), origin=origin, "\
"interpolation='nearest',vmin=vmin, vmax=vmax, cmap=gray_palette)"
exec(cmd)
cmd = 'ax' + str(i+1) + '.format_coord = format_coord_'+names[i]
exec(cmd)
pl.title(titles[i])
elif image == 'seds':
cmd = 'ax' + str(i+1) + ' = pl.subplot(' + str(numx) + \
', ' + str(numy) + ', ' + str(i+1) + ')'
exec(cmd)
ax = pl.gca()
plot_sed(sed_src, ax)
elif image == 'wavelets':
if i == index_first_waveplot:
for j in range(len(j_with_gaus)):
cmd = 'ax' + str(j+i+1) + ' = pl.subplot(' + str(numx) + \
', ' + str(numy) + ', ' + str(j+i+1) + ', sharex=ax1, '+\
'sharey=ax1)'
exec(cmd)
pl.title('Pyramidal Sources for\nWavelet Scale J = ' +
str(j_with_gaus[j]))
for pyr in img.pyrsrcs:
for iisl, isl in enumerate(pyr.islands):
jj = pyr.jlevels[iisl]
jindx = j_with_gaus.index(jj)
col = colours[pyr.pyr_id % 6]
ind = N.where(~isl.mask_active)
cmd = "ax" + str(jindx + index_first_waveplot + 1) + \
".plot(ind[0]+isl.origin[0], "\
"ind[1]+isl.origin[1], '.', color=col)"
exec(cmd)
fig.canvas.mpl_connect('key_press_event', on_press)
fig.canvas.mpl_connect('pick_event', on_pick)
pl.show()
pl.close('all')
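# Hedged usage sketch (normally reached via img.show_fit(); `img` is a processed
# Image object and the keyword values are illustrative):
#   plotresults(img, ch0_islands=True, rms_image=False, mean_image=False)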
def on_pick(event):
global images, srcid_cur, samp_client, samp_key, do_broadcast, samp_gaul_table_url, samp_srl_table_url
g = event.artist
if hasattr(g, 'gaus_id'):
gaus_id = g.gaus_id
src_id = g.src_id
isl_id = g.isl_id
tflux = g.tflux
pflux = g.pflux
wav_j = g.jlevel
if wav_j == 0:
print('Gaussian #' + str(gaus_id) + ' (in src #' + str(src_id) + \
', isl #' + str(isl_id) + '): F_tot = ' + str(round(tflux,4)) + \
' Jy, F_peak = ' + str(round(pflux,4)) + ' Jy/beam')
else:
print('Gaussian #' + str(gaus_id) + ' (in src #' + str(src_id) + \
', isl #' + str(isl_id) + ', wav #' + str(wav_j) + \
'): F_tot = ' + str(round(tflux,3)) + ' Jy, F_peak = ' + \
str(round(pflux,4)) + ' Jy/beam')
# Transmit src_id, gaus_id, and coordinates to SAMP Hub (if we are connected)
if do_broadcast and samp_key is not None:
if samp_gaul_table_url is not None:
func.send_highlight_row(samp_client, samp_key, samp_gaul_table_url, gaus_id)
if samp_srl_table_url is not None:
func.send_highlight_row(samp_client, samp_key, samp_srl_table_url, src_id)
func.send_coords(samp_client, samp_key, g.centre_sky)
# Change source SED
# First check that SEDs are being plotted and that the selected Gaussian
# is from the zeroth wavelet image
has_sed = False
if 'seds' in images and wav_j == 0:
has_sed = True
if not has_sed:
return
ax_indx = images.index('seds')
sed_src = get_src(src_list, src_id)
if srcid_cur == src_id:
return
srcid_cur = src_id
axes_list = fig.get_axes()
for axindx, ax in enumerate(axes_list):
if images[axindx] == 'seds':
plot_sed(sed_src, ax)
else:
print('Flagged Gaussian (flag = ' + str(g.flag) + '; use "' + \
"help 'flagging_opts'" + '" for flag meanings)')
pl.draw()
def on_press(event):
"""Handle keypresses"""
from .interface import raw_input_no_history
import numpy
global img_ch0, img_rms, img_mean, img_gaus_mod, img_shap_mod
global pixels_per_beam, vmin, vmax, vmin_cur, vmax_cur, img_pi
global ch0min, ch0max, low, fig, images, src_list, srcid_cur
global markers
if event.key == '0':
print('Resetting limits to defaults (%.4f -- %.4f Jy/beam)' \
% (pow(10, vmin)-low,
pow(10, vmax)-low))
axes_list = fig.get_axes()
for axindx, ax in enumerate(axes_list):
if images[axindx] != 'wavelets' and images[axindx] != 'seds':
im = ax.get_images()[0]
im.set_clim(vmin, vmax)
vmin_cur = vmin
vmax_cur = vmax
pl.draw()
if event.key == 'm':
# Modify scaling
# First check that there are images to modify
has_image = False
for im in images:
if isinstance(im, numpy.ndarray):
has_image = True
if not has_image:
return
minscl = 'a'
while isinstance(minscl, str):
try:
if minscl == '':
minscl = pow(10, vmin_cur) - low
break
minscl = float(minscl)
except ValueError:
prompt = "Enter min value (current = %.4f Jy/beam) : " % (pow(10, vmin_cur)-low,)
try:
minscl = raw_input_no_history(prompt)
except RuntimeError:
print('Sorry, unable to change scaling.')
return
minscl = N.log10(minscl + low)
maxscl = 'a'
while isinstance(maxscl, str):
try:
if maxscl == '':
maxscl = pow(10, vmax_cur) - low
break
maxscl = float(maxscl)
except ValueError:
prompt = "Enter max value (current = %.4f Jy/beam) : " % (pow(10, vmax_cur)-low,)
try:
maxscl = raw_input_no_history(prompt)
except RuntimeError:
print('Sorry, unable to change scaling.')
return
maxscl = N.log10(maxscl + low)
if maxscl <= minscl:
print('Max value must be greater than min value!')
return
axes_list = fig.get_axes()
for axindx, ax in enumerate(axes_list):
if images[axindx] != 'wavelets' and images[axindx] != 'seds':
im = ax.get_images()[0]
im.set_clim(minscl, maxscl)
vmin_cur = minscl
vmax_cur = maxscl
pl.draw()
if event.key == 'c':
# Change source SED
# First check that SEDs are being plotted
has_sed = False
if 'seds' in images:
has_sed = True
if not has_sed:
return
srcid = 'a'
while isinstance(srcid, str):
try:
if srcid == '':
srcid = srcid_cur
break
srcid = int(srcid)
except ValueError:
prompt = "Enter source ID (current = %i) : " % (srcid_cur,)
try:
srcid = raw_input_no_history(prompt)
except RuntimeError:
print('Sorry, unable to change source.')
return
ax_indx = images.index('seds')
sed_src = get_src(src_list, srcid)
if sed_src is None:
print('Source not found!')
return
srcid_cur = srcid
axes_list = fig.get_axes()
for axindx, ax in enumerate(axes_list):
if images[axindx] == 'seds':
plot_sed(sed_src, ax)
pl.draw()
if event.key == 'i':
# Print info about visible region
has_image = False
axes_list = fig.get_axes()
# Get limits of visible region
for axindx, ax in enumerate(axes_list):
if images[axindx] != 'wavelets' and images[axindx] != 'seds':
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
has_image = True
break
if not has_image:
return
        if xmin < 0:
            xmin = 0
        if xmax > img_ch0.shape[0]:
            xmax = img_ch0.shape[0]
        if ymin < 0:
            ymin = 0
        if ymax > img_ch0.shape[1]:
            ymax = img_ch0.shape[1]
        # get_xlim()/get_ylim() return floats; cast to int so the limits can be
        # used as slice indices below
        xmin, xmax = int(xmin), int(xmax)
        ymin, ymax = int(ymin), int(ymax)
        flux = N.nansum(img_ch0[xmin:xmax, ymin:ymax])/pixels_per_beam
mask = N.isnan(img_ch0[xmin:xmax, ymin:ymax])
num_pix_unmasked = float(N.size(N.where(mask == False), 1))
mean_rms = N.nansum(img_rms[xmin:xmax, ymin:ymax])/num_pix_unmasked
mean_map_flux = N.nansum(img_mean[xmin:xmax, ymin:ymax])/pixels_per_beam
if img_gaus_mod is None:
gaus_mod_flux = 0.0
else:
gaus_mod_flux = N.nansum(img_gaus_mod[xmin:xmax, ymin:ymax])/pixels_per_beam
print('Visible region (%i:%i, %i:%i) :' % (xmin, xmax, ymin, ymax))
print(' ch0 flux density from sum of pixels ... : %f Jy'\
% (flux,))
print(' Background mean map flux density ...... : %f Jy'\
% (mean_map_flux,))
print(' Gaussian model flux density ........... : %f Jy'\
% (gaus_mod_flux,))
if img_shap_mod is not None:
shap_mod_flux = N.nansum(img_shap_mod[xmin:xmax, ymin:ymax])/pixels_per_beam
print(' Shapelet model flux density ........... : %f Jy'\
% (shap_mod_flux,))
print(' Mean rms (from rms map) ............... : %f Jy/beam'\
% (mean_rms,))
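        # The divisions by pixels_per_beam above convert sums of Jy/beam pixel
        # values into integrated flux densities in Jy. Minimal illustration with
        # assumed numbers (not used by the code):
        #
        #     patch = N.full((5, 10), 1.0)      # 50 pixels, each 1.0 Jy/beam
        #     beam_area_pix = 25.0              # assumed pixels per beam
        #     N.nansum(patch) / beam_area_pix   # -> 2.0 Jy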
if event.key == 'n':
# Show/Hide island numbers
if markers:
for marker in markers:
marker.set_visible(not marker.get_visible())
pl.draw()
# The following functions add ra, dec and flux density to the
# coordinates in the lower-right-hand corner of the figure window.
# Since each axis needs its own function (to return its particular
# flux), we need a separate function for each subplot.
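# Illustrative sketch of how one of these formatters is attached to an axis
# (the real hook-up happens where the subplots are created; the imshow call and
# its arguments here are assumptions):
#
#     import matplotlib.pyplot as plt
#     fig2, ax2 = plt.subplots()
#     ax2.imshow(img_ch0.transpose(), origin='lower', interpolation='nearest')
#     ax2.format_coord = format_coord_ch0   # status bar: x, y, RA, Dec, flux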
def format_coord_ch0(x, y):
"""Custom coordinate format for ch0 image"""
global img_ch0
im = img_ch0
coord_str = make_coord_str(x, y, im)
return coord_str
def format_coord_ch0_pi(x, y):
"""Custom coordinate format for ch0 image"""
global img_pi
im = img_pi
coord_str = make_coord_str(x, y, im)
return coord_str
def format_coord_rms(x, y):
"""Custom coordinate format for rms image"""
global img_rms
im = img_rms
coord_str = make_coord_str(x, y, im)
return coord_str
def format_coord_mean(x, y):
"""Custom coordinate format for mean image"""
global img_mean
im = img_mean
coord_str = make_coord_str(x, y, im)
return coord_str
def format_coord_gaus_mod(x, y):
"""Custom coordinate format for Gaussian model image"""
global img_gaus_mod
im = img_gaus_mod
coord_str = make_coord_str(x, y, im)
return coord_str
def format_coord_shap_mod(x, y):
"""Custom coordinate format for shapelet model image"""
global img_shap_mod
im = img_shap_mod
coord_str = make_coord_str(x, y, im)
return coord_str
def format_coord_gaus_resid(x, y):
"""Custom coordinate format for Gaussian residual image"""
global img_gaus_resid
im = img_gaus_resid
coord_str = make_coord_str(x, y, im)
return coord_str
def format_coord_shap_resid(x, y):
"""Custom coordinate format for shapelet residual image"""
global img_shap_resid
im = img_shap_resid
coord_str = make_coord_str(x, y, im)
return coord_str
def format_coord_psf_maj(x, y):
"""Custom coordinate format for PSF major image"""
global img_psf_maj
im = img_psf_maj
coord_str = make_coord_str(x, y, im, unit='arcsec')
return coord_str
def format_coord_psf_min(x, y):
"""Custom coordinate format for PSF minor image"""
global img_psf_min
im = img_psf_min
coord_str = make_coord_str(x, y, im, unit='arcsec')
return coord_str
def format_coord_psf_pa(x, y):
"""Custom coordinate format for PSF pos. ang. image"""
global img_psf_pa
im = img_psf_pa
coord_str = make_coord_str(x, y, im, unit='degrees')
return coord_str
def xy_to_radec_str(x, y):
    """Converts x, y in image coords to sexagesimal RA and Dec strings"""
from .output import ra2hhmmss, dec2ddmmss
global pix2sky
ra, dec = pix2sky([x, y])
ra = ra2hhmmss(ra)
sra = str(ra[0]).zfill(2)+':'+str(ra[1]).zfill(2)+':'+str("%.1f" % (ra[2])).zfill(3)
dec = dec2ddmmss(dec)
decsign = ('-' if dec[3] < 0 else '+')
sdec = decsign+str(dec[0]).zfill(2)+':'+str(dec[1]).zfill(2)+':'+str("%.1f" % (dec[2])).zfill(3)
return sra, sdec
def make_coord_str(x, y, im, unit='Jy/beam'):
"""Makes the x, y, ra, dec, flux string"""
rastr, decstr = xy_to_radec_str(x, y)
col = int(x + 0.5)
row = int(y + 0.5)
numcols, numrows = im.shape
if col >= 0 and col < numcols\
and row >= 0 and row < numrows:
z = im[col, row]
return 'x=%1.1f, y=%1.1f, RA=%s, Dec=%s, F=%+1.4f %s' % (x, y, rastr, decstr, z, unit)
else:
return 'x=%1.1f, y=%1.1f' % (x, y)
def plot_sed(src, ax):
"""Plots the SED for source 'src' to axis 'ax'"""
global sky2pix
global fig
ax.cla()
norm = src.spec_norm
spin = src.spec_indx
espin = src.e_spec_indx
y = N.array(src.specin_flux)
ey = N.array(src.specin_fluxE)
x = N.array(src.specin_freq)
ax.errorbar(N.log10(x/1e6), N.log10(y), yerr=ey/y, fmt='bo')
ax.plot(N.log10(x/1e6), N.log10(norm)+N.log10(x/src.specin_freq0)*spin,
'-g', label="alpha = %.2f" % (spin,))
pos = sky2pix(src.posn_sky_centroid)
xpos = int(pos[0])
ypos = int(pos[1])
pl.title('SED of source #'+str(src.source_id)+'\n'
+'(x = '+str(xpos)+', y = '+str(ypos)+')')
pl.xlabel('log Frequency (MHz)')
pl.ylabel('log Flux Density (Jy)')
pl.legend()
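# The SED line drawn above is a power law, S(nu) = norm * (nu/nu0)**spin, shown in
# log-log space as log10(S) = log10(norm) + spin*log10(nu/nu0). Quick numerical
# check with made-up values (not derived from any catalog):
#
#     norm, spin, nu0 = 1.0, -0.7, 150e6    # Jy, spectral index, Hz
#     nu = 300e6
#     S = norm * (nu / nu0)**spin           # ~0.62 Jy, fainter at the higher freq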
def get_src(src_list, srcid):
"""Returns the source for srcid or None if not found"""
for src in src_list:
if src.source_id == srcid:
return src
return None
| 29,396 | 38.672065 | 137 | py |
| PyBDSF | PyBDSF-master/bdsf/tc.py |
"""Defines some basic facilities for handling typed values.
It's a quite basic and limited implementation, tailored specifically for
use in the PyBDSM user options and derived properties. For a user
option, one can define a group that is used when listing the options to
the screen. For a property (e.g., flux density), one can define the
column name to be used on output and the associated units.
For a much more generic and capable implementation, have a look at the
Enthought Traits package:
http://code.enthought.com/projects/traits
Defined are:
 - a number of tc-handlers which allow one to type-check and/or cast
   values to a specific type (tcCType, tcEnum, tcTuple,
   tcOption, tcInstance, tcList, tcAny). These aren't really
   intended for use by end users.
 - class TC, which implements the concept of a type-checked property
   with a default value.
 - a number of wrappers around TC to simplify its usage (Int,
   Float, Bool, String, Tuple, Enum, Option, NArray, Instance,
   tInstance, List, Any)
Usage:
For most needs it's enough to use the wrapper interface.
One important remark -- a class containing tc-variables should be a
new-style class, so you should explicitly inherit from 'object'
for Python < 2.6.
Example:
from tc import Int, Float, Bool, String, Tuple, Enum, \\
Option, NArray, Instance, Any, TCInit
class tst(object):
intval = Int(doc="Integer value")
boolval = Bool(True, "Some boolean flag")
op_type = Enum("op1", "op2", doc="Some enumerated value")
def __init__(self):
TCInit(self) ### this is optional
v = tst()
v.intval = 1 # OK
v.intval = "33" # OK, casted to 33
v.intval = "failure" # FAILS
v.op_type= "op2" # OK
v.op_type= "op3" # FAILS
"""
try:
import exceptions
except ImportError:
import builtins as exceptions
import types
_sequence_types = (list, tuple)
_class_types = (type, type)
_basic_types = (bool, int, int,
float, complex,
bytes, str)
############################################################
## Wrappers around TC to simplify its usage for end users
############################################################
def Int(value=0, doc=None, group=None, colname=None, units=None):
"""Create tc-value of type int"""
return TC(value, tcCType(int), doc, group, colname, units)
def Float(value=0., doc=None, group=None, colname=None, units=None):
"""Create tc-value of type float"""
return TC(value, tcCType(float), doc, group, colname, units)
def Bool(value=False, doc=None, group=None):
"""Create tc-value of type bool"""
return TC(value, tcCType(bool), doc, group)
def String(value='', doc=None, group=None, colname=None, units=None):
"""Create tc-value of type string"""
return TC(value, tcCType(str), doc, group, colname, units)
def Tuple(*values, **kws):
"""Create tc-value of type tuple.
Parameters:
values: zero or more arguments
kws: keyword arguments. Currently only 'doc' and 'group'
are recognized
If the first item of values is a tuple, it's used as the
default value. The remaining arguments are used to build
type constraints and should be TC values.
Examples:
Tuple((1,2,3)) # tuple of 3 integers, default = (1,2,3)
Tuple(Int(3), Float(2)) # tuple of int&float, default = (3, 2.0)
Tuple((1,2), Int(3), Float(2)) # tuple of int+float, default = (1, 2.0)
"""
doc = kws.pop('doc', None)
group = kws.pop('group', None)
if len(values) == 0:
return TC((), tcTuple(), doc, group)
default = None
if isinstance(values[0], tuple):
default, values = values[0], values[1:]
if default is None:
default = tuple([x._default for x in values])
if len(values) == 0:
values = [tc_from(x) for x in default]
return TC(default, tcTuple(*values), doc, group)
def Enum(*values, **kws):
"""Create tc-value of type enum.
Parameters:
values: list or tuple of valid values
kws: keyword arguments. Currently only 'doc' and 'group'
are recognized
Default value is taken to be values[0].
Examples:
Enum(3, [1,2,3]) # enum of 1,2,3 with default of 3
Enum(1,2,3) # enum of 1,2,3 with default of 1
"""
default = values[0]
if (len(values) == 2) and (type(values[1]) in _sequence_types):
values = values[1]
doc = kws.pop('doc', None)
group = kws.pop('group', None)
return TC(default, tcEnum(*values), doc, group)
def Option(value, type=None, doc=None, group=None):
"""Creates optional tc-value.
Parameters:
value, type: default value and type
doc: doc-string for the value
group: group designation for the value
"""
if type is None:
type = tc_from(value)
if isinstance(value, TC):
value = value._default
return TC(value, tcOption(type), doc, group)
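# Example usage of Option (an illustrative sketch; the option name and values are
# made up, not taken from the PyBDSM options):
#
#     class tst(object):
#         box = Option(None, Tuple(Int(100), Int(30)),
#                      doc="Box and step size, or None")
#
#     t = tst()
#     t.box = (60, 15)   # OK, each element is cast to int
#     t.box = None       # OK, None is always accepted for an optional value
#     t.box = 3          # raises tcError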
def NArray(value=None, or_none=True, doc=None, group=None, colname=None,
units=None):
"""Creates tc-value which holds Numpy arrays
Parameters:
value: default value
or_none: if 'None' is valid value
group: group designation for the value
colname: name of column if quantity is to be output
units: units if quantity is to be output
"""
try:
import numpy as N
except:
raise tcError("Can't create tc-value of type NArray " \
"without access to numpy module")
return Instance(value, N.ndarray, or_none, doc, group, colname, units)
def Instance(value, type=None, or_none=True, doc=None, group=None,
colname=None, units=None):
"""Creates tc-value which holds instances of specific class.
Parameters:
value, type: default value and type
or_none: flag if 'None' is valid value for this variable
group: group designation for the value
colname: name of column if quantity is to be output
units: units if quantity is to be output
Examples:
Instance(instance, class)
Instance(instance)
Instance(class)
"""
if type is None:
if isinstance(value, _class_types):
value, type = None, value
else:
type = value.__class__
return TC(value, tcInstance(type, or_none), doc, group, colname, units)
def tInstance(type, or_none=False):
"""Create tc-handler for values which are instances of
the specific class.
    This function is useless on its own, and should be
    used to create an Instance constraint for compound tc-values.
    It's especially useful for classes which have non-trivial
    constructors.
Parameters:
type: target type/class
or_none: flag if 'None' is valid value for this variable
    Example: we want to define a tc-variable holding a list of objects
        List(Instance(slice, or_none=False)) ## FAILS, no default value
List(Instance(slice)) ## works, but list MAY contain None's
List(tInstance(slice)) ## GOOD
"""
if not isinstance(type, _class_types):
type = type.__class__
return tcInstance(type, or_none)
def List(value, type=None, doc=None, group=None, colname=None, units=None):
"""Creates tc-value which represents a list, where each element
obeys specific type-constrains.
Parameters:
doc: docstring for the object
value, type: default value and type
group: parameter group to which the option belongs
colname: name of column if quantity is to be output
units: units if quantity is to be output
Examples:
List(Int()) # list of integers, default value is []
List([1,2], Int()) # list of integers, default value is [1,2]
    Just one more warning -- a List always has a default value
    ([] in the simplest case), and this default value is shared
    between the instances, so be careful not to modify it.
    Counter-example:
class tst(object):
l = List(Int())
x1 = tst()
x2 = tst() # both instances share default value
x1.l.append(1)
    print(x2.l) # this will print [1]
x1.l = [2]
    print(x2.l) # still [1], as x1 has its own local value now
"""
if type is None:
value, type = [], tc_from(value)
return TC(value, tcList(type), doc, group, colname, units)
def Any(value=None, doc=None, group=None):
"""Creates tc-value of arbitrary type
(e.g. no type-checking is done)
"""
return TC(value, tcAny(), doc, group)
def TCInit(obj):
"""Initialize tc-variables in the new instance"""
TC.set_property_names(obj.__class__)
obj._tc_values = {}
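# Minimal end-to-end example of the wrappers defined above (names are illustrative
# only):
#
#     class MyOpts(object):
#         niter = Int(10, doc="Number of iterations")
#         mode = Enum("fast", "slow", doc="Processing mode")
#         def __init__(self):
#             TCInit(self)    # optional; resolves names and clears local values
#
#     o = MyOpts()
#     o.niter = "25"     # cast to the integer 25
#     o.mode = "slow"    # OK
#     o.mode = "other"   # raises tcError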
############################################################
## Exception type
############################################################
class tcError(exceptions.Exception):
"""Custom exception type to simplify exception handling"""
pass
############################################################
## TC -- type-checked variable
############################################################
class TC(object):
    """TC is an implementation of a type-checked value.
The primary usage pattern is via class attributes:
class Test(object): ### MUST be new-style object
value1 = Int(3)
value2 = Tuple(Int(5), Option(Any()))
test = Test()
    print(test.value1)
test.value2 = (3, None)
    An important restriction -- it may only be used with
    new-style objects (e.g. objects derived from 'object'
    or 'type'), and the attribute should be defined in the
    class of the object.
"""
    def __init__(self, value, _type=None, doc=None, group=None, colname=None,
                 units=None):
        """Create a type-checked object.
Parameters:
value: default value
_type: type specification (instance of tcHandler) or None
doc: docstring for the object
group: parameter group to which the option belongs
colname: name of column if quantity is to be output
units: units if quantity is to be output
"""
if _type is not None:
self._type = _type
else:
self._type = tc_from(value)
self._default = self._type.cast(value)
self._name = None # name is unknown atm
self._group = group
self._doc = doc
self._colname = colname
self._units = units
self.__doc__ = "default value is %s (%s)" % \
(str(self._default), self._type.info())
if doc is not None:
self.__doc__ += "\n" + doc
def __get__(self, instance, cls):
"""Get a value from instance (or return default value)"""
if instance is None:
return self
try:
return instance._tc_values[self]
except:
return self._default
def __set__(self, instance, value):
"""Set a value"""
try:
values = instance._tc_values
except:
values = instance._tc_values = {}
if not self._name:
self.set_property_names(instance.__class__)
values[self] = self._type.cast(value, self._name,
instance.__class__.__name__)
def __delete__(self, instance):
"""Revert value to default"""
try:
del instance._tc_values[self]
except:
pass
def cast(self, value, *args):
"""See tcHandler.cast"""
return self._type.cast(value, *args)
def info(self):
"""Return description of tc-value"""
return self.__doc__
def doc(self):
"""Return short description of tc-value"""
return self._doc
def group(self):
"""Return group designation of tc-value"""
return self._group
def colname(self):
"""Return column name designation of tc-value"""
return self._colname
def units(self):
"""Return units designation of tc-value"""
return self._units
@staticmethod
def set_property_names(klass):
"""Scan class definition and update _name for all
TC objects defined there"""
for k,v in klass.__dict__.items():
if isinstance(v, TC):
v._name = k
############################################################
## tcHandler and derived handlers for the specific
## types/values
############################################################
class tcHandler(object):
"""Base class for all tc-handlers"""
def cast(self, value, *args):
"""Check that provided value meets type requirements
or cast it to the specific type.
"""
        self.error(reprx(value), *args)
    def is_valid(self, value):
        """Check if the provided value can be safely cast to the
        proper type"""
try:
self.cast(value)
return True
except:
return False
    def info(self):
        """A description of the valid values"""
return "value of unknown type"
def error(self, value, *args):
if len(args) == 2 and args[0]:
error = "Failed to set property %s of class %s " \
"to a value of %s; expected %s." % \
(args[0], args[1], value, self.info())
else:
            error = "A value of %s can't be cast to %s" % \
(value, self.info())
raise tcError(error, value, self.info(), *args)
############################################################
class tcAny(tcHandler):
"""Allows any values of any type"""
def cast(self, value, *args):
return value
def info(self):
return "any value"
############################################################
class tcCType(tcHandler):
"""Ensures that value has a specific python type
    This handler implements a so-called casting approach:
    it will accept all values which can be converted to the
    required type by means of a casting operation. For
    example:
    v = tcCType(int)
    print(v.cast(3))    # cast to 3
    print(v.cast(3.3))  # cast to 3
    print(v.cast("3"))  # cast to 3
"""
    def __init__(self, _type):
        """Creates a tcCType handler.
Parameters:
        _type: Python type object or a value of the required type
"""
if not isinstance(_type, type):
_type = type(_type)
self.type = _type
def cast(self, value, *args):
if type(value) is self.type:
return value
try:
return self.type(value)
except:
self.error("%s (%s)" % (str_type(value), reprx(value)),
*args)
def info(self):
return "a value of %s" % str_type(self.type)
############################################################
class tcEnum(tcHandler):
"""Ensures that a value is a member of a specified list of values"""
def __init__(self, *values):
"""Creates a tcEnum handler.
Parameters:
values: list or tuple of all legal values
Description:
The list of values can be provided as a list/tuple of values
or just specified in-line. So that ''tcEnum([1,2,3])'' and
''tcEnum(1,2,3)'' are equivalent.
"""
if len(values) == 1 and type(values[0]) in _sequence_types:
values = values[0]
self.values = values
def cast(self, value, *args):
if value in self.values:
return value
self.error(repr(value), *args)
def info(self):
res = "a value of %s" % \
" or ".join([repr(x) for x in self.values])
return res
############################################################
class tcTuple(tcHandler):
"""Ensures that a value is a tuple of specified length,
with elements that are of specified type
"""
def __init__(self, *args):
"""Creates a tcTuple handler.
Parameters:
args: list of tuple components
Description:
Each tuple component should be either a specific
tc-handler or a value which can be converted to it
(by the means of tc_from function)
"""
self.tcs = tuple([tc_from(x) for x in args])
def cast(self, value, *args):
try:
if type(value) in _sequence_types:
if len(value) == len(self.tcs):
res = []
for i, h in enumerate(self.tcs):
res.append(h.cast(value[i]))
return tuple(res)
except:
pass
self.error(reprx(value), *args)
def info(self):
res = "a tuple of the form: (%s)" % \
", ".join([x.info() for x in self.tcs])
return res
############################################################
class tcOption(tcHandler):
"""Implements an optional value: None or a value
restricted by another tcHandler"""
def __init__(self, _type):
"""Creates tcOption handler.
Parameters:
        _type: tc-handler, Python type object or a value of
               the required type
"""
self.type = tc_from(_type)
def cast(self, value, *args):
try:
if value is None:
return value
return self.type.cast(value)
except:
self.error("%s (%s)" % (str_type(value), reprx(value)),
*args)
def info(self):
return self.type.info() + " or None"
############################################################
class tcInstance(tcHandler):
"""Ensures that a value belongs to a specified python
    class or type (or one of its subclasses).
"""
def __init__(self, klass, or_none=True):
"""Creates tcInstance handler.
Parameters:
klass: Python class, type or an instance of python class
or_none: whether we should accept None as a valid value
(defaults to True)
"""
if not isinstance(klass, _class_types):
klass = klass.__class__
self.klass = klass
self.or_none = or_none
def cast(self, value, *args):
if (value is None) and self.or_none:
return value
if isinstance(value, self.klass):
return value
self.error(reprx(value), *args)
def info(self):
res = "an instance of " + str_type(self.klass)
if self.or_none:
res += " or None"
return res
############################################################
class tcList(tcHandler):
"""Ensures that a value is a list containing elements of
a specified kind. It also ensures that any change made
    to the list doesn't violate the list type constraints.
"""
def __init__(self, kind):
"""Creates tcList handler.
Parameters:
kind: tc-handler constraining elements of the list
"""
self.type = tc_from(kind)
def cast(self, value, *args):
if isinstance(value, _sequence_types):
v = [self.type.cast(x, *args) for x in value]
return list(v)
self.error(reprx(value), *args)
def info(self):
return "a list where each element is " + self.type.info()
############################################################
def tc_from(v):
"""tc_from tries to guess an appropriate tc-handler for the
provided object.
    The basic logic is as follows:
     - a TC object results in its internal type constraint
     - for instances and type-objects of the basic numeric
       types we use the tcCType handler
     - a list of values results in the tcEnum handler
     - a tuple of values results in the tcTuple handler
     - a value of None results in the tcAny handler
"""
if isinstance(v, TC):
return v._type
if isinstance(v, tcHandler):
return v
if v in _basic_types:
return tcCType(v)
if type(v) in _basic_types:
return tcCType(v)
if type(v) is list:
return tcEnum(v)
if type(v) is tuple:
return tcTuple(*v)
if v is None:
return tcAny()
error = "Can't create tc-handler for a value of %s (%s)" %\
(str_type(v), reprx(v))
raise tcError(error)
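# A few concrete mappings performed by tc_from (illustrative; the values are
# chosen only to exercise each branch above):
#
#     tc_from(int)            # -> tcCType handler for int
#     tc_from(3.0)            # -> tcCType handler for float
#     tc_from(['a', 'b'])     # -> tcEnum('a', 'b')
#     tc_from((Int(), 2.0))   # -> tcTuple(Int(), 2.0)
#     tc_from(None)           # -> tcAny()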
############################################################
def str_type(v):
"""Pretty-print type of v"""
if isinstance(v, _class_types):
return repr(v)[1:-1]
else:
return repr(type(v))[1:-1]
############################################################
def reprx(v):
"""Pretty-print value of v"""
    # types.InstanceType only exists in Python 2 (old-style class instances)
    if hasattr(types, 'InstanceType') and type(v) is types.InstanceType:
        return v.__class__.__name__
    else:
        return repr(v)
| 20,582 | 29.092105 | 77 | py |
| PyBDSF | PyBDSF-master/bdsf/_version.py |
"""Version module.
This module simply stores the version number, as well as a changelog.
"""
# Version number
__version__ = '1.11.0a1'
# Changelog
def changelog():
"""
PyBDSF Changelog.
-----------------------------------------------------------------------
2023/05/22 - Version 1.10.3
2023/05/08 - Fix build issue with Python 3.11 (#205)
2023/05/03 - Use cibuildwheel to build binary wheels (#203)
Build binary wheels for Linux and MacOS (Intel).
Drop support for Python 3.6.
2023/05/02 - Fix #198 (#199)
Use the new method call `canvas.manager.set_window_title`
2023/04/28 - Replace Travis CI with GitHub actions (#196)
2023/02/10 - Version 1.10.2
2023/02/10 - Fix issues with numpy versions >= 1.24 (#193)
2022/11/28 - Switch to `manylinux2014` for building binary wheels (#191)
2022/11/23 - Fix ImportError in setuptools (#190)
2022/10/31 - Add binary wheels for Python 3.10 (#186)
2022/10/14 - Fix various documentation issues (#185)
2022/10/11 - Add logfilename option (#181)
2022/10/05 - Use len() instead of numpy.alen() (#180)
2022/02/14 - Version 1.10.1: Fix Numpy API incompatibility issue
2022/02/09 - Version 1.10.0
2022/02/09 - Update some functions as required by scipy versions >= 1.8.0
(PR #172)
2022/02/09 - Fix build issues with Python 3.8, end support for Python < 3.6,
add support for Python 3.8 and 3.9, and make installation of the interactive
pybdsf shell optional (PR #169)
2022/02/09 - Improve handling of the beam in the spectral index module
(PR #165)
2021/05/05 - Improve handling of large, complex islands (PR #160)
2020/04/07 - Allow a file to be supplied for the ch0 image used in the
spectral index module (PR #127)
2019/12/05 - Version 1.9.2
2019/12/04 - Fix exception behaviour if spline order change does not work
2019/09/27 - Add check for frequency info in header
2019/09/25 - Version 1.9.1
2019/09/25 - Fix various minor bugs
2019/06/06 - Fix blank_limit check_low error (#100)
2019/05/09 - Fix various shapelet decomposition issues
2019/05/08 - Fix crash in Gaussian fitting (#96)
2019/03/25 - Version 1.9.0
2018/10/18 - Add support for Python 3
2018/10/18 - Fix various minor bugs
2018/10/12 - Version 1.8.15
2018/10/09 - Fix segfault in Gaussian fitting (#63)
2018/10/04 - Fix math domain error (#76)
2018/06/21 - Fix setup.py for boost versions > 1.63
2018/05/18 - Version 1.8.14
2018/05/18 - Fix an error on total flux density (#50)
2018/05/18 - Add the possibility to provide an external noise and mean maps (#43)
2018/05/18 - Append the image FITS header into catalog FITS header (#53)
2018/05/18 - Make PyBDSF compatible with newer boost libraries, specifically
those used in Ubuntu 18.04 (#55)
2017/11/17 - Version 1.8.13
2017/11/17 - Remove deprecated boolean operators
2017/09/01 - Version 1.8.12
2017/09/01 - Fix crash with tiny regions
2017/09/01 - Fix very low centroid peak fluxes
2017/09/01 - Fix compile error with numpy 1.13
2017/06/01 - Version 1.8.11
2017/06/01 - Fix for interactive shell problem
2017/05/31 - Version 1.8.10
2017/05/31 - Fixes for various installation and runtime issues on modern systems.
2017/03/23 - Version 1.8.9
2017/03/23 - Fix to bug that causes an error when grouping Gaussians
into sources
2017/03/17 - Version 1.8.8
2017/03/17 - Rename to PyBDSF, move to github, add setup.py installer
2017/02/28 - Fix to issues related to numpy >= 1.12 and astropy >= 1.3
2016/06/10 - Version 1.8.7
2016/06/10 - Fix to bug that caused incorrect output images when input
image was not square.
2016/01/20 - Version 1.8.6
2016/01/15 - Fix to bug that caused incorrect island mask when two
islands are very close together.
2015/12/07 - Fix to bug that caused crash when image is masked and
the src_ra_dec option is used.
2015/11/30 - Version 1.8.5
2015/11/25 - Fix to bug in export_image that resulted in incorrect
output image when both trim_box and pad_image were used.
2015/11/20 - Fix to bug in wavelet module related to merging of islands.
2015/11/20 - Fix to bug in polarization module related to numbering of
new islands.
2015/11/20 - Fix to bug in spectral index module related to rms map
calculation.
2015/11/20 - Added option to use much faster (but also much more memory
intensive) SciPy fftconvolve function instead of custom PyBDSM one.
The option (use_scipy_fft) defaults to True.
2015/11/20 - Increased number of digits for values in output text
catalogs
2015/08/06 - Version 1.8.4
2015/08/06 - Improved speed of wavelet module.
2015/08/06 - Added option to use PyFFTW in wavelet module if available.
2015/08/06 - Fix to IPython version check.
2015/08/06 - Fix to bug that caused a failure to write shapelet models
in FITS format.
2014/11/07 - Fix to bug that caused a crash when both atrous_do = True
and output_all = True. Fixed a bug that caused a crash on machines
with only one core.
2014/09/26 - Version 1.8.3
2014/09/26 - Fix to bug that caused a crash when using the wavelet
module and all Gaussians in an island were flagged.
2014/07/03 - Mask will now be expanded to match input image shape. Fix
to bug that caused image read failure when image lacks a Stokes axis.
2014/05/14 - Version 1.8.2
2014/05/15 - Fix to bug in CASA masks generated with export_image() that
caused cleaning to fail in CASA 4.2 and above.
2014/02/05 - Fix to bug that resulted in output file names being
converted to lower case inappropriately.
2014/01/14 - Version 1.8.1
2014/01/13 - Added option (bbs_patches = 'mask') to allow patches in
an output BBS sky model to be defined using a mask image.
2014/01/09 - Fix to bug that caused the incl_empty option to be
ignored when format='fits' in the write_catalog task.
2013/12/05 - Enabled output of images in CASA format in the export_image
task (img_format = 'casa'). Added an option to export_image task to
export an island-mask image, with ones where there is emission and
zeros elsewhere (image_type = 'island_mask'). Features in the island
mask may be optionally dilated by specifying the number of dilation
iterations with the mask_dilation parameter. Added an option to
write a CASA region file to the write_catalog task (format =
'casabox'). Added an option to write a CSV catalog to the
write_catalog task (format = 'csv').
2013/11/04 - Added error message when the rms is zero in some part of the
rms map.
2013/10/16 - Version 1.8.0
2013/10/16 - Improved wavelet fitting. Added option so that wavelet
fitting can be done to the sum of images on the remaining wavelet
scales, improving the signal for fitting (controlled with the
atrous_sum option). Added option so that user can choose whether to
include new islands found only in the wavelet images in the final
fit or not (controlled with the atrous_orig_isl option).
2013/10/10 - Fixed a bug that could lead to incomplete fitting of
some islands. Improved overall convergence of fits.
2013/10/10 - Version 1.7.7
2013/10/10 - Improved fitting of bright sources under certain
circumstances.
2013/09/27 - Version 1.7.6
2013/09/27 - Changed caching behavior to ensure that temporary files
are always deleted after they are no longer needed or on exit.
2013/09/05 - Renamed blank_zeros to blank_limit. The blank_limit
option now specifies a limit below which pixels are blanked.
2013/09/05 - Enabled SAGECAL sky-model output.
2013/09/02 - Version 1.7.5
2013/09/02 - Fix to bug that caused a crash when images with 2 or
3 axes were used. Improved rms and mean calculation (following the
implementation used in PySE, see http://dare.uva.nl/document/174052
for details). The threshold used to determine the clipped rms and
mean values is now determined internally by default (kappa_clip =
None).
2013/08/27 - Version 1.7.4
2013/08/29 - Fix to bug in show_fit() that caused error when
'i' is pressed in the plot window and shapelet decomposition
had not been done. Tweak to 'pybdsm' startup shell script to
avoid problems with the Mac OS X matplotlib backend on non-
framework Python installations (such as Anaconda Python).
2013/08/28 - Fix to bug in process_image() that could result in
wavelet Gaussians being excluded from model image under certain
conditions.
2013/08/27 - Version 1.7.3
2013/08/27 - Fix to bug in image reading that caused images to be
distorted.
2013/08/23 - Version 1.7.2
2013/08/23 - Improved handling of non-standard FITS CUNIT keywords.
Improved loading of FITS images when trim_box is specified.
2013/08/22 - Version 1.7.1
2013/08/21 - Fix to bug that caused cached images to be deleted when
rerunning an analysis. Fix to bug in show_fit() due to undefined
images. Fix to bug in process_image() that would result in unneeded
reprocessing.
2013/08/20 - Version 1.7.0
2013/08/19 - PyBDSM will now use Astropy if installed for FITS and WCS
modules.
2013/08/11 - Fix to avoid excessive memory usage in the wavelet module
(replaced scipy.signal.fftconvolve with a custom function).
2013/08/11 - Added option to use disk caching for internally derived
images (do_cache). Caching can reduce memory usage and is
therefore useful when processing large images.
2013/07/11 - Implemented a variable operation chain for process_image
(and img.process()) that allows unneeded steps to be skipped if
the image is being reprocessed.
2013/07/11 - Fixed a bug that could cause Gaussian fitting to hang
during iterative fitting of large islands.
2013/06/24 - Added option (fix_to_beam) to fix the size and position
angle of Gaussians to the restoring beam during fitting. Fix to
bug that caused the position angle used to initialize fitting to
be incorrect.
2013/03/22 - Version 1.6.1
2013/03/21 - Fix to bug in ds9 and kvis catalog files that resulted in
incorrect position angles. Fix to bug in position-dependent WCS
transformations that caused incorrect source parameters in output
catalogs. Added option to output uncorrected source parameters
to a BBS sky model file (correct_proj).
2013/03/14 - Removed sky transformations for flagged Gaussians, as
these could sometimes give math domain errors. Disabled aperture
flux measurement on wavelet images as it is not used/needed.
2013/02/25 - Version 1.6.0
2013/02/25 - Improved speed and accuracy of aperture flux
calculation.
2013/02/20 - Added option to use the curvature map method of
Hancock et al. (2012) for the initial estimation of Gaussian
parameters (ini_method = 'curvature') and for grouping of
Gaussians into sources (group_method = 'curvature').
2013/02/18 - Fix to bug in spectral index module that caused sources
with multiple Gaussians to be skipped. Minor adjustments to the
wavelet module to improve performance.
2013/02/08 - Implemented position-dependent WCS transformations.
2013/02/08 - Added option to fit to any arbitrary location in the
image within a given radius (src_ra_dec and src_radius_pix).
2013/02/04 - Fix to bug in wavelet module that caused crash when
no Gaussians were fit to the main image.
2013/01/30 - Fix to bug that resulted in incorrect numbering of
wavelet Gaussians. Added 'srl' output in ds9 format when using
output_all = True.
2013/01/28 - Fix to bug in source grouping algorithm. Improved fitting
when background mean is nonzero. Fix to allow images with GLAT and
GLON WCS coordinates. Fix to bug when equinox is taken from the
epoch keyword.
2012/12/19 - Version 1.5.1
2012/12/19 - Fix to bug in wavelet module that occurred when the
center of the wavelet Gaussian lies outside of the image. Fix
to re-enable srl output catalogs in ds9 region format. Fix to
bug that resulted in the output directory not always being
created. Added an option (aperture_posn), used when aperture
fluxes are desired, to specify whether to center the aperture
on the source centroid or the source peak.
2012/12/02 - Changes to reduce memory usage, particularly in the
wavelet module.
2012/11/30 - Fix to bypass bug in matplotlib when display variable
is not set.
2012/11/21 - Fixed bug that caused a crash when a detection image
was used. Fixed a bug with incorrect save directory when
plot_allgaus = True.
2012/10/29 - Version 1.5.0
2012/10/29 - Improved handling of WCS information so that a much
greater variety of WCS systems may be used. Fixed a bug in logging
that occurred when negative values were found in the rms map.
Updated installation instructions.
2012/10/12 - Version 1.4.5
2012/10/12 - Added option (incl_empty) to include empty islands (that
have no un-flagged Gaussians) in output catalogs. Any such empty
islands are given negative source IDs and positions given by the
location of the peak of the island.
2012/10/10 - Fixed a bug in Gaussian fitting that could cause a crash
when fitting fails. Fixed a bug in parallelization that could
cause a crash due to improper concatenation of result lists.
2012/10/09 - Version 1.4.4
2012/10/09 - Improved logging. Added a warning when one or more islands
are not fit properly (i.e., no valid, unflagged Gaussians were
fit). Fixed a bug in parallelization of Gaussian fitting that
could cause a crash due to improper mapping of island lists to
processes.
2012/10/05 - Added code to handle images with no unblanked pixels.
Improved fitting robustness.
2012/10/04 - Version 1.4.3
2012/10/04 - Fixed a bug in the mean map calculation that caused mean
maps with constant values (i.e., non-2D maps) to have values of
0.0 Jy/beam unless mean_map = 'const' was explicitly specified.
Fixed a bug in Gaussian fitting that could cause an island to be
skipped.
2012/10/02 - Fixed a bug in the PSF vary module that resulted in
incorrect PSF generators being used. Added an option to smooth
the resulting PSF images (psf_smooth). Parallelized the PSF
interpolation and smoothing steps. Improved PSF vary documentation.
2012/09/25 - Version 1.4.2
2012/09/25 - Dramatically reduced the time required to identify valid
wavelet islands.
2012/09/21 - Fixed bug that resulted in output FITS gaul tables being
improperly sorted. Fixed cosmetic bug in the statusbar that could
sometimes cause improper formatting. Added example of SAMP usage
to the documentation.
2012/09/20 - Version 1.4.1
2012/09/20 - Fixed a bug in the wavelet module that caused a crash when
no Gaussians were fit to the ch0 image.
2012/09/19 - Added option (broadcast) to show_fit task to send
coordinates and row highlight request to a SAMP hub when a Gaussian
is clicked. Fixed bug in aperture flux masking that sometimes caused
the mask to be the wrong shape.
2012/09/18 - Added option to send images and catalogs to a SAMP hub
(activated by setting outfile = 'SAMP' in the export_image and
write_catalog tasks).
2012/09/13 - Improved speed of plotting when images are large and in
mean/rms map generation. Fixed bug that caused residual image
statistics to fail when NaNs are present.
2012/09/11 - Version 1.4.0
2012/09/11 - Parallelized Gaussian fitting, shapelet decomposition,
validation of wavelet islands, and mean/rms map generation.
The number of cores to be used can be specified with the ncores
option (default is to use up to 8). Fixed bug in SED plotting in
the show_fit task.
2012/08/29 - Fixed incorrect terminal size in parameter listing. Added
logging of non-default input parameters and internally derived
parameters.
2012/08/22 - Version 1.3.2
2012/08/22 - Fixed a bug that caused the user-specified rms_box to be
ignored. Added an option to enable the Monte Carlo error estimation
for 'M'-type sources (the do_mc_errors option), which is now
disabled by default.
2012/07/11 - Version 1.3.1
2012/07/11 - Cleaned up unused options.
2012/07/10 - Fixed a bug that caused a segfault during Gaussian
fitting. Fixed a bug that caused a crash when a detection image
is used.
2012/07/05 - Fixed a bug that caused images written when output_all =
True to be transposed. Added frequency information to all output
images. Improved fitting robustness to prevent rare cases in
which no valid Gaussians could be fit to an island. Modified the
island-finding routine to handle NaNs properly.
2012/07/03 - Version 1.3
2012/07/03 - Fixed a bug in calculation of the positional errors of
Gaussians. If interactive=True and image is large (> 4096 pixels),
display is limited to 'ch0_islands' only; otherwise, show_fit()
is very slow. Tweaked show_fit() to better display a single image.
2012/07/02 - Adjusted rms_box algorithm to check for negative rms
values (due to interpolation with cubic spline). If negative
values are found, either the box size is increased or the
interpolation is done with order=1 (bilinear) instead.
2012/06/28 - Output now includes the residual image produced by
using only wavelet Gaussians (if any) when atrous_do=True and
output_all=True. Improved organization of files when
output_all=True. Added logging of simple statistics (mean,
std. dev, skew, and kurtosis) of the residual images.
2012/06/22 - Included image rotation (if any) in beam definition.
Rotation angle can vary across the image (defined by image WCS).
2012/06/19 - Changed exception handling to raise exceptions when
the interactive shell is not being used. Fixed bug that
caused a crash when using show_fit() when no islands were
found.
2012/06/15 - Added Sagecal output format for Gaussian catalogs.
2012/06/14 - Added check for newer versions of the PyBDSM
software tar.gz file available on ftp.strw.leidenuniv.nl.
2012/06/13 - Added total island flux (from sum of pixels) to
"gaul" and "srl" catalogs.
2012/06/06 - Version 1.2
2012/06/06 - Added option to calculate fluxes within a specified
aperture radius in pixels (set with the "aperture" option).
Aperture fluxes, if measured, are output in the 'srl' catalogs.
Changed code that determines terminal width to be more robust.
2012/05/07 - Removed dependencies on matplotlib -- if matplotlib is
not available, plotting is disabled. Corrected inconsistencies,
spelling mistakes, etc. in help text and documentation. Cleaned
up unneeded modules and files.
2012/05/02 - Added option to output flux densities for every channel
found by the spectral index module. Added option to spectral index
module to allow use of flux densities that do not meet the desired
SNR. Changed flag_maxsnr criterion to also flag if the peak flux
density per beam of the Gaussian exceeds the value at its center.
Removed incl_wavelet option.
2012/04/20 - Promoted the adaptive_rms_box parameter to the main options
listing and added the rms_box_bright option so that the user can
specify either (or both) of the rms_boxes. Fixed bug in wavelet
module so that invalid Gaussians (i.e., those that lie outside of
islands in the ch0 image) are not used when making the residual
images at each scale. Improved speed of Gaussian fitting to wavelet
images. Fixed bug that resulted in pixels found to be outside the
universe (check is enabled with the check_outsideuniv option) not
being masked properly.
2012/04/17 - Fixed bug in psf_vary module that resulted in PSF major and
minor axis maps in terms of sigma instead of FWHM. Added option
        (psf_stype_only) to allow PSF fitting to non-S-type sources
(useful if sources are very distorted).
2012/04/12 - Fixed bug in adaptive scaling code that could cause
incorrect small-scale rms_box size. Added a parameter
(adaptive_thresh) that controls the minimum threshold for sources
used to set the small-scale rms_box size.
2012/04/02 - Implemented an adaptive scaling scheme for the rms_box
parameter that shrinks the box size near bright sources and expands
it far from them (enabled with the adaptive_rms_box option when
rms_box=None). This scheme generally results in improved rms and
mean maps when both strong artifacts and extended sources are
present. Fixed bug that prevented plotting of results during wavelet
decomposition when interactive = True.
2012/03/29 - Fixed bug in wavelet module that could cause incorrect
associations of Gaussians. Fixed bug in show_fit that displayed
incorrect model and residual images when wavelets were used.
2012/03/28 - Version 1.1
2012/03/28 - Fixed bug that caused mask to be ignored when determining
whether variations in rms and mean maps is significant. Fixed bug
that caused internally derived rms_box value to be ignored.
2012/03/27 - Modified calculation of rms_box parameter (when rms_box
option is None) to work better with fields composed mainly of point
sources when strong artifacts are present. Tweaked flagging on FWHM
to prevent over-flagging of Gaussians in small islands. Changed
wavelet module to flag Gaussians whose centers fall outside of
islands found in the original image and removed atrous_orig_isl
option (as redundant).
2012/03/26 - Modified fitting of large islands to adopt an iterative
fitting scheme that limits the number of Gaussians fit
simultaneously per iteration to 10. This change speeds up fitting of
large islands considerably. The options peak_fit and peak_maxsize
control whether iterative fitting is done. Added new Gaussian
flagging condition (flag_maxsize_fwhm) that flags Gaussians whose
sigma contour times factor extends beyond the island boundary. This
flag prevents fitting of Gaussians that extend far beyond the island
boundary.
2012/03/23 - Tweaked settings that affect fitting of Gaussians to
improve fitting in general.
2012/03/19 - Added output of shapelet parameters to FITS tables. Fixed
issue with resizing of sources in spectral index module.
2012/03/16 - Fixed bugs in polarisation module that caused incorrect
polarization fractions.
2012/03/13 - Improved plotting speed (by factor of ~ 4) in show_fit when
there is a large number of islands. Simplified the spectral index
module to make it more user friendly and stable. Added the option to
use a "detection" image for island detection (the detection_image
option); source properties are still measured from the main input
image.
2012/03/01 - Fixed a bug in the polarisation module that could result in
incorrect flux densities. Changed logging module to suppress output
of ANSI color codes to the log file.
2012/02/27 - Implemented fitting of Gaussians in polarisation module,
instead of simple summation of pixel values, to determine polarized
flux densities.
2012/02/17 - In scripts, process_image() will now accept a dictionary of
parameters as input.
2012/02/10 - Sources that appear only in Stokes Q or U (and hence not in
Stokes I) are now identified and included in the polarisation
module. This identification is done using the polarized intensity
(PI) image. show_fit() and export_image() were updated to allow
display and export of the PI image.
2012/02/06 - Fixed bug in island splitting code that could result in
duplicate Gaussians.
2012/02/02 - Improved polarisation module. Polarization quantities are
now calculated for Gaussians as well as sources.
    2012/01/27 - Fixed bug in psf_vary module that affected tessellation.
Fixed many small typos in parameter descriptions.
2012/01/18 - Fixed a bug that resulted in incorrect coordinates when the
trim_box option was used with a CASA image. Added option
(blank_zeros) to blank pixels in the input image that are exactly
zero.
2012/01/13 - Fixed minor bugs in the interactive shell and updated
pybdsm.py to support IPython 0.12.
2011/12/21 - Fixed bug in gaul2srl module due to rare cases in which an
island has a negative rms value. Fixed a memory issue in which
memory was not released after using show_fit.
2011/11/28 - Added option to have minpix_isl estimated automatically as
1/3 of the beam area. This estimate should help exclude false
islands that are much smaller than the beam. This estimate is not
        allowed to fall below 6 pixels.
2011/11/11 - Fixed bugs in source generation that would lead to masking
of all pixels for certain sources during moment analysis. Adjusted
calculation of jmax in wavelet module to use island sizes (instead
of image size) if opts.atrous_orig_isl is True.
2011/11/04 - Implemented new island fitting routine (enabled with the
peak_fit option) that can speed up fitting of large islands. Changed
plotting of Gaussians in show_fit to use Ellipse artists to improve
plotting speed.
2011/11/03 - Altered reading of images to correctly handle 4D cubes.
Fixed bug in readimage that affected filenames.
2011/10/26 - Extended psf_vary module to include fitting of stacked PSFs
with Gaussians, interpolation of the resulting parameters across the
        image, and correction of the deconvolved source sizes using the
interpolated PSFs. Changed plotting of Gaussians in show_fit() to
use the FWHM instead of sigma. Modified error calculation of M
sources to be more robust when sources are small. Fixed spelling of
"gaussian" in bbs_patches option list.
2011/10/24 - Many small bug fixes to the psf_vary module. Fixed use of
input directory so that input files not in the current directory are
handled correctly.
2011/10/14 - Added residual rms and mean values to sources and source
list catalogs. These values can be compared to background rms and
mean values as a quick check of fit quality.
2011/10/13 - Modified deconvolution to allow 1-D Gaussians and sources.
Added FREQ0, EQUINOX, INIMAGE keywords to output fits catalogs.
Fixed bug in source position angles. Adjusted column names of output
catalogs slightly to be more descriptive.
2011/10/12 - Added errors to source properties (using a Monte Carlo
method for M sources). Fixed bug in output column names.
2011/10/11 - Tweaked autocomplete to support IPython shell commands
(e.g., "!more file.txt"). Fixed bug in gaul2srl that resulted in
some very nearby Gaussians being placed into different sources.
Added group_tol option so that user can adjust the tolerance of how
Gaussians are grouped into sources.
2011/10/05 - Added output of source lists. Changed name of write_gaul
method to write_catalog (more general).
2011/10/04 - Added option to force source grouping by island
(group_by_isl). Added saving of parameters to a PyBDSM save file to
Op_output.
2011/09/21 - Fixed issue with shapelet centering failing: it now falls
back to simple moment when this happens. Fixed issue with
plotresults when shapelets are fit.
2011/09/14 - Placed output column names and units in TC properties of
Gaussians. This allows easy standardization of the column names and
units.
2011/09/13 - Fixes to trim_box and resetting of Image objects in
interface.process(). Changed thr1 --> thr2 in fit_iter in
        gausfit.py, as bright sources are often "overfit" when using thr1,
leading to large negative residuals. Restricted fitting of Gaussians
to wavelet images to be only in islands found in the original image
if opts.atrous_orig_isl is True.
2011/09/08 - Version 1.0
2011/09/08 - Versioning system changed to use _version.py.
"""
pass
| 29,263 | 39.985994 | 85 | py |
| PyBDSF | PyBDSF-master/bdsf/gausfit.py |
"""Module gausfit.
This module does multi-gaussian fits for all detected islands.
At the moment the fitting algorithm is quite simple -- we just add
gaussians one-by-one as long as there are pixels with emission
in the image, and do post-fitting flagging of the extracted
gaussians.
The fitting itself is implemented by means of the MGFunction
class and a number of fitter routines in the _cbdsm module.
The MGFunction class implements a multi-gaussian function and
provides all the functionality required by the specific fitters.
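A rough sketch of that strategy, in illustrative pseudocode only (the real
implementation is in Op_gausfit.fit_island and fit_iter below):
    residual = island_image
    gaussians = []
    while peak(residual) > threshold and another gaussian is allowed:
        add a gaussian at the current peak and refit the whole set
        residual = island_image - model(gaussians)
    split the fitted set into good and flagged gaussians (post-fitting flagging)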
"""
from __future__ import print_function
from __future__ import absolute_import
from .image import *
from copy import deepcopy as cp
from . import mylogger
import sys
import time
from . import statusbar
from . import _cbdsm
from . import has_pl
if has_pl:
import matplotlib.pyplot as pl
import scipy.ndimage as nd
from . import multi_proc as mp
import itertools
try:
from itertools import izip as zip
except ImportError: # will be 3.x series
pass
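# Illustrative usage sketch (assumes an Image object that has already been run
# through the islands stage; this call is not made anywhere in this module):
#
#     op = Op_gausfit()
#     img = op(img)    # fits Gaussians to every island in img.islands
#     print(img.ngaus, img.total_flux_gaus)
#
# The attributes referenced above (islands, ngaus, total_flux_gaus) are the ones
# read and set in __call__ below.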
class Op_gausfit(Op):
"""Fit a number of 2D gaussians to each island.
The results of the fitting are stored in the Island
structure itself as a list of Gaussian objects (gaul) and a
list of flagged gaussians (fgaul).
Prerequisites: module islands should be run first.
"""
def __call__(self, img):
from time import time
from . import functions as func
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Gausfit")
if len(img.islands) == 0:
img.gaussians = []
img.ngaus = 0
img.total_flux_gaus = 0.0
img.completed_Ops.append('gausfit')
return img
bar = statusbar.StatusBar('Fitting islands with Gaussians .......... : ',
0, img.nisl)
opts = img.opts
        if not opts.quiet and not opts.verbose_fitting:
bar.start()
iter_ngmax = 10
min_maxsize = 50.0
maxsize = opts.splitisl_maxsize
min_peak_size = 30.0
peak_size = opts.peak_maxsize
if maxsize < min_maxsize:
maxsize = min_maxsize
opts.splitisl_maxsize = min_maxsize
if peak_size < min_peak_size:
peak_size = min_peak_size
opts.peak_maxsize = min_peak_size
# Set up multiproccessing. First create a simple copy of the Image
# object that contains the minimal data needed.
opts_dict = opts.to_dict()
img_simple = Image(opts_dict)
img_simple.pixel_beamarea = img.pixel_beamarea
img_simple.pixel_beam = img.pixel_beam
img_simple.thresh_pix = img.thresh_pix
img_simple.minpix_isl = img.minpix_isl
img_simple.clipped_mean = img.clipped_mean
img_simple.beam2pix = img.beam2pix
img_simple.beam = img.beam
# Next, define the weights to use when distributing islands among cores.
# The weight should scale with the processing time. At the moment
# we use the island area, but other parameters may be better.
weights = []
for isl in img.islands:
weights.append(isl.size_active)
# Now call the parallel mapping function. Returns a list of
# [gaul, fgaul] for each island. If ncores is 1, use the
# standard Python map function -- this helps with debugging in
# some circumstances
if opts.ncores==1:
gaus_list = map(func.eval_func_tuple,
zip(itertools.repeat(self.process_island),
img.islands, itertools.repeat(img_simple),
itertools.repeat(opts)))
else:
gaus_list = mp.parallel_map(func.eval_func_tuple,
zip(itertools.repeat(self.process_island),
img.islands, itertools.repeat(img_simple),
itertools.repeat(opts)), numcores=opts.ncores,
bar=bar, weights=weights)
gaus_list = list(gaus_list)
for isl in img.islands:
### now convert gaussians into Gaussian objects and store
idx = isl.island_id
gaul = gaus_list[idx][0]
fgaul = gaus_list[idx][1]
dgaul = []
if len(gaul) > 0:
gidx = gaul[-1][0] # save last index value for use with fgaul below
else:
gidx = 0
gaul = [Gaussian(img, par, idx, gidx)
for (gidx, par) in enumerate(gaul)]
if len(gaul) == 0:
# No good Gaussians were fit. In this case, make a dummy
# Gaussian located at the island center so
# that the source may still be included in output catalogs.
# These dummy Gaussians all have an ID of -1. They do not
# appear in any of the source or island Gaussian lists except
# the island dgaul list.
if opts.src_ra_dec is not None:
# Center the dummy Gaussian on the user-specified source position
posn_isl = (int(isl.shape[0]/2.0), int(isl.shape[1]/2.0))
posn_img = (int(isl.shape[0]/2.0 + isl.origin[0]), int(isl.shape[1]/2.0 + isl.origin[1]))
par = [isl.image[posn_isl], posn_img[0], posn_img[1], 0.0, 0.0, 0.0]
else:
# Center the dummy Gaussian on the maximum pixel
posn = N.unravel_index(N.argmax(isl.image*~isl.mask_active), isl.shape) + N.array(isl.origin)
par = [isl.max_value, posn[0], posn[1], 0.0, 0.0, 0.0]
dgaul = [Gaussian(img, par, idx, -1)]
# Now make the list of flagged Gaussians, if any
fgaul= [Gaussian(img, par, idx, gidx + gidx2 + 1, flag)
for (gidx2, (flag, par)) in enumerate(fgaul)]
isl.gaul = gaul
isl.fgaul= fgaul
isl.dgaul = dgaul
gaussian_list = [g for isl in img.islands for g in isl.gaul]
img.gaussians = gaussian_list
### put in the serial number of the gaussians for the whole image
n = 0
nn = 0
tot_flux = 0.0
if img.waveletimage:
# store the wavelet scale for each Gaussian
# (wavelet img's have a img.j attribute)
j = img.j
else:
j = 0
for isl in img.islands:
m = 0
for g in isl.gaul:
n += 1; m += 1
g.gaus_num = n - 1
tot_flux += g.total_flux
for dg in isl.dgaul:
nn -= 1
dg.gaus_num = nn
isl.ngaus = m
img.ngaus = n
img.total_flux_gaus = tot_flux
mylogger.userinfo(mylog, "Total number of Gaussians fit to image",
str(n))
if not img._pi and not img.waveletimage:
mylogger.userinfo(mylog, "Total flux density in model", '%.3f Jy' %
tot_flux)
# Check if model flux is very different from sum of flux in image
if img.ch0_sum_jy > 0 and not img._pi:
if img.total_flux_gaus/img.ch0_sum_jy < 0.5 or \
img.total_flux_gaus/img.ch0_sum_jy > 2.0:
mylog.warn('Total flux density in model is %0.2f times sum of pixels '\
'in input image. Large residuals may remain.' %
(img.total_flux_gaus/img.ch0_sum_jy,))
# Check if there are many Gaussians with deconvolved size of 0 in one
# axis but not in the other. Don't bother to do this for wavelet images.
fraction_1d = self.check_for_1d_gaussians(img)
if fraction_1d > 0.5 and img.beam is not None and img.waveletimage == False:
mylog.warn('After deconvolution, more than 50% of Gaussians are '\
"1-D. Unless you're fitting an extended source, "\
"beam may be incorrect.")
img.completed_Ops.append('gausfit')
return img
def process_island(self, isl, img, opts=None):
"""Processes a single island.
Returns a list of the best-fit Gaussians and flagged Gaussians.
"""
from . import functions as func
if opts is None:
opts = img.opts
iter_ngmax = 10
maxsize = opts.splitisl_maxsize
min_peak_size = 30.0
min_maxsize = 50.0
peak_size = opts.peak_maxsize
if maxsize < min_maxsize:
maxsize = min_maxsize
opts.splitisl_maxsize = min_maxsize
if peak_size < min_peak_size:
peak_size = min_peak_size
opts.peak_maxsize = min_peak_size
size = isl.size_active/img.pixel_beamarea()*2.0 # 2.0 roughly corrects for thresh_isl
if opts.verbose_fitting:
print("Fitting isl #", isl.island_id, '; # pix = ',N.sum(~isl.mask_active),'; size = ',size)
if size > maxsize:
tosplit = func.isl_tosplit(isl, opts)
if opts.split_isl and tosplit[0] > 0:
n_subisl, sub_labels = tosplit[1], tosplit[2]
gaul = []; fgaul = []
if opts.verbose_fitting:
print('SPLITTING ISLAND INTO ',n_subisl,' PARTS FOR ISLAND ',isl.island_id)
for i_sub in range(n_subisl):
islcp = isl.copy(img.pixel_beamarea())
islcp.mask_active = N.where(sub_labels == i_sub+1, False, True)
islcp.mask_noisy = N.where(sub_labels == i_sub+1, False, True)
size_subisl = (~islcp.mask_active).sum()/img.pixel_beamarea()*2.0
if opts.peak_fit and size_subisl > peak_size:
sgaul, sfgaul = self.fit_island_iteratively(img, islcp, iter_ngmax=iter_ngmax, opts=opts)
else:
sgaul, sfgaul = self.fit_island(islcp, opts, img)
gaul = gaul + sgaul; fgaul = fgaul + sfgaul
else:
isl.islmean = 0.0
if opts.peak_fit and size > peak_size:
gaul, fgaul = self.fit_island_iteratively(img, isl, iter_ngmax=iter_ngmax, opts=opts)
else:
gaul, fgaul = self.fit_island(isl, opts, img)
else:
if opts.peak_fit and size > peak_size:
gaul, fgaul = self.fit_island_iteratively(img, isl, iter_ngmax=iter_ngmax, opts=opts)
else:
gaul, fgaul = self.fit_island(isl, opts, img)
# Return list of Gaussians
return [gaul, fgaul]
def fit_island(self, isl, opts, img, ngmax=None, ffimg=None, ini_gausfit=None):
"""Fit island with a set of 2D gaussians.
Parameters:
isl: island
opts: Opts structure of the image
        img: Image object; the beam stored in img is used as the initial
             guess for the Gaussian shape
        ngmax: maximum number of Gaussians to fit (optional)
        ffimg: image of already-fitted Gaussians to subtract before
               fitting (optional)
        ini_gausfit: overrides the opts.ini_gausfit initial-guess method
                     (optional)
        Returns:
        Function returns 2 lists with parameters of good and flagged
        Gaussians. Gaussian parameters are updated to be image-relative.
        Note: "fitok" indicates whether the fit converged; one or more
              flagged Gaussians indicate that significant residuals
              remain (peak > thr).
"""
from ._cbdsm import MGFunction
from . import functions as func
from .const import fwsig
verbose = opts.verbose_fitting
if verbose:
print('Entering fit_island in verbose mode')
if ffimg is None:
fit_image = isl.image-isl.islmean
else:
fit_image = isl.image-isl.islmean-ffimg
fcn = MGFunction(fit_image, isl.mask_active, 1)
# For fitting, use img.beam instead of img.pixel_beam, as we want
# to pick up the wavelet beam (img.pixel_beam is not changed for
# wavelet images, but img.beam is)
beam = N.array(img.beam2pix(img.beam))
beam = (beam[0]/fwsig, beam[1]/fwsig, beam[2]+90.0) # change angle from +y-axis to +x-axis and FWHM to sigma
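        # If the beam is nearly circular, stretch the major axis of the
        # initial guess by 10%; Gaussian.__init__ later checks for this
        # stretched shape to recognize fits that never moved off the
        # initial guess.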
if abs(beam[0]/beam[1]) < 1.1:
beam = (1.1*beam[0], beam[1], beam[2])
thr1 = isl.mean + opts.thresh_isl*isl.rms
thr2 = isl.mean + img.thresh_pix*isl.rms
thr0 = thr1
g3_only = opts.fix_to_beam
peak = fcn.find_peak()[0]
dof = isl.size_active
shape = isl.shape
isl_image = isl.image - isl.islmean
size = isl.size_active/img.pixel_beamarea()*2.0
gaul = []
iter = 0
ng1 = 0
if ini_gausfit is None:
ini_gausfit = opts.ini_gausfit
if ini_gausfit not in ['default', 'simple', 'nobeam']:
ini_gausfit = 'default'
if ini_gausfit == 'simple' and ngmax is None:
ngmax = 25
if ini_gausfit == 'default' or opts.fix_to_beam:
gaul, ng1, ngmax = self.inigaus_fbdsm(isl, thr0, beam, img)
if len(gaul)>25:
ini_gausfit = 'simple'
gaul=[]
ng1=0
ngmax=25
if ini_gausfit == 'nobeam' and not opts.fix_to_beam:
gaul = self.inigaus_nobeam(isl, thr0, beam, img)
ng1 = len(gaul); ngmax = ng1+2
if verbose:
print('Initializing, ini_gausfit is',ini_gausfit,'gaul =',gaul,'ngmax =',ngmax)
while iter < 5:
iter += 1
if verbose: print('In Gaussian flag loop, iter =',iter)
fitok = self.fit_iter(gaul, ng1, fcn, dof, beam, thr0, iter, ini_gausfit, ngmax, verbose, g3_only)
if verbose: print('Calling flag_gaussians')
gaul, fgaul = self.flag_gaussians(fcn.parameters, opts,
beam, thr0, peak, shape, isl.mask_active,
isl.image, size)
if verbose: print('Leaving flag_gaussians')
ng1 = len(gaul)
if fitok and len(fgaul) == 0:
break
if (not fitok or len(gaul) == 0) and ini_gausfit != 'simple':
if verbose: print('Using simple method instead')
# If fits using default or nobeam methods did not work,
# try using simple instead
gaul = []
iter = 0
ng1 = 0
ngmax = 25
while iter < 5:
iter += 1
fitok = self.fit_iter(gaul, ng1, fcn, dof, beam, thr0, iter, 'simple', ngmax, verbose, g3_only)
gaul, fgaul = self.flag_gaussians(fcn.parameters, opts,
beam, thr0, peak, shape, isl.mask_active,
isl.image, size)
ng1 = len(gaul)
if fitok and len(fgaul) == 0:
break
sm_isl = nd.binary_dilation(isl.mask_active)
if (not fitok or len(gaul) == 0) and N.sum(~sm_isl) >= img.minpix_isl:
if verbose: print('Fit still not OK, shrinking')
# If fitting still fails, shrink the island a little and try again
fcn = MGFunction(fit_image, nd.binary_dilation(isl.mask_active), 1)
gaul = []
iter = 0
ng1 = 0
ngmax = 25
while iter < 5:
iter += 1
fitok = self.fit_iter(gaul, ng1, fcn, dof, beam, thr0, iter, 'simple', ngmax, verbose, g3_only)
gaul, fgaul = self.flag_gaussians(fcn.parameters, opts,
beam, thr0, peak, shape, isl.mask_active,
isl.image, size)
ng1 = len(gaul)
if fitok and len(fgaul) == 0:
break
lg_isl = nd.binary_erosion(isl.mask_active)
if (not fitok or len(gaul) == 0) and N.sum(~lg_isl) >= img.minpix_isl:
if verbose: print('Fit still not OK, expanding')
# If fitting still fails, expand the island a little and try again
fcn = MGFunction(fit_image, nd.binary_erosion(isl.mask_active), 1)
gaul = []
iter = 0
ng1 = 0
ngmax = 25
while iter < 5:
iter += 1
fitok = self.fit_iter(gaul, ng1, fcn, dof, beam, thr0, iter, 'simple', ngmax, verbose, g3_only)
gaul, fgaul = self.flag_gaussians(fcn.parameters, opts,
beam, thr0, peak, shape, isl.mask_active,
isl.image, size)
ng1 = len(gaul)
if fitok and len(fgaul) == 0:
break
if not fitok or len(gaul) == 0:
# If all else fails, try to use moment analysis
if verbose: print('All else has failed')
inisl = N.where(~isl.mask_active)
mask_id = N.zeros(isl.image.shape, dtype=N.int32) - 1
mask_id[inisl] = isl.island_id
try:
pixel_beamarea = img.pixel_beamarea()
mompara = func.momanalmask_gaus(fit_image, mask_id, isl.island_id, pixel_beamarea, True)
mompara[5] += 90.0
if not N.isnan(mompara[1]) and not N.isnan(mompara[2]):
x1 = int(N.floor(mompara[1]))
y1 = int(N.floor(mompara[2]))
xind = slice(x1, x1+2, 1); yind = slice(y1, y1+2, 1)
t=(mompara[1]-x1)/(x1+1-x1)
u=(mompara[2]-y1)/(y1+1-y1)
s_peak=(1.0-t)*(1.0-u)*fit_image[x1,y1]+t*(1.0-u)*fit_image[x1+1,y1]+ \
t*u*fit_image[x1+1,y1+1]+(1.0-t)*u*fit_image[x1,y1+1]
mompara[0] = s_peak
                    # mompara holds [peak, x, y, maj, min, pa]; convert the
                    # FWHM sizes to sigmas and pass a one-element list of
                    # parameter lists to flag_gaussians
                    par = mompara.tolist()
                    par[3] /= fwsig
                    par[4] /= fwsig
                    gaul, fgaul = self.flag_gaussians([par], opts,
                                                      beam, thr0, peak, shape, isl.mask_active,
                                                      isl.image, size)
except:
pass
### return whatever we got
if verbose: print('Preparing to return')
isl.mg_fcn = fcn
gaul = [self.fixup_gaussian(isl, g) for g in gaul]
fgaul = [(flag, self.fixup_gaussian(isl, g))
for flag, g in fgaul]
if verbose:
print('Number of good Gaussians: %i' % (len(gaul),))
print('Number of flagged Gaussians: %i' % (len(fgaul),))
return gaul, fgaul
def fit_island_iteratively(self, img, isl, iter_ngmax=5, opts=None):
"""Fits an island iteratively.
For large islands, which can require many Gaussians to fit well,
it is much faster to fit a small number of Gaussians simultaneously
and iterate. However, this does usually result in larger residuals.
"""
from . import functions as func
sgaul = []; sfgaul = []
gaul = []; fgaul = []
if opts is None:
opts = img.opts
thresh_isl = opts.thresh_isl
thresh_pix = opts.thresh_pix
thresh = opts.fittedimage_clip
thr = isl.mean + thresh_isl * isl.rms
rms = isl.rms
if opts.verbose_fitting:
print('Iteratively fitting island ', isl.island_id)
gaul = []; fgaul = []
ffimg_tot = N.zeros(isl.shape, dtype=N.float32)
peak_val = N.max(isl.image - isl.islmean)
count = 0
while peak_val >= thr:
count += 1
if opts.verbose_fitting:
print('Iteration %i' % count)
sgaul, sfgaul = self.fit_island(isl, opts, img, ffimg=ffimg_tot, ngmax=iter_ngmax, ini_gausfit='simple')
gaul = gaul + sgaul; fgaul = fgaul + sfgaul
# Calculate residual image
if len(sgaul) > 0:
for g in sgaul:
gcopy = g[:]
gcopy[1] -= isl.origin[0]
gcopy[2] -= isl.origin[1]
S1, S2, Th = func.corrected_size(gcopy[3:6])
gcopy[3] = S1
gcopy[4] = S2
gcopy[5] = Th
A, C1, C2, S1, S2, Th = gcopy
shape = isl.shape
b = find_bbox(thresh*isl.rms, gcopy)
bbox = N.s_[max(0, int(C1-b)):min(shape[0], int(C1+b+1)),
max(0, int(C2-b)):min(shape[1], int(C2+b+1))]
x_ax, y_ax = N.mgrid[bbox]
ffimg = func.gaussian_fcn(gcopy, x_ax, y_ax)
ffimg_tot[bbox] += ffimg
peak_val_prev = peak_val
peak_val = N.max(isl.image - isl.islmean - ffimg_tot)
if func.approx_equal(peak_val, peak_val_prev):
break
else:
break
if len(gaul) == 0:
            # Iterative fitting did not work -- fall back to a normal fit
            if opts.verbose_fitting:
                print('Iterative fitting failed for', isl.island_id)
gaul, fgaul = self.fit_island(isl, opts, img, ini_gausfit='default')
else:
if opts.verbose_fitting:
print('Iterative fitting succeeded for', isl.island_id)
return gaul, fgaul
def inigaus_fbdsm(self, isl, thr, beam, img):
""" initial guess for gaussians like in fbdsm """
from math import sqrt
from .const import fwsig
from . import functions as func
im = isl.image-isl.islmean
if img.opts.ini_method == 'curvature':
im_pos = -1.0 * func.make_curvature_map(isl.image-isl.islmean)
thr_pos = 0.0
else:
im_pos = im
thr_pos = thr
mask = isl.mask_active
av = img.clipped_mean
inipeak, iniposn, im1 = func.get_maxima(im, mask, thr_pos, isl.shape, beam, im_pos=im_pos)
if len(inipeak) == 0:
av, stdnew, maxv, maxp, minv, minp = func.arrstatmask(im, mask)
inipeak = [maxv]; iniposn = [maxp]
nmulsrc1 = len(iniposn)
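        # Iteratively add further peaks from the residual image im1: keep
        # accepting the brightest remaining maximum as long as the residual
        # rms is above the island rms and the peak is above threshold, and
        # stop if the new peak lies too close (within ~0.5 beam or 2.2
        # pixels) to an already-accepted one; each accepted peak is cleaned
        # out of im1 before searching again.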
domore = True
while domore:
domore = False
av, stdnew, maxv, maxp, minv, minp = func.arrstatmask(im1, mask)
if stdnew > isl.rms and maxv >= thr and maxv >= isl.mean+2.0*isl.rms:
domore = True
x1, y1 = N.array(iniposn).transpose()
dumr = N.sqrt((maxp[0]-x1)*(maxp[0]-x1)+(maxp[1]-y1)*(maxp[1]-y1))
distbm = dumr/sqrt(beam[0]*beam[1]*fwsig*fwsig)
if N.any((distbm < 0.5) + (dumr < 2.2)):
domore = False
if domore:
iniposn.append(N.array(maxp)); inipeak.append(maxv)
im1 = func.mclean(im1, maxp, beam)
inipeak = N.array(inipeak); iniposn = N.array(iniposn)
ind = list(N.argsort(inipeak)); ind.reverse()
inipeak = inipeak[ind]
iniposn = iniposn[ind]
gaul = []
for i in range(len(inipeak)):
g = (float(inipeak[i]), int(iniposn[i][0]), int(iniposn[i][1])) + beam
gaul.append(g)
return gaul, nmulsrc1, len(inipeak)
def inigaus_nobeam(self, isl, thr, beam, img):
""" To get initial guesses when the source sizes are very different
from the beam, and can also be elongated. Mainly in the context of
a-trous transform images. Need to arrive at a good guess of the sizes
and hence need to partition the image around the maxima first. Tried the
IFT watershed algo but with markers, it segments the island only around
the minima and not the whole island. Cant find a good weighting scheme
for tesselation either. Hence will try this :
Calculate number of maxima. If one, then take moment as initial
guess. If more than one, then moment of whole island is one of the
guesses if mom1 is within n pixels of one of the maxima. Else dont take
whole island moment. Instead, find minima on lines connecting all maxima
and use geometric mean of all minima of a peak as the size of that peak.
"""
from math import sqrt
from .const import fwsig
import scipy.ndimage as nd
from . import functions as func
im = isl.image-isl.islmean
if img.opts.ini_method == 'curvature':
im_pos = -1.0 * func.make_curvature_map(isl.image-isl.islmean)
thr_pos = 0.0
else:
im_pos = im
thr_pos = -1e9
mask = isl.mask_active
av = img.clipped_mean
inipeak, iniposn, im1 = func.get_maxima(im, mask, thr_pos, isl.shape, beam, im_pos=im_pos)
npeak = len(iniposn)
gaul = []
av, stdnew, maxv, maxp, minv, minp = func.arrstatmask(im, mask)
mom = func.momanalmask_gaus(isl.image-isl.islmean, isl.mask_active, 0, 1.0, True)
if npeak <= 1:
g = (float(maxv), int(round(mom[1])), int(round(mom[2])), mom[3]/fwsig, \
mom[4]/fwsig, mom[5])
gaul.append(g)
if npeak > 1: # markers start from 1=background, watershed starts from 1=background
watershed, markers = func.watershed(im, mask=isl.mask_active)
nshed = N.max(markers)-1 # excluding background
xm, ym = N.transpose([N.where(markers==i) for i in range(1,nshed+2)])[0]
coords = [c for c in N.transpose([xm,ym])[1:]]
alldists = [func.dist_2pt(c1, c2) for c1 in coords for c2 in coords if N.any(c1!=c2)] # has double
meandist = N.mean(alldists) # mean dist between all pairs of markers
# find at least some 'compact' sources
cscale = 3.0
while True:
compact = []; invmask = []
for ished in range(nshed):
shedmask = N.where(watershed==ished+2, False, True) + isl.mask_active # good unmasked pixels = 0
imm = nd.binary_dilation(~shedmask, N.ones((3,3), int))
xbad, ybad = N.where((imm==1)*(im>im[xm[ished+1], ym[ished+1]]))
imm[xbad, ybad] = 0
invmask.append(imm); x, y = N.where(imm); xcen, ycen = N.mean(x), N.mean(y) # good pixels are now = 1
dist = func.dist_2pt([xcen, ycen], [xm[ished+1], ym[ished+1]])
if dist < max(cscale, meandist/4.0):
compact.append(True) # if not compact, break source + diffuse
else:
compact.append(False)
if N.any(compact):
break
else:
# rescale to search for more compact sources
cscale*=1.5
if not N.all(compact):
o_avsize = []
ind = N.where(compact)[0]
for i in ind: o_avsize.append(N.sum(invmask[i]))
avsize = sqrt(N.mean(N.array(o_avsize)))
for i in range(len(compact)):
if not compact[i]: # make them all compact
newmask = N.zeros(imm.shape, bool)
newmask[max(0,int(xm[i+1]-avsize/2)):min(im.shape[0],int(xm[i+1]+avsize/2)), \
max(0,int(ym[i+1]-avsize/2)):min(im.shape[1],int(ym[i+1]+avsize/2))] = True
invmask[i] = invmask[i]*newmask
resid = N.zeros(im.shape, dtype=N.float32) # approx fit all compact ones
for i in range(nshed):
mask1 = ~invmask[i]
size = sqrt(N.sum(invmask))/fwsig
xf, yf = coords[i][0], coords[i][1]
p_ini = [im[xf, yf], xf, yf, size, size, 0.0]
x, y = N.indices(im.shape)
p, success = func.fit_gaus2d(im*invmask[i], p_ini, x, y)
resid = resid + func.gaus_2d(p, x, y)
gaul.append(p)
resid = im - resid
if not N.all(compact): # just add one gaussian to fit whole unmasked island
maxv = N.max(resid) # assuming resid has only diffuse emission. can be false
x, y = N.where(~isl.mask_active); xcen = N.mean(x); ycen = N.mean(y)
invm = ~isl.mask_active
#bound = invm - nd.grey_erosion(invm, footprint = N.ones((3,3), int)) # better to use bound for ellipse fitting
mom = func.momanalmask_gaus(invm, N.zeros(invm.shape, dtype=N.int16), 0, 1.0, True)
g = (maxv, xcen, ycen, mom[3]/fwsig, mom[4]/fwsig, mom[5]-90.)
gaul.append(g)
coords.append([xcen, ycen])
return gaul
def fit_iter(self, gaul, ng1, fcn, dof, beam, thr, iter, inifit, ngmax, verbose=1, g3_only=False):
"""One round of fitting
Parameters:
gaul : list of initial gaussians
fcn : MGFunction object
dof : maximal number of fitted parameters
beam : initial shape for newly added gaussians
[bmaj, bmin, bpa] in pixels
thr : peak threshold for adding more gaussians
verbose: whether to print fitting progress information
"""
from ._cbdsm import lmder_fit, dn2g_fit, dnsg_fit
if verbose: print('Greetings from fit_iter')
fit = lmder_fit
beam = list(beam)
### first drop-in initial gaussians
### no error-checking here, they MUST fit
fcn.reset()
for ig in range(ng1):
g = gaul[ig]
self.add_gaussian(fcn, g, dof, g3_only)
### do a round of fitting if any initials were provided
if verbose: print('About to call C++ wrapper')
fitok = True
if len(gaul) != 0:
fitok = fit(fcn, final=0, verbose=verbose)
if verbose: print('Returned from the fit')
### iteratively add gaussians while there are high peaks
### in the image and fitting converges
while fitok:
peak, coords = fcn.find_peak()
if peak < thr: ### no good peaks left
break
if len(fcn.parameters) < ngmax and iter == 1 and inifit == 'default' and len(gaul) >= ng1+1:
ng1 = ng1 + 1
g = gaul[ng1-1]
else:
if len(fcn.parameters) < ngmax:
g = [peak, coords[0], coords[1]] + beam
else:
break
fitok &= self.add_gaussian(fcn, g, dof, g3_only)
fitok &= fit(fcn, final=0, verbose=verbose)
### and one last fit with higher precision
### make sure we return False when fitok==False due to lack
### of free parameters
fitok &= fit(fcn, final=1, verbose=verbose)
return fitok
def add_gaussian(self, fcn, parameters, dof, g3_only=False):
"""Try adding one more gaussian to fcn object.
        It tries to reduce the number of fitted parameters if there are
        not enough degrees of freedom (DoF) left.
Note: g1 fits amplitude only
g3 fits amplitude and position
g6 fits all parameters
Parameters:
fcn: MGFunction object
parameters: initial values for gaussian parameters
dof: total possible number of fitted parameters
"""
from ._cbdsm import Gtype
if g3_only:
gtype = (Gtype.g3 if fcn.fitted_parameters() + 3 <= dof else None)
else:
gtype = (Gtype.g3 if fcn.fitted_parameters() + 3 <= dof else None)
gtype = (Gtype.g6 if fcn.fitted_parameters() + 6 <= dof else gtype)
if gtype:
fcn.add_gaussian(gtype, parameters)
return True
else:
return False
def flag_gaussians(self, gaul, opts, beam, thr, peak, shape, isl_mask, isl_image, size):
"""Flag gaussians according to some rules.
        Splits the list of Gaussian parameters into two lists: the first
        is a list of parameters for accepted Gaussians, and the second
        is a list of (flag, parameters) pairs for flagged Gaussians.
Parameters:
gaul: input list of gaussians
opts: Opts object to extract flagging parameters from
beam: beam shape
thr: threshold for pixels with signal
peak: peak data value in the current island
shape: shape of the current island
        isl_mask: island mask
        isl_image: island image data
        size: island size in units of the beam area
"""
good = []
bad = []
for g in gaul:
flag = self._flag_gaussian(g, beam, thr, peak, shape, opts, isl_mask, isl_image, size)
if flag:
bad.append((flag, g))
else:
good.append(g)
return good, bad
def _flag_gaussian(self, g, beam, thr, peak, shape, opts, mask, image, size_bms):
"""The actual flagging routine. See above for description.
"""
from math import sqrt, sin, cos, log, pi
from .const import fwsig
from . import functions as func
import scipy.ndimage as nd
A, x1, x2, s1, s2, th = g
s1, s2 = map(abs, [s1, s2])
flag = 0
if N.any(N.isnan(g)) or s1 == 0.0 or s2 == 0.0:
return -1
if s1 < s2: # s1 etc are sigma
ss1 = s2
ss2 = s1
th1 = divmod(th+90.0, 180)[1]
else:
ss1 = s1
ss2 = s2
th1 = divmod(th, 180)[1]
th1 = th1/180.0*pi
if ss1 > 1e4 and ss2 > 1e4:
xbox = 1e9; ybox = 1e9
else:
xbox = 2.0*(abs(ss1*cos(th1)*cos(th1))+abs(ss2*ss2/ss1*sin(th1)*sin(th1)))/ \
sqrt(cos(th1)*cos(th1)+ss2*ss2/ss1/ss1*sin(th1)*sin(th1))
ybox = 2.0*(abs(ss1*sin(th1)*sin(th1))+abs(ss2*ss2/ss1*cos(th1)*cos(th1)))/ \
sqrt(sin(th1)*sin(th1)+ss2*ss2/ss1/ss1*cos(th1)*cos(th1))
### now check all conditions
border = opts.flag_bordersize
x1ok = True
x2ok = True
flagmax = False
if A < opts.flag_minsnr*thr: flag += 1
if A > opts.flag_maxsnr*peak:
flag += 2
flagmax = True
if x1 - border < 0 or x1 + border + 1 > shape[0]:
flag += 4
x1ok = False
if x2 - border < 0 or x2 + border + 1 > shape[1]:
flag += 8
x2ok = False
if x1ok and x2ok:
if not flagmax:
# Check image value at Gaussian center
im_val_at_cen = nd.map_coordinates(image, [N.array([x1]), N.array([x2])])
if A > opts.flag_maxsnr*im_val_at_cen:
flag += 2
borx1_1 = x1 - border
if borx1_1 < 0: borx1_1 = 0
borx1_2 = x1 + border + 1
if borx1_2 > shape[0]: borx1_2 = shape[0]
if N.any(mask[int(borx1_1):int(borx1_2), int(x2)]):
flag += 4
borx2_1 = x2 - border
if borx2_1 < 0: borx2_1 = 0
borx2_2 = x2 + border + 1
if borx2_2 > shape[1]: borx2_2 = shape[1]
if N.any(mask[int(x1), int(borx2_1):int(borx2_2)]):
flag += 8
if xbox > opts.flag_maxsize_isl*shape[0]: flag += 16
if ybox > opts.flag_maxsize_isl*shape[1]: flag += 32
if s1*s2 > opts.flag_maxsize_bm*beam[0]*beam[1]: flag += 64
if opts.flag_smallsrc:
if s1*s2 < opts.flag_minsize_bm*beam[0]*beam[1]: flag += 128
if not opts.flag_smallsrc:
if s1*s2 == 0.: flag += 128
if ss1/ss2 > 2.0:
# Only check for fairly elliptical Gaussians, as this condition
# is unreliable for more circular ones.
ellx, elly = func.drawellipse([A, x1, x2, s1*opts.flag_maxsize_fwhm,
s2*opts.flag_maxsize_fwhm, th])
pt1 = [N.min(ellx), elly[N.argmin(ellx)]]
pt2 = [ellx[N.argmax(elly)], N.max(elly)]
pt3 = [N.max(ellx), elly[N.argmax(ellx)]]
pt4 = [ellx[N.argmin(elly)], N.min(elly)]
extremes = [pt1, pt2, pt3, pt4]
for pt in extremes:
if N.any(N.isnan(pt)):
flag += 256
break
elif pt[0] < 0 or pt[0] >= shape[0] or pt[1] < 0 or pt[1] >= shape[1]:
flag += 256
break
elif mask[int(pt[0]),int(pt[1])]:
flag += 256
break
return flag
def fixup_gaussian(self, isl, gaussian):
"""Normalize parameters by adjusting them to the
proper image coordinates and ensuring that all of
the implicit conventions (such as bmaj >= bmin) are met.
"""
np = list(gaussian)
### update to the image coordinates
np[1] += isl.origin[0]
np[2] += isl.origin[1]
### shape values should be positive
np[3] = abs(np[3])
np[4] = abs(np[4])
### first extent is major
if np[3] < np[4]:
np[3:5] = np[4:2:-1]
np[5] += 90
### clip position angle
np[5] = divmod(np[5], 180)[1]
return np
def check_for_1d_gaussians(self, img):
"""Check for Gaussians with deconvolved sizes of 0 for one axis only."""
n1d = 0
ng = 0
for g in img.gaussians:
ng += 1
dsize = g.deconv_size_sky
if (dsize[0] == 0 and dsize[1] > 0) or (dsize[0] > 0 and dsize[1] == 0):
n1d += 1
if ng > 0:
return float(n1d)/float(ng)
else:
return 0.0
def find_bbox(thresh, g):
"""Calculate bounding box for gaussian.
This function calculates size of the box for evaluating
gaussian, so that value of gaussian is smaller than threshold
outside of the box.
Parameters:
    thresh: threshold
    g: Gaussian object or list of parameters
"""
from math import ceil, sqrt, log
if isinstance(g, list):
A = g[0]
S = g[3]
else:
A = g.peak_flux
S = g.size_pix[0]
if A == 0.0:
return ceil(S*1.5)
if thresh/A >= 1.0 or thresh/A <= 0.0:
return ceil(S*1.5)
return ceil(S*sqrt(-2*log(thresh/A)))
from .image import *
class Gaussian(object):
"""Instances of this class are used to store information about
extracted gaussians in a structured way.
"""
def __init__(self, img, gaussian, isl_idx, g_idx, flg=0):
"""Initialize Gaussian object from fitting data
Parameters:
img: PyBDSM image object
gaussian: 6-tuple of fitted numbers
isl_idx: island serial number
g_idx: gaussian serial number
flg: flagging (if any)
"""
from . import functions as func
from .const import fwsig
import numpy as N
# Add attribute definitions needed for output
self.source_id_def = Int(doc="Source index", colname='Source_id')
self.code_def = String(doc='Source code S, C, or M', colname='S_Code')
self.gaus_num_def = Int(doc="Serial number of the gaussian for the image", colname='Gaus_id')
self.island_id_def = Int(doc="Serial number of the island", colname='Isl_id')
self.flag_def = Int(doc="Flag associated with gaussian", colname='Flag')
self.total_flux_def = Float(doc="Total flux density, Jy", colname='Total_flux', units='Jy')
self.total_fluxE_def = Float(doc="Total flux density error, Jy", colname='E_Total_flux',
units='Jy')
self.peak_flux_def = Float(doc="Peak flux density/beam, Jy/beam", colname='Peak_flux',
units='Jy/beam')
self.peak_fluxE_def = Float(doc="Peak flux density/beam error, Jy/beam",
colname='E_Peak_flux', units='Jy/beam')
self.centre_sky_def = List(Float(), doc="Sky coordinates of gaussian centre",
colname=['RA', 'DEC'], units=['deg', 'deg'])
self.centre_skyE_def = List(Float(), doc="Error on sky coordinates of gaussian centre",
colname=['E_RA', 'E_DEC'], units=['deg', 'deg'])
self.centre_pix_def = List(Float(), doc="Pixel coordinates of gaussian centre",
colname=['Xposn', 'Yposn'], units=['pix', 'pix'])
self.centre_pixE_def = List(Float(), doc="Error on pixel coordinates of gaussian centre",
colname=['E_Xposn', 'E_Yposn'], units=['pix', 'pix'])
self.size_sky_def = List(Float(), doc="Shape of the gaussian FWHM, PA, deg",
colname=['Maj', 'Min', 'PA'], units=['deg', 'deg',
'deg'])
self.size_skyE_def = List(Float(), doc="Error on shape of the gaussian FWHM, PA, deg",
colname=['E_Maj', 'E_Min', 'E_PA'], units=['deg', 'deg',
'deg'])
self.deconv_size_sky_def = List(Float(), doc="Deconvolved shape of the gaussian FWHM, PA, deg",
colname=['DC_Maj', 'DC_Min', 'DC_PA'], units=['deg', 'deg',
'deg'])
self.deconv_size_skyE_def = List(Float(), doc="Error on deconvolved shape of the gaussian FWHM, PA, deg",
colname=['E_DC_Maj', 'E_DC_Min', 'E_DC_PA'], units=['deg', 'deg',
'deg'])
self.size_sky_uncorr_def = List(Float(), doc="Shape in image plane of the gaussian FWHM, PA, deg",
colname=['Maj_img_plane', 'Min_img_plane', 'PA_img_plane'], units=['deg', 'deg',
'deg'])
self.size_skyE_uncorr_def = List(Float(), doc="Error on shape in image plane of the gaussian FWHM, PA, deg",
colname=['E_Maj_img_plane', 'E_Min_img_plane', 'E_PA_img_plane'], units=['deg', 'deg',
'deg'])
self.deconv_size_sky_uncorr_def = List(Float(), doc="Deconvolved shape in image plane of the gaussian FWHM, PA, deg",
colname=['DC_Maj_img_plane', 'DC_Min_img_plane', 'DC_PA_img_plane'], units=['deg', 'deg',
'deg'])
self.deconv_size_skyE_uncorr_def = List(Float(), doc="Error on deconvolved shape in image plane of the gaussian FWHM, PA, deg",
colname=['E_DC_Maj_img_plane', 'E_DC_Min_img_plane', 'E_DC_PA_img_plane'], units=['deg', 'deg',
'deg'])
self.rms_def = Float(doc="Island rms Jy/beam", colname='Isl_rms', units='Jy/beam')
self.mean_def = Float(doc="Island mean Jy/beam", colname='Isl_mean', units='Jy/beam')
self.total_flux_isl_def = Float(doc="Island total flux from sum of pixels", colname='Isl_Total_flux', units='Jy')
self.total_flux_islE_def = Float(doc="Error on island total flux from sum of pixels", colname='E_Isl_Total_flux', units='Jy')
self.gresid_rms_def = Float(doc="Island rms in Gaussian residual image", colname='Resid_Isl_rms', units='Jy/beam')
self.gresid_mean_def= Float(doc="Island mean in Gaussian residual image", colname='Resid_Isl_mean', units='Jy/beam')
self.sresid_rms_def = Float(doc="Island rms in Shapelet residual image", colname='Resid_Isl_rms', units='Jy/beam')
self.sresid_mean_def= Float(doc="Island mean in Shapelet residual image", colname='Resid_Isl_mean', units='Jy/beam')
self.jlevel_def = Int(doc="Wavelet number to which Gaussian belongs", colname='Wave_id')
self.spec_indx_def = Float(doc = "Spectral index", colname='Spec_Indx', units=None)
self.e_spec_indx_def = Float(doc = "Error in spectral index", colname='E_Spec_Indx', units=None)
self.specin_flux_def = List(Float(), doc = "Total flux density per channel, Jy", colname=['Total_flux'], units=['Jy'])
self.specin_fluxE_def = List(Float(), doc = "Error in total flux density per channel, Jy", colname=['E_Total_flux'], units=['Jy'])
self.specin_freq_def = List(Float(), doc = "Frequency per channel, Hz", colname=['Freq'], units=['Hz'])
use_wcs = True
self.gaussian_idx = g_idx
self.gaus_num = 0 # stored later
self.island_id = isl_idx
self.jlevel = img.j
self.flag = flg
self.parameters = gaussian
p = gaussian
self.peak_flux = p[0]
self.centre_pix = p[1:3]
size = p[3:6]
if func.approx_equal(size[0], img.pixel_beam()[0]*1.1) and \
func.approx_equal(size[1], img.pixel_beam()[1]) and \
func.approx_equal(size[2], img.pixel_beam()[2]+90.0) or \
img.opts.fix_to_beam:
# Check whether fitted Gaussian is just the distorted pixel beam given as an
# initial guess (always set to [bm_maj*1.1, bm_min, bm_pa+90]) or if size was
# fixed to the beam. If so, reset the size to the undistorted beam. Note:
# these are sigma sizes, not FWHM sizes.
size = img.pixel_beam()
size = (size[0], size[1], size[2]+90.0) # adjust angle so that corrected_size() works correctly
size = func.corrected_size(size) # gives fwhm and P.A.
self.size_pix = size # FWHM in pixels and P.A. CCW from +y axis
# Use img.orig_beam for flux calculation and deconvolution on wavelet
# images, as img.beam has been altered to match the wavelet scale.
# Note: these are all FWHM sizes.
if img.waveletimage:
bm_pix = N.array(img.beam2pix(img.orig_beam))
else:
bm_pix = N.array(img.beam2pix(img.beam))
# Calculate fluxes, sky sizes, etc. All sizes are FWHM.
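        # The total flux of an elliptical Gaussian is the peak (in Jy/beam)
        # times the ratio of the source area to the beam area; with both
        # expressed via FWHM sizes, the Gaussian normalization factors cancel.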
tot = p[0]*size[0]*size[1]/(bm_pix[0]*bm_pix[1])
if flg == 0:
# These are good Gaussians
errors = func.get_errors(img, p+[tot], img.islands[isl_idx].rms, fixed_to_beam=img.opts.fix_to_beam)
self.centre_sky = img.pix2sky(p[1:3])
self.centre_skyE = img.pix2coord(errors[1:3], self.centre_pix, use_wcs=use_wcs)
self.size_sky = img.pix2gaus(size, self.centre_pix, use_wcs=use_wcs) # FWHM in degrees and P.A. east from north
self.size_sky_uncorr = img.pix2gaus(size, self.centre_pix, use_wcs=False) # FWHM in degrees and P.A. east from +y axis
self.size_skyE = img.pix2gaus(errors[3:6], self.centre_pix, use_wcs=use_wcs, is_error=True)
self.size_skyE_uncorr = img.pix2gaus(errors[3:6], self.centre_pix, use_wcs=False, is_error=True)
gaus_dc, err = func.deconv2(bm_pix, size)
self.deconv_size_sky = img.pix2gaus(gaus_dc, self.centre_pix, use_wcs=use_wcs)
self.deconv_size_sky_uncorr = img.pix2gaus(gaus_dc, self.centre_pix, use_wcs=False)
self.deconv_size_skyE = img.pix2gaus(errors[3:6], self.centre_pix, use_wcs=use_wcs, is_error=True)
self.deconv_size_skyE_uncorr = img.pix2gaus(errors[3:6], self.centre_pix, use_wcs=False, is_error=True)
else:
# These are flagged Gaussians, so don't calculate sky values or errors
errors = [0]*7
self.centre_sky = [0., 0.]
self.centre_skyE = [0., 0.]
self.size_sky = [0., 0., 0.]
self.size_sky_uncorr = [0., 0., 0.]
            self.size_skyE = [0., 0., 0.]
self.size_skyE_uncorr = [0., 0., 0.]
self.deconv_size_sky = [0., 0., 0.]
self.deconv_size_sky_uncorr = [0., 0., 0.]
self.deconv_size_skyE = [0., 0., 0.]
self.deconv_size_skyE_uncorr = [0., 0., 0.]
        self.total_flux = tot
        self.total_fluxE = errors[6]
        self.peak_fluxE = errors[0]
self.centre_pixE = errors[1:3]
self.size_pixE = errors[3:6]
self.rms = img.islands[isl_idx].rms
self.mean = img.islands[isl_idx].mean
self.total_flux_isl = img.islands[isl_idx].total_flux
self.total_flux_islE = img.islands[isl_idx].total_fluxE
| 48,011 | 43.414431 | 138 |
py
|
PyBDSF
|
PyBDSF-master/bdsf/multi_proc.py
|
"""Multiprocessing module to handle parallelization.
This module can optionally update a statusbar and can divide tasks
between cores using weights (so that each core gets a set of tasks with
roughly the same total weight).
Adapted from a module by Brian Refsdal at SAO, available at AstroPython
(http://www.astropython.org/snippet/2010/3/Parallel-map-using-multiprocessing).
"""
from __future__ import print_function
import traceback
import sys
import numpy
_multi = False
_ncpus = 1
try:
# May raise ImportError
import multiprocessing
# Set spawn method to "fork". This is needed for macOS on Python 3.8+ where the
# default has been changed to "spawn", causing problems (see the discussion at
# https://github.com/ipython/ipython/issues/12396)
if sys.platform == 'darwin':
if sys.version_info[0] == 3 and sys.version_info[1] >= 8:
multiprocessing.set_start_method('fork')
_multi = True
# May raise NotImplementedError
_ncpus = min(multiprocessing.cpu_count(), 8)
except:
pass
__all__ = ('parallel_map',)
def worker(f, ii, chunk, out_q, err_q, lock, bar, bar_state):
"""
A worker function that maps an input function over a
slice of the input iterable.
:param f : callable function that accepts argument from iterable
:param ii : process ID
:param chunk: slice of input iterable
:param out_q: thread-safe output queue
:param err_q: thread-safe queue to populate on exception
:param lock : thread-safe lock to protect a resource
( useful in extending parallel_map() )
:param bar: statusbar to update during fit
:param bar_state: statusbar state dictionary
"""
vals = []
# iterate over slice
for val in chunk:
try:
result = f(val)
except Exception as e:
etype,val,tbk=sys.exc_info()
print('Thread raised exception',e)
print('Traceback of thread is:')
print('-------------------------')
traceback.print_tb(tbk)
print('-------------------------')
err_q.put(e)
return
vals.append(result)
# update statusbar
if bar is not None:
if bar_state['started']:
bar.pos = bar_state['pos']
bar.spin_pos = bar_state['spin_pos']
bar.started = bar_state['started']
increment = bar.increment()
bar_state['started'] = bar.started
bar_state['pos'] += increment
bar_state['spin_pos'] += increment
if bar_state['spin_pos'] >= 4:
bar_state['spin_pos'] = 0
# output the result and task ID to output queue
out_q.put( (ii, vals) )
def run_tasks(procs, err_q, out_q, num):
"""
A function that executes populated processes and processes
the resultant array. Checks error queue for any exceptions.
:param procs: list of Process objects
:param out_q: thread-safe output queue
:param err_q: thread-safe queue to populate on exception
:param num : length of resultant array
"""
# function to terminate processes that are still running.
die = (lambda vals : [val.terminate() for val in vals
if val.exitcode is None])
try:
for proc in procs:
proc.start()
for proc in procs:
proc.join()
except Exception as e:
# kill all slave processes on ctrl-C
die(procs)
raise e
if not err_q.empty():
# kill all on any exception from any one slave
die(procs)
raise err_q.get()
# Processes finish in arbitrary order. Process IDs double
# as index in the resultant array.
    results = [None]*num
for i in range(num):
idx, result = out_q.get()
results[idx] = result
# Remove extra dimension added by array_split
result_list = []
for result in results:
result_list += result
return result_list
def parallel_map(function, sequence, numcores=None, bar=None, weights=None):
"""
A parallelized version of the native Python map function that
utilizes the Python multiprocessing module to divide and
conquer a sequence.
parallel_map does not yet support multiple argument sequences.
:param function: callable function that accepts argument from iterable
:param sequence: iterable sequence
:param numcores: number of cores to use (if None, all are used)
:param bar: statusbar to update during fit
:param weights: weights to use when splitting the sequence
"""
if not callable(function):
raise TypeError("input function '%s' is not callable" %
repr(function))
if not numpy.iterable(sequence):
raise TypeError("input '%s' is not iterable" %
repr(sequence))
sequence = numpy.array(list(sequence), dtype=object)
size = len(sequence)
if not _multi or size == 1:
results = list(map(function, sequence))
if bar is not None:
bar.stop()
return results
# Set default number of cores to use. Try to leave one core free for pyplot.
if numcores is None:
numcores = _ncpus - 1
if numcores > _ncpus - 1:
numcores = _ncpus - 1
if numcores < 1:
numcores = 1
# Returns a started SyncManager object which can be used for sharing
# objects between processes. The returned manager object corresponds
# to a spawned child process and has methods which will create shared
# objects and return corresponding proxies.
manager = multiprocessing.Manager()
# Create FIFO queue and lock shared objects and return proxies to them.
    # The manager handles a server process that manages shared objects that
# each slave process has access to. Bottom line -- thread-safe.
out_q = manager.Queue()
err_q = manager.Queue()
lock = manager.Lock()
bar_state = manager.dict()
if bar is not None:
bar_state['pos'] = bar.pos
bar_state['spin_pos'] = bar.spin_pos
bar_state['started'] = bar.started
    # If the sequence has fewer elements than numcores, only use as many
    # processes as there are elements
if size < numcores:
numcores = size
# group sequence into numcores-worth of chunks
if weights is None or numcores == size:
# No grouping specified (or there are as many cores as
# processes), so divide into equal chunks
sequence = numpy.array_split(sequence, numcores)
else:
# Group so that each group has roughly an equal sum of weights
weight_per_core = numpy.sum(weights)/float(numcores)
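        # Walk through the weights, starting a new chunk whenever the
        # running sum exceeds weight_per_core (keeping at most numcores-1
        # cut points). For example (illustrative numbers), weights =
        # [1, 1, 4, 1, 1] with numcores=2 gives weight_per_core = 4 and a
        # single cut before index 3, i.e. chunks of 3 and 2 items.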
cut_values = []
temp_sum = 0.0
for indx, weight in enumerate(weights):
temp_sum += weight
if temp_sum > weight_per_core:
cut_values.append(indx+1)
temp_sum = weight
if len(cut_values) > numcores - 1:
cut_values = cut_values[0:numcores-1]
sequence = numpy.array_split(sequence, cut_values)
# Make sure there are no empty chunks at the end of the sequence
while len(sequence[-1]) == 0:
sequence.pop()
procs = [multiprocessing.Process(target=worker,
args=(function, ii, chunk, out_q, err_q, lock, bar, bar_state))
for ii, chunk in enumerate(sequence)]
try:
results = run_tasks(procs, err_q, out_q, len(sequence))
if bar is not None:
if bar.started:
bar.stop()
return results
except KeyboardInterrupt:
for proc in procs:
if proc.exitcode is None:
proc.terminate()
proc.join()
raise
| 7,753 | 31.041322 | 83 |
py
|
PyBDSF
|
PyBDSF-master/bdsf/threshold.py
|
"""Module threshold.
Defines operation Op_threshold. If the option 'thresh' is defined
as 'fdr' then the value of thresh_pix is estimated using the
False Detection Rate algorithm (using the user defined value
of fdr_alpha). If thresh is None, then the false detection
probability is first calculated, and if the number of false source
pixels is more than fdr_ratio times the estimated number of true source
pixels, then FDR is chosen, else the hard threshold option is chosen.
Masked images aren't handled properly yet.
"""
from __future__ import absolute_import
import numpy as N
from .image import Op, Image, NArray
from math import sqrt,pi,log
from scipy.special import erfc
from . import const
from . import mylogger
class Op_threshold(Op):
"""Calculates FDR threshold if necessary.
Prerequisites: Module preprocess and rmsimage should be run first.
"""
def __call__(self, img):
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Threshold ")
data = img.ch0_arr
mask = img.mask_arr
opts = img.opts
size = N.product(img.ch0_arr.shape)
sq2 = sqrt(2)
if img.opts.thresh is None:
source_p = self.get_srcp(img)
cutoff = 5.0
false_p = 0.5*erfc(cutoff/sq2)*size
if false_p < opts.fdr_ratio*source_p:
img.thresh = 'hard'
mylogger.userinfo(mylog, "Expected 5-sigma-clipped false detection rate < fdr_ratio")
mylogger.userinfo(mylog, "Using sigma-clipping ('hard') thresholding")
else:
img.thresh = 'fdr'
mylogger.userinfo(mylog, "Expected 5-sigma-clipped false detection rate > fdr_ratio")
mylogger.userinfo(mylog, "Using FDR (False Detection Rate) thresholding")
mylog.debug('%s %g' % ("Estimated number of source pixels (using sourcecounts.py) is ",source_p))
mylog.debug('%s %g' % ("Number of false positive pixels expected for 5-sigma is ",false_p))
mylog.debug("Threshold for pixels set to : "+str.swapcase(img.thresh))
else:
img.thresh = img.opts.thresh
if img.thresh=='fdr':
cdelt = img.wcs_obj.acdelt[:2]
bm = (img.beam[0], img.beam[1])
area_pix = int(round(N.product(bm)/(abs(N.product(cdelt))* \
pi/(4.0*log(2.0)))))
s0 = 0
for i in range(area_pix):
s0 += 1.0/(i+1)
slope = opts.fdr_alpha/s0
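            # s0 corrects for the correlation of pixels within one beam, so
            # the FDR comparison line has slope fdr_alpha/s0. pcrit is the
            # p-value at which the sorted p-values first drop below this
            # line; it is converted to an equivalent sigma threshold below.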
# sort erf of normalised image as vector
v = N.sort(0.5*erfc(N.ravel((data-img.mean_arr)/img.rms_arr)/sq2))[::-1]
pcrit = None
for i,x in enumerate(v):
if x < slope*i/size:
pcrit = x
break
if pcrit is None:
raise RuntimeError("FDR thresholding failed. Please check the input image for problems.")
dumr1 = 1.0-2.0*pcrit
dumr = 8.0/3.0/pi*(pi-3.0)/(4.0-pi)
# approx for inv(erfc)
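            # dumr1 = erf(z/sqrt(2)) for the critical z, so the expression
            # below is a closed-form approximation to sqrt(2)*erfinv(1 -
            # 2*pcrit), i.e. the detection threshold in units of sigma.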
sigcrit = sqrt(-2.0/pi/dumr-log(1.0-dumr1*dumr1)/2.0+ \
sqrt((2.0/pi/dumr+log(1.0-dumr1*dumr1)/2.0)* \
(2.0/pi/dumr+log(1.0-dumr1*dumr1)/2.0)- \
log(1.0-dumr1*dumr1)/dumr))*sq2
if pcrit == 0.0:
img.thresh = 'hard'
else:
img.thresh_pix = sigcrit
mylogger.userinfo(mylog, "FDR threshold (replaces thresh_pix)", str(round(sigcrit, 4)))
else:
img.thresh_pix = opts.thresh_pix
img.completed_Ops.append('threshold')
return img
def get_srcp(self, img):
from . import sourcecounts as sc
fwsig = const.fwsig
cutoff = 5.0
spin = -0.80
freq = img.frequency
bm = (img.beam[0], img.beam[1])
cdelt = img.wcs_obj.acdelt[:2]
x = 2.0*pi*N.product(bm)/abs(N.product(cdelt))/(fwsig*fwsig)*img.omega
smin_L = img.clipped_rms*cutoff*((1.4e9/freq)**spin)
scflux = sc.s
scnum = sc.n
index = 0
for i,s in enumerate(scflux):
if s < smin_L:
index = i
break
n1 = scnum[index]; n2 = scnum[-1]
s1 = scflux[index]; s2 = scflux[-1]
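        # Derive a power-law slope for the source counts between s1 (just
        # below the detection limit) and s2 (the faint end of the table) and
        # use it to estimate the number of image pixels expected to be
        # covered by real sources brighter than the cutoff.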
alpha = 1.0-log(n1/n2)/log(s1/s2)
A = (alpha-1.0)*n1/(s1**(1.0-alpha))
source_p = x*A*((cutoff*img.clipped_rms)**(1.0-alpha)) \
/((1.0-alpha)*(1.0-alpha))
return source_p
| 4,603 | 38.689655 | 113 |
py
|
PyBDSF
|
PyBDSF-master/bdsf/__init__.py
|
"""Initialize PyBDSF namespace.
Import all standard operations, define default chain of
operations and provide function 'execute', which can
execute chain of operations properly. Also define the
'process_image' convenience function that can take
options as arguments rather than as a dictionary (as
required by 'execute').
"""
from __future__ import print_function
from __future__ import absolute_import
try:
import matplotlib.pyplot as pl
has_pl = True
except (RuntimeError, ImportError, AssertionError):
import sys
print("\033[31;1mWARNING\033[0m: Matplotlib pyplot could not be imported. Plotting is disabled.", file=sys.stderr)
has_pl = False
from .readimage import Op_readimage
from .collapse import Op_collapse
from .preprocess import Op_preprocess
from .rmsimage import Op_rmsimage
from .threshold import Op_threshold
from .islands import Op_islands
from .gausfit import Op_gausfit
from .make_residimage import Op_make_residimage
from .output import Op_outlist
from .shapefit import Op_shapelets
from .gaul2srl import Op_gaul2srl
from .spectralindex import Op_spectralindex
from .polarisation import Op_polarisation
from .wavelet_atrous import Op_wavelet_atrous
from .psf_vary import Op_psf_vary
from .cleanup import Op_cleanup
from ._version import __version__
from .functions import set_up_output_paths
import gc
import os
default_chain = [Op_readimage(),
Op_collapse(),
Op_preprocess(),
Op_rmsimage(),
Op_threshold(),
Op_islands(),
Op_gausfit(),
Op_wavelet_atrous(),
Op_shapelets(),
Op_gaul2srl(),
Op_spectralindex(),
Op_polarisation(),
Op_make_residimage(),
Op_psf_vary(),
Op_outlist(),
Op_cleanup()
]
fits_chain = default_chain # for legacy scripts
def execute(chain, opts):
"""Execute chain.
Create new Image with given options and apply chain of
operations to it. The opts input must be a dictionary.
"""
from .image import Image
from . import mylogger
if 'quiet' in opts:
quiet = opts['quiet']
else:
quiet = False
if 'debug' in opts:
debug = opts['debug']
else:
debug = False
_, basedir = set_up_output_paths(opts)
basename = os.path.basename(opts['filename']) + '.pybdsf.log'
logfilename = os.path.join(basedir, basename)
mylogger.init_logger(logfilename, quiet=quiet, debug=debug)
mylog = mylogger.logging.getLogger("PyBDSF.Init")
mylog.info("Processing "+opts["filename"])
try:
img = Image(opts)
img.log = logfilename
_run_op_list(img, chain)
return img
except RuntimeError as err:
# Catch and log, then re-raise if needed (e.g., for AstroWise)
mylog.error(str(err))
raise
except KeyboardInterrupt:
mylogger.userinfo(mylog, "\n\033[31;1mAborted\033[0m")
raise
def _run_op_list(img, chain):
"""Runs an Image object through chain of op's.
This is separate from execute() to allow other modules (such as
interface.py) to use it as well.
"""
from time import time
from .interface import raw_input_no_history
from .gausfit import Op_gausfit
from . import mylogger
import gc
ops = []
stopat = img.opts.stop_at
# Make sure all op's are instances
for op in chain:
if isinstance(op, type):
ops.append(op())
else:
ops.append(op)
if stopat == 'read' and isinstance(op, Op_readimage): break
if stopat == 'isl' and isinstance(op, Op_islands): break
# Log all non-default parameters
mylog = mylogger.logging.getLogger("PyBDSF.Init")
mylog.info("PyBDSF version %s"
% (__version__, ))
par_msg = "Non-default input parameters:\n"
user_opts = img.opts.to_list()
for user_opt in user_opts:
k, v = user_opt
val = img.opts.__getattribute__(k)
if val != v._default and v.group() != 'hidden':
par_msg += ' %-20s = %s\n' % (k, repr(val))
mylog.info(par_msg[:-1]) # -1 is to trim final newline
# Run all op's
dc = '\033[34;1m'
nc = '\033[0m'
for op in ops:
if isinstance(op, Op_gausfit) and img.opts.interactive:
print(dc + '--> Displaying islands and rms image...' + nc)
if max(img.ch0_arr.shape) > 4096:
print(dc + '--> Image is large. Showing islands only.' + nc)
img.show_fit(rms_image=False, mean_image=False, ch0_image=False,
ch0_islands=True, gresid_image=False, sresid_image=False,
gmodel_image=False, smodel_image=False, pyramid_srcs=False)
else:
img.show_fit(rms_image=True, mean_image=True,
ch0_islands=True, gresid_image=False, sresid_image=False,
gmodel_image=False, smodel_image=False, pyramid_srcs=False)
prompt = dc + "Press enter to continue or 'q' to quit .. : " + nc
answ = raw_input_no_history(prompt)
while answ != '':
if answ == 'q':
return False
answ = raw_input_no_history(prompt)
op.__start_time = time()
op(img)
op.__stop_time = time()
gc.collect()
if img.opts.interactive and not img._pi:
print(dc + 'Fitting complete. Displaying results...' + nc)
if img.opts.shapelet_do:
show_smod = True
show_sres = True
else:
show_smod = False
show_sres = False
if img.opts.spectralindex_do:
show_spec = True
else:
show_spec = False
if max(img.ch0_arr.shape) > 4096:
print(dc + '--> Image is large. Showing Gaussian residual image only.' + nc)
img.show_fit(rms_image=False, mean_image=False, ch0_image=False,
ch0_islands=False, gresid_image=True, sresid_image=False,
gmodel_image=False, smodel_image=False, pyramid_srcs=False,
source_seds=show_spec)
else:
img.show_fit(smodel_image=show_smod, sresid_image=show_sres,
source_seds=show_spec)
if img.opts.print_timing:
print("="*36)
print("%18s : %10s" % ("Module", "Time (sec)"))
print("-"*36)
for i, op in enumerate(chain):
if hasattr(op, '__start_time'):
print("%18s : %f" % (op.__class__.__name__,
(op.__stop_time - op.__start_time)))
indx_stop = i
print("="*36)
print("%18s : %f" % ("Total",
(chain[indx_stop].__stop_time - chain[0].__start_time)))
# Log all internally derived parameters
mylog = mylogger.logging.getLogger("PyBDSF.Final")
par_msg = "Internally derived parameters:\n"
import inspect
import types
for attr in inspect.getmembers(img.opts):
if attr[0][0] != '_':
if isinstance(attr[1], (int, str, bool, float, type(None), tuple, list)):
if hasattr(img, attr[0]):
used = img.__getattribute__(attr[0])
if used != attr[1] and isinstance(used, (int, str, bool, float,
type(None), tuple,
list)):
par_msg += ' %-20s : %s\n' % (attr[0], repr(used))
mylog.info(par_msg[:-1]) # -1 is to trim final newline
return True
def process_image(input, **kwargs):
"""Run a standard analysis and returns the associated Image object.
The input can be a FITS or CASA image, a PyBDSF parameter save
file, or a dictionary of options. Partial names are allowed for the
parameters as long as they are unique. Parameters are set to default
values if par = ''.
Examples:
> img = bdsf.process_image('example.fits', thresh_isl=4)
          --> process FITS image named 'example.fits'
> img_3C196 = bdsf.process_image('3C196.image', mea='map')
--> process CASA image, 'mean_map' parameter is abbreviated
> img_VirA = bdsf.process_image('VirA_im.pybdsf.sav')
--> load parameter save file and process
"""
from .interface import load_pars
from .image import Image
import os
# Try to load input assuming it's a parameter save file or a dictionary.
# load_pars returns None if this doesn't work.
img, err = load_pars(input)
# If load_pars fails (returns None), assume that input is an image file. If it's not a
# valid image file (but is an existing file), an error will be raised
# by img.process() during reading of the file.
if img is None:
if os.path.exists(input):
img = Image({'filename': input})
else:
raise RuntimeError("File '" + input + "' not found.")
# Set logging options (must be done explicitly, as they are used before the
# kwargs are parsed in img.process())
if 'quiet' in kwargs:
img.opts.quiet = kwargs['quiet']
if 'debug' in kwargs:
img.opts.debug = kwargs['debug']
# Now process it. Any kwargs specified by the user will
# override those read in from the parameter save file or dictionary.
img.process(**kwargs)
return img
| 9,534 | 35.957364 | 118 |
py
|
PyBDSF
|
PyBDSF-master/bdsf/gaul2srl.py
|
"""Module gaul2srl
This module groups the Gaussians in an island into sources. It follows
callgaul2srl.f, though it could probably be made more efficient.
img.sources is a list of source objects, which are instances of the class Source
(with attributes the same as in .srl of fbdsm).
img.sources[n] is a source.
source.gaussians is the list of component gaussian objects.
source.island_id is the island id of that source.
source.source_id is the source id of that source, the index of source in img.sources.
Each gaussian object gaus has gaus.source_id, the source id.
Also, each island object of img.islands list has the source object island.source
"""
from __future__ import absolute_import
from .image import *
from .islands import *
from .interface import wrap
from . import mylogger
import numpy as N
N.seterr(divide='raise')
class Op_gaul2srl(Op):
"""
Slightly modified from fortran.
"""
def __call__(self, img):
# for each island, get the gaussians into a list and then send them to process
# src_index is source number, starting from 0
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Gaul2Srl")
mylogger.userinfo(mylog, 'Grouping Gaussians into sources')
img.aperture = img.opts.aperture
if img.aperture is not None and img.aperture <= 0.0:
mylog.warn('Specified aperture is <= 0. Skipping aperture fluxes.')
img.aperture = None
src_index = -1
dsrc_index = 0
sources = []
dsources = []
no_gaus_islands = []
no_gaus_islands_flag_values = []
for iisl, isl in enumerate(img.islands):
isl_sources = []
isl_dsources = []
g_list = []
for g in isl.gaul:
if g.flag == 0:
g_list.append(g)
if len(g_list) > 0:
if len(g_list) == 1:
src_index, source = self.process_single_gaussian(img, g_list, src_index, code = 'S')
sources.append(source)
isl_sources.append(source)
else:
src_index, source = self.process_CM(img, g_list, isl, src_index)
sources.extend(source)
isl_sources.extend(source)
else:
if not img.waveletimage:
dg = isl.dgaul[0]
no_gaus_islands.append((isl.island_id, dg.centre_pix[0], dg.centre_pix[1]))
flag_values = []
for fg in isl.fgaul:
flag_values.append(fg.flag)
no_gaus_islands_flag_values.append(flag_values)
# Put in the dummy Source as the source and use negative IDs
g_list = isl.dgaul
dsrc_index, dsource = self.process_single_gaussian(img, g_list, dsrc_index, code = 'S')
dsources.append(dsource)
isl_dsources.append(dsource)
isl.sources = isl_sources
isl.dsources = isl_dsources
img.sources = sources
img.dsources = dsources
img.nsrc = src_index + 1
mylogger.userinfo(mylog, "Number of sources formed from Gaussians",
str(img.nsrc))
if not img.waveletimage and not img._pi and len(no_gaus_islands) > 0 and not img.opts.quiet:
message = 'All Gaussians were flagged for the following island'
if len(no_gaus_islands) == 1:
message += ':\n'
else:
message += 's:\n'
for isl_id, flag_list in zip(no_gaus_islands, no_gaus_islands_flag_values):
message += ' Island #%i (x=%i, y=%i): ' % isl_id
if len(flag_list) > 0:
flags_str = '{}'.format(', '.join([str(f) for f in flag_list]))
if len(flag_list) == 1:
pl_str = ''
else:
pl_str = 's'
message += 'fit with {0} Gaussian{1} with flag{1} = {2}\n'.format(len(flag_list), pl_str, flags_str)
else:
message += '\n'
if len(no_gaus_islands) == 1:
message += 'Please check this island. If it is a valid island and\n'
else:
message += 'Please check these islands. If they are valid islands and\n'
if img.opts.atrous_do:
message += 'should be fit, try adjusting the flagging options (use\n'\
'show_fit with "ch0_flagged=True" to see the flagged Gaussians\n'\
'and "help \'flagging_opts\'" to see the meaning of the flags).'
else:
message += 'should be fit, try adjusting the flagging options (use\n'\
'show_fit with "ch0_flagged=True" to see the flagged Gaussians\n'\
'and "help \'flagging_opts\'" to see the meaning of the flags)\n'\
'or enabling the wavelet module (with "atrous_do=True").'
message += '\nTo include empty islands in output source catalogs, set\n'\
'incl_empty=True in the write_catalog task.'
mylog.warning(message)
img.completed_Ops.append('gaul2srl')
#################################################################################################
def process_single_gaussian(self, img, g_list, src_index, code):
""" Process single gaussian into a source, for both S and C type sources. g is just one
Gaussian object (not a list)."""
g = g_list[0]
total_flux = [g.total_flux, g.total_fluxE]
peak_flux_centroid = peak_flux_max = [g.peak_flux, g.peak_fluxE]
posn_sky_centroid = posn_sky_max = [g.centre_sky, g.centre_skyE]
size_sky = [g.size_sky, g.size_skyE]
size_sky_uncorr = [g.size_sky_uncorr, g.size_skyE]
deconv_size_sky = [g.deconv_size_sky, g.deconv_size_skyE]
deconv_size_sky_uncorr = [g.deconv_size_sky_uncorr, g.deconv_size_skyE]
bbox = img.islands[g.island_id].bbox
ngaus = 1
island_id = g.island_id
aper_flux = func.ch0_aperture_flux(img, g.centre_pix, img.aperture)
if g.gaussian_idx == -1:
src_index -= 1
else:
src_index += 1
g.source_id = src_index
g.code = code
if g.gaus_num < 0:
gaussians = []
else:
gaussians = [g]
source_prop = list([code, total_flux, peak_flux_centroid, peak_flux_max, aper_flux, posn_sky_centroid,
posn_sky_max, size_sky, size_sky_uncorr, deconv_size_sky, deconv_size_sky_uncorr, bbox, ngaus, island_id, gaussians])
source = Source(img, source_prop)
source.source_id = src_index
return src_index, source
##################################################################################################
def process_CM(self, img, g_list, isl, src_index):
"""
Bundle errors with the quantities.
ngau = number of gaussians in island
src_id = the source index array for every gaussian in island
nsrc = final number of distinct sources in the island
"""
ngau = len(g_list) # same as cisl in callgaul2srl.f
nsrc = ngau # same as islct; initially make each gaussian as a source
src_id = N.arange(nsrc) # same as islnum in callgaul2srl.f
boxx, boxy = isl.bbox
subn = boxx.stop-boxx.start; subm = boxy.stop-boxy.start
delc = [boxx.start, boxy.start]
subim = self.make_subim(subn, subm, g_list, delc)
index = [(i,j) for i in range(ngau) for j in range(ngau) if j > i]
for pair in index:
same_island = self.in_same_island(pair, img, g_list, isl, subim, subn, subm, delc)
if same_island:
nsrc -= 1
mmax, mmin = max(src_id[pair[0]],src_id[pair[1]]), min(src_id[pair[0]],src_id[pair[1]])
arr = N.where(src_id == mmax)[0]; src_id[arr] = mmin
# now reorder src_id so that it is contiguous
for i in range(ngau):
ind1 = N.where(src_id==i)[0]
if len(ind1) == 0:
arr = N.where(src_id > i)[0]
if len(arr) > 0:
decr = N.min(src_id[arr])-i
for j in arr: src_id[j] -= decr
nsrc = N.max(src_id)+1
        # now do what is done in sub_calc_para_source
source_list = []
for isrc in range(nsrc):
posn = N.where(src_id == isrc)[0]
g_sublist=[]
for i in posn:
g_sublist.append(g_list[i])
ngau_insrc = len(posn)
# Do source type C
if ngau_insrc == 1:
src_index, source = self.process_single_gaussian(img, g_sublist, src_index, code = 'C')
else:
# make mask and subim. Invalid mask value is -1 since 0 is valid srcid
mask = self.make_mask(isl, subn, subm, 1, isrc, g_sublist, delc)
src_index, source = self.process_Multiple(img, g_sublist, mask, src_index, isrc, subim, \
isl, delc, subn, subm)
source_list.append(source)
return src_index, source_list
##################################################################################################
def in_same_island(self, pair, img, g_list, isl, subim, subn, subm, delc):
""" Whether two gaussians belong to the same source or not. """
from . import functions as func
def same_island_min(pair, g_list, subim, delc, tol=0.5):
""" If the difference between the lower peak and the minimum of the reconstructed fluxes along the line joining the peak positions
is greater than thresh_isl times the rms_clip, they belong to different islands. """
g1 = g_list[pair[0]]
g2 = g_list[pair[1]]
pix1 = N.array(g1.centre_pix)
pix2 = N.array(g2.centre_pix)
x1, y1 = map(int, N.floor(pix1)-delc); x2, y2 = map(int, N.floor(pix2)-delc)
pix1 = N.array(N.unravel_index(N.argmax(subim[x1:x1+2,y1:y1+2]), (2,2)))+[x1,y1]
pix2 = N.array(N.unravel_index(N.argmax(subim[x2:x2+2,y2:y2+2]), (2,2)))+[x2,y2]
if pix1[1] >= subn: pix1[1] = pix1[1]-1
if pix2[1] >= subm: pix2[1] = pix2[1]-1
pix1 = pix1.astype(float) #N.array(map(float, pix1))
pix2 = pix2.astype(float) #N.array(map(float, pix2))
maxline = int(round(N.max(N.abs(pix1-pix2)+1)))
flux1 = g1.peak_flux
flux2 = g2.peak_flux
# get pix values of the line
pixdif = pix2 - pix1
same_island_min = False
same_island_cont = False
if maxline == 1:
same_island_min = True
same_island_cont = True
else:
if abs(pixdif[0]) > abs(pixdif[1]):
xline = N.round(min(pix1[0],pix2[0])+N.arange(maxline))
yline = N.round((pix1[1]-pix2[1])/(pix1[0]-pix2[0])* \
(min(pix1[0],pix2[0])+N.arange(maxline)-pix1[0])+pix1[1])
else:
yline = N.round(min(pix1[1],pix2[1])+N.arange(maxline))
xline = N.round((pix1[0]-pix2[0])/(pix1[1]-pix2[1])* \
(min(pix1[1],pix2[1])+N.arange(maxline)-pix1[1])+pix1[0])
rpixval = N.zeros(maxline, dtype=N.float32)
xbig = N.where(xline >= N.size(subim,0))
xline[xbig] = N.size(subim,0) - 1
ybig = N.where(yline >= N.size(subim,1))
yline[ybig] = N.size(subim,1) - 1
for i in range(maxline):
pixval = subim[int(xline[i]), int(yline[i])]
rpixval[i] = pixval
min_pixval = N.min(rpixval)
minind_p = N.argmin(rpixval)
maxind_p = N.argmax(rpixval)
if minind_p in (0, maxline-1) and maxind_p in (0, maxline-1):
same_island_cont = True
if min_pixval >= min(flux1, flux2):
same_island_min = True
elif abs(min_pixval-min(flux1,flux2)) <= tol*isl.rms*img.opts.thresh_isl:
same_island_min = True
return same_island_min, same_island_cont
def same_island_dist(pair, g_list, tol=0.5):
""" If the centres are seperated by a distance less than half the sum of their
fwhms along the PA of the line joining them, they belong to the same island. """
from math import sqrt
g1 = g_list[pair[0]]
g2 = g_list[pair[1]]
pix1 = N.array(g1.centre_pix)
pix2 = N.array(g2.centre_pix)
gsize1 = g1.size_pix
gsize2 = g2.size_pix
fwhm1 = func.gdist_pa(pix1, pix2, gsize1)
fwhm2 = func.gdist_pa(pix1, pix2, gsize2)
dx = pix2[0]-pix1[0]; dy = pix2[1]-pix1[1]
dist = sqrt(dy*dy + dx*dx)
if dist <= tol*(fwhm1+fwhm2):
same_island = True
else:
same_island = False
return same_island
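        # Illustrative arithmetic (editor's sketch, not part of the original code):
        # with tol = 0.25 and FWHMs of 3 and 5 pixels measured along the line
        # joining the two centres, the Gaussians are considered part of the same
        # source only if their centres lie within 0.25 * (3 + 5) = 2 pixels.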
if img.opts.group_by_isl:
same_isl1_min = True
same_isl1_cont = True
same_isl2 = True
else:
if img.opts.group_method == 'curvature':
subim = -1.0 * func.make_curvature_map(subim)
tol = img.opts.group_tol
same_isl1_min, same_isl1_cont = same_island_min(pair, g_list, subim, delc, tol)
same_isl2 = same_island_dist(pair, g_list, tol/2.0)
g1 = g_list[pair[0]]
same_island = (same_isl1_min and same_isl2) or same_isl1_cont
return same_island
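    # Summary of the grouping logic above (editor's note): with group_by_isl=True all
    # Gaussians in an island form one source; otherwise the pair is grouped if both the
    # minimum-value and distance criteria hold, or if both the minimum and maximum of the
    # flux sampled along the joining line occur at its endpoints (the "cont" test).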
##################################################################################################
def process_Multiple(self, img, g_sublist, mask, src_index, isrc, subim, isl, delc, subn, subm):
""" Same as gaul_to_source.f. isrc is same as k in the fortran version. """
from math import pi, sqrt
from .const import fwsig
from scipy import ndimage
from . import functions as func
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Gaul2Srl ")
dum = img.beam[0]*img.beam[1]
cdeltsq = img.wcs_obj.acdelt[0]*img.wcs_obj.acdelt[1]
bmar_p = 2.0*pi*dum/(cdeltsq*fwsig*fwsig)
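        # Editor's note: bmar_p is the beam area in pixels -- with fwsig = 2*sqrt(2*ln 2)
        # converting FWHM to sigma, 2*pi*(bmaj/fwsig)*(bmin/fwsig)/(pixel area) gives the
        # number of pixels per beam, which is passed to the moment analysis below.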
# try
subim_src = self.make_subim(subn, subm, g_sublist, delc)
mompara = func.momanalmask_gaus(subim_src, mask, isrc, bmar_p, True)
# initial peak posn and value
maxv = N.max(subim_src)
maxx, maxy = N.unravel_index(N.argmax(subim_src), subim_src.shape)
# fit gaussian around this posn
blc = N.zeros(2,dtype=int); trc = N.zeros(2,dtype=int)
n, m = subim_src.shape[0:2]
bm_pix = N.array([img.pixel_beam()[0]*fwsig, img.pixel_beam()[1]*fwsig, img.pixel_beam()[2]])
ssubimsize = max(int(N.round(N.max(bm_pix[0:2])*2))+1, 5)
blc[0] = max(0, maxx-(ssubimsize-1)/2); blc[1] = max(0, maxy-(ssubimsize-1)/2)
trc[0] = min(n, maxx+(ssubimsize-1)/2); trc[1] = min(m, maxy+(ssubimsize-1)/2)
s_imsize = trc - blc + 1
p_ini = [maxv, (s_imsize[0]-1)/2.0*1.1, (s_imsize[1]-1)/2.0*1.1, bm_pix[0]/fwsig*1.3, \
bm_pix[1]/fwsig*1.1, bm_pix[2]*2]
data = subim_src[blc[0]:blc[0]+s_imsize[0], blc[1]:blc[1]+s_imsize[1]]
smask = mask[blc[0]:blc[0]+s_imsize[0], blc[1]:blc[1]+s_imsize[1]]
rmask = N.where(smask==isrc, False, True)
x_ax, y_ax = N.indices(data.shape)
if N.sum(~rmask) >=6:
para, ierr = func.fit_gaus2d(data, p_ini, x_ax, y_ax, rmask)
if (0.0<para[1]<s_imsize[0]) and (0.0<para[2]<s_imsize[1]) and \
para[3]<s_imsize[0] and para[4]<s_imsize[1]:
maxpeak = para[0]
else:
maxpeak = maxv
posn = para[1:3]-(0.5*N.sum(s_imsize)-1)/2.0+N.array([maxx, maxy])-1+delc
else:
maxpeak = maxv
posn = N.unravel_index(N.argmax(data*~rmask), data.shape)+N.array(delc) +blc
# calculate peak by bilinear interpolation around centroid
# First check that moment analysis gave a valid position. If not, use
# posn from gaussian fit instead.
if N.isnan(mompara[1]):
mompara[1] = posn[0] - delc[0]
x1 = int(N.floor(mompara[1]))
if N.isnan(mompara[2]):
mompara[2] = posn[1] - delc[1]
y1 = int(N.floor(mompara[2]))
xind = slice(x1, x1+2, 1); yind = slice(y1, y1+2, 1)
if img.opts.flag_smallsrc and (N.sum(mask[xind, yind]==N.ones((2,2))*isrc) != 4):
mylog.debug('Island = '+str(isl.island_id))
mylog.debug('Mask = '+repr(mask[xind, yind])+'xind, yind, x1, y1 = '+repr(xind)+' '+repr(yind)+' '+repr(x1)+' '+repr(y1))
        t=(mompara[1]-x1)/(x1+1-x1) # denominator kept explicit in case you change it later
u=(mompara[2]-y1)/(y1+1-y1)
try:
s_peak=((1.0-t)*(1.0-u)*subim_src[x1,y1]+
t*(1.0-u)*subim_src[x1+1,y1]+
t*u*subim_src[x1+1,y1+1]+
(1.0-t)*u*subim_src[x1,y1+1])
except IndexError:
# interpolation failed because source is too small
# probably pathological, take a guess..
s_peak=subim_src[x1,y1]
if (not img.opts.flag_smallsrc) and (N.sum(mask[xind, yind]==N.ones((2,2))*isrc) != 4):
mylog.debug('Speak '+repr(s_peak)+'Mompara = '+repr(mompara))
mylog.debug('x1, y1 : '+repr(x1)+', '+repr(y1))
# Don't let s_peak fall too far below the normalized peak (this can
# happen when, e.g., the centroid falls outside of the source)
norm_peak = mompara[0]*bmar_p/(mompara[3]*mompara[4])
if s_peak < norm_peak/2.0:
s_peak = norm_peak/2.0
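        # Worked example of the bilinear interpolation above (illustrative numbers only):
        # if subim_src[x1:x1+2, y1:y1+2] = [[1.0, 2.0], [3.0, 4.0]] and the fractional
        # offsets are t = u = 0.5, then
        #   s_peak = 0.25*1.0 + 0.25*3.0 + 0.25*4.0 + 0.25*2.0 = 2.5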
# convert pixels to coords
try:
sra, sdec = img.pix2sky([mompara[1]+delc[0], mompara[2]+delc[1]])
mra, mdec = img.pix2sky(posn)
except RuntimeError as err:
# Invalid pixel wcs coordinate
sra, sdec = 0.0, 0.0
mra, mdec = 0.0, 0.0
# "deconvolve" the sizes
gaus_c = [mompara[3], mompara[4], mompara[5]]
gaus_bm = [bm_pix[0], bm_pix[1], bm_pix[2]]
gaus_dc, err = func.deconv2(gaus_bm, gaus_c)
deconv_size_sky = img.pix2gaus(gaus_dc, [mompara[1]+delc[0], mompara[2]+delc[1]])
deconv_size_sky_uncorr = img.pix2gaus(gaus_dc, [mompara[1]+delc[0], mompara[2]+delc[1]], use_wcs=False)
# update all objects etc
tot = 0.0
totE_sq = 0.0
for g in g_sublist:
tot += g.total_flux
totE_sq += g.total_fluxE**2
totE = sqrt(totE_sq)
size_pix = [mompara[3], mompara[4], mompara[5]]
size_sky = img.pix2gaus(size_pix, [mompara[1]+delc[0], mompara[2]+delc[1]])
size_sky_uncorr = img.pix2gaus(size_pix, [mompara[1]+delc[0], mompara[2]+delc[1]], use_wcs=False)
# Estimate uncertainties in source size and position due to
# errors in the constituent Gaussians using a Monte Carlo technique.
# Sum with Condon (1997) errors in quadrature.
plist = mompara.tolist()+[tot]
plist[0] = s_peak
plist[3] /= fwsig
plist[4] /= fwsig
errors = func.get_errors(img, plist, isl.rms)
if img.opts.do_mc_errors:
nMC = 20
mompara0_MC = N.zeros(nMC, dtype=N.float32)
mompara1_MC = N.zeros(nMC, dtype=N.float32)
mompara2_MC = N.zeros(nMC, dtype=N.float32)
mompara3_MC = N.zeros(nMC, dtype=N.float32)
mompara4_MC = N.zeros(nMC, dtype=N.float32)
mompara5_MC = N.zeros(nMC, dtype=N.float32)
for i in range(nMC):
# Reconstruct source from component Gaussians. Draw the Gaussian
# parameters from random distributions given by their errors.
subim_src_MC = self.make_subim(subn, subm, g_sublist, delc, mc=True)
try:
mompara_MC = func.momanalmask_gaus(subim_src_MC, mask, isrc, bmar_p, True)
mompara0_MC[i] = mompara_MC[0]
mompara1_MC[i] = mompara_MC[1]
mompara2_MC[i] = mompara_MC[2]
mompara3_MC[i] = mompara_MC[3]
mompara4_MC[i] = mompara_MC[4]
mompara5_MC[i] = mompara_MC[5]
except:
mompara0_MC[i] = mompara[0]
mompara1_MC[i] = mompara[1]
mompara2_MC[i] = mompara[2]
mompara3_MC[i] = mompara[3]
mompara4_MC[i] = mompara[4]
mompara5_MC[i] = mompara[5]
mompara0E = N.std(mompara0_MC)
mompara1E = N.std(mompara1_MC)
if mompara1E > 2.0*mompara[1]:
mompara1E = 2.0*mompara[1] # Don't let errors get too large
mompara2E = N.std(mompara2_MC)
if mompara2E > 2.0*mompara[2]:
mompara2E = 2.0*mompara[2] # Don't let errors get too large
mompara3E = N.std(mompara3_MC)
if mompara3E > 2.0*mompara[3]:
mompara3E = 2.0*mompara[3] # Don't let errors get too large
mompara4E = N.std(mompara4_MC)
if mompara4E > 2.0*mompara[4]:
mompara4E = 2.0*mompara[4] # Don't let errors get too large
mompara5E = N.std(mompara5_MC)
if mompara5E > 2.0*mompara[5]:
mompara5E = 2.0*mompara[5] # Don't let errors get too large
else:
mompara1E = 0.0
mompara2E = 0.0
mompara3E = 0.0
mompara4E = 0.0
mompara5E = 0.0
# Now add MC errors in quadrature with Condon (1997) errors
size_skyE = [sqrt(mompara3E**2 + errors[3]**2) * sqrt(cdeltsq),
sqrt(mompara4E**2 + errors[4]**2) * sqrt(cdeltsq),
sqrt(mompara5E**2 + errors[5]**2)]
sraE, sdecE = (sqrt(mompara1E**2 + errors[1]**2) * sqrt(cdeltsq),
sqrt(mompara2E**2 + errors[2]**2) * sqrt(cdeltsq))
deconv_size_skyE = size_skyE # set deconvolved errors to non-deconvolved ones
# Find aperture flux
if img.opts.aperture_posn == 'centroid':
aper_pos = [mompara[1]+delc[0], mompara[2]+delc[1]]
else:
aper_pos = posn
aper_flux, aper_fluxE = func.ch0_aperture_flux(img, aper_pos, img.aperture)
isl_id = isl.island_id
source_prop = list(['M', [tot, totE], [s_peak, isl.rms], [maxpeak, isl.rms],
[aper_flux, aper_fluxE], [[sra, sdec],
[sraE, sdecE]], [[mra, mdec], [sraE, sdecE]], [size_sky, size_skyE], [size_sky_uncorr, size_skyE],
[deconv_size_sky, deconv_size_skyE], [deconv_size_sky_uncorr, deconv_size_skyE], isl.bbox, len(g_sublist),
isl_id, g_sublist])
source = Source(img, source_prop)
src_index += 1
for g in g_sublist:
g.source_id = src_index
g.code = 'M'
source.source_id = src_index
return src_index, source
##################################################################################################
def make_subim(self, subn, subm, g_list, delc, mc=False):
from . import functions as func
subim = N.zeros((subn, subm), dtype=N.float32)
x, y = N.indices((subn, subm))
for g in g_list:
params = func.g2param(g)
params[1] -= delc[0]; params[2] -= delc[1]
if mc:
# draw random variables from distributions given by errors
params_err = func.g2param_err(g)
for i in range(len(params)):
mc_param = N.random.normal(loc=params[i], scale=params_err[i])
params[i] = mc_param
gau = func.gaus_2d(params, x, y)
subim = subim + gau
return subim
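    # Usage sketch (editor's note): make_subim() is called with the island bounding box,
    # e.g. subim = self.make_subim(subn, subm, g_list, [boxx.start, boxy.start]), and
    # returns the sum of all Gaussians in g_list evaluated on an (subn, subm) grid.
    # With mc=True each Gaussian's parameters are first perturbed by draws from normal
    # distributions whose widths are given by the fitted parameter errors.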
##################################################################################################
def make_mask(self, isl, subn, subm, nsrc, src_id, g_list, delc):
from . import functions as func
# define stuff for calculating gaussian
boxx, boxy = isl.bbox
subn = boxx.stop-boxx.start; subm = boxy.stop-boxy.start
x, y = N.indices((subn, subm))
# construct image of each source in the island
src_image = N.zeros((subn, subm, nsrc), dtype=N.float32)
nn = 1
for isrc in range(nsrc):
if nsrc == 1:
g_sublist = g_list
else:
posn = N.where(src_id == isrc)[0]
g_sublist=[]
for i in posn:
g_sublist.append(g_list[i])
for g in g_sublist:
params = func.g2param(g)
params[1] -= delc[0]; params[2] -= delc[1]
gau = func.gaus_2d(params, x, y)
src_image[:,:,isrc] = src_image[:,:,isrc] + gau
# mark each pixel as belonging to one source
# just compare value, should compare with sigma later
mask = N.argmax(src_image, axis=2) + src_id
orig_mask = isl.mask_active
mask[N.where(orig_mask)] = -1
return mask
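    # Editor's note on the returned mask: active island pixels are labelled with a source
    # id (N.argmax over the per-source model images, offset by src_id), while pixels
    # outside the island (isl.mask_active) are set to -1, since 0 is a valid source id.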
##################################################################################################
# Define class Source
##################################################################################################
from .image import *
class Source(object):
""" Instances of this class store sources made from grouped gaussians. """
def __init__(self, img, sourceprop):
# Add attribute definitions needed for output
self.source_id_def = Int(doc="Source index", colname='Source_id')
self.code_def = String(doc='Source code S, C, or M', colname='S_Code')
self.total_flux_def = Float(doc="Total flux density (Jy)", colname='Total_flux', units='Jy')
self.total_fluxE_def = Float(doc="Error in total flux density (Jy)", colname='E_Total_flux',
units='Jy')
self.peak_flux_centroid_def = Float(doc="Peak flux density per beam at centroid of emission (Jy/beam)",
colname='Peak_flux_cen', units='Jy/beam')
self.peak_flux_centroidE_def = Float(doc="Error in peak flux density per beam at centroid of emission (Jy/beam)",
colname='E_Peak_flux_cen', units='Jy/beam')
self.peak_flux_max_def = Float(doc="Peak flux density per beam at posn of maximum emission (Jy/beam)",
colname='Peak_flux', units='Jy/beam')
self.peak_flux_maxE_def = Float(doc="Error in peak flux density per beam at posn of max emission (Jy/beam)",
colname='E_Peak_flux', units='Jy/beam')
self.aperture_flux_def = Float(doc="Total aperture flux density (Jy)", colname='Aperture_flux',
units='Jy')
self.aperture_fluxE_def = Float(doc="Error in total aperture flux density (Jy)", colname='E_Aperture_flux',
units='Jy')
self.posn_sky_centroid_def = List(Float(), doc="Posn (RA, Dec in deg) of centroid of source",
colname=['RA', 'DEC'], units=['deg', 'deg'])
self.posn_sky_centroidE_def = List(Float(), doc="Error in posn (RA, Dec in deg) of centroid of source",
colname=['E_RA', 'E_DEC'], units=['deg', 'deg'])
self.posn_sky_max_def = List(Float(), doc="Posn (RA, Dec in deg) of maximum emission of source",
colname=['RA_max', 'DEC_max'], units=['deg', 'deg'])
self.posn_sky_maxE_def = List(Float(), doc="Error in posn (deg) of maximum emission of source",
colname=['E_RA_max', 'E_DEC_max'], units=['deg', 'deg'])
self.posn_pix_centroid_def = List(Float(), doc="Position (x, y in pixels) of centroid of source",
colname=['Xposn', 'Yposn'], units=['pix', 'pix'])
self.posn_pix_centroidE_def = List(Float(), doc="Error in position (x, y in pixels) of centroid of source",
colname=['E_Xposn', 'E_Yposn'], units=['pix', 'pix'])
self.posn_pix_max_def = List(Float(), doc="Position (x, y in pixels) of maximum emission of source",
colname=['Xposn_max', 'Yposn_max'], units=['pix', 'pix'])
self.posn_pix_maxE_def = List(Float(), doc="Error in position (pixels) of maximum emission of source",
colname=['E_Xposn_max', 'E_Yposn_max'], units=['pix', 'pix'])
self.size_sky_def = List(Float(), doc="Shape of the source FWHM, BPA, deg",
colname=['Maj', 'Min', 'PA'], units=['deg', 'deg',
'deg'])
self.size_skyE_def = List(Float(), doc="Error on shape of the source FWHM, BPA, deg",
colname=['E_Maj', 'E_Min', 'E_PA'], units=['deg', 'deg',
'deg'])
self.deconv_size_sky_def = List(Float(), doc="Deconvolved shape of the source FWHM, BPA, deg",
colname=['DC_Maj', 'DC_Min', 'DC_PA'], units=['deg', 'deg',
'deg'])
self.deconv_size_skyE_def = List(Float(), doc="Error on deconvolved shape of the source FWHM, BPA, deg",
colname=['E_DC_Maj', 'E_DC_Min', 'E_DC_PA'], units=['deg', 'deg',
'deg'])
self.size_sky_uncorr_def = List(Float(), doc="Shape in image plane of the gaussian FWHM, PA, deg",
colname=['Maj_img_plane', 'Min_img_plane', 'PA_img_plane'], units=['deg', 'deg',
'deg'])
self.size_skyE_uncorr_def = List(Float(), doc="Error on shape in image plane of the gaussian FWHM, PA, deg",
colname=['E_Maj_img_plane', 'E_Min_img_plane', 'E_PA_img_plane'], units=['deg', 'deg',
'deg'])
self.deconv_size_sky_uncorr_def = List(Float(), doc="Deconvolved shape in image plane of the gaussian FWHM, PA, deg",
colname=['DC_Maj_img_plane', 'DC_Min_img_plane', 'DC_PA_img_plane'], units=['deg', 'deg',
'deg'])
self.deconv_size_skyE_uncorr_def = List(Float(), doc="Error on deconvolved shape in image plane of the gaussian FWHM, PA, deg",
colname=['E_DC_Maj_img_plane', 'E_DC_Min_img_plane', 'E_DC_PA_img_plane'], units=['deg', 'deg',
'deg'])
self.rms_isl_def = Float(doc="Island rms Jy/beam", colname='Isl_rms', units='Jy/beam')
self.mean_isl_def = Float(doc="Island mean Jy/beam", colname='Isl_mean', units='Jy/beam')
self.total_flux_isl_def = Float(doc="Island total flux from sum of pixels", colname='Isl_Total_flux', units='Jy')
self.total_flux_islE_def = Float(doc="Error on island total flux from sum of pixels", colname='E_Isl_Total_flux', units='Jy')
self.gresid_rms_def = Float(doc="Island rms in Gaussian residual image Jy/beam",
colname='Resid_Isl_rms', units='Jy/beam')
self.gresid_mean_def = Float(doc="Island mean in Gaussian residual image Jy/beam",
colname='Resid_Isl_mean', units='Jy/beam')
self.sresid_rms_def = Float(doc="Island rms in Shapelet residual image Jy/beam",
colname='Resid_Isl_rms', units='Jy/beam')
self.sresid_mean_def = Float(doc="Island mean in Shapelet residual image Jy/beam",
colname='Resid_Isl_mean', units='Jy/beam')
self.ngaus_def = Int(doc='Number of gaussians in the source', colname='N_gaus')
self.island_id_def = Int(doc="Serial number of the island", colname='Isl_id')
self.bbox_def = List(Instance(slice(0), or_none=False), doc = "")
self.spec_indx_def = Float(doc = "Spectral index", colname='Spec_Indx', units=None)
self.e_spec_indx_def = Float(doc = "Error in spectral index", colname='E_Spec_Indx', units=None)
self.specin_flux_def = List(Float(), doc = "Total flux density, Jy", colname=['Total_flux'], units=['Jy'])
self.specin_fluxE_def = List(Float(), doc = "Error in total flux density per channel, Jy", colname=['E_Total_flux'], units=['Jy'])
self.specin_freq_def = List(Float(), doc = "Frequency per channel, Hz", colname=['Freq'], units=['Hz'])
code, total_flux, peak_flux_centroid, peak_flux_max, aper_flux, posn_sky_centroid, \
posn_sky_max, size_sky, size_sky_uncorr, deconv_size_sky, \
deconv_size_sky_uncorr, bbox, ngaus, island_id, gaussians = sourceprop
self.code = code
self.total_flux, self.total_fluxE = total_flux
self.peak_flux_centroid, self.peak_flux_centroidE = peak_flux_centroid
self.peak_flux_max, self.peak_flux_maxE = peak_flux_max
self.posn_sky_centroid, self.posn_sky_centroidE = posn_sky_centroid
self.posn_sky_max, self.posn_sky_maxE = posn_sky_max
self.size_sky, self.size_skyE = size_sky
self.size_sky_uncorr, self.size_skyE_uncorr = size_sky_uncorr
self.deconv_size_sky, self.deconv_size_skyE = deconv_size_sky
self.deconv_size_sky_uncorr, self.deconv_size_skyE_uncorr = deconv_size_sky_uncorr
self.bbox = bbox
self.ngaus = ngaus
self.island_id = island_id
self.gaussians = gaussians
self.rms_isl = img.islands[island_id].rms
self.mean_isl = img.islands[island_id].mean
self.total_flux_isl = img.islands[island_id].total_flux
self.total_flux_islE = img.islands[island_id].total_fluxE
self.jlevel = img.j
self.aperture_flux, self.aperture_fluxE = aper_flux
| 34,935 | 49.927114 | 142 |
py
|
PyBDSF
|
PyBDSF-master/bdsf/interface.py
|
"""Interface module.
The interface module handles all functions typically needed by the user in an
interactive environment such as IPython. Many are also used by the
custom IPython shell defined in pybdsf.
"""
from __future__ import print_function
from __future__ import absolute_import
try:
# For Python 2, use raw_input() for input()
input = raw_input
except NameError:
pass
def process(img, **kwargs):
"""Find and measure sources in an image.
This function is used by process_image in __init__.py and by process_image
in pybdsf. It is also used as a method of the Image object in image.py
to allow reprocessing of existing Image objects with the command
img.process().
Any options given as keyword arguments will override existing ones stored
in img.opts.
"""
from . import default_chain, _run_op_list
from .image import Image
from . import mylogger
from .functions import set_up_output_paths
import os
# Start up logger. We need to initialize it each time process() is
# called, in case the quiet or debug options have changed
_, basedir = set_up_output_paths(img.opts)
basename = os.path.basename(img.opts.filename) + '.pybdsf.log'
logfilename = os.path.join(basedir, basename)
img.log = ''
mylogger.init_logger(logfilename, quiet=img.opts.quiet,
debug=img.opts.debug)
add_break_to_logfile(logfilename)
mylog = mylogger.logging.getLogger("PyBDSF.Process")
mylog.info("Processing "+img.opts.filename)
try:
# set options if given
if len(kwargs) > 0:
set_pars(img, **kwargs)
except RuntimeError as err:
# Catch and log error
mylog.error(str(err))
# Re-throw error if the user is not in the interactive shell
if img._is_interactive_shell:
return False
else:
raise
# Run all the op's
try:
# Run op's in chain
img, op_chain = get_op_chain(img)
if op_chain is not None:
_run_op_list(img, op_chain)
img._prev_opts = img.opts.to_dict()
return True
except RuntimeError as err:
# Catch and log error
mylog.error(str(err))
# Re-throw error if the user is not in the interactive shell
if img._is_interactive_shell:
return False
else:
raise
except KeyboardInterrupt:
mylogger.userinfo(mylog, "\n\033[31;1mAborted\033[0m")
if img._is_interactive_shell:
return False
else:
raise
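# Usage sketch (editor's example; assumes an existing Image object `img` created
# elsewhere, e.g. by bdsf.process_image or load_pars):
#   >>> success = process(img, thresh_isl=4.0, thresh_pix=5.0)
#   >>> if success:
#   ...     write_catalog(img, format='fits', catalog_type='srl', clobber=True)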
def get_op_chain(img):
"""Determines the optimal Op chain for an Image object.
This is useful when reprocessing an Image object. For example,
if Gaussians were already fit, but the user now wants to use
shapelets, we do not need to re-run Op_gausfit, etc.
Note that any new options added to opts.py should also be
added here. If not, a full reprocessing will be done if the
new option is changed.
"""
from . import default_chain
Op_chain = default_chain[:]
Op_names = ['readimage',
'collapse',
'preprocess',
'rmsimage',
'threshold',
'islands',
'gausfit',
'wavelet_atrous',
'shapelets',
'gaul2srl',
'spectralindex',
'polarisation',
'make_residimage',
'psf_vary',
'outlist',
'cleanup']
prev_opts = img._prev_opts
if prev_opts is None:
return img, default_chain
new_opts = img.opts.to_dict()
# Set the hidden options, which should include any option whose change
# should not trigger a process_image action
hidden_opts = img.opts.get_names(group='hidden')
hidden_opts.append('advanced_opts')
hidden_opts.append('flagging_opts')
hidden_opts.append('multichan_opts')
hidden_opts.append('output_opts')
# Define lists of options for each Op. Some of these can be defined
# using the "group" parameter of each option.
#
# Op_readimage()
readimage_opts = ['filename', 'beam', 'trim_box', 'frequency',
'beam_spectrum', 'frequency_sp']
# Op_collapse()
collapse_opts = img.opts.get_names(group='multichan_opts')
collapse_opts.append('polarisation_do')
collapse_opts += readimage_opts
# Op_preprocess()
preprocess_opts = ['kappa_clip', 'polarisation_do']
preprocess_opts += collapse_opts
# Op_rmsimage()
rmsimage_opts = ['rms_box', 'rms_box_bright', 'adaptive_rms_box',
'mean_map', 'rms_map', 'adaptive_thresh', 'rms_box_bright']
rmsimage_opts += preprocess_opts
# Op_threshold()
threshold_opts = ['thresh', 'thresh_pix', 'thresh_isl']
threshold_opts += rmsimage_opts
# Op_islands()
islands_opts = threshold_opts
islands_opts.append('minpix_isl')
# Op_gausfit()
gausfit_opts = ['verbose_fitting']
gausfit_opts += islands_opts
gausfit_opts += img.opts.get_names(group='flagging_opts')
# Op_wavelet_atrous()
wavelet_atrous_opts = img.opts.get_names(group='atrous_do')
wavelet_atrous_opts.append('atrous_do')
wavelet_atrous_opts += gausfit_opts
# Op_shapelets()
shapelets_opts = img.opts.get_names(group='shapelet_do')
shapelets_opts.append('shapelet_do')
shapelets_opts += islands_opts
# Op_gaul2srl()
gaul2srl_opts = ['group_tol', 'group_by_isl', 'group_method']
gaul2srl_opts += gausfit_opts
gaul2srl_opts += wavelet_atrous_opts
# Op_spectralindex()
spectralindex_opts = img.opts.get_names(group='spectralindex_do')
spectralindex_opts.append('spectralindex_do')
spectralindex_opts += gaul2srl_opts
# Op_polarisation()
polarisation_opts = img.opts.get_names(group='polarisation_do')
polarisation_opts.append('polarisation_do')
polarisation_opts += gaul2srl_opts
# Op_make_residimage()
make_residimage_opts = ['fittedimage_clip']
make_residimage_opts += gausfit_opts
make_residimage_opts += wavelet_atrous_opts
make_residimage_opts += shapelets_opts
# Op_psf_vary()
psf_vary_opts = img.opts.get_names(group='psf_vary_do')
psf_vary_opts.append('psf_vary_do')
psf_vary_opts += gaul2srl_opts
# Op_outlist() and Op_cleanup() are always done.
# Find whether new opts differ from previous opts (and are not hidden
# opts, which should not be checked). If so, found = True and we reset
# the relevant image parameters and add the relevant Op to the Op_chain.
re_run = False
found = False
for k, v in prev_opts.items():
if v != new_opts[k] and k not in hidden_opts:
re_run = True
if k in readimage_opts:
if hasattr(img, 'use_io'): del img.use_io
if hasattr(img, 'image_arr'): del img.image_arr
while 'readimage' in img.completed_Ops:
img.completed_Ops.remove('readimage')
found = True
if k in collapse_opts:
if hasattr(img, 'mask_arr'): del img.mask_arr
if hasattr(img, 'ch0_arr'): del img.ch0_arr
while 'collapse' in img.completed_Ops:
img.completed_Ops.remove('collapse')
found = True
if k in preprocess_opts:
while 'preprocess' in img.completed_Ops:
img.completed_Ops.remove('preprocess')
found = True
if k in rmsimage_opts:
if hasattr(img, 'rms_arr'): del img.rms_arr
if hasattr(img, 'mean_arr'): del img.mean_arr
if hasattr(img, 'rms_Q_arr'): del img.rms_Q_arr
if hasattr(img, 'mean_Q_arr'): del img.mean_Q_arr
if hasattr(img, 'rms_U_arr'): del img.rms_U_arr
if hasattr(img, 'mean_U_arr'): del img.mean_U_arr
if hasattr(img, 'rms_V_arr'): del img.rms_V_arr
if hasattr(img, 'mean_V_arr'): del img.mean_V_arr
if hasattr(img, '_adapt_rms_isl_pos'): del img._adapt_rms_isl_pos
while 'rmsimage' in img.completed_Ops:
img.completed_Ops.remove('rmsimage')
found = True
if k in threshold_opts:
while 'threshold' in img.completed_Ops:
img.completed_Ops.remove('threshold')
found = True
if k in islands_opts:
if hasattr(img, 'islands'): del img.islands
while 'islands' in img.completed_Ops:
img.completed_Ops.remove('islands')
found = True
if k in gausfit_opts:
if hasattr(img, 'sources'): del img.sources
if hasattr(img, 'dsources'): del img.dsources
if hasattr(img, 'gaussians'): del img.gaussians
while 'gausfit' in img.completed_Ops:
img.completed_Ops.remove('gausfit')
found = True
if k in wavelet_atrous_opts:
if hasattr(img, 'atrous_gaussians'): del img.atrous_gaussians
if hasattr(img, 'islands'): del img.islands
if hasattr(img, 'sources'): del img.sources
if hasattr(img, 'dsources'): del img.dsources
if hasattr(img, 'gaussians'): del img.gaussians
while 'islands' in img.completed_Ops:
img.completed_Ops.remove('islands')
while 'gausfit' in img.completed_Ops:
img.completed_Ops.remove('gausfit')
while 'wavelet_atrous' in img.completed_Ops:
img.completed_Ops.remove('wavelet_atrous')
found = True
if k in shapelets_opts:
while 'shapelets' in img.completed_Ops:
img.completed_Ops.remove('shapelets')
found = True
if k in gaul2srl_opts:
while 'gaul2srl' in img.completed_Ops:
img.completed_Ops.remove('gaul2srl')
found = True
if k in spectralindex_opts:
while 'spectralindex' in img.completed_Ops:
img.completed_Ops.remove('spectralindex')
found = True
if k in polarisation_opts:
while 'polarisation' in img.completed_Ops:
img.completed_Ops.remove('polarisation')
found = True
if k in make_residimage_opts:
if hasattr(img, 'resid_gaus_arr'):
del img.resid_gaus_arr
img.resid_gaus_arr = None # set to init state
if hasattr(img, 'model_gaus_arr'): del img.model_gaus_arr
if hasattr(img, 'resid_shap_arr'): del img.resid_shap_arr
if hasattr(img, 'model_shap_arr'): del img.model_shap_arr
while 'make_residimage' in img.completed_Ops:
img.completed_Ops.remove('make_residimage')
found = True
if k in psf_vary_opts:
while 'psf_vary' in img.completed_Ops:
img.completed_Ops.remove('psf_vary')
found = True
if not found:
break
# If nothing has changed, ask if user wants to re-run
if not found and not re_run:
prompt = "Analysis appears to be up-to-date. Force reprocessing (y/n)? "
answ = raw_input_no_history(prompt)
while answ.lower() not in ['y', 'n', 'yes', 'no']:
answ = raw_input_no_history(prompt)
if answ.lower() in ['y', 'yes']:
re_run = True # Force re-run
else:
return img, None
# If a changed option is not in any of the above lists,
# force a re-run of all Ops.
if not found:
img.completed_Ops = []
if hasattr(img, 'use_io'): del img.use_io
if hasattr(img, 'image_arr'): del img.image_arr
if hasattr(img, 'mask_arr'): del img.mask_arr
if hasattr(img, 'ch0_arr'): del img.ch0_arr
if hasattr(img, 'rms_arr'): del img.rms_arr
if hasattr(img, 'mean_arr'): del img.mean_arr
if hasattr(img, 'rms_Q_arr'): del img.rms_Q_arr
if hasattr(img, 'mean_Q_arr'): del img.mean_Q_arr
if hasattr(img, 'rms_U_arr'): del img.rms_U_arr
if hasattr(img, 'mean_U_arr'): del img.mean_U_arr
if hasattr(img, 'rms_V_arr'): del img.rms_V_arr
if hasattr(img, 'mean_V_arr'): del img.mean_V_arr
if hasattr(img, 'islands'): del img.islands
if hasattr(img, 'sources'): del img.sources
if hasattr(img, 'dsources'): del img.dsources
if hasattr(img, 'gaussians'): del img.gaussians
if hasattr(img, 'atrous_gaussians'): del img.atrous_gaussians
if hasattr(img, 'resid_gaus_arr'): del img.resid_gaus_arr
if hasattr(img, 'model_gaus_arr'): del img.model_gaus_arr
if hasattr(img, 'resid_shap_arr'): del img.resid_shap_arr
if hasattr(img, 'model_shap_arr'): del img.model_shap_arr
if hasattr(img, '_adapt_rms_isl_pos'): del img._adapt_rms_isl_pos
return img, Op_chain
while 'outlist' in img.completed_Ops:
img.completed_Ops.remove('outlist')
while 'cleanup' in img.completed_Ops:
img.completed_Ops.remove('cleanup')
for completed_Op in img.completed_Ops:
if completed_Op in Op_names:
Op_indx = Op_names.index(completed_Op)
Op_names.pop(Op_indx)
Op_chain.pop(Op_indx)
return img, Op_chain
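# Example of the pruning above (editor's note): if only 'thresh_isl' changed since the
# last run, it appears in threshold_opts (and hence in the option lists of all downstream
# Ops), so 'threshold', 'islands', 'gausfit', etc. are removed from img.completed_Ops and
# re-run, while 'readimage', 'collapse', 'preprocess' and 'rmsimage' are left in place
# and skipped.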
def load_pars(filename):
"""Load parameters from a save file or dictionary.
If a file is given, it must be a pickled opts dictionary.
filename - name of options file to load or a dictionary of opts.
Returns None (and original error) if no file can be loaded successfully.
"""
from .image import Image
from . import mylogger
try:
import cPickle as pickle
except ImportError:
import pickle
# First, check if input is a dictionary
if isinstance(filename, dict):
timg = Image(filename)
return timg, None
else:
try:
pkl_file = open(filename, 'rb')
pars = pickle.load(pkl_file)
pkl_file.close()
timg = Image(pars)
print("--> Loaded parameters from file '" + filename + "'.")
return timg, None
except Exception as err:
return None, err
def save_pars(img, savefile=None, quiet=False):
"""Save parameters to a file.
The save file is a "pickled" opts dictionary.
"""
try:
import cPickle as pickle
except ImportError:
import pickle
    from . import tc
    import os
    import sys
if savefile is None or savefile == '':
basename = os.path.basename(img.opts.filename) + '.pybdsf.sav'
savefile = os.path.join(img.basedir, basename)
# convert opts to dictionary
pars = img.opts.to_dict()
output = open(savefile, 'wb')
pickle.dump(pars, output, protocol=0)
output.close()
if not quiet:
print("--> Saved parameters to file '" + savefile + "'.")
def list_pars(img, opts_list=None, banner=None, use_groups=True):
"""Lists all parameters for the Image object.
opts_list - a list of the parameter names to list;
if None, all parameters are used.
banner - banner text to place at top of listing.
use_groups - whether to use the group information for each
parameter.
"""
from . import tc
import sys
# Get all options as a list sorted by name
opts = img.opts.to_list()
# Filter list
if opts_list is not None:
opts_temp = []
for o in opts:
if o[0] in opts_list:
opts_temp.append(o)
opts = opts_temp
# Move filename, infile, outfile to front of list
for o in opts:
if o[0] == 'filename' or o[0] == 'infile' or o[0] == 'outfile':
opts.remove(o)
opts.insert(0, o)
# Now group options with the same "group" together.
if use_groups:
opts = group_opts(opts)
# Finally, print options, values, and doc strings to screen
print_opts(opts, img, banner=banner)
def set_pars(img, **kwargs):
"""Set parameters using arguments instead of using a dictionary.
Allows partial names for parameters as long as they are unique. Parameters
are set to default values if par = ''.
"""
import re
import sys
from .image import Image
# Enumerate all options
opts = img.opts.get_names()
# Check that parameters are valid options and are unique
full_key = []
for i, key in enumerate(kwargs):
chk_key = checkpars(opts, key)
if chk_key == []:
raise RuntimeError("Input parameter '" + key + "' not recognized.")
if len(chk_key) > 1 and key not in opts:
raise RuntimeError("Input parameter '" + key + "' matches to more than one "\
"possible parameter:\n " + "\n ".join(chk_key))
if key in opts:
full_key.append(key)
else:
full_key.append(chk_key[0])
# Build options dictionary
pars = {}
for i, key in enumerate(kwargs):
if kwargs[key] == '':
temp_img = Image({'filename':''})
opt_names = temp_img.opts.get_names()
for k in opt_names:
if key == k:
kwargs[key] = temp_img.opts.__getattribute__(k)
pars.update({full_key[i]: kwargs[key]})
# Finally, set the options
img.opts.set_opts(pars)
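# Usage sketch (editor's example): option names may be abbreviated as long as the prefix
# is unique, and '' restores the default value:
#   >>> set_pars(img, rms_box=(60, 20), thresh_isl=3.0)
#   >>> set_pars(img, thresh_isl='')   # reset thresh_isl to its default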
def group_opts(opts):
"""Sorts options by group (as defined in opts.py).
Returns a list of options, with suboptions arranged in a list inside the
main list and directly following the main options. Options belonging to the
"hidden" group are excluded from the returned list (as defined in opts.py).
"""
groups = []
gp = []
for i in range(len(opts)):
grp = opts[i][1].group()
if grp is not None and grp not in groups:
groups.append(opts[i][1].group())
groups.sort()
# Now, make a list for each group with its options. Don't include
# "hidden" options, as they should never by seen by the user.
for g in groups:
g_list = []
for i in range(len(opts)):
if isinstance(opts[i], tuple):
if g == str(opts[i][1].group()):
g_list.append(opts[i])
for gs in g_list:
opts.remove(gs)
for i in range(len(opts)):
if g == str(opts[i][0]) and g != 'hidden':
opts.insert(i+1, g_list)
break
return opts
def print_opts(grouped_opts_list, img, banner=None):
"""Print options to screen.
Options can be sorted by group (defined in opts.py) previously defined by
group_opts. Output of grouped items is suppressed if parent option is
False. The layout is as follows:
[20 spaces par name with ...] = [at least 49 spaces for value]
[at least 49 spaces for doc]
When more than one line is required for the doc, the next line is:
[25 blank spaces][at least 47 spaces for doc]
    As in casapy, print non-defaults in blue, options with suboptions on the
    '47m' background colour, and suboptions in green. Option values are printed
    in bold, to help distinguish them from the descriptions. NOTE: in iTerm, one needs
to set the bold color in the profiles to white, as it defaults to red,
which is a bit hard on the eyes in this case.
"""
from .image import Image
import os
from . import functions as func
termy, termx = func.getTerminalSize() # note: returns row, col -> y, x
minwidth = 28 # minimum width for parameter names and values
# Define colors for output
dc = '\033[1;34m' # Blue: non-default option text color
ec = '\033[0;47m' # expandable option text color
sc = '\033[0;32m' # Green: suboption text color
nc = '\033[0m' # normal text color
ncb = '\033[1m' # normal text color bold
if banner is not None:
print(banner)
spcstr = ' ' * minwidth # spaces string for second or later lines
infix = nc + ': ' + nc # infix character used to separate values from comments
print('=' * termx) # division string for top of parameter listing
for indx, o in enumerate(grouped_opts_list):
if isinstance(o, tuple):
# Print main options, which are always tuples, before printing
# suboptions (if any).
k = o[0]
v = o[1]
val = img.opts.__getattribute__(k)
v1 = v2 = ''
if val == v._default:
# value is default
v1 = ncb
v2 = nc
else:
# value is non-default
v1 = dc
v2 = nc
if isinstance(val, str):
valstr = v1 + repr(val) + v2
if k == 'filename':
# Since we can check whether filename is valid,
# do so here and print in red if not.
if not os.path.exists(val):
valstr = '\033[31;1m' + repr(val) + nc
width_par_val = max(minwidth, len(k) + len(str(val)) + 5)
else:
if isinstance(val, float):
val = round_float(val)
if isinstance(val, tuple):
val = round_tuple(val)
valstr = v1 + str(val) + v2
width_par_val = max(minwidth, len(k) + len(str(val)) + 4)
width_desc = max(termx - width_par_val - 3, 44)
# Get the option description text from the doc string, which
# is defined in opts.py. By convention, print_opts will only
# show the short description; help('option_name') will
# print both the short and long description. The versions
# are separated in the doc string by '\n', which is split
# on here:
desc_text = wrap(str(v.doc()).split('\n')[0], width_desc)
fmt = '%' + str(minwidth) + 's' + infix + '%44s'
# Now loop over lines of description
if indx < len(grouped_opts_list)-1:
# Here we check if next entry in options list is a tuple or a
# list. If it is a list, then the current option has
# suboptions and should be in the ec color. Since we check the
# next option, we can't do this if we let indx go to the end.
if isinstance(grouped_opts_list[indx+1], tuple):
parvalstr = nc + k + nc + ' ..'
else:
parvalstr = ec + k + nc + ' ..'
else:
# Since this is the last entry in the options list and is a
# tuple, it cannot be an expandable option, so make it nc color
parvalstr = nc + k + nc + ' ..'
if "'" in valstr:
len_without_formatting = len(k) + len(str(val)) + 5
else:
len_without_formatting = len(k) + len(str(val)) + 4
for i in range(len_without_formatting, minwidth):
parvalstr += '.'
parvalstr += ' ' + valstr
if "'" not in valstr:
parvalstr += ' '
for dt_indx, dt in enumerate(desc_text):
if dt_indx == 0:
print(fmt % (parvalstr.ljust(minwidth), dt.ljust(44)))
else:
print(nc + spcstr + ' %44s' % dt.ljust(44))
else:
# Print suboptions, indented 2 spaces from main options in sc color
parent_opt = grouped_opts_list[indx-1]
parent_val = img.opts.__getattribute__(parent_opt[0])
if parent_val == True:
for og in o:
k = og[0]
v = og[1]
val = img.opts.__getattribute__(k)
v1 = v2 = ''
if val == v._default:
# value is default
v1 = ncb
v2 = nc
else:
# value is non-default
v1 = dc
v2 = nc
if isinstance(val, str):
valstr = v1 + repr(val) + v2
width_par_val = max(minwidth, len(k) + len(str(val)) + 7)
else:
if isinstance(val, float):
val = round_float(val)
if k == 'beam_spectrum' and val is not None:
val = round_list_of_tuples(val)
if k == 'frequency_sp' and val is not None:
val = round_list(val)
valstr = v1 + str(val) + v2
width_par_val = max(minwidth, len(k) + len(str(val)) + 6)
width_desc = max(termx - width_par_val - 3, 44)
desc_text = wrap(str(v.doc()).split('\n')[0], width_desc)
fmt = ' ' + '%' + str(minwidth) + 's' + infix + '%44s'
parvalstr = sc + k + nc + ' ..'
if "'" in valstr:
len_without_formatting = len(k) + len(str(val)) + 7
else:
len_without_formatting = len(k) + len(str(val)) + 6
for i in range(len_without_formatting, minwidth):
parvalstr += '.'
parvalstr += ' ' + valstr
if "'" not in valstr:
parvalstr += ' '
for dt_indx, dt in enumerate(desc_text):
if dt_indx == 0:
print(fmt % (parvalstr.ljust(minwidth-2), dt.ljust(44)))
else:
print(nc + spcstr + ' %44s' % dt.ljust(44))
def wrap(text, width=80):
"""Wraps text to given width and returns list of lines."""
lines = []
for paragraph in text.split('\n'):
line = []
len_line = 0
for word in paragraph.split(' '):
            word = word.strip()  # strip() returns a new string, so assign the result
len_word = len(word)
if len_line + len_word <= width:
line.append(word)
len_line += len_word + 1
else:
lines.append(' '.join(line))
line = [word]
len_line = len_word + 1
lines.append(' '.join(line))
return lines
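# Doctest-style sketch (editor's example): wrap() breaks on single spaces only, so a
# word longer than `width` still gets a line of its own.
#   >>> wrap('a bb ccc', width=4)
#   ['a bb', 'ccc']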
def checkpars(lines, regex):
"""Checks that parameters are unique"""
import re
result = []
for l in lines:
match = re.match(regex,l)
if match:
result += [l]
return result
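# Editor's example: because re.match() anchors at the start of the string, a partial
# parameter name acts as a prefix filter.
#   >>> checkpars(['thresh_isl', 'thresh_pix', 'rms_box'], 'thresh')
#   ['thresh_isl', 'thresh_pix']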
def in_ipython():
"""Checks if interpreter is IPython."""
try:
__IPYTHON__
except NameError:
return False
else:
return True
def raw_input_no_history(prompt):
"""Removes user input from readline history."""
import readline
userinput = input(prompt)
if userinput != '':
readline.remove_history_item(readline.get_current_history_length()-1)
return userinput
# The following functions just make the printing of
# parameters look better
def round_tuple(val):
valstr_list = []
for v in val:
vstr = '%s' % (round(v, 5))
if len(vstr) > 7:
vstr = '%.5f' % (v,)
valstr_list.append(vstr)
valstr = '(' + ','.join(valstr_list) + ')'
return valstr
def round_float(val):
vstr = '%s' % (round(val, 5))
if len(vstr) > 7 and val < 1e3:
vstr = '%.5f' % (val,)
elif len(vstr) > 7 and val >= 1e3:
vstr = '%.2e' % (val,)
return vstr
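# Editor's examples of the rounding helper (illustrative values):
#   >>> round_float(0.123456789)
#   '0.12346'
#   >>> round_float(1234.5678)
#   '1.23e+03'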
def round_list(val):
valstr_list = []
for v in val:
valstr_list.append('%.2e' % (v,))
valstr = '[' + ','.join(valstr_list) + ']'
return valstr
def round_list_of_tuples(val):
valstr_list = []
valstr_list_tot = []
for l in val:
for v in l:
vstr = '%s' % (round(v, 5))
if len(vstr) > 7:
vstr = '%.5f' % (v,)
valstr_list.append(vstr)
valstr = '(' + ','.join(valstr_list) + ')'
valstr_list_tot.append(valstr)
valstr = '[' + ','.join(valstr_list_tot) + ']'
return valstr
# The following functions give convenient access to the output functions in
# output.py
def export_image(img, outfile=None, img_format='fits', pad_image = False,
img_type='gaus_resid', mask_dilation=0, clobber=False):
"""Write an image to a file. Returns True if successful, False if not.
outfile - name of resulting file; if None, file is
named automatically.
img_type - type of image to export; see below
img_format - format of resulting file: 'fits' or 'casa'
    pad_image - pad the image to its original (untrimmed) size?
    mask_dilation - number of binary dilation iterations to apply to
    the island mask (img_type = 'island_mask' only)
clobber - overwrite existing file?
The following images may be exported:
'ch0' - image used for source detection
'rms' - rms map image
'mean' - mean map image
'pi' - polarized intensity image
'gaus_resid' - Gaussian model residual image
'gaus_model' - Gaussian model image
'shap_resid' - Shapelet model residual image
'shap_model' - Shapelet model image
'psf_major' - PSF major axis FWHM image (FWHM in arcsec)
'psf_minor' - PSF minor axis FWHM image (FWHM in arcsec)
'psf_pa' - PSF position angle image (degrees east of north)
'psf_ratio' - PSF peak-to-total flux ratio (in units of 1/beam)
'psf_ratio_aper' - PSF peak-to-aperture flux ratio (in units of 1/beam)
'island_mask' - Island mask image (0 = outside island, 1 = inside island)
"""
import os
from . import functions as func
from .const import fwsig
from . import mylogger
mylog = mylogger.logging.getLogger("PyBDSF."+img.log+"ExportImage")
# First some checking:
if not 'gausfit' in img.completed_Ops and 'gaus' in img_type:
print('\033[91mERROR\033[0m: Gaussians have not been fit. Please run process_image first.')
return False
elif not 'shapelets' in img.completed_Ops and 'shap' in img_type:
print('\033[91mERROR\033[0m: Shapelets have not been fit. Please run process_image first.')
return False
elif not 'polarisation' in img.completed_Ops and 'pi' in img_type:
print('\033[91mERROR\033[0m: Polarization properties have not been calculated. Please run process_image first.')
return False
elif not 'psf_vary' in img.completed_Ops and 'psf' in img_type:
print('\033[91mERROR\033[0m: PSF variations have not been calculated. Please run process_image first.')
return False
elif not 'collapse' in img.completed_Ops and 'ch0' in img_type:
print('\033[91mERROR\033[0m: ch0 image has not been calculated. Please run process_image first.')
return False
elif not 'rmsimage' in img.completed_Ops and ('rms' in img_type or 'mean' in img_type):
print('\033[91mERROR\033[0m: Mean and rms maps have not been calculated. Please run process_image first.')
return False
elif not 'make_residimage' in img.completed_Ops and ('resid' in img_type or 'model' in img_type):
print('\033[91mERROR\033[0m: Residual and model maps have not been calculated. Please run process_image first.')
return False
format = img_format.lower()
    if format not in ['fits', 'casa']:
print('\033[91mERROR\033[0m: img_format must be "fits" or "casa"')
return False
filename = outfile
if filename is None or filename == '':
filename = img.imagename + '_' + img_type + '.' + format
if os.path.exists(filename) and clobber == False:
print('\033[91mERROR\033[0m: File exists and clobber = False.')
return False
if format == 'fits':
use_io = 'fits'
if format == 'casa':
use_io = 'rap'
bdir = ''
try:
if img_type == 'ch0':
func.write_image_to_file(use_io, filename,
img.ch0_arr, img, bdir, pad_image,
clobber=clobber)
elif img_type == 'rms':
func.write_image_to_file(use_io, filename,
img.rms_arr, img, bdir, pad_image,
clobber=clobber)
elif img_type == 'mean':
func.write_image_to_file(use_io, filename,
img.mean_arr, img, bdir, pad_image,
clobber=clobber)
elif img_type == 'pi':
func.write_image_to_file(use_io, filename,
img.ch0_pi_arr, img, bdir, pad_image,
clobber=clobber)
elif img_type == 'psf_major':
func.write_image_to_file(use_io, filename,
img.psf_vary_maj_arr*fwsig, img, bdir, pad_image,
clobber=clobber)
elif img_type == 'psf_minor':
func.write_image_to_file(use_io, filename,
img.psf_vary_min_arr*fwsig, img, bdir, pad_image,
clobber=clobber)
elif img_type == 'psf_pa':
func.write_image_to_file(use_io, filename,
img.psf_vary_pa_arr, img, bdir, pad_image,
clobber=clobber)
elif img_type == 'psf_ratio':
func.write_image_to_file(use_io, filename,
img.psf_vary_ratio_arr, img, bdir, pad_image,
clobber=clobber)
elif img_type == 'psf_ratio_aper':
func.write_image_to_file(use_io, filename,
img.psf_vary_ratio_aper_arr, img, bdir, pad_image,
clobber=clobber)
elif img_type == 'gaus_resid':
im = img.resid_gaus_arr
func.write_image_to_file(use_io, filename,
im, img, bdir, pad_image,
clobber=clobber)
elif img_type == 'gaus_model':
im = img.model_gaus_arr
func.write_image_to_file(use_io, filename,
im, img, bdir, pad_image,
clobber=clobber)
elif img_type == 'shap_resid':
func.write_image_to_file(use_io, filename,
img.resid_shap_arr, img, bdir, pad_image,
clobber=clobber)
elif img_type == 'shap_model':
func.write_image_to_file(use_io, filename,
img.model_shap_arr, img, bdir, pad_image,
clobber=clobber)
elif img_type == 'island_mask':
import numpy as N
import scipy.ndimage as nd
island_mask_bool = img.pyrank + 1 > 0
if mask_dilation > 0:
# Dilate the mask by specified number of iterations
island_mask_bool = nd.binary_dilation(island_mask_bool,
iterations=mask_dilation)
# Perform a binary closing to remove small holes/gaps. The
# structure array is chosen to be about the size of the
# beam (assuming a normally sampled psf), so that holes/gaps
# smaller than the beam are removed.
pbeam = int(round(img.beam2pix(img.beam)[0] * 1.5))
island_mask_bool = nd.binary_closing(island_mask_bool,
structure=N.ones((pbeam, pbeam)))
# Check for telescope, needed for CASA clean masks
if img._telescope is None:
print('\033[91mWARNING\033[0m: Telescope is unknown. Mask may not work correctly in CASA.')
island_mask = N.array(island_mask_bool, dtype=N.float32)
func.write_image_to_file(use_io, filename,
island_mask, img, bdir, pad_image,
clobber=clobber, is_mask=True)
else:
print("\n\033[91mERROR\033[0m: img_type not recognized.")
return False
if filename == 'SAMP':
            print('--> Image sent to SAMP hub')
else:
print('--> Wrote file ' + repr(filename))
if use_io == 'rap':
# remove the temporary fits file used as a casacore template
import os
os.remove(filename+'.fits')
return True
except RuntimeError as err:
# Catch and log error
mylog.error(str(err))
# Re-throw error if the user is not in the interactive shell
if img._is_interactive_shell:
return False
else:
raise
except KeyboardInterrupt:
mylogger.userinfo(mylog, "\n\033[31;1mAborted\033[0m")
if img._is_interactive_shell:
return False
else:
raise
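# Usage sketch (editor's example; 'myimage_resid.fits' is a hypothetical file name):
#   >>> export_image(img, img_type='gaus_resid', img_format='fits',
#   ...              outfile='myimage_resid.fits', clobber=True)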
def write_catalog(img, outfile=None, format='bbs', srcroot=None, catalog_type='gaul',
bbs_patches=None, incl_chan=False, incl_empty=False, clobber=False,
force_output=False, correct_proj=True, bbs_patches_mask=None):
"""Write the Gaussian, source, or shapelet list to a file. Returns True if
successful, False if not.
    outfile - name of resulting file; if None, file is
named automatically. If 'SAMP', table is sent to a samp hub
(must be running already).
catalog_type - type of catalog
"gaul" - Gaussian list
"srl" - Source list
"shap" - Shapelet list ("fits" format only)
format - format of output list. Supported formats are:
"fits" - FITS binary table
"ascii" - ASCII text file
"bbs" - BBS sky model (Gaussian list only)
"ds9" - ds9 region file
"star" - AIPS STAR file (Gaussian list only)
"kvis" - kvis file (Gaussian list only)
"sagecal" - SAGECAL file (Gaussian list only)
srcroot - root for source and patch names (BBS/ds9 only);
if None, the srcroot is chosen automatically
bbs_patches - type of patches to use:
None - no patches
"gaussian" - each Gaussian gets its own patch
"single" - all Gaussians are put into a single
patch
"source" - sources are grouped by source into patches
"mask" - use a Boolean mask to define the patches
bbs_patches_mask - file name of mask file if bbs_patches="mask"
incl_chan - Include fluxes for each channel?
incl_empty - Include islands without any valid Gaussians (source list only)?
sort_by - Property to sort output list by:
"flux" - sort by total integrated flux, largest first
"indx" - sort by Gaussian and island or source index, smallest first
force_output - Force the creation of a catalog, even if it is empty
correct_proj - Correct source parameters for image projection effects (BBS only)?
clobber - Overwrite existing file?
"""
from . import output
# First some checking:
if not 'gausfit' in img.completed_Ops:
print('\033[91mERROR\033[0m: Image has not been fit. Please run process_image first.')
return False
if catalog_type == 'shap' and not 'shapelets' in img.completed_Ops:
print('\033[91mERROR\033[0m: Image has not been decomposed into shapelets. Please run process_image first.')
return False
if catalog_type == 'srl' and not 'gaul2srl' in img.completed_Ops:
print('\033[91mERROR\033[0m: Gaussians have not been grouped into sources. Please run process_image first.')
return False
format = format.lower()
patch = bbs_patches
filename = outfile
if isinstance(patch, str):
patch = patch.lower()
if format not in ['fits', 'ascii', 'bbs', 'ds9', 'star',
'kvis', 'sagecal', 'csv', 'casabox']:
print('\033[91mERROR\033[0m: format must be "fits", '\
'"ascii", "ds9", "star", "kvis", "csv", "casabox", or "bbs"')
return False
if patch not in [None, 'gaussian', 'single', 'source', 'mask']:
print('\033[91mERROR\033[0m: patch must be None, '\
'"gaussian", "source", "single", or "mask"')
return False
if patch == 'mask':
if bbs_patches_mask is None:
print('\033[91mERROR\033[0m: if patch is "mask", bbs_patches_mask must be set to the file name of the mask file')
return False
    if catalog_type not in ['gaul', 'srl', 'shap']:
print('\033[91mERROR\033[0m: catalog_type must be "gaul", '\
'"srl", or "shap"')
return False
if catalog_type == 'shap' and format != 'fits':
print("\033[91mERROR\033[0m: Only format = 'fits' is supported with shapelet output.")
return False
if (len(img.sources) == 0 and not incl_empty) or (len(img.sources) == 0 and len(img.dsources) == 0 and incl_empty):
if not force_output:
print('No sources were found in the image. Output file not written.')
return False
if filename == '':
filename = None
# Now go format by format and call appropriate function
if filename == 'samp' or filename == 'SAMP':
import tempfile
from . import functions as func
import os
if not hasattr(img,'samp_client'):
s, private_key = func.start_samp_proxy()
img.samp_client = s
img.samp_key = private_key
# Broadcast fits table to SAMP Hub
tfile = tempfile.NamedTemporaryFile(delete=False)
filename = output.write_fits_list(img, filename=tfile.name,
incl_chan=incl_chan, incl_empty=incl_empty,
clobber=True, objtype=catalog_type)
table_name = 'PyBDSF '+ catalog_type + ' table'
if catalog_type == 'srl':
img.samp_srl_table_url = 'file://' + os.path.abspath(tfile.name)
if catalog_type == 'gaul':
img.samp_gaul_table_url = 'file://' + os.path.abspath(tfile.name)
func.send_fits_table(img.samp_client, img.samp_key, table_name, tfile.name)
        print('--> Table sent to SAMP hub')
return True
if format == 'fits':
filename = output.write_fits_list(img, filename=filename,
incl_chan=incl_chan, incl_empty=incl_empty,
clobber=clobber, objtype=catalog_type)
if filename is None:
print('\033[91mERROR\033[0m: File exists and clobber = False.')
return False
else:
print('--> Wrote FITS file ' + repr(filename))
return True
if format == 'ascii' or format == 'csv':
filename = output.write_ascii_list(img, filename=filename,
incl_chan=incl_chan, incl_empty=incl_empty,
sort_by='index', format = format,
clobber=clobber, objtype=catalog_type)
if filename is None:
print('\033[91mERROR\033[0m: File exists and clobber = False.')
return False
else:
print('--> Wrote ASCII file ' + repr(filename))
return True
if format == 'bbs':
if catalog_type != 'gaul':
print("\033[91mERROR\033[0m: Only catalog_type = 'gaul' is supported with BBS files.")
return False
filename = output.write_bbs_gaul(img, filename=filename,
srcroot=srcroot, incl_empty=incl_empty,
patch=patch, correct_proj=correct_proj,
sort_by='flux',
clobber=clobber)
if filename is None:
print('\033[91mERROR\033[0m: File exists and clobber = False.')
return False
else:
print('--> Wrote BBS sky model ' + repr(filename))
return True
if format == 'sagecal':
if catalog_type != 'gaul':
print("\033[91mERROR\033[0m: Only catalog_type = 'gaul' is supported with Sagecal files.")
return False
filename = output.write_lsm_gaul(img, filename=filename,
srcroot=srcroot, incl_empty=incl_empty,
patch=patch,
sort_by='flux',
clobber=clobber)
if filename is None:
print('\033[91mERROR\033[0m: File exists and clobber = False.')
return False
else:
print('--> Wrote Sagecal lsm file ' + repr(filename))
return True
if format == 'ds9':
filename = output.write_ds9_list(img, filename=filename,
srcroot=srcroot, incl_empty=incl_empty,
clobber=clobber, objtype=catalog_type)
if filename is None:
print('\033[91mERROR\033[0m: File exists and clobber = False.')
return False
else:
print('--> Wrote ds9 region file ' + repr(filename))
return True
if format == 'star':
if catalog_type != 'gaul':
print("\033[91mERROR\033[0m: Only catalog_type = 'gaul' is supported with star files.")
return False
filename = output.write_star(img, filename=filename,
clobber=clobber)
if filename is None:
print('\033[91mERROR\033[0m: File exists and clobber = False.')
return False
else:
print('--> Wrote AIPS STAR file ' + repr(filename))
return True
if format == 'kvis':
if catalog_type != 'gaul':
print("\033[91mERROR\033[0m: Only catalog_type = 'gaul' is supported with kvis files.")
return False
filename = output.write_kvis_ann(img, filename=filename,
clobber=clobber)
if filename is None:
print('\033[91mERROR\033[0m: File exists and clobber=False.')
return False
else:
print('--> Wrote kvis file ' + repr(filename))
return True
    if format == 'casabox':
        filename = output.write_casa_gaul(img, filename=filename,
                                          incl_empty=incl_empty, clobber=clobber)
        if filename is None:
            print('\033[91mERROR\033[0m: File exists and clobber=False.')
            return False
        else:
            print('--> Wrote CASA clean box file ' + filename)
            return True
def add_break_to_logfile(logfile):
f = open(logfile, 'a')
f.write('\n' + '='*72 + '\n')
f.close()
| 47,513 | 40.244792 | 125 |
py
|
PyBDSF
|
PyBDSF-master/bdsf/make_residimage.py
|
"""Module make_residimage.
It calculates residual image from the list of gaussians and shapelets
"""
from __future__ import absolute_import
import numpy as N
from scipy import stats # for skew and kurtosis
from .image import *
from .shapelets import *
from . import mylogger
class Op_make_residimage(Op):
"""Creates an image from the fitted gaussians
or shapelets.
The resulting model image is stored in the
resid_gaus or resid_shap attribute.
Prerequisites: module gausfit or shapelets should
be run first.
"""
def __call__(self, img):
from . import functions as func
from copy import deepcopy as cp
import os
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"ResidImage")
mylog.info("Calculating residual image after subtracting reconstructed gaussians")
shape = img.ch0_arr.shape
thresh= img.opts.fittedimage_clip
resid_gaus = cp(img.ch0_arr)
model_gaus = N.zeros(shape, dtype=N.float32)
for g in img.gaussians:
C1, C2 = g.centre_pix
if hasattr(g, 'wisland_id') and img.waveletimage:
isl = img.islands[g.wisland_id]
else:
isl = img.islands[g.island_id]
b = self.find_bbox(thresh*isl.rms, g)
bbox = N.s_[max(0, int(C1-b)):min(shape[0], int(C1+b+1)),
max(0, int(C2-b)):min(shape[1], int(C2+b+1))]
x_ax, y_ax = N.mgrid[bbox]
ffimg = func.gaussian_fcn(g, x_ax, y_ax)
resid_gaus[bbox] = resid_gaus[bbox] - ffimg
model_gaus[bbox] = model_gaus[bbox] + ffimg
# Apply mask to model and resid images
if hasattr(img, 'rms_mask'):
mask = img.rms_mask
else:
mask = img.mask_arr
if isinstance(img.mask_arr, N.ndarray):
pix_masked = N.where(img.mask_arr == True)
model_gaus[pix_masked] = N.nan
resid_gaus[pix_masked] = N.nan
img.model_gaus_arr = model_gaus
img.resid_gaus_arr = resid_gaus
if img.opts.output_all:
if img.waveletimage:
resdir = img.basedir + '/wavelet/residual/'
moddir = img.basedir + '/wavelet/model/'
else:
resdir = img.basedir + '/residual/'
moddir = img.basedir + '/model/'
if not os.path.exists(resdir): os.makedirs(resdir)
if not os.path.exists(moddir): os.makedirs(moddir)
func.write_image_to_file(img.use_io, img.imagename + '.resid_gaus.fits', resid_gaus, img, resdir)
mylog.info('%s %s' % ('Writing', resdir+img.imagename+'.resid_gaus.fits'))
func.write_image_to_file(img.use_io, img.imagename + '.model.fits', (img.ch0_arr - resid_gaus), img, moddir)
            mylog.info('%s %s' % ('Writing', moddir+img.imagename+'.model.fits'))
### residual rms and mean per island
for isl in img.islands:
resid = resid_gaus[tuple(isl.bbox)]
self.calc_resid_mean_rms(isl, resid, type='gaus')
# Calculate some statistics for the Gaussian residual image
non_masked = N.where(~N.isnan(img.ch0_arr))
mean = N.mean(resid_gaus[non_masked], axis=None)
std_dev = N.std(resid_gaus[non_masked], axis=None)
skew = stats.skew(resid_gaus[non_masked], axis=None)
kurt = stats.kurtosis(resid_gaus[non_masked], axis=None)
stat_msg = "Statistics of the Gaussian residual image:\n"
stat_msg += " mean: %.3e (Jy/beam)\n" % mean
stat_msg += " std. dev: %.3e (Jy/beam)\n" % std_dev
stat_msg += " skew: %.3f\n" % skew
stat_msg += " kurtosis: %.3f" % kurt
mylog.info(stat_msg)
# Now residual image for shapelets
if img.opts.shapelet_do:
mylog.info("Calculating residual image after subtracting reconstructed shapelets")
shape = img.ch0_arr.shape
fimg = N.zeros(shape, dtype=N.float32)
for isl in img.islands:
if hasattr(isl, 'shapelet_beta'):
if isl.shapelet_beta > 0: # make sure shapelet has nonzero scale for this island
mask=isl.mask_active
cen=isl.shapelet_centre-N.array(isl.origin)
basis, beta, nmax, cf = isl.shapelet_basis, isl.shapelet_beta, \
isl.shapelet_nmax, isl.shapelet_cf
image_recons=reconstruct_shapelets(isl.shape, mask, basis, beta, cen, nmax, cf)
fimg[tuple(isl.bbox)] += image_recons
model_shap = fimg
resid_shap = img.ch0_arr - fimg
if img.opts.shapelet_gresid:
# also subtract Gaussian model image
shape = img.ch0_arr.shape
thresh= img.opts.fittedimage_clip
model_gaus = N.zeros(shape, dtype=N.float32)
for isl in img.islands:
for g in isl.gaul:
C1, C2 = g.centre_pix
b = self.find_bbox(thresh*isl.rms, g)
bbox = N.s_[max(0, int(C1-b)):min(shape[0], int(C1+b+1)),
max(0, int(C2-b)):min(shape[1], int(C2+b+1))]
x_ax, y_ax = N.mgrid[bbox]
ffimg = func.gaussian_fcn(g, x_ax, y_ax)
model_gaus[bbox] = model_gaus[bbox] + ffimg
resid_shap -= model_gaus
# Apply mask to model and resid images
if hasattr(img, 'rms_mask'):
mask = img.rms_mask
else:
mask = img.mask_arr
if isinstance(mask, N.ndarray):
pix_masked = N.where(mask == True)
model_shap[pix_masked] = N.nan
resid_shap[pix_masked] = N.nan
img.model_shap_arr = model_shap
img.resid_shap_arr = resid_shap
if img.opts.output_all:
func.write_image_to_file(img.use_io, img.imagename + '.resid_shap.fits', resid_shap, img, resdir)
mylog.info('%s %s' % ('Writing ', resdir+img.imagename+'.resid_shap.fits'))
### shapelet residual rms and mean per island
for isl in img.islands:
resid = resid_shap[tuple(isl.bbox)]
self.calc_resid_mean_rms(isl, resid, type='shap')
# Calculate some statistics for the Shapelet residual image
non_masked = N.where(~N.isnan(img.ch0_arr))
mean = N.mean(resid_shap[non_masked], axis=None)
std_dev = N.std(resid_shap[non_masked], axis=None)
skew = stats.skew(resid_shap[non_masked], axis=None)
kurt = stats.kurtosis(resid_shap[non_masked], axis=None)
mylog.info("Statistics of the Shapelet residual image:")
mylog.info(" mean: %.3e (Jy/beam)" % mean)
mylog.info(" std. dev: %.3e (Jy/beam)" % std_dev)
mylog.info(" skew: %.3f" % skew)
mylog.info(" kurtosis: %.3f" % kurt)
img.completed_Ops.append('make_residimage')
return img
def find_bbox(self, thresh, g):
"""Calculate bounding box for gaussian.
This function calculates size of the box for evaluating
gaussian, so that value of gaussian is smaller than threshold
outside of the box.
Parameters:
thres: threshold
g: Gaussian object
"""
from math import ceil, sqrt, log
A = g.peak_flux
S = g.size_pix[0]
if A == 0.0:
return ceil(S*1.5)
if thresh/A >= 1.0 or thresh/A <= 0.0:
return ceil(S*1.5)
return ceil(S*sqrt(-2*log(thresh/A)))
def calc_resid_mean_rms(self, isl, resid, type):
"""Inserts mean and rms of residual image into isl, src, and gaussians
type - specifies 'gaus' or 'shap'
"""
if len(isl.gaul) == 0:
resid = N.zeros(isl.shape, dtype=N.float32)
ind = N.where(~isl.mask_active)
resid = resid[ind]
if type == 'gaus':
isl.gresid_rms = N.std(resid)
isl.gresid_mean = N.mean(resid)
else:
isl.sresid_rms = N.std(resid)
isl.sresid_mean = N.mean(resid)
if hasattr(isl, 'sources'):
for src in isl.sources:
if type == 'gaus':
src.gresid_rms = N.std(resid)
src.gresid_mean = N.mean(resid)
else:
src.sresid_rms = N.std(resid)
src.sresid_mean = N.mean(resid)
for g in src.gaussians:
if type == 'gaus':
g.gresid_rms = N.std(resid)
g.gresid_mean = N.mean(resid)
else:
g.sresid_rms = N.std(resid)
g.sresid_mean = N.mean(resid)
if hasattr(isl, 'dsources'):
for dsrc in isl.dsources: # Handle dummy sources (if any)
if type == 'gaus':
dsrc.gresid_rms = N.std(resid)
dsrc.gresid_mean = N.mean(resid)
else:
dsrc.sresid_rms = N.std(resid)
dsrc.sresid_mean = N.mean(resid)
| 9,400 | 40.052402 | 120 |
py
|
PyBDSF
|
PyBDSF-master/bdsf/shapefit.py
|
"""Module shapelets
This will do all the shapelet analysis of islands in an image
"""
from __future__ import absolute_import
from .image import *
from .islands import *
from .shapelets import *
from . import mylogger
from . import statusbar
from . import multi_proc as mp
import itertools
try:
from itertools import izip as zip
except ImportError: # will be 3.x series
pass
from . import functions as func
from .gausfit import find_bbox
class Op_shapelets(Op):
""" Get the image and mask from each island and send it to
shapelet programs which can then also be called seperately """
def __call__(self, img):
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Shapefit")
bar = statusbar.StatusBar('Decomposing islands into shapelets ...... : ', 0, img.nisl)
opts = img.opts
if img.opts.shapelet_do:
if not opts.quiet:
bar.start()
# Set up multiproccessing. First create a simple copy of the Image
# object that contains the minimal data needed.
opts_dict = opts.to_dict()
img_simple = Image(opts_dict)
img_simple.pixel_beamarea = img.pixel_beamarea
img_simple.pixel_beam = img.pixel_beam
img_simple.thresh_pix = img.thresh_pix
img_simple.minpix_isl = img.minpix_isl
img_simple.clipped_mean = img.clipped_mean
img_simple.shape = img.ch0_arr.shape
# Now call the parallel mapping function. Returns a list of
# [beta, centre, nmax, basis, cf] for each island
shap_list = mp.parallel_map(func.eval_func_tuple,
zip(itertools.repeat(self.process_island),
img.islands, itertools.repeat(img_simple),
itertools.repeat(opts)), numcores=opts.ncores,
bar=bar)
for id, isl in enumerate(img.islands):
beta, centre, nmax, basis, cf = shap_list[id]
isl.shapelet_beta=beta
isl.shapelet_centre=centre
isl.shapelet_posn_sky=img.pix2sky(centre)
isl.shapelet_posn_skyE=[0.0, 0.0, 0.0]
isl.shapelet_nmax=nmax
isl.shapelet_basis=basis
isl.shapelet_cf=cf
img.completed_Ops.append('shapelets')
def process_island(self, isl, img, opts=None):
"""Processes a single island.
Returns shapelet parameters.
"""
if opts is None:
opts = img.opts
if opts.shapelet_gresid:
shape = img.shape
thresh= opts.fittedimage_clip
model_gaus = N.zeros(shape, dtype=N.float32)
for g in isl.gaul:
C1, C2 = g.centre_pix
b = find_bbox(thresh*isl.rms, g)
bbox = N.s_[max(0, int(C1-b)):min(shape[0], int(C1+b+1)),
max(0, int(C2-b)):min(shape[1], int(C2+b+1))]
x_ax, y_ax = N.mgrid[bbox]
ffimg = func.gaussian_fcn(g, x_ax, y_ax)
model_gaus[bbox] = model_gaus[bbox] + ffimg
arr = isl.image - isl.islmean - model_gaus[tuple(isl.bbox)]
else:
arr = isl.image - isl.islmean
mask = isl.mask_active
basis = opts.shapelet_basis
beam_pix = img.pixel_beam()
mode = opts.shapelet_fitmode
if mode != 'fit':
mode = ''
fixed = (0,0,0)
(beta, centre, nmax) = self.get_shapelet_params(arr, mask, basis, beam_pix, fixed, N.array(isl.origin), mode)
cf = decompose_shapelets(arr, mask, basis, beta, centre, nmax, mode)
return [beta, tuple(N.array(centre) + N.array(isl.origin)), nmax, basis, cf]
def get_shapelet_params(self, image, mask, basis, beam_pix, fixed, ori, mode, beta=None, cen=None, nmax=None):
""" This takes as input an image, its mask (false=valid), basis="cartesian"/"polar",
fixed=(i,j,k) where i,j,k =0/1 to calculate or take as fixed for (beta, centre, nmax),
beam_pix has the beam in (pix_fwhm, pix_fwhm, deg),
beta (the scale), cen (centre of basis expansion), nmax (max order). The output
is an updated set of values of (beta, centre, nmax). If fixed is 1 and the value is not
specified as an argument, then fixed is taken as 0."""
from math import sqrt, log, floor
from . import functions as func
import numpy as N
if fixed[0]==1 and beta is None: fixed[0]=0
if fixed[1]==1 and cen is None: fixed[1]=0
if fixed[2]==1 and nmax is None: fixed[2]=0
if fixed[0]*fixed[1]==0:
(m1, m2, m3)=func.moment(image, mask)
if fixed[0]==0:
try:
beta = sqrt(m3[0]*m3[1])*2.0
except ValueError:
beta = 0.5
if beta == 0.0:
beta = 0.5
if fixed[1]==0:
cen=m2
if fixed[2]==0:
(n, m)=image.shape
nmax=int(round(sqrt(1.0*n*n+m*m)/beam_pix[1]))-1
            nmax=min(max(nmax*2+2,10),10) # totally ad hoc; note this always clamps nmax to 10
npix = N.product(image.shape)-N.sum(mask)
if nmax*nmax >= n*m : nmax = int(floor(sqrt(npix-1))) # -1 is for when n*m is a perfect square
if mode == 'fit': # make sure npara <= npix
nmax_max = int(round(0.5*(-3+sqrt(1+8*npix))))
nmax=min(nmax, nmax_max)
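                # The cap above is the positive root of
                #   (nmax + 1) * (nmax + 2) / 2 = npix,
                # i.e. the largest order for which the number of coefficients
                # with n1 + n2 <= nmax does not exceed the number of unmasked
                # pixels, so that npara <= npix as noted above.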
betarange=[0.5,sqrt(beta*max(n,m))] # min, max
if fixed[1]==0:
cen=shape_findcen(image, mask, basis, beta, nmax, beam_pix) # + check_cen_shapelet
#print 'First Centre = ',cen,N.array(cen)+ori
from time import time
t1 = time()
if fixed[0]==0:
beta, err=shape_varybeta(image, mask, basis, beta, cen, nmax, betarange, plot=False)
t2 = time()
#print 'TIME ',t2-t1, '\n'
#print 'Final Beta = ',beta, err
if fixed[1]==0 and fixed[0]==0:
cen=shape_findcen(image, mask, basis, beta, nmax, beam_pix) # + check_cen_shapelet
#print 'Final Cen = ',N.array(cen)+ori
return beta, cen, nmax
| 6,326 | 38.792453 | 117 |
py
|
PyBDSF
|
PyBDSF-master/bdsf/output.py
|
"""Module output.
Defines functions that write the results of source detection in a
variety of formats. These are then used as methods of Image objects
and/or are called by the outlist operation if output_all is True.
"""
from __future__ import print_function
from __future__ import absolute_import
from .image import Op
class Op_outlist(Op):
"""Write out list of Gaussians
    All available output lists are generated at the moment.
"""
def __call__(self, img):
if img.opts.output_all:
import os
if len(img.gaussians) > 0:
dir = img.basedir + '/catalogues/'
if not os.path.exists(dir):
os.makedirs(dir)
self.write_bbs(img, dir)
self.write_lsm(img, dir)
self.write_gaul(img, dir)
self.write_srl(img, dir)
self.write_aips(img, dir)
self.write_kvis(img, dir)
self.write_ds9(img, dir, objtype='gaul')
self.write_ds9(img, dir, objtype='srl')
self.write_gaul_FITS(img, dir)
self.write_srl_FITS(img, dir)
if not os.path.exists(img.basedir + '/misc/'):
os.makedirs(img.basedir + '/misc/')
self.write_opts(img, img.basedir + '/misc/')
self.save_opts(img, img.basedir + '/misc/')
img.completed_Ops.append('outlist')
def write_bbs(self, img, dir):
""" Writes the gaussian list as a bbs-readable file"""
if 'bbsname' in img.extraparams:
name = img.extraparams['bbsname']
else:
name = img.imagename
fname = dir + name + '.sky_in'
# Write Gaussian list
write_bbs_gaul(img, filename=fname, srcroot=img.opts.srcroot,
patch=img.opts.bbs_patches, sort_by='flux',
clobber=True, incl_empty=img.opts.incl_empty,
correct_proj=img.opts.correct_proj)
def write_lsm(self, img, dir):
""" Writes the gaussian list as an SAGECAL file"""
fname = dir + img.imagename + '.lsm'
write_lsm_gaul(img, filename=fname, sort_by='indx',
clobber=True,
incl_empty=img.opts.incl_empty)
def write_gaul(self, img, dir):
""" Writes the gaussian list as an ASCII file"""
fname = dir + img.imagename + '.gaul'
write_ascii_list(img, filename=fname, sort_by='indx',
clobber=True, objtype='gaul',
incl_empty=img.opts.incl_empty)
def write_srl(self, img, dir):
""" Writes the source list as an ASCII file"""
fname = dir + img.imagename + '.srl'
write_ascii_list(img, filename=fname, sort_by='indx',
clobber=True, objtype='srl',
incl_empty=img.opts.incl_empty)
def write_aips(self, img, dir):
""" Writes the gaussian list an AIPS STAR file"""
fname = dir + img.imagename + '.star'
write_star(img, filename=fname, sort_by='indx',
clobber=True)
def write_kvis(self, img, dir):
""" Writes the gaussian list as a kvis file"""
fname = dir + img.imagename + '.kvis.ann'
write_kvis_ann(img, filename=fname, sort_by='indx',
clobber=True)
def write_ds9(self, img, dir, objtype='gaul'):
""" Writes the gaussian list as a ds9 region file"""
fname = dir + img.imagename + '.' + objtype + '.ds9.reg'
write_ds9_list(img, filename=fname, srcroot=img.opts.srcroot,
clobber=True, deconvolve=False, objtype=objtype,
incl_empty=img.opts.incl_empty,)
def write_gaul_FITS(self, img, dir):
""" Writes the gaussian list as FITS binary table"""
fname = dir + img.imagename+'.gaul.FITS'
write_fits_list(img, filename=fname, sort_by='indx',
clobber=True, objtype='gaul',
incl_empty=img.opts.incl_empty,)
def write_srl_FITS(self, img, dir):
""" Writes the source list as FITS binary table"""
fname = dir + img.imagename+'.srl.FITS'
write_fits_list(img, filename=fname, sort_by='indx',
clobber=True, objtype='srl',
incl_empty=img.opts.incl_empty, incl_chan=img.opts.incl_chan)
def write_shap_FITS(self, img, dir):
""" Writes the shapelet list as a FITS file"""
fname = dir + img.imagename + '.shap.FITS'
write_fits_list(img, filename=fname, sort_by='indx',
clobber=True, objtype='shap')
def write_opts(self, img, dir):
""" Writes input parameters to a text file."""
import inspect
from . import mylogger
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Output")
fname = 'parameters_used'
f = open(dir+fname, 'w')
mylog.info('Writing '+dir+fname)
for attr in inspect.getmembers(img.opts):
if attr[0][0] != '_':
if isinstance(attr[1], (int, str, bool, float, type(None), tuple, list)):
f.write('%-40s' % attr[0])
f.write(repr(attr[1])+'\n')
# Also print the values derived internally. They are all stored
# in img with the same name (e.g., img.opts.beam --> img.beam)
if hasattr(img, attr[0]):
used = img.__getattribute__(attr[0])
if used != attr[1] and isinstance(used, (int, str, bool, float,
type(None), tuple,
list)):
f.write('%-40s' % ' Value used')
f.write(repr(used)+'\n')
f.close()
def save_opts(self, img, dir):
""" Saves input parameters to a PyBDSM save file."""
from . import interface
from . import mylogger
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Output")
fname = 'parameters.sav'
mylog.info('Writing '+dir+fname)
interface.save_pars(img, dir+fname, quiet=True)
def ra2hhmmss(deg):
"""Convert RA coordinate (in degrees) to HH MM SS"""
from math import modf
if deg < 0:
deg += 360.0
x, hh = modf(deg/15.)
x, mm = modf(x*60)
ss = x*60
return (int(hh), int(mm), ss)
def dec2ddmmss(deg):
"""Convert DEC coordinate (in degrees) to DD MM SS"""
from math import modf
sign = (-1 if deg < 0 else 1)
x, dd = modf(abs(deg))
x, ma = modf(x*60)
sa = x*60
return (int(dd), int(ma), sa, sign)
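# Worked examples for the two converters above (values chosen so that the
# arithmetic is exact):
#   ra2hhmmss(180.0)  -> (12, 0, 0.0)
#   dec2ddmmss(-30.5) -> (30, 30, 0.0, -1)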
def B1950toJ2000(Bcoord):
""" Precess using Aoki et al. 1983. Same results as NED to ~0.2asec """
from math import sin, cos, pi, sqrt, asin, acos
import numpy as N
rad = 180.0/pi
ra, dec = Bcoord
A = N.array([-1.62557e-6, -0.31919e-6, -0.13843e-6])
M = N.array([[0.9999256782, 0.0111820609, 0.00485794], [-0.0111820610, 0.9999374784, -0.0000271474],
[-0.0048579477, -0.0000271765, 0.9999881997]])
r0 = N.zeros(3)
r0[0] = cos(dec/rad) * cos(ra/rad)
r0[1] = cos(dec/rad) * sin(ra/rad)
r0[2] = sin(dec/rad)
r0A = N.sum(r0*A)
r1 = r0 - A + r0A*r0
r = N.sum(M.transpose()*r1, axis=1)
rscal = sqrt(N.sum(r*r))
decj = asin(r[2]/rscal)*rad
d1 = r[0] / rscal / cos(decj/rad)
d2 = r[1] / rscal / cos(decj/rad)
raj = acos(d1)*rad
if d2 < 0.0:
raj = 360.0 - raj
Jcoord = [raj, decj]
return Jcoord
def write_bbs_gaul(img, filename=None, srcroot=None, patch=None,
incl_primary=True, sort_by='flux',
clobber=False, incl_empty=False, correct_proj=True):
"""Writes Gaussian list to a BBS sky model"""
from . import mylogger
import os
mylog = mylogger.logging.getLogger("PyBDSM.write_gaul")
if int(img.equinox) != 2000 and int(img.equinox) != 1950:
mylog.warning('Equinox of input image is not J2000 or B1950. '
'Sky model may not be appropriate for BBS.')
if int(img.equinox) == 1950:
mylog.warning('Equinox of input image is B1950. Coordinates '
'will be precessed to J2000.')
outl, outn, patl = list_and_sort_gaussians(img, patch=patch,
root=srcroot, sort_by=sort_by)
outstr_list = make_bbs_str(img, outl, outn, patl, incl_empty=incl_empty,
correct_proj=correct_proj)
if filename is None:
filename = img.imagename + '.sky_in'
if os.path.exists(filename) and not clobber:
return None
mylog.info('Writing ' + filename)
f = open(filename, 'w')
for s in outstr_list:
f.write(s)
f.close()
return filename
def write_lsm_gaul(img, filename=None, srcroot=None, patch=None,
incl_primary=True, sort_by='flux',
clobber=False, incl_empty=False):
"""Writes Gaussian list to a SAGECAL lsm sky model"""
from . import mylogger
import os
mylog = mylogger.logging.getLogger("PyBDSM.write_gaul")
if int(img.equinox) != 2000 and int(img.equinox) != 1950:
mylog.warning('Equinox of input image is not J2000 or B1950. '
'Sky model may not be appropriate for Sagecal.')
if int(img.equinox) == 1950:
mylog.warning('Equinox of input image is B1950. Coordinates '
'will be precessed to J2000.')
outl, outn, patl = list_and_sort_gaussians(img, patch=patch,
root=srcroot, sort_by=sort_by)
outstr_list = make_lsm_str(img, outl, outn, incl_empty=incl_empty)
if filename is None:
filename = img.imagename + '.lsm'
if os.path.exists(filename) and not clobber:
return None
mylog.info('Writing ' + filename)
f = open(filename, 'w')
for s in outstr_list:
f.write(s)
f.close()
return filename
def write_ds9_list(img, filename=None, srcroot=None, deconvolve=False,
clobber=False, incl_empty=False, objtype='gaul'):
"""Writes Gaussian list to a ds9 region file"""
from . import mylogger
import os
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Output")
if objtype == 'gaul':
outl, outn, patl = list_and_sort_gaussians(img, patch=None)
elif objtype == 'srl':
root = img.parentname
outl = [img.sources]
if incl_empty:
# Append the dummy sources for islands without any unflagged Gaussians
outl[0] += img.dsources
outn = []
for src in img.sources:
outn.append(root + '_i' + str(src.island_id) + '_s' +
str(src.source_id))
if incl_empty:
# Append the dummy sources for islands without any unflagged Gaussians
for dsrc in img.dsources:
outn.append(root + '_i' + str(dsrc.island_id) + '_s' +
str(dsrc.source_id))
outn = [outn]
outstr_list = make_ds9_str(img, outl, outn, deconvolve=deconvolve,
objtype=objtype, incl_empty=incl_empty)
if filename is None:
filename = img.imagename + '.' + objtype + '.reg'
if os.path.exists(filename) and not clobber:
return None
mylog.info('Writing ' + filename)
f = open(filename, "w")
for s in outstr_list:
f.write(s)
f.close()
return filename
def write_ascii_list(img, filename=None, sort_by='indx', format='ascii',
incl_chan=False, incl_empty=False, clobber=False, objtype='gaul'):
"""Writes Gaussian list to an ASCII file"""
from . import mylogger
import os
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Output")
if objtype == 'gaul':
outl, outn, patl = list_and_sort_gaussians(img, patch=None, sort_by=sort_by)
elif objtype == 'srl':
outl = [img.sources]
if incl_empty:
# Append the dummy sources for islands without any unflagged Gaussians
outl[0] += img.dsources
outstr_list = make_ascii_str(img, outl, objtype=objtype, incl_chan=incl_chan,
incl_empty=incl_empty, format=format)
if filename is None:
if objtype == 'gaul':
filename = img.imagename + '.gaul'
elif objtype == 'srl':
filename = img.imagename + '.srl'
if os.path.exists(filename) and not clobber:
return None
mylog.info('Writing ' + filename)
f = open(filename, "w")
for s in outstr_list:
f.write(s)
f.close()
return filename
def write_casa_gaul(img, filename=None, incl_empty=False, clobber=False):
"""Writes a clean box file for use in casapy"""
from . import mylogger
import os
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Output")
outl, outn, patl = list_and_sort_gaussians(img, patch=None)
outstr_list = make_casa_str(img, outl)
if filename is None:
filename = img.imagename + '.box'
if os.path.exists(filename) and not clobber:
return None
mylog.info('Writing ' + filename)
f = open(filename, "w")
for s in outstr_list:
f.write(s)
f.close()
return filename
def write_fits_list(img, filename=None, sort_by='index', objtype='gaul',
incl_chan=False, incl_empty=False, clobber=False):
""" Write as FITS binary table.
"""
from . import mylogger
from distutils.version import StrictVersion
try:
from astropy.io import fits as pyfits
use_header_update = False
use_from_columns = True
except ImportError:
import pyfits
if StrictVersion(pyfits.__version__) < StrictVersion('3.1'):
use_header_update = True
use_from_columns = False
else:
use_header_update = False
use_from_columns = True
import os
import numpy as N
from ._version import __version__
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Output")
if objtype == 'gaul':
outl, outn, patl = list_and_sort_gaussians(img, patch=None, sort_by=sort_by)
elif objtype == 'srl':
outl = [img.sources]
if incl_empty:
# Append the dummy sources for islands without any unflagged Gaussians
outl[0] += img.dsources
elif objtype == 'shap':
outl = [[isl for isl in img.islands if hasattr(isl, 'shapelet_nmax')]]
nmax = 0
if objtype == 'shap':
# loop over shapelets and get maximum size of coefficient matrix
for isl in outl[0]:
if hasattr(isl, 'shapelet_nmax'):
if isl.shapelet_nmax > nmax:
nmax = isl.shapelet_nmax
nmax += 1
if img.opts.aperture is not None:
incl_aper = True
else:
incl_aper = False
if len(outl[0]) > 0:
cvals, cnames, cformats, cunits = make_output_columns(outl[0][0], fits=True,
objtype=objtype,
incl_spin=img.opts.spectralindex_do,
incl_chan=incl_chan,
incl_pol=img.opts.polarisation_do,
incl_aper=incl_aper,
incl_empty=incl_empty,
nmax=nmax, nchan=img.nchan)
out_list = make_fits_list(img, outl, objtype=objtype, nmax=nmax, incl_empty=incl_empty,incl_chan=incl_chan)
col_list = []
for ind, col in enumerate(out_list):
list1 = pyfits.Column(name=cnames[ind], format=cformats[ind],
unit=cunits[ind], array=N.array(out_list[ind]))
col_list.append(list1)
if len(col_list) == 0:
col_list = [pyfits.Column(name='Blank', format='1J')]
if use_from_columns:
tbhdu = pyfits.BinTableHDU.from_columns(col_list)
else:
tbhdu = pyfits.new_table(col_list)
if objtype == 'gaul':
tbhdu.header.add_comment('Gaussian list for '+img.filename)
elif objtype == 'srl':
tbhdu.header.add_comment('Source list for '+img.filename)
elif objtype == 'shap':
tbhdu.header.add_comment('Shapelet list for '+img.filename)
tbhdu.header.add_comment('Generated by PyBDSM version %s'
% (__version__, ))
freq = "%.5e" % img.frequency
tbhdu.header.add_comment('Reference frequency of the detection ("ch0") image: %s Hz' % freq)
tbhdu.header.add_comment('Equinox : %s' % img.equinox)
if use_header_update:
tbhdu.header.update('INIMAGE', img.filename, 'Filename of image')
tbhdu.header.update('FREQ0', float(freq), 'Reference frequency')
tbhdu.header.update('EQUINOX', img.equinox, 'Equinox')
else:
tbhdu.header['INIMAGE'] = (img.filename, 'Filename of image')
tbhdu.header['FREQ0'] = (float(freq), 'Reference frequency')
tbhdu.header['EQUINOX'] = (img.equinox, 'Equinox')
for key in img.header.keys():
if key in ['HISTORY', 'COMMENT', '']:
continue
tbhdu.header.add_comment('%s = %s' % (key, repr(img.header[key])))
if filename is None:
filename = img.imagename + '.' + objtype + '.fits'
if os.path.exists(filename) and not clobber:
return None
mylog.info('Writing ' + filename)
try:
tbhdu.writeto(filename, overwrite=True)
except TypeError:
# The "overwrite" argument was added in astropy v1.3, so fall back to "clobber"
# if it doesn't work
tbhdu.writeto(filename, clobber=True)
return filename
def write_kvis_ann(img, filename=None, sort_by='indx',
clobber=False):
from . import mylogger
import os
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Output")
if filename is None:
filename = img.imagename + '.kvis.ann'
if os.path.exists(filename) and not clobber:
return None
f = open(filename, 'w')
mylog.info('Writing '+filename)
f.write("### KVis annotation file\n\n")
f.write("color green\n\n")
outl, outn, patl = list_and_sort_gaussians(img, patch=None, sort_by=sort_by)
for g in outl[0]:
iidx = g.island_id
        # kvis does not correct for position-dependent angle or pixel scale
        # in annotation files, so we must use the uncorrected values
ra, dec = g.centre_sky
shape = g.size_sky_uncorr
str = 'text %10.5f %10.5f %d\n' % \
(ra, dec, iidx)
f.write(str)
str = 'ellipse %10.5f %10.5f %10.7f %10.7f %10.4f\n' % \
(ra, dec, shape[0], shape[1], shape[2])
f.write(str)
f.close()
return filename
def write_star(img, filename=None, sort_by='indx',
clobber=False):
from .output import ra2hhmmss, dec2ddmmss
from . import mylogger
import os
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Output")
if filename is None:
filename = img.imagename + '.star'
if os.path.exists(filename) and not clobber:
return None
f = open(filename, 'w')
mylog.info('Writing '+filename)
outl, outn, patl = list_and_sort_gaussians(img, patch=None, sort_by=sort_by)
for g in outl[0]:
A = g.peak_flux
ra, dec = g.centre_sky
shape = g.size_sky_uncorr
# convert to canonical representation
ra = ra2hhmmss(ra)
dec = dec2ddmmss(dec)
decsign = ('-' if dec[3] < 0 else '+')
str = '%2i %2i %6.3f ' \
'%c%2i %2i %6.3f ' \
'%9.4f %9.4f %7.2f ' \
'%2i %13.7f %10s\n' % \
(ra[0], ra[1], ra[2],
decsign, dec[0], dec[1], dec[2],
shape[0]*3600, shape[1]*3600, shape[2],
4, A, '')
f.write(str)
f.close()
return filename
def make_bbs_str(img, glist, gnames, patchnames, objtype='gaul',
incl_empty=False, correct_proj=True):
"""Makes a list of string entries for a BBS sky model."""
from .output import ra2hhmmss
from .output import dec2ddmmss
import numpy as N
outstr_list = []
freq = "%.5e" % img.frequency
if len(patchnames) == 0:
# Handle empty list: just write default header
outstr_list.append("format = Name, Type, Ra, Dec, I, Q, U, V, "
"MajorAxis, MinorAxis, Orientation, "
"ReferenceFrequency='"+freq+"', "
"SpectralIndex='[]'\n\n")
elif patchnames[0] is None:
outstr_list.append("format = Name, Type, Ra, Dec, I, Q, U, V, "
"MajorAxis, MinorAxis, Orientation, "
"ReferenceFrequency='"+freq+"', "
"SpectralIndex='[]'\n\n")
else:
outstr_list.append("format = Name, Type, Patch, Ra, Dec, I, Q, U, V, "
"MajorAxis, MinorAxis, Orientation, "
"ReferenceFrequency='"+freq+"', "
"SpectralIndex='[]'\n\n")
if objtype == 'shap':
raise RuntimeError("Shapelets not yet supported in the BBS format.")
else:
patchname_last = ''
for pindx, patch_name in enumerate(patchnames): # loop over patches
if patch_name is not None and patch_name != patchname_last:
outstr_list.append(', , ' + patch_name + ', 00:00:00, +00.00.00\n')
patchname_last = patch_name
gaussians_in_patch = glist[pindx]
names_in_patch = gnames[pindx]
for gindx, g in enumerate(gaussians_in_patch):
if g.gaus_num >= 0 or (g.gaus_num < 0 and incl_empty):
src_name = names_in_patch[gindx]
ra, dec = g.centre_sky
if img.equinox == 1950:
ra, dec = B1950toJ2000([ra, dec])
ra = ra2hhmmss(ra)
sra = str(ra[0]).zfill(2)+':'+str(ra[1]).zfill(2)+':'+str("%.6f" % (ra[2])).zfill(6)
dec = dec2ddmmss(dec)
decsign = ('-' if dec[3] < 0 else '+')
sdec = decsign+str(dec[0]).zfill(2)+'.'+str(dec[1]).zfill(2)+'.'+str("%.6f" % (dec[2])).zfill(6)
total = str("%.3e" % (g.total_flux))
if correct_proj:
deconv = list(g.deconv_size_sky)
else:
deconv = list(g.deconv_size_sky_uncorr)
if deconv[0] == 0.0 and deconv[1] == 0.0:
stype = 'POINT'
deconv[2] = 0.0
else:
stype = 'GAUSSIAN'
deconv1 = str("%.5e" % (deconv[0]*3600.0))
deconv2 = str("%.5e" % (deconv[1]*3600.0))
deconv3 = str("%.5e" % (deconv[2]))
deconvstr = deconv1 + ', ' + deconv2 + ', ' + deconv3
specin = '-0.8'
if 'spectralindex' in img.completed_Ops:
if g.spec_indx is not None and N.isfinite(g.spec_indx):
specin = str("%.3e" % (g.spec_indx))
sep = ', '
if img.opts.polarisation_do:
Q_flux = str("%.3e" % (g.total_flux_Q))
U_flux = str("%.3e" % (g.total_flux_U))
V_flux = str("%.3e" % (g.total_flux_V))
else:
Q_flux = '0.0'
U_flux = '0.0'
V_flux = '0.0'
if patch_name is None:
outstr_list.append(src_name + sep + stype + sep + sra + sep +
sdec + sep + total + sep + Q_flux + sep +
U_flux + sep + V_flux + sep +
deconvstr + sep + freq + sep +
'[' + specin + ']\n')
else:
outstr_list.append(src_name + sep + stype + sep + patch_name +
sep + sra + sep + sdec + sep + total + sep +
Q_flux + sep + U_flux + sep + V_flux + sep +
deconvstr + sep + freq + sep +
'[' + specin + ']\n')
else:
outstr_list.pop()
return outstr_list
def make_lsm_str(img, glist, gnames, incl_empty=False):
"""Makes a list of string entries for a SAGECAL sky model."""
from .output import ra2hhmmss
from .output import dec2ddmmss
import numpy as N
from ._version import __version__
outstr_list = ["# SAGECAL sky model\n"]
freq = "%.5e" % img.frequency
outstr_list.append('# Generated by PyBDSM version %s\n'
% (__version__, ))
outstr_list.append("# Name | RA (hr,min,sec) | DEC (deg,min,sec) | I | Q | U | V | SI | RM | eX | eY | eP | freq0\n\n")
for gindx, g in enumerate(glist[0]):
if g.gaus_num >= 0 or (g.gaus_num < 0 and incl_empty):
src_name = gnames[0][gindx]
ra, dec = g.centre_sky
if img.equinox == 1950:
ra, dec = B1950toJ2000([ra, dec])
ra = ra2hhmmss(ra)
sra = str(ra[0]).zfill(2)+' '+str(ra[1]).zfill(2)+' '+str("%.6f" % (ra[2])).zfill(6)
dec = dec2ddmmss(dec)
decsign = ('-' if dec[3] < 0 else '+')
sdec = decsign+str(dec[0]).zfill(2)+' '+str(dec[1]).zfill(2)+' '+str("%.6f" % (dec[2])).zfill(6)
total = str("%.3e" % (g.total_flux))
deconv = list(g.deconv_size_sky)
if deconv[0] == 0.0 and deconv[1] == 0.0:
sname = 'P' + src_name
deconv[2] = 0.0
else:
sname = 'G' + src_name
# Make sure Gaussian is not 1-D, as SAGECAL cannot handle these
if deconv[0] < 1e-5:
deconv[0] = 1e-5
if deconv[1] < 1e-5:
deconv[1] = 1e-5
                # The following conversions are taken from the SAGECAL script "convert_skymodel.py"
deconv1 = str("%.5e" % (deconv[0]*N.pi/180.0/2.0))
deconv2 = str("%.5e" % (deconv[1]*N.pi/180.0/2.0))
deconv3 = str("%.5e" % (N.pi/2-(N.pi-deconv[2]/180.0*N.pi)))
deconvstr = deconv1 + ' ' + deconv2 + ' ' + deconv3
specin = '-0.8'
if 'spectralindex' in img.completed_Ops:
if g.spec_indx is not None and N.isfinite(g.spec_indx):
specin = str("%.3e" % (g.spec_indx))
sep = ' '
if img.opts.polarisation_do:
Q_flux = str("%.3e" % g.total_flux_Q)
U_flux = str("%.3e" % g.total_flux_U)
V_flux = str("%.3e" % g.total_flux_V)
else:
Q_flux = '0.0'
U_flux = '0.0'
V_flux = '0.0'
outstr_list.append(sname + sep + sra + sep +
sdec + sep + total + sep + Q_flux + sep +
U_flux + sep + V_flux + sep +
specin + sep + '0' + sep + deconvstr + sep +
freq + sep + '\n')
return outstr_list
def make_ds9_str(img, glist, gnames, deconvolve=False, objtype='gaul', incl_empty=False):
"""Makes a list of string entries for a ds9 region file."""
from . import mylogger
outstr_list = []
if img.equinox is None:
equinox = 'fk5'
else:
if int(img.equinox) == 2000:
equinox = 'fk5'
elif int(img.equinox) == 1950:
equinox = 'fk4'
else:
mylog = mylogger.logging.getLogger("PyBDSM.write_ds9")
mylog.warning('Equinox of input image is not J2000 or B1950. '
'Regions may not be correct.')
equinox = 'fk5'
outstr_list.append('# Region file format: DS9 version 4.0\nglobal color=green '
'font="helvetica 10 normal" select=1 highlite=1 edit=1 '
'move=1 delete=1 include=1 fixed=0 source\n'+equinox+'\n')
for gindx, g in enumerate(glist[0]):
if objtype == 'gaul':
objid = g.gaus_num
else:
objid = g.source_id
if objid >= 0 or (objid < 0 and incl_empty):
src_name = gnames[0][gindx]
if objtype == 'gaul':
ra, dec = g.centre_sky
else:
ra, dec = g.posn_sky_centroid
            # ds9 does not correct for position-dependent angle or pixel scale
            # for region files, so we must use the uncorrected values
if deconvolve:
deconv = g.deconv_size_sky_uncorr
else:
deconv = g.size_sky_uncorr
if deconv[0] == 0.0 and deconv[1] == 0.0:
deconv[2] = 0.0
region = 'point(' + str(ra) + ',' + str(dec) + \
') # point=cross width=2 text={' + src_name + '}\n'
else:
# ds9 can't handle 1-D Gaussians, so make sure they are 2-D
if deconv[0] < 1.0/3600.0:
deconv[0] = 1.0/3600.0
if deconv[1] < 1.0/3600.0:
deconv[1] = 1.0/3600.0
region = 'ellipse(' + str(ra) + ',' + str(dec) + ',' + \
str(deconv[0]*3600.0) + '",' + str(deconv[1]*3600.0) + \
'",' + str(deconv[2]+90.0) + ') # text={' + src_name + '}\n'
outstr_list.append(region)
return outstr_list
def make_ascii_str(img, glist, objtype='gaul', format='ascii', incl_empty=False,
incl_chan=False):
"""Makes a list of string entries for an ascii region file."""
from ._version import __version__
outstr_list = []
freq = "%.5e" % img.frequency
if objtype == 'gaul':
outstr_list.append('# Gaussian list for '+img.filename+'\n')
elif objtype == 'srl':
outstr_list.append('# Source list for '+img.filename+'\n')
outstr_list.append('# Generated by PyBDSM version %s\n'
% (__version__, ))
outstr_list.append('# Reference frequency of the detection ("ch0") image: %s Hz\n' % freq)
outstr_list.append('# Equinox : %s \n\n' % img.equinox)
if img.opts.aperture is not None:
incl_aper = True
else:
incl_aper = False
for i, g in enumerate(glist[0]):
cvals, cnames, cformats, cunits = make_output_columns(g, fits=False,
objtype=objtype,
incl_spin=img.opts.spectralindex_do,
incl_chan=incl_chan,
incl_pol=img.opts.polarisation_do,
incl_aper=incl_aper,
incl_empty=incl_empty,
nchan=img.nchan)
if cvals is not None:
cformats[-1] += "\n"
if format == 'ascii':
if i == 0:
outstr_list.append("# " + " ".join(cnames) + "\n")
outstr_list.append(" ".join(cformats).format(*cvals))
else:
if i == 0:
outstr_list.append("# " + ", ".join(cnames) + "\n")
outstr_list.append(", ".join(cformats).format(*cvals))
return outstr_list
def make_fits_list(img, glist, objtype='gaul', nmax=30, incl_empty=False,
incl_chan=False):
from . import functions as func
out_list = []
if img.opts.aperture is not None:
incl_aper = True
else:
incl_aper = False
for g in glist[0]:
cvals, ext1, ext2, ext3 = make_output_columns(g, fits=True, objtype=objtype,
incl_spin=img.opts.spectralindex_do,
incl_chan=incl_chan,
incl_pol=img.opts.polarisation_do,
incl_aper=incl_aper,
incl_empty=incl_empty,
nmax=nmax, nchan=img.nchan)
if cvals is not None:
out_list.append(cvals)
out_list = func.trans_gaul(out_list)
return out_list
def make_casa_str(img, glist):
"""Makes a list of string entries for a casa region file."""
from . import functions as func
outstr_list = ['#CRTFv0 CASA Region Text Format version 0\n']
scale = 2.0 # scale box to 2 times FWHM of Gaussian
for gindx, g in enumerate(glist[0]):
x, y = g.centre_pix
ellx, elly = func.drawellipse(g)
blc = [min(ellx), min(elly)]
trc = [max(ellx), max(elly)]
blc[0] -= (x - blc[0]) * scale
blc[1] -= (y - blc[1]) * scale
trc[0] += (trc[0] - x) * scale
trc[1] += (trc[1] - y) * scale
blc_sky = img.pix2sky(blc)
trc_sky = img.pix2sky(trc)
blc_sky_str = convert_radec_str(blc_sky[0], blc_sky[1])
trc_sky_str = convert_radec_str(trc_sky[0], trc_sky[1])
# Format is: box [ [<blcx>, <blcy>], [<trcx>, <trcy>] ]
        # Note that no per-Gaussian id is written to the box file; the loop
        # index gindx is currently unused.
outstr_list.append('box [[' + ', '.join(blc_sky_str) + '], [' +
', '.join(trc_sky_str) + ']] coord=J2000\n')
return outstr_list
def write_islands(img):
import numpy as N
import os
    # Write out island properties for reference, since achaar doesn't work.
filename = img.basedir + '/misc/'
if not os.path.exists(filename):
os.makedirs(filename)
filename = filename + 'island_file'
if img.j == 0:
f = open(filename, 'w')
f.write('Wavelet# Island_id bbox origin shape mask_active mask_noisy size_active mean rms max_value ngaul gresid_mean ' +
'gresid_rms resid_rms resid_mean nsource \n')
else:
f = open(filename, 'a')
for isl in img.islands:
f.write('%5i %5i %5i %5i %5i %5i %5i %5i %5i %5i %10i %10i %10i %.3e %.3e %.3e %5i %.3e %.3e %5i \n'
% (img.j, isl.island_id, isl.bbox[0].start, isl.bbox[0].stop, isl.bbox[1].start, isl.bbox[1].stop,
isl.origin[0], isl.origin[1], isl.shape[0], isl.shape[1], N.sum(~isl.mask_active), N.sum(~isl.mask_noisy),
isl.size_active, isl.mean, isl.rms, isl.max_value, len(isl.gaul), isl.gresid_mean, isl.gresid_rms,
len(isl.sources)))
f.close()
def get_src(src_list, srcid):
"""Returns the source for srcid or None if not found"""
for src in src_list:
if src.source_id == srcid:
return src
return None
def convert_radec_str(ra, dec):
"""Takes ra, dec in degrees and returns BBS/CASA strings"""
ra = ra2hhmmss(ra)
sra = str(ra[0]).zfill(2)+':'+str(ra[1]).zfill(2)+':'+str("%.3f" % (ra[2])).zfill(6)
dec = dec2ddmmss(dec)
decsign = ('-' if dec[3] < 0 else '+')
sdec = decsign+str(dec[0]).zfill(2)+'.'+str(dec[1]).zfill(2)+'.'+str("%.3f" % (dec[2])).zfill(6)
return sra, sdec
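# For example (exact arithmetic): convert_radec_str(180.0, -30.5) returns
# ('12:00:00.000', '-30.30.00.000').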
def list_and_sort_gaussians(img, patch=None, root=None,
sort_by='index'):
"""Returns sorted lists of Gaussians and their names and patch names.
patch - can be "single", "gaussian", "source", or None
Returns (outlist, outnames, patchnames)
outlist is [[g1, g2, g3], [g4], ...]
outnames is [['root_i2_s1_g1', 'root_i2_s1_g2', 'root_i2_s1_g3'], ...]
patchnames is ['root_patch_s1', 'root_patch_s2', ...]
The names are root_iXX_sXX_gXX (or wXX_iXX_sXX_gXX for wavelet Gaussians)
"""
import numpy as N
from . import functions as func
# Define lists
if root is None:
root = img.parentname
gauslist = []
gausname = []
outlist = []
outnames = []
patchnames = []
patchnames_sorted = []
gausflux = [] # fluxes of Gaussians
gausindx = [] # indices of Gaussians
patchflux = [] # total flux of each patch
patchindx = [] # indices of sources
patchnums = [] # number of patch from mask
# If a mask image is to be used to define patches, read it in and
# make a rank image from it
use_mask = False
if patch not in ['single', 'gaussian', 'source', None]:
mask_file = img.opts.bbs_patches_mask
patches_mask, hdr = func.read_image_from_file(mask_file, img, img.indir)
use_mask = True
act_pixels = patches_mask[0, 0]
rank = len(act_pixels.shape)
import scipy.ndimage as nd
connectivity = nd.generate_binary_structure(rank, rank)
mask_labels, count = nd.label(act_pixels, connectivity)
src_list = img.sources
for src in src_list:
for g in src.gaussians:
gauslist.append(g)
gausflux.append(g.total_flux)
gausindx.append(g.gaus_num)
jstr = '_w' + str(g.jlevel)
gausname.append(root + jstr + '_i' + str(src.island_id) + '_s' +
str(src.source_id) + '_g' + str(g.gaus_num))
if patch == 'gaussian':
outlist.append(gauslist)
outnames.append(gausname)
patchnames.append(root + '_patch' + jstr + '_g' + str(g.gaus_num))
patchflux.append(N.sum(gausflux))
patchindx.append(g.gaus_num)
gauslist = [] # reset for next Gaussian
gausname = []
gausflux = []
gausindx = []
if use_mask:
patchnums.append(mask_labels[g.centre_pix[0], g.centre_pix[1]])
if patch == 'source':
sorted_gauslist = list(gauslist)
sorted_gausname = list(gausname)
if sort_by == 'flux':
# Sort Gaussians by flux within each source
indx = N.argsort(N.array(gausflux)).tolist()
indx.reverse()
elif sort_by == 'index':
# Sort Gaussians by index within each source
indx = N.argsort(N.array(gausindx)).tolist()
else:
# Unrecognized property --> Don't sort
indx = range(len(gausindx))
for i, si in enumerate(indx):
sorted_gauslist[i] = gauslist[si]
sorted_gausname[i] = gausname[si]
outlist.append(sorted_gauslist)
outnames.append(sorted_gausname)
patchnames.append(root + '_patch' + '_s' + str(src.source_id))
patchflux.append(N.sum(gausflux))
patchindx.append(src.source_id)
gauslist = [] # reset for next source
gausname = []
gausflux = []
if use_mask:
unique_patch_ids = set(patchnums)
# Check if there is a patch with id = 0. If so, this means there were
# some Gaussians that fell outside of the regions in the patch
# mask file.
if 0 in unique_patch_ids:
from . import mylogger
mylog = mylogger.logging.getLogger("PyBDSM.write_gaul")
mylog.warning('Some sources fall outside of the regions '
'defined in the mask file. These sources are not '
'included in the output sky model.')
for p in unique_patch_ids:
if p != 0:
                in_patch = N.where(N.array(patchnums) == p)
outlist.append(N.array(gauslist)[in_patch].tolist())
outnames.append(N.array(gausname)[in_patch].tolist())
patchnames.append('patch_'+str(p))
patchflux.append(N.sum(N.array(gausflux)[in_patch]))
patchindx.append(p)
# Sort
if patch == 'single' or patch is None:
outlist = [list(gauslist)]
outlist_sorted = [list(gauslist)]
outnames = [list(gausname)]
outnames_sorted = [list(gausname)]
if patch == 'single':
patchnames = [root + '_patch']
else:
patchnames = [None]
if sort_by == 'flux':
# Sort by Gaussian flux
indx = N.argsort(N.array(gausflux)).tolist()
indx.reverse()
elif sort_by == 'index':
# Sort by Gaussian index
indx = N.argsort(N.array(gausindx)).tolist()
else:
# Unrecognized property --> Don't sort
indx = list(range(len(gausindx)))
for i, si in enumerate(indx):
outlist_sorted[0][i] = outlist[0][si]
outnames_sorted[0][i] = outnames[0][si]
patchnames_sorted = list(patchnames)
else:
outlist_sorted = list(outlist)
outnames_sorted = list(outnames)
patchnames_sorted = list(patchnames)
if sort_by == 'flux':
# Sort by patch flux
indx = N.argsort(N.array(patchflux)).tolist()
indx.reverse()
elif sort_by == 'index':
# Sort by source index
indx = N.argsort(N.array(patchindx)).tolist()
else:
# Unrecognized property --> Don't sort
indx = list(range(len(gausindx)))
for i, si in enumerate(indx):
outlist_sorted[i] = outlist[si]
outnames_sorted[i] = outnames[si]
patchnames_sorted[i] = patchnames[si]
return (outlist_sorted, outnames_sorted, patchnames_sorted)
def make_output_columns(obj, fits=False, objtype='gaul', incl_spin=False,
incl_chan=False, incl_pol=False, incl_aper=False,
incl_empty=False, nmax=30, nchan=1):
"""Returns a list of column names, formats, and units for Gaussian, Source, or Shapelet"""
import numpy as N
# First, define a list of columns in order desired, using the names of
# the attributes of the object
if objtype == 'gaul':
names = ['gaus_num', 'island_id', 'source_id', 'jlevel',
'centre_sky', 'centre_skyE', 'total_flux',
'total_fluxE', 'peak_flux', 'peak_fluxE',
'centre_pix', 'centre_pixE', 'size_sky', 'size_skyE',
'size_sky_uncorr', 'size_skyE_uncorr',
'deconv_size_sky', 'deconv_size_skyE',
'deconv_size_sky_uncorr', 'deconv_size_skyE_uncorr',
'total_flux_isl', 'total_flux_islE', 'rms',
'mean', 'gresid_rms', 'gresid_mean',
'code']
elif objtype == 'srl':
if incl_aper:
infix = ['aperture_flux', 'aperture_fluxE']
else:
infix = []
names = ['source_id', 'island_id', 'posn_sky_centroid',
'posn_sky_centroidE', 'total_flux',
'total_fluxE',
'peak_flux_max', 'peak_flux_maxE'] + infix + \
['posn_sky_max', 'posn_sky_maxE',
'posn_pix_centroid', 'posn_pix_centroidE', 'posn_pix_max',
'posn_pix_maxE',
'size_sky', 'size_skyE',
'size_sky_uncorr', 'size_skyE_uncorr',
'deconv_size_sky', 'deconv_size_skyE',
'deconv_size_sky_uncorr', 'deconv_size_skyE_uncorr',
'total_flux_isl', 'total_flux_islE',
'rms_isl', 'mean_isl', 'gresid_rms',
'gresid_mean', 'code']
elif objtype == 'shap':
names = ['island_id', 'shapelet_posn_sky', 'shapelet_posn_skyE',
'shapelet_basis', 'shapelet_beta', 'shapelet_nmax', 'shapelet_cf']
else:
        print('Object type unrecognized.')
return (None, None, None, None)
if incl_spin:
names += ['spec_indx', 'e_spec_indx']
if incl_chan:
names += ['specin_flux', 'specin_fluxE', 'specin_freq']
if incl_pol:
names += ['total_flux_Q', 'total_fluxE_Q', 'total_flux_U', 'total_fluxE_U',
'total_flux_V', 'total_fluxE_V', 'lpol_fraction', 'lpol_fraction_loerr',
'lpol_fraction_hierr', 'cpol_fraction', 'cpol_fraction_loerr',
'cpol_fraction_hierr', 'tpol_fraction', 'tpol_fraction_loerr',
'tpol_fraction_hierr', 'lpol_angle', 'lpol_angle_err']
cnames = []
cformats = []
cunits = []
cvals = []
skip_next = False
for n, name in enumerate(names):
if hasattr(obj, name):
if name in ['specin_flux', 'specin_fluxE', 'specin_freq']:
# As these are variable length lists, they must
# (unfortunately) be treated differently.
val = obj.__getattribute__(name)
colname = obj.__dict__[name+'_def']._colname
units = obj.__dict__[name+'_def']._units
for i in range(nchan):
if i < len(val):
cvals.append(val[i])
cnames.append(colname[0]+'_ch'+str(i+1))
cunits.append(units[0])
else:
cvals.append(N.NaN)
cnames.append(colname[0]+'_ch'+str(i+1))
cunits.append(units[0])
else:
if not skip_next:
val = obj.__getattribute__(name)
colname = obj.__dict__[name+'_def']._colname
units = obj.__dict__[name+'_def']._units
if units is None:
units = ' '
if isinstance(val, list) or isinstance(val, tuple):
# This is a list, so handle it differently. We assume the next
# entry will have the errors, and they are interleaved to be
# in the order (val, error).
next_name = names[n+1]
val_next = obj.__getattribute__(next_name)
colname_next = obj.__dict__[next_name+'_def']._colname
units_next = obj.__dict__[next_name+'_def']._units
if units_next is None:
units_next = ' '
for i in range(len(val)):
cvals.append(val[i])
cvals.append(val_next[i])
cnames.append(colname[i])
cnames.append(colname_next[i])
cunits.append(units[i])
cunits.append(units_next[i])
skip_next = True
elif isinstance(val, N.ndarray):
# This is a numpy array, so flatten it
tarr = val.flatten()
tarr2 = N.resize(tarr, nmax**2)
tarr2[tarr.shape[0]:] = N.NaN
cvals.append(tarr2)
cnames.append(colname)
cunits.append(units)
else:
cvals.append(val)
cnames.append(colname)
cunits.append(units)
else:
skip_next = False
for i, v in enumerate(cvals):
if fits:
if isinstance(v, int):
cformats.append('J')
elif isinstance(v, float) or isinstance(v, N.float32) or isinstance(v, N.float64):
cformats.append('D')
elif isinstance(v, str):
cformats.append('A')
elif isinstance(v, N.ndarray):
cformats.append('%iD' % (nmax**2,))
else:
raise RuntimeError("Format not supported.")
else:
if isinstance(v, int):
cformats.append('{'+str(i)+':4d}')
elif isinstance(v, float) or isinstance(v, N.float32) or isinstance(v, N.float64):
cformats.append('{'+str(i)+':.14f}')
elif isinstance(v, str):
cformats.append('{'+str(i)+':4s}')
else:
raise RuntimeError("Format not supported.")
if objtype == 'gaul':
if obj.gaus_num < 0 and not incl_empty:
return (None, cnames, cformats, cunits)
if objtype == 'srl':
if obj.source_id < 0 and not incl_empty:
return (None, cnames, cformats, cunits)
return (cvals, cnames, cformats, cunits)
| 49,335 | 40.147623 | 130 |
py
|
PyBDSF
|
PyBDSF-master/bdsf/preprocess.py
|
"""Module preprocess
Calculates some basic statistics of the image and sets up processing
parameters for PyBDSM.
"""
from __future__ import absolute_import
import numpy as N
from . import _cbdsm
from .image import *
from math import pi, sqrt, log
from . import const
from . import functions as func
from . import mylogger
class Op_preprocess(Op):
"""Preprocessing -- calculate some basic statistics and set
processing parameters. Should assume that pixels outside the universe
are blanked in QC ? """
def __call__(self, img):
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Preprocess")
bstat = func.bstat
if img.opts.kappa_clip is None:
kappa = -img.pixel_beamarea()
else:
kappa = img.opts.kappa_clip
if img.opts.polarisation_do:
pols = ['I', 'Q', 'U', 'V']
ch0images = [img.ch0_arr, img.ch0_Q_arr, img.ch0_U_arr, img.ch0_V_arr]
img.clipped_mean_QUV = []
img.clipped_rms_QUV = []
else:
pols = ['I'] # assume I is always present
ch0images = [img.ch0_arr]
if hasattr(img, 'rms_mask'):
mask = img.rms_mask
else:
mask = img.mask_arr
opts = img.opts
for ipol, pol in enumerate(pols):
image = ch0images[ipol]
### basic stats
mean, rms, cmean, crms, cnt = bstat(image, mask, kappa)
if cnt > 198: cmean = mean; crms = rms
if pol == 'I':
if func.approx_equal(crms, 0.0, rel=None):
raise RuntimeError('Clipped rms appears to be zero. Check for regions '\
'with values of 0 and\nblank them (with NaNs) '\
'or use trim_box to exclude them.')
img.raw_mean = mean
img.raw_rms = rms
img.clipped_mean= cmean
img.clipped_rms = crms
mylog.info('%s %.4f %s %.4f %s ' % ("Raw mean (Stokes I) = ", mean*1000.0, \
'mJy and raw rms = ',rms*1000.0, 'mJy'))
mylog.info('%s %.4f %s %s %.4f %s ' % ("sigma clipped mean (Stokes I) = ", cmean*1000.0, \
'mJy and ','sigma clipped rms = ',crms*1000.0, 'mJy'))
else:
img.clipped_mean_QUV.append(cmean)
img.clipped_rms_QUV.append(crms)
mylog.info('%s %s %s %.4f %s %s %.4f %s ' % ("sigma clipped mean (Stokes ", pol, ") = ", cmean*1000.0, \
'mJy and ','sigma clipped rms = ',crms*1000.0, 'mJy'))
image = img.ch0_arr
# Check if pixels are outside the universe
if opts.check_outsideuniv:
mylogger.userinfo(mylog, "Checking for pixels outside the universe")
noutside_univ = self.outside_univ(img)
img.noutside_univ = noutside_univ
frac_blank = round(float(noutside_univ)/float(image.shape[0]*image.shape[1]),3)
mylogger.userinfo(mylog, "Number of additional pixels blanked", str(noutside_univ)
+' ('+str(frac_blank*100.0)+'%)')
else:
noutside_univ = 0
# If needed, (re)mask the image
if noutside_univ > 0:
mask = N.isnan(img.ch0_arr)
masked = mask.any()
img.masked = masked
if masked:
img.mask_arr = mask
img.blankpix = N.sum(mask)
### max/min pixel value & coordinates
shape = image.shape[0:2]
if mask is not None:
img.blankpix = N.sum(mask)
if img.blankpix == 0:
max_idx = image.argmax()
min_idx = image.argmin()
else:
max_idx = N.nanargmax(image)
min_idx = N.nanargmin(image)
img.maxpix_coord = N.unravel_index(max_idx, shape)
img.minpix_coord = N.unravel_index(min_idx, shape)
img.max_value = image.flat[max_idx]
img.min_value = image.flat[min_idx]
### Solid angle of the image
cdelt = N.array(img.wcs_obj.acdelt[:2])
img.omega = N.product(shape)*abs(N.product(cdelt))/(180.*180./pi/pi)
### Total flux in ch0 image
if 'atrous' in img.filename or img._pi or img.log == 'Detection image':
# Don't do this estimate for atrous wavelet images
# or polarized intensity image,
# as it doesn't give the correct flux. Also, ignore
# the flux in the detection image, as it's likely
# wrong (e.g., not corrected for the primary beam).
img.ch0_sum_jy = 0
else:
im_flux = N.nansum(image)/img.pixel_beamarea() # Jy
img.ch0_sum_jy = im_flux
mylogger.userinfo(mylog, 'Flux from sum of (non-blank) pixels',
'%.3f Jy' % (im_flux,))
### if image seems confused, then take background mean as zero instead
alpha_sourcecounts = 2.5 # approx diff src count slope. 2.2?
if opts.bmpersrc_th is None:
if mask is not None:
unmasked = N.where(~img.mask_arr)
n = (image[unmasked] >= 5.*crms).sum()
else:
n = (image >= 5.*crms).sum()
if n <= 0:
n = 1
mylog.info('No pixels in image > 5-sigma.')
mylog.info('Taking number of pixels above 5-sigma as 1.')
img.bmpersrc_th = N.product(shape)/((alpha_sourcecounts-1.)*n)
mylog.info('%s %6.2f' % ('Estimated bmpersrc_th = ', img.bmpersrc_th))
else:
img.bmpersrc_th = opts.bmpersrc_th
mylog.info('%s %6.2f' % ('Taking default bmpersrc_th = ', img.bmpersrc_th))
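        # bmpersrc_th is the estimated number of beams per source; small
        # values (<= 25 in the default test below) suggest a confused field,
        # in which case the background mean may later be taken as zero
        # (see the comment above).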
confused = False
if opts.mean_map == 'default':
if img.bmpersrc_th <= 25. or cmean/crms >= 0.1:
confused = True
img.confused = confused
mylog.info('Parameter confused is '+str(img.confused))
img.completed_Ops.append('preprocess')
return img
def outside_univ(self,img):
""" Checks if a pixel is outside the universe and is not blanked,
        and blanks it. (FITS files written by CASA don't do this). """
noutside = 0
n, m = img.ch0_arr.shape
for i in range(n):
for j in range(m):
out = False
err = ''
pix1 = (i,j)
try:
skyc = img.pix2sky(pix1)
pix2 = img.sky2pix(skyc)
if abs(pix1[0]-pix2[0]) > 0.5 or abs(pix1[1]-pix2[1]) > 0.5: out=True
                except RuntimeError as exc:
                    # The name bound by "except ... as" is cleared when the
                    # except block ends (Python 3), so save the message here.
                    err = str(exc)
                if out or ("8" in err):
noutside += 1
ch0 = img.ch0_arr
ch0[pix1] = float("NaN")
img.ch0_arr = ch0
return noutside
| 6,799 | 37.418079 | 116 |
py
|
PyBDSF
|
PyBDSF-master/bdsf/statusbar.py
|
"""Display an animated statusbar"""
from __future__ import absolute_import
import sys
import os
from . import functions as func
class StatusBar():
# class variables:
# max: number of total items to be completed
# pos: number of completed items
# spin_pos: current position in array of busy_chars
# inc: amount of items to increment completed 'pos' by
# (shared resource)
# comp: amount of '=' to display in the progress bar
# started: whether or not the statusbar has been started
# color: color of text
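    # Typical usage, mirroring how e.g. shapefit.py drives this class (the
    # label text and item count here are illustrative):
    #   bar = StatusBar('Fitting islands .............. : ', 0, n_items)
    #   bar.start()
    #   for _ in range(n_items):
    #       bar.increment()
    #   bar.stop()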
def __init__(self, text, pos=0, max=100, color='\033[0m'):
self.text = text
self.pos = pos
self.max = max
self.busy_char = '|'
self.spin_pos = 0
self.inc = 0
self.started = 0
self.color = color
self.__getsize()
if max > 0:
self.comp = int(float(self.pos) / self.max * self.columns)
else:
self.comp = 0
# find number of columns in terminal
def __getsize(self):
try:
rows, columns = func.getTerminalSize()
except ValueError:
rows = columns = 0
if int(columns) > self.max + 2 + 44 + (len(str(self.max))*2 + 2):
self.columns = self.max
else:
# note: -2 is for brackets, -44 for 'Fitting islands...' text, rest is for pos/max text
self.columns = int(columns) - 2 - 44 - (len(str(self.max))*2 + 2)
return
# redraw progress bar
def __print(self):
self.__getsize()
sys.stdout.write('\x1b[1G')
if self.max == 0:
sys.stdout.write(self.color + self.text + '[] 0/0\033[0m\n')
else:
sys.stdout.write(self.color + self.text + '[' + '=' * self.comp + self.busy_char + '-'*(self.columns - self.comp - 1) + '] ' + str(self.pos) + '/' + str(self.max) + '\033[0m')
sys.stdout.write('\x1b[' + str(self.comp + 2 + 44) + 'G')
sys.stdout.flush()
return
# spin the spinner by one increment
def spin(self):
busy_chars = ['|','/','-','\\']
self.spin_pos += 1
if self.spin_pos >= len(busy_chars):
self.spin_pos = 0
# display the busy spinning icon
self.busy_char = busy_chars[self.spin_pos]
sys.stdout.write(self.color + busy_chars[self.spin_pos] + '\x1b[1D' + '\033[0m')
sys.stdout.flush()
# increment number of completed items
def increment(self):
self.inc = 1
if (self.pos + self.inc) >= self.max:
self.pos = self.max
self.comp = self.columns
self.busy_char = ''
self.__print()
return 0
else:
self.pos += self.inc
self.inc = 0
self.spin()
self.comp = int(float(self.pos) / self.max \
* self.columns)
self.__print()
return 1
def start(self):
self.started = 1
self.__print()
def stop(self):
if self.started:
self.pos = self.max
self.comp = self.columns
self.busy_char = ''
self.__print()
sys.stdout.write('\n')
self.started = 0
return 0
| 3,257 | 31.58 | 187 |
py
|
PyBDSF
|
PyBDSF-master/bdsf/rmsimage.py
|
"""Module rmsimage.
Defines operation Op_rmsimage which calculates mean and
rms maps.
The current implementation will handle both 2D and 3D images,
where for 3D case it will calculate maps for each plane (=
Stokes images).
"""
from __future__ import absolute_import
import numpy as N
import scipy.ndimage as nd
from . import _cbdsm
from .image import Op, Image, NArray, List
from . import const
from . import mylogger
import os
from . import functions as func
from . import multi_proc as mp
import itertools
try:
from itertools import izip as zip
except ImportError: # will be 3.x series
pass
from .functions import read_image_from_file
class Op_rmsimage(Op):
"""Calculate rms & noise maps
Prerequisites: Module preprocess should be run first.
"""
def __call__(self, img):
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"RMSimage")
mylogger.userinfo(mylog, "Calculating background rms and mean images")
if img.opts.polarisation_do:
pols = ['I', 'Q', 'U', 'V']
ch0_images = [img.ch0_arr, img.ch0_Q_arr, img.ch0_U_arr, img.ch0_V_arr]
cmeans = [img.clipped_mean] + img.clipped_mean_QUV
crmss = [img.clipped_rms] + img.clipped_rms_QUV
else:
pols = ['I'] # assume I is always present
ch0_images = [img.ch0_arr]
cmeans = [img.clipped_mean]
crmss = [img.clipped_rms]
mask = img.mask_arr
opts = img.opts
cdelt = N.array(img.wcs_obj.acdelt[:2])
# Determine box size for rms/mean map calculations.
# If user specifies rms_box, use it. Otherwise, use either an
# adaptive binning scheme that shrinks the box near
# the brightest sources or estimate rms_box from bright sources.
#
# The adaptive scheme calculates the rms/mean map
# at two different scales:
# 1) using a large rms_box, set by size of largest source
# 2) using a small rms_box, set by size of largest bright source
# Then, the rms and mean values at a given point are determined
# by a weighted average of the values in the maps at the two
# scales.
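        # For example (illustrative values only), the adaptive scheme is
        # normally enabled from the user side with options such as
        #   bdsf.process_image('example.fits', adaptive_rms_box=True,
        #                      rms_box=(160, 40), rms_box_bright=(40, 10),
        #                      adaptive_thresh=150.0)
        # which set the opts values consulted below.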
fwsig = const.fwsig
min_adapt_threshold = 10.0
if opts.adaptive_thresh is None:
adapt_thresh = 50.0
start_thresh = 500.0
else:
adapt_thresh = opts.adaptive_thresh
if adapt_thresh < min_adapt_threshold:
adapt_thresh = min_adapt_threshold
opts.adaptive_thresh = min_adapt_threshold
start_thresh = adapt_thresh
brightsize = None
isl_pos = []
do_adapt = img.opts.adaptive_rms_box
img.use_rms_map = None
img.mean_map_type = None
# 'size' of brightest source
kappa1 = 3.0
        try:
            # Use numpy's sqrt/log (math is not imported here) and the Stokes I
            # clipped rms, which is not unpacked into crms until further below.
            brightsize = int(round(2.*img.beam[0]/cdelt[0]/fwsig*
                                   N.sqrt(2.*N.log(img.max_value/(kappa1*crmss[0])))))
        except Exception:
            brightsize = int(round(2.*img.beam[0]/cdelt[0]/fwsig))
mylog.info('Estimated size of brightest source (pixels) = '+str(brightsize))
# Using clipped mean and rms and a starting threshold of 500 sigma,
# search for bright sources. If fewer than 5 are found, reduce
# threshold until limit set by adapt_thresh is hit.
cmean = cmeans[0]
crms = crmss[0]
image = ch0_images[0]
shape = image.shape
isl_size_bright = []
isl_area_highthresh = []
isl_peak = []
max_isl_brightsize = 0.0
threshold = start_thresh
if do_adapt:
mylogger.userinfo(mylog, "Using adaptive scaling of rms_box")
while len(isl_size_bright) < 5 and threshold >= adapt_thresh:
isl_size_bright=[]
isl_maxposn = []
if img.masked:
act_pixels = ~(mask.copy())
act_pixels[~mask] = (image[~mask]-cmean)/threshold >= crms
else:
act_pixels = (image-cmean)/threshold >= crms
threshold *= 0.8
rank = len(image.shape)
connectivity = nd.generate_binary_structure(rank, rank)
labels, count = nd.label(act_pixels, connectivity)
slices = nd.find_objects(labels)
for idx, s in enumerate(slices):
isl_size_bright.append(max([s[0].stop-s[0].start, s[1].stop-s[1].start]))
size_area = (labels[s] == idx+1).sum()/img.pixel_beamarea()*2.0
isl_area_highthresh.append(size_area)
isl_maxposn.append(tuple(N.array(N.unravel_index(N.argmax(image[s]), image[s].shape))+\
N.array((s[0].start, s[1].start))))
isl_peak.append(nd.maximum(image[s], labels[s], idx+1))
# Check islands found above at thresh_isl threshold to determine if
# the bright source is embedded inside a large island or not. If it is,
# exclude it from the bright-island list. Also find the size of the
# largest island at this threshold to set the large-scale rms_box
bright_threshold = threshold
threshold = 10.0
if img.masked:
act_pixels = ~(mask.copy())
act_pixels[~mask] = (image[~mask]-cmean)/threshold >= crms
else:
act_pixels = (image-cmean)/threshold >= crms
rank = len(image.shape)
connectivity = nd.generate_binary_structure(rank, rank)
labels, count = nd.label(act_pixels, connectivity)
slices = nd.find_objects(labels)
isl_size = []
isl_size_highthresh = []
isl_size_lowthresh = []
isl_snr = []
thratio = threshold/bright_threshold
for idx, s in enumerate(slices):
isl_area_lowthresh = (labels[s] == idx+1).sum()/img.pixel_beamarea()*2.0
isl_maxposn_lowthresh = tuple(N.array(N.unravel_index(N.argmax(image[s]), image[s].shape))+
N.array((s[0].start, s[1].start)))
isl_size += [s[0].stop-s[0].start, s[1].stop-s[1].start]
if do_adapt and isl_maxposn_lowthresh in isl_maxposn:
bright_indx = isl_maxposn.index(isl_maxposn_lowthresh)
if isl_area_lowthresh < 25.0 or isl_area_lowthresh/isl_area_highthresh[bright_indx] < 8.0:
isl_pos.append(isl_maxposn_lowthresh)
isl_size_lowthresh.append(max([s[0].stop-s[0].start, s[1].stop-s[1].start]))
isl_size_highthresh.append(isl_size_bright[bright_indx])
isl_snr.append(isl_peak[bright_indx]/crms)
if len(isl_size) == 0:
max_isl_size = 0.0
else:
max_isl_size = max(isl_size)
mylog.info('Maximum extent of largest 10-sigma island using clipped rms (pixels) = '+str(max_isl_size))
if len(isl_size_highthresh) == 0:
max_isl_size_highthresh = 0.0
max_isl_size_lowthresh = 0.0
else:
max_isl_size_highthresh = max(isl_size_highthresh)
max_isl_size_lowthresh = max(isl_size_lowthresh)
avg_max_isl_size = (max_isl_size_highthresh + max_isl_size_lowthresh) / 2.0
if hasattr(img, '_adapt_rms_isl_pos'):
isl_pos = img._adapt_rms_isl_pos # set isl_pos to existing value (for wavelet analysis)
if len(isl_pos) == 0:
# No bright sources found
do_adapt = False
else:
img._adapt_rms_isl_pos = isl_pos
min_size_allowed = int(img.pixel_beam()[0]*9.0)
if opts.rms_box is None or (opts.rms_box_bright is None and do_adapt):
if do_adapt:
bsize = int(max(brightsize, min_size_allowed, max_isl_size_highthresh*2.0))
else:
bsize = int(max(brightsize, min_size_allowed, max_isl_size*2.0))
bsize2 = int(max(min(image.shape)/10.0, max_isl_size*5.0))
if bsize < min_size_allowed:
bsize = min_size_allowed
if bsize % 10 == 0: bsize += 1
if bsize2 < min_size_allowed:
bsize2 = min_size_allowed
if bsize2 % 10 == 0: bsize2 += 1
bstep = int(round(min(bsize/3., min(shape)/10.)))
bstep2 = int(round(min(bsize2/3., min(shape)/10.)))
if opts.rms_box_bright is None:
img.rms_box_bright = (bsize, bstep)
else:
img.rms_box_bright = opts.rms_box_bright
if opts.rms_box is None:
img.rms_box = (bsize2, bstep2)
else:
img.rms_box = opts.rms_box
else:
if do_adapt:
img.rms_box_bright = opts.rms_box_bright
img.rms_box = opts.rms_box
else:
img.rms_box_bright = opts.rms_box
img.rms_box = opts.rms_box
if opts.kappa_clip is None:
kappa = -img.pixel_beamarea()
else:
kappa = img.opts.kappa_clip
if do_adapt:
map_opts = (kappa, img.rms_box_bright, opts.spline_rank)
else:
map_opts = (kappa, img.rms_box, opts.spline_rank)
for ipol, pol in enumerate(pols):
data = ch0_images[ipol]
mean = N.zeros(data.shape, dtype=N.float32)
rms = N.zeros(data.shape, dtype=N.float32)
if len(pols) > 1:
pol_txt = ' (' + pol + ')'
else:
pol_txt = ''
## calculate rms/mean maps if needed
if ((opts.rms_map is not False) or (opts.mean_map not in ['zero', 'const'])) and img.rms_box[0] > min(image.shape)/4.0:
# rms box is too large - just use constant rms and mean
self.output_rmsbox_size(img)
mylogger.userinfo(mylog, 'Size of rms_box larger than 1/4 of image size')
mylogger.userinfo(mylog, 'Using constant background rms and mean')
img.use_rms_map = False
img.mean_map_type = 'const'
else:
if opts.rmsmean_map_filename is not None and len(opts.rmsmean_map_filename)!=0:
# from astropy.io import fits as pyfits
def CheckShape(A):
if len(A.shape)!=4:
raise RuntimeError("Array shape should be len 4 (nch,npol,nx,ny)")
if A.shape[0]!=1:
raise RuntimeError("Array should be single channel")
if A.shape[1]!=1:
raise RuntimeError("Array should be single pol")
mean_fits_name,rms_fits_name=opts.rmsmean_map_filename
mylogger.userinfo(mylog, "Skipping mean and rms image computation as external images supplied")
mylogger.userinfo(mylog, " Opening mean image: %s"%mean_fits_name)
# mean = pyfits.open(mean_fits_name, mode="readonly")[0].data
mean, hdr = read_image_from_file(mean_fits_name, img, img.indir)
CheckShape(mean); mean = mean[0,0]
mylogger.userinfo(mylog, " Opening rms image: %s"%rms_fits_name)
# rms = pyfits.open(rms_fits_name, mode="readonly")[0].data
rms, hdr = read_image_from_file(rms_fits_name, img, img.indir)
CheckShape(rms); rms = rms[0,0]
elif (opts.rms_map is not False) or (opts.mean_map not in ['zero', 'const']):
if len(data.shape) == 2: ## 2d case
mean, rms = self.calculate_maps(img, data, mean, rms, mask, map_opts, do_adapt=do_adapt,
bright_pt_coords=isl_pos, rms_box2=img.rms_box,
logname="PyBDSM."+img.log, ncores=img.opts.ncores)
elif len(data.shape) == 3: ## 3d case
if not isinstance(mask, N.ndarray):
mask = N.zeros(data.shape[0], dtype=bool)
for i in range(data.shape[0]):
## iterate each plane
mean, rms = self.calculate_maps(img, data[i], mean[i], rms[i], mask[i], map_opts,
do_adapt=do_adapt, bright_pt_coords=isl_pos,
rms_box2=img.rms_box, logname="PyBDSM."+img.log,
ncores=img.opts.ncores)
else:
mylog.critical('Image shape not handleable' + pol_txt)
raise RuntimeError("Can't handle array of this shape" + pol_txt)
self.output_rmsbox_size(img)
if do_adapt:
mylogger.userinfo(mylog, 'Number of sources using small scale', str(len(isl_pos)))
mylog.info('Background rms and mean images computed' + pol_txt)
## check if variation of rms/mean maps is significant enough:
# check_rmsmap() sets img.use_rms_map
# check_meanmap() sets img.mean_map_type
if pol == 'I':
if opts.rms_map is None and img.use_rms_map is None:
if do_adapt and len(isl_pos) > 0:
# Always use 2d map if there is at least one bright
# source and adaptive scaling is desired
img.use_rms_map = True
else:
self.check_rmsmap(img, rms)
elif opts.rms_map is not None:
img.use_rms_map = opts.rms_map
if img.use_rms_map is False:
mylogger.userinfo(mylog, 'Using constant background rms')
else:
mylogger.userinfo(mylog, 'Using 2D map for background rms')
if opts.mean_map == 'default' and img.mean_map_type is None:
self.check_meanmap(img, rms)
elif opts.mean_map != 'default':
img.mean_map_type = opts.mean_map
if img.mean_map_type != 'map':
mylogger.userinfo(mylog, 'Using constant background mean')
else:
mylogger.userinfo(mylog, 'Using 2D map for background mean')
## if rms map is insignificant, or rms_map==False use const value
if img.use_rms_map is False:
if opts.rms_value is None:
rms[:] = crmss[ipol]
else:
rms[:] = opts.rms_value
mylogger.userinfo(mylog, 'Value of background rms' + pol_txt,
'%.2e Jy/beam' % rms[0][0])
else:
rms_min = N.nanmin(rms)
rms_max = N.nanmax(rms)
mylogger.userinfo(mylog, 'Min/max values of background rms map' + pol_txt,
'(%.2e, %.2e) Jy/beam' % (rms_min, rms_max))
if img.mean_map_type != 'map':
if opts.mean_map == 'zero':
val = 0.0
else:
val = img.clipped_mean
mean[:] = val
mylogger.userinfo(mylog, 'Value of background mean' + pol_txt,
str(round(val,5))+' Jy/beam')
else:
mean_min = N.nanmin(mean)
mean_max = N.nanmax(mean)
mylogger.userinfo(mylog, 'Min/max values of background mean map' + pol_txt,
'(%.2e, %.2e) Jy/beam' % (mean_min, mean_max))
if pol == 'I':
# Apply mask to mean_map and rms_map by setting masked values to NaN
if isinstance(mask, N.ndarray):
pix_masked = N.where(mask == True)
mean[pix_masked] = N.nan
rms[pix_masked] = N.nan
img.mean_arr = mean
img.rms_arr = rms
if opts.savefits_rmsim or opts.output_all:
if img.waveletimage:
resdir = img.basedir + '/wavelet/background/'
else:
resdir = img.basedir + '/background/'
if not os.path.exists(resdir): os.makedirs(resdir)
func.write_image_to_file(img.use_io, img.imagename + '.rmsd_I.fits', rms, img, resdir)
mylog.info('%s %s' % ('Writing ', resdir+img.imagename+'.rmsd_I.fits'))
if opts.savefits_meanim or opts.output_all:
if img.waveletimage:
resdir = img.basedir + '/wavelet/background/'
else:
resdir = img.basedir + '/background/'
if not os.path.exists(resdir): os.makedirs(resdir)
func.write_image_to_file(img.use_io, img.imagename + '.mean_I.fits', mean, img, resdir)
mylog.info('%s %s' % ('Writing ', resdir+img.imagename+'.mean_I.fits'))
if opts.savefits_normim or opts.output_all:
if img.waveletimage:
resdir = img.basedir + '/wavelet/background/'
else:
resdir = img.basedir + '/background/'
if not os.path.exists(resdir): os.makedirs(resdir)
zero_pixels = N.where(rms <= 0.0)
rms_nonzero = rms.copy()
                    rms_nonzero[zero_pixels] = N.nan
func.write_image_to_file(img.use_io, img.imagename + '.norm_I.fits', (image-mean)/rms_nonzero, img, resdir)
mylog.info('%s %s' % ('Writing ', resdir+img.imagename+'.norm_I.fits'))
else:
img.__setattr__('mean_'+pol+'_arr', mean)
img.__setattr__('rms_'+pol+'_arr', rms)
img.completed_Ops.append('rmsimage')
return img
def check_rmsmap(self, img, rms):
"""Calculates the statistics of the rms map and decides, when
rms_map=None, whether to take the map (if variance
is significant) or a constant value
"""
from math import sqrt
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Rmsimage.Checkrms ")
cdelt = img.wcs_obj.acdelt[:2]
bm = (img.beam[0], img.beam[1])
fw_pix = sqrt(N.product(bm)/abs(N.product(cdelt)))
if img.masked:
unmasked = N.where(~img.mask_arr)
stdsub = N.std(rms[unmasked])
maxrms = N.max(rms[unmasked])
else:
stdsub = N.std(rms)
maxrms = N.max(rms)
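        # Expected scatter of the per-box rms estimates: a box of side
        # rms_box[0] pixels contains of order (rms_box[0]/fw_pix)**2 beams, and
        # the standard error of an rms estimate from N samples is roughly
        # sigma/sqrt(2N), which gives the expression below.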
rms_expect = img.clipped_rms/sqrt(2)/img.rms_box[0]*fw_pix
mylog.debug('%s %10.6f %s' % ('Standard deviation of rms image = ', stdsub*1000.0, 'mJy'))
mylog.debug('%s %10.6f %s' % ('Expected standard deviation = ', rms_expect*1000.0, 'mJy'))
if stdsub > 1.1*rms_expect:
img.use_rms_map = True
mylogger.userinfo(mylog, 'Variation in rms image significant')
else:
img.use_rms_map = False
mylogger.userinfo(mylog, 'Variation in rms image not significant')
return img
def check_meanmap(self, img, mean):
"""Calculates the statistics of the mean map and decides, when
mean_map=None, whether to take the map (if variance
is significant) or a constant value
"""
from math import sqrt
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Rmsimage.Checkmean ")
cdelt = img.wcs_obj.acdelt[:2]
bm = (img.beam[0], img.beam[1])
fw_pix = sqrt(N.product(bm)/abs(N.product(cdelt)))
if img.masked:
unmasked = N.where(~img.mask_arr)
stdsub = N.std(mean[unmasked])
maxmean = N.max(mean[unmasked])
else:
stdsub = N.std(mean)
maxmean = N.max(mean)
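        # Expected scatter of the per-box mean estimates: the standard error of
        # a mean over N ~ (rms_box[0]/fw_pix)**2 beams is roughly sigma/sqrt(N),
        # which gives the expression below.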
rms_expect = img.clipped_rms/img.rms_box[0]*fw_pix
mylog.debug('%s %10.6f %s' % ('Standard deviation of mean image = ', stdsub*1000.0, 'mJy'))
mylog.debug('%s %10.6f %s' % ('Expected standard deviation = ', rms_expect*1000.0, 'mJy'))
# For mean map, use a higher threshold than for the rms map, as radio images
# should rarely, if ever, have significant variations in the mean
if stdsub > 5.0*rms_expect:
img.mean_map_type = 'map'
mylogger.userinfo(mylog, 'Variation in mean image significant')
else:
if img.confused:
img.mean_map_type = 'zero'
else:
img.mean_map_type = 'const'
mylogger.userinfo(mylog, 'Variation in mean image not significant')
return img
def calculate_maps(self, img, data, mean, rms, mask, map_opts, do_adapt,
bright_pt_coords=[], rms_box2=None,
logname=None, ncores=None):
"""Calls map_2d and checks for problems"""
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Rmsimage.Calcmaps ")
rms_ok = False
opts = img.opts
kappa = map_opts[0]
spline_rank = opts.spline_rank
while not rms_ok:
self.map_2d(data, mean, rms, mask, *map_opts, do_adapt=do_adapt,
bright_pt_coords=bright_pt_coords, rms_box2=rms_box2,
logname=logname, ncores=ncores)
if img.masked:
test = N.any(rms[~img.mask_arr] < 0.0)
else:
test = N.any(rms < 0.0)
if test:
rms_ok = False
if (opts.rms_box_bright is None and do_adapt) or (opts.rms_box is None and not do_adapt):
# Increase box by 20%
if do_adapt:
new_width = int(img.rms_box_bright[0]*1.2)
if new_width == img.rms_box_bright[0]:
new_width = img.rms_box_bright[0] + 1
new_step = int(new_width/3.0)
img.rms_box_bright = (new_width, new_step)
if img.rms_box_bright[0] > min(img.ch0_arr.shape)/4.0:
mylogger.userinfo(mylog, 'Size of rms_box_bright larger than 1/4 of image size')
mylogger.userinfo(mylog, 'Using constant background rms and mean')
img.use_rms_map = False
img.rms_box = img.rms_box_bright
img.mean_map_type = 'const'
rms_ok = True
else:
map_opts = (kappa, img.rms_box_bright, spline_rank)
else:
new_width = int(img.rms_box[0]*1.2)
if new_width == img.rms_box[0]:
new_width = img.rms_box[0] + 1
new_step = int(new_width/3.0)
img.rms_box = (new_width, new_step)
if img.rms_box[0] > min(img.ch0_arr.shape)/4.0:
mylogger.userinfo(mylog, 'Size of rms_box larger than 1/4 of image size')
mylogger.userinfo(mylog, 'Using constant background rms and mean')
img.use_rms_map = False
img.mean_map_type = 'const'
rms_ok = True
else:
map_opts = (kappa, img.rms_box, spline_rank)
else:
# User has specified box size, use order=1 to prevent negatives
if spline_rank > 1:
mylog.warning('Negative values found in rms map interpolated with spline_rank = %i' % spline_rank)
mylog.warning('Using spline_rank = 1 (bilinear interpolation) instead')
spline_rank = 1
if do_adapt:
map_opts = (kappa, img.rms_box_bright, spline_rank)
else:
map_opts = (kappa, img.rms_box, spline_rank)
else:
raise RuntimeError('RMS map has negative values')
else:
rms_ok = True
return mean, rms
def map_2d(self, arr, out_mean, out_rms, mask=False,
kappa=3, box=None, interp=1, do_adapt=False,
bright_pt_coords=None, rms_box2=None, logname='', ncores=None):
"""Calculate mean&rms maps and store them into provided arrays
Parameters:
arr: 2D array with data
out_mean, out_rms: 2D arrays where to store calculated maps
mask: mask
kappa: clipping value for rms/mean calculations
box: tuple of (box_size, box_step) for calculating map
            rms_box2: large-scale box size (used when do_adapt is True)
interp: order of interpolating spline used to interpolate
calculated map
do_adapt: use adaptive binning
"""
mask_small = mask
axes, mean_map1, rms_map1 = self.rms_mean_map(arr, mask_small, kappa, box, ncores)
ax = [self.remap_axis(ashp, axv) for (ashp, axv) in zip(arr.shape, axes)]
ax = N.meshgrid(*ax[-1::-1])
pt_src_scale = box[0]
if do_adapt:
out_rms2 = N.zeros(rms_map1.shape, dtype=N.float32)
out_mean2 = N.zeros(rms_map1.shape, dtype=N.float32)
# Generate rms/mean maps on large scale
box2 = rms_box2
axes2, mean_map2, rms_map2 = self.rms_mean_map(arr, mask, kappa, box2, ncores)
# Interpolate to get maps on small scale grid
axes2mod = axes2[:]
axes2mod[0] = axes2[0]/arr.shape[0]*mean_map1.shape[0]
axes2mod[1] = axes2[1]/arr.shape[1]*mean_map1.shape[1]
ax2 = [self.remap_axis(ashp, axv) for (ashp, axv) in zip(out_rms2.shape, axes2mod)]
ax2 = N.meshgrid(*ax2[-1::-1])
nd.map_coordinates(rms_map2, ax2[-1::-1], order=interp, output=out_rms2)
nd.map_coordinates(mean_map2, ax2[-1::-1], order=interp, output=out_mean2)
rms_map = out_rms2
mean_map = out_mean2
# For each bright source, find nearest points and weight them towards
# the small scale maps.
xscale = float(arr.shape[0])/float(out_rms2.shape[0])
yscale = float(arr.shape[1])/float(out_rms2.shape[1])
scale = [xscale, yscale]
size = 15
for bright_pt in bright_pt_coords:
bbox, src_center = self.make_bright_src_bbox(bright_pt, scale, size, out_rms2.shape)
bbox_xsize = bbox[0].stop-bbox[0].start
bbox_ysize = bbox[1].stop-bbox[1].start
src_center[0] -= bbox[0].start
src_center[1] -= bbox[1].start
weights = N.ones((bbox_xsize, bbox_ysize))
# Taper weights to zero where small-scale value is within a factor of
# 2 of large-scale value. Use distance to center of the box
# to determine taper value. This tapering prevents the use of the
# small-scale box beyond the range of artifacts.
low_vals_ind = N.where(rms_map1[tuple(bbox)]/out_rms2[tuple(bbox)] < 2.0)
if len(low_vals_ind[0]) > 0:
dist_to_cen = []
for (x,y) in zip(low_vals_ind[0],low_vals_ind[1]):
dist_to_cen.append(N.sqrt( (x-src_center[0])**2 +
(y-src_center[1])**2 ))
med_dist_to_cen = N.min(dist_to_cen)
for x in range(bbox_xsize):
for y in range(bbox_ysize):
dist_to_cen = N.sqrt( (x-src_center[0])**2 +
(y-src_center[1])**2 )
if dist_to_cen >= med_dist_to_cen:
weights[x,y] = 1.0 - dist_to_cen/N.sqrt(bbox_xsize**2+bbox_ysize**2)*2.0
rms_map[tuple(bbox)] = rms_map1[tuple(bbox)]*weights + out_rms2[tuple(bbox)]*(1.0-weights)
mean_map[tuple(bbox)] = mean_map1[tuple(bbox)]*weights + out_mean2[tuple(bbox)]*(1.0-weights)
else:
rms_map = rms_map1
mean_map = mean_map1
# Interpolate to image coords
mylog = mylogger.logging.getLogger(logname+"Rmsimage")
nd.map_coordinates(rms_map, ax[-1::-1], order=interp, output=out_rms)
nd.map_coordinates(mean_map, ax[-1::-1], order=interp, output=out_mean)
# Apply mask to mean_map and rms_map by setting masked values to NaN
if isinstance(mask, N.ndarray):
pix_masked = N.where(mask == True)
out_mean[pix_masked] = N.nan
out_rms[pix_masked] = N.nan
def rms_mean_map(self, arr, mask=False, kappa=3, box=None, ncores=None):
"""Calculate map of the mean/rms values
Parameters:
arr: 2D array with data
mask: mask
kappa: clipping for calculating rms/mean within each box
box: box parameters (box_size, box_step)
Returns:
            axes: list of 2 arrays with coordinates of boxes along each axis
mean_map: map of mean values
rms_map: map of rms values
Description:
This function calculates clipped mean and rms maps for the array.
The algorithm is a moving-window algorithm, where mean&rms are
        calculated within a window of size (box_size * box_size), and the
        window is stepped within the image in steps of box_step.
Special care is taken for the borders of the image -- outer borders
(where box doesn't fit properly) are given one extra round with a box
applied to the border of the image. Additionally outer values are
extrapolated to cover whole image size, to simplify further processing.
        See also routine 'remap_axis' for 'inverting' the axes array.
Example:
for an input image of 100x100 pixels calling rms_mean_map with default
box parameters (50, 25) will result in the following:
axes = [array([ 0. , 24.5, 49.5, 74.5, 99. ]),
array([ 0. , 24.5, 49.5, 74.5, 99. ])]
mean_map = <5x5 array>
rms_map = <5x5 array>
rms_map[1,1] is calculated for arr[0:50, 0:50]
rms_map[2,1] is calculated for arr[25:75, 0:50]
...etc...
rms_map[0,0] is extrapolated as .5*(rms_map[0,1] + rms_map[1,0])
rms_map[0,1] is extrapolated as rms_map[1,1]
"""
mylog = mylogger.logging.getLogger("PyBDSM.RmsMean")
if box is None:
box = (50, 25)
if box[0] < box[1]:
raise RuntimeError('Box size is less than step size.')
# Some math first: boxcount is number of boxes alongsize each axis,
# bounds is non-zero for axes which have extra pixels beyond last box
BS, SS = box
imgshape = N.array(arr.shape)
        # If box size is less than 10% of the image, use simple extrapolation to
# derive the edges of the mean and rms maps; otherwise, use padded
# versions of arr and mask to derive the mean and rms maps
if float(BS)/float(imgshape[0]) < 0.1 and \
float(BS)/float(imgshape[1]) < 0.1:
use_extrapolation = True
else:
use_extrapolation = False
if use_extrapolation:
boxcount = 1 + (imgshape - BS)/SS
bounds = N.asarray((boxcount-1)*SS + BS < imgshape, dtype=int)
mapshape = 2 + boxcount + bounds
else:
boxcount = 1 + imgshape/SS
bounds = N.asarray((boxcount-1)*SS < imgshape, dtype=int)
mapshape = boxcount + bounds
pad_border_size = int(BS/2.0)
new_shape = (arr.shape[0] + 2*pad_border_size, arr.shape[1]
+ 2*pad_border_size)
arr_pad = self.pad_array(arr, new_shape)
if mask is None:
mask_pad = None
else:
mask_pad = self.pad_array(mask, new_shape)
# Make arrays for calculated data
mapshape = [int(ms) for ms in mapshape]
boxcount = [int(bc) for bc in boxcount]
mean_map = N.zeros(mapshape, dtype=N.float32)
rms_map = N.zeros(mapshape, dtype=N.float32)
axes = [N.zeros(len, dtype=N.float32) for len in mapshape]
# Step 1: internal area of the image
# Make a list of coordinates to send to process_mean_rms_maps()
coord_list = []
ind_list = []
for i in range(boxcount[0]):
for j in range(boxcount[1]):
if use_extrapolation:
coord_list.append((i+1, j+1))
else:
coord_list.append((i, j))
ind_list.append([i*SS, i*SS+BS, j*SS, j*SS+BS])
# Now call the parallel mapping function. Returns a list of [mean, rms]
# for each coordinate.
if use_extrapolation:
cm_cr_list = mp.parallel_map(func.eval_func_tuple,
zip(itertools.repeat(self.process_mean_rms_maps),
ind_list, itertools.repeat(mask), itertools.repeat(arr),
itertools.repeat(kappa)), numcores=ncores)
else:
cm_cr_list = mp.parallel_map(func.eval_func_tuple,
zip(itertools.repeat(self.process_mean_rms_maps),
ind_list, itertools.repeat(mask_pad), itertools.repeat(arr_pad),
itertools.repeat(kappa)), numcores=ncores)
for i, co in enumerate(coord_list):
cm, cr = cm_cr_list[i]
mean_map[co] = cm
rms_map[co] = cr
# Check if all regions have too few unmasked pixels
if mask is not None and N.size(N.where(mean_map != N.inf)) == 0:
raise RuntimeError("No unmasked regions from which to determine "\
"mean and rms maps")
# Step 2: borders of the image
if bounds[0]:
coord_list = []
ind_list = []
for j in range(boxcount[1]):
if use_extrapolation:
coord_list.append((-2, j+1))
ind_list.append([-BS, arr.shape[0], j*SS,j*SS+BS])
else:
coord_list.append((-1, j))
ind_list.append([-BS, arr_pad.shape[0], j*SS,j*SS+BS])
if use_extrapolation:
cm_cr_list = mp.parallel_map(func.eval_func_tuple,
zip(itertools.repeat(self.process_mean_rms_maps),
ind_list, itertools.repeat(mask), itertools.repeat(arr),
itertools.repeat(kappa)), numcores=ncores)
else:
cm_cr_list = mp.parallel_map(func.eval_func_tuple,
zip(itertools.repeat(self.process_mean_rms_maps),
ind_list, itertools.repeat(mask_pad), itertools.repeat(arr_pad),
itertools.repeat(kappa)), numcores=ncores)
for i, co in enumerate(coord_list):
cm, cr = cm_cr_list[i]
mean_map[co] = cm
rms_map[co] = cr
if bounds[1]:
coord_list = []
ind_list = []
for i in range(boxcount[0]):
if use_extrapolation:
coord_list.append((i+1, -2))
ind_list.append([i*SS,i*SS+BS, -BS,arr.shape[1]])
else:
coord_list.append((i, -1))
ind_list.append([i*SS,i*SS+BS, -BS,arr_pad.shape[1]])
if use_extrapolation:
cm_cr_list = mp.parallel_map(func.eval_func_tuple,
zip(itertools.repeat(self.process_mean_rms_maps),
ind_list, itertools.repeat(mask), itertools.repeat(arr),
itertools.repeat(kappa)), numcores=ncores)
else:
cm_cr_list = mp.parallel_map(func.eval_func_tuple,
zip(itertools.repeat(self.process_mean_rms_maps),
ind_list, itertools.repeat(mask_pad), itertools.repeat(arr_pad),
itertools.repeat(kappa)), numcores=ncores)
for i, co in enumerate(coord_list):
cm, cr = cm_cr_list[i]
mean_map[co] = cm
rms_map[co] = cr
if bounds.all():
if use_extrapolation:
ind = [-BS,arr.shape[0], -BS,arr.shape[1]]
self.for_masked(mean_map, rms_map, mask, arr, ind,
kappa, [-2, -2])
else:
ind = [-BS,arr_pad.shape[0], -BS,arr_pad.shape[1]]
self.for_masked(mean_map, rms_map, mask_pad, arr_pad, ind,
kappa, [-1, -1])
# Step 3: correct(extrapolate) borders of the image
def correct_borders(map):
map[0, :] = map[1, :]
map[:, 0] = map[:, 1]
map[-1, :] = map[-2, :]
map[:, -1] = map[:, -2]
map[0,0] = (map[1,0] + map[0, 1])/2.
map[-1,0] = (map[-2, 0] + map[-1, 1])/2.
map[0, -1] = (map[0, -2] + map[1, -1])/2.
map[-1,-1] = (map[-2, -1] + map[-1, -2])/2.
if use_extrapolation:
correct_borders(mean_map)
correct_borders(rms_map)
# Step 4: fill in coordinate axes
for i in range(2):
if use_extrapolation:
axes[i][1:boxcount[i]+1] = (N.arange(boxcount[i])*SS
+ BS/2. - .5)
if bounds[i]:
axes[i][-2] = imgshape[i] - BS/2. - .5
else:
axes[i][0:boxcount[i]] = N.arange(boxcount[i])*SS - .5
if bounds[i]:
axes[i][-2] = imgshape[i] - .5
axes[i][-1] = imgshape[i] - 1
# Step 5: fill in boxes with < 5 unmasked pixels (set to values of
# N.inf)
unmasked_boxes = N.where(mean_map != N.inf)
if N.size(unmasked_boxes,1) < mapshape[0]*mapshape[1]:
mean_map = self.fill_masked_regions(mean_map)
rms_map = self.fill_masked_regions(rms_map)
return axes, mean_map, rms_map
def process_mean_rms_maps(self, ind, mask, arr, kappa):
"""Finds mean and rms for one region of an input arr"""
cm, cr = self.for_masked_mp(mask, arr, ind,
kappa)
return cm, cr
def fill_masked_regions(self, themap, magic=N.inf):
"""Fill masked regions (defined where values == magic) in themap.
"""
masked_boxes = N.where(themap == magic) # locations of masked regions
for i in range(N.size(masked_boxes,1)):
num_unmasked = 0
x, y = masked_boxes[0][i], masked_boxes[1][i]
delx = dely = 1
while num_unmasked == 0:
x1 = x - delx
if x1 < 0: x1 = 0
x2 = x + 1 + delx
if x2 > themap.shape[0]: x2 = themap.shape[0]
y1 = y - dely
if y1 < 0: y1 = 0
y2 = y + 1 + dely
if y2 > themap.shape[1]: y2 = themap.shape[1]
cutout = themap[x1:x2, y1:y2].ravel()
goodcutout = cutout[cutout != magic]
num_unmasked = len(goodcutout)
if num_unmasked > 0:
themap[x, y] = N.nansum(goodcutout)/float(len(goodcutout))
delx += 1
dely += 1
themap[N.where(N.isnan(themap))] = 0.0
return themap
def pad_array(self, arr, new_shape):
"""Returns a padded array by mirroring around the edges."""
# Assume that padding is the same for both axes and is equal
# around all edges.
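        # For example, arr.shape == (100, 100) with new_shape == (150, 150)
        # gives half_size == 25, i.e. a 25-pixel mirrored border on each side.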
half_size = int((new_shape[0] - arr.shape[0]) / 2)
arr_pad = N.zeros( (new_shape), dtype=arr.dtype)
# left band
band = arr[:half_size, :]
arr_pad[:half_size, half_size:-half_size] = N.flipud( band )
# right band
band = arr[-half_size:, :]
arr_pad[-half_size:, half_size:-half_size] = N.flipud( band )
# bottom band
band = arr[:, :half_size]
arr_pad[half_size:-half_size, :half_size] = N.fliplr( band )
# top band
band = arr[:, -half_size:]
arr_pad[half_size:-half_size, -half_size:] = N.fliplr( band )
# central band
arr_pad[half_size:-half_size, half_size:-half_size] = arr
# bottom left corner
band = arr[:half_size,:half_size]
arr_pad[:half_size,:half_size] = N.flipud(N.fliplr(band))
# top right corner
band = arr[-half_size:,-half_size:]
arr_pad[-half_size:,-half_size:] = N.flipud(N.fliplr(band))
# top left corner
band = arr[:half_size,-half_size:]
arr_pad[:half_size,-half_size:] = N.flipud(N.fliplr(band))
# bottom right corner
band = arr[-half_size:,:half_size]
arr_pad[-half_size:,:half_size] = N.flipud(N.fliplr(band))
return arr_pad
def for_masked(self, mean_map, rms_map, mask, arr, ind, kappa, co):
bstat = func.bstat#_cbdsm.bstat
a, b, c, d = ind; i, j = co
if mask is None:
m, r, cm, cr, cnt = bstat(arr[a:b, c:d], mask, kappa)
if cnt > 198: cm = m; cr = r
mean_map[i, j], rms_map[i, j] = cm, cr
else:
pix_unmasked = N.where(mask[a:b, c:d] == False)
npix_unmasked = N.size(pix_unmasked,1)
if npix_unmasked > 20: # find clipped mean/rms
m, r, cm, cr, cnt = bstat(arr[a:b, c:d], mask[a:b, c:d], kappa)
if cnt > 198: cm = m; cr = r
mean_map[i, j], rms_map[i, j] = cm, cr
else:
if npix_unmasked > 5: # just find simple mean/rms
cm = N.mean(arr[pix_unmasked])
cr = N.std(arr[pix_unmasked])
mean_map[i, j], rms_map[i, j] = cm, cr
else: # too few unmasked pixels --> set mean/rms to inf
mean_map[i, j], rms_map[i, j] = N.inf, N.inf
def for_masked_mp(self, mask, arr, ind, kappa):
bstat = func.bstat #_cbdsm.bstat
a, b, c, d = ind
if mask is None:
m, r, cm, cr, cnt = bstat(arr[a:b, c:d], mask, kappa)
if cnt > 198: cm = m; cr = r
else:
pix_unmasked = N.where(mask[a:b, c:d] == False)
npix_unmasked = N.size(pix_unmasked,1)
if npix_unmasked > 20: # find clipped mean/rms
m, r, cm, cr, cnt = bstat(arr[a:b, c:d], mask[a:b, c:d], kappa)
if cnt > 198: cm = m; cr = r
else:
if npix_unmasked > 5: # just find simple mean/rms
cm = N.mean(arr[pix_unmasked])
cr = N.std(arr[pix_unmasked])
else: # too few unmasked pixels --> set mean/rms to inf
cm = N.inf
cr = N.inf
return cm, cr
def remap_axis(self, size, arr):
"""Invert axis mapping done by rms_mean_map
rms_mean_map 'compresses' axes by returning short arrays with
        coordinates of the boxes. This routine 'inverts' this compression
by calculating coordinates of each pixel of the original array
within compressed array.
Parameters:
size: size of the original (and resulting) array
arr : 'compressed' axis array from rms_mean_map
Example:
the following 'compressed' axis (see example in rms_mean_map):
ax = array([ 0. , 24.5, 49.5, 74.5, 99. ])
will be remapped as:
            print(remap_axis(100, ax))
[ 0. 0.04081633 0.08163265 0.12244898 ....
...............................................
3.91836735 3.95918367 4. ]
        which means that pixel 0 in the original image corresponds to pixel
        0 in the rms/mean_map array (which is a 5x5 array).
        Pixel 1 of the original image has a coordinate of 0.04081633 in the
        compressed image (i.e. it has no exact counterpart, and its value
        should be obtained by interpolation).
"""
from math import floor, ceil
res = N.zeros(size, dtype=N.float32)
for i in range(len(arr) - 1):
i1 = arr[i]
i2 = arr[i+1]
t = N.arange(ceil(i1), floor(i2)+1, dtype=float)
res[int(ceil(i1)):int(floor(i2))+1] = i + (t-i1)/(i2-i1)
return res
def make_bright_src_bbox(self, coord, scale, size, shape):
"""Returns bbox given coordinates of center and scale"""
xindx = int(coord[0]/scale[0])
yindx = int(coord[1]/scale[1])
xlow = xindx - int(size/2.0)
if xlow < 0:
xlow = 0
xhigh = xindx + int(size/2.0) + 1
if xhigh > shape[0]:
xhigh = shape[0]
ylow = yindx - int(size/2.0)
if ylow < 0:
ylow = 0
yhigh = yindx + int(size/2.0) + 1
if yhigh > shape[1]:
yhigh = shape[1]
src_center = [xindx, yindx]
return [slice(xlow, xhigh, None), slice(ylow, yhigh, None)], src_center
def output_rmsbox_size(self, img):
"""Prints rms/mean box size"""
opts = img.opts
do_adapt = opts.adaptive_rms_box
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"RMSimage")
if (opts.rms_map is not False) or (opts.mean_map not in ['zero', 'const']):
if do_adapt:
if opts.rms_box_bright is None:
mylogger.userinfo(mylog, 'Derived rms_box (box size, step size)',
'(' + str(img.rms_box_bright[0]) + ', ' +
str(img.rms_box_bright[1]) + ') pixels (small scale)')
else:
mylogger.userinfo(mylog, 'Using user-specified rms_box',
'(' + str(img.rms_box_bright[0]) + ', ' +
str(img.rms_box_bright[1]) + ') pixels (small scale)')
if opts.rms_box is None:
mylogger.userinfo(mylog, 'Derived rms_box (box size, step size)',
'(' + str(img.rms_box[0]) + ', ' +
str(img.rms_box[1]) + ') pixels (large scale)')
else:
mylogger.userinfo(mylog, 'Using user-specified rms_box',
'(' + str(img.rms_box[0]) + ', ' +
str(img.rms_box[1]) + ') pixels (large scale)')
else:
if opts.rms_box is None:
mylogger.userinfo(mylog, 'Derived rms_box (box size, step size)',
'(' + str(img.rms_box[0]) + ', ' +
str(img.rms_box[1]) + ') pixels')
else:
mylogger.userinfo(mylog, 'Using user-specified rms_box',
'(' + str(img.rms_box[0]) + ', ' +
str(img.rms_box[1]) + ') pixels')
| 47,148 | 43.818441 | 129 |
py
|
PyBDSF
|
PyBDSF-master/bdsf/shapelets.py
|
"""Module shapelets.
nmax => J = 0..nmax; hence nmax+1 orders calculated.
ordermax = nmax+1; range(ordermax) has all the values of n
Order n => J=n, where J=0 is the gaussian.
"""
from __future__ import print_function
from __future__ import absolute_import
import numpy as N
try:
from astropy.io import fits as pyfits
except ImportError as err:
import pyfits
from scipy.optimize import leastsq
def decompose_shapelets(image, mask, basis, beta, centre, nmax, mode):
""" Decomposes image (with mask) and beta, centre (2-tuple) , nmax into basis
shapelets and returns the coefficient matrix cf.
Mode is 'fit' or 'integrate' for method finding coeffs. If fit then integrated
values are taken as initial guess.
"""
# bad = False
# if (beta < 0 or beta/max(image.shape) > 5 or \
# (max(N.abs(list(centre)))-max(image.shape)/2) > 10*max(image.shape)): bad = True
hc = shapelet_coeff(nmax, basis)
ordermax=nmax+1
Bset=N.zeros((ordermax, ordermax, image.shape[0], image.shape[1]), dtype=N.float32)
cf = N.zeros((ordermax,ordermax)) # coefficient matrix, will fill up only lower triangular part.
index = [(i,j) for i in range(ordermax) for j in range(ordermax-i)] # i=0->nmax, j=0-nmax-i
for coord in index:
B = shapelet_image(basis, beta, centre, hc, coord[0], coord[1], image.shape)
if mode == 'fit': Bset[coord[0] , coord[1], ::] = B
m = N.copy(mask)
for i, v in N.ndenumerate(mask): m[i] = not v
cf[coord] = N.sum(image*B*m)
if mode == 'fit':
npix = N.product(image.shape)-N.sum(mask)
npara = (nmax+1)*(nmax+2)*0.5
cfnew = fit_shapeletbasis(image, mask, cf, Bset)
recon1 = reconstruct_shapelets(image.shape, mask, basis, beta, centre, nmax, cf)
recon2 = reconstruct_shapelets(image.shape, mask, basis, beta, centre, nmax, cfnew)
if N.std(recon2) < 1.2*N.std(recon1): cf = cfnew
return cf
def fit_shapeletbasis(image, mask, cf0, Bset):
""" Fits the image to the shapelet basis functions to estimate shapelet coefficients
instead of integrating it out. This should avoid the problems of digitisation and hence
non-orthonormality. """
from . import functions as func
ma = N.where(~mask.flatten())
cfshape = cf0.shape
res=lambda p, image, Bset, cfshape, mask_flat : (image.flatten()-func.shapeletfit(p, Bset, cfshape))[ma]
    if len(ma[0]) <= 5:
        # Too few unmasked pixels: not enough degrees of freedom for the fit
cf = cf0
else:
(cf, flag)=leastsq(res, cf0.flatten(), args=(image, Bset, cfshape, ma))
cf = cf.reshape(cfshape)
return cf
def reconstruct_shapelets(size, mask, basis, beta, centre, nmax, cf):
""" Reconstructs a shapelet image of size, for pixels which are unmasked, for a given
beta, centre, nmax, basis and the shapelet coefficient matrix cf. """
rimage = N.zeros(size, dtype=N.float32)
hc = []
hc = shapelet_coeff(nmax, basis)
index = [(i,j) for i in range(nmax) for j in range(nmax-i)]
for coord in index:
B = shapelet_image(basis, beta, centre, hc, coord[0], coord[1], size)
rimage += B*cf[coord]
return rimage
def shapelet_image(basis, beta, centre, hc, nx, ny, size):
""" Takes basis, beta, centre (2-tuple), hc matrix, x, y, size and returns the image of the shapelet of
order nx,ny on an image of size size. Does what getcartim.f does in fBDSM. nx,ny -> 0-nmax
Centre is by Python convention, for retards who count from zero. """
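    # Up to the discrete sampling, the 1D factors computed below are the
    # standard Cartesian shapelet basis functions:
    #   B_n(u) = H_n(u) * exp(-u**2/2) / sqrt(2**n * sqrt(pi) * n! * beta),
    # with u = (x - centre)/beta and H_n the Hermite polynomial whose
    # coefficients are taken from hc.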
from math import sqrt,pi
try:
from scipy import factorial
except ImportError:
try:
from scipy.misc.common import factorial
except ImportError:
try:
from scipy.misc import factorial
except ImportError:
from scipy.special import factorial
hcx = hc[nx,:]
hcy = hc[ny,:]
ind = N.array([nx,ny])
fact = factorial(ind)
dumr1 = N.sqrt((2.0**(ind))*sqrt(pi)*fact)
x = (N.arange(size[0],dtype=float)-centre[0])/beta
y = (N.arange(size[1],dtype=float)-centre[1])/beta
dumr3 = N.zeros(size[0])
for i in range(size[0]):
for j in range(ind[0]+1):
dumr3[i] += hcx[j]*(x[i]**j)
B_nx = N.exp(-0.50*x*x)*dumr3/dumr1[0]/sqrt(beta)
dumr3 = N.zeros(size[1])
for i in range(size[1]):
for j in range(ind[1]+1):
dumr3[i] += hcy[j]*(y[i]**j)
B_ny = N.exp(-0.50*y*y)*dumr3/dumr1[1]/sqrt(beta)
return N.outer(B_nx,B_ny)
def shape_findcen(image, mask, basis, beta, nmax, beam_pix): # + check_cen_shapelet
""" Finds the optimal centre for shapelet decomposition. Minimising various
    combinations of c12 and c21, as in the literature, doesn't work for all cases.
Hence, for the c1 image, we find the zero crossing for every vertical line
and for the c2 image, the zero crossing for every horizontal line, and then
we find intersection point of these two. This seems to work even for highly
non-gaussian cases. """
from . import functions as func
import sys
hc = []
hc = shapelet_coeff(nmax, basis)
msk=N.zeros(mask.shape, dtype=bool)
for i, v in N.ndenumerate(mask): msk[i] = not v
n,m = image.shape
cf12 = N.zeros(image.shape, dtype=N.float32)
cf21 = N.zeros(image.shape, dtype=N.float32)
index = [(i,j) for i in range(n) for j in range(m)]
for coord in index:
if msk[coord]:
B12 = shapelet_image(basis, beta, coord, hc, 0, 1, image.shape)
cf12[coord] = N.sum(image*B12*msk)
if coord==(27,51): dumpy = B12
B21 = shapelet_image(basis, beta, coord, hc, 1, 0, image.shape)
cf21[coord] = N.sum(image*B21*msk)
else:
cf12[coord] = None
cf21[coord] = None
(xmax,ymax) = N.unravel_index(image.argmax(),image.shape) # FIX with mask
if xmax in [1,n] or ymax in [1,m]:
(m1, m2, m3) = func.moment(mask)
xmax,ymax = N.round(m2)
# in high snr area, get zero crossings for each horizontal and vertical line for c1, c2 resp
tr_mask=mask.transpose()
tr_cf21=cf21.transpose()
try:
(x1,y1) = getzeroes_matrix(mask, cf12, ymax, xmax) # y1 is array of zero crossings
(y2,x2) = getzeroes_matrix(tr_mask, tr_cf21, xmax, ymax) # x2 is array of zero crossings
# find nominal intersection pt as integers
xind=N.where(x1==xmax)
yind=N.where(y2==ymax)
xind=xind[0][0]
yind=yind[0][0]
# now take 2 before and 2 after, fit straight lines, get proper intersection
ninter=5
if xind<3 or yind<3 or xind>n-2 or yind>m-2:
ninter = 3
        # use integer division so the slice bounds are ints
        xft1 = x1[xind-(ninter-1)//2:xind+(ninter-1)//2+1]
        yft1 = y1[xind-(ninter-1)//2:xind+(ninter-1)//2+1]
        xft2 = x2[yind-(ninter-1)//2:yind+(ninter-1)//2+1]
        yft2 = y2[yind-(ninter-1)//2:yind+(ninter-1)//2+1]
sig = N.ones(ninter, dtype=float)
smask1=N.array([r == 0 for r in yft1])
smask2=N.array([r == 0 for r in xft2])
cen=[0.]*2
if sum(smask1)<len(yft1) and sum(smask2)<len(xft2):
[c1, m1], errors = func.fit_mask_1d(xft1, yft1, sig, smask1, func.poly, do_err=False, order=1)
[c2, m2], errors = func.fit_mask_1d(xft2, yft2, sig, smask2, func.poly, do_err=False, order=1)
if m2-m1 == 0:
cen[0] = cen[1] = 0.0
else:
cen[0]=(c1-c2)/(m2-m1)
cen[1]=c1+m1*cen[0]
else:
cen[0] = cen[1] = 0.0
# check if estimated centre makes sense
error=shapelet_check_centre(image, mask, cen, beam_pix)
except:
error = 1
if error > 0:
#print 'Error '+str(error)+' in finding centre, will take 1st moment instead.'
(m1, m2, m3) = func.moment(image, mask)
cen = m2
return cen
def getzeroes_matrix(mask, cf, cen, cenx):
""" For a matrix cf, and a mask, this returns two vectors; x is the x-coordinate
    and y is the interpolated y-coordinate where the matrix cf crosses zero. If there
is no zero-crossing, y is zero for that column x. """
x = N.arange(cf.shape[0], dtype=N.float32)
y = N.zeros(cf.shape[0], dtype=N.float32)
# import pylab as pl
# pl.clf()
# pl.imshow(cf, interpolation='nearest')
# ii = N.random.randint(100); pl.title(' zeroes' + str(ii))
# print 'ZZ ',cen, cenx, ii
for i in range(cf.shape[0]):
l = [mask[i,j] for j in range(cf.shape[1])]
npts = len(l)-sum(l)
#print 'npts = ',npts
if npts > 3 and not N.isnan(cf[i,cen]):
mrow=mask[i,:]
if sum(l) == 0:
low=0
up=cf.shape[1]-1
else:
low = mrow.nonzero()[0][mrow.nonzero()[0].searchsorted(cen)-1]
#print 'mrow = ',i, mrow, low,
try:
up = mrow.nonzero()[0][mrow.nonzero()[0].searchsorted(cen)]
#print 'up1= ', up
except IndexError:
if [mrow.nonzero()[0].searchsorted(cen)][0]==len(mrow.nonzero()):
up = len(mrow)
#print 'up2= ', up,
else:
raise
#print
low += 1; up -= 1
npoint = up-low+1
xfn = N.arange(npoint)+low
yfn = cf[i,xfn]
root, error = shapelet_getroot(xfn, yfn, x[i], cenx, cen)
if error != 1:
y[i] = root
else:
y[i] = 0.0
else:
y[i] = 0.0
return x,y
def shapelet_getroot(xfn, yfn, xco, xcen, ycen):
""" This finds the root for finding the shapelet centre. If there are multiple roots, takes
that which closest to the 'centre', taken as the intensity barycentre. This is the python
version of getroot.f of anaamika."""
from . import functions as func
root=None
npoint=len(xfn)
error=0
if npoint == 0:
error = 1
elif yfn.max()*yfn.min() >= 0.:
error=1
minint=0; minintold=0
for i in range(1,npoint):
if yfn[i-1]*yfn[i] < 0.:
if minintold == 0: # so take nearest to centre
if abs(yfn[i-1]) < abs(yfn[i]):
minint=i-1
else:
minint=i
else:
dnew=func.dist_2pt([xco,xfn[i]], [xcen,ycen])
dold=func.dist_2pt([xco,xfn[minintold]], [xcen,ycen])
if dnew <= dold:
minint=i
else:
minint=minintold
minintold=minint
if minint < 1 or minint > npoint: error=1
if error != 1:
low=minint-min(2,minint)#-1)
up=minint+min(2,npoint-1-minint) # python array indexing rubbish
nfit=up-low+1
xfit=xfn[low:low+nfit]
yfit=yfn[low:low+nfit]
sig=N.ones(nfit)
smask=N.zeros(nfit, dtype=bool)
xx=[i for i in range(low,low+nfit)]
[c, m], errors = func.fit_mask_1d(xfit, yfit, sig, smask, func.poly, do_err=False, order=1)
root=-c/m
if root < xfn[low] or root > xfn[up]: error=1
return root, error
def shapelet_check_centre(image, mask, cen, beam_pix):
"Checks if the calculated centre for shapelet decomposition is sensible. """
from math import pi
error = 0
n, m = image.shape
x, y = round(cen[0]), round(cen[1])
if x <= 0 or x >= n or y <= 0 or y >= m: error = 1
if error == 0:
        if mask[int(round(x)),int(round(y))]: error = 2  # centre lies on a masked pixel
if error > 0:
if (N.product(mask.shape)-sum(sum(mask)))/(pi*0.25*beam_pix[0]*beam_pix[1]) < 2.5:
error = error*10 # expected to fail since source is too small
return error
def shape_varybeta(image, mask, basis, betainit, cen, nmax, betarange, plot):
""" Shapelet decomposes and then reconstructs an image with various values of beta
and looks at the residual rms vs beta to estimate the optimal value of beta. """
from . import _cbdsm
nbin = 30
delta = (2.0*betainit-betainit/2.0)/nbin
beta_arr = betainit/4.0+N.arange(nbin)*delta
beta_arr = N.arange(0.5, 6.05, 0.05)
nbin = len(beta_arr)
res_rms=N.zeros(nbin)
for i in range(len(beta_arr)):
cf = decompose_shapelets(image, mask, basis, beta_arr[i], cen, nmax, mode='')
im_r = reconstruct_shapelets(image.shape, mask, basis, beta_arr[i], cen, nmax, cf)
im_res = image - im_r
ind = N.where(~mask)
res_rms[i] = N.std(im_res[ind])
minind = N.argmin(res_rms)
if minind > 1 and minind < nbin:
beta = beta_arr[minind]
error = 0
else:
beta = betainit
error = 1
# if plot:
# pl.figure()
# pl.plot(beta_arr,res_rms,'*-')
# pl.xlabel('Beta')
# pl.ylabel('Residual rms')
return beta, error
def shapelet_coeff(nmax=20,basis='cartesian'):
""" Computes shapelet coefficient matrix for cartesian and polar
hc=shapelet_coeff(nmax=10, basis='cartesian') or
hc=shapelet_coeff(10) or hc=shapelet_coeff().
hc(nmax) will be a nmax+1 X nmax+1 matrix."""
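    # The rows of hc hold the coefficients of the (physicists') Hermite
    # polynomials, built below with the recursion
    #   H_{n+1}(x) = 2*x*H_n(x) - 2*n*H_{n-1}(x),
    # starting from H_0(x) = 1 and H_1(x) = 2*x.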
import numpy as N
order=nmax+1
if basis == 'polar':
raise NotImplementedError("Polar shapelets not yet implemented.")
hc=N.zeros([order,order])
hnm1=N.zeros(order); hn=N.zeros(order)
hnm1[0]=1.0; hn[0]=0.0; hn[1]=2.0
hc[0]=hnm1
hc[1]=hn
for ind in range(3,order+1):
n=ind-2
hnp1=-2.0*n*hnm1
hnp1[1:] += 2.0*hn[:order-1]
hc[ind-1]=hnp1
hnm1=hn
hn=hnp1
return hc
| 13,371 | 33.552972 | 108 |
py
|
PyBDSF
|
PyBDSF-master/bdsf/nat/__init__.py
|
# Adapted for numpy/ma/cdms2 by convertcdms.py
"""---------------------------------------------------------------------------------------------
INTRODUCTION TO NGMATH
The ngmath library is a collection of interpolators and approximators for one-dimensional, two-dimensional
and three-dimensional data. The packages, which were obtained from NCAR, are:
natgrid -- a two-dimensional random data interpolation package based on Dave Watson's nngridr.
dsgrid -- a three-dimensional random data interpolator based on a simple inverse distance weighting
algorithm.
fitgrid -- an interpolation package for one-dimensional and two-dimensional gridded data based on
Alan Cline's Fitpack. Fitpack uses splines under tension to interpolate in one and two
dimensions.
csagrid -- an approximation package for one-dimensional, two-dimensional and three-dimensional random
data based on David Fulker's Splpack. csagrid uses cubic splines to calculate its
approximation function.
cssgrid -- an interpolation package for random data on the surface of a sphere based on the work of
Robert Renka. cssgrid uses cubic splines to calculate its interpolation function.
shgrid -- an interpolation package for random data in 3-space based on the work of Robert Renka.
shgrid uses a modified Shepard's algorithm to calculate its interpolation function.
COMPARISON OF NGMATH PACKAGES
Three-dimensional packages -- shgrid, csagrid and dsgrid.
shgrid is probably the package of choice for interpolation. It uses a least squares fit of biquadratics
to construct its interpolation function. The interpolation function will pass through the original data
points.
csagrid uses a least squares fit of cubic splines to calculate its approximation function: the calculated
    surface will not necessarily pass through the original data points. The algorithm can become unstable in data
sparse regions.
dsgrid uses a weighted average algorithm and is stable in all cases, but the resultant interpolation is
not usually smooth and execution time is very slow. dsgrid is probably best used when csagrid and shgrid
fail or for comparative purposes.
Two-dimensional packages -- natgrid, fitgrid, csagrid and dsgrid.
natgrid is the package of choice in most cases. It implements a very stable algorithm and has parameters
for adjusting the smoothness of the output surface.
    fitgrid offers user-settable parameters for specifying derivatives along the boundary of the output grid
which are not available in natgrid.
csagrid produces an approximate two-dimensional surface which may be smoother than that produced by fitgrid
and natgrid.
dsgrid is not recommended for two-dimensional surfaces. natgrid is superior in all respects.
One-dimensional packages -- fitgrid and csagrid.
fitgrid is definitely the package of choice. It has many features not available in csagrid, such as
interpolating parametric curves, finding integrals, handling periodic functions, allowing smoothing that
varies from linear to a full cubic spline interpolation and specifying slopes at the end points.
Interpolation on a sphere -- cssgrid.
cssgrid is designed specifically for interpolating on a sphere. It uses cubic splines to calculate an
interpolation function.
NATGRID PACKAGE
natgrid implements a natural neighbor interpolation method. The input for the interpolation is a set
of randomly spaced two-dimensional coordinates with functional values at those coordinates; the output is a
set of interpolated values at coordinates in a user specified rectangular grid. The coordinates in the output
grid must be monotonic in each coordinate direction, but need not be evenly spaced. It is also possible to
interpolate at a single point.
natgrid uses a weighted average method that is much more sophisticated than the inverse distance weighted
average used by dsgrid. One distinguishing quality of natural neighbor interpolation is the way in which
a set of neighboring points (the natural neighbor) is selected to use for interpolating at a point. The
natural neighbor selection process avoids the problems common to methods based on choosing a fixed number
of neighboring points, or all points within a fixed distance. Another distinguishing quality of natural
neighbor interpolation is the way that the weights are calculated for the functional values at the natural
neighbor coordinates. These weights are based on proportionate area, rather than distances.
The method of finding the natural neighbors and calculating area-based weights to produce interpolated
values is called natural neighbor linear interpolation. This produces an interpolation surface that has a
continous slope at all points, except at the original input points. The result of natural neighbor linear
interpolation can be visualized as producing a snugly fit sheet stretched over all of the input points.
The interpolation method in natgrid also allows for natural neighbor linear interpolation augmented by
blending in gradient estimates. This is called natural neighbor nonlinear interpolation. It produces an
interpolation surface that has a continuous slope at all locations; two tautness parameters can be set by
the user to control the apparent smoothness of the output surface.
NATGRID CONTENTS
Access through Python to the natgrid package from NCAR's ngmath distribution is provided directly through the module
natgridmodule.so which was generated as a Python C language extension in order to export the natgrid functions
from the original C language library to Python.
REQUIRED FILE
natgridmodule.so -- the Python interface to the ngmath natgrid package.
USEFUL FILES
nat.py -- the object oriented interface including a general help package.
natgridtest.py -- the code to test nat.py and to write documentation.
USAGE
This module is designed to use in two ways. One is through the use of the object oriented interface to the underlying
functions. This approach is recommended for users not already familiar with the original natgrid distribtution because
it simplifies the calls to the routines. The other method uses the original functions calling them directly from Python.
------------------- OBJECT ORIENTED APPROACH ----------------
The nat module contains the Natgrid class and its single method, rgrd, which provides access to all the natgrid
functions. The object oriented approach has been organized as a two step process.
STEP 1.
To make an instance, r, type:
import nat
r = nat.Natgrid(xi, yi, xo, yo)
or
r = nat.Natgrid(xi, yi, xo, yo, listOutput = 'yes')
    where xi, yi and xo, yo are the input and output grid coordinate arrays. The optional listOutput must be
    set to anything except 'no' if xo, yo are in list format as explained below. It is the responsibility
of the user to set listOutput if the output is in the list form.
    The input grid must always be organized in a list format. The sizes of the xi array and the yi array are
necessarily equal. For example, if there are n randomly spaced input data points, there
are n values in xi and n values in yi.
There are two possible formats for the output grid. The output grid coordinate arrays may be a list like
    the input array or it may be a rectangular grid. The choice between the two possibilities is made according
    to requirements in subsequent calls to the method function. The first choice is required if the subsequent
    call is to the single point mode interpolation. The list can have one or more points. Of course, the list
    could describe a rectangular grid. For example, a rectangular grid with 10 x values and 20 y values can be
    rewritten in list form with 200 x values and 200 y values. However, this form requires calling the slower
    single point interpolator. The second choice is most efficient for the basic interpolation to a rectangular
    output grid. The output grid must be monotonic but need not be equally spaced.
The grid coordinate arrays can be single precision (numpy.float32) or double precision (numpy.float64). The
decision on whether to call for a single or a double precision computation subsequently is made by looking at
the type of these arrays.
    To look at the default settings for the control parameters and a brief description of their properties, type
r.printDefaultParameterTable()
To change a setting type the new value. For example, to set igr to 1, type
r.igr = 1
    To find a value without printing the table, type the name. For example, to examine the value of hor, type
r.hor
To check the settings type
r.printInstanceParameterTable() -- prints in tabular form the parameters used in subsequent calls to the method
function rgrd.
or
printStoredParameters() -- prints the parameters in memory which may differ from the above if the user
has made more than one instance of the Natgrid class.
STEP 2.
    natgrid is restricted to two dimensions. Consequently, it is the user's responsibility to reduce the processing of
higher dimensional data to a sequence of calls using only two dimensional data.
The computations are divided into two groups depending on whether the output arrays are in list form or in rectilinear
grid form. If they are in list format the single point mode is called to interpolate to those individual points. This is
the only process possible. On the other hand, if the output goes to a rectangular grid there are more choices. In
addition to carrying out linear and nonlinear interpolations, it is possible to request aspects and slopes. The aspect
    at a point on the interpolated surface is the direction of steepest descent. The slope is the value of the partial
    derivative taken in the direction of the aspect. The slope is measured as an angle that is zero in a horizontal surface
    and positive below the horizontal.
    The following examples cover the basic computations. They start with an indication of the appropriate STEP 1.
Example 1: the basic natural neighbor linear interpolation
As STEP 1 make an instance, r, with:
import nat
r = nat.Natgrid(xi, yi, xo, yo)
where the xo, yo grid is rectilinear as explained above in STEP 1.
Then call the primary interpolation computation to regrid the input data, dataIn, on the grid (xi, yi) to
the output data, dataOut, on the grid (xo, yo), with
dataOut = r.rgrd( dataIn )
The computation is either single or double precision as determined by the precision submitted in the grid
description in STEP 1.
It is also possible to request a wrap in the input grid and the input data in the longitude direction, assumed
to be the yi grid coordinate, by adding a keyword as
dataOut = r.rgrd( dataIn, wrap = 'yes' )
Example 2: natural neighbor linear interpolation returning the aspect and the slope.
As STEP 1 make an instance, r, with:
import nat
r = nat.Natgrid(xi, yi, xo, yo)
where the xo, yo grid is rectilinear as explained above in STEP 1.
Then call the primary interpolation computation to regrid the input data, dataIn, on the grid (xi, yi) to
the output data, dataOut, on the grid (xo, yo), while asking for the aspect and the slope on this output grid, with
dataOut, a, s = r.rgrd( dataIn, aspectSlope = 'yes' )
where a is the aspect, the direction of the steepest descent in degrees measured from 'north' and s is the
slope in degrees measured from the horizontal. Necessarily, these are arrays aligned with the rectilinear
output grid, xo, yo.
The computation is either single or double precision as determined by the precision submitted in the grid
description in STEP 1.
It is also possible to request a wrap in the input grid and the input data in the longitude direction, assumed
to be the yi grid coordinate, by adding a keyword as
dataOut, a, s = r.rgrd( dataIn, aspectSlope = 'yes', wrap = 'yes' )
Example 3: the basic natural neighbor nonlinear interpolation
The procedure for the nonlinear interpolation differs from the linear case in the need to set the control
    parameter igr. Follow Example 1 and insert the following statement after making the instance, r.
r.igr = 1
Example 4: natural neighbor nonlinear interpolation returning the aspect and the slope.
The procedure for the nonlinear interpolation differs from the linear case in the need to set the control
    parameter igr. Follow Example 2 and insert the following statement after making the instance, r.
r.igr = 1
Example 5: single point mode natural neighbor linear interpolation
As STEP 1 make an instance, r, with:
import nat
r = nat.Natgrid(xi, yi, xo, yo, listOutput = 'yes')
where the xo, yo output grid is in the list form (not a rectangular output grid) as explained above in
STEP 1.
To call the single point mode interpolation computation to regrid the input data, dataIn, on the grid (xi, yi)
to the output data, dataOut, on the grid (xo, yo), type
dataOut = r.rgrd( dataIn )
The computation is either single or double precision as determined by the precision submitted in the grid
description in STEP 1. In the single point mode it is not possible to request the aspect and the slope.
Example 6: single point mode natural neighbor nonlinear interpolation
The procedure for the nonlinear interpolation differs from the linear case in the need to set the control
    parameter igr. Follow Example 5 and insert the following statement after making the instance, r.
r.igr = 1
------------------- ORIGINAL FUNCTION APPROACH -----------------
The module natgridmodule.so exports the following functions to Python from the original ngmath C library:
Single precision procedures:
natgrids - primary function for gridding.
seti - set int parameter values.
geti - retrieve values for int parameters.
setr - set float parameter values.
getr - retrieve values for float parameters
setc - set char parameter values.
getc - retrieve values for char parameters.
getaspects - get aspect values, if calculated by setting sdi = 1.
getslopes - get slope values, if calculated by setting sdi = 1.
pntinits - initiate single point mode.
pnts - interpolate at a single point.
pntend - terminate single point mode.
Double precision procedures:
natgridd - primary function for gridding.
setrd - set float parameter values.
getrd - retrieve values for float parameters
getaspectd - get aspect values, if calculated by setting sdi = 1.
getsloped - get slope values, if calculated by setting sdi = 1.
pntinitd - initiate single point mode.
pntd - interpolate at a single point.
pntendd - terminate single point mode.
Information on the use of the routines is available by importing natgridmodule and printing the docstring
of interest. For example, documentation for the routine natgrids is obtained by typing
import natgridmodule
        print(natgridmodule.natgrids.__doc__)
This same information is available in the help package.
A description of the control parameters is not in the natgridmodule documentation. It can be found by typing
import nat
nat.printParameterTable()
    The documentation associated with natgridmodule.so, such as the docstrings, describes the C code.
DOCUMENTATION
Documentation is provided through Python's docstrings, essentially Python style program
comments. A help package provides instructions on the use of the natgrid module. A table of contents
is printed to the screen by typing
nat.help()
after importing nat.
    A hard copy of all the pertinent 'docstring' documentation written to the file natgrid.doc can
be produced by typing
nat.document()
    As an alternative to using the help package, online documentation for the natgrids function, for example,
    is available directly from the natgrids docstring by typing
        import natgridmodule
        print(natgridmodule.natgrids.__doc__)
TESTING
To run a test of the natgrid computations and to get a copy of this documentation, type
cdat natgridtest.py
--------------------------------------------------------------------------------------------------------------"""
from __future__ import print_function
# import string, math, sys, numpy, cdms2, natgridmodule
import string, math, sys, numpy
from . import natgridmodule
# writeTestcase = 'yes'
# try:
# import cdms2
# except ImportError:
# print 'Can not write test case results to netCDF files without module cdms'
# writeTestcase = 'no'
writeTestcase = 'no'
usefilled = 'yes'
try:
import numpy.ma
except ImportError:
print('Can not convert from numpy.ma array to numpy array without module numpy.ma')
usefilled = 'no'
debug = 0
class Natgrid:
#-------------------------------------------------------------------------------------------------------------
#
# Contents of Natgrid class
#
#
# Natgrid class
# __init__ -- initialization
# rgrd -- the regridder called from Python
#
    # rgrdPrimary -- called by rgrd if the output grid is monotonically increasing
# rgrdSinglePoint -- called by rgrd if the output grid is random or single point mode is selected
# setInstanceParameters -- sets the C values to the instance values
#
#---------------------------------------------------------------------------------------------------------------
def __init__(self, xi, yi, xo, yo, listOutput = 'no'):
""" --------------------------------------------------------------------------------------------------------
routine: __init__ for class Natgrid
purpose: init makes an instance of the Natgrid class while performing the following:
1. checks the argument list for the correct types.
2. selects single or double precision computation.
3. assigns the coordinate grid arrays to self data.
4. assigns default control parameter values from the parameter dictionary.
usage: r = nat.Natgrid(xi, yi, xo, yo)
or
r = nat.Natgrid(xi, yi, xo, yo, listOutput = 'yes')
where xi, yi and xo, yo are the input and output grid coordinate arrays. The optional listOutput is
set to anything except 'no' if xo, yo are in list format as explained below.
        The input grid must be organized in a list format always. The sizes of the xi array and the yi array are
necessarily equal. For example, if there are n randomly spaced input data points, there
are n values in xi and n values in yi.
There are two possible formats for the output grid. The output grid coordinate arrays may be a list like
        the input array or it may be a rectangular grid. The choice between the two possibilities is made according
        to requirements in subsequent calls to the method function. The first choice is required if the subsequent
        call is to the single point mode interpolation. The list can have one or more points. Of course, the list
        could describe a rectangular grid. For example, a rectangular grid with 10 x values and 20 y values can be
        rewritten in list form with 200 x values and 200 y values. However, this form requires calling the slower
        single point interpolator. The second choice is most efficient for the basic interpolation to a rectangular
        output grid. The output grid must be monotonic but need not be equally spaced.
Note: the index in the data associated with y varies the fastest.
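        As a sketch (assuming numpy has been imported and xi, yi are existing input coordinate arrays), the two
        output grid forms can be set up as
            xo = numpy.arange(0., 10., 1.).astype(numpy.float32)           # 10 x values
            yo = numpy.arange(0., 20., 1.).astype(numpy.float32)           # 20 y values
            r = nat.Natgrid(xi, yi, xo, yo)                                # rectangular output grid
        or, listing all 200 points explicitly,
            xlist = numpy.repeat(xo, 20)                                   # 200 x values
            ylist = numpy.tile(yo, 10)                                     # 200 y values
            r = nat.Natgrid(xi, yi, xlist, ylist, listOutput = 'yes')      # list form output grid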
definition: __init__(self, xi, yi, xo, yo, listOutput = 'no'):
--------------------------------------------------------------------------------------------------------"""
# ---- check the input grid argument list
try:
size = len(xi)
except:
msg = 'CANNOT CREATE INSTANCE - The first argument must be an array'
raise TypeError(msg)
if size < 4:
            msg = 'CANNOT CREATE INSTANCE - The length of the input x coordinate grid must be greater than 3'
raise ValueError(msg)
try:
size = len(yi)
except:
msg = 'CANNOT CREATE INSTANCE - The third argument must be an array'
raise TypeError(msg)
if size < 4:
            msg = 'CANNOT CREATE INSTANCE - The length of the input y coordinate grid must be greater than 3'
raise ValueError(msg)
# set the self data for the input grid
self.nxi = len(xi)
self.nyi = len(yi)
if self.nxi != self.nyi:
            msg = 'CANNOT CREATE INSTANCE - The length of the input x and y coordinate grids must be equal'
raise ValueError(msg)
self.xi = xi
self.yi = yi
# ---- check the output grid argument list
try:
size = len(xo)
except:
msg = 'CANNOT CREATE INSTANCE - The second argument must be an array'
raise TypeError(msg)
try:
size = len(yo)
except:
msg = 'CANNOT CREATE INSTANCE - The fourth argument must be an array'
raise TypeError(msg)
# set the self data for the output grid
self.nxo = len(xo)
self.nyo = len(yo)
if listOutput == 'no':
self.xo, self.yo, monotonic, self.xreverse, self.yreverse = checkdim(xo, yo) # monotonicity check
if monotonic == 'no':
msg = 'CANNOT CREATE INSTANCE - Rectangular output grid must be monotonic'
raise ValueError(msg)
self.listOutput = 'no'
else:
if self.nxo != self.nyo:
msg = 'CANNOT CREATE INSTANCE - The list type output arrays must have the same length'
raise ValueError(msg)
else:
self.xo = xo
self.yo = yo
self.xreverse = 'no'
self.yreverse = 'no'
self.listOutput = 'yes'
# select the interpolation routines from the single or the double precision group - majority rules here
numberSingles = 0
numberDoubles = 0
if xi.dtype.char == 'f':
numberSingles = numberSingles + 1
else:
numberDoubles = numberDoubles + 1
if xo.dtype.char == 'f':
numberSingles = numberSingles + 1
else:
numberDoubles = numberDoubles + 1
if yi.dtype.char == 'f':
numberSingles = numberSingles + 1
else:
numberDoubles = numberDoubles + 1
if yo.dtype.char == 'f':
numberSingles = numberSingles + 1
else:
numberDoubles = numberDoubles + 1
if debug == 1:
print('number Singles and Doubles : ', numberSingles, numberDoubles)
if numberSingles >= numberDoubles:
self.group = 'single'
if numberSingles < 4:
sendmsg('Changing all the coordinate grid types to float32')
xi = xi.astype(numpy.float32)
xo = xo.astype(numpy.float32)
yi = yi.astype(numpy.float32)
yo = yo.astype(numpy.float32)
else:
self.group = 'double'
if numberDoubles < 4:
sendmsg('Changing all the coordinate grid types to float64')
xi = xi.astype(numpy.float64)
xo = xo.astype(numpy.float64)
yi = yi.astype(numpy.float64)
yo = yo.astype(numpy.float64)
# set the parameter instance data to the default values
defaultDict = Natgrid.makeDefaultParameterTable(self)
self.adf = eval(defaultDict['adf'][2])
self.alg = eval(defaultDict['alg'][2])
self.asc = eval(defaultDict['asc'][2])
self.bI = eval(defaultDict['bI'][2])
self.bJ = eval(defaultDict['bJ'][2])
self.dup = eval(defaultDict['dup'][2])
self.ext = eval(defaultDict['ext'][2])
self.hor = eval(defaultDict['hor'][2])
self.igr = eval(defaultDict['igr'][2])
self.magx = eval(defaultDict['magx'][2])
self.magy = eval(defaultDict['magy'][2])
self.magz = eval(defaultDict['magz'][2])
self.non = eval(defaultDict['non'][2])
self.nul = eval(defaultDict['nul'][2])
self.rad = eval(defaultDict['rad'][2])
self.sdi = eval(defaultDict['sdi'][2])
self.upd = eval(defaultDict['upd'][2])
self.ver = eval(defaultDict['ver'][2])
def rgrd(self, dataIn, aspectSlope = 'no', wrap = 'no'):
""" --------------------------------------------------------------------------------------------------------
routine: rgrd
purpose: Perform one of the following:
1. natural neighbor linear interpolation to a rectilinear grid
2. natural neighbor linear interpolation to a rectilinear grid returning aspects and slopes
3. natural neighbor linear interpolation to a list of points in the single point mode
4. natural neighbor nonlinear interpolation to a rectilinear grid
5. natural neighbor nonlinear interpolation to a rectilinear grid returning aspects and slopes
6. natural neighbor nonlinear interpolation to a list of points in the single point mode
        Each of the computations can be single or double precision. The choice is made by examining the precision
        in the grid coordinate arrays. In addition, the choice of the single point mode is determined by the
        setting of the listOutput parameter in creating an instance of the Natgrid class.
Assuming that the instance, r, has been constructed, the choice between a linear or a nonlinear
computation is made with the control parameter igr. The default calls for a linear calculation. To
call for a nonlinear one, type
r.igr = 1
usage: To interpolate the input data, dataIn, to the output data, dataOut, on the output grid, type
dataOut = r.rgrd(dataIn)
If the output grid is rectangular, it is possible to request the associated aspects and slopes with
dataOut, aspect, slope = r.rgrd(dataIn, aspectSlope = 'yes')
For global latitude-longitude grids, it is also possible to request a wrap in the input grid and the input
data in the longitude direction, assumed to be the yi grid coordinate, (with or without associated aspects
and slopes) with
            dataOut = r.rgrd(dataIn, wrap = 'yes')
or
dataOut, aspect, slope = r.rgrd(dataIn, aspectSlope = 'yes', wrap = 'yes')
definition: rgrd(self, dataIn, aspectSlope = 'no', wrap = 'no'):
--------------------------------------------------------------------------------------------------------"""
if self.nxi != len(dataIn):
            msg = 'CANNOT REGRID - The length of the input coordinate grids and the data must be equal'
raise ValueError(msg)
if usefilled == 'yes':
dataIn = numpy.ma.filled(dataIn)
# set the instance values of the parameters in the c code
Natgrid.setInstanceParameters(self)
if wrap == 'yes':
self.xi, self.yi, dataIn = Natgrid.wrapAll(self, self.xi, self.yi, dataIn)
self.nxi = len(self.xi)
self.nyi = len(self.yi)
if dataIn.dtype.char == 'f': # single precision
if self.group == 'double': # change the grid type to match dataIn
self.group = 'single' # change the grid type to match dataIn
self.xi = self.xi.astype(numpy.float32)
self.xo = self.xo.astype(numpy.float32)
self.yi = self.yi.astype(numpy.float32)
self.yo = self.yo.astype(numpy.float32)
else: # double precision
if self.group == 'single': # change the grid type to match dataIn
self.group = 'double' # change the grid type to match dataIn
self.xi = self.xi.astype(numpy.float64)
self.xo = self.xo.astype(numpy.float64)
self.yi = self.yi.astype(numpy.float64)
self.yo = self.yo.astype(numpy.float64)
if self.listOutput == 'no': # output grid is rectangular
t = Natgrid.rgrdPrimary(self, dataIn, aspectSlope)
else: # output grid is a list
t = Natgrid.rgrdSinglePoint(self, dataIn)
return t
def rgrdPrimary(self, dataIn, aspectSlope):
""" #-------------------------------------------------------------------
#
#
#-------------------------------------------------------------------------"""
if aspectSlope != 'no':
self.sdi = 1 # calculate aspects and slopes
# set the instance values of the parameters in the c code
#Natgrid.setInstanceParameters(self)
if dataIn.dtype.char == 'f': # single precision
if debug == 1:
print('In rgrdPrimary calling natgrids')
dataOut, ier = natgridmodule.natgrids(self.nxi, self.xi, self.yi, dataIn, self.nxo, self.nyo, self.xo, self.yo)
if ier != 0:
msg = 'Error in return from natgrids call with -- ' + Natgrid.errorTable(self)[ier]
raise ValueError(msg)
if aspectSlope != 'no':
nxo = self.nxo
nyo = self.nyo
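                # sdi was set to 1 above, so natgrids has computed and stored the aspects and slopes
                # internally; fetch them one output grid point at a time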
a = numpy.zeros((nxo, nyo), numpy.float32)
for i in range(nxo):
for j in range(nyo):
uvtemp, ier = natgridmodule.getaspects(i, j)
if ier != 0:
msg = 'Error in return from getaspects call with -- ' + Natgrid.errorTable(self)[ier]
raise ValueError(msg)
a[i,j] = uvtemp # return aspect in degrees
s = numpy.zeros((nxo, nyo), numpy.float32)
for i in range(nxo):
for j in range(nyo):
uvtemp, ier = natgridmodule.getslopes(i, j)
if ier != 0:
msg = 'Error in return from getslopes call with -- ' + Natgrid.errorTable(self)[ier]
raise ValueError(msg)
s[i,j] = uvtemp # return slope in degrees
else: # double precision
if debug == 1:
print('In rgrdPrimary calling natgridd')
dataOut, ier = natgridmodule.natgridd(self.nxi, self.xi, self.yi, dataIn, self.nxo, self.nyo, self.xo, self.yo)
if ier != 0:
msg = 'Error in return from natgridd call with -- ' + Natgrid.errorTable(self)[ier]
raise ValueError(msg)
if aspectSlope != 'no':
nxo = self.nxo
nyo = self.nyo
a = numpy.zeros((nxo, nyo), numpy.float64)
for i in range(nxo):
for j in range(nyo):
                        uvtemp, ier = natgridmodule.getaspectd(i, j)
if ier != 0:
msg = 'Error in return from getaspectd call with -- ' + Natgrid.errorTable(self)[ier]
raise ValueError(msg)
a[i,j] = uvtemp # return aspect in degrees
s = numpy.zeros((nxo, nyo), numpy.float64)
for i in range(nxo):
for j in range(nyo):
                        uvtemp, ier = natgridmodule.getsloped(i, j)
if ier != 0:
msg = 'Error in return from getsloped call with -- ' + Natgrid.errorTable(self)[ier]
raise ValueError(msg)
s[i,j] = uvtemp # return slope in degrees
        # check whether the order of the returned arrays needs to be reversed
if (self.xreverse == 'yes') or (self.yreverse == 'yes'):
needReverse = 'yes'
else:
needReverse = 'no'
# construct the tuple for the return of what was calculated
if aspectSlope != 'no':
if needReverse == 'yes':
dataOut = Natgrid.reverseData(self, dataOut)
a = Natgrid.reverseData(self, a)
s = Natgrid.reverseData(self, s)
returnList = [dataOut]
returnList.append(a)
returnList.append(s)
return tuple(returnList)
else:
if needReverse == 'yes':
dataOut = Natgrid.reverseData(self, dataOut)
return dataOut
def rgrdSinglePoint(self, dataIn):
""" #-------------------------------------------------------------------
#
#
#-------------------------------------------------------------------------"""
self.sdi = 0 # turn off calculaton of aspect and slope
if dataIn.dtype.char == 'f': # single precision
if debug == 1:
print('In rgrdSinglePoint using single precision computation')
natgridmodule.pntinits(self.nxi, self.xi, self.yi, dataIn)
dataOut = numpy.zeros((self.nxo), numpy.float32)
for i in range(self.nxo):
dataOut[i] = natgridmodule.pnts(self.xo[i], self.yo[i])
natgridmodule.pntend()
else: # double precision
if debug == 1:
print('In rgrdSinglePoint using double precision computation')
natgridmodule.pntinitd(self.nxi, self.xi, self.yi, dataIn)
dataOut = numpy.zeros((self.nxo), numpy.float64)
for i in range(self.nxo):
dataOut[i] = natgridmodule.pntd(self.xo[i], self.yo[i])
natgridmodule.pntendd()
return dataOut
def reverseData(self, data):
#------------------------------------------------------------------------------
#
        # purpose: reverse the order of the data if the output grid submitted was not increasing
#
# usage:
#
# returned: parameters
#
#------------------------------------------------------------------------------
if self.xreverse == 'yes':
data = data[::-1,:]
if self.yreverse == 'yes':
data = data[:, ::-1]
return data
def wrapAll(self, lat, lon, data):
#------------------------------------------------------------------------------
#
        # purpose: add a wrap in longitude to the list form of the input data
#
# usage:
#
# passed: lat -- the latitude array
        # lon -- the longitude array which requires a large wrap for natgrid
# data -- the data at the associated linear set of points
#
        # returned: lat, lon and data differing from the input by the wrap
#
#
#------------------------------------------------------------------------------
if debug == 1:
print('entering wrapAll with array lengths: ', len(lat))
# Make a wrapped grid and wrapped data
lonList = list(lon) # make Python lists as intermediate step
latList = list(lat)
dataList = list(data)
maxlon = max(lonList) # set up the wrap ranges in longitude
minlon = min(lonList)
distance = (maxlon - minlon)/4. # wrap first and last quarter of points
minlonLow = minlon
minlonHigh = minlon + distance
maxlonLow = maxlon - distance
maxlonHigh = maxlon
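        # e.g. (hypothetical values) for longitudes running from 0. to 360. the quarter width is 90., so the
        # loop below duplicates points with lon in [0., 90.) at lon + 360. and points with lon in (270., 360.]
        # at lon - 360.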
for i in range(len(lonList)): # wrap the Python lists
value = lonList[i]
if (value >= minlonLow) and (value < minlonHigh):
lonList.append(value + 360.)
latList.append(latList[i])
dataList.append(dataList[i])
elif (value > maxlonLow) and (value <= maxlonHigh):
lonList.append(value - 360.)
latList.append(latList[i])
dataList.append(dataList[i])
if self.group == 'single': # single precision
lon = numpy.array(lonList, numpy.float32) # convert to numpy arrays
lat = numpy.array(latList, numpy.float32)
data = numpy.array(dataList, numpy.float32)
else: # double precision
lon = numpy.array(lonList, numpy.float64) # convert to numpy arrays
lat = numpy.array(latList, numpy.float64)
data = numpy.array(dataList, numpy.float64)
if debug == 1:
print('leaving wrapAll with array lengths: ', len(lat))
return lat, lon, data
#---------------------------------------------------------------------------------
# **************** Control parameter manipulation functions ********************
#---------------------------------------------------------------------------------
def parameterNames(self):
#------------------------------------------------------------------------------
#
# purpose: produce a list of the natgrid parameters
#
# usage: parameters = parameterNames(self)
#
# passed: self
#
# returned: parameters
#
#------------------------------------------------------------------------------
parameters = ['name', '----', 'adf', 'alg', 'asc', 'bI', 'bJ', 'dup', 'ext', 'hor', 'igr', 'magx',
'magy', 'magz', 'non', 'nul', 'rad', 'sdi', 'upd', 'ver', 'xas', 'yas', 'zas' ]
return parameters
def parameterType(self):
#--------------------------------------------------------------------------------
#
# purpose: produce a dictionary connecting parameter names and their data types
#
# usage: typeDict = parameterType(self)
#
# passed: self
#
# returned: typeDict
#
#---------------------------------------------------------------------------------
typeDict = {
'adf':'int', 'alg':'char', 'asc':'int', 'bI':'float', 'bJ':'float', 'dup':'int', 'ext':'int',
'hor':'float', 'igr':'int', 'magx':'float', 'magy':'float', 'magz':'float', 'non':'int', 'nul':'float',
'rad':'int', 'sdi':'int', 'upd':'int', 'ver':'float', 'xas':'float', 'yas':'float', 'zas':'float' }
return typeDict
def makeDefaultParameterTable(self):
#-----------------------------------------------------------------------------------
#
# purpose: construct the dictionary which is the default control parameters table
#
# usage: makeDefaultParameterTable()
#
# passed: self
#
# returned: parameterDict
#
#----------------------------------------------------------------------------------
parameterDict = {
'name':('type ', ' legal values ',' default values ',' description '),
'----':('-----', '--------------------','-----------------','------------------------------------------------------------'),
            'adf': ('int ','0 = no or 1 = yes ',' 0 ','produce data file of algorithmic info for display? (see alg) '),
            'alg': ('char ','any file name ',' "nnalg.dat" ','file name for algorithmic display tool (see adf) '),
            'asc': ('int ','0 = no or 1 = yes ',' 1 ','is automatic scaling allowed? '),
'bI': ('float','>= 1. ',' 1.5 ','tautness increasing effect of the gradients by increasing bI'),
'bJ': ('float','>= 1. ',' 7.0 ','tautness decreasing breadth of region affected by gradients '),
            'dup': ('int ','0 = yes or 1 = no ',' 1 ','are duplicate input coordinates allowed? '),
'ext': ('int ','0 = no or 1 = yes ',' 1 ','is extrapolation allowed outside the convex hull? '),
'hor': ('float','>= 0. ',' -1.0 ','amount of horizontal overlap from outside current region '),
            'igr': ('int ','0 = no or 1 = yes ',' 0 ','are gradients to be computed? '),
'magx':('float','> 0. ',' 1.0 ','scale factor for x coordinate values '),
'magy':('float','> 0. ',' 1.0 ','scale factor for y coordinate values '),
'magz':('float','> 0. ',' 1.0 ','scale factor for z coordinate values '),
            'non': ('int ','0 = yes or 1 = no ',' 0 ','are interpolated values allowed to be negative? '),
'nul': ('float','any float ',' 0.0 ','value for points outside the convex hull if no extrapolation'),
            'rad': ('int ','0 = rad or 1 = deg ',' 0 ','are slopes and aspects returned in radians or degrees? '),
'sdi': ('int ','0 = no or 1 = yes ',' 0 ','are slopes and aspects to be computed? '),
            'upd': ('int ','0=N to S or 1=S to N',' 1 ','does the output array go from N to S or S to N? '),
'ver': ('float','>= 0. ',' -1.0 ','amount of vertical overlap from outside current region '),
'xas': ('float','> 0. ',' 0.0 ','scale used by automatic scaling of x in last interpolation '),
'yas': ('float','> 0. ',' 0.0 ','scale used by automatic scaling of y in last interpolation '),
'zas': ('float','> 0. ',' 0.0 ','scale used by automatic scaling of z in last interpolation ') }
return parameterDict
def makeInstanceParameterTable(self):
#----------------------------------------------------------------------------------
#
# purpose: construct the dictionary which is the instance control parameters table
#
# usage: makeInstanceParameterTable(self)
#
# passed: self
#
# returned: parameterDict
#
#----------------------------------------------------------------------------------
parameterDict = {
'name':('type ', ' legal values ',' Values ',' description '),
'----':('-----', '-------------------','----------------','------------------------------------------------------------'),
            'adf': ('int ','0 = no or 1 = yes ', eval('self.adf') ,'produce data file of algorithmic info for display? (see alg) '),
            'alg': ('char ','any file name ', eval('self.alg') ,'file name for algorithmic display tool (see adf) '),
            'asc': ('int ','0 = no or 1 = yes ', eval('self.asc') ,'is automatic scaling allowed? '),
'bI': ('float','>= 1. ', eval('self.bI') ,'tautness increasing effect of the gradients by increasing bI'),
'bJ': ('float','>= 1. ', eval('self.bJ') ,'tautness decreasing breadth of region affected by gradients '),
            'dup': ('int ','0 = yes or 1 = no ', eval('self.dup') ,'are duplicate input coordinates allowed? '),
'ext': ('int ','0 = no or 1 = yes ', eval('self.ext') ,'is extrapolation allowed outside the convex hull? '),
'hor': ('float','>= 0. ', eval('self.hor') ,'amount of horizontal overlap from outside current region '),
            'igr': ('int ','0 = no or 1 = yes ', eval('self.igr') ,'are gradients to be computed? '),
'magx':('float','> 0. ', eval('self.magx'),'scale factor for x coordinate values '),
'magy':('float','> 0. ', eval('self.magy'),'scale factor for y coordinate values '),
'magz':('float','> 0. ', eval('self.magz'),'scale factor for z coordinate values '),
            'non': ('int ','0 = yes or 1 = no ', eval('self.non') ,'are interpolated values allowed to be negative? '),
'nul': ('float','any float ', eval('self.nul') ,'value for points outside the convex hull if no extrapolation'),
            'rad': ('int ','0 = rad or 1 = deg ', eval('self.rad') ,'are slopes and aspects returned in radians or degrees? '),
'sdi': ('int ','0 = no or 1 = yes ', eval('self.sdi') ,'are slopes and aspects to be computed? '),
            'upd': ('int ','0=N to S or 1=S to N', eval('self.upd') ,'does the output array go from N to S or S to N? '),
'ver': ('float','>= 0. ', eval('self.ver') ,'amount of vertical overlap from outside current region '),
'xas': ('float','> 0. ',' 0.0 ','scale used by automatic scaling of x in last interpolation'),
'yas': ('float','> 0. ',' 0.0 ','scale used by automatic scaling of y in last interpolation'),
'zas': ('float','> 0. ',' 0.0 ','scale used by automatic scaling of z in last interpolation') }
return parameterDict
def printDefaultParameterTable(self):
""" --------------------------------------------------------------------------------------------------------
purpose: print the value of all the parameters
usage: r.printDefaultParameterTable()
where r is an instance of Natgrid
passed: self
returned: None
--------------------------------------------------------------------------------------------------------"""
names = Natgrid.parameterNames(self)
names = names[2:]
parameterDict = Natgrid.makeDefaultParameterTable(self)
for item in names:
items = (item, parameterDict[item][0], parameterDict[item][1], parameterDict[item][2], parameterDict[item][3])
print('%-7.7s %-6.6s %-12.12s %-15.15s %s' % items)
return
def printInstanceParameterTable(self):
""" --------------------------------------------------------------------------------------------------------
purpose: print the value of all the parameters
usage: r.printInstanceParameterTable()
where r is an instance of Natgrid
passed: self
returned: None
--------------------------------------------------------------------------------------------------------"""
names = Natgrid.parameterNames(self)
names = names[2:]
parameterDict = Natgrid.makeInstanceParameterTable(self)
for item in names:
items = (item, parameterDict[item][0], parameterDict[item][1], parameterDict[item][2], parameterDict[item][3])
print('%-7.7s %-6.6s %-12.12s %-7.7s %s' % items)
return
def printInstanceParameters(self):
""" --------------------------------------------------------------------------------------------------------
purpose: print the values of the current natgrid control parameters in c code
        usage: r.printInstanceParameters()
where r is an instance of Natgrid
passed: self
returned: None
--------------------------------------------------------------------------------------------------------"""
names = Natgrid.parameterNames(self)
names = names[2:]
typeDict = Natgrid.parameterType(self)
for name in names:
if typeDict[name] == 'int':
print('Currently, %s = %d' % (name, eval('self.' + name)))
elif typeDict[name] == 'char':
print('Currently, %s = %s' % (name, eval('self.' + name)))
elif typeDict[name] == 'float':
print('Currently, %s = %f' % (name, eval('self.' + name)))
elif typeDict[name] == 'double':
print('Currently, %s = %f' % (name, eval('self.' + name)))
return None
def setInstanceParameters(self):
#---------------------------------------------------------------------------
#
# purpose: set the instance values of the current natgrid control parameters in c code
#
# usage: r.setInstanceParameters()
#
# where r is an instance of Natgrid
#
# passed: self
#
# returned: None
#
#----------------------------------------------------------------------------
names = Natgrid.parameterNames(self)
names = names[2:-3] # the -3 eliminates the nonsettable xas, yas and zas
typeDict = Natgrid.parameterType(self)
# set the current values for the natgrid control parameters
for name in names:
if typeDict[name] == 'int':
natgridmodule.seti(name, eval('self.' + name))
elif typeDict[name] == 'char':
natgridmodule.setc(name, eval('self.' + name))
elif typeDict[name] == 'float':
natgridmodule.setr(name, eval('self.' + name))
elif typeDict[name] == 'double':
natgridmodule.setrd(name, eval('self.' + name))
return None
#---------------------------------------------------------------------------------
# ***************************** Error Table ************************************
#---------------------------------------------------------------------------------
def errorTable(self):
""" --------------------------------------------------------------------------------------------------------
purpose: construct the dictionary which provides access to error messages
usage: errorDict = r.errorTable()
where r is an instance of Natgrid
returned: errorDict
--------------------------------------------------------------------------------------------------------"""
errorDict = {
1: 'Insufficient data in gridded region to triangulate',
            2: 'Duplicate input data coordinates are not allowed',
            3: 'Unable to open file for writing algorithmic data',
            4: 'WARNING: The ratio of vertical to horizontal scales too large for gradients. Rescale if gradients required',
            5: 'WARNING: The ratio of vertical to horizontal scales too small for gradients. Rescale if gradients required',
            6: 'WARNING: The ratio of x to y-axis breadth too extreme. Change proportions or rescale. Gradients disabled',
7: 'Unable to allocate storage for ivector',
8: 'Unable to allocate storage for dvector',
9: 'Unable to allocate storage for **imatrix',
10: 'Unable to allocate storage for imatrix[]',
11: 'Unable to allocate storage for **fmatrix',
12: 'Unable to allocate storage for fmatrix[]',
13: 'Unable to allocate storage for **dmatrix',
14: 'Unable to allocate storage for dmatrix[]',
15: 'Unable to allocate storage for raw data',
16: 'Unable to allocate storage for a simplex',
17: 'Unable to allocate storage for temp',
18: 'Unable to allocate storage for neig',
            19: 'Slopes have not been computed, set sdi',
20: 'Row argument out of range',
21: 'Column argument out of range',
            22: 'Aspects have not been computed, set sdi',
23: 'Parameter name not known',
24: 'Can not open error file',
25: 'Automatic scaling done - distorted aspects not returned. Rescale data or set magx, magy and magz appropriately',
26: 'Automatic scaling done - distorted slopes not returned. Rescale data or set magx, magy and magz appropriately',
27: 'Coordinate is outside the gridded region for a single point interpolation',
28: 'Can not compute aspects and slopes in conjunction with single point interpolation mode',
29: 'Fortran DOUBLE PRECISION entries not supported on UNICOS',
30: 'Error number out of range' }
return errorDict
#---------------------------------------------------------------------------------
# *************************** magic functions *********************************
#---------------------------------------------------------------------------------
def __setattr__(self, name, value):
#---------------------------------------------------------------------------------
#
# purpose: '__setattr__' is called on every assignment to an instance attribute.
# Consequently, it must put the value in through the __dict__ to avoid
        # calling itself and setting up an infinite recursion loop. It sets the
# attribute called name to value in two steps.
# One -- set the global C code control parameter
# Two -- set the instance self data control parameter
#
# usage: x.name = value
#
# passed : name and value
#
# returned: None
#
#---------------------------------------------------------------------------------
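        # e.g. the assignment r.igr = 1 both calls natgridmodule.seti('igr', 1), so the C code sees the new
        # value at once, and stores 1 in self.__dict__['igr'] for later use by setInstanceParameters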
typeDict = Natgrid.parameterType(self)
if name in typeDict.keys():
if typeDict[name] == 'int':
natgridmodule.seti(name, value)
self.__dict__[name] = value
elif typeDict[name] == 'char':
natgridmodule.setc(name, value)
self.__dict__[name] = value
elif typeDict[name] == 'float':
natgridmodule.setr(name, value)
self.__dict__[name] = value
elif typeDict[name] == 'double':
natgridmodule.setrd(name, value)
self.__dict__[name] = value
else:
self.__dict__[name] = value
return None
def __getattr__(self, name):
#---------------------------------------------------------------------------------
#
# purpose: '__getattr__' is called only if a referenced attribute can not be found
# in the instance. It gets the attribute from natgridmodule if possible.
#
        # usage: x.name -- name is the object and not a string repr
#
# passed : name
#
# returned: x.name
#
#---------------------------------------------------------------------------------
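        # e.g. referencing r.xas, which is never stored as instance data, falls through to __getattr__ and
        # returns natgridmodule.getr('xas'), the scale used by automatic scaling in the last interpolation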
typeDict = Natgrid.parameterType(self)
if name in typeDict.keys():
if typeDict[name] == 'int':
value = natgridmodule.geti(name)
elif typeDict[name] == 'char':
value = natgridmodule.getc(name)
elif typeDict[name] == 'float':
value = natgridmodule.getr(name)
elif typeDict[name] == 'double':
value = natgridmodule.getrd(name)
else:
raise AttributeError(name)
return value
#---------------------------------------------------------------------------------
# *******************************************************************
# **************** end of magic functions **************************
# *******************************************************************
#---------------------------------------------------------------------------------
def printParameterTable():
""" --------------------------------------------------------------------------------------------------------
routine: printParameterTable
purpose: print the control parameter table using the default values from outside the Natgrid class
usage: import nat
nat.printParameterTable()
passed: nothing
returned: None
definition: printParameterTable():
--------------------------------------------------------------------------------------------------------"""
names = ['name', '----', 'adf', 'alg', 'asc', 'bI', 'bJ', 'dup', 'ext', 'hor', 'igr', 'magx',
'magy', 'magz', 'non', 'nul', 'rad', 'sdi', 'upd', 'ver', 'xas', 'yas', 'zas' ]
parameterDict = {
'name':('type ', ' legal values ',' default values ',' description '),
'----':('-----', '--------------------','-----------------','------------------------------------------------------------'),
        'adf': ('int ','0 = no or 1 = yes ',' 0 ','produce data file of algorithmic info for display? (see alg) '),
        'alg': ('char ','any file name ',' "nnalg.dat" ','file name for algorithmic display tool (see adf) '),
        'asc': ('int ','0 = no or 1 = yes ',' 1 ','is automatic scaling allowed? '),
'bI': ('float','>= 1. ',' 1.5 ','tautness increasing effect of the gradients by increasing bI'),
'bJ': ('float','>= 1. ',' 7.0 ','tautness decreasing breadth of region affected by gradients '),
        'dup': ('int ','0 = yes or 1 = no ',' 1 ','are duplicate input coordinates allowed? '),
'ext': ('int ','0 = no or 1 = yes ',' 1 ','is extrapolation allowed outside the convex hull? '),
'hor': ('float','>= 0. ',' -1.0 ','amount of horizontal overlap from outside current region '),
        'igr': ('int ','0 = no or 1 = yes ',' 0 ','are gradients to be computed? '),
'magx':('float','> 0. ',' 1.0 ','scale factor for x coordinate values '),
'magy':('float','> 0. ',' 1.0 ','scale factor for y coordinate values '),
'magz':('float','> 0. ',' 1.0 ','scale factor for z coordinate values '),
        'non': ('int ','0 = yes or 1 = no ',' 0 ','are interpolated values allowed to be negative? '),
'nul': ('float','any float ',' 0.0 ','value for points outside the convex hull if no extrapolation'),
        'rad': ('int ','0 = rad or 1 = deg ',' 0 ','are slopes and aspects returned in radians or degrees? '),
'sdi': ('int ','0 = no or 1 = yes ',' 0 ','are slopes and aspects to be computed? '),
        'upd': ('int ','0=N to S or 1=S to N',' 1 ','does the output array go from N to S or S to N? '),
'ver': ('float','>= 0. ',' -1.0 ','amount of vertical overlap from outside current region '),
'xas': ('float','> 0. ',' 0.0 ','scale used by automatic scaling of x in last interpolation '),
'yas': ('float','> 0. ',' 0.0 ','scale used by automatic scaling of y in last interpolation '),
'zas': ('float','> 0. ',' 0.0 ','scale used by automatic scaling of z in last interpolation ') }
for item in names:
items = (item, parameterDict[item][0], parameterDict[item][1], parameterDict[item][2], parameterDict[item][3])
print('%-7.7s %-6.6s %-12.12s %-15.15s %s' % items)
return
def printStoredParameters():
""" --------------------------------------------------------------------------------------------------------
routine: printStoredParameters
purpose: print the values of the current natgrid control parameters in c code. The call
to the method function rgrd will change them to the instance values.
usage: import nat
nat.printStoredParameters()
passed: nothing
returned: None
definition: printStoredParameters():
--------------------------------------------------------------------------------------------------------"""
names = ['name', '----', 'adf', 'alg', 'asc', 'bI', 'bJ', 'dup', 'ext', 'hor', 'igr', 'magx',
'magy', 'magz', 'non', 'nul', 'rad', 'sdi', 'upd', 'ver', 'xas', 'yas', 'zas' ]
names = names[2:]
typeDict = {
'adf':'int', 'alg':'char', 'asc':'int', 'bI':'float', 'bJ':'float', 'dup':'int', 'ext':'int',
'hor':'float', 'igr':'int', 'magx':'float', 'magy':'float', 'magz':'float', 'non':'int', 'nul':'float',
'rad':'int', 'sdi':'int', 'upd':'int', 'ver':'float', 'xas':'float', 'yas':'float', 'zas':'float' }
for item in names:
if typeDict[item] == 'int':
print(' %s = %d' % (item, natgridmodule.geti(item)))
elif typeDict[item] == 'char':
print(' %s = %s' % (item, natgridmodule.getc(item)))
elif typeDict[item] == 'float':
print(' %s = %f' % (item, natgridmodule.getr(item)))
elif typeDict[item] == 'double':
print(' %s = %f' % (item, natgridmodule.getrd(item)))
return None
def checkdim(x, y):
#------------------------------------------------------------------------------------------
#
# purpose: determine whether the coordinate grid is random or monotonically increasing
#
# usage:
#
# returned: x, y, monotonic, xreverse, yreverse
#
#-------------------------------------------------------------------------------------------
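    # e.g. x = [30., 20., 10.] is reversed to [10., 20., 30.] for the test below and xreverse is set to 'yes'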
xsize = len(x)
if x[0] > x[xsize - 1]:
x = x[::-1]
xreverse = 'yes'
else:
xreverse = 'no'
    xmonotonic = 'yes'          # monotonic and possibly reversed to make it monotonically increasing
for n in range(1, xsize):
if x[n] < x[n - 1]:
xmonotonic = 'no' # not monotonic so return the original grid
ysize = len(y)
if y[0] > y[ysize - 1]:
y = y[::-1]
yreverse = 'yes'
else:
yreverse = 'no'
    ymonotonic = 'yes'          # monotonic and possibly reversed to make it monotonically increasing
for n in range(1, ysize):
if y[n] < y[n - 1]:
ymonotonic = 'no' # not monotonic so return the original grid
if xmonotonic == 'yes' and ymonotonic == 'yes': # if both are monotonic the grid is monotonic
monotonic = 'yes'
else:
monotonic = 'no'
        if xreverse == 'yes':           # return vectors to their original state
x = x[::-1]
xreverse = 'no'
if yreverse == 'yes':
y = y[::-1]
yreverse = 'no'
# note that x and y may be returned reversed as necessary only if monotonic is set to yes
return x, y, monotonic, xreverse, yreverse
#---------------------------------------------------------------------------------
# ********************************************************************************
# ********************************************************************************
#---------------------------------------------------------------------------------
def sendOutput(output, msg, value = None):
""" #---------------------------------------------------------------------------------
#
# purpose: send the same message to the screen and to a file
#
# passed : msg - the string
#
# returned: return
#
#---------------------------------------------------------------------------------"""
if value is None:
print(msg)
output.write(msg + '\n')
else:
print(msg, repr(value))
output.write(msg + ' %15.11e\n' % (value,))
return None
def document():
""" #-------------------------------------------------------------------------
#
    # purpose: 'document' writes the doc strings contained in the nat module
    #          to a file as documentation for the user
    #
    # usage: import nat
    #        nat.document()
#
# passed : nothing
#
# returned: nothing
#
#-------------------------------------------------------------------------"""
import nat
std = sys.stdout # save sys.stout to allow reassigning later
sys.stdout = open( 'natgrid.doc', 'w')
print('**********************************************************************************************\n')
print('**************************** Overview of the CDAT interface to natgrid ***********************\n')
print('**********************************************************************************************\n')
print(nat.__doc__)
print()
print()
print(' ******************** Instructions for use of the natgrids function **************************')
print(natgridmodule.natgrids.__doc__)
print()
print(' ******************** Instructions for use of the seti function **************************')
print(natgridmodule.seti.__doc__)
print()
print(' ******************** Instructions for use of the geti function **************************')
print(natgridmodule.geti.__doc__)
print()
print(' ******************** Instructions for use of the setr function **************************')
print(natgridmodule.setr.__doc__)
print()
print(' ******************** Instructions for use of the getr function **************************')
print(natgridmodule.getr.__doc__)
print()
print(' ******************** Instructions for use of the setc function **************************')
print(natgridmodule.setc.__doc__)
print()
print(' ******************** Instructions for use of the getc function **************************')
print(natgridmodule.getc.__doc__)
print()
print(' ******************** Instructions for use of the getaspects function **************************')
print(natgridmodule.getaspects.__doc__)
print()
print(' ******************** Instructions for use of the getslopes function **************************')
print(natgridmodule.getslopes.__doc__)
print()
print(' ******************** Instructions for use of the pntinits function **************************')
print(natgridmodule.pntinits.__doc__)
print()
print(' ******************** Instructions for use of the pnts function **************************')
print(natgridmodule.pnts.__doc__)
print()
print(' ******************** Instructions for use of the pntend function **************************')
print(natgridmodule.pntend.__doc__)
print()
print(' ******************** Instructions for use of the natgridd function **************************')
print(natgridmodule.natgridd.__doc__)
print()
print(' ******************** Instructions for use of the setrd function **************************')
print(natgridmodule.setrd.__doc__)
print()
print(' ******************** Instructions for use of the getrd function **************************')
print(natgridmodule.getrd.__doc__)
print()
print(' ******************** Instructions for use of the getaspectd function **************************')
print(natgridmodule.getaspectd.__doc__)
print()
print(' ******************** Instructions for use of the getsloped function **************************')
print(natgridmodule.getsloped.__doc__)
print()
print(' ******************** Instructions for use of the pntinitd function **************************')
print(natgridmodule.pntinitd.__doc__)
print()
print(' ******************** Instructions for use of the pntd function **************************')
print(natgridmodule.pntd.__doc__)
print()
print(' ******************** Instructions for use of the pntendd function **************************')
print(natgridmodule.pntendd.__doc__)
print()
sys.stdout = std
return None
def sendmsg(msg, value1 = None, value2 = None):
""" #---------------------------------------------------------------------------------
#
    # purpose: send a message to the screen
#
# passed : msg - the string
# value - the number associated with the string
#
# returned: return
#
#---------------------------------------------------------------------------------"""
print('*******************************************************************')
if value1 is None:
print(msg)
elif value2 is None:
print(msg, value1)
else:
print(msg, value1, value2)
print('*******************************************************************')
return None
def help(choice = None):
import nat
if choice is None: # get instructions for use of help
print(""" ----------------------------------------------------------------------------------------
    INSTRUCTIONS ON THE USE OF THE OBJECT ORIENTED INTERFACE TO THE NATGRID PACKAGE FROM NGMATH
This module is built as one class, Natgrid, which sports a single method called rgrd.
To get instructions on making an instance of Natgrid, type
nat.help('Natgrid')
To get instructions on using the control parameters, type
nat.help('parameters')
To print the table describing the control parameters, type
nat.help('table')
To get instructions on performing a regridding, type
nat.help('regrid')
To get instructions on calculating slopes and aspects, type
nat.help('aspectSlope')
    To get instructions on using the single point computational mode, type
nat.help('singlePoint')
INSTRUCTIONS ON USE OF ORIGINAL NATGRID PACKAGE FROM NGMATH
This module is built as an interface to natgridmodule.so which exports the following functions:
Single precision procedures:
natgrids - primary function for gridding.
seti - set int parameter values.
geti - retrieve values for int parameters.
setr - set float parameter values.
getr - retrieve values for float parameters
setc - set char parameter values.
getc - retrieve values for char parameters.
getaspects - get aspect values, if calculated.
getslopes - get slope values, if calculated.
pntinits - initiate single point mode.
pnts - interpolate at a single point.
        pntend - terminate single point mode.
Double precision procedures:
natgridd - primary function for gridding.
setrd - set float parameter values.
getrd - retrieve values for float parameters
getaspectd - get aspect values, if calculated.
getsloped - get slope values, if calculated.
pntinitd - initiate single point mode.
pntd - interpolate at a single point.
        pntendd - terminate single point mode.
It is feasible to use these functions directly without this module. Information is available
through their docstrings. For example, to get the docstring for the routine natgrids, follow this
procedure at the Python prompt:
import natgridmodule
        print(natgridmodule.natgrids.__doc__)
or simply type
nat.help('natgrids')
------------------------------------------------------------------------------------------------------""")
elif choice == 'Natgrid':
print(""" ----------------------------------------------------------------------------------------
To make an instance, r, type:
import nat
r = nat.Natgrid(xi, yi, xo, yo)
or
r = nat.Natgrid(xi, yi, xo, yo, listOutput = 'yes')
where xi, yi and xo, yo are the input and output grid coordinate arrays. The optional listOutput is
set to anything except 'no' if xo, yo are in list format as explained below.
        The input grid must be organized in a list format always. The sizes of the xi array and the yi array are
necessarily equal. For example, if there are n randomly spaced input data points, there
are n values in xi and n values in yi.
There are two possible formats for the output grid. The output grid coordinate arrays may be a list like
        the input array or it may be a rectangular grid. The choice between the two possibilities is made according
        to requirements in subsequent calls to the method function. The first choice is required if the subsequent
        call is to the single point mode interpolation. The list can have one or more points. Of course, the list
        could describe a rectangular grid. For example, a rectangular grid with 10 x values and 20 y values can be
        rewritten in list form with 200 x values and 200 y values. However, this form requires calling the slower
        single point interpolator. The second choice is most efficient for the basic interpolation to a rectangular
        output grid. The output grid must be monotonic but need not be equally spaced.
The grid coordinate arrays can be single precision (numpy.float32) or double precision (numpy.float64). The
decision on whether to call for a single or a double precision computation subsequently is made by looking at
the type of these arrays.
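        For instance (hypothetical arrays), coordinate arrays built with
            xi = numpy.array(xiList, numpy.float64)
        select the double precision routines, while numpy.float32 arrays select the single precision ones; if
        the four coordinate arrays disagree, the majority type wins and the others are converted to match.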
--------------------------------------------------------------------------------------------------------------------""")
elif choice == 'parameters':
print(""" ----------------------------------------------------------------------------------------
In the absence of an instance of the class Natgrid, a description of the control parameters can be found
by typing
import nat
nat.printParameterTable()
The control parameters are easily available within the class. First make an instance, r, type:
import nat
r = nat.Natgrid(xi, yi, xo, yo)
To change a setting type the new value. For example, to set igr to 1, type
r.igr = 1
    To find an individual value, type the name. For example, to examine the value of hor, type
r.hor
To check the settings type
r.printInstanceParameterTable() -- prints the table with values and a description of the parameters
used in subsequent calls to the method function rgrd
or
r.printInstanceParameters() -- prints a list of the parameters values used in subsequent calls to the
the rgrd method
        nat.printStoredParameters() -- prints the parameters in memory which may differ from the above if the
user has made more than one instance of the Natgrid class.
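        A short illustrative sequence (with hypothetical parameter values) might be
            r.ext = 0                           -- turn off extrapolation outside the convex hull
            r.nul = -999.                       -- value returned for points outside the hull
            r.printInstanceParameters()         -- confirm the settings before calling the rgrd method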
--------------------------------------------------------------------------------------------------------------------""")
elif choice == 'table':
printParameterTable()
#-----------------------------------------------------------------------------------------------------
elif choice == 'regrid':
print(""" ----------------------------------------------------------------------------------------
    natgrid is restricted to two dimensions. Consequently, it is the user's responsibility to reduce the processing
of higher dimensional data to a sequence of calls using only two dimensional data. A description of the basic
natural neighbor linear interpolation and nonlinear interpolations follow.
Make an instance, r, with:
import nat
r = nat.Natgrid(xi, yi, xo, yo)
where the xo, yo grid is rectilinear as explained in the help choice 'Natgrid'.
r.igr = 1 -- in order to set up the computation for nonlinear interpolation. The default value
for igr calls for a linear interpolation.
Then call the primary interpolation computation to regrid the input data, dataIn, on the grid (xi, yi) to
the output data, dataOut, on the grid (xo, yo), with
dataOut = r.rgrd( dataIn )
When dealing with global data described on a latitude-longitude grid, it is also possible to request a wrap
in the input grid and the input data in the longitude direction, assumed to be the yi grid coordinate, with
dataOut = r.rgrd(dataIn, wrap = 'yes')
The computation is either single or double precision as determined by the precision submitted in making
the instance.
--------------------------------------------------------------------------------------------------------------------""")
elif choice == 'aspectSlope':
print(""" ----------------------------------------------------------------------------------------
    natgrid is restricted to two dimensions. Consequently, it is the user's responsibility to reduce the processing
of higher dimensional data to a sequence of calls using only two dimensional data. A description of the basic
natural neighbor linear and nonlinear interpolations returning the aspect and the slope at the output grid
points follows.
First make an instance, r, with:
import nat
r = nat.Natgrid(xi, yi, xo, yo)
where the xo, yo grid is rectilinear as explained in the help choice 'Natgrid'.
r.igr = 1 -- in order to set up the computation for nonlinear interpolation. The default value
for igr calls for a linear interpolation.
Then call the primary interpolation computation to regrid the input data, dataIn, on the grid (xi, yi) to
the output data, dataOut, on the grid (xo, yo), while asking for the aspect and the slope on this output grid, with
dataOut, a, s = r.rgrd( dataIn, aspectSlope = 'yes' )
where a is the aspect, the direction of the steepest descent in degrees measured from 'north' and s is the
slope in degrees measured from the horizontal. Necessarily, these are arrays aligned with the rectilinear
output grid, xo, yo.
It is also possible to request a wrap in the input grid and the input data in the longitude direction, assumed
to be the yi grid coordinate, by adding a keyword as
dataOut, a, s = r.rgrd( dataIn, aspectSlope = 'yes', wrap = 'yes' )
The computation is either single or double precision as determined by the precision submitted in making
the instance.
--------------------------------------------------------------------------------------------------------------------""")
elif choice == 'singlePoint':
print(""" ----------------------------------------------------------------------------------------
    natgrid is restricted to two dimensions. Consequently, it is the user's responsibility to reduce the processing
of higher dimensional data to a sequence of calls using only two dimensional data. A description of the single
point natural neighbor linear and nonlinear interpolations follows.
First make an instance, r, with:
import nat
        r = nat.Natgrid(xi, yi, xo, yo, listOutput = 'yes')
where the xo, yo output grid is in the list form (not a rectangular output grid) as explained
in the help choice 'Natgrid'.
r.igr = 1 -- in order to set up the computation for nonlinear interpolation. The default value
for igr calls for a linear interpolation.
Then call the single point mode interpolation computation to regrid the input data, dataIn, on the grid (xi, yi)
to the output data, dataOut, on the grid (xo, yo), type
dataOut = r.rgrd( dataIn )
The single point mode is slow but it provides a choice where the interpolation is to one or more points
    rather than to a complete rectangular grid.
The computation is either single or double precision as determined by the precision submitted in making
the instance.
--------------------------------------------------------------------------------------------------------------------""")
elif choice == 'natgrids':
print(natgridmodule.natgrids.__doc__)
elif choice == 'seti':
print(natgridmodule.seti.__doc__)
elif choice == 'geti':
print(natgridmodule.geti.__doc__)
elif choice == 'setr':
print(natgridmodule.setr.__doc__)
elif choice == 'getr':
print(natgridmodule.getr.__doc__)
elif choice == 'setc':
print(natgridmodule.setc.__doc__)
elif choice == 'getc':
print(natgridmodule.getc.__doc__)
elif choice == 'getaspects':
print(natgridmodule.getaspects.__doc__)
elif choice == 'getslopes':
print(natgridmodule.getslopes.__doc__)
elif choice == 'pntinits':
print(natgridmodule.pntinits.__doc__)
elif choice == 'pnts':
print(natgridmodule.pnts.__doc__)
elif choice == 'pntend':
print(natgridmodule.pntend.__doc__)
elif choice == 'natgridd':
print(natgridmodule.natgridd.__doc__)
elif choice == 'setrd':
print(natgridmodule.setrd.__doc__)
elif choice == 'getrd':
print(natgridmodule.getrd.__doc__)
elif choice == 'getaspectd':
print(natgridmodule.getaspectd.__doc__)
elif choice == 'getsloped':
print(natgridmodule.getsloped.__doc__)
elif choice == 'pntinitd':
print(natgridmodule.pntinitd.__doc__)
elif choice == 'pntd':
print(natgridmodule.pntd.__doc__)
elif choice == 'pntendd':
print(natgridmodule.pntendd.__doc__)
else:
print('Your request is not in help. The help choices are: ')
print('Natgrid, parameters, table, regrid, aspectSlope, singlePoint, natgrids, seti, geti, setr, getr, setc, getc, getaspects, getslopes, pntinits, pnts, pntend, natgridd, setrd, getrd, getaspectd, getsloped, pntinitd, pntd, pntendd')
return None
| 88,775 | 47.19544 | 242 |
py
|
PyBDSF
|
PyBDSF-master/natgrid/setup.py
|
#!/usr/bin/env python
from numpy.distutils.core import setup, Extension
import glob,sys
sources=glob.glob('Src/*.c')
setup (name = "natgrid",
version='1.0',
description = "natgrid",
url = "http://cdat.sf.net",
packages = [''],
package_dir = {'': 'Lib'},
include_dirs = ['Include',],
ext_modules = [Extension('natgridmodule', sources),
]
)
| 404 | 22.823529 | 58 |
py
|
PyBDSF
|
PyBDSF-master/natgrid/Test/test_natgrid.py
|
# Adapted for numpy/ma/cdms2 by convertcdms.py
"""Documentation for module natgridtest: an automatic test for natgrid, an interface to the ngmath NATGRID
TESTING
Typing
cdat natgridtest.py
generates some testing of the natgridmodule using analytical functions as fields. It also writes a
hard copy of the documentation to the file natgridmodule.doc and a copy of the information describing
the nature of the tests to test.asc. For the single and the double precision interpolations from
randomly spaced data to a rectangular grid on a sphere, the numerical results are written to netCDF files
if there is access to the module cdms.
DOCUMENTATION
Without conducting the tests, documentation written to the file natgridmodule.doc can be produced after
importing the natgridtest module by typing
natgridtest.document()
"""
import sys, numpy, math, random, nat, natgridmodule
writeTestcase = 'yes'
try:
import cdms2
except ImportError:
print 'Can not write test case results to netCDF files without module cdms2'
writeTestcase = 'no'
def document():
#-------------------------------------------------------------------------------
#
# purpose: 'document' writes documentation for the user to a file
#
# usage: import natgridtest
# natgridtest.document()
#
# passed : nothing
#
# returned: nothing
#
#-------------------------------------------------------------------------------
import nat
std = sys.stdout # save sys.stout to allow reassigning later
sys.stdout = open( 'natgridmodule.doc', 'w')
print '**********************************************************************************************\n'
print '*************************** Overview of the CDAT interface to natgrid ************************\n'
print '**********************************************************************************************\n'
print nat.__doc__
print
print
print ' HELP PACKAGE EXAMPLE \n'
print ' ************************ Default Parameter Table **********************\n'
print ' -----------------------------------------------------------------------------------------------------'
nat.help('table')
print
sys.stdout = std
return None
def sendOutput(msg, value = None, screen = 'no'):
#------------------------------------------------------------------------------
#
    #  purpose: send a message and optionally a value to a file and, if screen is not 'no',
# send the same thing to the screen
#
# usage: sendOutput(msg, value = number, screen = 'yes')
#
# passed : msg - the string to write to the output media
# value - a number
# screen - a string set to something different from 'no' if the output also
# goes to the screen
#
# returned: None
#
#------------------------------------------------------------------------------
if value is None:
if screen != 'no':
print msg
output.write(msg + '\n')
else:
if screen != 'no':
            print msg, repr(value)
output.write(msg + ' %15.11e\n' % (value,))
return None
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# ++++++++++++++++++++++++++++++++++ Autotest Calls ++++++++++++++++++++++++++++++++++
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def runtests():
#-----------------------------------------------------------------------------
#
# purpose: call test cases
#
#-----------------------------------------------------------------------------
sendOutput('############################################################################################')
sendOutput('################## Here are the results of running analytical test cases #####################')
sendOutput('############################################################################################')
testError = 0
for n in range(1,8):
err = choose(n)
if err != 0:
#print 'test number with error :',n,err
testError = testError + 1
return testError
def choose(case):
#-------------------------------------------------------------------------------
#
# purpose: check out natgrid
#
# case 1: a simple 2D interpolation using y32 -- single precision
#---------------------------------------------------------------------------------
err = 0
if case == 1:
sendOutput('\n******* natural neighbor linear interpolation -- single precision *****\n')
# array dimensions
ni = 6 # size of xi, yi, and dataIn
nxo = 21
nyo = 21
# input arrays and data
xiList = [0.00, 1.00, 0.00, 1.00, 0.40, 0.75]
yiList = [0.00, 0.00, 1.00, 1.00, 0.20, 0.65]
dataInList = [0.00, 0.00, 0.00, 0.00, 1.25, 0.80]
xi = numpy.array(xiList, numpy.float32)
yi = numpy.array(yiList, numpy.float32)
dataIn = numpy.array(dataInList, numpy.float32)
# output array
xo = uniformGrid(nxo, 1.0, 0.0)
yo = uniformGrid(nyo, 1.0, 0.0)
r = nat.Natgrid(xi, yi, xo, yo)
dataOut = r.rgrd(dataIn)
sendOutput('*** writing single precision linear interpolation test case to the netCDF file SingleLinearRegrid.nc')
write1D_4DField('SingleLinearRegrid', dataOut, xo, yo)
dataCheck = storedAnswers('linearRegrid')
dataCheck = numpy.reshape(dataCheck, (nxo,nyo))
error = rmserror(dataOut, dataCheck) # find the rms error
sendOutput('\n******* compare results\n')
sendOutput('*** the linear interpolation test case rms error is usually less than 1.e-05')
sendOutput('*** the linear interpolation test case rms error = ', error)
if error > .0001:
err = 1
return err
elif case == 2:
sendOutput('\n******* natural neighbor linear interpolation -- double precision *****\n')
# array dimensions
        ni = 6                             # size of xi, yi, and dataIn
nxo = 21
nyo = 21
# input arrays and data
xiList = [0.00, 1.00, 0.00, 1.00, 0.40, 0.75]
yiList = [0.00, 0.00, 1.00, 1.00, 0.20, 0.65]
dataInList = [0.00, 0.00, 0.00, 0.00, 1.25, 0.80]
xi = numpy.array(xiList, numpy.float64)
yi = numpy.array(yiList, numpy.float64)
dataIn = numpy.array(dataInList, numpy.float64)
# output array
xo = uniformGrid(nxo, 1.0, 0.0)
yo = uniformGrid(nyo, 1.0, 0.0)
xo = xo.astype(numpy.float64)
yo = yo.astype(numpy.float64)
r = nat.Natgrid(xi, yi, xo, yo)
dataOut = r.rgrd(dataIn)
xo = xo.astype(numpy.float32) # convert back to single precision
yo = yo.astype(numpy.float32)
dataOut = dataOut.astype(numpy.float32)
sendOutput('*** writing double precision linear interpolation test case to the netCDF file DoubleLinearRegrid.nc')
write1D_4DField('DoubleLinearRegrid', dataOut, xo, yo)
dataCheck = storedAnswers('linearRegrid')
dataCheck = numpy.reshape(dataCheck, (nxo,nyo))
error = rmserror(dataOut, dataCheck) # find the rms error
sendOutput('\n******* compare results\n')
sendOutput('*** the linear interpolation test case rms error is usually less than 1.e-05')
sendOutput('*** the linear interpolation test case rms error = ', error)
if error > .0001:
err = 1
return err
elif case == 3:
sendOutput('\n******* natural neighbor nonlinear interpolation -- single precision *****\n')
# array dimensions
ni = 6 # size of xi, yi, and dataIn
nxo = 21
nyo = 21
# input arrays and data
xiList = [0.00, 1.00, 0.00, 1.00, 0.40, 0.75]
yiList = [0.00, 0.00, 1.00, 1.00, 0.20, 0.65]
dataInList = [0.00, 0.00, 0.00, 0.00, 1.25, 0.80]
xi = numpy.array(xiList, numpy.float32)
yi = numpy.array(yiList, numpy.float32)
dataIn = numpy.array(dataInList, numpy.float32)
# output array
xo = uniformGrid(nxo, 1.0, 0.0)
yo = uniformGrid(nyo, 1.0, 0.0)
r = nat.Natgrid(xi, yi, xo, yo)
r.igr = 1 # choose nonlinear interpolation
dataOut = r.rgrd(dataIn)
sendOutput('*** writing single precision nonlinear interpolation test case to the netCDF file SingleNonlinearRegrid.nc')
write1D_4DField('SingleNonlinearRegrid', dataOut, xo, yo)
dataCheck = storedAnswers('nonlinearRegrid')
dataCheck = numpy.reshape(dataCheck, (nxo,nyo))
error = rmserror(dataOut, dataCheck) # find the rms error
sendOutput('\n******* compare results\n')
sendOutput('*** the nonlinear interpolation test case rms error is usually less than 1.e-05')
sendOutput('*** the nonlinear interpolation test case rms error = ', error)
if error > .0001:
err = 1
return err
elif case == 4:
sendOutput('\n******* natural neighbor nonlinear interpolation -- double precision *****\n')
# array dimensions
        ni = 6                             # size of xi, yi, and dataIn
nxo = 21
nyo = 21
# input arrays and data
xiList = [0.00, 1.00, 0.00, 1.00, 0.40, 0.75]
yiList = [0.00, 0.00, 1.00, 1.00, 0.20, 0.65]
dataInList = [0.00, 0.00, 0.00, 0.00, 1.25, 0.80]
xi = numpy.array(xiList, numpy.float64)
yi = numpy.array(yiList, numpy.float64)
dataIn = numpy.array(dataInList, numpy.float64)
# output array
xo = uniformGrid(nxo, 1.0, 0.0)
yo = uniformGrid(nyo, 1.0, 0.0)
xo = xo.astype(numpy.float64)
yo = yo.astype(numpy.float64)
r = nat.Natgrid(xi, yi, xo, yo)
r.igr = 1 # choose nonlinear interpolation
dataOut = r.rgrd(dataIn)
xo = xo.astype(numpy.float32) # convert back to single precision
yo = yo.astype(numpy.float32)
dataOut = dataOut.astype(numpy.float32)
sendOutput('*** writing double precision nonlinear interpolation test case to the netCDF file DoubleNonlinearRegrid.nc')
write1D_4DField('DoubleNonlinearRegrid', dataOut, xo, yo)
dataCheck = storedAnswers('nonlinearRegrid')
dataCheck = numpy.reshape(dataCheck, (nxo,nyo))
error = rmserror(dataOut, dataCheck) # find the rms error
sendOutput('\n******* compare results\n')
sendOutput('*** the nonlinear interpolation test case rms error is usually less than 1.e-05')
sendOutput('*** the nonlinear interpolation test case rms error = ', error)
if error > .0001:
err = 1
return err
elif case == 5:
sendOutput('\n******* interpolation and computation of aspects and slopes -- single precision *******\n')
# array dimensions
ni = 800 # size of xi, yi, and dataIn
nxo = 21
nyo = 21
# input array and data
xisort, xi = randomGrid(ni, 1.2, -0.2) # xisort has random numbers monotonically increasing
yisort, yi = randomGrid(ni, 1.2, -0.2)
dataIn = numpy.zeros((ni,), numpy.float32)
for i in range(ni):
dataIn[i] = (xi[i] - 0.25)*(xi[i] - 0.25) + (yi[i] - 0.50)*(yi[i] - 0.50)
# output array
xo = uniformGrid(nxo, 1.0, 0.0)
yo = uniformGrid(nyo, 1.0, 0.0)
r = nat.Natgrid(xi, yi, xo, yo)
dataOut, aspect, slope = r.rgrd(dataIn, aspectSlope = 'yes')
sendOutput('*** writing single precision linear interpolation test case to the netCDF file AspectSlopeRegrid.nc')
write1D_4DField('AspectSlopeRegrid', dataOut, xo, yo)
# Calculate the exact answer
dataCheck = numpy.zeros((nxo, nyo), numpy.float32)
for i in range(nxo):
for j in range(nyo):
dataCheck[i,j] = (xo[i] - 0.25)*(xo[i] - 0.25) + (yo[j] - 0.50)*(yo[j] - 0.50)
sendOutput('*** writing exact answer to single precision interpolation test case to the netCDF file AspectSlopeExact.nc')
        write1D_4DField('AspectSlopeExact', dataCheck, xo, yo)
error = rmserror(dataOut, dataCheck) # find the rms error
sendOutput('\n******* compare results\n')
sendOutput('*** the linear interpolation test case rms error is usually about 1.e-03')
sendOutput('*** the linear interpolation test case rms error = ', error)
# Calculate the x and y aspects
u = numpy.zeros((nxo, nyo), numpy.float32)
v = numpy.zeros((nxo, nyo), numpy.float32)
for i in range(nxo):
for j in range(nyo):
uvtemp = (math.pi/180.)*aspect[i,j]
u[i,j] = math.cos(uvtemp)
v[i,j] = math.sin(uvtemp)
sendOutput('*** writing the cosine of the aspect to xaspect.nc')
sendOutput('*** writing the sine of the aspect to yaspect.nc')
write1D_4DField('xaspect', u, xo, yo)
write1D_4DField('yaspect', v, xo, yo)
if error > .01:
err = 1
return err
elif case == 6:
sendOutput('\n******* single point mode -- single precision *****\n')
# array dimensions
ni = 171 # size of xi, yi, and dataIn
nxo = 21
nyo = 21
# input arrays and data
xisort, xi = randomGrid(ni, 1.2, -0.2) # xisort has random numbers monotonically increasing
yisort, yi = randomGrid(ni, 1.2, -0.2)
dataIn = numpy.zeros((ni,), numpy.float32)
for i in range(ni):
dataIn[i] = (xi[i] - 0.25)*(xi[i] - 0.25) + (yi[i] - 0.50)*(yi[i] - 0.50)
# output array
xo = uniformGrid(nxo, 1.0, 0.0)
yo = uniformGrid(nyo, 1.0, 0.0)
xn, yn = grid2Dto1D(xo, yo)
r = nat.Natgrid(xi, yi, xn, yn, listOutput = 'yes')
r.igr = 1 # choose nonlinear interpolation
zn = r.rgrd(dataIn)
xo, yo, dataOut = c1Dto2D(nxo, nyo, xn, yn, zn)
sendOutput('*** writing single precision single point mode test case to the netCDF file SinglePointMode.nc')
write1D_4DField('SinglePointMode', dataOut, xo, yo)
dataCheck = numpy.zeros((nxo,nyo), numpy.float32)
for i in range(nxo):
for j in range(nyo):
dataCheck[i,j] = (xo[i] - 0.25)*(xo[i] - 0.25) + (yo[j] - 0.50)*(yo[j] - 0.50)
sendOutput('*** writing exact answer to single precision single point mode test case to the netCDF file SinglePointExact.nc')
        write1D_4DField('SinglePointExact', dataCheck, xo, yo)
error = rmserror(dataOut, dataCheck) # find the rms error
sendOutput('\n******* compare results\n')
sendOutput('*** the nonlinear single point mode test case rms error is usually less than 1.e-02')
sendOutput('*** the nonlinear single point test case rms error = ', error)
if error > .01:
err = 1
return err
elif case == 7:
sendOutput('\n******* nonlinear interpolation of y32 with a wrap -- single precision *****\n')
# input arrays and data
lati,latiSort,loni,loniSort = storedGrids()
y32 = YData(loni, lati) # y32(lati[i], loni[j]) format
newOrder = (1,0)
y32 = numpy.transpose(y32, newOrder)
lonLinear, latLinear, y32Linear = c2Dto1D(loni, lati, y32) # change to the linear list format
# output array
nlato = 71
nlono = 144
lato = uniformGrid(nlato, 87.5, -87.5) # start at - 87.5
lono = uniformGrid(nlono, 357.5, 0.0) # start at 0.
r = nat.Natgrid(latLinear, lonLinear, lato, lono)
#r.igr = 1 # choose nonlinear interpolation
dataOut = r.rgrd(y32Linear, wrap = 'yes')
dataCheck = YData(lono, lato) # longitude varies the fastest
        sendOutput('*** writing exact answer to single precision y32 interpolation test case to the netCDF file y32Exact.nc')
write1D_4DField('y32Exact', dataCheck, lato, lono) # lono varies the fastest. Shape is(nlati, nloni)
sendOutput('*** writing single precision y32 interpolation test case to the netCDF file y32Regrid.nc')
write1D_4DField('y32Regrid', dataOut, lato, lono)
error = rmserror(dataOut, dataCheck) # find the rms error
sendOutput('\n******* compare results\n')
sendOutput('*** the nonlinear interpolation test case rms error is usually less than 1.e-02')
sendOutput('*** the nonlinear interpolation test case rms error = ', error)
if error > .01:
err = 1
return err
dataCheck = YData(lono, lato) # longitude varies the fastest
write1D_4DField('data_Check', dataCheck, lato, lono) # lono varies the fastest. Shape is(nlati, nloni)
# ------------------------------------------------------------------------------
# Call the interpolator
print 'making instance for case 8'
r = nat.Natgrid(latLinear, lonLinear, lato, lono)
print 'call rgrd method for case 8'
dataOut = r.rgrd(y32Linear, wrap = 'yes')
print 'returning from call rgrd method for case 8'
write1D_4DField('wrapdata_Out', dataOut, lato, lono) # loni varies the fastest. Shape is(nlati, nloni)
print 'dataOut and dataCheck shapes before call to rmserror', dataOut.shape, dataCheck.shape
error = rmserror(dataOut, dataCheck) # find the rms error
print 'case 1 rms error = ', error
return None
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# ++++++++++++++++++++++++++++++++ Autotest Utilities +++++++++++++++++++++++++++++++++
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def randomGrid(number, vmax, vmin):
#----------------------------------------------------------------------------------------
#
# purpose: to construct a grid coordinate which is random but monotonically increasing
#
# usage: vsort, vraw = randomGrid(number, vmax, vmin)
#
# passed: number - the size of the array
# vmax - the largest possible value
# vmin - the smallest possible value
#
# returned: vsort - a numpy array sorted to be monotonically increasing
# vraw - the same array as vsort without the sort into a monotonically
# increasing values
#
#-----------------------------------------------------------------------------------------
listNumbers = [] # generate random numbers
vrange = vmax - vmin
for i in range(number):
listNumbers.append(vmin + vrange*random.random() )
vraw = numpy.array(listNumbers, numpy.float32) # make array of raw list of random numbers
    listNumbers.sort()                                       # sort the list of random numbers
    listNumbers.reverse()                                    # reverse the sorted list
vsort = numpy.array(listNumbers, numpy.float32)
return vsort, vraw
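# A minimal usage sketch (illustrative only, not called by the test cases above).
# The tests keep the unsorted return value as scattered input coordinates and
# ignore the sorted copy.
def _example_randomGrid(number = 5):
    vsort, vraw = randomGrid(number, 1.2, -0.2)   # random float32 values in (-0.2, 1.2)
    return vsort, vraw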
def storedAnswers(choice):
#----------------------------------------------------------------------------------------
#
# purpose: to store the answers to selected test cases
#
# usage: data = storedAnswers(choice)
#
    #  passed :  choice -- a string identifying the desired data
#
# returned: data
#
#----------------------------------------------------------------------------------------
if choice == 'linearRegrid':
linearRegridList = [
8.4993E-07, 3.7050E-03, 6.9907E-03, 9.8621E-03, 1.2324E-02, 1.4383E-02, 1.6037E-02, 1.7102E-02,
1.7583E-02, 1.7586E-02, 1.7189E-02, 1.6454E-02, 1.5428E-02, 1.4147E-02, 1.2642E-02, 1.0937E-02,
9.0519E-03, 7.0031E-03, 4.8043E-03, 2.4671E-03, 4.3879E-06, 7.2811E-03, 1.5625E-01, 1.5625E-01,
1.5625E-01, 1.5625E-01, 1.5625E-01, 1.5476E-01, 1.5088E-01, 1.4598E-01, 1.4069E-01, 1.3534E-01,
1.3005E-01, 1.2487E-01, 1.1980E-01, 1.1476E-01, 1.0963E-01, 1.0414E-01, 9.7756E-02, 8.8053E-02,
6.6249E-02, 8.4452E-04, 1.3750E-02, 2.6209E-01, 3.1250E-01, 3.1250E-01, 3.1250E-01, 3.1158E-01,
3.0454E-01, 2.9412E-01, 2.8246E-01, 2.7050E-01, 2.5865E-01, 2.4703E-01, 2.3563E-01, 2.2431E-01,
2.1283E-01, 2.0076E-01, 1.8705E-01, 1.6770E-01, 1.3780E-01, 8.7170E-02, 1.6137E-03, 1.9411E-02,
2.9894E-01, 4.5966E-01, 4.6875E-01, 4.6875E-01, 4.6336E-01, 4.4829E-01, 4.2966E-01, 4.1005E-01,
3.9052E-01, 3.7142E-01, 3.5281E-01, 3.3451E-01, 3.1623E-01, 2.9748E-01, 2.7694E-01, 2.5040E-01,
2.1500E-01, 1.6605E-01, 9.5877E-02, 2.3095E-03, 2.4271E-02, 3.1159E-01, 5.5232E-01, 6.2500E-01,
6.2490E-01, 6.1118E-01, 5.8621E-01, 5.5806E-01, 5.2959E-01, 5.0180E-01, 4.7493E-01, 4.4884E-01,
4.2320E-01, 3.9749E-01, 3.7065E-01, 3.3827E-01, 2.9815E-01, 2.4785E-01, 1.8341E-01, 1.0017E-01,
2.9305E-03, 2.8341E-02, 3.1249E-01, 6.0503E-01, 7.7272E-01, 7.8001E-01, 7.5547E-01, 7.1872E-01,
6.7979E-01, 6.4164E-01, 6.0504E-01, 5.6997E-01, 5.3607E-01, 5.0277E-01, 4.6931E-01, 4.3199E-01,
3.8746E-01, 3.3460E-01, 2.7123E-01, 1.9442E-01, 1.0243E-01, 3.4747E-03, 3.1627E-02, 3.1249E-01,
6.2499E-01, 8.7816E-01, 9.3479E-01, 8.9655E-01, 8.4565E-01, 7.9466E-01, 7.4613E-01, 7.0030E-01,
6.5677E-01, 6.1484E-01, 5.7368E-01, 5.3151E-01, 4.8291E-01, 4.2690E-01, 3.6244E-01, 2.8776E-01,
2.0100E-01, 1.0437E-01, 3.9397E-03, 3.4140E-02, 3.1249E-01, 6.2500E-01, 9.3601E-01, 1.0908E+00,
1.0330E+00, 9.6510E-01, 9.0112E-01, 8.4192E-01, 7.8681E-01, 7.3480E-01, 6.8485E-01, 6.3583E-01,
5.8391E-01, 5.2465E-01, 4.5798E-01, 3.8304E-01, 2.9858E-01, 2.0549E-01, 1.0620E-01, 4.3220E-03,
3.5887E-02, 3.1250E-01, 6.2500E-01, 9.3750E-01, 1.2500E+00, 1.1529E+00, 1.0692E+00, 9.9449E-01,
9.2588E-01, 8.6236E-01, 8.0251E-01, 7.4497E-01, 6.8836E-01, 6.2670E-01, 5.5756E-01, 4.8112E-01,
3.9679E-01, 3.0586E-01, 2.0979E-01, 1.0798E-01, 4.6174E-03, 3.6877E-02, 3.1250E-01, 6.2396E-01,
9.1734E-01, 1.1479E+00, 1.1546E+00, 1.1088E+00, 1.0471E+00, 9.8424E-01, 9.2126E-01, 8.5666E-01,
7.9286E-01, 7.2938E-01, 6.5911E-01, 5.8108E-01, 4.9636E-01, 4.0694E-01, 3.1293E-01, 2.1400E-01,
1.0972E-01, 4.8200E-03, 3.7117E-02, 3.1097E-01, 6.0588E-01, 8.6639E-01, 1.0526E+00, 1.0806E+00,
1.0693E+00, 1.0360E+00, 9.9296E-01, 9.4356E-01, 8.8783E-01, 8.2419E-01, 7.5644E-01, 6.8069E-01,
5.9785E-01, 5.0976E-01, 4.1712E-01, 3.1998E-01, 2.1814E-01, 1.1139E-01, 4.9216E-03, 3.6509E-02,
3.0353E-01, 5.7902E-01, 8.1072E-01, 9.6003E-01, 9.8676E-01, 1.0031E+00, 9.9376E-01, 9.6959E-01,
9.3602E-01, 8.9422E-01, 8.4272E-01, 7.7847E-01, 7.0090E-01, 6.1499E-01, 5.2351E-01, 4.2744E-01,
3.2700E-01, 2.2214E-01, 1.1290E-01, 4.9105E-03, 3.4793E-02, 2.9338E-01, 5.4929E-01, 7.5334E-01,
8.6617E-01, 8.9081E-01, 9.1544E-01, 9.3057E-01, 9.2741E-01, 9.1235E-01, 8.8750E-01, 8.5160E-01,
8.0010E-01, 7.2228E-01, 6.3305E-01, 5.3775E-01, 4.3786E-01, 3.3380E-01, 2.2572E-01, 1.1400E-01,
4.7721E-03, 3.2230E-02, 2.8206E-01, 5.1778E-01, 6.9308E-01, 7.7022E-01, 7.9485E-01, 8.1948E-01,
8.4411E-01, 8.6324E-01, 8.6898E-01, 8.6426E-01, 8.4838E-01, 8.1552E-01, 7.4542E-01, 6.5225E-01,
5.5230E-01, 4.4790E-01, 3.3973E-01, 2.2820E-01, 1.1429E-01, 4.5258E-03, 2.9007E-02, 2.6994E-01,
4.8377E-01, 6.2700E-01, 6.7426E-01, 6.9889E-01, 7.2353E-01, 7.4816E-01, 7.7279E-01, 7.9709E-01,
8.1464E-01, 8.2358E-01, 8.1914E-01, 7.7103E-01, 6.7221E-01, 5.6584E-01, 4.5576E-01, 3.4286E-01,
2.2858E-01, 1.1429E-01, 4.1797E-03, 2.5235E-02, 2.5676E-01, 4.4512E-01, 5.5023E-01, 5.7831E-01,
6.0294E-01, 6.2757E-01, 6.5220E-01, 6.7608E-01, 6.9914E-01, 7.2227E-01, 7.4625E-01, 7.7187E-01,
7.9999E-01, 6.8572E-01, 5.7143E-01, 4.5715E-01, 3.4286E-01, 2.2858E-01, 1.1429E-01, 3.7338E-03,
2.0987E-02, 2.4149E-01, 3.9756E-01, 4.5772E-01, 4.8235E-01, 5.0624E-01, 5.2689E-01, 5.4531E-01,
5.6257E-01, 5.7940E-01, 5.9628E-01, 6.1336E-01, 6.2992E-01, 6.3999E-01, 6.3186E-01, 5.6427E-01,
4.5715E-01, 3.4286E-01, 2.2858E-01, 1.1429E-01, 3.1880E-03, 1.6310E-02, 2.2155E-01, 3.3264E-01,
3.6166E-01, 3.8203E-01, 3.9797E-01, 4.1139E-01, 4.2346E-01, 4.3486E-01, 4.4598E-01, 4.5695E-01,
4.6754E-01, 4.7658E-01, 4.7999E-01, 4.7999E-01, 4.7245E-01, 4.2657E-01, 3.4218E-01, 2.2858E-01,
1.1429E-01, 2.5420E-03, 1.1239E-02, 1.8956E-01, 2.4052E-01, 2.5713E-01, 2.6835E-01, 2.7702E-01,
2.8442E-01, 2.9121E-01, 2.9769E-01, 3.0399E-01, 3.1006E-01, 3.1554E-01, 3.1941E-01, 3.1999E-01,
3.1999E-01, 3.1999E-01, 3.1737E-01, 2.9044E-01, 2.2479E-01, 1.1429E-01, 1.7957E-03, 5.7978E-03,
1.1982E-01, 1.3170E-01, 1.3741E-01, 1.4126E-01, 1.4439E-01, 1.4721E-01, 1.4990E-01, 1.5251E-01,
1.5503E-01, 1.5735E-01, 1.5920E-01, 1.6000E-01, 1.6000E-01, 1.6000E-01, 1.6000E-01, 1.6000E-01,
1.6000E-01, 1.5335E-01, 1.1016E-01, 9.4906E-04, 7.6115E-08, 1.3378E-03, 2.5593E-03, 3.6624E-03,
4.6450E-03, 5.5041E-03, 6.2360E-03, 6.8355E-03, 7.2956E-03, 7.6065E-03, 7.7542E-03, 7.7177E-03,
7.4883E-03, 7.1010E-03, 6.5572E-03, 5.8568E-03, 4.9997E-03, 3.9857E-03, 2.8145E-03, 1.4861E-03,
9.4486E-07]
return numpy.array((linearRegridList), numpy.float32)
elif choice == 'nonlinearRegrid':
nonlinearRegridList = [
1.4061E-07, 5.9856E-04, 3.3025E-03, 7.1219E-03, 1.1544E-02, 1.6204E-02, 2.0812E-02, 2.5066E-02,
2.8761E-02, 3.1773E-02, 3.4004E-02, 3.5371E-02, 3.5807E-02, 3.5254E-02, 3.3662E-02, 3.0987E-02,
2.7190E-02, 2.2235E-02, 1.6086E-02, 8.6904E-03, 9.3576E-07, -1.6475E-03, 4.0636E-02, 5.1891E-02,
6.2648E-02, 7.2691E-02, 8.1856E-02, 8.8773E-02, 9.2560E-02, 9.4276E-02, 9.4444E-02, 9.3304E-02,
9.0947E-02, 8.7384E-02, 8.2576E-02, 7.6457E-02, 6.8940E-02, 5.9917E-02, 4.9275E-02, 3.6498E-02,
2.0278E-02, 1.0188E-04, -2.3704E-03, 1.1213E-01, 1.6880E-01, 1.8774E-01, 2.0453E-01, 2.1804E-01,
2.2176E-01, 2.1920E-01, 2.1323E-01, 2.0517E-01, 1.9562E-01, 1.8480E-01, 1.7278E-01, 1.5950E-01,
1.4489E-01, 1.2884E-01, 1.1104E-01, 8.9636E-02, 6.3626E-02, 3.2008E-02, -8.7618E-05, -2.3976E-03,
1.5408E-01, 3.2600E-01, 3.6266E-01, 3.8633E-01, 3.9820E-01, 3.9203E-01, 3.7764E-01, 3.5940E-01,
3.3920E-01, 3.1785E-01, 2.9562E-01, 2.7254E-01, 2.4856E-01, 2.2356E-01, 1.9698E-01, 1.6591E-01,
1.2967E-01, 8.7819E-02, 4.0778E-02, -4.2476E-04, -1.9720E-03, 1.7771E-01, 4.5068E-01, 5.6699E-01,
5.9747E-01, 6.0036E-01, 5.8014E-01, 5.5077E-01, 5.1774E-01, 4.8328E-01, 4.4829E-01, 4.1306E-01,
3.7759E-01, 3.4182E-01, 3.0540E-01, 2.6448E-01, 2.1798E-01, 1.6583E-01, 1.0820E-01, 4.7477E-02,
-8.5668E-04, -1.2987E-03, 1.8918E-01, 5.3532E-01, 7.6919E-01, 8.1425E-01, 8.0561E-01, 7.7032E-01,
7.2542E-01, 6.7709E-01, 6.2785E-01, 5.7870E-01, 5.2997E-01, 4.8176E-01, 4.3409E-01, 3.8373E-01,
3.2738E-01, 2.6522E-01, 1.9749E-01, 1.2515E-01, 5.2692E-02, -1.3503E-03, -5.4612E-04, 1.9741E-01,
5.7911E-01, 9.1567E-01, 1.0141E+00, 9.9646E-01, 9.4885E-01, 8.9058E-01, 8.2845E-01, 7.6536E-01,
7.0264E-01, 6.4088E-01, 5.8039E-01, 5.2051E-01, 4.5538E-01, 3.8409E-01, 3.0698E-01, 2.2467E-01,
1.3902E-01, 5.7014E-02, -1.8810E-03, 1.5272E-04, 2.0341E-01, 5.9346E-01, 9.9962E-01, 1.1721E+00,
1.1542E+00, 1.1021E+00, 1.0360E+00, 9.6363E-01, 8.8911E-01, 8.1453E-01, 7.4107E-01, 6.6956E-01,
5.9736E-01, 5.1877E-01, 4.3381E-01, 3.4303E-01, 2.4748E-01, 1.5066E-01, 6.0634E-02, -2.4281E-03,
6.9861E-04, 2.0731E-01, 6.0225E-01, 1.0160E+00, 1.2500E+00, 1.2555E+00, 1.2148E+00, 1.1508E+00,
1.0743E+00, 9.9241E-01, 9.0888E-01, 8.2606E-01, 7.4565E-01, 6.6273E-01, 5.7280E-01, 4.7605E-01,
3.7323E-01, 2.6681E-01, 1.6090E-01, 6.3627E-02, -2.9730E-03, 1.0247E-03, 2.0918E-01, 6.0431E-01,
9.9699E-01, 1.2241E+00, 1.2631E+00, 1.2530E+00, 1.2085E+00, 1.1440E+00, 1.0666E+00, 9.7985E-01,
8.9131E-01, 8.0517E-01, 7.1501E-01, 6.1671E-01, 5.1081E-01, 3.9933E-01, 2.8423E-01, 1.6982E-01,
6.6003E-02, -3.4978E-03, 1.0951E-03, 2.0736E-01, 5.7738E-01, 9.3136E-01, 1.1486E+00, 1.2035E+00,
1.2158E+00, 1.1947E+00, 1.1512E+00, 1.0908E+00, 1.0171E+00, 9.3223E-01, 8.4588E-01, 7.5422E-01,
6.5262E-01, 5.4169E-01, 4.2310E-01, 2.9972E-01, 1.7733E-01, 6.7690E-02, -3.9847E-03, 8.6025E-04,
1.9721E-01, 5.3426E-01, 8.4983E-01, 1.0425E+00, 1.0970E+00, 1.1360E+00, 1.1387E+00, 1.1160E+00,
1.0752E+00, 1.0199E+00, 9.5238E-01, 8.7484E-01, 7.8660E-01, 6.8440E-01, 5.6941E-01, 4.4420E-01,
3.1296E-01, 1.8312E-01, 6.8501E-02, -4.4155E-03, 2.4226E-04, 1.8267E-01, 4.8442E-01, 7.5872E-01,
9.1395E-01, 9.6583E-01, 1.0156E+00, 1.0482E+00, 1.0516E+00, 1.0343E+00, 1.0007E+00, 9.5303E-01,
8.9252E-01, 8.1160E-01, 7.1103E-01, 5.9312E-01, 4.6183E-01, 3.2305E-01, 1.8641E-01, 6.8047E-02,
-4.7698E-03, -5.8461E-04, 1.6578E-01, 4.3042E-01, 6.5949E-01, 7.7110E-01, 8.2280E-01, 8.7431E-01,
9.2200E-01, 9.5600E-01, 9.6629E-01, 9.5780E-01, 9.3276E-01, 8.9167E-01, 8.2627E-01, 7.3058E-01,
6.1102E-01, 4.7395E-01, 3.2792E-01, 1.8550E-01, 6.5685E-02, -5.0087E-03, -1.4594E-03, 1.4726E-01,
3.7257E-01, 5.5181E-01, 6.2548E-01, 6.7771E-01, 7.3133E-01, 7.8294E-01, 8.2931E-01, 8.6682E-01,
8.8664E-01, 8.8759E-01, 8.6922E-01, 8.2572E-01, 7.3918E-01, 6.1846E-01, 4.7522E-01, 3.2240E-01,
1.7822E-01, 6.2363E-02, -5.0958E-03, -2.2396E-03, 1.2721E-01, 3.1004E-01, 4.3490E-01, 4.8592E-01,
5.3958E-01, 5.9600E-01, 6.5193E-01, 7.0330E-01, 7.4713E-01, 7.8192E-01, 8.0552E-01, 8.1423E-01,
8.0000E-01, 7.2297E-01, 5.9985E-01, 4.5521E-01, 3.0629E-01, 1.6880E-01, 5.8577E-02, -4.9947E-03,
-2.7897E-03, 1.0526E-01, 2.4121E-01, 3.1212E-01, 3.6074E-01, 4.1573E-01, 4.6891E-01, 5.1792E-01,
5.6226E-01, 6.0169E-01, 6.3568E-01, 6.6300E-01, 6.8064E-01, 6.7757E-01, 6.4324E-01, 5.5705E-01,
4.2785E-01, 2.8778E-01, 1.5805E-01, 5.4318E-02, -4.6651E-03, -2.9748E-03, 8.0481E-02, 1.6467E-01,
2.0663E-01, 2.5106E-01, 2.9319E-01, 3.3190E-01, 3.6737E-01, 3.9985E-01, 4.2931E-01, 4.5529E-01,
4.7648E-01, 4.8955E-01, 4.8514E-01, 4.6828E-01, 4.3856E-01, 3.6952E-01, 2.6597E-01, 1.4587E-01,
4.9559E-02, -4.0612E-03, -2.6588E-03, 5.0914E-02, 8.7792E-02, 1.1716E-01, 1.4398E-01, 1.6823E-01,
1.9056E-01, 2.1134E-01, 2.3069E-01, 2.4842E-01, 2.6399E-01, 2.7621E-01, 2.8241E-01, 2.7877E-01,
2.7097E-01, 2.5980E-01, 2.4222E-01, 2.0011E-01, 1.2931E-01, 4.4238E-02, -3.1307E-03, -1.6950E-03,
1.5529E-02, 2.6578E-02, 3.5795E-02, 4.4418E-02, 5.2999E-02, 6.1652E-02, 7.0306E-02, 7.8788E-02,
8.6822E-02, 9.3985E-02, 9.9597E-02, 1.0247E-01, 1.0283E-01, 1.0170E-01, 9.8775E-02, 9.3695E-02,
8.5980E-02, 7.1050E-02, 3.6490E-02, -1.8052E-03, -9.6657E-07, -8.0903E-03, -1.3190E-02, -1.6109E-02,
-1.7301E-02, -1.7095E-02, -1.5765E-02, -1.3557E-02, -1.0694E-02, -7.3887E-03, -3.8441E-03, -2.5851E-04,
3.1767E-03, 6.2818E-03, 8.8673E-03, 1.0736E-02, 1.1677E-02, 1.1452E-02, 9.7757E-03, 6.2375E-03,
4.3809E-07]
return numpy.array((nonlinearRegridList), numpy.float32)
else:
print 'unknown option in call for data in storedAnswers'
return None
def uniformGrid(number, vend, vstart):
#----------------------------------------------------------------------------
#
# purpose: to construct a grid coordinate which is uniform
#
# usage: v = uniformGrid(number, vend, vstart)
#
# passed: number - the size of the array
# vend - the last value
# vstart - the first value
#
# returned: v - a float32 numpy array with values from vstart to v end
#
#-----------------------------------------------------------------------------
v = numpy.zeros((number,), numpy.float32)
vinc = (vend - vstart)/(number - 1)
for n in range(number):
v[n] = vstart + n*vinc
return v
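# A small worked example (illustrative only): five uniformly spaced values from
# 0.0 to 1.0, matching how the rectangular output grids above are built.
def _example_uniformGrid():
    v = uniformGrid(5, 1.0, 0.0)   # -> [0.0, 0.25, 0.5, 0.75, 1.0]
    return v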
def storedGrids():
""" #-------------------------------------------------------------------
#
# purpose: to construct a grid coordinate which is random
#
# passed : nothing
#
# returned: lati -- a 60 element latitude grid from -90. to +90. degrees
    #              latiSort -- lati sorted to be monotonically decreasing
# loni -- a 120 element longitude grid from 0. to 360. degrees
    #              loniSort -- loni sorted to be monotonically increasing
#
#------------------------------------------------------------------------"""
latiList = [
1.3092E+01, 7.1081E+01, 3.2199E+01, 2.6314E+01, -7.5665E+01, -7.2182E+00, -2.1963E+01, -8.3351E+01,
4.8161E+01, 8.6379E+01, -5.6722E+01, -3.3604E+01, 3.4670E-01, -5.9393E+00, -1.7894E+01, 1.7068E+01,
-1.0846E+01, -6.0505E+00, -4.9974E+01, 7.1796E+01, 3.3333E+01, 8.0870E+01, 2.7362E+00, 2.6315E+00,
-3.9012E+01, 5.2667E+00, -8.1956E+01, 8.8042E+01, 8.0710E+00, -5.3203E+01, -6.5512E+00, 5.0851E+01,
2.2580E+00, -2.2110E+01, 5.3739E+01, -8.7512E+01, 6.7964E+01, 3.9599E+01, 1.2495E+01, -1.1603E+01,
-1.3217E+01, 3.0072E+01, -6.2477E+01, 8.9158E+01, 6.1896E+01, 3.5624E+01, -3.5438E+01, 6.2368E+01,
-3.2040E+01, 7.2130E+01, -7.9999E+01, 6.4780E+01, 5.3882E+01, 6.9012E+01, 7.9715E+01, -7.2460E+01,
7.5047E+00, -1.5061E+01, 2.5178E+01, 6.9948E+00]
latiSortList = [
-8.7512E+01, -8.3351E+01, -8.1956E+01, -7.9999E+01, -7.5665E+01, -7.2460E+01, -6.2477E+01, -5.6722E+01,
-5.3203E+01, -4.9974E+01, -3.9012E+01, -3.5438E+01, -3.3604E+01, -3.2040E+01, -2.2110E+01, -2.1963E+01,
-1.7894E+01, -1.5061E+01, -1.3217E+01, -1.1603E+01, -1.0846E+01, -7.2182E+00, -6.5512E+00, -6.0505E+00,
-5.9393E+00, 3.4670E-01, 2.2580E+00, 2.6315E+00, 2.7362E+00, 5.2667E+00, 6.9948E+00, 7.5047E+00,
8.0710E+00, 1.2495E+01, 1.3092E+01, 1.7068E+01, 2.5178E+01, 2.6314E+01, 3.0072E+01, 3.2199E+01,
3.3333E+01, 3.5624E+01, 3.9599E+01, 4.8161E+01, 5.0851E+01, 5.3739E+01, 5.3882E+01, 6.1896E+01,
6.2368E+01, 6.4780E+01, 6.7964E+01, 6.9012E+01, 7.1081E+01, 7.1796E+01, 7.2130E+01, 7.9715E+01,
8.0870E+01, 8.6379E+01, 8.8042E+01, 8.9158E+01]
latiSortList.reverse()
loniList = [
1.0950E+02, 3.1987E+02, 1.6087E+02, 2.2737E+02, 1.4790E+02, 6.2704E+01, 6.2566E+01, 2.4556E+02,
2.4902E+01, 9.1912E+01, 1.2039E+02, 1.6807E+02, 1.8303E+02, 2.4495E+02, 1.1643E+01, 9.5821E+01,
1.6826E+02, 2.3723E+02, 1.4022E+01, 2.6537E+02, 3.4034E+01, 1.0511E+02, 2.4025E+02, 1.0651E+02,
8.4892E+01, 3.4940E+02, 1.6315E+02, 1.1100E+02, 1.4735E+02, 1.7356E+02, 7.5067E+01, 2.9491E+02,
1.3526E+02, 3.4038E+02, 3.1191E+02, 2.4636E+02, 1.0361E+02, 3.1934E+02, 2.5720E+02, 3.5403E+02,
1.8194E+02, 2.8795E+02, 9.0098E+01, 2.7536E+02, 4.1070E+01, 3.7064E+01, 1.5244E+02, 8.5413E+01,
1.3328E+02, 3.2401E+02, 2.7889E+01, 1.3045E+02, 2.3126E+01, 2.2804E+02, 1.2270E+02, 1.5981E+02,
2.1705E+02, 2.2611E+02, 2.9517E+02, 3.5181E+02, 3.0866E+02, 1.0522E+01, 2.2290E+01, 1.2809E+02,
3.1070E+01, 2.3676E+02, 1.6915E+01, 3.2640E+02, 7.1367E+01, 1.9983E+02, 1.0566E+02, 2.7452E+02,
1.3069E+02, 2.5578E+02, 2.2619E+02, 3.5151E+02, 3.3032E+01, 1.2169E+02, 1.4333E+02, 8.3669E+01,
3.3945E-01, 2.8520E+02, 9.7079E+01, 3.1794E+02, 1.7400E+02, 3.1042E+02, 1.2403E+02, 2.8891E+02,
2.5776E+02, 1.5096E+02, 4.0489E+01, 2.1803E+02, 2.6891E+02, 2.5970E+02, 2.3404E+02, 3.2476E+01,
6.4254E+01, 2.9157E+02, 4.8417E+00, 2.7701E+02, 7.5394E+01, 1.5646E+02, 4.3079E+01, 1.6228E+02,
3.3645E+02, 2.8462E+02, 3.4708E+02, 1.8942E+02, 1.4303E+02, 1.8721E+00, 1.3013E+02, 1.9077E+02,
1.8328E+02, 3.5694E+02, 3.5559E+02, 1.4661E+01, 8.7624E+01, 2.0111E+02, 1.5145E+02, 1.8391E+02]
loniSortList = [
3.3945E-01, 1.8721E+00, 4.8417E+00, 1.0522E+01, 1.1643E+01, 1.4022E+01, 1.4661E+01, 1.6915E+01,
2.2290E+01, 2.3126E+01, 2.4902E+01, 2.7889E+01, 3.1070E+01, 3.2476E+01, 3.3032E+01, 3.4034E+01,
3.7064E+01, 4.0489E+01, 4.1070E+01, 4.3079E+01, 6.2566E+01, 6.2704E+01, 6.4254E+01, 7.1367E+01,
7.5067E+01, 7.5394E+01, 8.3669E+01, 8.4892E+01, 8.5413E+01, 8.7624E+01, 9.0098E+01, 9.1912E+01,
9.5821E+01, 9.7079E+01, 1.0361E+02, 1.0511E+02, 1.0566E+02, 1.0651E+02, 1.0950E+02, 1.1100E+02,
1.2039E+02, 1.2169E+02, 1.2270E+02, 1.2403E+02, 1.2809E+02, 1.3013E+02, 1.3045E+02, 1.3069E+02,
1.3328E+02, 1.3526E+02, 1.4303E+02, 1.4333E+02, 1.4735E+02, 1.4790E+02, 1.5096E+02, 1.5145E+02,
1.5244E+02, 1.5646E+02, 1.5981E+02, 1.6087E+02, 1.6228E+02, 1.6315E+02, 1.6807E+02, 1.6826E+02,
1.7356E+02, 1.7400E+02, 1.8194E+02, 1.8303E+02, 1.8328E+02, 1.8391E+02, 1.8942E+02, 1.9077E+02,
1.9983E+02, 2.0111E+02, 2.1705E+02, 2.1803E+02, 2.2611E+02, 2.2619E+02, 2.2737E+02, 2.2804E+02,
2.3404E+02, 2.3676E+02, 2.3723E+02, 2.4025E+02, 2.4495E+02, 2.4556E+02, 2.4636E+02, 2.5578E+02,
2.5720E+02, 2.5776E+02, 2.5970E+02, 2.6537E+02, 2.6891E+02, 2.7452E+02, 2.7536E+02, 2.7701E+02,
2.8462E+02, 2.8520E+02, 2.8795E+02, 2.8891E+02, 2.9157E+02, 2.9491E+02, 2.9517E+02, 3.0866E+02,
3.1042E+02, 3.1191E+02, 3.1794E+02, 3.1934E+02, 3.1987E+02, 3.2401E+02, 3.2640E+02, 3.3645E+02,
3.4038E+02, 3.4708E+02, 3.4940E+02, 3.5151E+02, 3.5181E+02, 3.5403E+02, 3.5559E+02, 3.5694E+02]
lati = numpy.array((latiList), numpy.float32)
latiSort = numpy.array((latiSortList), numpy.float32)
loni = numpy.array((loniList), numpy.float32)
loniSort = numpy.array((loniSortList), numpy.float32)
return lati, latiSort, loni, loniSort
def grid2Dto1D(x, y):
""" #-------------------------------------------------------------------
#
# purpose: to construct a linear grid from a rectangular one
#
# passed : x[i] and y[j]
#
# returned: xn[n], yn[n]
#
#------------------------------------------------------------------------"""
numberx = len(x)
numbery = len(y)
size =numberx*numbery
xn = numpy.zeros(size, numpy.float32)
yn = numpy.zeros(size, numpy.float32)
for i in range(numberx):
for j in range(numbery):
n = j + i*numbery
xn[n] = x[i]
yn[n] = y[j]
return (xn, yn)
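# A small worked example (illustrative only) of the flattening used above, with
# the second index varying fastest: n = j + i*len(y).
def _example_grid2Dto1D():
    x = numpy.array([10., 20.], numpy.float32)
    y = numpy.array([1., 2., 3.], numpy.float32)
    xn, yn = grid2Dto1D(x, y)
    # xn -> [10. 10. 10. 20. 20. 20.]
    # yn -> [ 1.  2.  3.  1.  2.  3.]
    return xn, yn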
def c1Dto2D(numberx, numbery, xn, yn, zn):
""" #-------------------------------------------------------------------
#
    #  purpose: to construct 2D z[i,j] from the 1D zn[n] format
#
# passed: xn[n], yn[n], zn[n]
#
# returned : x[i], y[j] and z[i,j]
#
#------------------------------------------------------------------------"""
x = numpy.zeros(numberx, numpy.float32)
y = numpy.zeros(numbery, numpy.float32)
for i in range(numberx):
x[i] = xn[i*numbery]
for j in range(numbery):
y[j] = yn[j]
z = numpy.reshape(zn, (numberx, numbery))
return (x, y, z)
def c2Dto1D(x, y, z):
#---------------------------------------------------------------------------------------------------
#
# purpose: to construct 1D zn[n] from 2D z[i,j] format
#
# usage: xn, yn, zn = c2Dto1D(x, y, z)
#
# passed: x - the array which describes the rectangular grid associated with the first z index
# y - the array which describes the rectangular grid associated with the second z index
# z - the 2D data associated with the x, y grid
#
# returned: xn - a list form of the x array
# yn - a list form of the y array
    #            zn - a list form of the data array (this array has the same length as xn and yn)
#
#---------------------------------------------------------------------------------------------------
numberx = len(x)
numbery = len(y)
size =numberx*numbery
xn = numpy.zeros(size, numpy.float32)
yn = numpy.zeros(size, numpy.float32)
for i in range(numberx):
for j in range(numbery):
n = j + i*numbery
xn[n] = x[i]
yn[n] = y[j]
zn = numpy.ravel(z)
return (xn, yn, zn)
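# An illustrative round trip (not used by the tests): c2Dto1D flattens a
# rectangular field into the list format used when Natgrid is created with
# listOutput = 'yes', and c1Dto2D restores the rectangular form.
def _example_c2Dto1D_roundtrip():
    x = numpy.array([10., 20.], numpy.float32)
    y = numpy.array([1., 2., 3.], numpy.float32)
    z = numpy.zeros((2, 3), numpy.float32)
    xn, yn, zn = c2Dto1D(x, y, z)            # xn, yn and zn each have 6 entries
    x2, y2, z2 = c1Dto2D(2, 3, xn, yn, zn)   # back to a (2, 3) field
    return x2, y2, z2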
def write1D_4DField(varname, dataField, x, y = None, z = None, t = None):
#------------------------------------------------------------------------------
#
# purpose: write an output field which may be 1D, 2D, 3D or 4D to a NetCDF file
#
# usage: write1D_4DField(varname, dataField, x, y, z = None, t = None) for a 2D write
#
# passed : varname - name of the variable and the file id
# x,y,z,t - dimension vectors
# dataField - the data
#
# returned: None
#
#-------------------------------------------------------------------------------
import cdms2
fileObj = cdms2.createDataset(varname + '.nc')
# construct the axis tuple
x = x.astype(numpy.float64)
x_axis = fileObj.createAxis('x', x)
axisList = [x_axis]
if y is not None:
y = y.astype(numpy.float64)
y_axis = fileObj.createAxis('y', y)
axisList.append(y_axis)
if z is not None:
z = z.astype(numpy.float64)
z_axis = fileObj.createAxis('z', z)
axisList.append(z_axis)
if t is not None:
t = t.astype(numpy.float64)
t_axis = fileObj.createAxis('t', t)
axisList.append(t_axis)
if len(axisList) == 1:
axisTuple = (x_axis,)
else:
axisTuple = tuple(axisList)
# write the data to the file
var = fileObj.createVariable(varname, numpy.float32, axisTuple) # variable without data
var[:] = dataField # copy in the data
fileObj.close()
return None
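# An illustrative call (assumes the legacy cdms2 module is importable, as in the
# tests above): writes a small 2D field and its x and y axes to example2D.nc.
def _example_write1D_4DField():
    xo = uniformGrid(3, 1.0, 0.0)
    yo = uniformGrid(3, 1.0, 0.0)
    field = numpy.zeros((3, 3), numpy.float32)
    write1D_4DField('example2D', field, xo, yo)
    return None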
#-----------------------------------------------------------------
def YData(lonvals, latvals, data_name = 'Y32'):
#----------------------------------------------------------------------------
#
# purpose: construct Y33, Y32, Y31 or Y30 data
#
# usage: data = YData(lonvals, latvals, data_name = 'Y32'):
#
    #  passed :  lonvals -- longitude vector
    #            latvals -- latitude vector
#
# returned: data
#-----------------------------------------------------------------------------
if data_name[:3] == 'Y33':
data = Y33(lonvals, latvals)
elif data_name[:3] == 'Y32':
data = Y32(lonvals, latvals)
elif data_name[:3] == 'Y31':
data = Y31(lonvals, latvals)
elif data_name[:3] == 'Y30':
data = Y30(lonvals, latvals)
else:
msg = 'Must choose Y33, Y32, Y31 or Y30'
        raise ValueError(msg)
    return data
def Y33(lonvals, latvals):
#------------------------------------------------------------------------------
#
# purpose: construct Y33 data
#
# usage: y33 = Y33(lonvals, latvals)
#
    #  passed :  lonvals -- longitude vector
    #            latvals -- latitude vector
#
# returned: data
#------------------------------------------------------------------------------
nlon = len(lonvals)
nlat = len(latvals)
phi = (math.pi/180.)*lonvals
theta = (math.pi/180.)*latvals
y33 = numpy.zeros( (nlat,nlon), numpy.float32) # memory
fac = -(1./4.)*math.sqrt( (35./(4.*math.pi)) )
fac = 1.0
for i in range(nlon):
for j in range(nlat):
y33[j,i] = fac*(math.sin(theta[j])**3)*math.cos(3.*phi[i])
return y33
def Y32(lonvals, latvals):
#-------------------------------------------------------------------------------
#
# purpose: construct Y32 data
#
# usage: y32 = Y32(lonvals, latvals)
#
    #  passed :  lonvals -- longitude vector
    #            latvals -- latitude vector
#
# returned: data
#-------------------------------------------------------------------------------
nlon = len(lonvals)
nlat = len(latvals)
phi = (math.pi/180.)*lonvals
theta = (math.pi/180.)*latvals
y32 = numpy.zeros( (nlat,nlon), numpy.float32) # memory
fac = (1./4.)*math.sqrt( (105./(4.*math.pi)) )
fac = 1.0
for i in range(nlon):
for j in range(nlat):
y32[j,i] = fac*(math.sin(theta[j])**2)*math.cos(theta[j])*math.cos(2.*phi[i])
return y32
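# A quick spot check (illustrative only): with the normalization factor fac
# overridden to 1.0 above, Y32 reduces to sin(lat)**2 * cos(lat) * cos(2*lon)
# with the angles given in degrees, so at lat = 45. and lon = 0. the value is
# about 0.3536.
def _example_Y32():
    value = Y32(numpy.array([0.0]), numpy.array([45.0]))[0, 0]
    return value   # ~ 0.5*cos(pi/4) ~ 0.35355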
def Y31(lonvals, latvals):
#--------------------------------------------------------------------------------
#
# purpose: construct Y31 data
#
# usage: y31 = Y31(lonvals, latvals)
#
    #  passed :  lonvals -- longitude vector
    #            latvals -- latitude vector
#
# returned: data
#--------------------------------------------------------------------------------
nlon = len(lonvals)
nlat = len(latvals)
phi = (math.pi/180.)*lonvals
theta = (math.pi/180.)*latvals
y31 = numpy.zeros( (nlat,nlon), numpy.float32) # memory
fac = -(1./4.)*math.sqrt( (21./(4.*math.pi)) )
fac = 1.0
for i in range(nlon):
for j in range(nlat):
y31[j,i] = fac*math.sin(theta[j])*(5.*math.cos(theta[j])**2 - 1.)*math.cos(phi[i])
return y31
def Y30(lonvals, latvals):
#----------------------------------------------------------------------------------
#
# purpose: construct Y30 data
#
# usage: y30 = Y30(lonvals, latvals)
#
    #  passed :  lonvals -- longitude vector
    #            latvals -- latitude vector
#
# returned: data
#-----------------------------------------------------------------------------------
nlon = len(lonvals)
nlat = len(latvals)
phi = (math.pi/180.)*lonvals
theta = (math.pi/180.)*latvals
    # phi and theta above are computed from the lonvals and latvals arguments, as in Y31, Y32 and Y33
y30 = numpy.zeros( (nlat,nlon), numpy.float32) # memory
fac = math.sqrt( (7./(4.*math.pi)) )
fac = 1.0
for i in range(nlon):
for j in range(nlat):
y30[j,i] = fac*( (5./2.)*math.cos(theta[j])**3 - (3./2.)*math.cos(theta[j]) )
return y30
#-----------------------------------------------------------------
def rmserror(data1, data2):
#---------------------------------------------------------------------------------
#
# purpose: compute the rms error for two data sets having the same shape
#
# passed : the two data sets
#
# returned: rms error
#
#---------------------------------------------------------------------------------
if data1.shape != data2.shape:
print 'Error in shape in rmserror'
print 'data1 shape = ', data1.shape
print 'data2 shape = ', data2.shape
raise ValueError
d1 = numpy.ravel(data1)
d2 = numpy.ravel(data2)
sq = (d1 - d2)*(d1 - d2)
error = numpy.sum(sq)/len(d1)
rmserror = numpy.sqrt(error)
return rmserror
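# A small worked example (illustrative only): the squared differences below are
# 9 and 16, so the rms error is sqrt((9 + 16)/2) ~ 3.5355.
def _example_rmserror():
    a = numpy.array([0.0, 0.0], numpy.float32)
    b = numpy.array([3.0, 4.0], numpy.float32)
    return rmserror(a, b)   # ~ 3.5355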
if __name__ == "__main__":
output = open('test.asc', 'w') # global file name
print 'Running the test computations'
testError = runtests()
write = document()
sendOutput(' ')
sendOutput('*********')
sendOutput('General information on the use of NATGRID has been written to the file natgridmodule.doc.')
sendOutput('*********')
sendOutput(' ')
if testError == 0:
print 'Testing Completed Successfully'
else:
print 'Testing completed but it may have problems. Look at test.asc for an explanation'
print 'Some details on the testing have been written to the file test.asc.'
print 'General information on the use of NATGRID has been written to the file natgridmodule.doc.'
output.close()
| 51,193 | 45.120721 | 133 |
py
|
PyBDSF
|
PyBDSF-master/doc/source/conf.py
|
# -*- coding: utf-8 -*-
#
# PyBDSF documentation build configuration file, created by
# sphinx-quickstart on Thu Jan 19 13:27:03 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PyBDSF'
copyright = u'2022, David Rafferty and Niruj Mohan'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.11'
# The full version, including alpha/beta/rc tags.
release = '1.11.0a1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'front_pic.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyBDSFdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'PyBDSF.tex', u'PyBDSF Documentation',
u'David Rafferty and Niruj Mohan', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pybdsf', u'PyBDSF Documentation',
[u'David Rafferty and Niruj Mohan'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'PyBDSF', u'PyBDSF Documentation',
u'David Rafferty and Niruj Mohan', 'PyBDSF', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'PyBDSF'
epub_author = u'David Rafferty and Niruj Mohan'
epub_publisher = u'David Rafferty and Niruj Mohan'
epub_copyright = u'2022, David Rafferty and Niruj Mohan'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
| 9,149 | 30.6609 | 83 |
py
|
confident-sinkhorn-allocation
|
confident-sinkhorn-allocation-master/load_data_clean.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 17 21:11:58 2021
@author: Vu Nguyen
"""
import pandas as pd
from sklearn import preprocessing
import numpy as np
from sklearn.datasets import load_iris,load_breast_cancer,load_digits
import pickle
import os
path ='./vector_data/'
#====================================================== read dataset
def load_encode_data( df ):
x=df.values[:,:-1]
y=df.values[:,-1]
label_encoder = preprocessing.LabelEncoder()
label_encoder = label_encoder.fit(y)
label_encoded_y = label_encoder.transform(y)
y=label_encoded_y
features = []
for i in range(0, x.shape[1]):
try:
x[:,i].astype(float)
features.append(x[:,i])
except:
feat_encoder = preprocessing.LabelEncoder()
feature = feat_encoder.fit_transform(x[:,i])
features.append(feature)
encoded_x = np.array(features).T
encoded_x = encoded_x.reshape(x.shape[0], x.shape[1])
x=encoded_x
return x,y
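# A minimal usage sketch with hypothetical data (not part of the pipeline below):
# load_encode_data label-encodes the last column as the target and any
# non-numeric feature columns, returning purely numeric arrays.
def _example_load_encode_data():
    df_demo = pd.DataFrame({'f1': [0.1, 0.2, 0.3],
                            'f2': ['a', 'b', 'a'],
                            'label': ['yes', 'no', 'yes']})
    x_demo, y_demo = load_encode_data(df_demo)
    return x_demo, y_demo   # x_demo has shape (3, 2); y_demo is [1, 0, 1]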
#path_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/blood-transfusion/transfusion.data' # binary
#path_url='https://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.data'
path_url='https://archive.ics.uci.edu/ml/machine-learning-databases/car/car.data'
#path_url='https://archive.ics.uci.edu/ml/machine-learning-databases/chess/king-rook-vs-king-pawn/kr-vs-kp.data'
#path_url='https://archive.ics.uci.edu/ml/machine-learning-databases/mushroom/agaricus-lepiota.data'
#df = pd.read_csv(path_url)
#data = load_iris()
#data=load_breast_cancer() # binary
#data=load_digits()
#x=data['data']
#y = data['target']
all_data = []
s_dataname = []
shapes = []
_datasetName =["cjs","hill-valley","segment_2310_20","wdbc_569_31","steel-plates-fault",
"analcatdata_authorship","synthetic_control_6c","vehicle_846_19","German-credit",
"gina_agnostic_no","madelon_no","texture","gas_drift","dna_no"
] #
for ii in range(len(_datasetName)):
if ii in [0,1,9,11,12]:
continue
temp = pd.read_csv(os.path.join(path ,_datasetName[ii]+".csv"))
if temp.shape[0] <= 30000000:
print(_datasetName[ii])
all_data.append(temp)
s_dataname.append(_datasetName[ii]+".csv")
_datasetName=np.delete(_datasetName,(0,1,9,11,12))
_datasetName=_datasetName.tolist()
# load data from UCI
# path_url='https://archive.ics.uci.edu/ml/machine-learning-databases/car/car.data'
# X,Y=load_encode_data( pd.read_csv(path_url) )
# _datasetName.append("car")
# all_data.append( np.hstack((X, np.reshape(Y,(-1,1)))))
path_url='https://archive.ics.uci.edu/ml/machine-learning-databases/chess/king-rook-vs-king-pawn/kr-vs-kp.data'
X,Y=load_encode_data( pd.read_csv(path_url) )
_datasetName.append("kr_vs_kp")
all_data.append( np.hstack((X, np.reshape(Y,(-1,1)))))
path_url='https://archive.ics.uci.edu/ml/machine-learning-databases/mushroom/agaricus-lepiota.data'
X,Y=load_encode_data( pd.read_csv(path_url) )
_datasetName.append("agaricus-lepiota")
all_data.append( np.hstack((X, np.reshape(Y,(-1,1)))))
data=load_breast_cancer() # binary
X=data['data']
Y = data['target']
_datasetName.append("breast_cancer")
all_data.append( np.hstack((X, np.reshape(Y,(-1,1)))))
data=load_digits()
X=data['data']
Y = data['target']
_datasetName.append("digits")
all_data.append( np.hstack((X, np.reshape(Y,(-1,1)))))
print(_datasetName)
#with open('all_data_protocol4.pickle', 'wb') as handle:
with open('all_data.pickle', 'wb') as handle:
#pickle.dump([all_data,_datasetName], handle,protocol=4)
pickle.dump([all_data,_datasetName], handle)
| 3,686 | 27.804688 | 115 |
py
|
confident-sinkhorn-allocation
|
confident-sinkhorn-allocation-master/setup.py
|
from setuptools import setup, find_packages
setup(
name='csa',
version='1.0',
packages=find_packages(),
include_package_data = True,
description='Confident Sinkhorn Allocation',
install_requires=[
"colorama>=0.4.5",
"cycler>=0.11.0",
"fonttools>=4.33.3",
"joblib>=1.1.0",
"kiwisolver>=1.4.3",
"matplotlib>=3.1.2",
"numpy>=1.21.0",
"packaging>=21.3",
"pandas>=1.2.3",
"Pillow>=9.2.0",
"pyparsing>=3.0.9",
"python-dateutil>=2.8.2",
"pytz>=2022.1",
"scikit-learn>=1.0",
"scipy>=1.7.1",
"six>=1.16.0",
"threadpoolctl>=3.1.0",
"tqdm>=4.64.0",
"xgboost>=1.6.1",
],
)
| 741 | 23.733333 | 48 |
py
|
confident-sinkhorn-allocation
|
confident-sinkhorn-allocation-master/load_multi_label_data.py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 3 14:25:12 2022
@author: Vu Nguyen
"""
#import arff
from scipy.io import arff
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
import pickle
def load_yeast_multilabel(folder=''):
# temp = arff.loadarff(open('vector_data/yeast-train.arff', 'r'))
# df_train = pd.DataFrame(temp[0])
# temp = arff.loadarff(open('vector_data/yeast-test.arff', 'r'))
# df_test = pd.DataFrame(temp[0])
# X_train=df_train.values[:,:103]
# Y_train=df_train.values[:,103:].astype(int)
# X_test=df_test.values[:,:103]
# Y_test=df_test.values[:,103:].astype(int)
temp = arff.loadarff(open(folder+'yeast.arff', 'r'))
df = pd.DataFrame(temp[0])
data={}
data['target']=df.values[:,103:].astype(int)
data['data']=df.values[:,:103]
return data
def load_emotions_multilabel(folder=''):
# temp = arff.loadarff(open('vector_data/yeast-train.arff', 'r'))
# df_train = pd.DataFrame(temp[0])
# temp = arff.loadarff(open('vector_data/yeast-test.arff', 'r'))
# df_test = pd.DataFrame(temp[0])
# X_train=df_train.values[:,:103]
# Y_train=df_train.values[:,103:].astype(int)
# X_test=df_test.values[:,:103]
# Y_test=df_test.values[:,103:].astype(int)
temp = arff.loadarff(open(folder+'emotions/emotions.arff', 'r'))
df = pd.DataFrame(temp[0])
data={}
data['target']=df.values[:,-6:].astype(int)
data['data']=df.values[:,:-6]
return data
def load_genbase_multilabel(folder=''):
temp = arff.loadarff(open(folder+'/genbase/genbase.arff', 'r'))
df = pd.DataFrame(temp[0])
data={}
data['target']=df.values[:,-27:].astype(int)
data['data']=df.values[:,:-27]
ord_enc = OneHotEncoder()
data['data'] = ord_enc.fit_transform(data['data'])
data['data']=data['data'].todense()
return data
def load_corel5k_multilabel(folder):
temp = arff.loadarff(open(folder+'corel5k/corel5k.arff', 'r'))
df = pd.DataFrame(temp[0])
data={}
data['target']=df.values[:,-374:].astype(int)
data['data']=df.values[:,:-374]
# ord_enc = OneHotEncoder()
# data['data'] = ord_enc.fit_transform(data['data'])
# data['data']=data['data'].todense()
return data
#vu=load_emotions_multilabel()
#vu=load_corel5k_multilabel()
path ='./vector_data/'
all_data=[]
_datasetName=['yeast','emotions','genbase']
all_data.append(load_yeast_multilabel(path))
all_data.append(load_emotions_multilabel(path))
all_data.append(load_genbase_multilabel(path))
with open('all_data_multilabel.pickle', 'wb') as handle:
pickle.dump([all_data,_datasetName], handle)
| 2,733 | 24.082569 | 69 |
py
|
confident-sinkhorn-allocation
|
confident-sinkhorn-allocation-master/load_data.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 17 21:11:58 2021
@author: Vu Nguyen
"""
import pandas as pd
from sklearn import preprocessing
import numpy as np
from sklearn.datasets import load_iris,load_breast_cancer,load_digits
import pickle
import os
path ='./vector_data/'
#====================================================== read dataset
def load_encode_data( df ):
x=df.values[:,:-1]
y=df.values[:,-1]
label_encoder = preprocessing.LabelEncoder()
label_encoder = label_encoder.fit(y)
label_encoded_y = label_encoder.transform(y)
y=label_encoded_y
features = []
for i in range(0, x.shape[1]):
try:
x[:,i].astype(float)
features.append(x[:,i])
except:
feat_encoder = preprocessing.LabelEncoder()
feature = feat_encoder.fit_transform(x[:,i])
features.append(feature)
encoded_x = np.array(features).T
encoded_x = encoded_x.reshape(x.shape[0], x.shape[1])
x=encoded_x
return x,y
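# Hedged usage sketch (added for illustration; the dataframe below is hypothetical and not
# part of the original script). load_encode_data expects the label in the last column and
# label-encodes any non-numeric feature column:
#   demo_df = pd.DataFrame({'color': ['red', 'blue', 'red'],
#                           'size': [1.0, 2.0, 3.0],
#                           'label': ['cat', 'dog', 'cat']})
#   demo_x, demo_y = load_encode_data(demo_df)
#   # demo_x has shape (3, 2) with 'color' encoded as integers; demo_y == array([0, 1, 0])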
#path_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/blood-transfusion/transfusion.data' # binary
#path_url='https://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.data'
path_url='https://archive.ics.uci.edu/ml/machine-learning-databases/car/car.data'
#path_url='https://archive.ics.uci.edu/ml/machine-learning-databases/chess/king-rook-vs-king-pawn/kr-vs-kp.data'
#path_url='https://archive.ics.uci.edu/ml/machine-learning-databases/mushroom/agaricus-lepiota.data'
#df = pd.read_csv(path_url)
#data = load_iris()
#data=load_breast_cancer() # binary
#data=load_digits()
#x=data['data']
#y = data['target']
all_data = []
s_dataname = []
shapes = []
_datasetName =["cjs","hill-valley","segment_2310_20","wdbc_569_31","steel-plates-fault",
"analcatdata_authorship","synthetic_control_6c","vehicle_846_19","German-credit",
"gina_agnostic_no","madelon_no","texture","gas_drift","dna_no"
] #
for ii in range(len(_datasetName)):
temp = pd.read_csv(os.path.join(path ,_datasetName[ii]+".csv"))
if temp.shape[0] <= 30000000:
print(_datasetName[ii])
all_data.append(temp)
s_dataname.append(_datasetName[ii]+".csv")
# load data from UCI
path_url='https://archive.ics.uci.edu/ml/machine-learning-databases/car/car.data'
X,Y=load_encode_data( pd.read_csv(path_url) )
_datasetName.append("car")
all_data.append( np.hstack((X, np.reshape(Y,(-1,1)))))
path_url='https://archive.ics.uci.edu/ml/machine-learning-databases/chess/king-rook-vs-king-pawn/kr-vs-kp.data'
X,Y=load_encode_data( pd.read_csv(path_url) )
_datasetName.append("kr_vs_kp")
all_data.append( np.hstack((X, np.reshape(Y,(-1,1)))))
path_url='https://archive.ics.uci.edu/ml/machine-learning-databases/mushroom/agaricus-lepiota.data'
X,Y=load_encode_data( pd.read_csv(path_url) )
_datasetName.append("agaricus-lepiota")
all_data.append( np.hstack((X, np.reshape(Y,(-1,1)))))
data=load_breast_cancer() # binary
X=data['data']
Y = data['target']
_datasetName.append("breast_cancer")
all_data.append( np.hstack((X, np.reshape(Y,(-1,1)))))
data=load_digits()
X=data['data']
Y = data['target']
_datasetName.append("digits")
all_data.append( np.hstack((X, np.reshape(Y,(-1,1)))))
#with open('all_data_protocol4.pickle', 'wb') as handle:
with open('all_data.pickle', 'wb') as handle:
#pickle.dump([all_data,_datasetName], handle,protocol=4)
pickle.dump([all_data,_datasetName], handle)
| 3,540 | 27.556452 | 115 |
py
|
confident-sinkhorn-allocation
|
confident-sinkhorn-allocation-master/__init__.py
|
#from algorithm import *
#from utilities import *
| 49 | 24 | 24 |
py
|
confident-sinkhorn-allocation
|
confident-sinkhorn-allocation-master/utilities/utils.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 16 20:14:22 2022
@author: Vu Nguyen
"""
import pickle
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
def str2num(s, encoder):
return encoder[s]
def append_acc_early_termination(AccList, NumIter):
if len(AccList)<=NumIter:
Acc_Last_Iter=AccList[-1]
AccList = AccList + [Acc_Last_Iter]*(1+NumIter-len(AccList))
return AccList
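# Illustrative note (sketch, not in the original utilities): when pseudo-labeling terminates
# early, append_acc_early_termination pads the accuracy list with its last value so every
# trial reports NumIter+1 entries, e.g.
#   append_acc_early_termination([70.0, 72.5], 5) -> [70.0, 72.5, 72.5, 72.5, 72.5, 72.5]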
def rename_dataset(dataset_name):
print(dataset_name)
newname=[]
if dataset_name=="madelon_no":
return "Madelon"
elif dataset_name=="synthetic_control_6c":
return "Synthetic Control"
elif dataset_name=="digits":
return "Digits"
elif dataset_name=="analcatdata_authorship":
return "Analcatdata"
elif dataset_name=="German-credit":
return "German Credit"
elif dataset_name=="segment_2310_20":
return "Segment"
elif dataset_name=="wdbc_569_31":
return "Wdbc"
elif dataset_name=="dna_no":
return "Dna"
elif dataset_name=="agaricus-lepiota":
return "Agaricus-Lepiota"
elif dataset_name=="breast_cancer":
return "Breast Cancer"
elif dataset_name=="agaricus-lepiota":
return "Agaricus-Lepiota"
elif dataset_name=="emotions":
return "Emotions"
# 18,7,6,4,2
def get_train_test_unlabeled(_datasetName,path_to_data,random_state=0): # for multi-classification
"""
path_to_data='all_data.pickle'
"""
# load the data
with open(path_to_data, 'rb') as handle:
[all_data, datasetName_list] = pickle.load(handle)
dataset_index= datasetName_list.index(_datasetName)
data=all_data[dataset_index]
#if dataset_index<14:
if _datasetName in ['segment_2310_20','wdbc_569_31','steel-plates-fault','analcatdata_authorship','synthetic_control_6c',\
'vehicle_846_19','German-credit','gina_agnostic_no','madelon_no','texture','gas_drift','dna_no']:
_dic = list(set(data.values[:, -1]))
num_labels = len(_dic)
encoder = {}
for i in range(len(_dic)):
encoder[_dic[i]] = i
# shuffle original dataset
data = data.sample(frac=1,random_state=42)
X = data.values[:, :-1]
# X = scale(X) # scale the X
scaler = StandardScaler()
X = scaler.fit_transform(X)
Y = np.array([str2num(s, encoder) for s in data.values[:, -1]])
else:
X = data[:, :-1]
Y=data[:,-1]
#if dataset_index in [9,1,16]:
if _datasetName in ['hill-valley','gina_agnostic_no','agaricus-lepiota']:
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2,
random_state=random_state)
x_train, x_unlabeled, y_train, y_unlabeled = train_test_split(x_train, y_train,
test_size=0.6, random_state=random_state)
#elif dataset_index in [17,8]:
elif _datasetName in ['German-credit','breast_cancer']:
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2,
random_state=random_state)
x_train, x_unlabeled, y_train, y_unlabeled = train_test_split(x_train, y_train,
test_size=0.8, random_state=random_state)
#elif dataset_index in [18,6,4]:
elif _datasetName in ['steel-plates-fault','synthetic_control_6c','digits']:
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2,
random_state=random_state)
x_train, x_unlabeled, y_train, y_unlabeled = train_test_split(x_train, y_train,
test_size=0.9, random_state=random_state)
#elif dataset_index in [10,15,12,14,11,13]: # label / unlabel > 15:1
elif _datasetName in ['madelon_no','texture','gas_drift','dna_no','car','kr_vs_kp']:
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2,
random_state=random_state)
x_train, x_unlabeled, y_train, y_unlabeled = train_test_split(x_train, y_train,
test_size=0.94, random_state=random_state)
#elif dataset_index in [3,5]: # label / unlabel > 15:1
elif _datasetName in ['wdbc_569_31','analcatdata_authorship']:
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2,
random_state=random_state)
x_train, x_unlabeled, y_train, y_unlabeled = train_test_split(x_train, y_train,
test_size=0.9, random_state=random_state)
#elif dataset_index in [7]:
elif _datasetName in ['vehicle_846_19']:
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2,
random_state=random_state)
x_train, x_unlabeled, y_train, y_unlabeled = train_test_split(x_train, y_train,
test_size=0.8, random_state=random_state)
#elif dataset_index in [2]:
elif _datasetName in ['segment_2310_20']:
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2,
random_state=random_state)
x_train, x_unlabeled, y_train, y_unlabeled = train_test_split(x_train, y_train,
test_size=0.7, random_state=random_state)
else:
print(_datasetName + "is not defined. please check!")
p = np.random.permutation(x_train.shape[0])
x_train, y_train = x_train[p], y_train[p]
p = np.random.permutation(x_unlabeled.shape[0])
x_unlabeled, y_unlabeled = x_unlabeled[p], y_unlabeled[p]
y_test=np.reshape(y_test,(-1,1))
y_train=np.reshape(y_train,(-1,1))
return x_train,y_train, x_test, y_test, x_unlabeled
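# Usage sketch (hypothetical call, assuming all_data.pickle was produced by load_data.py):
#   x_tr, y_tr, x_te, y_te, x_unlab = get_train_test_unlabeled(
#       'segment_2310_20', path_to_data='all_data.pickle', random_state=0)
# x_unlab is the pool handed to the pseudo-labelers; y_tr and y_te are returned as column vectors.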
def get_train_test_unlabeled_for_multilabel(_datasetName,path_to_data='all_data_multilabel.pickle',random_state=0): # for multi-label classification
"""
path_to_data='all_data.pickle'
"""
# load the data
with open(path_to_data, 'rb') as handle:
[all_data, datasetName_list] = pickle.load(handle)
dataset_index= datasetName_list.index(_datasetName)
data=all_data[dataset_index]
X = data['data']
Y=data['target']
if _datasetName=="emotions": # emotions dataset
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.1, random_state=random_state)
x_train, x_unlabeled, y_train, y_unlabeled = train_test_split(x_train, y_train,
test_size=0.5, random_state=random_state)
elif _datasetName=="genbase": # genbase dataset
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.1, random_state=random_state)
x_train, x_unlabeled, y_train, y_unlabeled = train_test_split(x_train, y_train,
test_size=0.7, random_state=random_state)
elif _datasetName=="yeast": # yeast dataset
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.3, random_state=random_state)
x_train, x_unlabeled, y_train, y_unlabeled = train_test_split(x_train, y_train,
test_size=0.7, random_state=random_state)
else:
print(_datasetName + "is not defined. please check!")
p = np.random.permutation(x_train.shape[0])
x_train, y_train = x_train[p], y_train[p]
p = np.random.permutation(x_unlabeled.shape[0])
x_unlabeled, y_unlabeled = x_unlabeled[p], y_unlabeled[p]
return x_train,y_train, x_test, y_test, x_unlabeled
| 8,280 | 39.004831 | 148 |
py
|
confident-sinkhorn-allocation
|
confident-sinkhorn-allocation-master/utilities/__init__.py
| 0 | 0 | 0 |
py
|
|
confident-sinkhorn-allocation
|
confident-sinkhorn-allocation-master/run_experiments/run_ups.py
|
import sys
#sys.path.insert(0,'..')
sys.path.append('..')
from tqdm import tqdm
import numpy as np
import os
import argparse
import logging
import pickle
from algorithm.pseudo_labeling import Pseudo_Labeling
#from algorithm.flexmatch import FlexMatch
from algorithm.ups import UPS
#from algorithm.csa import CSA
from utilities.utils import get_train_test_unlabeled,append_acc_early_termination
from utilities.utils import get_train_test_unlabeled_for_multilabel
import warnings
warnings.filterwarnings('ignore')
def run_experiments(args, save_dir):
out_file = args.output_filename
numTrials=args.numTrials
numIters=args.numIters
verbose=args.verbose
dataset_name=args.dataset_name
num_XGB_models=args.numXGBs
upper_threshold=args.upper_threshold
lower_threshold=args.lower_threshold
IsMultiLabel=False # by default
    # in our list of datasets, ['yeast','emotions'] are multi-label classification datasets;
    # the rest are multiclass classification
if dataset_name in ['yeast','emotions']: # multi-label
IsMultiLabel=True
accuracy = []
for tt in tqdm(range(numTrials)):
np.random.seed(tt)
# load the data
if IsMultiLabel==False: # multiclassification
x_train,y_train, x_test, y_test, x_unlabeled=get_train_test_unlabeled(dataset_name,path_to_data='../all_data.pickle',random_state=tt)
else: # multi-label classification
x_train,y_train, x_test, y_test, x_unlabeled=get_train_test_unlabeled_for_multilabel(dataset_name,path_to_data='../all_data_multilabel.pickle',random_state=tt)
pseudo_labeller = UPS(x_unlabeled,x_test,y_test,
num_iters=numIters,
upper_threshold=upper_threshold,
lower_threshold=lower_threshold,
num_XGB_models=num_XGB_models,
verbose = 0,
IsMultiLabel=IsMultiLabel
)
pseudo_labeller.fit(x_train, y_train)
accuracy.append( append_acc_early_termination(pseudo_labeller.test_acc,numIters) )
# print and pickle results
filename = os.path.join(save_dir, '{}_{}_{}_M_{}_numIters_{}_numTrials_{}_up_thresh_{}_low_thresh_{}.pkl'.format(out_file, pseudo_labeller.algorithm_name , dataset_name,\
num_XGB_models,numIters,numTrials,upper_threshold,lower_threshold))
    print('\n* Trial summary: average accuracy per pseudo-iteration')
print( np.mean( np.asarray(accuracy),axis=0))
print('\n* Saving to file {}'.format(filename))
with open(filename, 'wb') as f:
pickle.dump([accuracy], f)
f.close()
def main(args):
# make save directory
save_dir = args.save_dir
if not os.path.exists(save_dir):
os.mkdir(save_dir)
save_path = save_dir + '/' + '/'
if not os.path.exists(save_path):
os.mkdir(save_path)
# set up logging
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(save_dir, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
logging.info(args)
run_experiments(args, save_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Args for UPS experiments')
parser.add_argument('--numIters', type=int, default=5, help='number of Pseudo Iterations')
parser.add_argument('--numTrials', type=int, default=20, help ='number of Trials (Repeated Experiments)' )
parser.add_argument('--numXGBs', type=int, default=10, help ='number of XGB models, M=?' )
parser.add_argument('--upper_threshold', type=float, default=0.8, help ='upper threshold in pseudo-labeling' )
parser.add_argument('--lower_threshold', type=float, default=0.2, help ='lower threshold for uncertainty score' )
parser.add_argument('--dataset_name', type=str, default='synthetic_control_6c', help='segment_2310_20 | wdbc_569_31 | analcatdata_authorship | synthetic_control_6c | \
German-credit | madelon_no | agaricus-lepiota | breast_cancer | digits | yeast | emotions')
parser.add_argument('--verbose', type=str, default='No', help='verbose Yes or No')
parser.add_argument('--output_filename', type=str, default='', help='name of output files')
parser.add_argument('--save_dir', type=str, default='results_output', help='name of save directory')
args = parser.parse_args()
main(args)
| 4,544 | 38.181034 | 175 |
py
|
confident-sinkhorn-allocation
|
confident-sinkhorn-allocation-master/run_experiments/plot_results.py
|
import sys
import os
sys.path.append('../')
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import numpy as np
import matplotlib.pyplot as plt
#from algorithm.pseudo_labeling import Pseudo_Labeling
from algorithm.pseudo_labeling import Pseudo_Labeling
from algorithm.flexmatch import FlexMatch
from algorithm.ups import UPS
from algorithm.csa import CSA
from utilities.utils import get_train_test_unlabeled,get_train_test_unlabeled_for_multilabel
import pickle
# load the data
with open('all_data.pickle', 'rb') as handle:
[all_data, _datasetName] = pickle.load(handle)
color_list=['k','g','c','b','r','y']
marker_list=['*','^','x','s','o','>']
linestyle_list=['--',':','-.','-']
save_dir = 'results_output' # path to the folder store the results
out_file=''
numTrials=20 # number of repeated trials
numIters=5 # number of used pseudo-iterations
#====================================================================
# list of datasets
#segment_2310_20 | wdbc_569_31 | analcatdata_authorship | synthetic_control_6c | \
#German-credit | madelon_no | agaricus-lepiota | breast_cancer | digits | emotions | yeast
dataset_name='segment_2310_20'
list_algorithms=['Pseudo_Labeling','FlexMatch','UPS','SLA','CSA'] # list of algorithms to be plotted
# the following parameters to be used to load the correct paths
confidence='ttest' # for CSA
upper_threshold=0.8 # for Pseudo_Labeling,FlexMatch
low_threshold=0.2 # for UPS
num_XGB_models=10 # for CSA and UPS
IsMultiLabel=False # by default
if dataset_name in ['yeast','emotions']: # multi-label
IsMultiLabel=True
# load the data
if IsMultiLabel==False: # multiclassification
x_train,y_train, x_test, y_test, x_unlabeled=get_train_test_unlabeled(dataset_name,path_to_data='all_data.pickle',random_state=0)
else: # multi-label classification
x_train,y_train, x_test, y_test, x_unlabeled=get_train_test_unlabeled_for_multilabel(dataset_name,path_to_data='all_data_multilabel.pickle',random_state=0)
confidence='variance' # for CSA
fig, ax1 = plt.subplots(figsize=(6,3.5))
ax1.set_ylabel("Test Accuracy",fontsize=14)
ax1.set_xlabel("Pseudo-label Iteration",fontsize=14)
ax1.tick_params(axis='y')
#Accuracy_List=[]
for idx,algo_name in enumerate(list_algorithms):
if algo_name=='CSA':
filename = os.path.join(save_dir, '{}_{}_{}_{}_M_{}_numIters_{}_numTrials_{}.pkl'.format(out_file, algo_name \
,confidence, dataset_name,num_XGB_models,numIters,numTrials))
elif algo_name=='SLA':
filename = os.path.join(save_dir, '{}_{}_{}_M_{}_numIters_{}_numTrials_{}.pkl'.format(out_file, algo_name \
, dataset_name,num_XGB_models,numIters,numTrials))
elif algo_name=='UPS':
filename = os.path.join(save_dir, '{}_{}_{}_M_{}_numIters_{}_numTrials_{}_up_thresh_{}_low_thresh_{}.pkl'.format(out_file,\
algo_name , dataset_name,num_XGB_models,numIters,numTrials,upper_threshold,low_threshold))
else:
filename = os.path.join(save_dir, '{}_{}_{}_numIters_{}_numTrials_{}_threshold_{}.pkl'.format(out_file, algo_name \
, dataset_name,numIters,numTrials,upper_threshold))
with open(filename, 'rb') as handle:
accuracy = pickle.load(handle)
#Accuracy_List.append(accuracy)
accuracy = np.asarray(accuracy)
accuracy=np.reshape(accuracy,(numTrials,numIters+1))
mean,std= np.mean(accuracy,axis=0),np.std(accuracy,axis=0)
x_axis=np.arange(len(mean))
if idx==0:
# supervised learning result is the first accuracy score in the list
supervised_learning_result=[ mean[0] ]*len(x_axis)
ax1.plot( np.arange(len(mean)),supervised_learning_result,'m:',linewidth=4,label="Supervised Learning")
fmt=color_list[idx%len(color_list)]+marker_list[idx%len(marker_list)]+linestyle_list[idx%len(linestyle_list)]
ax1.errorbar( np.arange(len(mean)),mean,yerr=0.1*std,fmt=fmt,elinewidth=4,label=algo_name)
number_class=len(np.unique(y_train))
ax1.set_title(dataset_name, fontsize=20)
plt.grid()
lgd=ax1.legend(loc='upper center',fancybox=True,bbox_to_anchor=(0.5, -0.2),ncol=3, fontsize=12)
#strFile="figs/{}.pdf".format(dataset_name)
strFile="figs/{}.png".format(dataset_name)
fig.savefig(strFile,bbox_inches='tight')
print("====Saved the plot into " +strFile)
| 4,373 | 32.906977 | 159 |
py
|
confident-sinkhorn-allocation
|
confident-sinkhorn-allocation-master/run_experiments/run_pseudo_labeling.py
|
import sys
#sys.path.insert(0,'..')
sys.path.append('..')
import numpy as np
import os
import argparse
import logging
import pickle
from tqdm import tqdm
from algorithm.pseudo_labeling import Pseudo_Labeling
#from algorithm.flexmatch import FlexMatch
#from algorithm.ups import UPS
#from algorithm.csa import CSA
from utilities.utils import get_train_test_unlabeled,append_acc_early_termination
from utilities.utils import get_train_test_unlabeled_for_multilabel
import warnings
warnings.filterwarnings('ignore')
def run_experiments(args, save_dir):
out_file = args.output_filename
numTrials=args.numTrials
numIters=args.numIters
upper_threshold=args.upper_threshold
verbose=args.verbose
dataset_name=args.dataset_name
IsMultiLabel=False # by default
    # in our list of datasets, ['yeast','emotions'] are multi-label classification datasets;
    # the rest are multiclass classification
if dataset_name in ['yeast','emotions']: # multi-label
IsMultiLabel=True
accuracy = []
# run the experiments over multiple Trials, then average the results
for tt in tqdm(range(numTrials)):
np.random.seed(tt)
# load the data ====================================================================
if IsMultiLabel==False: # multiclassification
x_train,y_train, x_test, y_test, x_unlabeled=get_train_test_unlabeled(dataset_name,path_to_data='../all_data.pickle',random_state=tt)
else: # multi-label classification
x_train,y_train, x_test, y_test, x_unlabeled=get_train_test_unlabeled_for_multilabel(dataset_name,path_to_data='../all_data_multilabel.pickle',random_state=tt)
pseudo_labeller = Pseudo_Labeling(x_unlabeled,x_test,y_test,
num_iters=numIters,
upper_threshold=upper_threshold,
verbose = False,
IsMultiLabel=IsMultiLabel
)
pseudo_labeller.fit(x_train, y_train)
accuracy.append( append_acc_early_termination(pseudo_labeller.test_acc,numIters) )
# print and pickle results
filename = os.path.join(save_dir, '{}_{}_{}_numIters_{}_numTrials_{}_threshold_{}.pkl'.format(out_file, pseudo_labeller.algorithm_name,dataset_name,\
numIters,numTrials,upper_threshold))
    print('\n* Trial summary: average accuracy per pseudo-iteration')
print( np.mean( np.asarray(accuracy),axis=0))
print('\n* Saving to file {}'.format(filename))
with open(filename, 'wb') as f:
pickle.dump([accuracy], f)
f.close()
def main(args):
# make save directory
save_dir = args.save_dir
if not os.path.exists(save_dir):
os.mkdir(save_dir)
save_path = save_dir + '/' + '/'
if not os.path.exists(save_path):
os.mkdir(save_path)
# set up logging
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(save_dir, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
logging.info(args)
run_experiments(args, save_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Args for Pseudo_Labeling experiments')
parser.add_argument('--numIters', type=int, default=5, help='number of Pseudo Iterations')
parser.add_argument('--numTrials', type=int, default=20, help ='number of Trials (Repeated Experiments)' )
parser.add_argument('--upper_threshold', type=float, default=0.8, help ='threshold in pseudo-labeling' )
parser.add_argument('--dataset_name', type=str, default='synthetic_control_6c', help='segment_2310_20 | wdbc_569_31 | analcatdata_authorship | synthetic_control_6c | \
German-credit | madelon_no | agaricus-lepiota | breast_cancer | digits | yeast | emotions')
parser.add_argument('--verbose', type=str, default='No', help='verbose Yes or No')
parser.add_argument('--output_filename', type=str, default='', help='name of output files')
parser.add_argument('--save_dir', type=str, default='results_output', help='name of save directory')
args = parser.parse_args()
main(args)
| 4,272 | 36.814159 | 171 |
py
|
confident-sinkhorn-allocation
|
confident-sinkhorn-allocation-master/run_experiments/run_sla.py
|
import sys
import os
sys.path.insert(0,'..')
sys.path.append("../algorithm")
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import numpy as np
import argparse
import logging
import pickle
from tqdm import tqdm
from algorithm.pseudo_labeling import Pseudo_Labeling
#from confident_sinkhorn_allocation.algorithm.flexmatch import FlexMatch
#from confident_sinkhorn_allocation.algorithm.ups import UPS
from algorithm.csa import CSA
from utilities.utils import get_train_test_unlabeled,append_acc_early_termination
from utilities.utils import get_train_test_unlabeled_for_multilabel
import warnings
warnings.filterwarnings('ignore')
def run_experiments(args, save_dir):
out_file = args.output_filename
numTrials=args.numTrials
numIters=args.numIters
confidence_choice = args.confidence_choice
verbose=args.verbose
dataset_name=args.dataset_name
num_XGB_models=args.numXGBs
IsMultiLabel=False # by default
    # in our list of datasets, ['yeast','emotions'] are multi-label classification datasets;
    # the rest are multiclass classification
if dataset_name in ['yeast','emotions']: # multi-label
IsMultiLabel=True
accuracy = []
for tt in tqdm(range(numTrials)):
np.random.seed(tt)
# load the data
if IsMultiLabel==False: # multiclassification
x_train,y_train, x_test, y_test, x_unlabeled=get_train_test_unlabeled(dataset_name,path_to_data='../all_data.pickle',random_state=tt)
else: # multi-label classification
x_train,y_train, x_test, y_test, x_unlabeled=get_train_test_unlabeled_for_multilabel(dataset_name,path_to_data='../all_data_multilabel.pickle',random_state=tt)
# SLA = CSA where confidence_choice=None
pseudo_labeller = CSA(x_unlabeled,x_test,y_test,
num_iters=numIters,
confidence_choice=confidence_choice,#confidence_choice=None
num_XGB_models=num_XGB_models,
verbose = verbose,
IsMultiLabel=IsMultiLabel
)
pseudo_labeller.fit(x_train, y_train)
accuracy.append( append_acc_early_termination(pseudo_labeller.test_acc,numIters) )
# print and pickle results
filename = os.path.join(save_dir, '{}_{}_{}_M_{}_numIters_{}_numTrials_{}.pkl'.format(out_file, pseudo_labeller.algorithm_name , dataset_name,\
num_XGB_models,numIters,numTrials))
    print('\n* Trial summary: average accuracy per pseudo-iteration')
print( np.mean( np.asarray(accuracy),axis=0))
print('\n* Saving to file {}'.format(filename))
with open(filename, 'wb') as f:
pickle.dump([accuracy], f)
f.close()
def main(args):
# make save directory
save_dir = args.save_dir
if not os.path.exists(save_dir):
os.mkdir(save_dir)
save_path = save_dir + '/' + '/'
if not os.path.exists(save_path):
os.mkdir(save_path)
# set up logging
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(save_dir, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
logging.info(args)
run_experiments(args, save_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Args for CSA experiments')
parser.add_argument('--numIters', type=int, default=5, help='number of Pseudo Iterations')
parser.add_argument('--numTrials', type=int, default=20, help ='number of Trials (Repeated Experiments)' )
parser.add_argument('--numXGBs', type=int, default=10, help ='number of XGB models, M=?' )
parser.add_argument('--confidence_choice', type=str, default=None, help ='confidence choices: ttest | variance | entropy | None' )
parser.add_argument('--dataset_name', type=str, default='synthetic_control_6c', help='segment_2310_20 | wdbc_569_31 | analcatdata_authorship | synthetic_control_6c | \
German-credit | madelon_no | agaricus-lepiota | breast_cancer | digits | yeast | emotions')
parser.add_argument('--verbose', type=str, default='True', help='verbose True or False')
parser.add_argument('--output_filename', type=str, default='', help='name of output files')
parser.add_argument('--save_dir', type=str, default='results_output', help='name of save directory')
args = parser.parse_args()
main(args)
| 4,557 | 38.293103 | 171 |
py
|
confident-sinkhorn-allocation
|
confident-sinkhorn-allocation-master/run_experiments/run_flexmatch.py
|
import sys
sys.path.append('..')
import numpy as np
import os
import argparse
import logging
import pickle
from tqdm import tqdm
from algorithm.pseudo_labeling import Pseudo_Labeling
from algorithm.flexmatch import FlexMatch
#from algorithm.ups import UPS
#from algorithm.csa import CSA
from utilities.utils import get_train_test_unlabeled,append_acc_early_termination
from utilities.utils import get_train_test_unlabeled_for_multilabel
import warnings
warnings.filterwarnings('ignore')
def run_experiments(args, save_dir):
out_file = args.output_filename
numTrials=args.numTrials
numIters=args.numIters
upper_threshold=args.upper_threshold
verbose=args.verbose
dataset_name=args.dataset_name
IsMultiLabel=False # by default
    # in our list of datasets, ['yeast','emotions'] are multi-label classification datasets;
    # the remaining datasets are multiclass classification
if dataset_name in ['yeast','emotions']: # multi-label
IsMultiLabel=True
accuracy = []
for tt in tqdm(range(numTrials)):
np.random.seed(tt)
# load the data
if IsMultiLabel==False: # multiclassification
x_train,y_train, x_test, y_test, x_unlabeled=get_train_test_unlabeled(dataset_name,path_to_data='../all_data.pickle',random_state=tt)
else: # multi-label classification
x_train,y_train, x_test, y_test, x_unlabeled=get_train_test_unlabeled_for_multilabel(dataset_name,path_to_data='../all_data_multilabel.pickle',random_state=tt)
pseudo_labeller = FlexMatch(x_unlabeled,x_test,y_test,
num_iters=numIters,
upper_threshold=upper_threshold,
verbose = False,
IsMultiLabel=IsMultiLabel
)
pseudo_labeller.fit(x_train, y_train)
accuracy.append( append_acc_early_termination(pseudo_labeller.test_acc,numIters) )
# print and pickle results
filename = os.path.join(save_dir, '{}_{}_{}_numIters_{}_numTrials_{}_threshold_{}.pkl'.format(out_file, pseudo_labeller.algorithm_name,dataset_name,\
numIters,numTrials,upper_threshold))
    print('\n* Trial summary: average accuracy per pseudo-iteration')
print( np.mean( np.asarray(accuracy),axis=0))
print('\n* Saving to file {}'.format(filename))
with open(filename, 'wb') as f:
pickle.dump([accuracy], f)
f.close()
def main(args):
# make save directory
save_dir = args.save_dir
if not os.path.exists(save_dir):
os.mkdir(save_dir)
save_path = save_dir + '/' + '/'
if not os.path.exists(save_path):
os.mkdir(save_path)
# set up logging
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(save_dir, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
logging.info(args)
run_experiments(args, save_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Args for FlexMatch experiments')
parser.add_argument('--numIters', type=int, default=5, help='number of Pseudo Iterations')
parser.add_argument('--numTrials', type=int, default=20, help ='number of Trials (Repeated Experiments)' )
parser.add_argument('--upper_threshold', type=float, default=0.8, help ='threshold in pseudo-labeling' )
parser.add_argument('--dataset_name', type=str, default='synthetic_control_6c', help='segment_2310_20 | wdbc_569_31 | analcatdata_authorship | synthetic_control_6c | \
German-credit | madelon_no | agaricus-lepiota | breast_cancer | digits | yeast | emotions')
parser.add_argument('--verbose', type=str, default='No', help='verbose Yes or No')
parser.add_argument('--output_filename', type=str, default='', help='name of output files')
parser.add_argument('--save_dir', type=str, default='results_output', help='name of save directory')
args = parser.parse_args()
main(args)
| 4,104 | 37.364486 | 171 |
py
|
confident-sinkhorn-allocation
|
confident-sinkhorn-allocation-master/run_experiments/run_csa.py
|
import sys
import os
sys.path.insert(0,'..')
#sys.path.append('..')
sys.path.append("../algorithm")
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import numpy as np
import os
import argparse
import logging
import pickle
from tqdm import tqdm
from algorithm.pseudo_labeling import Pseudo_Labeling
#from confident_sinkhorn_allocation.algorithm.flexmatch import FlexMatch
#from confident_sinkhorn_allocation.algorithm.ups import UPS
from algorithm.csa import CSA
from utilities.utils import get_train_test_unlabeled,append_acc_early_termination
from utilities.utils import get_train_test_unlabeled_for_multilabel
import warnings
warnings.filterwarnings('ignore')
def run_experiments(args, save_dir):
out_file = args.output_filename
numTrials=args.numTrials
numIters=args.numIters
confidence_choice = args.confidence_choice
verbose=args.verbose
dataset_name=args.dataset_name
num_XGB_models=args.numXGBs
IsMultiLabel=False # by default
    # in our list of datasets, ['yeast','emotions'] are multi-label classification datasets;
    # the rest are multiclass classification
if dataset_name in ['yeast','emotions']: # multi-label
IsMultiLabel=True
accuracy = []
for tt in tqdm(range(numTrials)):
np.random.seed(tt)
# load the data
if IsMultiLabel==False: # multiclassification
x_train,y_train, x_test, y_test, x_unlabeled=get_train_test_unlabeled(dataset_name,path_to_data='../all_data.pickle',random_state=tt)
else: # multi-label classification
x_train,y_train, x_test, y_test, x_unlabeled=get_train_test_unlabeled_for_multilabel(dataset_name,path_to_data='../all_data_multilabel.pickle',random_state=tt)
pseudo_labeller = CSA(x_unlabeled,x_test,y_test,
num_iters=numIters,
confidence_choice=confidence_choice,
num_XGB_models=num_XGB_models,
verbose = verbose,
IsMultiLabel=IsMultiLabel
)
pseudo_labeller.fit(x_train, y_train)
accuracy.append( append_acc_early_termination(pseudo_labeller.test_acc,numIters) )
# print and pickle results
filename = os.path.join(save_dir, '{}_{}_{}_M_{}_numIters_{}_numTrials_{}.pkl'.format(out_file, pseudo_labeller.algorithm_name , dataset_name,\
num_XGB_models,numIters,numTrials))
    print('\n* Trial summary: average accuracy per pseudo-iteration')
print( np.mean( np.asarray(accuracy),axis=0))
print('\n* Saving to file {}'.format(filename))
with open(filename, 'wb') as f:
pickle.dump([accuracy], f)
f.close()
def main(args):
# make save directory
save_dir = args.save_dir
if not os.path.exists(save_dir):
os.mkdir(save_dir)
save_path = save_dir + '/' + '/'
if not os.path.exists(save_path):
os.mkdir(save_path)
# set up logging
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(save_dir, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
logging.info(args)
run_experiments(args, save_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Args for CSA experiments')
parser.add_argument('--numIters', type=int, default=5, help='number of Pseudo Iterations')
parser.add_argument('--numTrials', type=int, default=20, help ='number of Trials (Repeated Experiments)' )
parser.add_argument('--numXGBs', type=int, default=10, help ='number of XGB models, M=?' )
parser.add_argument('--confidence_choice', type=str, default='ttest', help ='confidence choices: ttest | variance | entropy | None' )
parser.add_argument('--dataset_name', type=str, default='synthetic_control_6c', help='segment_2310_20 | wdbc_569_31 | analcatdata_authorship | synthetic_control_6c | \
German-credit | madelon_no | agaricus-lepiota | breast_cancer | digits | yeast | emotions')
parser.add_argument('--verbose', type=str, default='True', help='verbose True or False')
parser.add_argument('--output_filename', type=str, default='', help='name of output files')
parser.add_argument('--save_dir', type=str, default='results_output', help='name of save directory')
args = parser.parse_args()
main(args)
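# Example invocation (sketch only; adjust the dataset name and working directory to your setup):
#   python run_csa.py --numTrials 20 --numIters 5 --numXGBs 10 \
#       --confidence_choice ttest --dataset_name segment_2310_20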
| 4,521 | 37.649573 | 171 |
py
|
confident-sinkhorn-allocation
|
confident-sinkhorn-allocation-master/algorithm/flexmatch.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 15 14:19:22 2021
@author: Vu Nguyen
"""
import numpy as np
from tqdm import tqdm
from sklearn.metrics import accuracy_score
from xgboost import XGBClassifier
from scipy import stats
from .pseudo_labeling import Pseudo_Labeling
# FlexMatch Strategy for Pseudo-Labeling =======================================================================
# Zhang, Bowen, Yidong Wang, Wenxin Hou, Hao Wu, Jindong Wang, Manabu Okumura, and Takahiro Shinozaki.
# "Flexmatch: Boosting semi-supervised learning with curriculum pseudo labeling." NeurIPS 2021
class FlexMatch(Pseudo_Labeling):
# adaptive thresholding
def __init__(self, unlabelled_data, x_test,y_test,num_iters=5,upper_threshold = 0.9, verbose = False,IsMultiLabel=False):
"""
unlabelled_data : [N x d] where N is the number of unlabeled data, d is the feature dimension
x_test :[N_test x d]
y_test :[N_test x 1] for multiclassification or [N_test x K] for multilabel classification
num_iters : number of pseudo-iterations, recommended = 5 as in the paper
upper_threshold : the upper threshold used for pseudo-labeling, e.g., we assign label if the prob > 0.8
        fraction_allocation : the fraction of label allocation; if fraction_allocation=1, we assign labels to 100% of unlabeled data
lower_threshold : lower threshold, used for UPS
num_XGB_models : number of XGB models used for UPS and CSA, recommended = 10
verbose : verbose
IsMultiLabel : False => Multiclassification or True => Multilabel classification
"""
super().__init__( unlabelled_data, x_test,y_test,num_iters=num_iters,upper_threshold=upper_threshold,verbose=verbose,IsMultiLabel=IsMultiLabel)
self.algorithm_name="FlexMatch"
def predict(self, X):
super().predict(X)
def predict_proba(self, X):
super().predict_proba(X)
def evaluate_performance(self):
super().evaluate_performance()
def get_max_pseudo_point(self,class_freq,current_iter):
return super().get_max_pseudo_point(class_freq,current_iter)
def label_assignment_and_post_processing_FlexMatch(self, pseudo_labels_prob,X,y, current_iter=0,upper_threshold=None):
"""
Given the threshold, perform label assignments and augmentation
This function is particular for FlexMatch
Args:
pseudo_labels_prob: predictive prob [N x K] where N is #unlabels, K is #class
X: existing pseudo_labeled + labeled data [ N' x d ]
y: existing pseudo_labeled + labeled data [ N' x 1 ] for multiclassification
y: existing pseudo_labeled + labeled data [ N' x K ] for multilabel classification
Output:
Augmented X = augmented_X + X
Augmented y = augmented_y + Y
"""
if self.IsMultiLabel==False:
#go over each row (data point), only keep the argmax prob
# because we only allow a single data point to a single class
max_prob_matrix=self.get_prob_at_max_class(pseudo_labels_prob)
else:
# we dont need to get prob at max class for multi-label
# because a single data point can be assigned to multiple classes
max_prob_matrix=pseudo_labels_prob
# for each class, count the number of points > threshold
# this is the technique used in FlexMatch
countVector=[0]*self.nClass
for cc in range(self.nClass):
temp=np.where(max_prob_matrix[:,cc]>self.upper_threshold)[0]
countVector[cc]= len( temp )
countVector_normalized=np.asarray(countVector)/np.max(countVector)
if upper_threshold is None:
upper_threshold=self.upper_threshold
# assign labels if the prob > threshold ========================================================
assigned_pseudo_labels=np.zeros((max_prob_matrix.shape[0],self.nClass)).astype(int)
MaxPseudoPoint=[0]*self.nClass
for cc in range(self.nClass): # loop over each class
            # note that in FlexMatch, the upper_threshold is rescaled per class (below) before being used as the threshold
flex_class_upper_thresh=countVector_normalized[cc]*self.upper_threshold
# obtain the maximum number of points can be assigned per class
MaxPseudoPoint[cc]=self.get_max_pseudo_point(self.label_frequency[cc],current_iter)
idx_sorted = np.argsort( max_prob_matrix[:,cc])[::-1] # decreasing
temp_idx = np.where(max_prob_matrix[idx_sorted,cc] > flex_class_upper_thresh )[0]
labels_satisfied_threshold=idx_sorted[temp_idx]
# only select upto MaxPseudoPoint[cc] points
labels_satisfied_threshold = labels_satisfied_threshold[:MaxPseudoPoint[cc]]
assigned_pseudo_labels[labels_satisfied_threshold, cc]=1
if self.verbose:
print("MaxPseudoPoint",MaxPseudoPoint)
# post-processing and augmenting the data into X and Y ==========================================
return self.post_processing_and_augmentation(assigned_pseudo_labels,X,y)
def fit(self, X, y):
"""
main algorithm to perform pseudo labelling
Args:
X: train features [N x d]
y: train targets [N x 1]
Output:
we record the test_accuracy a vector of test accuracy per pseudo-iteration
"""
print("=====",self.algorithm_name)
self.nClass=self.get_number_of_labels(y)
self.label_frequency=self.estimate_label_frequency(y)
for current_iter in (tqdm(range(self.num_iters)) if self.verbose else range(self.num_iters)):
# Fit to data
self.model.fit(X, y)
self.evaluate_performance()
# estimate prob using unlabelled data
pseudo_labels_prob=self.get_predictive_prob_for_unlabelled_data(self.model)
#go over each row (data point), only keep the argmax prob
# max_prob=[0]*num_points
# max_prob_matrix=np.zeros((pseudo_labels_prob.shape))
# for ii in range(num_points):
# idxMax=np.argmax(pseudo_labels_prob[ii,:])
# max_prob_matrix[ii,idxMax]=pseudo_labels_prob[ii,idxMax]
# max_prob[ii]=pseudo_labels_prob[ii,idxMax]
# for each class, count the number of points > threshold
# countVector=[0]*self.nClass
# for cc in range(self.nClass):
# idx_above_threshold=np.where(max_prob_matrix[:,cc]>self.upper_threshold)[0]
# countVector[cc]= len( idx_above_threshold ) # count number of unlabeled data above the threshold
# countVector_normalized=np.asarray(countVector)/np.max(countVector)
# if self.verbose:
# print("class threshold:", np.round(countVector_normalized*self.upper_threshold,2))
            # pass the loop's iteration index so the per-class pseudo-label budget decays over iterations
            X,y=self.label_assignment_and_post_processing_FlexMatch( pseudo_labels_prob,X,y, current_iter=current_iter)
# augmented_idx=[]
# for cc in range(self.nClass):
# # compute the adaptive threshold for each class
# class_upper_thresh=countVector_normalized[cc]*self.upper_threshold
# MaxPseudoPoint=self.get_max_pseudo_point(self.label_frequency[cc],current_iter)
# idx_sorted = np.argsort( max_prob_matrix[:,cc])[::-1][:MaxPseudoPoint] # decreasing
# idx_above_threshold = np.where(max_prob_matrix[idx_sorted,cc] > class_upper_thresh)[0]
# labels_within_threshold= idx_sorted[idx_above_threshold]
# augmented_idx += labels_within_threshold.tolist()
# X,y = self.post_processing(cc,labels_within_threshold,X,y)
if self.verbose:
print("#augmented:", self.num_augmented_per_class, " len of training data ", len(y))
if np.sum(self.num_augmented_per_class)==0: # no data point is augmented
return #self.test_acc
# remove the selected data from unlabelled data
#self.unlabelled_data = np.delete(self.unlabelled_data, np.unique(augmented_idx), 0)
# evaluate_performance at the last iteration for reporting purpose
self.model.fit(X, y)
self.evaluate_performance()
| 8,767 | 42.405941 | 151 |
py
|
confident-sinkhorn-allocation
|
confident-sinkhorn-allocation-master/algorithm/pseudo_labeling.py
|
import numpy as np
from tqdm import tqdm
from sklearn.metrics import accuracy_score
from xgboost import XGBClassifier
import matplotlib.pyplot as plt
from sklearn.multioutput import MultiOutputClassifier
import copy
import sklearn
class Pseudo_Labeling(object):
# implementation of the master class for pseudo-labeling
# this class will be inherited across other subclasses
def __init__(self, unlabelled_data, x_test,y_test, num_iters=5,upper_threshold = 0.8, \
fraction_allocation=1,lower_threshold = None,num_XGB_models=0, \
verbose = False,IsMultiLabel=False):
"""
unlabelled_data : [N x d] where N is the number of unlabeled data, d is the feature dimension
x_test :[N_test x d]
y_test :[N_test x 1] for multiclassification or [N_test x K] for multilabel classification
num_iters : number of pseudo-iterations, recommended = 5 as in the paper
upper_threshold : the upper threshold used for pseudo-labeling, e.g., we assign label if the prob > 0.8
        fraction_allocation : the fraction of label allocation; if fraction_allocation=1, we assign labels to 100% of unlabeled data
lower_threshold : lower threshold, used for UPS
num_XGB_models : number of XGB models used for UPS and CSA, recommended = 10
verbose : verbose
IsMultiLabel : False => Multiclassification or True => Multilabel classification
"""
self.IsMultiLabel=False
self.algorithm_name="Pseudo_Labeling"
self.x_test=x_test
self.y_test=y_test
self.IsMultiLabel=IsMultiLabel
# for house keeping and reporting purpose
self.len_unlabels=[]
self.len_accepted_ttest=[]
self.len_selected=[]
self.num_augmented_per_class=[]
# this is the XGBoost model for multi-class classification
param = {}
param['booster'] = 'gbtree'
param['objective'] = 'binary:logistic'
param['verbosity'] = 0
param['silent'] = 1
param['seed'] = 0
# create XGBoost instance with default hyper-parameters
#xgb = XGBClassifier(**param,use_label_encoder=False)
xgb = self.get_XGB_model(param)
self.model = copy.copy(xgb)
self.unlabelled_data = unlabelled_data # this is a temporary unlabelled data changing in each iteration
self.verbose = verbose
self.upper_threshold = upper_threshold
self.num_iters=num_iters
if lower_threshold is not None:
self.lower_threshold = lower_threshold # this lower threshold is used for UPS algorithm, not the vanilla Pseudo-labeling
# allow the pseudo-data is repeated, e.g., without removing them after each iteration
# create a list of all the indices
self.unlabelled_indices = list(range(unlabelled_data.shape[0]))
self.selected_unlabelled_index=[]
if self.verbose:
print("no of unlabelled data:",unlabelled_data.shape[0], "\t no of test data:",x_test.shape[0])
# Shuffle the indices
np.random.shuffle(self.unlabelled_indices)
self.test_acc=[]
self.FractionAllocatedLabel=fraction_allocation # we will allocate labels to 100% of the unlabeled dataset
self.num_XGB_models=num_XGB_models # this is the parameter M in our paper
if num_XGB_models>1: # will be used for CSA and UPS
# for uncertainty estimation
# generate multiple models
params = { 'max_depth': np.arange(3, 20).astype(int),
'learning_rate': [0.01, 0.1, 0.2, 0.3],
'subsample': np.arange(0.5, 1.0, 0.05),
'colsample_bytree': np.arange(0.4, 1.0, 0.05),
'colsample_bylevel': np.arange(0.4, 1.0, 0.05),
'n_estimators': [100, 200, 300, 500, 600, 700, 1000]}
self.XGBmodels_list=[0]*self.num_XGB_models
param_list=[0]*self.num_XGB_models
for tt in range(self.num_XGB_models):
param_list[tt]={}
for key in params.keys():
mychoice=np.random.choice(params[key])
param_list[tt][key]=mychoice
param_list[tt]['verbosity'] = 0
param_list[tt]['silent'] = 1
param_list[tt]['seed'] = tt
#self.XGBmodels_list[tt] = XGBClassifier(**param_list[tt],use_label_encoder=False)
self.XGBmodels_list[tt] = self.get_XGB_model(param_list[tt])
def get_XGB_model(self,param):
"""
we create the XGB model depending on multiclass or multi-label setting
Args:
param: a predefined hyperparameter for XGBmodel
Output:
a single XGBClassifier for multiclass
or
a single MultiOutputClassifier for multilabel
"""
if self.IsMultiLabel==False:
return XGBClassifier(**param,use_label_encoder=False)
else:
return MultiOutputClassifier(XGBClassifier(**param,use_label_encoder=False))
def get_predictive_prob_for_unlabelled_data(self, model):
"""
Compute the predictive probability within [0,1] for unlabelled data given a single XGB model
Args:
model: a single XGBmodel
Output:
predictive probability matrix [N x K]
"""
pseudo_labels_prob = model.predict_proba(self.unlabelled_data)
# number of unlabeled data
if self.IsMultiLabel==True:
pseudo_labels_prob=np.asarray(pseudo_labels_prob).T
pseudo_labels_prob=pseudo_labels_prob[1,:,:]
return pseudo_labels_prob
def estimate_label_frequency(self, y):
"""
estimate the label frequency empirically from the initial labeled data
Args:
y: label vector or matrix (multilabel)
Output:
Given K the number of labels, it returns a vector of label frequency [1 x K]
"""
if self.IsMultiLabel==False:
if len(self.num_augmented_per_class)>0:
unique, label_frequency = np.unique( y[np.sum(self.num_augmented_per_class):], return_counts=True)
else:
unique, label_frequency = np.unique( y, return_counts=True)
else:
label_frequency = np.sum( y, axis=0)
if self.verbose:
print("==label_frequency without adjustment", np.round(label_frequency,3))
# smooth the label frequency if the ratio between the max class / min class is significant >5
# this smoothing is the implementation trick to prevent biased estimation given limited training data
ratio=np.max(label_frequency)/np.min(label_frequency)
if ratio>5:
label_frequency=label_frequency/np.sum(label_frequency)+np.ones( self.nClass )*1.0/self.nClass
return label_frequency/np.sum(label_frequency)
def evaluate_performance(self):
"""
evaluate_performance the classification performance
Store the result into: self.test_acc which is the accuracy for multiclassification \
or the precision for multilabel classification
"""
y_test_pred = self.model.predict(self.x_test)
if self.IsMultiLabel==False:
test_acc= np.round( accuracy_score(y_test_pred, self.y_test)*100, 2)# round to 2 digits xx.yy %
if self.verbose:
print('+++Test Acc: {:.2f}%'.format(test_acc))
self.test_acc +=[test_acc]
else: # multi-label classification
# Precision
prec=sklearn.metrics.precision_score(self.y_test, y_test_pred,average='samples')*100
prec=np.round(prec,2) # round to 2 digits xx.yy %
self.test_acc +=[prec] # precision score
if self.verbose:
print('+++Test Acc: {:.2f}%'.format(prec))
def get_prob_at_max_class(self,pseudo_labels_prob):
"""
Given the 2d probability matrix [N x K], we get the probability at the maximum index
Args:
pseudo_labels_prob: 2d probability matrix [N x K]
Returns:
max_prob_matrix: probability at argmax class [N x 1]
"""
max_prob_matrix=np.zeros((pseudo_labels_prob.shape))
for ii in range(pseudo_labels_prob.shape[0]): # loop over each data point
idxMax=np.argmax(pseudo_labels_prob[ii,:]) # find the highest score class
max_prob_matrix[ii,idxMax]=pseudo_labels_prob[ii,idxMax]
return max_prob_matrix
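    # Sketch (added comment): for a predictive row [0.1, 0.7, 0.2] only the argmax entry
    # survives, giving [0.0, 0.7, 0.0]; the per-class selection later ranks points by these
    # retained probabilities.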
def post_processing_and_augmentation(self,assigned_pseudo_labels,X,y):
"""
after assigning the pseudo labels in the previous step, we post-process and augment them into X and y
Args:
assigned_pseudo_labels: [N x K] matrix where N is the #unlabels and K is the #class
assigned_pseudo_labels==0 indicates no assignment
assigned_pseudo_labels==1 indicates assignment.
X: existing pseudo_labeled + labeled data [ N' x d ]
y: existing pseudo_labeled + labeled data [ N' x 1 ] for multiclassification
y: existing pseudo_labeled + labeled data [ N' x K ] for multilabel classification
Output:
Augmented X
Augmented y
"""
sum_by_cols=np.sum(assigned_pseudo_labels,axis=1)
labels_satisfied_threshold = np.where(sum_by_cols>0)[0]
self.num_augmented_per_class.append( np.sum(assigned_pseudo_labels,axis=0).astype(int) )
if len(labels_satisfied_threshold) == 0: # no point is selected
return X,y
self.selected_unlabelled_index += labels_satisfied_threshold.tolist()
# augment the assigned labels to X and y ==============================================
X = np.vstack((self.unlabelled_data[labels_satisfied_threshold,:], X))
if self.IsMultiLabel==False: # y is [N x 1] matrix
            # multiclass case: take the argmax over the assigned classes so each point receives a single label
y = np.vstack(( np.argmax( assigned_pseudo_labels[labels_satisfied_threshold,:],axis=1).reshape(-1,1), np.array(y).reshape(-1,1)))
else: # y is [N x L] matrix
y = np.vstack((assigned_pseudo_labels[labels_satisfied_threshold,:], np.array(y)))
if "CSA" in self.algorithm_name: # book keeping
self.len_unlabels.append( len(self.unlabelled_data) )
self.len_accepted_ttest.append( assigned_pseudo_labels.shape[0] )
self.len_selected.append( np.sum(self.num_augmented_per_class) )
# remove the selected data from unlabelled data
self.unlabelled_data = np.delete(self.unlabelled_data, np.unique(labels_satisfied_threshold), 0)
return X,y
def label_assignment_and_post_processing(self, pseudo_labels_prob,X,y, current_iter=0,upper_threshold=None):
"""
Given the threshold, we perform label assignment and post-processing
Args:
pseudo_labels_prob: predictive prob [N x K] where N is #unlabels, K is #class
X: existing pseudo_labeled + labeled data [ N' x d ]
y: existing pseudo_labeled + labeled data [ N' x 1 ] for multiclassification
y: existing pseudo_labeled + labeled data [ N' x K ] for multilabel classification
Output:
Augmented X = augmented_X + X
Augmented y = augmented_y + Y
"""
if self.IsMultiLabel==False:
#go over each row (data point), only keep the argmax prob
# because we only allow a single data point to a single class
max_prob_matrix=self.get_prob_at_max_class(pseudo_labels_prob)
else:
# we dont need to get prob at max class for multi-label
# because a single data point can be assigned to multiple classes
max_prob_matrix=pseudo_labels_prob
if upper_threshold is None:
upper_threshold=self.upper_threshold
if 'CSA' in self.algorithm_name: # if using CSA, we dont use the upper threshold
upper_threshold=0
assigned_pseudo_labels=np.zeros((max_prob_matrix.shape[0],self.nClass)).astype(int)
MaxPseudoPoint=[0]*self.nClass
for cc in range(self.nClass): # loop over each class
MaxPseudoPoint[cc]=self.get_max_pseudo_point(self.label_frequency[cc],current_iter)
idx_sorted = np.argsort( max_prob_matrix[:,cc])[::-1] # decreasing
temp_idx = np.where(max_prob_matrix[idx_sorted,cc] > upper_threshold )[0]
labels_satisfied_threshold=idx_sorted[temp_idx]
# only select upto MaxPseudoPoint[cc] points
labels_satisfied_threshold = labels_satisfied_threshold[:MaxPseudoPoint[cc]]
assigned_pseudo_labels[labels_satisfied_threshold, cc]=1
if self.verbose:
print("MaxPseudoPoint",MaxPseudoPoint)
return self.post_processing_and_augmentation(assigned_pseudo_labels,X,y)
def get_number_of_labels(self,y):
"""
# given the label y, return the number of classes
Args:
y: label vector (for singlelabel) or matrix (for multilabel)
Output:
number of classes or number of labels
"""
if self.IsMultiLabel==False:
return len(np.unique(y))
else:
return y.shape[1]
def get_max_pseudo_point(self,fraction_of_class, current_iter):
"""
        We select more points at the beginning and fewer at later stages
Args:
fraction_of_class: vector of the frequency of points per class
current_iter: current iteration 0,1,2...T
Output:
number_of_max_pseudo_points: scalar
"""
LinearRamp= [(self.num_iters-ii)/self.num_iters for ii in range(self.num_iters)]
SumLinearRamp=np.sum(LinearRamp)
fraction_iter= (self.num_iters-current_iter) / (self.num_iters*SumLinearRamp)
MaxPseudoPoint=fraction_iter*fraction_of_class*self.FractionAllocatedLabel*len(self.unlabelled_data)
        return int(np.ceil(MaxPseudoPoint)) # use the builtin int (np.int was removed in NumPy 1.24+)
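    # Worked example (sketch, not part of the original code): with num_iters=5 the ramp is
    # [1.0, 0.8, 0.6, 0.4, 0.2] and sums to 3, so at current_iter=0 fraction_iter = 5/(5*3) = 1/3.
    # For a class holding 40% of the labels, 300 unlabeled points and FractionAllocatedLabel=1,
    # the cap is ceil(1/3 * 0.4 * 300) = 40 pseudo-labels; later iterations receive smaller caps.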
def fit(self, X, y):
"""
main algorithm to perform pseudo labelling
Args:
X: train features [N x d]
y: train targets [N x 1]
Output:
we record the test_accuracy a vector of test accuracy per pseudo-iteration
"""
print("=====",self.algorithm_name)
self.nClass=self.get_number_of_labels(y)
self.label_frequency=self.estimate_label_frequency(y)
for current_iter in (tqdm(range(self.num_iters)) if self.verbose else range(self.num_iters)):
self.selected_unlabelled_index=[]
# Fit to data
self.model.fit(X, y)
# evaluate_performance the performance on test set after Fit the model given the data
self.evaluate_performance()
# Predictive probability on the unlabeled data
pseudo_labels_prob=self.get_predictive_prob_for_unlabelled_data(self.model)
X,y=self.label_assignment_and_post_processing(pseudo_labels_prob,X,y,current_iter)
if self.verbose:
print("#augmented:", self.num_augmented_per_class, " no training data ", len(y))
if np.sum(self.num_augmented_per_class)==0: # no data point is augmented
return
# evaluate_performance at the last iteration for reporting purpose
self.model.fit(X, y)
self.evaluate_performance()
# def predict(self, X):
# return self.model.predict(X)
# def predict_proba(self, X):
# return self.model.predict_proba(X)
# def decision_function(self, X):
# return self.model.decision_function(X)
| 16,406 | 38.439904 | 144 |
py
|
confident-sinkhorn-allocation
|
confident-sinkhorn-allocation-master/algorithm/__init__.py
| 0 | 0 | 0 |
py
|
|
confident-sinkhorn-allocation
|
confident-sinkhorn-allocation-master/algorithm/csa.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 15 14:19:22 2021
@author: Vu Nguyen
"""
import numpy as np
from tqdm import tqdm
from sklearn.metrics import accuracy_score
from xgboost import XGBClassifier
import matplotlib.pyplot as plt
from scipy import stats
import time
from .pseudo_labeling import Pseudo_Labeling
# Confident Sinkhorn Allocation==================================================================================================
class CSA(Pseudo_Labeling):
def __init__(self, unlabelled_data, x_test,y_test,num_iters=5,num_XGB_models=20,confidence_choice="ttest",verbose = False,IsMultiLabel=False):
"""
unlabelled_data : [N x d] where N is the number of unlabeled data, d is the feature dimension
x_test :[N_test x d]
y_test :[N_test x 1] for multiclassification or [N_test x K] for multilabel classification
num_iters : number of pseudo-iterations, recommended = 5 as in the paper
upper_threshold : the upper threshold used for pseudo-labeling, e.g., we assign label if the prob > 0.8
        fraction_allocation : the fraction of label allocation; if fraction_allocation=1, we assign labels to 100% of unlabeled data
lower_threshold : lower threshold, used for UPS
num_XGB_models : number of XGB models used for UPS and CSA, recommended = 10
verbose : verbose
IsMultiLabel : False => Multiclassification or True => Multilabel classification
"""
super().__init__( unlabelled_data, x_test,y_test,num_iters=num_iters,num_XGB_models=num_XGB_models,verbose=verbose,IsMultiLabel=IsMultiLabel)
self.confidence_choice=confidence_choice
if self.IsMultiLabel==True:
# by default, we use total_variance as the main criteria for multilabel classification
if self.confidence_choice is not None:
self.confidence_choice="variance"
if self.confidence_choice is None or self.confidence_choice=="None":
self.algorithm_name="SLA"
else:
self.algorithm_name="CSA_" + self.confidence_choice
self.elapse_xgb=[]
self.elapse_ttest=[]
self.elapse_sinkhorn=[]
if self.verbose:
print("number of used XGB models M=",self.num_XGB_models)
def predict(self, X):
super().predict(X)
def predict_proba(self, X):
super().predict_proba(X)
def evaluate_performance(self):
super().evaluate_performance()
def get_max_pseudo_point(self,class_freq,current_iter):
return super().get_max_pseudo_point(class_freq,current_iter)
def set_ot_regularizer(self,nRow,nCol):
"""
We set the Sinkhorn regularization parameter based on the ratio of Row/Column
Args:
nRow: number of rows in our cost matrix for Sinkhorn algorithm
nCol: number of columns
Output:
regularization
"""
if nRow/nCol>=300:
regulariser=1
        elif nRow/nCol>=200:
regulariser=0.5
elif nRow/nCol>=100:
regulariser=0.2
elif nRow/nCol>=50:
regulariser=0.1
else:
regulariser=0.05
if self.IsMultiLabel:
if self.nClass>20:
regulariser=regulariser*5
else:
regulariser=regulariser*200
return regulariser
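    # Worked example for set_ot_regularizer (illustration only):
    #   nRow=6000 unlabeled points, nCol=20 classes -> ratio 300 -> regulariser 1
    #   nRow=500 unlabeled points,  nCol=10 classes -> ratio 50  -> regulariser 0.1
    # and with IsMultiLabel=True and nClass=10 (<=20) the latter is further
    # scaled by 200, giving 0.1*200 = 20.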
def data_uncertainty(self,pseudo_labels_prob_list):
"""
Args:
pseudo_labels_prob_list: [M x N x K]
Output:
entropy: [N x 1]
"""
ent=np.zeros((pseudo_labels_prob_list.shape[0],pseudo_labels_prob_list.shape[1]))
for mm in range(pseudo_labels_prob_list.shape[0]):
ent[mm,:]= self.entropy_prediction(pseudo_labels_prob_list[mm,:,:])
return np.mean(ent,axis=0)
def entropy_prediction(self,ave_pred,atClass=None):
"""
Args:
ave_pred: [N x K]
Output:
entropy: [N x 1]
"""
ent=[0]*ave_pred.shape[0]
for ii in range(ave_pred.shape[0]):
ent[ii]= - np.sum( ave_pred[ii,:]*np.log(ave_pred[ii,:]))
return np.asarray(ent)
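    # Example for entropy_prediction (illustration only): a point with averaged
    # prediction p = [0.7, 0.2, 0.1] has entropy
    #   -(0.7*ln0.7 + 0.2*ln0.2 + 0.1*ln0.1) ~= 0.80,
    # while a maximally uncertain p = [1/3, 1/3, 1/3] gives ln(3) ~= 1.10.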
def total_entropy(self,pseudo_labels_prob_list, atClass=None):
"""
calculate total entropy
Args:
pseudo_labels_prob_list: [M x N x K]: M #XGB, N #unlabels, K #class
Output:
total_entropy score [N x 1]
"""
ave_pred=np.mean(pseudo_labels_prob_list,axis=0) # average over model
total_uncertainty=self.entropy_prediction(ave_pred,atClass)
return total_uncertainty
def knowledge_uncertainty(self,pred):
total_uncertainty=self.total_uncertainty(pred)
data_uncertainty=self.data_uncertainty(pred)
knowledge_uncertainty = total_uncertainty-data_uncertainty
return knowledge_uncertainty
def total_variance(self,pseudo_labels_prob_list):
"""
calculate total variance
Args:
pseudo_labels_prob_list: [M x N x K]: M #XGB, N #unlabels, K #class
Output:
standard deviation score [N x 1]
"""
# [nModel, nPoint, nClass]
std_pred = np.std( pseudo_labels_prob_list, axis=0) # std over models
total_std = np.sum(std_pred, axis=1) # sum of std over classes
return total_std
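    # Example for total_variance (illustration only): if M=3 models predict
    # [0.9, 0.5, 0.1] for one point at some class, the std over models is ~0.33;
    # summing such stds over all K classes gives that point's score
    # (higher = predictions less stable across models = less confident).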
def calculate_ttest(self,pseudo_labels_prob_list):
"""
calculate t-test
Args:
pseudo_labels_prob_list: [M x N x K]: M #XGB, N #unlabels, K #class
Output:
t-test score [N x 1]
"""
num_points=pseudo_labels_prob_list.shape[1]
var_rows_argmax=[0]*num_points
var_rows_arg2ndmax=[0]*num_points
t_test=[0]*num_points
t_value=[0]*num_points
pseudo_labels_prob= np.mean(pseudo_labels_prob_list,axis=0)
temp=np.argsort(-pseudo_labels_prob,axis=1) # decreasing
idxargmax=temp[:,0]
idx2nd_argmax= temp[:,1]
for jj in range(num_points):# go over each row (data points)
idxmax =idxargmax[jj]
idx2ndmax=idx2nd_argmax[jj]
var_rows_argmax[jj]=np.var(pseudo_labels_prob_list[:,jj,idxmax ])
var_rows_arg2ndmax[jj]=np.var(pseudo_labels_prob_list[:,jj,idx2ndmax])
nominator=pseudo_labels_prob[jj, idxmax]-pseudo_labels_prob[jj, idx2ndmax]
temp=(0.1 + var_rows_argmax[jj] + var_rows_arg2ndmax[jj] )/self.num_XGB_models
denominator=np.sqrt(temp)
t_test[jj] = nominator/denominator
# compute degree of freedom=========================================
nominator = (var_rows_argmax[jj] + var_rows_arg2ndmax[jj])**2
denominator= var_rows_argmax[jj]**2 + var_rows_arg2ndmax[jj]**2
denominator=denominator/(self.num_XGB_models-1)
dof=nominator/denominator
t_value[jj]=stats.t.ppf(1-0.025, dof)
t_test[jj]=t_test[jj]-t_value[jj]
return t_test
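    # Worked example for calculate_ttest (illustration only), with M=10 models:
    # if the averaged top-two probabilities of a point are 0.6 and 0.3 and the
    # across-model variances at those two classes are 0.02 and 0.01, then
    #   t = (0.6 - 0.3) / sqrt((0.1 + 0.02 + 0.01) / 10) ~= 2.63
    # and the returned score is t minus the 97.5% Student-t critical value for
    # the degrees of freedom computed above, so larger gaps between the top two
    # classes and smaller across-model variances give higher confidence.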
def label_assignment_and_post_processing_for_CSA(self, assignment_matrix,pseudo_labels_prob,X,y, current_iter=0):
"""
Given the threshold, we perform label assignment and post-processing
Args:
pseudo_labels_prob: predictive prob [N x K] where N is #unlabels, K is #class
X: existing pseudo_labeled + labeled data [ N' x d ]
y: existing pseudo_labeled + labeled data [ N' x 1 ] for multiclassification
y: existing pseudo_labeled + labeled data [ N' x K ] for multilabel classification
Output:
Augmented X = augmented_X + X
Augmented y = augmented_y + Y
"""
if self.IsMultiLabel==False:
#go over each row (data point), only keep the argmax prob
# because we only allow a single data point to a single class
max_prob_matrix=self.get_prob_at_max_class(pseudo_labels_prob)
else:
            # we don't need to get the prob at the max class for multi-label
# because a single data point can be assigned to multiple classes
max_prob_matrix=pseudo_labels_prob
assignment_matrix=self.get_prob_at_max_class(assignment_matrix)
assigned_pseudo_labels=np.zeros((max_prob_matrix.shape[0],self.nClass)).astype(int)
MaxPseudoPoint=[0]*self.nClass
for cc in range(self.nClass): # loop over each class
MaxPseudoPoint[cc]=self.get_max_pseudo_point(self.label_frequency[cc],current_iter)
idx_sorted = np.argsort( assignment_matrix[:,cc])[::-1] # decreasing
idx_assignment = np.where(assignment_matrix[idx_sorted,cc] > 0 )[0]
            # we don't accept labels whose predicted prob is below 0.5; this works well for multilabel classification
idx_satisfied = np.where(pseudo_labels_prob[idx_sorted[idx_assignment],cc] > 0.5 )[0]
# only select upto MaxPseudoPoint[cc] points
labels_satisfied_threshold=idx_sorted[idx_satisfied][:MaxPseudoPoint[cc]]
assigned_pseudo_labels[labels_satisfied_threshold, cc]=1
if self.verbose:
print("MaxPseudoPoint",MaxPseudoPoint)
return self.post_processing_and_augmentation(assigned_pseudo_labels,X,y)
def fit(self, X, y):
"""
main algorithm to perform pseudo labelling
Args:
X: train features [N x d]
y: train targets [N x 1]
Output:
we record the test_accuracy a vector of test accuracy per pseudo-iteration
"""
print("=====",self.algorithm_name)
self.nClass=self.get_number_of_labels(y)
self.label_frequency=self.estimate_label_frequency(y)
for current_iter in (tqdm(range(self.num_iters)) if self.verbose else range(self.num_iters)):
# Fit to data
self.model.fit(X, y)
self.evaluate_performance()
num_points=self.unlabelled_data.shape[0]
pseudo_labels_prob_list=[0]*self.num_XGB_models
tic = time.perf_counter()
# estimate prob using unlabelled data on M XGB models
pseudo_labels_prob_list=[0]*self.num_XGB_models
for mm in range(self.num_XGB_models):
self.XGBmodels_list[mm].fit(X, y)
pseudo_labels_prob_list[mm] = self.get_predictive_prob_for_unlabelled_data(self.XGBmodels_list[mm])
toc = time.perf_counter()
self.elapse_xgb.append(toc-tic)
pseudo_labels_prob_list=np.asarray(pseudo_labels_prob_list) # P [M x N x K]
pseudo_labels_prob= np.mean(pseudo_labels_prob_list,axis=0) # \bar{P} [N x K]
tic = time.perf_counter() # Start Time
# estimate confidence level here====================================
if self.confidence_choice=="variance":
tot_variance=self.total_variance(pseudo_labels_prob_list)
confidence=1-tot_variance
confidence=confidence-np.mean(confidence)
elif self.confidence_choice=="neg_variance":
confidence=self.total_variance(pseudo_labels_prob_list)
confidence=confidence-np.mean(confidence)
elif self.confidence_choice=='entropy':
tot_ent=self.total_entropy(pseudo_labels_prob_list)
confidence=1-tot_ent
confidence=confidence-0.5*np.mean(confidence)
elif self.confidence_choice=='neg_entropy':
confidence=self.total_entropy(pseudo_labels_prob_list)
confidence=confidence-np.mean(confidence)
elif self.confidence_choice=="ttest":
confidence=self.calculate_ttest(pseudo_labels_prob_list)
elif self.confidence_choice=="neg_ttest":
confidence=self.calculate_ttest(pseudo_labels_prob_list)
confidence=-np.asarray(confidence)
elif self.confidence_choice==None or self.confidence_choice=="None": # not using any confidence score, accepting all data point similar to SLA
confidence=np.ones((1,num_points))
confidence=np.clip(confidence, a_min=0,a_max=np.max(confidence))
toc = time.perf_counter() # End Time
self.elapse_ttest.append(toc-tic)
# for numerical stability of OT, select the nonzero entry only
idxNoneZero=np.where( confidence>0 )[0]
#idxNoneZero=np.where( (confidence>0) & (confidence<0.9*np.max(confidence)) )[0]
num_points= len(idxNoneZero)
if self.verbose:
print("num_points accepted= ",num_points, " total num_points=",len(self.unlabelled_data))
if len(idxNoneZero)==0: # terminate if could not find any point satisfying constraints
return self.test_acc
# Sinkhorn's algorithm ======================================================================
# fraction of label being assigned.
max_allocation_point= self.get_max_pseudo_point(class_freq=1,current_iter=current_iter)
rho=max_allocation_point/ len(self.unlabelled_data)
# regulariser for Sinkhorn's algorithm
regulariser=self.set_ot_regularizer(num_points, self.nClass)
tic = time.perf_counter()
# this is w_{+} and w_{-} in the paper
upper_b_per_class=self.label_frequency*1.1
lower_b_per_class=self.label_frequency*0.9
# we define row marginal distribution =============================
row_marginal=np.ones(num_points)
temp=num_points*rho*(np.sum(upper_b_per_class)-np.sum(lower_b_per_class))
row_marginal = np.append(row_marginal,temp)
if self.verbose:
print("#unlabel={:d} #points/#classes={:d}/{:d}={:.2f} reg={:.2f}".format(
len(self.unlabelled_data),num_points,self.nClass,num_points/self.nClass,regulariser))
C=1-pseudo_labels_prob # cost # expand Cost matrix
C=C[idxNoneZero,:]
C=np.vstack((C,np.zeros((1,self.nClass))))
C=np.hstack((C,np.zeros((len(idxNoneZero)+1,1))))
K=np.exp(-C/regulariser)
# define column marginal distribution ==============================
col_marginal = rho*upper_b_per_class*num_points # frequency of the class label
temp=num_points*(1-rho*np.sum(lower_b_per_class))
col_marginal = np.append(col_marginal,temp)
# checking the total mass of column marginal ~ row marginal
if np.abs( np.sum(col_marginal) - np.sum(row_marginal) ) > 0.001 :
print("np.sum(dist_labels) - np.sum(dist_points) > 0.001")
# initialize uu and perform Sinkhorn algorithm
uu=np.ones( (num_points+1,))
for jj in range(100):
vv= col_marginal / np.dot(K.T, uu)
uu= row_marginal / np.dot(K, vv)
# compute label assignment matrix Q'
Q_prime= np.atleast_2d(uu).T*(K*vv.T)
toc = time.perf_counter()
self.elapse_sinkhorn.append(toc-tic)
# this is the final Q matrix
assignment_matrix_Q=np.zeros((pseudo_labels_prob.shape))
assignment_matrix_Q[idxNoneZero,:]=Q_prime[:-1,:-1]
X,y=self.label_assignment_and_post_processing_for_CSA(assignment_matrix_Q,pseudo_labels_prob,X,y,current_iter)
if self.verbose:
print("#augmented:", self.num_augmented_per_class, " len of training data ", len(y))
# evaluate_performance at the last iteration for reporting purpose
self.model.fit(X, y)
self.evaluate_performance()
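# ----------------------------------------------------------------------
# Standalone sketch (illustration only, not part of the original file):
# the plain Sinkhorn scaling step used inside CSA.fit() above, run on a
# toy cost matrix so the effect of the row/column marginals is easy to
# inspect. The real fit() additionally appends a dummy row/column so the
# allocation can be partial; this sketch shows the balanced case only.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    n_points, n_class, reg = 6, 3, 0.1
    fake_prob = rng.dirichlet(np.ones(n_class), size=n_points)  # pretend averaged predictions
    C = 1.0 - fake_prob                                         # cost = 1 - probability, as in fit()
    K = np.exp(-C / reg)
    row_marginal = np.ones(n_points)                            # one unit of mass per point
    col_marginal = np.full(n_class, n_points / n_class)         # uniform class budget
    uu = np.ones(n_points)
    for _ in range(100):                                        # Sinkhorn iterations, same loop structure as fit()
        vv = col_marginal / np.dot(K.T, uu)
        uu = row_marginal / np.dot(K, vv)
    Q = np.atleast_2d(uu).T * (K * vv.T)                        # assignment matrix, cf. Q_prime above
    print("row sums:", Q.sum(axis=1))                           # ~ row_marginal
    print("col sums:", Q.sum(axis=0))                           # ~ col_marginal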
| 16,573 | 38.368171 | 155 |
py
|
confident-sinkhorn-allocation
|
confident-sinkhorn-allocation-master/algorithm/ups.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 15 14:19:22 2021
@author: Vu Nguyen
"""
import numpy as np
from tqdm import tqdm
from sklearn.metrics import accuracy_score
from xgboost import XGBClassifier
import matplotlib.pyplot as plt
from .pseudo_labeling import Pseudo_Labeling
# UPS: ===========================================================================================
# Rizve, Mamshad Nayeem, Kevin Duarte, Yogesh S. Rawat, and Mubarak Shah.
# "In Defense of Pseudo-Labeling: An Uncertainty-Aware Pseudo-label Selection Framework for Semi-Supervised Learning."
# ICLR. 2020.
# https://arxiv.org/pdf/2101.06329.pdf
class UPS(Pseudo_Labeling):
# adaptive thresholding
def __init__(self, unlabelled_data, x_test,y_test,num_iters=5,upper_threshold = 0.8, lower_threshold = 0.2,\
num_XGB_models=10,verbose = False,IsMultiLabel=False):
"""
unlabelled_data : [N x d] where N is the number of unlabeled data, d is the feature dimension
x_test :[N_test x d]
y_test :[N_test x 1] for multiclassification or [N_test x K] for multilabel classification
num_iters : number of pseudo-iterations, recommended = 5 as in the paper
upper_threshold : the upper threshold used for pseudo-labeling, e.g., we assign label if the prob > 0.8
        fraction_allocation : the fraction of label allocation, if fraction_allocation=1, we assign labels to 100% of unlabeled data
lower_threshold : lower threshold, used for UPS
num_XGB_models : number of XGB models used for UPS and CSA, recommended = 10
verbose : verbose
IsMultiLabel : False => Multiclassification or True => Multilabel classification
"""
super().__init__( unlabelled_data, x_test,y_test,num_iters=num_iters,upper_threshold=upper_threshold,\
lower_threshold=lower_threshold,num_XGB_models=num_XGB_models,verbose=verbose,IsMultiLabel=IsMultiLabel)
self.algorithm_name="UPS"
def predict(self, X):
super().predict(X)
def predict_proba(self, X):
return super().predict_proba(X)
def evaluate_performance(self):
super().evaluate_performance()
def uncertainty_score(self, matrix_prob):
return super().uncertainty_score(matrix_prob)
def get_prob_at_max_class(self,pseudo_labels_prob):
return super().get_prob_at_max_class(pseudo_labels_prob)
def get_max_pseudo_point(self,class_freq,current_iter):
return super().get_max_pseudo_point(class_freq,current_iter)
def label_assignment_and_post_processing_UPS(self, pseudo_labels_prob,uncertainty_scores,X,y, current_iter=0,upper_threshold=None):
"""
Given the threshold, we perform label assignment and post-processing
Args:
pseudo_labels_prob: predictive prob [N x K] where N is #unlabels, K is #class
uncertainty_scores : uncertainty_score of each data point at each class [N x K]
X: existing pseudo_labeled + labeled data [ N' x d ]
y: existing pseudo_labeled + labeled data [ N' x 1 ] for multiclassification
y: existing pseudo_labeled + labeled data [ N' x K ] for multilabel classification
Output:
Augmented X = augmented_X + X
Augmented y = augmented_y + Y
"""
if self.IsMultiLabel==False:
#go over each row (data point), only keep the argmax prob
# because we only allow a single data point to a single class
max_prob_matrix=self.get_prob_at_max_class(pseudo_labels_prob)
else:
            # we don't need to get the prob at the max class for multi-label
# because a single data point can be assigned to multiple classes
max_prob_matrix=pseudo_labels_prob
assigned_pseudo_labels=np.zeros((max_prob_matrix.shape[0],self.nClass)).astype(int)
MaxPseudoPoint=[0]*self.nClass
for cc in range(self.nClass): # loop over each class
MaxPseudoPoint[cc]=self.get_max_pseudo_point(self.label_frequency[cc],current_iter)
idx_sorted = np.argsort( max_prob_matrix[:,cc])[::-1] # decreasing
idx_within_prob = np.where( max_prob_matrix[idx_sorted,cc] > self.upper_threshold )[0]
idx_within_prob_uncertainty = np.where( uncertainty_scores[idx_sorted[idx_within_prob],cc] < self.lower_threshold)[0]
# only select upto MaxPseudoPoint[cc] points
labels_satisfied_threshold=idx_sorted[idx_within_prob_uncertainty][:MaxPseudoPoint[cc]]
assigned_pseudo_labels[labels_satisfied_threshold, cc]=1
if self.verbose:
print("MaxPseudoPoint",MaxPseudoPoint)
return self.post_processing_and_augmentation(assigned_pseudo_labels,X,y)
def fit(self, X, y):
"""
main algorithm to perform pseudo labelling
Args:
X: train features [N x d]
y: train targets [N x 1]
Output:
we record the test_accuracy a vector of test accuracy per pseudo-iteration
"""
print("=====",self.algorithm_name)
self.nClass=self.get_number_of_labels(y)
self.label_frequency=self.estimate_label_frequency(y)
for current_iter in (tqdm(range(self.num_iters)) if self.verbose else range(self.num_iters)):
# Fit to data
self.model.fit(X, y)
self.evaluate_performance()
# estimate prob using unlabelled data on M XGB models
pseudo_labels_prob_list=[0]*self.num_XGB_models
for mm in range(self.num_XGB_models):
self.XGBmodels_list[mm].fit(X, y) # fit an XGB model
pseudo_labels_prob_list[mm] = self.get_predictive_prob_for_unlabelled_data(self.XGBmodels_list[mm])
pseudo_labels_prob_list=np.asarray(pseudo_labels_prob_list)
pseudo_labels_prob= np.mean(pseudo_labels_prob_list,axis=0)
# calculate uncertainty estimation for each data points at the argmax class
uncertainty_scores=np.ones((pseudo_labels_prob.shape))
for ii in range(pseudo_labels_prob.shape[0]):# go over each row (data points)
idxMax=np.argmax( pseudo_labels_prob[ii,:] )
uncertainty_scores[ii,idxMax]=np.std(pseudo_labels_prob_list[:,ii,idxMax])
X,y=self.label_assignment_and_post_processing_UPS(pseudo_labels_prob,uncertainty_scores,X,y,current_iter)
if np.sum(self.num_augmented_per_class)==0: # no data point is augmented
return
if self.verbose:
print("#added:", self.num_augmented_per_class, " no train data", len(y))
# evaluate_performance at the last iteration for reporting purpose
self.model.fit(X, y)
self.evaluate_performance()
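# ----------------------------------------------------------------------
# Standalone sketch (illustration only, not part of the original file):
# the UPS selection rule used above -- keep a pseudo-label only when the
# averaged probability at the argmax class is confident (> upper_threshold)
# AND the standard deviation of that probability across the M XGB models is
# small (< lower_threshold). Toy numbers, numpy only.
if __name__ == "__main__":
    upper_threshold, lower_threshold = 0.8, 0.2
    avg_prob = np.array([0.95, 0.85, 0.60, 0.90])   # averaged prob at the argmax class
    model_std = np.array([0.05, 0.30, 0.10, 0.15])  # std of that prob across models
    selected = np.where((avg_prob > upper_threshold) & (model_std < lower_threshold))[0]
    print("accepted unlabeled indices:", selected)  # -> [0 3]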
| 7,123 | 42.175758 | 135 |
py
|
Unimer
|
Unimer-master/lr_scheduler_wrapper.py
|
# coding=utf8
from typing import Dict, Any
from overrides import overrides
from torch.optim.lr_scheduler import MultiStepLR
from allennlp.training.learning_rate_schedulers import LearningRateScheduler
class PyTorchMultiStepLearningRateSchedulerWrapper(LearningRateScheduler):
def __init__(self, lr_scheduler: MultiStepLR) -> None:
self.lr_scheduler = lr_scheduler
def get_values(self):
return self.lr_scheduler.get_lr()
@overrides
def step(self, metric: float = None, epoch: int = None) -> None:
self.lr_scheduler.step(epoch)
@overrides
def state_dict(self) -> Dict[str, Any]:
return self.lr_scheduler.state_dict()
@overrides
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
self.lr_scheduler.load_state_dict(state_dict)
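# Minimal usage sketch (illustration only, not part of the original file):
# wrap a vanilla PyTorch MultiStepLR so it can be handed to code that expects
# an AllenNLP LearningRateScheduler (e.g. CustomTrainer).
if __name__ == "__main__":
    import torch
    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = PyTorchMultiStepLearningRateSchedulerWrapper(
        MultiStepLR(optimizer, milestones=[2, 4], gamma=0.5))
    for epoch in range(6):
        # ... one epoch of training would go here ...
        scheduler.step(metric=None, epoch=epoch)
        print(epoch, scheduler.get_values())  # lr halves after each milestone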
| 816 | 27.172414 | 76 |
py
|
Unimer
|
Unimer-master/evaluations.py
|
# coding=utf-8
import re
import numpy as np
from grammars.utils import action_sequence_to_logical_form
def evaluate_grammar_based_prediction(instance, prediction, grammar, preprocessor, postprocessor=None):
meta_field = instance['meta_field']
question = meta_field['question']
truth_logical_form = meta_field['logical_form']
predicted_rules = prediction['predicted_rules']
production_rules, rule_str = list(), list()
for rule_id in predicted_rules:
rule = grammar.get_production_rule_by_id(int(rule_id))
if rule is None:
break
rule_str.append(rule.rule)
production_rules.append(rule)
predicted_logical_form = preprocessor(action_sequence_to_logical_form(rule_str))
if postprocessor:
predicted_logical_form = postprocessor(predicted_logical_form)
truth_logical_form = postprocessor(truth_logical_form)
is_correct = (truth_logical_form == predicted_logical_form)
result = {
"question": question,
"truth_logical_form": truth_logical_form,
"predicted_logical_form": predicted_logical_form,
"is_correct": is_correct
}
return is_correct, result
def evaluate_grammar_copy_based_prediction(instance, prediction, grammar, preprocessor, postprocessor=None):
meta_field = instance['meta_field']
question = meta_field['question']
truth_logical_form = meta_field['logical_form']
question_tokens = instance.fields['source_tokens'].tokens
predicted_rules = prediction['predicted_rules']
copy_gates = prediction['recorded_copy_gates']
copy_weights = prediction['recorded_copy_weights']
assert len(copy_weights) == len(copy_gates)
production_rules, rule_str, copy_info = list(), list(), list()
for idx, rule_id in enumerate(predicted_rules):
rule = grammar.get_production_rule_by_id(int(rule_id))
if rule is None:
break
rule_str.append(rule.rule)
production_rules.append(rule)
if idx > 0 and rule.lhs in grammar.copy_terminal_set:
gate = copy_gates[idx-1]
weights = [(token.text, copy_weights[idx-1][sidx],) for (sidx, token) in enumerate(question_tokens)]
max_weights = max(weights, key=lambda x: x[1])
copy_info.append({"gate": gate, "rule": rule.rule, "weights": max_weights})
predicted_logical_form = preprocessor(action_sequence_to_logical_form(rule_str))
if postprocessor:
predicted_logical_form = postprocessor(predicted_logical_form)
truth_logical_form = postprocessor(truth_logical_form)
is_correct = (truth_logical_form == predicted_logical_form)
result = {
"question": question,
"truth_logical_form": truth_logical_form,
"predicted_logical_form": predicted_logical_form,
"copy_info": copy_info,
"is_correct": is_correct
}
return is_correct, result
def evaluate_seq_parsing_prediction(instance, prediction, language):
source_tokens = instance.fields['source_tokens'].tokens
gold_tokens = instance.fields['target_tokens'].tokens
predicted_tokens = prediction['predicted_tokens']
if language in ['sql', 'lambda', 'prolog']:
logical_form = ' '.join([s.text for s in gold_tokens[1:-1]])
predicted_logical_form = ' '.join(predicted_tokens)
else:
logical_form = ''.join([s.text for s in gold_tokens[1:-1]])
predicted_logical_form = ''.join(predicted_tokens)
is_correct = logical_form == predicted_logical_form
result = {
"question": " ".join([s.text for s in source_tokens[1:-1]]),
"truth_logical_form": logical_form,
"predicted_logical_form": predicted_logical_form,
"is_correct": is_correct
}
return is_correct, result
def evaluate_seq_copy_parsing_prediction(instance, prediction, language):
source_tokens = instance.fields['source_tokens'].tokens
gold_tokens = instance.fields['target_tokens'].tokens
predicted_tokens = prediction['predicted_tokens']
meta_field = instance['meta_field']
source_tokens_to_copy = meta_field['source_tokens_to_copy']
predicted_logical_form_tokens = list()
for text in predicted_tokens:
match = re.match("^@@copy@@(\d+)$", text)
if match:
source_index = int(match.group(1))
if source_index >= len(source_tokens_to_copy):
text = "@@PADDING@@"
else:
text = source_tokens_to_copy[source_index]
predicted_logical_form_tokens.append(text)
if language in ['sql', 'lambda', 'lambda2', 'prolog', 'prolog2']:
logical_form = ' '.join([s.text for s in gold_tokens[1:-1]])
predicted_logical_form = ' '.join(predicted_logical_form_tokens)
else:
logical_form = ''.join([s.text for s in gold_tokens[1:-1]])
predicted_logical_form = ''.join(predicted_logical_form_tokens)
is_correct = logical_form == predicted_logical_form
result = {
"question": " ".join([s.text for s in source_tokens[1:-1]]),
"truth_logical_form": logical_form,
"predicted_logical_form": predicted_logical_form,
"is_correct": is_correct
}
return is_correct, result
def evaluate_gnn_parsing_prediction(instance, prediction, language):
source_tokens = instance.fields['source_tokens'].tokens
gold_tokens = instance.fields['target_tokens'].tokens
predicted_abstract_tokens = prediction['predicted_tokens']
meta_field = instance.fields['meta_field']
entity_candidates = meta_field['entity_candidates']
predicted_logical_form_tokens = list()
for text in predicted_abstract_tokens:
match = re.match("^@entity_(\d+)$", text)
if match:
source_index = int(match.group(1))
for entity in entity_candidates:
if entity['index'] == source_index:
text = entity.get('formatted_value', entity['value'])
break
else:
text = '@@PADDING@@'
predicted_logical_form_tokens.append(text)
gold_logical_form_tokens = list()
for token in gold_tokens[1:-1]:
text = token.text
match = re.match("^@entity_(\d+)$", text)
if match:
source_index = int(match.group(1))
for entity in entity_candidates:
if entity['index'] == source_index:
text = entity.get('formatted_value', entity['value'])
break
else:
text = '@@PADDING@@'
gold_logical_form_tokens.append(text)
if language in ['sql', 'lambda', 'lambda2']:
logical_form = ' '.join(gold_logical_form_tokens)
predicted_logical_form = ' '.join(predicted_logical_form_tokens)
else:
logical_form = ''.join(gold_logical_form_tokens)
predicted_logical_form = ''.join(predicted_logical_form_tokens)
is_correct = logical_form == predicted_logical_form
result = {
"question": " ".join([s.text for s in source_tokens[1:-1]]),
"truth_logical_form": logical_form,
"predicted_logical_form": predicted_logical_form,
"is_correct": is_correct
}
return is_correct, result
def evaluate_translation_prediction(instance, prediction, language):
source_tokens = instance.fields['source_tokens'].tokens
gold_tokens = instance.fields['target_tokens'].tokens
predicted_tokens = prediction['predicted_tokens']
if language in ['sql', 'lambda']:
logical_form = ' '.join([s.text for s in source_tokens[1:-1]])
else:
logical_form = ''.join([s.text for s in source_tokens[1:-1]])
gold_question = ' '.join([s.text for s in gold_tokens[1:-1]])
predicted_question = ' '.join(predicted_tokens)
is_correct = gold_question == predicted_question
result = {
"logical_form": logical_form,
"truth_question": gold_question,
"predicted_question": predicted_question,
"is_correct": is_correct
}
return is_correct, result
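# ----------------------------------------------------------------------
# Standalone sketch (illustration only, not part of the original file): how the
# "@@copy@@<i>" placeholders emitted by the copy decoder are mapped back to
# source tokens, mirroring the loop in evaluate_seq_copy_parsing_prediction.
# All tokens below are made up.
if __name__ == "__main__":
    source_tokens_to_copy = ["show", "flights", "from", "denver", "to", "boston"]
    predicted_tokens = ["city", "(", "@@copy@@3", ")", "@@copy@@99"]
    reconstructed = []
    for text in predicted_tokens:
        match = re.match(r"^@@copy@@(\d+)$", text)
        if match:
            source_index = int(match.group(1))
            if source_index >= len(source_tokens_to_copy):
                text = "@@PADDING@@"  # out-of-range copy index
            else:
                text = source_tokens_to_copy[source_index]
        reconstructed.append(text)
    print(" ".join(reconstructed))  # -> city ( denver ) @@PADDING@@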
| 8,052 | 38.282927 | 112 |
py
|
Unimer
|
Unimer-master/custom_trainer.py
|
# coding=utf8
import math
import time
import torch
import logging
from typing import Dict, List, Tuple, Optional, Iterable, Union, Callable, NoReturn
from allennlp.data import Instance
from allennlp.data.iterators.data_iterator import TensorDict, DataIterator
from allennlp.models import Model
from allennlp.training.checkpointer import Checkpointer
from allennlp.training.learning_rate_schedulers import LearningRateScheduler
from allennlp.training.momentum_schedulers import MomentumScheduler
from allennlp.training.moving_average import MovingAverage
from allennlp.training.tensorboard_writer import TensorboardWriter
from overrides import overrides
from allennlp.training import Trainer
from allennlp.nn import util as nn_util
from allennlp.common.util import (dump_metrics, gpu_memory_mb, peak_memory_mb,
lazy_groups_of)
from allennlp.common.tqdm import Tqdm
from allennlp.training import util as training_util
logger = logging.getLogger(__name__)
def record_loss(outputs_dict: Dict, tensorboard: TensorboardWriter):
for key, value in outputs_dict.items():
if key.endswith("loss"):
tensorboard.add_train_scalar("loss/%s" % key, value)
class CustomTrainer(Trainer):
def __init__(self,
model: Model,
optimizer: torch.optim.Optimizer,
iterator: DataIterator,
train_dataset: Iterable[Instance],
validation_dataset: Optional[Iterable[Instance]] = None,
patience: Optional[int] = None,
validation_metric: str = "-loss",
validation_iterator: DataIterator = None,
shuffle: bool = True,
num_epochs: int = 20,
serialization_dir: Optional[str] = None,
num_serialized_models_to_keep: int = 20,
keep_serialized_model_every_num_seconds: int = None,
checkpointer: Checkpointer = None,
model_save_interval: float = None,
cuda_device: Union[int, List] = -1,
grad_norm: Optional[float] = None,
grad_clipping: Optional[float] = None,
learning_rate_scheduler: Optional[LearningRateScheduler] = None,
momentum_scheduler: Optional[MomentumScheduler] = None,
summary_interval: int = 100,
histogram_interval: int = None,
should_log_parameter_statistics: bool = True,
should_log_learning_rate: bool = False,
log_batch_size_period: Optional[int] = None,
moving_average: Optional[MovingAverage] = None,
tensorboard_log_batch_callback: Optional[Callable[[Dict, TensorboardWriter], NoReturn]] = record_loss,
loss_fn: Optional[Callable[[Dict, int], torch.Tensor]] = None) -> None:
super().__init__(model, optimizer, iterator, train_dataset, validation_dataset, patience,
validation_metric, validation_iterator, shuffle, num_epochs, serialization_dir,
num_serialized_models_to_keep, keep_serialized_model_every_num_seconds, checkpointer,
model_save_interval, cuda_device, grad_norm, grad_clipping, learning_rate_scheduler,
momentum_scheduler, summary_interval, histogram_interval, should_log_parameter_statistics,
should_log_learning_rate, log_batch_size_period, moving_average)
self.tensorboard_log_batch_callback = tensorboard_log_batch_callback
self.loss_fn = loss_fn
def get_output_dict(self, batch_group: List[TensorDict], for_training: bool) -> Dict[str, torch.Tensor]:
"""
Does a forward pass on the given batches and returns the ``loss`` value in the result.
If ``for_training`` is `True` also applies regularization penalty.
"""
if self._multiple_gpu:
output_dict = training_util.data_parallel(batch_group, self.model, self._cuda_devices)
else:
assert len(batch_group) == 1
batch = batch_group[0]
batch = nn_util.move_to_device(batch, self._cuda_devices[0])
output_dict = self.model(**batch)
return output_dict
def get_batch_loss(self, output_dict: Dict[str, torch.Tensor], for_training: bool):
try:
if self.loss_fn is None:
loss = output_dict["loss"]
else:
loss = self.loss_fn(output_dict, self._batch_num_total)
if for_training:
loss += self.model.get_regularization_penalty()
except KeyError:
if for_training:
raise RuntimeError("The model you are trying to optimize does not contain a"
" 'loss' key in the output of model.forward(inputs).")
loss = None
return loss
@overrides
def _train_epoch(self, epoch: int) -> Dict[str, float]:
"""
Trains one epoch and returns metrics.
"""
logger.info("Epoch %d/%d", epoch, self._num_epochs - 1)
peak_cpu_usage = peak_memory_mb()
logger.info(f"Peak CPU memory usage MB: {peak_cpu_usage}")
gpu_usage = []
for gpu, memory in gpu_memory_mb().items():
gpu_usage.append((gpu, memory))
logger.info(f"GPU {gpu} memory usage MB: {memory}")
train_loss = 0.0
# Set the model to "train" mode.
self.model.train()
num_gpus = len(self._cuda_devices)
# Get tqdm for the training batches
raw_train_generator = self.iterator(self.train_data,
num_epochs=1,
shuffle=self.shuffle)
train_generator = lazy_groups_of(raw_train_generator, num_gpus)
num_training_batches = math.ceil(self.iterator.get_num_batches(self.train_data)/num_gpus)
self._last_log = time.time()
last_save_time = time.time()
batches_this_epoch = 0
if self._batch_num_total is None:
self._batch_num_total = 0
histogram_parameters = set(self.model.get_parameters_for_histogram_tensorboard_logging())
logger.info("Training")
train_generator_tqdm = Tqdm.tqdm(train_generator,
total=num_training_batches)
cumulative_batch_size = 0
for batch_group in train_generator_tqdm:
batches_this_epoch += 1
self._batch_num_total += 1
batch_num_total = self._batch_num_total
self.optimizer.zero_grad()
output_dict = self.get_output_dict(batch_group, for_training=True)
loss = self.get_batch_loss(output_dict, for_training=True)
if torch.isnan(loss):
raise ValueError("nan loss encountered")
loss.backward()
train_loss += loss.item()
batch_grad_norm = self.rescale_gradients()
# This does nothing if batch_num_total is None or you are using a
# scheduler which doesn't update per batch.
if self._learning_rate_scheduler:
self._learning_rate_scheduler.step_batch(batch_num_total)
if self._momentum_scheduler:
self._momentum_scheduler.step_batch(batch_num_total)
if self._tensorboard.should_log_histograms_this_batch():
# get the magnitude of parameter updates for logging
# We need a copy of current parameters to compute magnitude of updates,
# and copy them to CPU so large models won't go OOM on the GPU.
param_updates = {name: param.detach().cpu().clone()
for name, param in self.model.named_parameters()}
self.optimizer.step()
for name, param in self.model.named_parameters():
param_updates[name].sub_(param.detach().cpu())
update_norm = torch.norm(param_updates[name].view(-1, ))
param_norm = torch.norm(param.view(-1, )).cpu()
self._tensorboard.add_train_scalar("gradient_update/" + name,
update_norm / (param_norm + 1e-7))
else:
self.optimizer.step()
# Update moving averages
if self._moving_average is not None:
self._moving_average.apply(batch_num_total)
# Update the description with the latest metrics
metrics = training_util.get_metrics(self.model, train_loss, batches_this_epoch)
description = training_util.description_from_metrics(metrics)
train_generator_tqdm.set_description(description, refresh=False)
# Log parameter values to Tensorboard
if self._tensorboard.should_log_this_batch():
self._tensorboard.log_parameter_and_gradient_statistics(self.model, batch_grad_norm)
self._tensorboard.log_learning_rates(self.model, self.optimizer)
self._tensorboard.add_train_scalar("loss/loss_train", metrics["loss"])
self._tensorboard.log_metrics({"epoch_metrics/" + k: v for k, v in metrics.items()})
if self.tensorboard_log_batch_callback:
self.tensorboard_log_batch_callback(output_dict, self._tensorboard)
if self._tensorboard.should_log_histograms_this_batch():
self._tensorboard.log_histograms(self.model, histogram_parameters)
if self._log_batch_size_period:
cur_batch = sum([training_util.get_batch_size(batch) for batch in batch_group])
cumulative_batch_size += cur_batch
if (batches_this_epoch - 1) % self._log_batch_size_period == 0:
average = cumulative_batch_size/batches_this_epoch
logger.info(f"current batch size: {cur_batch} mean batch size: {average}")
self._tensorboard.add_train_scalar("current_batch_size", cur_batch)
self._tensorboard.add_train_scalar("mean_batch_size", average)
# Save model if needed.
if self._model_save_interval is not None and (
time.time() - last_save_time > self._model_save_interval
):
last_save_time = time.time()
self._save_checkpoint(
'{0}.{1}'.format(epoch, training_util.time_to_str(int(last_save_time)))
)
metrics = training_util.get_metrics(self.model, train_loss, batches_this_epoch, reset=True)
metrics['cpu_memory_MB'] = peak_cpu_usage
for (gpu_num, memory) in gpu_usage:
metrics['gpu_'+str(gpu_num)+'_memory_MB'] = memory
return metrics
@overrides
def _validation_loss(self) -> Tuple[float, int]:
"""
Computes the validation loss. Returns it and the number of batches.
"""
logger.info("Validating")
self.model.eval()
# Replace parameter values with the shadow values from the moving averages.
if self._moving_average is not None:
self._moving_average.assign_average_value()
if self._validation_iterator is not None:
val_iterator = self._validation_iterator
else:
val_iterator = self.iterator
num_gpus = len(self._cuda_devices)
raw_val_generator = val_iterator(self._validation_data,
num_epochs=1,
shuffle=False)
val_generator = lazy_groups_of(raw_val_generator, num_gpus)
num_validation_batches = math.ceil(val_iterator.get_num_batches(self._validation_data)/num_gpus)
val_generator_tqdm = Tqdm.tqdm(val_generator,
total=num_validation_batches)
batches_this_epoch = 0
val_loss = 0
for batch_group in val_generator_tqdm:
output_dict = self.get_output_dict(batch_group, for_training=False)
loss = self.get_batch_loss(output_dict, for_training=False)
if loss is not None:
# You shouldn't necessarily have to compute a loss for validation, so we allow for
# `loss` to be None. We need to be careful, though - `batches_this_epoch` is
# currently only used as the divisor for the loss function, so we can safely only
# count those batches for which we actually have a loss. If this variable ever
# gets used for something else, we might need to change things around a bit.
batches_this_epoch += 1
val_loss += loss.detach().cpu().numpy()
# Update the description with the latest metrics
val_metrics = training_util.get_metrics(self.model, val_loss, batches_this_epoch)
description = training_util.description_from_metrics(val_metrics)
val_generator_tqdm.set_description(description, refresh=False)
# Now restore the original parameter values.
if self._moving_average is not None:
self._moving_average.restore()
return val_loss, batches_this_epoch
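# Sketch (an assumption for illustration, not part of the original file): a
# loss_fn that could be passed to CustomTrainer above. get_batch_loss() calls
# loss_fn(output_dict, batch_num_total), so any callable with that signature
# works; here an auxiliary term is ramped up over the first 1000 batches. The
# keys "main_loss" and "aux_loss" are hypothetical -- use whatever keys your
# model's forward() actually returns.
def example_weighted_loss(output_dict: Dict[str, torch.Tensor], batch_num_total: int) -> torch.Tensor:
    aux_weight = min(1.0, batch_num_total / 1000.0)  # linear warm-up of the auxiliary term
    return output_dict["main_loss"] + aux_weight * output_dict["aux_loss"]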
| 13,380 | 46.282686 | 119 |
py
|
Unimer
|
Unimer-master/nni_main.py
|
# coding=utf-8
import os
import re
import time
import json
import shutil
import subprocess
from absl import app
from absl import flags
from pprint import pprint
import nni
# Data
flags.DEFINE_integer('cuda_device', 0, 'cuda_device')
flags.DEFINE_string('train_data', os.path.join('data', 'geo', 'geo_funql_train.tsv'), 'training data path')
flags.DEFINE_string('test_data', os.path.join('data', 'geo', 'geo_funql_test.tsv'), 'testing data path')
flags.DEFINE_enum('language', 'funql', ['funql', 'prolog', 'lambda', 'sql'], 'target language to generate')
flags.DEFINE_string('basepath', 'model', 'basepath to store models')
FLAGS = flags.FLAGS
PATTERN = re.compile(r'metrics_epoch_(\d+)\.json')
def main(argv):
# basepath
base_path = FLAGS.basepath
if not os.path.exists(base_path):
os.mkdir(base_path)
# model path
save_path = os.path.join(base_path, '%s_model' % str(time.time()))
os.mkdir(save_path)
params = nni.get_next_parameter()
source_embedding_dim = params['encoder_hidden_dim'] * 2
encoder_hidden_dim = params['encoder_hidden_dim']
encoder_input_dropout = params['encoder_input_dropout']
encoder_output_dropout = params['encoder_output_dropout']
decoder_hidden_dim = params['encoder_hidden_dim'] * 2
decoder_num_layers = params['decoder_num_layers']
rule_embedding_dim = params['rule_embedding_dim']
nonterminal_embedding_dim = params['nonterminal_embedding_dim']
dropout = params['decoder_dropout']
batch_size = params['batch_size']
lr = params['lr']
command = '''CUDA_VISIBLE_DEVICES=%d python run_parser.py --do_train=True \
--source_embedding_dim=%d \
--encoder_hidden_dim=%d \
--encoder_bidirectional=True \
--encoder_input_dropout=%f \
--encoder_output_dropout=%f \
--decoder_hidden_dim=%d \
--decoder_num_layers=%d \
--rule_embedding_dim=%d \
--nonterminal_embedding_dim=%d \
--max_decode_length=200 \
--serialization_dir=%s \
--seed=1 \
--dropout=%f \
--task=geo \
--language=%s \
--train_data=%s \
--test_data=%s \
--batch_size=%d \
--lr=%f \
--patient=30 \
--optimizer=adam \
--epoch=50 \
--model_save_interval=1 \
    --gradient_clip=5''' % (FLAGS.cuda_device, source_embedding_dim, encoder_hidden_dim, encoder_input_dropout, encoder_output_dropout,
decoder_hidden_dim, decoder_num_layers, rule_embedding_dim, nonterminal_embedding_dim, save_path,
dropout, FLAGS.language, FLAGS.train_data, FLAGS.test_data, batch_size, lr)
command = re.sub('\s+', ' ', command).strip()
subprocess.call(command, shell=True)
last_epoch = 0
for f in os.listdir(save_path):
if f.startswith('metrics_epoch_'):
match = PATTERN.match(f)
if match:
epoch = int(match.group(1))
if epoch > last_epoch:
last_epoch = epoch
file_path = os.path.join(save_path, 'metrics_epoch_%d.json' % last_epoch)
with open(file_path, 'r') as f:
metrics = json.load(f)
accuracy = metrics['best_validation_accuracy']
nni.report_final_result(accuracy)
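# Example NNI search space (illustration only -- the real search space lives in
# the NNI experiment configuration, not in this file). Whatever file is used,
# it must provide every key read from nni.get_next_parameter() in main() above;
# the ranges below are arbitrary.
EXAMPLE_SEARCH_SPACE = {
    "encoder_hidden_dim": {"_type": "choice", "_value": [128, 256]},
    "encoder_input_dropout": {"_type": "uniform", "_value": [0.0, 0.5]},
    "encoder_output_dropout": {"_type": "uniform", "_value": [0.0, 0.5]},
    "decoder_num_layers": {"_type": "choice", "_value": [1, 2]},
    "rule_embedding_dim": {"_type": "choice", "_value": [64, 128]},
    "nonterminal_embedding_dim": {"_type": "choice", "_value": [32, 64]},
    "decoder_dropout": {"_type": "uniform", "_value": [0.0, 0.5]},
    "batch_size": {"_type": "choice", "_value": [16, 32, 64]},
    "lr": {"_type": "loguniform", "_value": [0.0001, 0.01]},
}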
if __name__ == '__main__':
app.run(main)
| 3,358 | 33.628866 | 142 |
py
|
Unimer
|
Unimer-master/model_builder.py
|
# coding=utf8
import numpy
import torch
from typing import Dict, List, Callable
from overrides import overrides
from allennlp.modules.seq2seq_encoders import PytorchSeq2SeqWrapper
from allennlp.training.metrics import Metric
from allennlp.models.model import Model
from allennlp.data.vocabulary import Vocabulary
from allennlp.modules import Embedding
from allennlp.data.dataset_readers import DatasetReader
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.models.encoder_decoders.simple_seq2seq import SimpleSeq2Seq
from allennlp.modules.attention import BilinearAttention, DotProductAttention
from allennlp.predictors import Seq2SeqPredictor
from allennlp.common import Params
from allennlp.nn import Activation, InitializerApplicator
from grammars.grammar import Grammar
from grammars.parse_ast import AST
from neural_models.seq2seq_model import Seq2SeqModel
from neural_models.recombination_seq2seq import RecombinationSeq2Seq
from neural_models.recombination_seq2seq_copy import RecombinationSeq2SeqWithCopy
from neural_models.grammar_based_models import GrammarModel
from neural_models.modules.grammar_decoder import LSTMGrammarDecoder
from neural_models.modules.grammar_copy_decoder import LSTMGrammarCopyDecoder
from neural_models.modules.grammar_copy_decoder_2 import LSTMGrammarCopyDecoder as LSTMGrammarCopyDecoder2
from neural_models.GNN import GNNCopyTransformer
from neural_models.GNN2 import GNNCopyTransformer2
from metrics.sequency_accuracy import SequenceAccuracy
def get_predictor(model, reader) -> Seq2SeqPredictor:
return Seq2SeqPredictor(model=model, dataset_reader=reader)
def build_grammar_model(
flags,
data_reader: DatasetReader,
vocab: Vocabulary,
grammar: Grammar,
source_namespace: str = 'source_tokens',
) -> Model:
source_embedding = Embedding(vocab.get_vocab_size(namespace=source_namespace),
embedding_dim=flags.source_embedding_dim)
source_embedder = BasicTextFieldEmbedder({'tokens': source_embedding})
lstm_encoder = PytorchSeq2SeqWrapper(
torch.nn.LSTM(flags.source_embedding_dim, flags.encoder_hidden_dim, batch_first=True,
bidirectional=flags.encoder_bidirectional))
decoder = LSTMGrammarDecoder(grammar, AST, lstm_hidden_dim=flags.decoder_hidden_dim,
num_lstm_layers=flags.decoder_num_layers,
rule_pad_index=data_reader.rule_pad_index, rule_embedding_dim=flags.rule_embedding_dim,
nonterminal_pad_index=data_reader.nonterminal_pad_index,
nonterminal_end_index=data_reader.nonterminal_end_index,
nonterminal_embedding_dim=flags.nonterminal_embedding_dim,
source_encoding_dim=flags.encoder_hidden_dim * 2,
dropout=flags.dropout, max_target_length=flags.max_decode_length)
metric = SequenceAccuracy()
model = GrammarModel(vocab, source_embedder, lstm_encoder,
decoder, metric, flags, regularizer=None)
return model
def build_grammar_copy_model(
flags,
data_reader: DatasetReader,
vocab: Vocabulary,
grammar: Grammar,
source_namespace: str = 'source_tokens',
) -> Model:
source_embedding = Embedding(vocab.get_vocab_size(namespace=source_namespace),
embedding_dim=flags.source_embedding_dim)
source_embedder = BasicTextFieldEmbedder({'tokens': source_embedding})
lstm_encoder = PytorchSeq2SeqWrapper(
torch.nn.LSTM(flags.source_embedding_dim, flags.encoder_hidden_dim, batch_first=True,
bidirectional=flags.encoder_bidirectional))
decoder = LSTMGrammarCopyDecoder(grammar, AST, lstm_hidden_dim=flags.decoder_hidden_dim,
num_lstm_layers=flags.decoder_num_layers,
rule_pad_index=data_reader.rule_pad_index,
rule_embedding_dim=flags.rule_embedding_dim,
nonterminal_pad_index=data_reader.nonterminal_pad_index,
nonterminal_end_index=data_reader.nonterminal_end_index,
nonterminal_embedding_dim=flags.nonterminal_embedding_dim,
source_encoding_dim=flags.encoder_hidden_dim * 2,
dropout=flags.dropout, max_target_length=flags.max_decode_length)
metric = SequenceAccuracy()
model = GrammarModel(vocab, source_embedder, lstm_encoder,
decoder, metric, flags, regularizer=None)
return model
def build_grammar_copy_model_2(
flags,
data_reader: DatasetReader,
vocab: Vocabulary,
grammar: Grammar,
source_namespace: str = 'source_tokens',
) -> Model:
source_embedding = Embedding(vocab.get_vocab_size(namespace=source_namespace),
embedding_dim=flags.source_embedding_dim)
source_embedder = BasicTextFieldEmbedder({'tokens': source_embedding})
lstm_encoder = PytorchSeq2SeqWrapper(
torch.nn.LSTM(flags.source_embedding_dim, flags.encoder_hidden_dim, batch_first=True,
bidirectional=flags.encoder_bidirectional))
decoder = LSTMGrammarCopyDecoder2(grammar, AST, lstm_hidden_dim=flags.decoder_hidden_dim,
num_lstm_layers=flags.decoder_num_layers,
rule_pad_index=data_reader.rule_pad_index,
rule_embedding_dim=flags.rule_embedding_dim,
nonterminal_pad_index=data_reader.nonterminal_pad_index,
nonterminal_end_index=data_reader.nonterminal_end_index,
nonterminal_embedding_dim=flags.nonterminal_embedding_dim,
source_encoding_dim=flags.encoder_hidden_dim * 2,
dropout=flags.dropout, max_target_length=flags.max_decode_length)
metric = SequenceAccuracy()
model = GrammarModel(vocab, source_embedder, lstm_encoder,
decoder, metric, flags, regularizer=None)
return model
def build_parsing_seq2seq_model(
flags,
data_reader,
vocab: Vocabulary,
source_namespace: str = 'source_tokens',
target_namespace: str = 'target_tokens'
) -> Model:
source_embedding = Embedding(vocab.get_vocab_size(namespace=source_namespace),
embedding_dim=flags.source_embedding_dim)
source_embedder = BasicTextFieldEmbedder({'tokens': source_embedding})
lstm_encoder = PytorchSeq2SeqWrapper(
torch.nn.LSTM(flags.source_embedding_dim, flags.encoder_hidden_dim, batch_first=True,
bidirectional=flags.encoder_bidirectional))
attention = DotProductAttention()
metric = SequenceAccuracy()
model = Seq2SeqModel(vocab, source_embedder, lstm_encoder, flags.max_decode_length,
target_embedding_dim=flags.decoder_hidden_dim,
target_namespace=target_namespace,
attention=attention,
beam_size=flags.beam_size,
use_bleu=False,
seq_metrics=metric)
return model
def build_parsing_recombination_seq2seq_model(
flags,
data_reader,
vocab: Vocabulary,
source_namespace: str = 'source_tokens',
target_namespace: str = 'target_tokens'
) -> Model:
source_embedding = Embedding(vocab.get_vocab_size(namespace=source_namespace),
embedding_dim=flags.source_embedding_dim)
lstm = PytorchSeq2SeqWrapper(torch.nn.LSTM(flags.source_embedding_dim, flags.encoder_hidden_dim, batch_first=True,
bidirectional=flags.encoder_bidirectional))
attention = BilinearAttention(flags.attention_hidden_dim, flags.attention_hidden_dim)
source_embedder = BasicTextFieldEmbedder({'tokens': source_embedding})
initializer = InitializerApplicator.from_params([(".*bias", Params({"type": "constant", "val": 0})),
('.*', Params({"type": "uniform", "a": -0.1, "b": 0.1}))])
metric = SequenceAccuracy()
model = RecombinationSeq2Seq(vocab, source_embedder, lstm, flags.max_decode_length,
seq_metrics=metric,
target_embedding_dim=flags.target_embedding_dim,
target_namespace=target_namespace,
output_attention=attention,
beam_size=flags.beam_size,
use_bleu=False,
encoder_input_dropout=flags.encoder_input_dropout,
encoder_output_dropout=flags.encoder_output_dropout,
dropout=flags.dropout,
feed_output_attention_to_decoder=True,
keep_decoder_output_dim_same_as_encoder=True,
initializer=initializer)
return model
def build_parsing_recombination_seq2seq_copy_model(
flags,
data_reader,
vocab: Vocabulary,
source_namespace: str = 'source_tokens',
target_namespace: str = 'target_tokens'
) -> Model:
source_embedding = Embedding(vocab.get_vocab_size(namespace=source_namespace),
embedding_dim=flags.source_embedding_dim)
lstm = PytorchSeq2SeqWrapper(torch.nn.LSTM(flags.source_embedding_dim, flags.encoder_hidden_dim, batch_first=True,
bidirectional=flags.encoder_bidirectional))
attention = BilinearAttention(flags.attention_hidden_dim, flags.attention_hidden_dim, normalize=False)
source_embedder = BasicTextFieldEmbedder({'tokens': source_embedding})
initializer = InitializerApplicator.from_params([(".*bias", Params({"type": "constant", "val": 0})),
('.*', Params({"type": "uniform", "a": -0.1, "b": 0.1}))])
metric = SequenceAccuracy()
model = RecombinationSeq2SeqWithCopy(vocab, source_embedder, lstm, flags.max_decode_length,
seq_metrics=metric,
source_namespace=source_namespace,
target_namespace=target_namespace,
target_embedding_dim=flags.target_embedding_dim,
attention=attention,
beam_size=flags.beam_size,
use_bleu = False,
encoder_input_dropout=flags.encoder_input_dropout,
encoder_output_dropout=flags.encoder_output_dropout,
dropout=flags.dropout,
feed_output_attention_to_decoder=True,
keep_decoder_output_dim_same_as_encoder=True,
initializer=initializer)
return model
def build_gnn_parsing_model(
flags,
data_reader: DatasetReader,
vocab: Vocabulary,
is_test: bool = False,
source_namespace: str = 'source_tokens',
target_namespace: str = 'target_tokens',
segment_namespace: str = 'segment_tokens',
) -> Model:
metric = SequenceAccuracy()
model = GNNCopyTransformer(
vocab=vocab,
source_namespace=source_namespace,
target_namespace=target_namespace,
segment_namespace=segment_namespace,
max_decoding_step=flags.max_decode_length,
token_based_metric=metric,
source_embedding_dim=flags.source_embedding_dim,
target_embedding_dim=flags.target_embedding_dim,
encoder_d_model=flags.transformer_encoder_hidden_dim,
decoder_d_model=flags.transformer_decoder_hidden_dim,
encoder_nhead=flags.transformer_encoder_nhead,
decoder_nhead=flags.transformer_decoder_nhead,
num_decoder_layers=flags.transformer_num_decoder_layers,
num_encoder_layers=flags.transformer_num_encoder_layers,
encoder_dim_feedforward=flags.transformer_encoder_feedforward_dim,
decoder_dim_feedforward=flags.transformer_decoder_feedforward_dim,
dropout=flags.dropout,
beam_size=1,
nlabels=flags.gnn_transformer_num_edge_labels,
max_decode_clip_range=flags.gnn_max_decode_clip_range,
encode_edge_label_with_matrix=flags.gnn_encode_edge_label_with_matrix,
is_test=is_test
)
return model
def build_gnn_parsing_model2(
flags,
data_reader: DatasetReader,
vocab: Vocabulary,
is_test: bool = False,
source_namespace: str = 'source_tokens',
target_namespace: str = 'target_tokens',
segment_namespace: str = 'segment_tokens',
) -> Model:
metric = SequenceAccuracy()
model = GNNCopyTransformer2(
vocab=vocab,
source_namespace=source_namespace,
target_namespace=target_namespace,
segment_namespace=segment_namespace,
max_decoding_step=flags.max_decode_length,
token_based_metric=metric,
source_embedding_dim=flags.source_embedding_dim,
target_embedding_dim=flags.target_embedding_dim,
encoder_d_model=flags.transformer_encoder_hidden_dim,
decoder_d_model=flags.transformer_decoder_hidden_dim,
encoder_nhead=flags.transformer_encoder_nhead,
decoder_nhead=flags.transformer_decoder_nhead,
num_decoder_layers=flags.transformer_num_decoder_layers,
num_encoder_layers=flags.transformer_num_encoder_layers,
encoder_dim_feedforward=flags.transformer_encoder_feedforward_dim,
decoder_dim_feedforward=flags.transformer_decoder_feedforward_dim,
dropout=flags.dropout,
beam_size=1,
nlabels=flags.gnn_transformer_num_edge_labels,
max_decode_clip_range=flags.gnn_max_decode_clip_range,
encode_edge_label_with_matrix=flags.gnn_encode_edge_label_with_matrix,
is_test=is_test
)
return model
def build_seq2seq_model(
flags,
data_reader,
vocab: Vocabulary,
source_namespace: str = 'source_tokens',
target_namespace: str = 'target_tokens'
) -> Model:
source_embedding = Embedding(vocab.get_vocab_size(namespace=source_namespace),
embedding_dim=flags.source_embedding_dim)
source_embedder = BasicTextFieldEmbedder({'tokens': source_embedding})
lstm_encoder = PytorchSeq2SeqWrapper(
torch.nn.LSTM(flags.source_embedding_dim, flags.encoder_hidden_dim, batch_first=True,
bidirectional=flags.encoder_bidirectional))
attention = DotProductAttention()
model = SimpleSeq2Seq(vocab, source_embedder, lstm_encoder, flags.max_decode_length,
target_embedding_dim=flags.decoder_hidden_dim,
target_namespace=target_namespace,
attention=attention,
beam_size=flags.beam_size,
use_bleu=True)
return model
| 15,631 | 50.084967 | 120 |
py
|
Unimer
|
Unimer-master/__init__.py
| 0 | 0 | 0 |
py
|
|
Unimer
|
Unimer-master/run_parser.py
|
# coding=utf-8
import re
import os
import json
import copy
import random
import torch
import itertools
from typing import Dict, Any
from overrides import overrides
from absl import app
from absl import flags
import numpy as np
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import MultiStepLR
from allennlp.data.iterators import BucketIterator
from allennlp.training.util import evaluate as model_evaluate
from allennlp.data.vocabulary import Vocabulary
from allennlp.training.learning_rate_schedulers import LearningRateScheduler, NoamLR
from allennlp.data.tokenizers import WordTokenizer
from allennlp.data.tokenizers.word_splitter import SpacyWordSplitter
from grammars.grammar import get_grammar
from grammars.entity_matcher import get_entity_matcher, get_seq2seq_entity_matcher
from grammars.gnn_entity_matcher import get_gnn_entity_replacer, get_gnn_entity_matcher
from grammars.utils import get_logical_form_preprocessor, get_logical_form_postprocessor,\
get_logical_form_tokenizer, get_utterance_preprocessor
from data_readers.grammar_based_reader import GrammarBasedDataReader
from data_readers.grammar_copy_based_reader import GrammarCopyBasedDataReader
from data_readers.seq2seq_data_reader import Seq2SeqDataReader
from data_readers.gnn_data_reader import GNNCopyTransformerDataReader
from model_builder import build_grammar_model, get_predictor, build_seq2seq_model, build_parsing_seq2seq_model, \
build_grammar_copy_model, build_grammar_copy_model_2, build_parsing_recombination_seq2seq_model,\
build_parsing_recombination_seq2seq_copy_model, build_gnn_parsing_model, build_gnn_parsing_model2
from custom_trainer import CustomTrainer
from lr_scheduler_wrapper import PyTorchMultiStepLearningRateSchedulerWrapper
import evaluations
flags.DEFINE_bool('do_train', False, 'whether to do training')
flags.DEFINE_integer('seed', 100, 'random seed')
# Model Type
flags.DEFINE_enum(
'model', 'parsing',
['parsing', 'copy_parsing', 'copy_parsing_2',
'seq_parsing', 'recombination_seq_parsing',
'recombination_copy_seq_parsing',
'translation', 'gnn_parsing', 'gnn_parsing2'],
'Specifying parsing models and translation models'
)
# Data
flags.DEFINE_enum('task', 'geo', ['geo', 'atis', 'job'], 'task')
flags.DEFINE_string('train_data', os.path.join(
'data', 'geo', 'geo_prolog_train.tsv'), 'training data path')
flags.DEFINE_string('test_data', os.path.join(
    'data', 'geo', 'geo_prolog_test.tsv'), 'testing data path')
flags.DEFINE_enum('language', 'prolog', [
'funql', 'typed_funql', 'prolog', 'prolog2', 'lambda',
'lambda2', 'lambda3', 'lambda4', 'sql', 'sql2', 'sql3'], 'target language to generate')
flags.DEFINE_integer('min_count', 1, 'Minimum counts for vocabulary')
# Model Hyper-parameters
flags.DEFINE_integer('source_embedding_dim', 128, 'Embedding size of source')
flags.DEFINE_integer('encoder_hidden_dim', 128, 'Hidden size of lstm encoder')
flags.DEFINE_bool('encoder_bidirectional', True,
                  'Whether to use bidirectional lstm')
flags.DEFINE_float('encoder_output_dropout', 0.2,
                   'Output dropout rate of encoder')
flags.DEFINE_float('encoder_input_dropout', 0.2,
                   'Input dropout rate of encoder')
# Grammar Decoder
flags.DEFINE_integer('target_embedding_dim', 128, 'Hidden size of lstm decoder')
flags.DEFINE_integer('decoder_hidden_dim', 128, 'Hidden size of lstm decoder')
flags.DEFINE_integer('decoder_num_layers', 1, 'Number of layers in decoder')
flags.DEFINE_integer('rule_embedding_dim', 64, 'Embedding size of rule')
flags.DEFINE_integer('nonterminal_embedding_dim', 64,
'Embedding size of non-terminal')
flags.DEFINE_integer('max_decode_length', 100, 'Maximum decode steps')
flags.DEFINE_integer('attention_hidden_dim', 100, 'Attention hidden dim for Bilinear Attention')
flags.DEFINE_float('dropout', 0.2, 'Dropout rate')
# GNN Hyperparameters
flags.DEFINE_integer('transformer_encoder_hidden_dim', 128, 'hidden dimension of encoder of transformer')
flags.DEFINE_integer('transformer_decoder_hidden_dim', 128, 'hidden dimension of decoder of transformer')
flags.DEFINE_integer('transformer_encoder_nhead', 128, 'number of heads in encoder self-attention')
flags.DEFINE_integer('transformer_decoder_nhead', 128, 'number of heads in decoder self-attention')
flags.DEFINE_integer('transformer_num_encoder_layers', 3, 'number of encoder layer in transformer')
flags.DEFINE_integer('transformer_num_decoder_layers', 3, 'number of decoder layer in transformer')
flags.DEFINE_integer('transformer_encoder_feedforward_dim', 256, 'dimension of feed forward layer in transformer')
flags.DEFINE_integer('transformer_decoder_feedforward_dim', 256, 'dimension of feed forward layer in transformer')
flags.DEFINE_integer('gnn_transformer_num_edge_labels', 20, 'number of edge labels in gnn transformer')
flags.DEFINE_bool('gnn_encode_edge_label_with_matrix', True, 'whether to encode edge label with matrix')
flags.DEFINE_integer('gnn_relative_position_clipped_range', 8, 'clip distance of relative position representations')
flags.DEFINE_integer('gnn_max_decode_clip_range', 8, 'clip distance of decode sequence')
# Optimization
flags.DEFINE_bool('use_scheduler', False, 'whether to use learning rate scheduler')
flags.DEFINE_float('lr', 0.001, 'learning rate')
flags.DEFINE_enum('optimizer', 'adam', [
'adam', 'sgd', 'rmsprop', 'adadelta'], 'optimizer to use')
flags.DEFINE_integer('warmup_steps', 800, 'number of steps to increase learning rate')
flags.DEFINE_float('adam_beta_1', 0.9, 'hyper-parameter beta_1 of adam')
flags.DEFINE_float('adam_beta_2', 0.999, 'hyper-parameter beta_2 of adam')
flags.DEFINE_float('adam_eps', 1e-8, 'hyper-parameter epsilon of adam')
flags.DEFINE_enum('scheduler', 'noam', ['noam', 'bert', 'finetune_bert_noam'], 'scheduler for transformer based models')
flags.DEFINE_integer('batch_size', 32, 'batch size')
flags.DEFINE_integer(
'patient', 10, 'Number of epochs to be patient before early stopping')
flags.DEFINE_integer('epoch', 1, 'Number of epoch to train')
flags.DEFINE_integer('model_save_interval', 1, 'Interval to save model')
flags.DEFINE_float('gradient_clip', 5.0, 'Clip gradient')
flags.DEFINE_string('validation_metric', '+accuracy', 'validation metric')
# Utils
flags.DEFINE_string('serialization_dir', os.path.join(
'trained_models', 'geo'), 'Path to save trained models')
# Evaluation
flags.DEFINE_bool('save_prediction_result', False,
'Whether to save prediction result')
flags.DEFINE_string('checkpoint', 'best.th', 'Checkpoint to evaluate')
flags.DEFINE_integer('beam_size', 1, 'Beam Search Size')
FLAGS = flags.FLAGS
def set_random_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# Seed all GPUs with the same seed if available.
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
def save_flags(FLAGs):
with open(os.path.join(FLAGS.serialization_dir, 'config.txt'), 'w') as f:
f.write(FLAGS.flags_into_string())
def build_data_reader(FLAGS):
splitter = SpacyWordSplitter(pos_tags=True)
question_tokenizer = WordTokenizer(SpacyWordSplitter(pos_tags=True))
reader = None
if FLAGS.model == 'parsing':
# Parsing
grammar = get_grammar(FLAGS.task, FLAGS.language)
assert grammar is not None
logical_form_preprocessor = get_logical_form_preprocessor(
FLAGS.task, FLAGS.language)
if FLAGS.do_train:
max_target_length = FLAGS.max_decode_length
else:
max_target_length = 0
reader = GrammarBasedDataReader(
question_tokenizer, grammar, logical_form_preprocessor=logical_form_preprocessor,
maximum_target_length=max_target_length)
elif FLAGS.model in ['copy_parsing', 'copy_parsing_2']:
# Parsing
grammar = get_grammar(FLAGS.task, FLAGS.language)
assert grammar is not None
logical_form_preprocessor = get_logical_form_preprocessor(
FLAGS.task, FLAGS.language)
if FLAGS.do_train:
max_target_length = FLAGS.max_decode_length
else:
max_target_length = 0
entity_matcher = get_entity_matcher(FLAGS.task, FLAGS.language)
utterance_preprocessor = get_utterance_preprocessor(FLAGS.task, FLAGS.language)
reader = GrammarCopyBasedDataReader(
question_tokenizer, grammar, logical_form_preprocessor=logical_form_preprocessor,
utterance_preprocessor=utterance_preprocessor,
copy_link_finder=entity_matcher, maximum_target_length=max_target_length)
elif FLAGS.model == 'translation':
# Translation
logical_form_tokenizer = get_logical_form_tokenizer(FLAGS.task, FLAGS.language)
reader = Seq2SeqDataReader(
question_tokenizer=question_tokenizer,
logical_form_tokenizer=logical_form_tokenizer,
is_parsing=False)
return reader
elif FLAGS.model == 'seq_parsing':
# Parsing without grammar
logical_form_tokenizer = get_logical_form_tokenizer(FLAGS.task, FLAGS.language)
reader = Seq2SeqDataReader(
question_tokenizer=question_tokenizer,
logical_form_tokenizer=logical_form_tokenizer,
is_parsing=True)
elif FLAGS.model == 'recombination_seq_parsing':
logical_form_preprocessor = get_logical_form_preprocessor(
FLAGS.task, FLAGS.language, normalize_var_with_de_brujin_index=True)
logical_form_tokenizer = get_logical_form_tokenizer(FLAGS.task, FLAGS.language)
if FLAGS.do_train:
max_target_length = FLAGS.max_decode_length
else:
max_target_length = 0
reader = Seq2SeqDataReader(
question_tokenizer=question_tokenizer,
logical_form_tokenizer=logical_form_tokenizer,
logical_form_preprocessor=logical_form_preprocessor,
is_parsing=True,
maximum_target_length=max_target_length
)
return reader
elif FLAGS.model == 'recombination_copy_seq_parsing':
logical_form_preprocessor = get_logical_form_preprocessor(
FLAGS.task, FLAGS.language, normalize_var_with_de_brujin_index=True)
logical_form_tokenizer = get_logical_form_tokenizer(FLAGS.task, FLAGS.language)
if FLAGS.do_train:
max_target_length = FLAGS.max_decode_length
else:
max_target_length = 0
entity_matcher = get_seq2seq_entity_matcher(FLAGS.task, FLAGS.language)
if FLAGS.language.startswith('sql'):
exclude_target_words = ['select', 'from', 'and', 'in', 'where', 'group', 'order', 'having', 'limit', 'not']
else:
exclude_target_words = None
reader = Seq2SeqDataReader(
question_tokenizer=question_tokenizer,
logical_form_tokenizer=logical_form_tokenizer,
logical_form_preprocessor=logical_form_preprocessor,
is_parsing=True,
enable_copy=True,
maximum_target_length=max_target_length,
entity_matcher=entity_matcher,
exclude_target_words=exclude_target_words
)
return reader
elif FLAGS.model in ['gnn_parsing', 'gnn_parsing2']:
logical_form_preprocessor = get_logical_form_preprocessor(
FLAGS.task, FLAGS.language, normalize_var_with_de_brujin_index=True)
logical_form_tokenizer = get_logical_form_tokenizer(FLAGS.task, FLAGS.language)
if FLAGS.do_train:
max_target_length = FLAGS.max_decode_length
allow_drop = True
else:
max_target_length = 0
allow_drop = False
grammar = get_grammar(FLAGS.task, FLAGS.language)
entity_matcher = get_gnn_entity_matcher(FLAGS.task, FLAGS.language)
entity_replacer = get_gnn_entity_replacer(FLAGS.task, FLAGS.language)
reader = GNNCopyTransformerDataReader(
entity_matcher=entity_matcher,
entity_replacer=entity_replacer,
target_grammar=grammar,
source_tokenizer=question_tokenizer,
target_tokenizer=logical_form_tokenizer,
logical_form_preprocessor=logical_form_preprocessor,
relative_position_clipped_range=FLAGS.gnn_relative_position_clipped_range,
nlabels=FLAGS.gnn_transformer_num_edge_labels,
allow_drop=allow_drop
)
return reader
return reader
def build_optimizer(FLAGS, parameters) -> optim.Optimizer:
if FLAGS.optimizer == 'adam':
optimizer = optim.Adam(parameters, lr=FLAGS.lr)
elif FLAGS.optimizer == 'sgd':
optimizer = optim.SGD(parameters, lr=FLAGS.lr, momentum=0,
dampening=0, weight_decay=0, nesterov=False)
elif FLAGS.optimizer == 'rmsprop':
optimizer = optim.RMSprop(parameters, lr=FLAGS.lr, alpha=0.95)
elif FLAGS.optimizer == 'adadelta':
optimizer = optim.Adadelta(parameters, lr=FLAGS.lr)
else:
optimizer = None
return optimizer
def build_lr_scheduler(FLAGS, optimizer) -> LearningRateScheduler:
if not FLAGS.use_scheduler:
return None
allen_scheduler = None
if FLAGS.optimizer == 'rmsprop':
scheduler = MultiStepLR(optimizer, milestones=[5] + list(range(6, 200)), gamma=0.98)
allen_scheduler = PyTorchMultiStepLearningRateSchedulerWrapper(scheduler)
elif FLAGS.optimizer == 'sgd':
scheduler = MultiStepLR(optimizer, milestones=[15, 20, 25, 30], gamma=0.5)
allen_scheduler = PyTorchMultiStepLearningRateSchedulerWrapper(scheduler)
elif FLAGS.optimizer == 'adam':
if FLAGS.scheduler == 'noam':
print('Use Noam Scheduler')
allen_scheduler = NoamLR(optimizer, model_size=FLAGS.transformer_encoder_hidden_dim,
warmup_steps=FLAGS.warmup_steps)
return allen_scheduler
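# Hedged sketch (not part of the original script): the Noam schedule selected above is
# commonly defined, following "Attention Is All You Need", as
#     lr = factor * d_model**-0.5 * min(step**-0.5, step * warmup_steps**-1.5).
# The helper below only illustrates that shape; `_noam_lr_sketch` is a hypothetical name
# and is not referenced anywhere else in this project.
def _noam_lr_sketch(step: int, d_model: int = 128, warmup_steps: int = 800, factor: float = 1.0) -> float:
    step = max(step, 1)  # guard against step 0
    return factor * (d_model ** -0.5) * min(step ** -0.5, step * warmup_steps ** -1.5)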
def main(argv):
set_random_seed(FLAGS.seed)
print(FLAGS.flags_into_string())
reader = build_data_reader(FLAGS)
assert reader is not None
if FLAGS.do_train:
is_test = False
save_flags(FLAGS)
train_dataset, test_dataset = reader.read(
FLAGS.train_data), reader.read(FLAGS.test_data)
vocab = Vocabulary.from_instances(
train_dataset, min_count={'source_tokens': FLAGS.min_count})
else:
is_test = True
test_dataset = reader.read(FLAGS.test_data)
vocab = Vocabulary.from_files(os.path.join(
FLAGS.serialization_dir, 'vocabulary'))
if FLAGS.model == 'parsing':
model = build_grammar_model(FLAGS, reader, vocab, reader.grammar)
elif FLAGS.model == 'copy_parsing':
model = build_grammar_copy_model(FLAGS, reader, vocab, reader.grammar)
elif FLAGS.model == 'copy_parsing_2':
model = build_grammar_copy_model_2(FLAGS, reader, vocab, reader.grammar)
elif FLAGS.model == 'translation':
model = build_seq2seq_model(FLAGS, reader, vocab)
elif FLAGS.model == 'recombination_seq_parsing':
model = build_parsing_recombination_seq2seq_model(FLAGS, reader, vocab)
elif FLAGS.model == 'recombination_copy_seq_parsing':
model = build_parsing_recombination_seq2seq_copy_model(FLAGS, reader, vocab)
elif FLAGS.model == 'gnn_parsing':
model = build_gnn_parsing_model(FLAGS, reader, vocab, is_test=not FLAGS.do_train)
elif FLAGS.model == 'gnn_parsing2':
model = build_gnn_parsing_model2(FLAGS, reader, vocab, is_test=not FLAGS.do_train)
else:
model = build_parsing_seq2seq_model(FLAGS, reader, vocab)
print(model)
assert model is not None
print("Cuda Available: ", torch.cuda.is_available())
if torch.cuda.is_available():
cuda_device = list(range(torch.cuda.device_count()))
print("Cuda device: ", cuda_device)
if len(cuda_device) > 1:
print("Enable Multiple GPU: ", cuda_device)
# Enable Multiple GPU
model = model.cuda(cuda_device[0])
else:
cuda_device = cuda_device[0]
model = model.cuda(cuda_device)
else:
cuda_device = -1
if FLAGS.do_train:
with torch.autograd.set_detect_anomaly(False):
model.train()
optimizer = build_optimizer(FLAGS, model.parameters())
assert optimizer is not None
allen_scheduler = build_lr_scheduler(FLAGS, optimizer)
vocab.save_to_files(os.path.join(
FLAGS.serialization_dir, 'vocabulary'))
iterator = BucketIterator(batch_size=FLAGS.batch_size, sorting_keys=[
("source_tokens", "num_tokens")])
iterator.index_with(vocab)
trainer = CustomTrainer(model=model,
optimizer=optimizer,
iterator=iterator,
train_dataset=train_dataset,
validation_dataset=test_dataset,
patience=FLAGS.patient,
num_epochs=FLAGS.epoch,
cuda_device=cuda_device,
serialization_dir=FLAGS.serialization_dir,
grad_clipping=FLAGS.gradient_clip,
validation_metric=FLAGS.validation_metric,
should_log_learning_rate=True,
summary_interval=5,
num_serialized_models_to_keep=5,
learning_rate_scheduler=allen_scheduler,
loss_fn=None)
trainer.train()
else:
# Load Model
with open(os.path.join(FLAGS.serialization_dir, FLAGS.checkpoint), 'rb') as f:
model.load_state_dict(torch.load(f))
model.eval()
iterator = BucketIterator(batch_size=FLAGS.batch_size, sorting_keys=[
("source_tokens", "num_tokens")])
iterator.index_with(vocab)
metrics = model_evaluate(
model, test_dataset, iterator, cuda_device, batch_weight_key='')
for key, metric in metrics.items():
print("%s: %s" % (key, str(metric)))
if FLAGS.save_prediction_result:
results = list()
predictor = get_predictor(model, reader)
total, correct = 0, 0
preprocessor = get_logical_form_preprocessor(FLAGS.task, FLAGS.language)
postprocessor = get_logical_form_postprocessor(
FLAGS.task, FLAGS.language)
for idx in itertools.islice(range(len(test_dataset)), 0, len(test_dataset), FLAGS.batch_size):
instances = test_dataset[idx:idx + FLAGS.batch_size]
total += len(instances)
predictions = predictor.predict_batch_instance(instances)
for inst, pred in zip(instances, predictions):
if FLAGS.model == 'parsing':
is_correct, result = evaluations.evaluate_grammar_based_prediction(
inst, pred, reader.grammar, preprocessor, postprocessor)
elif FLAGS.model in ['copy_parsing', 'copy_parsing_2']:
is_correct, result = evaluations.evaluate_grammar_copy_based_prediction(
inst, pred, reader.grammar, preprocessor, postprocessor)
elif FLAGS.model in ['seq_parsing', 'recombination_seq_parsing']:
is_correct, result = evaluations.evaluate_seq_parsing_prediction(
inst, pred, FLAGS.language)
elif FLAGS.model in ['recombination_copy_seq_parsing']:
is_correct, result = evaluations.evaluate_seq_copy_parsing_prediction(
inst, pred, FLAGS.language
)
elif FLAGS.model in ['gnn_parsing', 'gnn_parsing2']:
is_correct, result = evaluations.evaluate_gnn_parsing_prediction(
inst, pred, FLAGS.language
)
else:
# Translation
is_correct, result = evaluations.evaluate_translation_prediction(
inst, pred, FLAGS.language)
if is_correct:
correct += 1
results.append(result)
assert total == len(test_dataset)
print('Total: %d, Correct: %d, Accuracy: %f' %
(total, correct, correct / total))
with open(os.path.join(FLAGS.serialization_dir, 'predictions.json'), 'w') as f:
f.write(json.dumps(results, indent=4))
if __name__ == '__main__':
app.run(main)
| 21,059 | 46.432432 | 120 |
py
|
Unimer
|
Unimer-master/text2sql_data.py
|
# coding=utf8
"""
Parsing data from https://github.com/jkkummerfeld/text2sql-data/tree/master/data
"""
import os
import json
import copy
def get_sql_data(base_path, raw_data_path):
with open(raw_data_path, 'r') as f:
data = json.load(f)
question_based_train_dataset, question_based_dev_dataset, question_based_test_dataset = list(), list(), list()
query_based_train_dataset, query_based_dev_dataset, query_based_test_dataset = list(), list(), list()
for d in data:
sql = d['sql'][0]
sentences = d['sentences']
for s_dict in sentences:
s = s_dict['text']
_sql = copy.copy(sql)
for name in s_dict['variables']:
value = s_dict['variables'][name]
if len(value) == 0:
for variable in d['variables']:
if variable['name'] == name:
value = variable['example']
s = value.join(s.split(name))
_sql = value.join(_sql.split(name))
if s_dict['question-split'] == 'test':
question_based_test_dataset.append("%s\t%s" % (s, _sql))
elif s_dict['question-split'] == 'dev':
question_based_dev_dataset.append("%s\t%s" % (s, _sql))
else:
question_based_train_dataset.append("%s\t%s" % (s, _sql))
if d['query-split'] == 'test':
query_based_test_dataset.append("%s\t%s" % (s, _sql))
elif d['query-split'] == 'dev':
query_based_dev_dataset.append("%s\t%s" % (s, _sql))
else:
query_based_train_dataset.append("%s\t%s" % (s, _sql))
    save_train_path, save_dev_path, save_test_path = os.path.join(base_path, 'atis_sql_question_based_train_2018.tsv'), \
        os.path.join(base_path, 'atis_sql_question_based_dev_2018.tsv'), \
        os.path.join(base_path, 'atis_sql_question_based_test_2018.tsv')
with open(save_train_path, 'w') as f:
f.write('\n'.join(question_based_train_dataset))
with open(save_dev_path, 'w') as f:
f.write('\n'.join(question_based_dev_dataset))
with open(save_test_path, 'w') as f:
f.write('\n'.join(question_based_test_dataset))
save_train_path, save_dev_path, save_test_path = os.path.join(base_path, 'atis_sql_query_based_train_2018.tsv'), \
os.path.join(base_path, 'atis_sql_query_based_dev_2018.tsv'), \
os.path.join(base_path, 'atis_sql_query_based_test_2018.tsv')
with open(save_train_path, 'w') as f:
f.write('\n'.join(query_based_train_dataset))
with open(save_dev_path, 'w') as f:
f.write('\n'.join(query_based_dev_dataset))
with open(save_test_path, 'w') as f:
f.write('\n'.join(query_based_test_dataset))
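# Hedged sketch (not part of the original preprocessing script): the loop above replaces
# each placeholder variable with its value via `value.join(text.split(name))`, which is a
# regex-free string substitution. The function name and literal values below are
# illustrative only.
def _substitution_sketch():
    s = 'flights from city_name0 to city_name1'
    sql = "SELECT flight_id FROM flight WHERE from_city = 'city_name0' AND to_city = 'city_name1'"
    variables = {'city_name0': 'boston', 'city_name1': 'denver'}
    for name, value in variables.items():
        s = value.join(s.split(name))
        sql = value.join(sql.split(name))
    assert s == 'flights from boston to denver'
    return s, sql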
if __name__ == '__main__':
base_path = os.path.join('data', 'atis')
raw_data_path = os.path.join('data', 'atis', 'atis.json')
get_sql_data(base_path, raw_data_path)
| 2,982 | 41.014085 | 121 |
py
|
Unimer
|
Unimer-master/hyperparameters/read_hyperparameter.py
|
# coding=utf8
import json
import argparse
from pprint import pprint
def main(path):
with open(path, 'r', encoding='utf8') as f:
for line in f:
results = json.loads(json.loads(line))
pprint(results)
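# Hedged sketch (not part of the original utility): each line of the input file is assumed
# to be a JSON-encoded string whose content is itself JSON, hence the double json.loads in
# `main` above. The helper name and values below are illustrative only.
def _double_decode_sketch():
    line = json.dumps(json.dumps({'lr': 0.001, 'batch_size': 32}))
    assert json.loads(json.loads(line)) == {'lr': 0.001, 'batch_size': 32}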
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--file', help='file that stores hyper-parameters', required=True)
args = parser.parse_args()
main(args.file)
| 456 | 20.761905 | 74 |
py
|
Unimer
|
Unimer-master/neural_models/recombination_seq2seq_copy.py
|
# coding=utf8
from typing import Dict, List, Tuple
import numpy
from overrides import overrides
import torch
import torch.nn.functional as F
from torch.nn.modules.linear import Linear
from torch.nn.modules.rnn import LSTMCell
from allennlp.common.util import START_SYMBOL, END_SYMBOL
from allennlp.data.vocabulary import Vocabulary
from allennlp.data.tokenizers import Token
from allennlp.modules import Attention, TextFieldEmbedder, Seq2SeqEncoder
from allennlp.models.model import Model
from allennlp.modules.token_embedders import Embedding
from allennlp.training.metrics import Metric
from allennlp.nn.beam_search import BeamSearch
from allennlp.training.metrics import BLEU
from allennlp.nn import util, InitializerApplicator
class RecombinationSeq2SeqWithCopy(Model):
def __init__(self,
vocab: Vocabulary,
source_embedder: TextFieldEmbedder,
encoder: Seq2SeqEncoder,
max_decoding_steps: int,
seq_metrics: Metric,
attention: Attention,
beam_size: int = None,
source_namespace: str = 'source_tokens',
target_namespace: str = "tokens",
target_embedding_dim: int = None,
scheduled_sampling_ratio: float = 0.,
use_bleu: bool = False,
encoder_input_dropout: int = 0.0,
encoder_output_dropout: int = 0.0,
dropout=0.0,
feed_output_attention_to_decoder: bool = False,
keep_decoder_output_dim_same_as_encoder: bool = True,
initializer: InitializerApplicator = InitializerApplicator()) -> None:
super(RecombinationSeq2SeqWithCopy, self).__init__(vocab)
self._source_namespace = source_namespace
self._target_namespace = target_namespace
self._scheduled_sampling_ratio = scheduled_sampling_ratio
# We need the start symbol to provide as the input at the first timestep of decoding, and
# end symbol as a way to indicate the end of the decoded sequence.
self._start_index = self.vocab.get_token_index(START_SYMBOL, self._target_namespace)
self._end_index = self.vocab.get_token_index(END_SYMBOL, self._target_namespace)
self._pad_index = self.vocab.get_token_index(self.vocab._padding_token,
self._target_namespace) # pylint: disable=protected-access
# Evaluation Metrics
if use_bleu:
pad_index = self.vocab.get_token_index(self.vocab._padding_token, self._target_namespace) # pylint: disable=protected-access
self._bleu = BLEU(exclude_indices={pad_index, self._end_index, self._start_index})
else:
self._bleu = None
self._seq_metric = seq_metrics
# At prediction time, we use a beam search to find the most likely sequence of target tokens.
beam_size = beam_size or 1
self._max_decoding_steps = max_decoding_steps
self._beam_search = BeamSearch(self._end_index, max_steps=max_decoding_steps, beam_size=beam_size)
# Dense embedding of source vocab tokens.
self._source_embedder = source_embedder
# Encoder
# Encodes the sequence of source embeddings into a sequence of hidden states.
self._encoder = encoder
self._encoder_output_dim = self._encoder.get_output_dim()
# Attention mechanism applied to the encoder output for each step.
self._attention = attention
self._feed_output_attention_to_decoder = feed_output_attention_to_decoder
        # Resolve the embedding-dim default here, before it is first used below.
        target_embedding_dim = target_embedding_dim or source_embedder.get_output_dim()
        if self._feed_output_attention_to_decoder:
# If using attention, a weighted average over encoder outputs will be concatenated
# to the previous target embedding to form the input to the decoder at each
# time step.
self._decoder_input_dim = self._encoder_output_dim + target_embedding_dim
else:
# Otherwise, the input to the decoder is just the previous target embedding.
self._decoder_input_dim = target_embedding_dim
# Decoder
# Dense embedding of vocab words in the target space.
num_classes = self.vocab.get_vocab_size(self._target_namespace)
self._num_classes = num_classes
self._target_embedder = Embedding(num_classes, target_embedding_dim)
# TODO: relax this assumption
# Decoder output dim needs to be the same as the encoder output dim since we initialize the
# hidden state of the decoder with the final hidden state of the encoder.
self._keep_decoder_output_dim_same_as_encoder = keep_decoder_output_dim_same_as_encoder
if not self._keep_decoder_output_dim_same_as_encoder:
self._decoder_output_dim = int(self._encoder_output_dim / 2) if encoder.is_bidirectional() \
else self._encoder_output_dim
else:
self._decoder_output_dim = self._encoder_output_dim
self._decoder_cell = LSTMCell(self._decoder_input_dim, self._decoder_output_dim)
self._transform_decoder_init_state = torch.nn.Sequential(
torch.nn.Linear(self._encoder_output_dim, self._decoder_output_dim),
torch.nn.Tanh()
)
# Generate Score
self._output_projection_layer = Linear(self._decoder_output_dim + self._encoder_output_dim, num_classes)
# Dropout Layers
self._encoder_input_dropout = torch.nn.Dropout(p=encoder_input_dropout)
self._encoder_output_dropout = torch.nn.Dropout(p=encoder_output_dropout)
self._output_dropout = torch.nn.Dropout(p=dropout)
self._embedded_dropout = torch.nn.Dropout(p=dropout)
initializer(self)
def _prepare_output_projections(self,
last_predictions: torch.Tensor,
state: Dict[str, torch.Tensor])\
-> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
# pylint: disable=line-too-long
"""
        Decode the current state and last prediction to produce projections
into the target space, which can then be used to get probabilities of
each target token for the next step.
Add dropout before the softmax classifier (Following "Language to Logical Form with Neural Attention")
Inputs are the same as for `take_step()`.
last_predictions: (group_size,)
"""
# shape: (group_size, max_input_sequence_length, encoder_output_dim)
encoder_outputs = state["encoder_outputs"]
# shape: (group_size, max_input_sequence_length)
source_mask = state["source_mask"]
# shape: (group_size, decoder_output_dim)
decoder_hidden = state["decoder_hidden"]
# shape: (group_size, decoder_output_dim)
decoder_context = state["decoder_context"]
# shape: (group_size, target_embedding_dim)
copy_mask = (last_predictions < self._num_classes).long()
embedded_input = self._target_embedder(last_predictions * copy_mask)
if not self.training and copy_mask.sum() < copy_mask.size(0):
# Copy, Retrieve target token
mapped_indices = list()
source_token_ids = state['source_token_ids']
for gidx, idx in enumerate(last_predictions):
if idx >= self._num_classes:
source_idx = idx - self._num_classes
source_token_id = int(source_token_ids[gidx,source_idx])
token = self.vocab.get_token_from_index(source_token_id, self._source_namespace)
tid = self.vocab.get_token_index(token, self._target_namespace)
mapped_indices.append(tid)
else:
mapped_indices.append(self._pad_index)
# mapped_indices to tensor
mapped_indices = torch.from_numpy(numpy.array(mapped_indices))
mapped_indices = mapped_indices.to(last_predictions.device)
copyed_embedded_input = self._target_embedder(mapped_indices)
unsqueezed_copy_mask = copy_mask.unsqueeze(dim=1).float()
embedded_input = embedded_input * unsqueezed_copy_mask + copyed_embedded_input * (1 - unsqueezed_copy_mask)
embedded_input = self._embedded_dropout(embedded_input)
if self._feed_output_attention_to_decoder:
# shape: (group_size, decoder_output_dim + target_embedding_dim)
decoder_input = torch.cat((embedded_input, state["attention_context"]), -1)
else:
# shape: (group_size, target_embedding_dim)
decoder_input = embedded_input
# shape (decoder_hidden): (group_size, decoder_output_dim)
# shape (decoder_context): (group_size, decoder_output_dim)
decoder_hidden, decoder_context = self._decoder_cell(
decoder_input,
(decoder_hidden, decoder_context))
state["decoder_hidden"] = decoder_hidden
state["decoder_context"] = decoder_context
# output_attended_input: shape: (group_size, encoder_output_dim)
# attention_weights shape: (group_size, max_input_sequence_length)
output_attended_input, attention_weights = self._prepare_output_attended_input(
decoder_hidden,
encoder_outputs,
source_mask
)
if self._feed_output_attention_to_decoder:
state["attention_context"] = output_attended_input
output_projection_input = torch.cat((decoder_hidden, output_attended_input), -1)
dropped_output_projection_input = self._output_dropout(output_projection_input)
# shape: (group_size, num_classes)
output_projections = self._output_projection_layer(dropped_output_projection_input)
# shape: (group_size, num_classes + max_input_sequence_length)
output_projections = torch.cat((output_projections, attention_weights), -1)
return output_projections, state
def take_step(self,
last_predictions: torch.Tensor,
state: Dict[str, torch.Tensor]) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
"""
Take a decoding step. This is called by the beam search class.
Parameters
----------
last_predictions : ``torch.Tensor``
A tensor of shape ``(group_size,)``, which gives the indices of the predictions
during the last time step.
state : ``Dict[str, torch.Tensor]``
A dictionary of tensors that contain the current state information
needed to predict the next step, which includes the encoder outputs,
the source mask, and the decoder hidden state and context. Each of these
tensors has shape ``(group_size, *)``, where ``*`` can be any other number
of dimensions.
Returns
-------
Tuple[torch.Tensor, Dict[str, torch.Tensor]]
A tuple of ``(log_probabilities, updated_state)``, where ``log_probabilities``
is a tensor of shape ``(group_size, num_classes)`` containing the predicted
log probability of each class for the next step, for each item in the group,
while ``updated_state`` is a dictionary of tensors containing the encoder outputs,
source mask, and updated decoder hidden state and context.
Notes
-----
We treat the inputs as a batch, even though ``group_size`` is not necessarily
equal to ``batch_size``, since the group may contain multiple states
for each source sentence in the batch.
"""
# shape: (group_size, num_classes + max_input_sequence_length)
output_projections, state = self._prepare_output_projections(last_predictions, state)
source_mask = state['source_mask']
group_size = source_mask.size(0)
# (batch_size, num_classes + max_input_sequence_length)
normalization_mask = torch.cat([source_mask.new_ones((group_size, self._num_classes)),
source_mask], dim=-1)
# shape: (group_size, num_classes + max_input_sequence_length)
class_log_probabilities = util.masked_log_softmax(output_projections, normalization_mask, dim=-1)
return class_log_probabilities, state
@overrides
def forward(self, # type: ignore
source_tokens: Dict[str, torch.LongTensor],
target_tokens: Dict[str, torch.LongTensor] = None,
target_source_token_map: torch.Tensor = None,
meta_field: List[Dict] = None,
) -> Dict[str, torch.Tensor]:
# pylint: disable=arguments-differ
"""
        Make a forward pass with decoder logic for producing the entire target sequence.
Parameters
----------
source_tokens : ``Dict[str, torch.LongTensor]``
The output of `TextField.as_array()` applied on the source `TextField`. This will be
passed through a `TextFieldEmbedder` and then through an encoder.
target_tokens : ``Dict[str, torch.LongTensor]``, optional (default = None)
Output of `Textfield.as_array()` applied on target `TextField`. We assume that the
target tokens are also represented as a `TextField`.
target_source_token_map: (batch_size, target_length, source_length)
Returns
-------
Dict[str, torch.Tensor]
"""
state = self._encode(source_tokens)
if target_tokens:
state = self._init_decoder_state(state)
# The `_forward_loop` decodes the input sequence and computes the loss during training
# and validation.
output_dict = self._forward_loop(state, target_tokens, target_source_token_map)
else:
output_dict = {}
if not self.training:
state = self._init_decoder_state(state)
predictions = self._forward_beam_search(state)
output_dict.update(predictions)
output_dict.update({"source_token_ids": source_tokens['tokens']})
if target_tokens:
# shape: (batch_size, beam_size, max_sequence_length)
top_k_predictions = output_dict["predictions"]
# shape: (batch_size, max_predicted_sequence_length)
best_predictions = self.map_predictions(top_k_predictions[:, 0, :], source_tokens['tokens'], meta_field)
if self._bleu:
self._bleu(best_predictions, target_tokens["tokens"])
if self._seq_metric:
self._seq_metric(
best_predictions.float(),
gold_labels=target_tokens["tokens"][:, 1:].float(),
mask=util.get_text_field_mask(
target_tokens).float()[:, 1:]
)
return output_dict
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
Finalize predictions.
This method overrides ``Model.decode``, which gets called after ``Model.forward``, at test
time, to finalize predictions. The logic for the decoder part of the encoder-decoder lives
within the ``forward`` method.
This method trims the output predictions to the first end symbol, replaces indices with
corresponding tokens, and adds a field called ``predicted_tokens`` to the ``output_dict``.
"""
predicted_indices = output_dict["predictions"]
if not isinstance(predicted_indices, numpy.ndarray):
predicted_indices = predicted_indices.detach().cpu().numpy()
all_predicted_tokens = []
for indices in predicted_indices:
# Beam search gives us the top k results for each source sentence in the batch
# but we just want the single best.
if len(indices.shape) > 1:
indices = indices[0]
indices = list(indices)
# Collect indices till the first end_symbol
if self._end_index in indices:
indices = indices[:indices.index(self._end_index)]
predicted_tokens = list()
for x in indices:
if x < self._num_classes:
predicted_tokens.append(self.vocab.get_token_from_index(x, namespace=self._target_namespace))
else:
source_idx = x - self._num_classes
text = "@@copy@@%d" % int(source_idx)
token = Token(text)
# source_token_id = int(output_dict['source_token_ids'][0][source_idx])
# token = self.vocab.get_token_from_index(source_token_id, self._source_namespace)
predicted_tokens.append(token)
all_predicted_tokens.append(predicted_tokens)
output_dict["predicted_tokens"] = all_predicted_tokens
return output_dict
def _encode(self, source_tokens: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
# shape: (batch_size, max_input_sequence_length, encoder_input_dim)
embedded_input = self._source_embedder(source_tokens)
# shape: (batch_size, max_input_sequence_length)
source_mask = util.get_text_field_mask(source_tokens)
# shape: (batch_size, max_input_sequence_length, encoder_output_dim)
embedded_input = self._encoder_input_dropout(embedded_input)
encoder_outputs = self._encoder(embedded_input, source_mask)
encoder_outputs = self._encoder_output_dropout(encoder_outputs)
return {
"source_token_ids": source_tokens['tokens'],
"source_mask": source_mask,
"encoder_outputs": encoder_outputs,
}
def _init_decoder_state(self, state: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
batch_size = state["source_mask"].size(0)
# shape: (batch_size, encoder_output_dim)
final_encoder_output = util.get_final_encoder_states(
state["encoder_outputs"],
state["source_mask"],
self._encoder.is_bidirectional())
# Initialize the decoder hidden state with the final output of the encoder.
# shape: (batch_size, decoder_output_dim)
state["decoder_hidden"] = self._transform_decoder_init_state(final_encoder_output)
# shape: (batch_size, decoder_output_dim)
state["decoder_context"] = state["encoder_outputs"].new_zeros(batch_size, self._decoder_output_dim)
if self._feed_output_attention_to_decoder:
state["attention_context"] = state["encoder_outputs"].new_zeros(batch_size, self._encoder_output_dim)
return state
def _forward_loop(self,
state: Dict[str, torch.Tensor],
target_tokens: Dict[str, torch.LongTensor] = None,
target_source_token_map: torch.Tensor = None
) -> Dict[str, torch.Tensor]:
"""
Make forward pass during training or do greedy search during prediction.
Notes
-----
We really only use the predictions from the method to test that beam search
with a beam size of 1 gives the same results.
"""
# shape: (batch_size, max_input_sequence_length)
source_mask = state["source_mask"]
batch_size = source_mask.size()[0]
if target_tokens:
# shape: (batch_size, max_target_sequence_length)
targets = target_tokens["tokens"]
_, target_sequence_length = targets.size()
# The last input from the target is either padding or the end symbol.
# Either way, we don't have to process it.
num_decoding_steps = target_sequence_length - 1
else:
num_decoding_steps = self._max_decoding_steps
# Initialize target predictions with the start index.
# shape: (batch_size,)
last_predictions = source_mask.new_full((batch_size,), fill_value=self._start_index)
step_logits: List[torch.Tensor] = []
step_predictions: List[torch.Tensor] = []
for timestep in range(num_decoding_steps):
if self.training and torch.rand(1).item() < self._scheduled_sampling_ratio:
                # With probability ``_scheduled_sampling_ratio``, feed the model's own
                # previous predictions back in during training (scheduled sampling).
# shape: (batch_size,)
input_choices = last_predictions
elif not target_tokens:
# shape: (batch_size,)
input_choices = last_predictions
else:
# shape: (batch_size,)
input_choices = targets[:, timestep]
# shape: (batch_size, num_classes + max_input_sequence_length)
output_projections, state = self._prepare_output_projections(input_choices, state)
# list of tensors, shape: (batch_size, 1, num_classes + max_input_sequence_length)
step_logits.append(output_projections.unsqueeze(1))
# (batch_size, num_classes + max_input_sequence_length)
normalization_mask = torch.cat([source_mask.new_ones((batch_size, self._num_classes)),
source_mask], dim=-1)
class_probabilities = util.masked_softmax(output_projections, normalization_mask, dim=-1)
# shape (predicted_classes): (batch_size,)
_, predicted_classes = torch.max(class_probabilities, 1)
# shape (predicted_classes): (batch_size,)
last_predictions = predicted_classes
step_predictions.append(last_predictions.unsqueeze(1))
# shape: (batch_size, num_decoding_steps)
predictions = torch.cat(step_predictions, 1)
output_dict = {"predictions": predictions}
if target_tokens:
# shape: (batch_size, num_decoding_steps, num_classes + max_input_sequence_length)
logits = torch.cat(step_logits, 1)
# Compute loss.
target_mask = util.get_text_field_mask(target_tokens)
loss = self._get_loss(logits, targets, target_mask, target_source_token_map)
output_dict["loss"] = loss
return output_dict
def _forward_beam_search(self, state: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""Make forward pass during prediction using a beam search."""
batch_size = state["source_mask"].size()[0]
start_predictions = state["source_mask"].new_full((batch_size,), fill_value=self._start_index)
# shape (all_top_k_predictions): (batch_size, beam_size, num_decoding_steps)
# shape (log_probabilities): (batch_size, beam_size)
all_top_k_predictions, log_probabilities = self._beam_search.search(
start_predictions, state, self.take_step)
output_dict = {
"class_log_probabilities": log_probabilities,
"predictions": all_top_k_predictions,
}
return output_dict
def _prepare_output_attended_input(self,
decoder_hidden_state: torch.Tensor = None,
encoder_outputs: torch.Tensor = None,
encoder_outputs_mask: torch.LongTensor = None) \
-> Tuple[torch.Tensor, torch.Tensor]:
"""Apply ouput attention over encoder outputs and decoder state."""
# Ensure mask is also a FloatTensor. Or else the multiplication within
# attention will complain.
# shape: (batch_size, max_input_sequence_length)
encoder_outputs_mask = encoder_outputs_mask.float()
# shape: (batch_size, max_input_sequence_length)
input_weights = self._attention(
decoder_hidden_state, encoder_outputs, encoder_outputs_mask)
normalized_weights = util.masked_softmax(input_weights, encoder_outputs_mask)
# shape: (batch_size, encoder_output_dim)
attended_input = util.weighted_sum(encoder_outputs, normalized_weights)
return attended_input, input_weights
def _get_loss(self,
logits: torch.FloatTensor,
targets: torch.LongTensor,
target_mask: torch.LongTensor,
target_source_token_map: torch.Tensor) -> torch.Tensor:
"""
Compute loss.
Takes logits (unnormalized outputs from the decoder) of size (batch_size,
num_decoding_steps, num_classes), target indices of size (batch_size, num_decoding_steps+1)
and corresponding masks of size (batch_size, num_decoding_steps+1) steps and computes cross
entropy loss while taking the mask into account.
The length of ``targets`` is expected to be greater than that of ``logits`` because the
decoder does not need to compute the output corresponding to the last timestep of
``targets``. This method aligns the inputs appropriately to compute the loss.
``target_source_token_map``: (batch_size, target_length, source_length)
During training, we want the logit corresponding to timestep i to be similar to the target
token from timestep i + 1. That is, the targets should be shifted by one timestep for
appropriate comparison. Consider a single example where the target has 3 words, and
padding is to 7 tokens.
The complete sequence would correspond to <S> w1 w2 w3 <E> <P> <P>
and the mask would be 1 1 1 1 1 0 0
and let the logits be l1 l2 l3 l4 l5 l6
We actually need to compare:
the sequence w1 w2 w3 <E> <P> <P>
with masks 1 1 1 1 0 0
against l1 l2 l3 l4 l5 l6
(where the input was) <S> w1 w2 w3 <E> <P>
"""
# shape: (batch_size, num_decoding_steps)
relevant_targets = targets[:, 1:].contiguous()
batch_size, num_decoding_steps = relevant_targets.size()
# shape: (batch_size, num_decoding_steps)
relevant_mask = target_mask[:, 1:].contiguous()
# shape: (batch_size, num_decoding_steps, source_length)
target_source_token_map = target_source_token_map[:, 1:, :]
probs = F.softmax(logits, dim=-1)
# (batch_size * num_decoding_steps, num_classes)
generate_probs_flat = probs[:, :, :self._num_classes].view(-1, self._num_classes)
relevant_targets_flat = relevant_targets.view(-1, 1).long()
# (batch_size, num_decoding_steps)
generate_probs = torch.gather(generate_probs_flat, dim=1, index=relevant_targets_flat).reshape(batch_size,
num_decoding_steps)
# (batch_size, num_decoding_steps)
copy_probs = (probs[:, :, self._num_classes:] * target_source_token_map).sum(dim=-1)
target_log_probs = torch.log(generate_probs + copy_probs + 1e-13)
target_log_probs *= relevant_mask.float()
negative_log_likelihood = -1 * target_log_probs
weights_batch_sum = relevant_mask.sum(-1).float()
per_batch_loss = negative_log_likelihood.sum(dim=1) / (weights_batch_sum + 1e-13)
num_non_empty_sequences = ((weights_batch_sum > 0).float().sum() + 1e-13)
return per_batch_loss.sum() / num_non_empty_sequences
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
all_metrics: Dict[str, float] = {}
if not self.training:
if self._bleu:
all_metrics.update(self._bleu.get_metric(reset=reset))
if self._seq_metric:
all_metrics.update(
{"accuracy": self._seq_metric.get_metric(reset)['accuracy']})
return all_metrics
def map_predictions(self, predictions: torch.LongTensor,
source_token_ids: torch.LongTensor,
meta_field: List[Dict]) -> torch.LongTensor:
"""
Map those copy indices to target idx
:return:
"""
batch_size, max_length = predictions.size()
mapped_predictions = predictions.new_full((batch_size,max_length), fill_value=self._pad_index)
for i in range(batch_size):
source_tokens_to_copy = meta_field[i]['source_tokens_to_copy']
for j in range(max_length):
idx = predictions[i, j]
if idx < self._num_classes:
mapped_predictions[i, j] = idx
else:
# Copy
source_idx = idx - self._num_classes
                    if source_idx >= len(source_tokens_to_copy):  # out-of-range copy index falls back to padding
tid = self._pad_index
else:
token = source_tokens_to_copy[source_idx]
# source_token_id = int(source_token_ids[i, source_idx])
# token = self.vocab.get_token_from_index(source_token_id, self._source_namespace)
tid = self.vocab.get_token_index(token, self._target_namespace)
mapped_predictions[i, j] = tid
return mapped_predictions.long()
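# Hedged sketch (not part of the original model): a tiny numeric illustration of how
# `_get_loss` above combines generation and copy probabilities. For each decoding step the
# probability assigned to the gold token is its generation probability plus the probability
# of copying it from any matching source position (given by `target_source_token_map`); the
# loss is the negative log of that sum. `_copy_loss_sketch` is an illustrative name only.
def _copy_loss_sketch():
    # 1 example, 2 decoding steps, 3 generate classes followed by 2 source positions.
    probs = torch.tensor([[[0.2, 0.1, 0.3, 0.3, 0.1],
                           [0.6, 0.1, 0.1, 0.1, 0.1]]])
    relevant_targets = torch.tensor([[2, 0]])  # gold vocabulary ids per step
    target_source_token_map = torch.tensor([[[1., 0.],    # step 1: gold token also appears at source position 0
                                             [0., 0.]]])  # step 2: gold token cannot be copied
    generate_probs = probs[:, :, :3].gather(dim=2, index=relevant_targets.unsqueeze(-1)).squeeze(-1)
    copy_probs = (probs[:, :, 3:] * target_source_token_map).sum(dim=-1)
    return -torch.log(generate_probs + copy_probs + 1e-13).mean()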
| 29,535 | 47.182708 | 137 |
py
|
Unimer
|
Unimer-master/neural_models/seq2seq_model.py
|
# coding=utf8
import torch
from overrides import overrides
from typing import Dict, List, Tuple
from allennlp.training.metrics import Metric
from allennlp.models.model import Model
from allennlp.data.vocabulary import Vocabulary
from allennlp.nn import util
from allennlp.modules import Attention, TextFieldEmbedder, Seq2SeqEncoder
from allennlp.models.encoder_decoders.simple_seq2seq import SimpleSeq2Seq
from allennlp.modules.similarity_functions import SimilarityFunction
class Seq2SeqModel(SimpleSeq2Seq):
def __init__(self,
vocab: Vocabulary,
source_embedder: TextFieldEmbedder,
encoder: Seq2SeqEncoder,
max_decoding_steps: int,
attention: Attention = None,
attention_function: SimilarityFunction = None,
beam_size: int = None,
target_namespace: str = "tokens",
target_embedding_dim: int = None,
scheduled_sampling_ratio: float = 0.,
use_bleu: bool = True,
seq_metrics=None) -> None:
self._seq_metric = seq_metrics
super(Seq2SeqModel, self).__init__(
vocab,
source_embedder,
encoder,
max_decoding_steps,
attention,
attention_function,
beam_size,
target_namespace,
target_embedding_dim,
scheduled_sampling_ratio,
use_bleu)
@overrides
def forward(self, # type: ignore
source_tokens: Dict[str, torch.LongTensor],
target_tokens: Dict[str, torch.LongTensor] = None) -> Dict[str, torch.Tensor]:
state = self._encode(source_tokens)
if target_tokens:
state = self._init_decoder_state(state)
# The `_forward_loop` decodes the input sequence and computes the loss during training
# and validation.
output_dict = self._forward_loop(state, target_tokens)
else:
output_dict = {}
if not self.training:
state = self._init_decoder_state(state)
predictions = self._forward_beam_search(state)
output_dict.update(predictions)
if target_tokens:
# shape: (batch_size, beam_size, max_sequence_length)
top_k_predictions = output_dict["predictions"]
# shape: (batch_size, max_predicted_sequence_length)
best_predictions = top_k_predictions[:, 0, :]
if self._bleu:
self._bleu(best_predictions, target_tokens["tokens"])
if self._seq_metric:
self._seq_metric(
best_predictions.float(),
gold_labels=target_tokens["tokens"][:, 1:].float(),
mask=util.get_text_field_mask(
target_tokens).float()[:, 1:]
)
return output_dict
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
all_metrics: Dict[str, float] = {}
if not self.training:
if self._bleu:
all_metrics.update(self._bleu.get_metric(reset=reset))
if self._seq_metric:
all_metrics.update(
{"accuracy": self._seq_metric.get_metric(reset)['accuracy']})
return all_metrics
| 3,441 | 37.244444 | 98 |
py
|
Unimer
|
Unimer-master/neural_models/utils.py
|
# coding=utf8
import numpy
import torch
from typing import List
def has_nan(x: torch.Tensor) -> bool:
    return bool(torch.isnan(x).any())
def matrix_cosine_similarity(x: torch.Tensor, y: torch.Tensor, eps: float=1e-8):
"""
:param x (batch_size, length_1, dim)
:param y (batch_size, length_2, dim)
:return
(batch_size, length_1, length_2)
"""
length_1, length_2 = x.size(1), y.size(1)
# shape: (batch_size, length_1, length_2)
dot_product = x.bmm(y.permute(0, 2, 1))
# shape: (batch_size, length_1), (batch_size, length_2)
x_norm, y_norm = x.norm(dim=-1, p=None), y.norm(dim=-1, p=None)
# added eps for numerical stability
x_norm = torch.max(x_norm, eps * x_norm.new_ones(x_norm.size()))
y_norm = torch.max(y_norm, eps * y_norm.new_ones(y_norm.size()))
expanded_x_norm = x_norm.unsqueeze(-1).repeat(1, 1, length_2)
expanded_y_norm = y_norm.unsqueeze(1).repeat(1, length_1, 1)
# shape: (batch_size, length_1, length_2)
norm = expanded_x_norm * expanded_y_norm
similarity = dot_product / norm
return similarity
def get_one_hot_mask(num_classes: int, ids: List):
targets = numpy.array(ids, dtype=int)
one_hot = numpy.eye(num_classes)[targets]
return torch.from_numpy(one_hot.sum(0))
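# Hedged sketch (not part of the original utilities): a quick sanity check showing that
# matrix_cosine_similarity matches pairwise torch.nn.functional.cosine_similarity; the
# helper name below is illustrative only.
def _matrix_cosine_similarity_check():
    import torch.nn.functional as F
    x, y = torch.randn(2, 3, 4), torch.randn(2, 5, 4)
    ours = matrix_cosine_similarity(x, y)  # (2, 3, 5)
    reference = F.cosine_similarity(x.unsqueeze(2).expand(2, 3, 5, 4),
                                    y.unsqueeze(1).expand(2, 3, 5, 4), dim=-1)
    assert torch.allclose(ours, reference, atol=1e-5)
    return ours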
| 1,279 | 31.820513 | 80 |
py
|
Unimer
|
Unimer-master/neural_models/GNN.py
|
# coding=utf8
import numpy
import torch
import torch.nn as nn
from allennlp.models.model import Model
from allennlp.data.tokenizers import Token
from allennlp.common.util import START_SYMBOL, END_SYMBOL
from allennlp.data.vocabulary import Vocabulary
from allennlp.modules import Embedding
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.training.metrics import Metric
from allennlp.nn import util
from overrides import overrides
from typing import Dict, List, Union, Tuple
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_
from . import utils as nn_utils
from .modules.gnn_encoder import GNNTransformerEncoderLayer, GNNTransformerEncoder, \
GNNTransformerDecoderLayer, GNNTransformerDecoder, get_decode_edge_mask
class GNNCopyTransformer(Model):
"""
Transformer-based Seq2Seq Model
"""
def __init__(self, vocab: Vocabulary,
source_namespace: str,
target_namespace: str,
segment_namespace: str,
max_decoding_step: int,
token_based_metric: Metric,
source_embedding_dim: int = 256,
target_embedding_dim: int = 256,
encoder_d_model: int = 512,
decoder_d_model: int = 512,
encoder_nhead: int = 8,
decoder_nhead: int = 8,
num_encoder_layers: int = 6,
num_decoder_layers: int = 6,
encoder_dim_feedforward: int = 2048,
decoder_dim_feedforward: int = 2048,
dropout: float = 0.1,
beam_size: int = 1,
token_symbol: str = '@token@',
non_func_symbol: str = "@nonfunc@",
nlabels: int = 0,
max_decode_clip_range: int = 0,
encode_edge_label_with_matrix: bool = True,
is_test: bool = False,
):
super().__init__(vocab)
self._source_namespace = source_namespace
self._target_namespace = target_namespace
self._segment_namespace = segment_namespace
self._src_start_index = self.vocab.get_token_index(START_SYMBOL, self._source_namespace)
self._src_end_index = self.vocab.get_token_index(END_SYMBOL, self._source_namespace)
self._start_index = self.vocab.get_token_index(START_SYMBOL, self._target_namespace)
self._end_index = self.vocab.get_token_index(END_SYMBOL, self._target_namespace)
self._oov_index = self.vocab.get_token_index(self.vocab._oov_token,
self._target_namespace) # pylint: disable=protected-access
self._pad_index = self.vocab.get_token_index(self.vocab._padding_token,
self._target_namespace)
self._token_index = self.vocab.get_token_index(token_symbol, self._segment_namespace)
self._non_func_symbol_index = self.vocab.get_token_index(non_func_symbol, self._segment_namespace)
self._segment_pad_index = self.vocab.get_token_index(self.vocab._padding_token, self._segment_namespace)
# Source Embedding
num_source_words = self.vocab.get_vocab_size(self._source_namespace)
self._use_glove = False
self._source_embedding = Embedding(num_source_words, source_embedding_dim)
# Segments
num_segment_types = self.vocab.get_vocab_size(self._segment_namespace)
segment_embedding = Embedding(num_segment_types, source_embedding_dim)
self._segment_embedder = BasicTextFieldEmbedder({'tokens': segment_embedding})
num_classes = self.vocab.get_vocab_size(self._target_namespace)
self._num_classes = num_classes
self._target_embedder = Embedding(num_classes, target_embedding_dim)
# Encoder
self._nlabels = nlabels # number of edge labels
if self._nlabels == 0:
self._use_gnn_encoder = False
encoder_layer = nn.TransformerEncoderLayer(encoder_d_model, encoder_nhead, encoder_dim_feedforward, dropout)
encoder_norm = nn.LayerNorm(encoder_d_model)
self._encoder = nn.TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
else:
self._use_gnn_encoder = True
print("Use GNN Encoder")
encoder_layer = GNNTransformerEncoderLayer(d_model=encoder_d_model, nhead=encoder_nhead,
dim_feedforward=encoder_dim_feedforward,
dropout=dropout, nlabels=self._nlabels,
is_matrix=encode_edge_label_with_matrix)
encoder_norm = nn.LayerNorm(encoder_d_model)
self._encoder = GNNTransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
# Decoder
self._max_decode_clip_range = max_decode_clip_range
if max_decode_clip_range == 0:
self._decode_nlabels = 0
self._decode_use_relative_position = False
decoder_layer = nn.TransformerDecoderLayer(decoder_d_model, decoder_nhead, decoder_dim_feedforward, dropout)
decoder_norm = nn.LayerNorm(decoder_d_model)
self._decoder = nn.TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm)
else:
print("Use GNN Decoder")
self._decode_nlabels = self._max_decode_clip_range + 1
self._decode_use_relative_position = True
decoder_layer = GNNTransformerDecoderLayer(d_model=decoder_d_model, nhead=decoder_nhead,
dim_feedforward=decoder_dim_feedforward,
dropout=dropout, nlabels=self._decode_nlabels,
is_matrix=encode_edge_label_with_matrix)
decoder_norm = nn.LayerNorm(decoder_d_model)
self._decoder = GNNTransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm)
# Decode Gate
self.gate_linear = nn.Linear(decoder_d_model, 1)
self.copy_word_prj = nn.Linear(decoder_d_model, encoder_d_model, bias=False)
self._source_embedding_dim = source_embedding_dim
self._target_embedding_dim = target_embedding_dim
self._encoder_d_model = encoder_d_model
self._decoder_d_model = decoder_d_model
self._encoder_nhead = encoder_nhead
self._decoder_nhead = decoder_nhead
self._max_decoding_step = max_decoding_step
self._token_based_metric = token_based_metric
self._beam_size = beam_size
self._is_test = is_test
self._reset_parameters()
def _reset_parameters(self):
r"""Initiate parameters in the transformer model."""
for p in self.parameters():
if p.dim() > 1:
xavier_uniform_(p)
@overrides
def forward(self,
source_tokens: Dict[str, torch.LongTensor],
segments: Dict[str, torch.LongTensor],
source_entity_length: torch.LongTensor,
edge_mask: torch.Tensor,
copy_targets: torch.Tensor = None,
generate_targets: torch.Tensor = None,
target_tokens: Dict[str, torch.LongTensor] = None,
meta_field: Dict = None,
) -> Dict[str, torch.Tensor]:
assert self._nlabels == edge_mask.size(1)
state = self._encode(source_tokens, segments, source_entity_length, edge_mask)
if self.training:
state = self._train_decode(state, target_tokens, generate_targets)
# shape: (batch_size, decode_length, d_model)
generate_mask = state["generate_mask"]
decoder_outputs = state["decoder_outputs"]
decode_length = decoder_outputs.size(1)
# Generate scores
# shape: (batch_size, decode_length, num_classes)
generate_scores = self.get_generate_scores(decoder_outputs)
# shape: (batch_size, encode_length)
entity_mask = 1 - ((segments['tokens'] == self._token_index) |
(segments['tokens'] == self._non_func_symbol_index) |
(segments['tokens'] == self._segment_pad_index)).float()
entity_mask = entity_mask.unsqueeze(1).repeat(1, decode_length, 1)
# shape: (batch_size, decode_length, encode_length)
copy_scores = self.get_copy_scores(state, decoder_outputs)
# shape: (batch_size, decode_length, 1)
# generate_gate = F.sigmoid(self.gate_linear(decoder_outputs))
# copy_gate = 1 - generate_gate
scores = torch.cat((generate_scores, copy_scores), dim=-1)
# scores = torch.cat((generate_scores, copy_scores), dim=-1)
# shape: (batch_size, decode_length, num_classes + encode_length)
score_mask = torch.cat((entity_mask.new_ones((copy_scores.size(0), decode_length, self._num_classes)),
entity_mask), dim=-1)
class_probabilities = util.masked_softmax(scores, mask=score_mask, dim=-1)
_, predicted_classes = torch.max(class_probabilities, dim=-1, keepdim=False)
targets = target_tokens["tokens"]
target_mask = state["target_mask"]
# shape: (batch_size, max_target_sequence_length)
loss = self._get_loss(class_probabilities, targets, generate_mask, copy_targets, target_mask)
output_dict = {"predictions": predicted_classes, "loss": loss}
predictions = output_dict["predictions"]
pmask = (predictions < self._num_classes).long()
_predictions = predictions * pmask + (predictions - self._num_classes) * (1 - pmask)
target_labels = self._get_target_labels(target_tokens["tokens"], generate_targets)
target_mask = util.get_text_field_mask(target_tokens)
self._token_based_metric(_predictions, gold_labels=target_labels[:, 1:], mask=target_mask[:, 1:])
else:
output_dict = self._eval_decode(state, segments)
if target_tokens:
predictions = output_dict["predictions"]
pmask = (predictions < self._num_classes).long()
_predictions = predictions * pmask + (predictions - self._num_classes) * (1 - pmask)
target_labels = self._get_target_labels(target_tokens["tokens"], generate_targets)
target_mask = util.get_text_field_mask(target_tokens)
self._token_based_metric(_predictions[:, 1:], gold_labels=target_labels[:, 1:], mask=target_mask[:, 1:])
return output_dict
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
Finalize predictions.
This method overrides ``Model.decode``, which gets called after ``Model.forward``, at test
time, to finalize predictions. The logic for the decoder part of the encoder-decoder lives
within the ``forward`` method.
This method trims the output predictions to the first end symbol, replaces indices with
corresponding tokens, and adds a field called ``predicted_tokens`` to the ``output_dict``.
"""
predicted_indices = output_dict["predictions"]
if not isinstance(predicted_indices, numpy.ndarray):
predicted_indices = predicted_indices.detach().cpu().numpy()
all_predicted_tokens = []
for indices in predicted_indices:
# Beam search gives us the top k results for each source sentence in the batch
# but we just want the single best.
if len(indices.shape) > 1:
indices = indices[0]
indices = list(indices)
if self._end_index in indices:
indices = indices[:indices.index(self._end_index)]
predicted_tokens = list()
for x in indices:
if x in [self._end_index, self._start_index, self._pad_index]:
continue
if x >= self._num_classes:
index = x - self._num_classes
predicted_tokens.append(Token("@entity_%d" % index))
else:
w = self.vocab.get_token_from_index(x, namespace=self._target_namespace)
predicted_tokens.append(w)
all_predicted_tokens.append(predicted_tokens)
output_dict["predicted_tokens"] = all_predicted_tokens
return output_dict
def _embed_source(self, source_tokens: Dict[str, torch.Tensor], source_entity_length: torch.LongTensor):
"""
:param source_tokens
:param source_entity_length: (batch_size, max_token_num)
:return
(batch_size, max_token_num, embedding_dim)
"""
token_ids = source_tokens['tokens']
embedded = self._source_embedding(token_ids)
batched_embedded = list()
embedding_dim = embedded.size(-1)
batch_size, max_token_num = source_entity_length.size()
for _embedded, _length in zip(embedded, source_entity_length.long()):
merged_embedded_input = list()
idx = 0
for length in _length:
if length > 0:
embedding = torch.mean(_embedded[idx:idx + length, :], dim=0)
merged_embedded_input.append(embedding)
idx += length
else:
break
merged_embedded_input = torch.stack(merged_embedded_input, dim=0)
pad_num = max_token_num - merged_embedded_input.size(0)
if pad_num > 0:
merged_embedded_input = torch.cat((merged_embedded_input,
merged_embedded_input.new_zeros([pad_num, embedding_dim])), dim=0)
batched_embedded.append(merged_embedded_input)
# shape: (batch_size, max_token_num, embedding_dim)
batched_embedded = torch.stack(batched_embedded, dim=0)
assert batched_embedded.size(0) == embedded.size(0) and batched_embedded.size(1) == source_entity_length.size(1)
# TODO: Dropout
return batched_embedded
def _encode(self, source_tokens: Dict[str, torch.Tensor], segments: Dict[str, torch.Tensor],
source_entity_length: torch.Tensor, edge_mask: torch.Tensor, ) -> Dict[str, torch.Tensor]:
"""
:param source_tokens:
:param segments:
:param merge_indicators:
:return:
"""
# shape: (batch_size, encode_length, embedding_dim)
source_embedded_input = self._embed_source(source_tokens, source_entity_length)
# shape: (batch_size, encode_length, embedding_dim)
segments_embedded_input = self._segment_embedder(segments)
encode_length = segments_embedded_input.size(1)
assert source_embedded_input.size(1) == segments_embedded_input.size(1)
# token_mask = (segments['tokens'] == self._token_index).unsqueeze(-1).float()
# valid_token_embedded_input = batched_embedded_input * token_mask
# valid_token_embedded_input = util.add_positional_features(valid_token_embedded_input)
# valid_token_embedded_input = batched_embedded_input * (1 - token_mask) + valid_token_embedded_input * token_mask
if self._source_embedding_dim == self._encoder_d_model:
batched_embedded_input = segments_embedded_input + source_embedded_input
final_embedded_input = util.add_positional_features(batched_embedded_input)
else:
batched_embedded_input = torch.cat([source_embedded_input, segments_embedded_input], dim=-1)
final_embedded_input = util.add_positional_features(batched_embedded_input)
# shape: (encode_length, batch_size, d_model)
final_embedded_input = final_embedded_input.permute(1, 0, 2)
# shape: (batch_size, encode_length)
source_mask = util.get_text_field_mask(segments)
source_key_padding_mask = (1 - source_mask.byte()).bool()
if not self._use_gnn_encoder:
# shape: (encode_length, batch_size, d_model)
encoder_outputs = self._encoder(final_embedded_input, src_key_padding_mask=source_key_padding_mask)
else:
# GNN encoders
encoder_outputs = self._encoder(src=final_embedded_input, edge_mask=edge_mask.permute(0, 2, 3, 1),
padding_mask=source_key_padding_mask)
source_token_mask = (segments['tokens'] == self._token_index).float()
return {
"source_mask": source_mask,
"source_key_padding_mask": source_key_padding_mask,
"source_token_mask": source_token_mask,
"encoder_outputs": encoder_outputs,
"source_embedded": batched_embedded_input,
"source_raw_embedded": source_embedded_input,
}
def _train_decode(self, state: Dict[str, torch.Tensor],
target_tokens: [str, torch.Tensor],
generate_targets: torch.Tensor) -> Dict[str, torch.Tensor]:
encoder_outputs = state["encoder_outputs"]
source_key_padding_mask = state["source_key_padding_mask"]
# shape: (batch_size, encode_length, d_model)
source_embedded = state["source_raw_embedded"]
batch_size, _, _ = source_embedded.size()
basic_index = torch.arange(batch_size).to(source_embedded.device).long()
generate_targets = generate_targets.long()
retrieved_target_embedded_input = source_embedded[basic_index.unsqueeze(1), generate_targets][:, :-1, :]
target_embedded_input = self._target_embedder(target_tokens['tokens'])[:, :-1, :]
# shape: (batch_size, max_decode_length)
# where 1 indicates that the target token is generated rather than copied
generate_mask = (generate_targets == 0).float()
target_embedded_input = target_embedded_input * generate_mask[:, :-1].unsqueeze(-1) \
+ retrieved_target_embedded_input * (1 - generate_mask)[:, :-1].unsqueeze(-1)
target_embedded_input = util.add_positional_features(target_embedded_input)
# shape: (max_target_sequence_length - 1, batch_size, d_model)
target_embedded_input = target_embedded_input.permute(1, 0, 2)
# shape: (batch_size, max_target_sequence_length - 1)
"""
key_padding_mask should be a ByteTensor where True values are positions
that should be masked with float('-inf') and False values will be unchanged.
"""
target_mask = util.get_text_field_mask(target_tokens)[:, 1:]
target_key_padding_mask = (1 - target_mask.byte()).bool()
assert target_key_padding_mask.size(1) == target_embedded_input.size(0) and \
target_embedded_input.size(1) == target_key_padding_mask.size(0)
max_target_seq_length = target_key_padding_mask.size(1)
target_additive_mask = (torch.triu(
target_mask.new_ones(max_target_seq_length, max_target_seq_length)) == 1).transpose(0, 1)
target_additive_mask = target_additive_mask.float().masked_fill(target_additive_mask == 0, float('-inf'))
target_additive_mask = target_additive_mask.masked_fill(target_additive_mask == 1, float(0.0))
assert target_embedded_input.size(1) == encoder_outputs.size(1)
source_token_mask = state["source_token_mask"]
memory_key_padding_mask = (1 - source_token_mask).bool()
# memory_key_padding_mask = source_key_padding_mask
if not self._decode_use_relative_position:
# shape: (max_target_sequence_length, batch_size, d_model)
decoder_outputs = self._decoder(target_embedded_input, memory=encoder_outputs,
tgt_mask=target_additive_mask, tgt_key_padding_mask=None,
memory_key_padding_mask=memory_key_padding_mask)
else:
# gnn decoder
edge_mask = get_decode_edge_mask(target_embedded_input,
max_decode_clip_range=self._max_decode_clip_range)
batch_size = edge_mask.size(0)
tgt_padding_mask = torch.tril(edge_mask.new_ones([max_target_seq_length, max_target_seq_length]),
diagonal=0)
tgt_padding_mask = (1 - (tgt_padding_mask.unsqueeze(0).repeat(batch_size, 1, 1))).float()
decoder_outputs = self._decoder(target_embedded_input, edge_mask=edge_mask.permute(0, 2, 3, 1),
memory=encoder_outputs, tgt_padding_mask=tgt_padding_mask,
memory_key_padding_mask=memory_key_padding_mask)
# shape: (batch_size, max_target_sequence_length, d_model)
decoder_outputs = decoder_outputs.permute(1, 0, 2)
state.update({
"decoder_outputs": decoder_outputs,
"target_key_padding_mask": target_key_padding_mask,
"target_mask": target_mask,
"generate_mask": generate_mask
})
return state
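    # Illustrative note (not original code): for max_target_seq_length == 3 the
    # additive causal mask built above is
    #     [[0.,  -inf, -inf],
    #      [0.,   0.,  -inf],
    #      [0.,   0.,   0.]]
    # i.e. row i (query position i) may attend to positions j <= i only; the mask is
    # added to the attention scores inside the decoder.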
def _eval_decode(self, state: Dict[str, torch.Tensor],
segments: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
encoder_outputs = state["encoder_outputs"]
source_key_padding_mask = state["source_key_padding_mask"]
source_embedded = state["source_raw_embedded"]
source_token_mask = state["source_token_mask"]
memory_key_padding_mask = (1 - source_token_mask).bool()
# memory_key_padding_mask = source_key_padding_mask
batch_size = source_key_padding_mask.size(0)
encode_length = source_key_padding_mask.size(1)
log_probs_after_end = encoder_outputs.new_full((batch_size, self._num_classes + encode_length),
fill_value=float("-inf"))
log_probs_after_end[:, self._end_index] = 0.
start_predictions = state["source_mask"].new_full((batch_size, 1), fill_value=self._start_index)
partial_generate_predictions = start_predictions
partial_copy_predictions = state["source_mask"].new_zeros((batch_size, 1))
basic_index = torch.arange(batch_size).to(source_embedded.device).unsqueeze(1).long()
generate_mask = state["source_mask"].new_ones((batch_size, 1)).float()
# shape: (batch_size)
last_prediction = start_predictions.squeeze(1)
for _ in range(self._max_decoding_step):
# shape: (batch_size, partial_len, d_model)
partial_source_embedded_input = source_embedded[basic_index, partial_copy_predictions]
partial_target_embedded_input = self._target_embedder(partial_generate_predictions)
partial_embedded_input = partial_target_embedded_input * generate_mask.unsqueeze(-1) \
+ partial_source_embedded_input * (1 - generate_mask).unsqueeze(-1)
partial_embedded_input = util.add_positional_features(partial_embedded_input)
partial_len = partial_embedded_input.size(1)
partial_embedded_input = partial_embedded_input.permute(1, 0, 2)
mask = (torch.triu(state["source_mask"].new_ones(partial_len, partial_len)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
if not self._decode_use_relative_position:
# shape: (partial_len, batch_size, d_model)
outputs = self._decoder(partial_embedded_input, memory=encoder_outputs,
tgt_mask=mask, memory_key_padding_mask=memory_key_padding_mask)
else:
# gnn decoder
edge_mask = get_decode_edge_mask(partial_embedded_input,
max_decode_clip_range=self._max_decode_clip_range)
tgt_padding_mask = torch.tril(edge_mask.new_ones([partial_len, partial_len]), diagonal=0)
tgt_padding_mask = (1 - tgt_padding_mask.unsqueeze(0).repeat(batch_size, 1, 1)).float()
# shape: (partial_len, batch_size, d_model)
outputs = self._decoder(partial_embedded_input, edge_mask=edge_mask.permute(0, 2, 3, 1),
memory=encoder_outputs, tgt_padding_mask=tgt_padding_mask,
memory_key_padding_mask=memory_key_padding_mask)
outputs = outputs.permute(1, 0, 2)
# shape: (batch_size, d_model)
curr_outputs = outputs[:, -1, :]
# shape: (batch_size, num_classes)
generate_scores = self.get_generate_scores(curr_outputs)
# shape: (batch_size, encode_length)
copy_scores = self.get_copy_scores(state, curr_outputs.unsqueeze(1)).squeeze(1)
# Gate
# shape: (batch_size, 1)
# generate_gate = F.sigmoid(self.gate_linear(curr_outputs))
# copy_gate = 1 - generate_gate
scores = torch.cat((generate_scores, copy_scores), dim=-1)
# scores = torch.cat((generate_scores, copy_scores), dim=-1)
# shape: (batch_size, encode_length)
entity_mask = 1 - ((segments['tokens'] == self._token_index) |
(segments['tokens'] == self._non_func_symbol_index) |
(segments['tokens'] == self._segment_pad_index)).float()
# shape: (batch_size, num_classes + encode_length)
score_mask = torch.cat((entity_mask.new_ones((batch_size, self._num_classes)), entity_mask), dim=-1)
# shape: (batch_size, num_classes + encode_length)
normalized_scores = util.masked_softmax(scores, mask=score_mask, dim=-1)
last_prediction_expanded = last_prediction.unsqueeze(-1).expand(
batch_size, self._num_classes + encode_length
)
# shape: (batch_size, num_classes + encode_length)
cleaned_logits = torch.where(
last_prediction_expanded == self._end_index,
log_probs_after_end,
normalized_scores
)
# shape: (batch_size)
_, predicted = torch.max(input=cleaned_logits, dim=1, keepdim=False)
copy_mask = (predicted >= self._num_classes).long()
generate_predicted = predicted * (1 - copy_mask)
copy_predicted = (predicted - self._num_classes) * copy_mask
partial_copy_predictions = torch.cat((partial_copy_predictions, copy_predicted.unsqueeze(1)), dim=1)
partial_generate_predictions = torch.cat((partial_generate_predictions, generate_predicted.unsqueeze(1)),
dim=1)
generate_mask = torch.cat((generate_mask, (1 - copy_mask).unsqueeze(1).float()), dim=1)
last_prediction = predicted
if (last_prediction == self._end_index).sum() == batch_size:
break
predictions = partial_generate_predictions * generate_mask.long() + \
(1 - generate_mask).long() * (partial_copy_predictions + self._num_classes)
# shape: (batch_size, partial_len)
output_dict = {
"predictions": predictions
}
return output_dict
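    # Illustrative note (not original code): predictions live in a joint index space of
    # size num_classes + encode_length.  With, say, num_classes = 100 and
    # encode_length = 20, a prediction of 42 is vocabulary id 42 (its target embedding
    # is fed back at the next step), while 107 means "copy source position 7" (that
    # position's raw source embedding is fed back and generate_mask is set to 0).
    # Once a sequence has emitted the end symbol, log_probs_after_end forces every
    # later step to keep predicting the end symbol.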
def get_copy_scores(self, state: Dict[str, torch.Tensor],
query: torch.Tensor) -> torch.Tensor:
"""
:param state:
:param query: (batch_size, length, d_model)
:return:
"""
# shape: (batch_size, encode_length, d_model)
encoder_outputs = state["encoder_outputs"].permute(1, 0, 2)
return self.copy_word_prj(query).bmm(encoder_outputs.permute(0, 2, 1))
def get_generate_scores(self, query: torch.Tensor) -> torch.Tensor:
"""
:param query: (batch_size, length, d_model)
:return:
"""
return F.linear(query, self._target_embedder.weight)
def _get_loss(self, scores: torch.Tensor,
targets: torch.LongTensor,
generate_mask: torch.LongTensor,
copy_mask: torch.LongTensor,
target_mask: torch.LongTensor) -> torch.Tensor:
"""
:param scores: (batch_size, decode_length, num_class + encode_length)
:param targets: (batch_size, decode_length + 1)
:param generate_mask: (batch_size, decode_length + 1), where 1.0 indicates the target word is selected from target
vocabulary, 0.0 indicates the target is copied from entity candidates
:param copy_mask: (batch_size, decode_length + 1, encode_length), where 1.0 indicates that the target word
is copied from this source word
:param target_mask: (batch_size, decode_length)
:return:
"""
batch_size, decode_length, _ = scores.size()
# (batch_size, decode_length, num_class)
generate_scores = scores[:, :, :self._num_classes]
# (batch_size, decode_length, encode_length)
copy_scores = scores[:, :, self._num_classes:]
# shape: (batch_size * decode_length, 1)
relevant_targets = targets[:, 1:].contiguous().view(-1, 1)
target_generate_scores = torch.gather(
generate_scores.view(-1, self._num_classes), dim=1, index=relevant_targets)
target_scores = target_generate_scores.view(batch_size, decode_length)
target_scores = target_scores * generate_mask[:, 1:]
target_scores += (copy_scores * copy_mask[:, 1:, :].float()).sum(dim=-1)
# shape: (batch_size, decode_length)
relevant_mask = target_mask.contiguous().float()
loss = - target_scores.log() * relevant_mask
loss = loss.sum(dim=-1) / relevant_mask.sum(dim=-1)
loss = loss.sum() / batch_size
return loss
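    # Reader note (not original code): despite the name, `scores` passed into
    # _get_loss is already a normalized distribution (forward() feeds in the output
    # of masked_softmax).  Per step the probability of the gold token is
    #     p_t = p_generate(y_t) * generate_mask_t + sum_j p_copy(j) * copy_mask_{t,j}
    # and the loss is the length-normalized negative log-likelihood averaged over
    # the batch:  loss = mean_b [ sum_t -log(p_t) * m_t / sum_t m_t ].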
def _get_target_labels(self, target_token_ids: torch.Tensor, generate_targets: torch.Tensor):
"""
:param target_token_ids: [batch_size, decode_length]
:param generate_targets: [batch_size, decode_length]
:return:
[batch_size, decode_length]
"""
generate_mask = (generate_targets == 0.0).long()
labels = target_token_ids * generate_mask + generate_targets.long() * (1 - generate_mask)
return labels
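    # Illustrative note (not original code): for target_token_ids = [12, 3, 3] and
    # generate_targets = [0, 5, 0], generate_mask = [1, 0, 1] and the labels become
    # [12, 5, 3], i.e. copied steps are labelled by their source position index
    # rather than by a vocabulary id.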
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return self._token_based_metric.get_metric(reset)
def _get_predicted_tokens(self,
source_tokens: Dict[str, torch.LongTensor],
predicted_indices: Union[torch.Tensor, numpy.ndarray],
meta_field: List[Dict]):
"""
Convert predicted indices into tokens.
If `n_best = 1`, the result type will be `List[List[str]]`. Otherwise the result
type will be `List[List[List[str]]]`.
"""
# shape: (batch_size, encode_length)
source_token_ids = source_tokens['tokens']
if not isinstance(predicted_indices, numpy.ndarray):
predicted_indices = predicted_indices.detach().cpu().numpy()
predicted_tokens: List[Union[List[List[str]], List[str]]] = []
predicted_abstract_tokens: List[Union[List[List[str]], List[str]]] = []
for bidx, top_k_predictions in enumerate(predicted_indices):
batch_predicted_tokens: List[List[str]] = []
batch_predicted_abstract_tokens: List[List[str]] = []
pseudo_tokens = meta_field[bidx]['pseudo_tokens']
for indices in top_k_predictions:
indices = list(indices)
if self._end_index in indices:
indices = indices[:indices.index(self._end_index)]
tokens = list()
abstract_tokens = list()
for x in indices:
if x in [self._end_index, self._start_index, self._pad_index]:
continue
if x >= self._num_classes:
index = x - self._num_classes
# source_word = "@entity_%d" % index
source_word = pseudo_tokens[index]
tokens.append(source_word)
abstract_tokens.append("@entity_%d" % index)
else:
w = self.vocab.get_token_from_index(x, namespace=self._target_namespace)
tokens.append(w)
abstract_tokens.append(w)
batch_predicted_tokens.append(tokens)
batch_predicted_abstract_tokens.append(abstract_tokens)
predicted_tokens.append(batch_predicted_tokens)
predicted_abstract_tokens.append(batch_predicted_abstract_tokens)
return predicted_tokens, predicted_abstract_tokens
def _get_target_tokens(self, target_token_ids: Union[torch.Tensor, numpy.ndarray]) -> List[List[str]]:
if not isinstance(target_token_ids, numpy.ndarray):
_target_token_ids = target_token_ids.detach().cpu().numpy()
else:
_target_token_ids = target_token_ids
tokens = list()
for ids in _target_token_ids:
_tokens = [self.vocab.get_token_from_index(x, namespace=self._target_namespace) for x in ids
if x not in [self._end_index, self._start_index, self._pad_index]]
tokens.append(_tokens)
return tokens
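# Illustrative, self-contained sketch (not part of the model above): it mimics how
# _eval_decode merges vocabulary ("generate") scores and source-position ("copy")
# scores into one categorical distribution and maps the argmax back to either a
# vocabulary id or a source position.  All sizes and tensors below are made up.
if __name__ == "__main__":
    import torch

    num_classes, encode_length = 5, 3
    generate_scores = torch.randn(2, num_classes)
    copy_scores = torch.randn(2, encode_length)
    # 1.0 = this source position holds a copyable entity, 0.0 = not copyable
    entity_mask = torch.tensor([[1., 0., 1.], [0., 1., 1.]])
    scores = torch.cat((generate_scores, copy_scores), dim=-1)
    score_mask = torch.cat((torch.ones(2, num_classes), entity_mask), dim=-1)
    # Same idea as allennlp's util.masked_softmax used in the model.
    probs = torch.softmax(scores.masked_fill(score_mask == 0, float("-inf")), dim=-1)
    predicted = probs.argmax(dim=-1)
    copy_mask = (predicted >= num_classes).long()
    generate_predicted = predicted * (1 - copy_mask)         # vocabulary ids
    copy_predicted = (predicted - num_classes) * copy_mask   # source positions
    print(predicted.tolist(), generate_predicted.tolist(), copy_predicted.tolist())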
| 33,952 | 50.057143 | 122 |
py
|
Unimer
|
Unimer-master/neural_models/GNN2.py
|
# coding=utf8
import numpy
import torch
import torch.nn as nn
from allennlp.models.model import Model
from allennlp.data.tokenizers import Token
from allennlp.common.util import START_SYMBOL, END_SYMBOL
from allennlp.data.vocabulary import Vocabulary
from allennlp.modules import Embedding
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.training.metrics import Metric
from allennlp.nn import util
from overrides import overrides
from typing import Dict, List, Union, Tuple
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_
from . import utils as nn_utils
from .modules.gnn_encoder import GNNTransformerEncoderLayer, GNNTransformerEncoder, \
GNNTransformerDecoderLayer, GNNTransformerDecoder, get_decode_edge_mask
class GNNCopyTransformer2(Model):
"""
Transformer-based Seq2Seq Model
"""
def __init__(self, vocab: Vocabulary,
source_namespace: str,
target_namespace: str,
segment_namespace: str,
max_decoding_step: int,
token_based_metric: Metric,
source_embedding_dim: int = 256,
target_embedding_dim: int = 256,
encoder_d_model: int = 512,
decoder_d_model: int = 512,
encoder_nhead: int = 8,
decoder_nhead: int = 8,
num_encoder_layers: int = 6,
num_decoder_layers: int = 6,
encoder_dim_feedforward: int = 2048,
decoder_dim_feedforward: int = 2048,
dropout: float = 0.1,
beam_size: int = 1,
token_symbol: str = '@token@',
non_func_symbol: str = "@nonfunc@",
nlabels: int = 0,
max_decode_clip_range: int = 0,
encode_edge_label_with_matrix: bool = True,
is_test: bool = False,
):
super().__init__(vocab)
self._source_namespace = source_namespace
self._target_namespace = target_namespace
self._segment_namespace = segment_namespace
self._src_start_index = self.vocab.get_token_index(START_SYMBOL, self._source_namespace)
self._src_end_index = self.vocab.get_token_index(END_SYMBOL, self._source_namespace)
self._start_index = self.vocab.get_token_index(START_SYMBOL, self._target_namespace)
self._end_index = self.vocab.get_token_index(END_SYMBOL, self._target_namespace)
self._oov_index = self.vocab.get_token_index(self.vocab._oov_token,
self._target_namespace) # pylint: disable=protected-access
self._pad_index = self.vocab.get_token_index(self.vocab._padding_token,
self._target_namespace)
self._token_index = self.vocab.get_token_index(token_symbol, self._segment_namespace)
self._non_func_symbol_index = self.vocab.get_token_index(non_func_symbol, self._segment_namespace)
self._segment_pad_index = self.vocab.get_token_index(self.vocab._padding_token, self._segment_namespace)
# Source Embedding
num_source_words = self.vocab.get_vocab_size(self._source_namespace)
self._use_glove = False
self._source_embedding = Embedding(num_source_words, source_embedding_dim)
# Segments
num_segment_types = self.vocab.get_vocab_size(self._segment_namespace)
segment_embedding = Embedding(num_segment_types, source_embedding_dim)
self._segment_embedder = BasicTextFieldEmbedder({'tokens': segment_embedding})
num_classes = self.vocab.get_vocab_size(self._target_namespace)
self._num_classes = num_classes
self._target_embedder = Embedding(num_classes, target_embedding_dim)
# Encoder
self._nlabels = nlabels # number of edge labels
if self._nlabels == 0:
self._use_gnn_encoder = False
encoder_layer = nn.TransformerEncoderLayer(encoder_d_model, encoder_nhead, encoder_dim_feedforward, dropout)
encoder_norm = nn.LayerNorm(encoder_d_model)
self._encoder = nn.TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
else:
self._use_gnn_encoder = True
print("Use GNN Encoder")
encoder_layer = GNNTransformerEncoderLayer(d_model=encoder_d_model, nhead=encoder_nhead,
dim_feedforward=encoder_dim_feedforward,
dropout=dropout, nlabels=self._nlabels,
is_matrix=encode_edge_label_with_matrix)
encoder_norm = nn.LayerNorm(encoder_d_model)
self._encoder = GNNTransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
# Decoder
self._max_decode_clip_range = max_decode_clip_range
if max_decode_clip_range == 0:
self._decode_nlabels = 0
self._decode_use_relative_position = False
decoder_layer = nn.TransformerDecoderLayer(decoder_d_model, decoder_nhead, decoder_dim_feedforward, dropout)
decoder_norm = nn.LayerNorm(decoder_d_model)
self._decoder = nn.TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm)
else:
print("Use GNN Decoder")
self._decode_nlabels = self._max_decode_clip_range + 1
self._decode_use_relative_position = True
decoder_layer = GNNTransformerDecoderLayer(d_model=decoder_d_model, nhead=decoder_nhead,
dim_feedforward=decoder_dim_feedforward,
dropout=dropout, nlabels=self._decode_nlabels,
is_matrix=encode_edge_label_with_matrix)
decoder_norm = nn.LayerNorm(decoder_d_model)
self._decoder = GNNTransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm)
# Decode Gate
self.gate_linear = nn.Linear(decoder_d_model, 1)
self.copy_word_prj = nn.Linear(decoder_d_model, encoder_d_model, bias=False)
self._source_embedding_dim = source_embedding_dim
self._target_embedding_dim = target_embedding_dim
self._encoder_d_model = encoder_d_model
self._decoder_d_model = decoder_d_model
self._encoder_nhead = encoder_nhead
self._decoder_nhead = decoder_nhead
self._max_decoding_step = max_decoding_step
self._token_based_metric = token_based_metric
self._beam_size = beam_size
self._is_test = is_test
self._reset_parameters()
def _reset_parameters(self):
r"""Initiate parameters in the transformer model."""
for p in self.parameters():
if p.dim() > 1:
xavier_uniform_(p)
@overrides
def forward(self,
source_tokens: Dict[str, torch.LongTensor],
segments: Dict[str, torch.LongTensor],
source_entity_length: torch.LongTensor,
edge_mask: torch.Tensor,
copy_targets: torch.Tensor = None,
generate_targets: torch.Tensor = None,
target_tokens: Dict[str, torch.LongTensor] = None,
meta_field: Dict = None,
) -> Dict[str, torch.Tensor]:
assert self._nlabels == edge_mask.size(1)
state = self._encode(source_tokens, segments, source_entity_length, edge_mask)
if self.training:
state = self._train_decode(state, target_tokens, generate_targets)
# shape: (batch_size, decode_length, d_model)
generate_mask = state["generate_mask"]
decoder_outputs = state["decoder_outputs"]
decode_length = decoder_outputs.size(1)
# Generate scores
# shape: (batch_size, decode_length, num_classes)
generate_scores = self.get_generate_scores(decoder_outputs)
# shape: (batch_size, encode_length)
entity_mask = 1 - ((segments['tokens'] == self._token_index) |
(segments['tokens'] == self._non_func_symbol_index) |
(segments['tokens'] == self._segment_pad_index)).float()
entity_mask = entity_mask.unsqueeze(1).repeat(1, decode_length, 1)
# shape: (batch_size, decode_length, encode_length)
copy_scores = self.get_copy_scores(state, decoder_outputs)
# shape: (batch_size, decode_length, 1)
            generate_gate = torch.sigmoid(self.gate_linear(decoder_outputs))
copy_gate = 1 - generate_gate
scores = torch.cat((generate_scores * generate_gate, copy_scores * copy_gate), dim=-1)
# scores = torch.cat((generate_scores, copy_scores), dim=-1)
# shape: (batch_size, decode_length, num_classes + encode_length)
score_mask = torch.cat((entity_mask.new_ones((copy_scores.size(0), decode_length, self._num_classes)),
entity_mask), dim=-1)
class_probabilities = util.masked_softmax(scores, mask=score_mask, dim=-1)
_, predicted_classes = torch.max(class_probabilities, dim=-1, keepdim=False)
targets = target_tokens["tokens"]
target_mask = state["target_mask"]
# shape: (batch_size, max_target_sequence_length)
loss = self._get_loss(class_probabilities, targets, generate_mask, copy_targets, target_mask)
output_dict = {"predictions": predicted_classes, "loss": loss}
predictions = output_dict["predictions"]
pmask = (predictions < self._num_classes).long()
_predictions = predictions * pmask + (predictions - self._num_classes) * (1 - pmask)
target_labels = self._get_target_labels(target_tokens["tokens"], generate_targets)
target_mask = util.get_text_field_mask(target_tokens)
self._token_based_metric(_predictions, gold_labels=target_labels[:, 1:], mask=target_mask[:, 1:])
else:
output_dict = self._eval_decode(state, segments)
if target_tokens:
predictions = output_dict["predictions"]
pmask = (predictions < self._num_classes).long()
_predictions = predictions * pmask + (predictions - self._num_classes) * (1 - pmask)
target_labels = self._get_target_labels(target_tokens["tokens"], generate_targets)
target_mask = util.get_text_field_mask(target_tokens)
self._token_based_metric(_predictions[:, 1:], gold_labels=target_labels[:, 1:], mask=target_mask[:, 1:])
return output_dict
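    # Reader note (not original code): the distinguishing detail of this variant is the
    # sigmoid gate above -- generate scores are scaled by generate_gate and copy scores
    # by (1 - generate_gate) before a single masked softmax over their concatenation,
    # and entity_mask removes copy positions that are plain tokens, non-functional
    # symbols, or padding.  A small runnable sketch of the gating appears at the end
    # of this file.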
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
Finalize predictions.
This method overrides ``Model.decode``, which gets called after ``Model.forward``, at test
time, to finalize predictions. The logic for the decoder part of the encoder-decoder lives
within the ``forward`` method.
This method trims the output predictions to the first end symbol, replaces indices with
corresponding tokens, and adds a field called ``predicted_tokens`` to the ``output_dict``.
"""
predicted_indices = output_dict["predictions"]
if not isinstance(predicted_indices, numpy.ndarray):
predicted_indices = predicted_indices.detach().cpu().numpy()
all_predicted_tokens = []
for indices in predicted_indices:
# Beam search gives us the top k results for each source sentence in the batch
# but we just want the single best.
if len(indices.shape) > 1:
indices = indices[0]
indices = list(indices)
if self._end_index in indices:
indices = indices[:indices.index(self._end_index)]
predicted_tokens = list()
for x in indices:
if x in [self._end_index, self._start_index, self._pad_index]:
continue
if x >= self._num_classes:
index = x - self._num_classes
predicted_tokens.append(Token("@entity_%d" % index))
else:
w = self.vocab.get_token_from_index(x, namespace=self._target_namespace)
predicted_tokens.append(w)
all_predicted_tokens.append(predicted_tokens)
output_dict["predicted_tokens"] = all_predicted_tokens
return output_dict
def _embed_source(self, source_tokens: Dict[str, torch.Tensor], source_entity_length: torch.LongTensor):
"""
:param source_tokens
:param source_entity_length: (batch_size, max_token_num)
:return
(batch_size, max_token_num, embedding_dim)
"""
token_ids = source_tokens['tokens']
embedded = self._source_embedding(token_ids)
batched_embedded = list()
embedding_dim = embedded.size(-1)
batch_size, max_token_num = source_entity_length.size()
for _embedded, _length in zip(embedded, source_entity_length.long()):
merged_embedded_input = list()
idx = 0
for length in _length:
if length > 0:
embedding = torch.mean(_embedded[idx:idx + length, :], dim=0)
merged_embedded_input.append(embedding)
idx += length
else:
break
merged_embedded_input = torch.stack(merged_embedded_input, dim=0)
pad_num = max_token_num - merged_embedded_input.size(0)
if pad_num > 0:
merged_embedded_input = torch.cat((merged_embedded_input,
merged_embedded_input.new_zeros([pad_num, embedding_dim])), dim=0)
batched_embedded.append(merged_embedded_input)
# shape: (batch_size, max_token_num, embedding_dim)
batched_embedded = torch.stack(batched_embedded, dim=0)
assert batched_embedded.size(0) == embedded.size(0) and batched_embedded.size(1) == source_entity_length.size(1)
# TODO: Dropout
return batched_embedded
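    # Illustrative note (not original code): if a row of source_entity_length is
    # [2, 1, 3, 0, 0], word-piece embeddings 0-1 are mean-pooled into output slot 0,
    # piece 2 becomes slot 1, pieces 3-5 are pooled into slot 2, and the remaining
    # slots are zero-padded up to max_token_num.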
def _encode(self, source_tokens: Dict[str, torch.Tensor], segments: Dict[str, torch.Tensor],
source_entity_length: torch.Tensor, edge_mask: torch.Tensor, ) -> Dict[str, torch.Tensor]:
"""
:param source_tokens:
:param segments:
        :param source_entity_length:
        :param edge_mask:
:return:
"""
# shape: (batch_size, encode_length, embedding_dim)
source_embedded_input = self._embed_source(source_tokens, source_entity_length)
# shape: (batch_size, encode_length, embedding_dim)
segments_embedded_input = self._segment_embedder(segments)
encode_length = segments_embedded_input.size(1)
assert source_embedded_input.size(1) == segments_embedded_input.size(1)
# token_mask = (segments['tokens'] == self._token_index).unsqueeze(-1).float()
# valid_token_embedded_input = batched_embedded_input * token_mask
# valid_token_embedded_input = util.add_positional_features(valid_token_embedded_input)
# valid_token_embedded_input = batched_embedded_input * (1 - token_mask) + valid_token_embedded_input * token_mask
if self._source_embedding_dim == self._encoder_d_model:
batched_embedded_input = segments_embedded_input + source_embedded_input
final_embedded_input = util.add_positional_features(batched_embedded_input)
else:
batched_embedded_input = torch.cat([source_embedded_input, segments_embedded_input], dim=-1)
final_embedded_input = util.add_positional_features(batched_embedded_input)
# shape: (encode_length, batch_size, d_model)
final_embedded_input = final_embedded_input.permute(1, 0, 2)
# shape: (batch_size, encode_length)
source_mask = util.get_text_field_mask(segments)
source_key_padding_mask = (1 - source_mask.byte()).bool()
if not self._use_gnn_encoder:
# shape: (encode_length, batch_size, d_model)
encoder_outputs = self._encoder(final_embedded_input, src_key_padding_mask=source_key_padding_mask)
else:
# GNN encoders
encoder_outputs = self._encoder(src=final_embedded_input, edge_mask=edge_mask.permute(0, 2, 3, 1),
padding_mask=source_key_padding_mask)
source_token_mask = (segments['tokens'] == self._token_index).float()
return {
"source_mask": source_mask,
"source_key_padding_mask": source_key_padding_mask,
"source_token_mask": source_token_mask,
"encoder_outputs": encoder_outputs,
"source_embedded": batched_embedded_input,
"source_raw_embedded": source_embedded_input,
}
def _train_decode(self, state: Dict[str, torch.Tensor],
                      target_tokens: Dict[str, torch.Tensor],
generate_targets: torch.Tensor) -> Dict[str, torch.Tensor]:
encoder_outputs = state["encoder_outputs"]
source_key_padding_mask = state["source_key_padding_mask"]
# shape: (batch_size, encode_length, d_model)
source_embedded = state["source_raw_embedded"]
batch_size, _, _ = source_embedded.size()
basic_index = torch.arange(batch_size).to(source_embedded.device).long()
generate_targets = generate_targets.long()
retrieved_target_embedded_input = source_embedded[basic_index.unsqueeze(1), generate_targets][:, :-1, :]
target_embedded_input = self._target_embedder(target_tokens['tokens'])[:, :-1, :]
# shape: (batch_size, max_decode_length)
# where 1 indicates that the target token is generated rather than copied
generate_mask = (generate_targets == 0).float()
target_embedded_input = target_embedded_input * generate_mask[:, :-1].unsqueeze(-1) \
+ retrieved_target_embedded_input * (1 - generate_mask)[:, :-1].unsqueeze(-1)
target_embedded_input = util.add_positional_features(target_embedded_input)
# shape: (max_target_sequence_length - 1, batch_size, d_model)
target_embedded_input = target_embedded_input.permute(1, 0, 2)
# shape: (batch_size, max_target_sequence_length - 1)
"""
key_padding_mask should be a ByteTensor where True values are positions
that should be masked with float('-inf') and False values will be unchanged.
"""
target_mask = util.get_text_field_mask(target_tokens)[:, 1:]
target_key_padding_mask = (1 - target_mask.byte()).bool()
assert target_key_padding_mask.size(1) == target_embedded_input.size(0) and \
target_embedded_input.size(1) == target_key_padding_mask.size(0)
max_target_seq_length = target_key_padding_mask.size(1)
target_additive_mask = (torch.triu(
target_mask.new_ones(max_target_seq_length, max_target_seq_length)) == 1).transpose(0, 1)
target_additive_mask = target_additive_mask.float().masked_fill(target_additive_mask == 0, float('-inf'))
target_additive_mask = target_additive_mask.masked_fill(target_additive_mask == 1, float(0.0))
assert target_embedded_input.size(1) == encoder_outputs.size(1)
source_token_mask = state["source_token_mask"]
memory_key_padding_mask = (1 - source_token_mask).bool()
# memory_key_padding_mask = source_key_padding_mask
if not self._decode_use_relative_position:
# shape: (max_target_sequence_length, batch_size, d_model)
decoder_outputs = self._decoder(target_embedded_input, memory=encoder_outputs,
tgt_mask=target_additive_mask, tgt_key_padding_mask=None,
memory_key_padding_mask=memory_key_padding_mask)
else:
# gnn decoder
edge_mask = get_decode_edge_mask(target_embedded_input,
max_decode_clip_range=self._max_decode_clip_range)
batch_size = edge_mask.size(0)
tgt_padding_mask = torch.tril(edge_mask.new_ones([max_target_seq_length, max_target_seq_length]),
diagonal=0)
tgt_padding_mask = (1 - (tgt_padding_mask.unsqueeze(0).repeat(batch_size, 1, 1))).float()
decoder_outputs = self._decoder(target_embedded_input, edge_mask=edge_mask.permute(0, 2, 3, 1),
memory=encoder_outputs, tgt_padding_mask=tgt_padding_mask,
memory_key_padding_mask=memory_key_padding_mask)
# shape: (batch_size, max_target_sequence_length, d_model)
decoder_outputs = decoder_outputs.permute(1, 0, 2)
state.update({
"decoder_outputs": decoder_outputs,
"target_key_padding_mask": target_key_padding_mask,
"target_mask": target_mask,
"generate_mask": generate_mask
})
return state
def _eval_decode(self, state: Dict[str, torch.Tensor],
segments: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
encoder_outputs = state["encoder_outputs"]
source_key_padding_mask = state["source_key_padding_mask"]
source_embedded = state["source_raw_embedded"]
source_token_mask = state["source_token_mask"]
memory_key_padding_mask = (1 - source_token_mask).bool()
# memory_key_padding_mask = source_key_padding_mask
batch_size = source_key_padding_mask.size(0)
encode_length = source_key_padding_mask.size(1)
log_probs_after_end = encoder_outputs.new_full((batch_size, self._num_classes + encode_length),
fill_value=float("-inf"))
log_probs_after_end[:, self._end_index] = 0.
start_predictions = state["source_mask"].new_full((batch_size, 1), fill_value=self._start_index)
partial_generate_predictions = start_predictions
partial_copy_predictions = state["source_mask"].new_zeros((batch_size, 1))
basic_index = torch.arange(batch_size).to(source_embedded.device).unsqueeze(1).long()
generate_mask = state["source_mask"].new_ones((batch_size, 1)).float()
# shape: (batch_size)
last_prediction = start_predictions.squeeze(1)
for _ in range(self._max_decoding_step):
# shape: (batch_size, partial_len, d_model)
partial_source_embedded_input = source_embedded[basic_index, partial_copy_predictions]
partial_target_embedded_input = self._target_embedder(partial_generate_predictions)
partial_embedded_input = partial_target_embedded_input * generate_mask.unsqueeze(-1) \
+ partial_source_embedded_input * (1 - generate_mask).unsqueeze(-1)
partial_embedded_input = util.add_positional_features(partial_embedded_input)
partial_len = partial_embedded_input.size(1)
partial_embedded_input = partial_embedded_input.permute(1, 0, 2)
mask = (torch.triu(state["source_mask"].new_ones(partial_len, partial_len)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
if not self._decode_use_relative_position:
# shape: (partial_len, batch_size, d_model)
outputs = self._decoder(partial_embedded_input, memory=encoder_outputs,
tgt_mask=mask, memory_key_padding_mask=memory_key_padding_mask)
else:
# gnn decoder
edge_mask = get_decode_edge_mask(partial_embedded_input,
max_decode_clip_range=self._max_decode_clip_range)
tgt_padding_mask = torch.tril(edge_mask.new_ones([partial_len, partial_len]), diagonal=0)
tgt_padding_mask = (1 - tgt_padding_mask.unsqueeze(0).repeat(batch_size, 1, 1)).float()
# shape: (partial_len, batch_size, d_model)
outputs = self._decoder(partial_embedded_input, edge_mask=edge_mask.permute(0, 2, 3, 1),
memory=encoder_outputs, tgt_padding_mask=tgt_padding_mask,
memory_key_padding_mask=memory_key_padding_mask)
outputs = outputs.permute(1, 0, 2)
# shape: (batch_size, d_model)
curr_outputs = outputs[:, -1, :]
# shape: (batch_size, num_classes)
generate_scores = self.get_generate_scores(curr_outputs)
# shape: (batch_size, encode_length)
copy_scores = self.get_copy_scores(state, curr_outputs.unsqueeze(1)).squeeze(1)
# Gate
# shape: (batch_size, 1)
            generate_gate = torch.sigmoid(self.gate_linear(curr_outputs))
copy_gate = 1 - generate_gate
scores = torch.cat((generate_scores * generate_gate, copy_scores * copy_gate), dim=-1)
# scores = torch.cat((generate_scores, copy_scores), dim=-1)
# shape: (batch_size, encode_length)
entity_mask = 1 - ((segments['tokens'] == self._token_index) |
(segments['tokens'] == self._non_func_symbol_index) |
(segments['tokens'] == self._segment_pad_index)).float()
# shape: (batch_size, num_classes + encode_length)
score_mask = torch.cat((entity_mask.new_ones((batch_size, self._num_classes)), entity_mask), dim=-1)
# shape: (batch_size, num_classes + encode_length)
normalized_scores = util.masked_softmax(scores, mask=score_mask, dim=-1)
last_prediction_expanded = last_prediction.unsqueeze(-1).expand(
batch_size, self._num_classes + encode_length
)
# shape: (batch_size, num_classes + encode_length)
cleaned_logits = torch.where(
last_prediction_expanded == self._end_index,
log_probs_after_end,
normalized_scores
)
# shape: (batch_size)
_, predicted = torch.max(input=cleaned_logits, dim=1, keepdim=False)
copy_mask = (predicted >= self._num_classes).long()
generate_predicted = predicted * (1 - copy_mask)
copy_predicted = (predicted - self._num_classes) * copy_mask
partial_copy_predictions = torch.cat((partial_copy_predictions, copy_predicted.unsqueeze(1)), dim=1)
partial_generate_predictions = torch.cat((partial_generate_predictions, generate_predicted.unsqueeze(1)),
dim=1)
generate_mask = torch.cat((generate_mask, (1 - copy_mask).unsqueeze(1).float()), dim=1)
last_prediction = predicted
if (last_prediction == self._end_index).sum() == batch_size:
break
predictions = partial_generate_predictions * generate_mask.long() + \
(1 - generate_mask).long() * (partial_copy_predictions + self._num_classes)
# shape: (batch_size, partial_len)
output_dict = {
"predictions": predictions
}
return output_dict
def get_copy_scores(self, state: Dict[str, torch.Tensor],
query: torch.Tensor) -> torch.Tensor:
"""
:param state:
:param query: (batch_size, length, d_model)
:return:
"""
# shape: (batch_size, encode_length, d_model)
encoder_outputs = state["encoder_outputs"].permute(1, 0, 2)
return self.copy_word_prj(query).bmm(encoder_outputs.permute(0, 2, 1))
def get_generate_scores(self, query: torch.Tensor) -> torch.Tensor:
"""
:param query: (batch_size, length, d_model)
:return:
"""
return F.linear(query, self._target_embedder.weight)
def _get_loss(self, scores: torch.Tensor,
targets: torch.LongTensor,
generate_mask: torch.LongTensor,
copy_mask: torch.LongTensor,
target_mask: torch.LongTensor) -> torch.Tensor:
"""
:param scores: (batch_size, decode_length, num_class + encode_length)
:param targets: (batch_size, decode_length + 1)
:param generate_mask: (batch_size, decode_length + 1), where 1.0 indicates the target word is selected from target
vocabulary, 0.0 indicates the target is copied from entity candidates
:param copy_mask: (batch_size, decode_length + 1, encode_length), where 1.0 indicates that the target word
is copied from this source word
:param target_mask: (batch_size, decode_length)
:return:
"""
batch_size, decode_length, _ = scores.size()
# (batch_size, decode_length, num_class)
generate_scores = scores[:, :, :self._num_classes]
# (batch_size, decode_length, encode_length)
copy_scores = scores[:, :, self._num_classes:]
# shape: (batch_size * decode_length, 1)
relevant_targets = targets[:, 1:].contiguous().view(-1, 1)
target_generate_scores = torch.gather(
generate_scores.view(-1, self._num_classes), dim=1, index=relevant_targets)
target_scores = target_generate_scores.view(batch_size, decode_length)
target_scores = target_scores * generate_mask[:, 1:]
target_scores += (copy_scores * copy_mask[:, 1:, :].float()).sum(dim=-1)
# shape: (batch_size, decode_length)
relevant_mask = target_mask.contiguous().float()
loss = - target_scores.log() * relevant_mask
loss = loss.sum(dim=-1) / relevant_mask.sum(dim=-1)
loss = loss.sum() / batch_size
return loss
def _get_target_labels(self, target_token_ids: torch.Tensor, generate_targets: torch.Tensor):
"""
:param target_token_ids: [batch_size, decode_length]
:param generate_targets: [batch_size, decode_length]
:return:
[batch_size, decode_length]
"""
generate_mask = (generate_targets == 0.0).long()
labels = target_token_ids * generate_mask + generate_targets.long() * (1 - generate_mask)
return labels
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return self._token_based_metric.get_metric(reset)
def _get_predicted_tokens(self,
source_tokens: Dict[str, torch.LongTensor],
predicted_indices: Union[torch.Tensor, numpy.ndarray],
meta_field: List[Dict]):
"""
Convert predicted indices into tokens.
If `n_best = 1`, the result type will be `List[List[str]]`. Otherwise the result
type will be `List[List[List[str]]]`.
"""
# shape: (batch_size, encode_length)
source_token_ids = source_tokens['tokens']
if not isinstance(predicted_indices, numpy.ndarray):
predicted_indices = predicted_indices.detach().cpu().numpy()
predicted_tokens: List[Union[List[List[str]], List[str]]] = []
predicted_abstract_tokens: List[Union[List[List[str]], List[str]]] = []
for bidx, top_k_predictions in enumerate(predicted_indices):
batch_predicted_tokens: List[List[str]] = []
batch_predicted_abstract_tokens: List[List[str]] = []
pseudo_tokens = meta_field[bidx]['pseudo_tokens']
for indices in top_k_predictions:
indices = list(indices)
if self._end_index in indices:
indices = indices[:indices.index(self._end_index)]
tokens = list()
abstract_tokens = list()
for x in indices:
if x in [self._end_index, self._start_index, self._pad_index]:
continue
if x >= self._num_classes:
index = x - self._num_classes
# source_word = "@entity_%d" % index
source_word = pseudo_tokens[index]
tokens.append(source_word)
abstract_tokens.append("@entity_%d" % index)
else:
w = self.vocab.get_token_from_index(x, namespace=self._target_namespace)
tokens.append(w)
abstract_tokens.append(w)
batch_predicted_tokens.append(tokens)
batch_predicted_abstract_tokens.append(abstract_tokens)
predicted_tokens.append(batch_predicted_tokens)
predicted_abstract_tokens.append(batch_predicted_abstract_tokens)
return predicted_tokens, predicted_abstract_tokens
def _get_target_tokens(self, target_token_ids: Union[torch.Tensor, numpy.ndarray]) -> List[List[str]]:
if not isinstance(target_token_ids, numpy.ndarray):
_target_token_ids = target_token_ids.detach().cpu().numpy()
else:
_target_token_ids = target_token_ids
tokens = list()
for ids in _target_token_ids:
_tokens = [self.vocab.get_token_from_index(x, namespace=self._target_namespace) for x in ids
if x not in [self._end_index, self._start_index, self._pad_index]]
tokens.append(_tokens)
return tokens
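# Illustrative, self-contained sketch (not part of the model above): the gating used
# in forward()/_eval_decode(), where a sigmoid gate scales the vocabulary scores and
# the copy scores before they are concatenated and normalized by a single softmax.
# The layer sizes and tensors below are made up for the demo.
if __name__ == "__main__":
    import torch
    import torch.nn as nn

    d_model, num_classes, encode_length = 8, 5, 3
    gate_linear = nn.Linear(d_model, 1)
    decoder_output = torch.randn(2, d_model)
    generate_scores = torch.randn(2, num_classes)
    copy_scores = torch.randn(2, encode_length)
    generate_gate = torch.sigmoid(gate_linear(decoder_output))   # (2, 1)
    copy_gate = 1 - generate_gate
    scores = torch.cat((generate_scores * generate_gate, copy_scores * copy_gate), dim=-1)
    probs = torch.softmax(scores, dim=-1)
    print(probs.shape)  # torch.Size([2, 8])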
| 34,001 | 50.130827 | 122 |
py
|
Unimer
|
Unimer-master/neural_models/__init__.py
|
# coding=utf-8
| 16 | 4.666667 | 14 |
py
|
Unimer
|
Unimer-master/neural_models/grammar_based_models.py
|
# coding=utf8
import numpy
import torch
import torch.nn as nn
from typing import Dict, List
from overrides import overrides
from allennlp.training.metrics import Metric
from allennlp.models.model import Model
from allennlp.data.vocabulary import Vocabulary
from allennlp.nn import util
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from .modules.grammar_copy_decoder import LSTMGrammarCopyDecoder
from .modules.grammar_copy_decoder_2 import LSTMGrammarCopyDecoder as LSTMGrammarCopyDecoder2
class GrammarModel(Model):
def __init__(self, vocab: Vocabulary, source_embedder: BasicTextFieldEmbedder, encoder, decoder, metric, flags, regularizer=None):
super().__init__(vocab, regularizer=regularizer)
self._source_embedder = source_embedder
self._encoder = encoder
self._encoder_input_dropout = nn.Dropout(p=flags.encoder_input_dropout)
self._encoder_output_dropout = nn.Dropout(
p=flags.encoder_output_dropout)
self._decoder = decoder
self._metric = metric
@overrides
def forward(self,
source_tokens: Dict[str, torch.LongTensor],
source_token_copy_indices: torch.Tensor = None,
target_rules: torch.LongTensor = None,
target_nonterminals: torch.LongTensor = None,
                target_mask: torch.LongTensor = None,
target_allow_copy_mask: torch.Tensor = None,
meta_field: List[Dict] = None,):
state = self.encode(source_tokens)
if isinstance(self._decoder, LSTMGrammarCopyDecoder) or isinstance(self._decoder, LSTMGrammarCopyDecoder2):
output_dict = self._decoder(
encodings=state['encoder_outputs'],
source_mask=state['source_mask'],
source_token_copy_indices=source_token_copy_indices,
target_rules=target_rules,
target_nonterminals=target_nonterminals,
target_mask=target_mask,
target_allow_copy_mask=target_allow_copy_mask,
meta_field=meta_field
)
else:
output_dict = self._decoder(
encodings=state['encoder_outputs'],
source_mask=state['source_mask'],
target_rules=target_rules,
target_nonterminals=target_nonterminals,
target_mask=target_mask,
meta_field=meta_field
)
if self.training:
self._metric(output_dict['predicted_rules'].float(
), gold_labels=target_rules[:, 1:].float(), mask=target_mask[:, 1:].float())
else:
self._metric(output_dict['predicted_rules'].float(
), gold_labels=target_rules.float(), mask=target_mask.float())
return output_dict
def encode(self, source_tokens: Dict[str, torch.LongTensor]):
# shape: (batch_size, max_input_sequence_length, encoder_input_dim)
embedded_input = self._source_embedder(source_tokens)
# shape: (batch_size, max_input_sequence_length)
source_mask = util.get_text_field_mask(source_tokens)
# shape: (batch_size, max_input_sequence_length, encoder_output_dim)
embedded_input = self._encoder_input_dropout(embedded_input)
encoder_outputs = self._encoder(embedded_input, source_mask)
encoder_outputs = self._encoder_output_dropout(encoder_outputs)
return {
"source_mask": source_mask,
"encoder_outputs": encoder_outputs,
}
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
accuracy = self._metric.get_metric(reset)['accuracy']
return {"accuracy": accuracy}
| 3,744 | 42.546512 | 134 |
py
|
Unimer
|
Unimer-master/neural_models/recombination_seq2seq.py
|
# coding=utf8
import numpy
import torch
from typing import Dict, Tuple, Union, List, Any
from allennlp.models import SimpleSeq2Seq
from allennlp.data.vocabulary import Vocabulary
from allennlp.modules import TextFieldEmbedder, Seq2SeqEncoder, Attention, SimilarityFunction
from allennlp.nn import util, InitializerApplicator
from allennlp.training.metrics import Metric
from overrides import overrides
from torch.nn import Linear, LSTMCell
class RecombinationSeq2Seq(SimpleSeq2Seq):
"""
Neural Architecture taken from "Data Recombination for Neural Semantic Parsing"
"""
def __init__(self,
vocab: Vocabulary,
source_embedder: TextFieldEmbedder,
encoder: Seq2SeqEncoder,
max_decoding_steps: int,
seq_metrics: Metric,
input_attention: Attention = None,
input_attention_function: SimilarityFunction = None,
beam_size: int = None,
target_namespace: str = "tokens",
target_embedding_dim: int = None,
scheduled_sampling_ratio: float = 0.,
use_bleu: bool = True,
                 encoder_input_dropout: float = 0.0,
                 encoder_output_dropout: float = 0.0,
dropout=0.0,
output_attention: Attention = None,
feed_output_attention_to_decoder: bool = False,
keep_decoder_output_dim_same_as_encoder: bool = True,
initializer: InitializerApplicator = InitializerApplicator()) -> None:
super().__init__(vocab, source_embedder, encoder, max_decoding_steps, input_attention,
input_attention_function, beam_size, target_namespace, target_embedding_dim,
scheduled_sampling_ratio, use_bleu)
self._seq_metric = seq_metrics
self._pad_index = self.vocab.get_token_index(self.vocab._padding_token,
self._target_namespace) # pylint: disable=protected-access
self._output_attention = output_attention
self._encoder_input_dropout = torch.nn.Dropout(p=encoder_input_dropout)
self._encoder_output_dropout = torch.nn.Dropout(p=encoder_output_dropout)
self._output_dropout = torch.nn.Dropout(p=dropout)
self._embedded_dropout = torch.nn.Dropout(p=dropout)
self._feed_output_attention_to_decoder = feed_output_attention_to_decoder
self._keep_decoder_output_dim_same_as_encoder = keep_decoder_output_dim_same_as_encoder
if not self._keep_decoder_output_dim_same_as_encoder:
self._decoder_output_dim = int(self._encoder_output_dim / 2) if encoder.is_bidirectional() \
else self._encoder_output_dim
self._transform_decoder_init_state = torch.nn.Sequential(
torch.nn.Tanh(),
torch.nn.Linear(self._encoder_output_dim, self._decoder_output_dim)
)
if self._feed_output_attention_to_decoder:
self._decoder_input_dim = target_embedding_dim + self._encoder_output_dim
self._decoder_cell = LSTMCell(self._decoder_input_dim, self._decoder_output_dim)
else:
self._decoder_cell = LSTMCell(self._decoder_input_dim, self._decoder_output_dim)
num_classes = self.vocab.get_vocab_size(self._target_namespace)
if self._output_attention:
# self._fuse_decoder_hidden_attention_layout = torch.nn.Sequential(torch.nn.Tanh(), Linear(
# self._decoder_output_dim * 2, self._decoder_output_dim
# ))
self._output_projection_layer = Linear(self._decoder_output_dim + self._encoder_output_dim, num_classes)
else:
self._output_projection_layer = Linear(self._decoder_output_dim, num_classes)
initializer(self)
def _prepare_output_attended_input(self,
decoder_hidden_state: torch.LongTensor = None,
encoder_outputs: torch.LongTensor = None,
encoder_outputs_mask: torch.LongTensor = None) -> torch.Tensor:
"""Apply ouput attention over encoder outputs and decoder state."""
# Ensure mask is also a FloatTensor. Or else the multiplication within
# attention will complain.
# shape: (batch_size, max_input_sequence_length)
encoder_outputs_mask = encoder_outputs_mask.float()
# shape: (batch_size, max_input_sequence_length)
input_weights = self._output_attention(
decoder_hidden_state, encoder_outputs, encoder_outputs_mask)
# shape: (batch_size, encoder_output_dim)
attended_input = util.weighted_sum(encoder_outputs, input_weights)
return attended_input
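    # Reader note (not original code): this is standard Luong-style attention -- the
    # current decoder hidden state scores every encoder output (padding excluded by
    # the mask), and the softmax-weighted sum of encoder outputs is concatenated with
    # the decoder state before the output projection (and, when
    # feed_output_attention_to_decoder is True, also fed into the next decoder step).
    # A runnable sketch of the same computation is appended at the end of this file.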
@overrides
def _prepare_output_projections(self,
last_predictions: torch.Tensor,
state: Dict[str, torch.Tensor]) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
# pylint: disable=line-too-long
"""
        Decode current state and last prediction to produce projections
into the target space, which can then be used to get probabilities of
each target token for the next step.
Add dropout before the softmax classifier (Following "Language to Logical Form with Neural Attention")
Inputs are the same as for `take_step()`.
"""
# shape: (group_size, max_input_sequence_length, encoder_output_dim)
encoder_outputs = state["encoder_outputs"]
# shape: (group_size, max_input_sequence_length)
source_mask = state["source_mask"]
# shape: (group_size, decoder_output_dim)
decoder_hidden = state["decoder_hidden"]
# shape: (group_size, decoder_output_dim)
decoder_context = state["decoder_context"]
# shape: (group_size, target_embedding_dim)
embedded_input = self._target_embedder(last_predictions)
embedded_input = self._embedded_dropout(embedded_input)
if self._attention:
# shape: (group_size, encoder_output_dim)
attended_input = self._prepare_attended_input(decoder_hidden, encoder_outputs, source_mask)
# shape: (group_size, decoder_output_dim + target_embedding_dim)
decoder_input = torch.cat((attended_input, embedded_input), -1)
else:
# shape: (group_size, target_embedding_dim)
decoder_input = embedded_input
if self._feed_output_attention_to_decoder:
decoder_input = torch.cat((decoder_input, state["attention_context"]), -1)
# shape (decoder_hidden): (batch_size, decoder_output_dim)
# shape (decoder_context): (batch_size, decoder_output_dim)
decoder_hidden, decoder_context = self._decoder_cell(
decoder_input,
(decoder_hidden, decoder_context))
state["decoder_hidden"] = decoder_hidden
state["decoder_context"] = decoder_context
if self._output_attention:
# shape: (group_size, encoder_output_dim)
output_attended_input = self._prepare_output_attended_input(decoder_hidden, encoder_outputs, source_mask)
if self._feed_output_attention_to_decoder:
state["attention_context"] = output_attended_input
# output_projection_input = self._fuse_decoder_hidden_attention_layout(torch.cat((decoder_hidden,
# output_attended_input), -1))
output_projection_input = torch.cat((decoder_hidden, output_attended_input), -1)
else:
output_projection_input = decoder_hidden
# dropped_output_projection_input = self._input_dropout(output_projection_input)
dropped_output_projection_input = self._output_dropout(output_projection_input)
# shape: (group_size, num_classes)
output_projections = self._output_projection_layer(dropped_output_projection_input)
return output_projections, state
def _encode(self, source_tokens: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
# shape: (batch_size, max_input_sequence_length, encoder_input_dim)
embedded_input = self._source_embedder(source_tokens)
# shape: (batch_size, max_input_sequence_length)
source_mask = util.get_text_field_mask(source_tokens)
# shape: (batch_size, max_input_sequence_length, encoder_output_dim)
embedded_input = self._encoder_input_dropout(embedded_input)
encoder_outputs = self._encoder(embedded_input, source_mask)
encoder_outputs = self._encoder_output_dropout(encoder_outputs)
return {
"source_mask": source_mask,
"encoder_outputs": encoder_outputs,
}
@overrides
def _init_decoder_state(self, state: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
batch_size = state["source_mask"].size(0)
# shape: (batch_size, encoder_output_dim)
final_encoder_output = util.get_final_encoder_states(
state["encoder_outputs"],
state["source_mask"],
self._encoder.is_bidirectional())
# Initialize the decoder hidden state with the final output of the encoder.
# shape: (batch_size, decoder_output_dim)
state["decoder_hidden"] = self._transform_decoder_init_state(final_encoder_output)
# shape: (batch_size, decoder_output_dim)
state["decoder_context"] = state["encoder_outputs"].new_zeros(batch_size, self._decoder_output_dim)
if self._feed_output_attention_to_decoder:
state["attention_context"] = state["encoder_outputs"].new_zeros(batch_size, self._encoder_output_dim)
return state
@overrides
def forward(self, # type: ignore
source_tokens: Dict[str, torch.LongTensor],
target_tokens: Dict[str, torch.LongTensor] = None) -> Dict[str, torch.Tensor]:
state = self._encode(source_tokens)
if target_tokens:
state = self._init_decoder_state(state)
# The `_forward_loop` decodes the input sequence and computes the loss during training
# and validation.
output_dict = self._forward_loop(state, target_tokens)
else:
output_dict = {}
if not self.training:
state = self._init_decoder_state(state)
predictions = self._forward_beam_search(state)
output_dict.update(predictions)
if target_tokens:
# shape: (batch_size, beam_size, max_sequence_length)
top_k_predictions = output_dict["predictions"]
# shape: (batch_size, max_predicted_sequence_length)
best_predictions = top_k_predictions[:, 0, :]
if self._bleu:
self._bleu(best_predictions, target_tokens["tokens"])
if self._seq_metric:
self._seq_metric(
best_predictions.float(),
gold_labels=target_tokens["tokens"][:, 1:].float(),
mask=util.get_text_field_mask(
target_tokens).float()[:, 1:]
)
return output_dict
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
all_metrics: Dict[str, float] = {}
if not self.training:
if self._bleu:
all_metrics.update(self._bleu.get_metric(reset=reset))
if self._seq_metric:
all_metrics.update(
{"accuracy": self._seq_metric.get_metric(reset)['accuracy']})
return all_metrics
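# Illustrative, self-contained sketch (not part of the model above): the dot-product
# attention + weighted sum that _prepare_output_attended_input delegates to AllenNLP's
# Attention module and util.weighted_sum.  Shapes and tensors are made up for the demo.
if __name__ == "__main__":
    import torch

    batch, src_len, hidden = 2, 4, 8
    decoder_hidden = torch.randn(batch, hidden)
    encoder_outputs = torch.randn(batch, src_len, hidden)
    mask = torch.tensor([[1., 1., 1., 0.], [1., 1., 0., 0.]])
    # (batch, src_len) attention scores, padding masked out before the softmax
    scores = torch.bmm(encoder_outputs, decoder_hidden.unsqueeze(-1)).squeeze(-1)
    scores = scores.masked_fill(mask == 0, float("-inf"))
    weights = torch.softmax(scores, dim=-1)
    # (batch, hidden) attended context vector
    attended = torch.bmm(weights.unsqueeze(1), encoder_outputs).squeeze(1)
    print(attended.shape)  # torch.Size([2, 8])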
| 11,830 | 47.093496 | 122 |
py
|
Unimer
|
Unimer-master/neural_models/modules/grammar_decoder.py
|
# coding=utf-8
import torch
import copy
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from overrides import overrides
from allennlp.modules import Embedding
from typing import Tuple, List, Dict
from .. import utils as nn_utils
class LSTMGrammarDecoder(nn.Module):
def __init__(self,
grammar,
ast_class,
lstm_hidden_dim: int,
num_lstm_layers: int,
rule_pad_index: int,
rule_embedding_dim: int,
nonterminal_pad_index: int,
nonterminal_end_index: int,
nonterminal_embedding_dim: int,
source_encoding_dim: int,
dropout: float,
max_target_length: int,
):
super().__init__()
self._grammar = grammar
self._root_rule = grammar.get_production_rule_by_id(grammar.root_rule_id)
self._ast_class = ast_class
self._lstm_hidden_dim = lstm_hidden_dim
self._num_lstm_layers = num_lstm_layers
# Production Rules + PAD Rule
self._rule_pad_index = rule_pad_index
self._num_rules = grammar.num_rules + 1
self._rule_embedding_dim = rule_embedding_dim
print("Rule Pad Index: ", self._rule_pad_index)
# Non-Terminals + PAD Node
self._nonterminal_end_index = nonterminal_end_index
self._nonterminal_pad_index = nonterminal_pad_index
self._num_nonterminals = grammar.num_non_terminals + 2
self._nonterminal_embedding_dim = nonterminal_embedding_dim
print("Non-Terminal Pad Index: ", self._nonterminal_pad_index)
print("Non-Terminal End Index: ", self._nonterminal_end_index)
self._source_encoding_dim = source_encoding_dim
self._max_target_length = max_target_length
self._transform_encodings_key = nn.Linear(source_encoding_dim, self._lstm_hidden_dim)
self._transform_encodings_value = nn.Linear(source_encoding_dim, self._lstm_hidden_dim)
# Input: (Attention Context + Previous Rule Embedding + Current Nonterminal Embedding)
decode_lstm_input_dim = lstm_hidden_dim + rule_embedding_dim + nonterminal_embedding_dim
self._decoder_lstm = nn.LSTM(
input_size=decode_lstm_input_dim,
hidden_size=lstm_hidden_dim,
num_layers=num_lstm_layers,
batch_first=False
)
self._attn_dropout = nn.Dropout(p=dropout)
self._decode_dropout = nn.Dropout(p=dropout)
self._rule_embedder = Embedding(self._num_rules, rule_embedding_dim)
self._nonterminal_embedder = Embedding(self._num_nonterminals, nonterminal_embedding_dim)
self._attention_hidden_layer = nn.Sequential(
nn.Linear(lstm_hidden_dim + lstm_hidden_dim, lstm_hidden_dim),
nn.Tanh(),
)
# Rule Predictions
self._rule_prediction_layer = nn.Sequential(
nn.Linear(lstm_hidden_dim, rule_embedding_dim),
# nn.Tanh()
)
self._rule_prediction_bias = nn.Parameter(torch.FloatTensor(self._num_rules).zero_())
@overrides
def forward(self,
encodings: torch.Tensor,
source_mask: torch.Tensor,
target_rules: torch.Tensor,
target_nonterminals: torch.Tensor,
target_mask: torch.Tensor,
meta_field: List[Dict] = None,
):
"""
:param encodings: (batch_size, length, hidden_dim)
:param source_mask: (batch_size, length)
        :param target_rules: (batch_size, target_length)
        :param target_nonterminals: (batch_size, target_length)
        :param target_mask: (batch_size, target_length)
        :param meta_field:
"""
if self.training:
output_dict = self.train_decode(encodings, source_mask, target_rules, target_nonterminals, target_mask)
else:
output_dict = self.eval_decode(encodings, source_mask)
return output_dict
def train_decode(self, encodings, source_mask, target_rules, target_nonterminals, target_mask):
source_length = encodings.size(1)
batch_size, target_length = target_rules.size()
prev_attention_context = encodings.new_zeros((batch_size, 1, self._lstm_hidden_dim))
source_encoding_key, source_encoding_value = self._transform_encodings_key(encodings), self._transform_encodings_value(encodings)
h = encodings.new_zeros([self._num_lstm_layers, batch_size, self._lstm_hidden_dim])
c = encodings.new_zeros([self._num_lstm_layers, batch_size, self._lstm_hidden_dim])
decoder_hidden_state = (h, c)
rule_probs = list()
for ts in range(target_length - 1):
# Input
# (batch_size, 1, rule_embedding_size)
prev_rule_embedded = self._rule_embedder(target_rules[:, ts].unsqueeze(1).long())
prev_embedded = prev_rule_embedded
# (batch_size, 1, nonterminal_embedding_size)
curr_nonterminal_embedded = self._nonterminal_embedder(target_nonterminals[:, ts].unsqueeze(1).long())
decoder_inputs = torch.cat([prev_embedded, curr_nonterminal_embedded, prev_attention_context], dim=-1)
# Step
decoder_outputs, context, attention_vector, decoder_hidden_state = self.take_decode_step(
source_encoding_key,
source_encoding_value,
source_mask,
decoder_inputs,
decoder_hidden_state
)
            # (batch_size, 1, lstm_hidden_dim)
prev_attention_context = attention_vector
# Production Rules
# (batch_size, num_rules)
rule_scores = F.linear(
self._rule_prediction_layer(attention_vector.squeeze(1)),
weight=self._rule_embedder.weight,
bias=self._rule_prediction_bias
)
for bidx in range(batch_size):
# Keep Valid Rule
nonterminal_id = int(target_nonterminals[bidx, ts])
if nonterminal_id == self._nonterminal_pad_index or nonterminal_id == self._nonterminal_end_index:
active_rule_ids = [0]
else:
active_rule_ids = self._grammar.get_production_rule_ids_by_nonterminal_id(nonterminal_id)
active_rule_mask = nn_utils.get_one_hot_mask(self._num_rules, active_rule_ids).to(rule_scores.device)
rule_scores[bidx, :].masked_fill_((1 - active_rule_mask).bool(), float('-inf'))
curr_rule_probs = F.softmax(rule_scores, dim=-1)
rule_probs.append(curr_rule_probs)
rule_probs = torch.stack(rule_probs, dim=0).permute(1, 0, 2)
# Loss
loss = self.get_loss(rule_probs=rule_probs, target_rules=target_rules[:, 1:].long(), target_mask=target_mask[:, 1:].float())
# Predicted Labels
_, predicted_rules = rule_probs.max(dim=-1)
output_dict = {"loss": loss, "predicted_rules": predicted_rules}
return output_dict
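    # Illustrative note (not original code): decoding is grammar-constrained.  At each
    # step the LSTM input is [previous rule embedding ; current nonterminal embedding ;
    # previous attention context], and only rules whose left-hand side matches the
    # current nonterminal stay active -- e.g. if a (made-up) nonterminal expands via
    # rule ids {3, 7}, every other rule's score is filled with -inf so the softmax
    # spreads mass over {3, 7} only.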
def eval_decode(self, encodings, source_mask):
batch_size, source_length, _ = encodings.size()
prev_attention_context = encodings.new_zeros((batch_size, 1, self._lstm_hidden_dim))
source_encoding_key, source_encoding_value = self._transform_encodings_key(encodings), self._transform_encodings_value(encodings)
h = encodings.new_zeros([self._num_lstm_layers, batch_size, self._lstm_hidden_dim])
c = encodings.new_zeros([self._num_lstm_layers, batch_size, self._lstm_hidden_dim])
decoder_hidden_state = (h, c)
rule_pad_index_tensor = torch.Tensor([self._rule_pad_index]).long().to(encodings.device)
nonterminal_pad_index_tensor = torch.Tensor([self._nonterminal_pad_index]).long().to(encodings.device)
ast_results, is_complete = list(), list()
for i in range(batch_size):
ast_results.append(self._ast_class(root_rule=self._root_rule))
is_complete.append(False)
for ts in range(self._max_target_length):
prev_embedded = list()
curr_nonterminal_embedded = list()
for bidx, ast in enumerate(ast_results):
if is_complete[bidx]:
# PAD
prev_embedded.append(self._rule_embedder(rule_pad_index_tensor))
curr_nonterminal_embedded.append(self._nonterminal_embedder(nonterminal_pad_index_tensor))
else:
last_production_rule = ast.get_last_production_rule()
# Rule
rule_index_tensor = torch.Tensor([last_production_rule.rule_id]).long().to(encodings.device)
prev_embedded.append(self._rule_embedder(rule_index_tensor))
# Curr Non-Terminal
curr_non_terminal_id = self._grammar.get_non_terminal_id(ast.get_curr_non_terminal())
nonterminal_index_tensor = torch.Tensor([curr_non_terminal_id]).long().to(encodings.device)
curr_nonterminal_embedded.append(
self._nonterminal_embedder(nonterminal_index_tensor)
)
# (batch_size, 1, rule_embedding_size)
prev_embedded = torch.stack(prev_embedded, dim=0)
# (batch_size, 1, type_embedding_size)
curr_nonterminal_embedded = torch.stack(curr_nonterminal_embedded, dim=0)
decoder_inputs = torch.cat([prev_embedded, curr_nonterminal_embedded, prev_attention_context], dim=-1)
# Step
decoder_outputs, context, attention_vector, decoder_hidden_state = self.take_decode_step(
source_encoding_key,
source_encoding_value,
source_mask,
decoder_inputs,
decoder_hidden_state
)
prev_attention_context = attention_vector
# Production Rules
# (batch_size, num_rules)
rule_scores = F.linear(
self._rule_prediction_layer(attention_vector.squeeze(1)),
weight=self._rule_embedder.weight,
bias=self._rule_prediction_bias
)
is_finish = True
for bidx, ast in enumerate(ast_results):
if not is_complete[bidx]:
curr_non_terminal = ast.get_curr_non_terminal()
# Rule
active_rule_ids = self._grammar.get_production_rule_ids_by_nonterminal(curr_non_terminal)
active_rule_mask = nn_utils.get_one_hot_mask(self._num_rules, active_rule_ids).to(rule_scores.device)
brule_scores = rule_scores[bidx, :].masked_fill((1 - active_rule_mask).bool(), float('-inf'))
curr_rule_probs = F.softmax(brule_scores, dim=-1)
rule_id = int(torch.argmax(curr_rule_probs))
production_rule = self._grammar.get_production_rule_by_id(rule_id)
ast.add_rule(production_rule)
if ast.is_complete:
is_complete[bidx] = True
else:
is_finish = False
if is_finish:
break
# Pad For evaluation
predicted_rules = list()
max_length = 0
for ast in ast_results:
rules = ast.get_production_rules()
rule_ids = [rule.rule_id for rule in rules]
predicted_rules.append(np.array(rule_ids, dtype=int))
if len(rules) > max_length:
max_length = len(rules)
# Pad
for i in range(batch_size):
if len(predicted_rules[i]) < max_length:
predicted_rules[i] = np.concatenate(
[predicted_rules[i], np.ones(max_length - len(predicted_rules[i])) * self._rule_pad_index],
axis=0
)
predicted_rules = torch.from_numpy(np.array(predicted_rules, dtype=int)).to(encodings.device)
output_dict = {
"loss": torch.Tensor([0.0]).to(encodings.device),
"predicted_rules": predicted_rules.long()
}
return output_dict
def take_decode_step(self,
source_encoding_key: torch.Tensor,
source_encoding_value: torch.Tensor,
source_mask: torch.Tensor,
decoder_inputs: torch.Tensor,
decoder_hidden_state: Tuple[torch.Tensor, torch.Tensor],
):
"""
:param source_encoding_key: (batch_size, length, hidden_dim)
:param source_encoding_value: (batch_size, length, hidden_dim)
:param source_mask: (batch_size, length)
        :param decoder_inputs: (batch_size, 1, lstm_hidden_dim + rule_embedding_dim + nonterminal_embedding_dim)
        :param decoder_hidden_state: (h, c)
:return
decoder_outputs: (batch_size, 1, lstm_hidden_dim)
context: (batch_size, 1, hidden_dim)
att: (batch_size, 1, lstm_hidden_dim)
decoder_hidden_state: (h, c)
"""
decoder_outputs, (h, c) = self._decoder_lstm(decoder_inputs.permute(1, 0, 2), decoder_hidden_state)
decoder_hidden_state = (h, c)
# (batch_size, 1, lstm_hidden_dim)
decoder_outputs = decoder_outputs.permute(1, 0, 2)
# Attention
# (batch_size, 1, length)
weights = decoder_outputs.bmm(source_encoding_key.permute(0, 2, 1))
weights = weights.masked_fill((1 - source_mask.unsqueeze(1)).bool(), float('-inf'))
weights = F.softmax(weights, dim=-1)
# (batch_size, 1, hidden_dim)
context = weights.bmm(source_encoding_value)
att = self._attention_hidden_layer(torch.cat([decoder_outputs, context], dim=-1))
att = self._attn_dropout(att)
return decoder_outputs, context, att, decoder_hidden_state
def get_loss(self,
rule_probs: torch.FloatTensor,
target_rules: torch.LongTensor,
target_mask: torch.FloatTensor,
):
"""
        :param rule_probs: (batch_size, target_length, num_rules)
        :param target_rules: (batch_size, target_length)
        :param target_mask: (batch_size, target_length)
"""
batch_size, target_length = target_rules.size()
rule_probs = torch.gather(
rule_probs.reshape(-1, self._num_rules),
dim=1,
index=target_rules.reshape(-1).unsqueeze(-1).long()
)
rule_probs = rule_probs.reshape(batch_size, target_length)
rule_log_probs = (rule_probs + 1e-10).log()
rule_log_probs *= target_mask.float()
rule_normalize_factor = target_mask.sum(-1)
rule_normalize_factor[rule_normalize_factor == 0] = 1
rule_loss = rule_log_probs.sum(-1) / rule_normalize_factor.float()
rule_loss = -1 * (rule_loss.sum() / batch_size)
return rule_loss
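# Editor's note: the following function is an illustrative sketch added for
# clarity; it is not part of the original module. It reproduces, on tiny made-up
# tensors, the masked and length-normalized negative log-likelihood computed by
# get_loss() above, assuming only that `torch` is imported at the top of this
# file as it is for the code above.
def _get_loss_shape_sketch():
    batch_size, target_length, num_rules = 2, 3, 5
    # Uniform predictions and all-zero targets, purely for illustration.
    rule_probs = torch.full((batch_size, target_length, num_rules), 1.0 / num_rules)
    target_rules = torch.zeros(batch_size, target_length, dtype=torch.long)
    # The second sequence has a single valid step; padded steps get zero weight.
    target_mask = torch.tensor([[1.0, 1.0, 1.0],
                                [1.0, 0.0, 0.0]])
    picked = torch.gather(
        rule_probs.reshape(-1, num_rules),
        dim=1,
        index=target_rules.reshape(-1).unsqueeze(-1)
    ).reshape(batch_size, target_length)
    log_probs = (picked + 1e-10).log() * target_mask
    # Normalize by the number of unmasked steps per sequence (at least 1).
    norm = target_mask.sum(-1).clamp(min=1.0)
    # Average per-sequence log-probability over the batch, negated.
    return -(log_probs.sum(-1) / norm).sum() / batch_size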
| 14,904 | 44.166667 | 137 |
py
|
Unimer
|
Unimer-master/neural_models/modules/gnn_multi_head_attention.py
|
# coding=utf8
import math
import torch
import numpy as np
import torch.nn as nn
from allennlp.nn import util
from torch.nn import Parameter
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_
class GNNMatrixMultiHeadAttention(nn.Module):
def __init__(self, d_model: int, nhead: int, nlabels: int,
dropout: float = 0.1):
super().__init__()
assert d_model % nhead == 0
self._d_model = d_model
self._nhead = nhead
self._nlabels = nlabels
self._d_q = int(d_model / nhead)
self._w_q = nn.Linear(d_model, d_model)
self._attention_temperature = np.power(self._d_q, 0.5)
self._w_ks = Parameter(torch.Tensor(nlabels, d_model, d_model))
self._w_h = nn.Linear(d_model, d_model)
self._dropout = nn.Dropout(dropout)
self._attn_dropout = nn.Dropout(dropout)
self._reset_parameters()
def _reset_parameters(self):
xavier_uniform_(self._w_q.weight)
xavier_uniform_(self._w_h.weight)
xavier_uniform_(self._w_ks)
def forward(self, q: torch.Tensor, k: torch.Tensor, edge_mask: torch.Tensor,
padding_mask: torch.Tensor):
"""
q and k must have the same dimension
:param q: (batch_size, len_q, d_model)
:param k: (batch_size, len_k, d_model)
:param edge_mask: (batch_size, len_q, len_k, nlabels)
:param padding_mask: (batch_size, len_q, len_k)
:return:
shape: (batch_size, len_q, d_model)
"""
sz_b, len_q, _ = q.size()
sz_b, len_k, _ = k.size()
# shape: (nlabels, batch_size, len_q, len_k)
mask = edge_mask.permute(3, 0, 1, 2)
query = self._w_q(q).view(sz_b, len_q, self._nhead, self._d_q)
# shape: (nhead * sz_b, len_q, d_q)
query = query.permute(2, 0, 1, 3).contiguous().view(-1, len_q, self._d_q)
# shape: (nhead * sz_b, len_k, d_q)
edge_values = list()
attention_weights = list()
for i in range(self._nlabels):
w = self._w_ks[i]
ek = F.linear(k, w).view(sz_b, len_k, self._nhead, self._d_q)
# shape: (nhead * sz_b, len_k, d_q)
ek = ek.permute(2, 0, 1, 3).contiguous().view(-1, len_k, self._d_q)
edge_values.append(ek)
aw = query.bmm(ek.permute(0, 2, 1))
attention_weights.append(aw / self._attention_temperature)
# (nlabels, sz_b * nhead, len_q, len_k)
attention_weights = torch.stack(attention_weights, dim=0)
# (nlabels, sz_b * nhead, len_q, len_k)
attention_weights = attention_weights * mask.repeat(1, self._nhead, 1, 1)
attention_weights = attention_weights.sum(dim=0)
# shape: (nhead * sz_b, len_q, len_k)
attention_weights = attention_weights.masked_fill(
padding_mask.repeat(self._nhead, 1, 1).bool(),
float('-inf'),
)
attention_weights = F.softmax(attention_weights, dim=-1)
attention_weights = self._attn_dropout(attention_weights)
output = attention_weights.new_zeros((self._nhead * sz_b, len_q, self._d_q))
for i in range(self._nlabels):
v, m = edge_values[i], mask[i]
_m = m.repeat(self._nhead, 1, 1)
output += (attention_weights * _m).bmm(v)
output = output.view(self._nhead, sz_b, len_q, self._d_q)
output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1)
output = self._w_h(output)
return output
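# Editor's note: an illustrative usage sketch added for clarity; it is not part
# of the original module. It shows the tensor shapes GNNMatrixMultiHeadAttention
# expects: q and k are (batch, length, d_model), edge_mask holds one indicator
# plane per edge label, and padding_mask marks key positions to be suppressed.
# All sizes and the self-loop edge labelling below are made up for illustration.
def _matrix_attention_usage_sketch():
    d_model, nhead, nlabels = 8, 2, 3
    batch_size, length = 2, 5
    attn = GNNMatrixMultiHeadAttention(d_model=d_model, nhead=nhead, nlabels=nlabels)
    x = torch.randn(batch_size, length, d_model)
    # One-hot edge labels: connect every token to itself with label 0.
    edge_mask = torch.zeros(batch_size, length, length, nlabels)
    edge_mask[:, torch.arange(length), torch.arange(length), 0] = 1.0
    # Entries equal to 1 are filled with -inf before the softmax; keep all visible here.
    padding_mask = torch.zeros(batch_size, length, length)
    # Output shape: (batch_size, length, d_model)
    return attn(q=x, k=x, edge_mask=edge_mask, padding_mask=padding_mask)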
class GNNVectorMultiHeadAttention(nn.Module):
def __init__(self, d_model: int, nhead: int, nlabels: int,
dropout: float = 0.1):
super().__init__()
assert d_model % nhead == 0
self._d_model = d_model
self._nhead = nhead
self._nlabels = nlabels
self._d_q = int(d_model / nhead)
self._w_q = nn.Linear(d_model, d_model)
self._attention_temperature = np.power(self._d_q, 0.5)
self._w_k = Parameter(torch.Tensor(d_model, d_model))
self._w_v = Parameter(torch.Tensor(d_model, d_model))
self._b_ks = Parameter(torch.Tensor(self._nlabels, d_model))
self._b_vs = Parameter(torch.Tensor(self._nlabels, d_model))
self._w_h = nn.Linear(d_model, d_model)
self._dropout = nn.Dropout(dropout)
self._attn_dropout = nn.Dropout(dropout)
self._reset_parameters()
def _reset_parameters(self):
xavier_uniform_(self._w_q.weight)
xavier_uniform_(self._w_h.weight)
xavier_uniform_(self._w_k)
xavier_uniform_(self._w_v)
xavier_uniform_(self._b_ks)
xavier_uniform_(self._b_vs)
def forward(self, q: torch.Tensor, k: torch.Tensor, edge_mask: torch.Tensor,
padding_mask: torch.Tensor):
"""
q and k must have the same dimension
:param q: (batch_size, len_q, d_model)
:param k: (batch_size, len_k, d_model)
:param edge_mask: (batch_size, len_q, len_k, nlabels)
:param padding_mask: (batch_size, len_q, len_k), where True values are positions that should be masked
with float('-inf') and False values will be unchanged.
:return:
shape: (batch_size, len_q, d_model)
"""
sz_b, len_q, _ = q.size()
sz_b, len_k, _ = k.size()
self._w_k.to(k.device)
query = self._w_q(q).view(sz_b, len_q, self._nhead, self._d_q)
# shape: (nhead * sz_b, len_q, d_q)
query = query.permute(2, 0, 1, 3).contiguous().view(-1, len_q, self._d_q)
# key
edge_vectors = torch.mm(edge_mask.reshape(-1, self._nlabels), self._b_ks).reshape(sz_b, len_q, len_k,
self._d_model)
# shape: (sz_b, len_k, d_model)
key = F.linear(k, self._w_k)
# shape: (sz_b, len_q, len_k, d_model)
key = key.unsqueeze(1).repeat(1, len_q, 1, 1)
key = edge_vectors + key
key = key.view(sz_b, len_q, len_k, self._nhead, self._d_q).permute(3, 0, 1, 2, 4)
# shape: (nhead * sz_b, len_q, len_k, d_q)
key = key.contiguous().view(-1, len_q, len_k, self._d_q)
mask = (edge_mask.sum(-1) > 0).float().repeat(self._nhead, 1, 1)
# shape: (nhead * sz_b, len_q, len_k)
attention_weights = torch.mul(query.unsqueeze(2).repeat(1, 1, len_k, 1), key).sum(-1)
attention_weights = attention_weights / self._attention_temperature
attention_weights = attention_weights * mask
attention_weights = attention_weights.masked_fill(
padding_mask.repeat(self._nhead, 1, 1).bool(),
float('-inf'),
)
attention_weights = F.softmax(attention_weights, dim=-1)
attention_weights = self._attn_dropout(attention_weights)
# value
# shape: (sz_b, len_k, d_model)
# value = F.linear(k, self._w_v)
# # shape: (sz_b, len_q, len_k, d_model)
# value = value.unsqueeze(1).repeat(1, len_q, 1, 1)
# value = edge_vectors + value
# value = value.view(sz_b, len_q, len_k, self._nhead, self._d_q).permute(3, 0, 1, 2, 4)
# # shape: (nhead * sz_b, len_q, len_k, d_q)
# value = value.contiguous().view(-1, len_q, len_k, self._d_q)
value = key
output = ((attention_weights * mask).unsqueeze(-1) * value).sum(2)
output = output.view(self._nhead, sz_b, len_q, self._d_q)
output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1)
output = self._w_h(output)
return output
class GNNVectorMultiHeadAttention2(nn.Module):
"""
Implementation based on "Self-Attention with Relative Position Representations"
According to Tensor2Tensor
https://github.com/tensorflow/tensor2tensor/blob/ab918e0d9592394614aa2e10cfc8f23e8cb24dfc/tensor2tensor/layers/common_attention.py
"""
def __init__(self, d_model: int, nhead: int, nlabels: int,
dropout: float = 0.1):
super().__init__()
assert d_model % nhead == 0
self._d_model = d_model
self._nhead = nhead
self._nlabels = nlabels
self._d_q = int(d_model / nhead)
self._attention_temperature = np.power(self._d_q, 0.5)
self._w_q = nn.Linear(d_model, d_model)
self._w_k = Parameter(torch.Tensor(d_model, d_model))
self._w_v = Parameter(torch.Tensor(d_model, d_model))
self._w_h = nn.Linear(d_model, d_model)
self._b_ks = Parameter(torch.Tensor(self._nlabels, self._d_q))
self._b_vs = Parameter(torch.Tensor(self._nlabels, self._d_q))
self._dropout = nn.Dropout(dropout)
self._attn_dropout = nn.Dropout(dropout)
self._reset_parameters()
def _reset_parameters(self):
xavier_uniform_(self._w_q.weight)
xavier_uniform_(self._w_h.weight)
xavier_uniform_(self._w_k)
xavier_uniform_(self._w_v)
xavier_uniform_(self._b_ks)
xavier_uniform_(self._b_vs)
def forward(self, q: torch.Tensor, k: torch.Tensor, edge_mask: torch.Tensor,
padding_mask: torch.Tensor):
"""
q and k must have the same dimension
:param q: (batch_size, len_q, d_model)
:param k: (batch_size, len_k, d_model)
:param edge_mask: (batch_size, len_q, len_k, nlabels)
:param padding_mask:(batch_size, len_q, len_k), where True values are positions that should be masked
with float('-inf') and False values will be unchanged.
:return:
shape: (batch_size, len_q, d_model)
"""
sz_b, len_q, _ = q.size()
sz_b, len_k, _ = k.size()
self._w_k.to(k.device)
query = self._w_q(q).view(sz_b, len_q, self._nhead, self._d_q)
# shape: (nhead * sz_b, len_q, d_q)
query = query.permute(2, 0, 1, 3).contiguous().view(-1, len_q, self._d_q)
# shape: (nhead * sz_b, len_q, len_k, d_q)
expanded_query = query.unsqueeze(2).repeat(1, 1, len_k, 1)
# Relation Embeddings
# shape: (sz_b, len_q, len_k, d_q)
key_relation_embeded = torch.mm(edge_mask.reshape(-1, self._nlabels), self._b_ks).reshape(sz_b, len_q, len_k,
self._d_q)
# shape: (nhead * sz_b, len_q, len_k, d_q)
key_relation_embeded = key_relation_embeded.repeat(self._nhead, 1, 1, 1)
# shape: (sz_b, len_k, d_model)
key = F.linear(k, self._w_k)
# shape: (nhead * sz_b, len_k, d_q)
key = key.view(sz_b, len_k, self._nhead, self._d_q).permute(2, 0, 1, 3).contiguous().view(-1, len_k, self._d_q)
# shape: (nhead * sz_b, len_q, len_k)
qk_weights = query.bmm(key.permute(0, 2, 1))
# shape: (nhead * sz_b, len_q, len_k)
qkr_weights = torch.mul(expanded_query, key_relation_embeded).sum(-1)
attention_weights = qk_weights + qkr_weights
output_attention_weights = attention_weights / self._attention_temperature
# attention_weights = attention_weights.masked_fill(
# padding_mask.repeat(self._nhead, 1, 1).bool(),
# float('-inf'),
# )
# relation mask
# shape: (nhead * sz_b, len_q, len_k)
        # Note that we need to ensure that there is at least one relation for each position
# eye_mask = torch.eye(len_q).unsqueeze(0).repeat(sz_b, 1, 1).to(edge_mask.device)
# relation_mask = ((edge_mask.sum(-1) + eye_mask + (1 - padding_mask)) == 0).repeat(self._nhead, 1, 1)
relation_mask = ((edge_mask.sum(-1) + (1 - padding_mask)) == 0).repeat(self._nhead, 1, 1)
attention_weights = output_attention_weights.masked_fill(
relation_mask.bool(),
float('-inf'),
)
attention_weights = F.softmax(attention_weights, dim=-1)
attention_weights = attention_weights.masked_fill(
relation_mask.bool(),
0.0
)
# Remove nan
# attention_weights[attention_weights != attention_weights] = 0
attention_weights = self._attn_dropout(attention_weights)
# Value Relation Embeddings
# shape: (sz_b, len_q, len_k, d_q)
value_relation_embeded = torch.mm(edge_mask.reshape(-1, self._nlabels), self._b_vs).reshape(sz_b, len_q, len_k,
self._d_q)
# shape: (nhead * sz_b, len_q, len_k, d_q)
value_relation_embeded = value_relation_embeded.repeat(self._nhead, 1, 1, 1)
# shape: (sz_b, len_k, d_model)
value = F.linear(k, self._w_v)
# shape: (nhead * sz_b, len_k, d_q)
value = value.view(sz_b, len_k, self._nhead, self._d_q).permute(2, 0, 1, 3).contiguous().view(-1, len_k,
self._d_q)
# shape: (nhead * sz_b, len_q, d_q)
qv_output = attention_weights.bmm(value)
# shape: (nhead * sz_b, len_q, d_q)
qvr_output = torch.mul(attention_weights.unsqueeze(-1), value_relation_embeded).sum(2)
output = qv_output + qvr_output
output = output.view(self._nhead, sz_b, len_q, self._d_q)
output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1)
output = self._w_h(output)
return output, output_attention_weights
class GNNVectorContinuousMultiHeadAttention(nn.Module):
def __init__(self, d_model: int, nhead: int, dropout: float = 0.1):
super().__init__()
assert d_model % nhead == 0
self._d_model = d_model
self._nhead = nhead
self._d_q = int(d_model / nhead)
self._w_q = nn.Linear(d_model, d_model)
self._attention_temperature = np.power(self._d_q, 0.5)
self._w_k = Parameter(torch.Tensor(d_model, d_model))
self._w_v = Parameter(torch.Tensor(d_model, d_model))
self._w_h = nn.Linear(d_model, d_model)
self._dropout = nn.Dropout(dropout)
self._attn_dropout = nn.Dropout(dropout)
self._reset_parameters()
def _reset_parameters(self):
xavier_uniform_(self._w_q.weight)
xavier_uniform_(self._w_h.weight)
xavier_uniform_(self._w_k)
xavier_uniform_(self._w_v)
def forward(self, q: torch.Tensor, k: torch.Tensor, edge_mask: torch.Tensor,
padding_mask: torch.Tensor):
"""
q and k must have the same dimension
:param q: (batch_size, len_q, d_model)
:param k: (batch_size, len_k, d_model)
:param edge_mask: (batch_size, len_q, len_k, d_model)
:param padding_mask: (batch_size, len_q, len_k), where True values are positions that should be masked
with float('-inf') and False values will be unchanged.
:return:
shape: (batch_size, len_q, d_model)
"""
sz_b, len_q, _ = q.size()
sz_b, len_k, _ = k.size()
# query
query = self._w_q(q).view(sz_b, len_q, self._nhead, self._d_q)
# shape: (nhead * sz_b, len_q, d_q)
query = query.permute(2, 0, 1, 3).contiguous().view(-1, len_q, self._d_q)
# key
# shape: (sz_b, len_k, d_model)
key = F.linear(k, self._w_k)
# shape: (sz_b, len_q, len_k, d_model)
key = key.unsqueeze(1).repeat(1, len_q, 1, 1)
key = edge_mask + key
key = key.view(sz_b, len_q, len_k, self._nhead, self._d_q).permute(3, 0, 1, 2, 4)
# shape: (nhead * sz_b, len_q, len_k, d_q)
key = key.contiguous().view(-1, len_q, len_k, self._d_q)
# shape: (nhead * sz_b, len_q, len_k)
attention_weights = torch.mul(query.unsqueeze(2).repeat(1, 1, len_k, 1), key).sum(-1)
attention_weights = attention_weights / self._attention_temperature
attention_weights = attention_weights.masked_fill(
padding_mask.repeat(self._nhead, 1, 1).bool(),
float('-inf'),
)
attention_weights = F.softmax(attention_weights, dim=-1)
attention_weights = self._attn_dropout(attention_weights)
# value
# shape: (sz_b, len_k, d_model)
value = F.linear(k, self._w_v)
# shape: (sz_b, len_q, len_k, d_model)
value = value.unsqueeze(1).repeat(1, len_q, 1, 1)
value = edge_mask + value
value = value.view(sz_b, len_q, len_k, self._nhead, self._d_q).permute(3, 0, 1, 2, 4)
# shape: (nhead * sz_b, len_q, len_k, d_q)
value = value.contiguous().view(-1, len_q, len_k, self._d_q)
# shape: (nhead * sz_b, len_q, d_p)
output = (attention_weights.unsqueeze(-1) * value).sum(2)
output = output.view(self._nhead, sz_b, len_q, self._d_q)
output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1)
output = self._w_h(output)
return output
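# Editor's note: an illustrative usage sketch added for clarity; it is not part
# of the original module. Unlike the discrete variants above, the continuous
# variant takes edge_mask as dense relation embeddings of shape
# (batch, len_q, len_k, d_model) that are added to the projected keys and values.
# All sizes are made up for illustration.
def _continuous_attention_usage_sketch():
    d_model, nhead = 8, 2
    batch_size, length = 2, 4
    attn = GNNVectorContinuousMultiHeadAttention(d_model=d_model, nhead=nhead)
    x = torch.randn(batch_size, length, d_model)
    relation_embeddings = torch.randn(batch_size, length, length, d_model)
    # Nothing is masked in this toy example (True/1 would mean "mask out").
    padding_mask = torch.zeros(batch_size, length, length)
    # Output shape: (batch_size, length, d_model)
    return attn(q=x, k=x, edge_mask=relation_embeddings, padding_mask=padding_mask)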
| 17,043 | 40.77451 | 134 |
py
|
Unimer
|
Unimer-master/neural_models/modules/gnn_encoder.py
|
# coding=utf8
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import MultiheadAttention
from .gnn_multi_head_attention import GNNMatrixMultiHeadAttention, GNNVectorMultiHeadAttention, \
GNNVectorContinuousMultiHeadAttention, GNNVectorMultiHeadAttention2
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def get_decode_edge_mask(tgt, max_decode_clip_range):
"""
:param max_decode_clip_range:
:param tgt: (tgt_length, batch_size, d_model)
:return:
    (batch_size, max_decode_clip_range + 1, tgt_length, tgt_length)
"""
tgt_length, batch_size, _ = tgt.size()
edge_mask = list()
i = 0
while i < tgt_length and i < max_decode_clip_range + 1:
mask = torch.diag(tgt.new_ones(tgt_length - i))
if mask.size(0) == tgt_length:
edge_mask.append(mask)
else:
mask = F.pad(mask, [0, i, i, 0], mode='constant', value=0)
edge_mask.append(mask)
i += 1
if i < max_decode_clip_range + 1:
edge_mask = torch.stack(edge_mask, dim=0)
# shape: (tgt_length, tgt_length, tgt_length)
edge_mask = torch.cat((edge_mask, tgt.new_zeros([max_decode_clip_range - i + 1,
tgt_length, tgt_length])), dim=0)
else:
        # i == max_decode_clip_range + 1
if i < tgt_length:
edge_mask[-1] = torch.tril(tgt.new_ones([tgt_length, tgt_length]),
diagonal=-1 * max_decode_clip_range)
edge_mask = torch.stack(edge_mask, dim=0)
edge_mask = edge_mask.unsqueeze(0).repeat(batch_size, 1, 1, 1)
return edge_mask
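# Editor's note: an illustrative sketch added for clarity; it is not part of the
# original module. It shows the banded label planes produced by
# get_decode_edge_mask for a tiny target. With tgt_length = 4 and
# max_decode_clip_range = 2 the result has clip_range + 1 planes per batch
# element: the diagonal (offset 0), the first sub-diagonal (offset 1), and a
# final plane collecting every position two or more steps back.
def _decode_edge_mask_sketch():
    tgt = torch.zeros(4, 1, 8)  # (tgt_length, batch_size, d_model)
    masks = get_decode_edge_mask(tgt, max_decode_clip_range=2)
    # masks.shape == (1, 3, 4, 4); masks[0, 0] is the identity matrix and
    # masks[0, 2] equals torch.tril(torch.ones(4, 4), diagonal=-2).
    return masks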
class GNNTransformerEncoderLayer(nn.Module):
def __init__(self, d_model: int, nhead: int, dim_feedforward: int, nlabels: int,
dropout=0.1, is_matrix=True, is_discrete: bool = True):
super(GNNTransformerEncoderLayer, self).__init__()
if is_matrix:
self.self_attn = GNNMatrixMultiHeadAttention(d_model, nhead, nlabels, dropout)
else:
print("GNN Vector Multi Head Attention")
if is_discrete:
# self.self_attn = GNNVectorMultiHeadAttention(d_model, nhead, nlabels, dropout)
self.self_attn = GNNVectorMultiHeadAttention2(d_model, nhead, nlabels, dropout)
else:
self.self_attn = GNNVectorContinuousMultiHeadAttention(d_model, nhead, dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
def forward(self, src, edge_mask, padding_mask):
"""
Each sub-layer is followed by a residual connection and layer normalization
:param src: (batch_size, src_length, d_model)
        :param edge_mask: (batch_size, src_length, src_length, nlabels)
:param padding_mask: (batch_size, src_length, src_length), where True values are positions that should be masked
with float('-inf') and False values will be unchanged.
:return:
"""
        attn_output = self.self_attn.forward(q=src, k=src, edge_mask=edge_mask, padding_mask=padding_mask)
        # The matrix variant returns only the output; the vector variants also return attention weights.
        src2, attention_weights = attn_output if isinstance(attn_output, tuple) else (attn_output, None)
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(F.relu(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src, attention_weights
class GNNTransformerEncoderWithMemoryLayer(nn.Module):
def __init__(self, d_model: int, nhead: int, dim_feedforward: int, memory_nlabels: int, self_nlabels: int,
dropout: float = 0.1, is_matrix: bool = True, kdim: int = None, vdim: int = None):
super(GNNTransformerEncoderWithMemoryLayer, self).__init__()
if is_matrix:
self.attn = GNNMatrixMultiHeadAttention(d_model, nhead, memory_nlabels + self_nlabels, dropout)
else:
print("GNN Vector Multi Head Attention")
self.attn = GNNVectorMultiHeadAttention2(d_model, nhead, memory_nlabels + self_nlabels, dropout)
self._memory_nlabels = memory_nlabels
self._self_nlabels = self_nlabels
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
def forward(self, memory, memory_edge_mask, memory_padding_mask, src, src_edge_mask, src_padding_mask):
r"""Pass the inputs (and mask) through the decoder layer.
Args:
tgt: the sequence to the decoder layer (required).
            memory: the sequence from the last layer of the encoder (required).
tgt_mask: the mask for the tgt sequence (optional).
memory_mask: the mask for the memory sequence (optional).
tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
memory_key_padding_mask: the mask for the memory keys per batch (optional).
Shape:
:param memory: (batch_size, memory_length, d_model)
:param memory_edge_mask: (batch_size, src_length, memory_length, memory_nlabels)
:param memory_padding_mask: (batch_size, src_length, memory_length), where True values are positions that should be masked
with float('-inf') and False values will be unchanged.
:param src: (batch_size, src_length, d_model)
:param src_edge_mask: (batch_size, src_length, src_length, nlabels,)
:param src_padding_mask: (batch_size, src_length, src_length), where True values are positions that should be masked
with float('-inf') and False values will be unchanged.
"""
# shape: (batch_size, memory_length + src_length, d_model)
key = torch.cat([memory, src], dim=1)
batch_size, src_length, memory_length, memory_nlabels = memory_edge_mask.size()
self_nlabels = src_edge_mask.size(-1)
# shape: (batch_size, src_length, memory_length, memory_nlabels + self_nlabels, )
extended_memory_edge_mask = torch.cat([memory_edge_mask, memory_edge_mask.new_zeros((batch_size, src_length, memory_length, self_nlabels,))], dim=-1)
# shape: (batch_size, src_length, src_length, memory_nlabels + self_nlabels)
extended_src_edge_mask = torch.cat([src_edge_mask.new_zeros((batch_size, src_length, src_length, memory_nlabels)), src_edge_mask], dim=-1)
# shape: (batch_size, src_length, memory_length + src_length, memory_nlabels + self_nlabels)
edge_mask = torch.cat([extended_memory_edge_mask, extended_src_edge_mask], dim=2)
# shape: (batch_size, src_length, memory_length + src_length)
padding_mask = torch.cat([memory_padding_mask, src_padding_mask], dim=-1)
        attn_output = self.attn.forward(q=src, k=key, edge_mask=edge_mask, padding_mask=padding_mask)
        # The vector attention variant also returns attention weights; keep only the output here.
        src2 = attn_output[0] if isinstance(attn_output, tuple) else attn_output
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(F.relu(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
class GNNTransformerDecoderLayer(nn.Module):
def __init__(self, d_model: int, nhead: int, dim_feedforward: int, nlabels: int,
dropout: float = 0.1, is_matrix: bool = True, kdim: int = None, vdim: int = None):
super(GNNTransformerDecoderLayer, self).__init__()
if is_matrix:
self.self_attn = GNNMatrixMultiHeadAttention(d_model, nhead, nlabels, dropout)
else:
print("GNN Vector Multi Head Attention")
self.self_attn = GNNVectorMultiHeadAttention2(d_model, nhead, nlabels, dropout)
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout, kdim=kdim, vdim=vdim)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
def forward(self, tgt, edge_mask, tgt_padding_mask, memory, memory_mask=None, memory_key_padding_mask=None):
r"""Pass the inputs (and mask) through the decoder layer.
Args:
tgt: the sequence to the decoder layer (required).
            memory: the sequence from the last layer of the encoder (required).
tgt_mask: the mask for the tgt sequence (optional).
memory_mask: the mask for the memory sequence (optional).
tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
memory_key_padding_mask: the mask for the memory keys per batch (optional).
Shape:
:param tgt: (tgt_length, batch_size, d_model)
:param edge_mask: (batch_size, nlabels, tgt_length, decode_length)
:param tgt_padding_mask: (batch_size, tgt_length, tgt_length)
:param memory: (src_length, batch_size, d_model)
:param memory_mask: (src_length, src_length)
:param memory_key_padding_mask: (batch_size, src_length)
"""
# shape: (batch_size, tgt_length, d_model)
permuted_tgt = tgt.permute(1, 0, 2)
tgt2, _ = self.self_attn(q=permuted_tgt, k=permuted_tgt, edge_mask=edge_mask, padding_mask=tgt_padding_mask)
tgt2 = tgt2.permute(1, 0, 2)
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2 = self.multihead_attn(tgt, memory, memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(F.relu(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
class GNNTransformerEncoder(nn.Module):
def __init__(self, encoder_layer: GNNTransformerEncoderLayer,
num_layers: int, norm=None, output_weights: bool = False):
super(GNNTransformerEncoder, self).__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
self._output_weights = output_weights
def forward(self, src, edge_mask, padding_mask=None):
"""
:param src: (src_length, batch_size, encoder_d_model)
:param edge_mask: (batch_size, src_length, src_length, nlabels,) | (batch_size, num_layers, src_length, src_length, nlabels)
:param padding_mask: (batch_size, src_length)
where True values are positions that should be masked with float('-inf') and False values will be unchanged.
:return:
(src_length, batch_size, d_model)
"""
# shape: (batch_size, src_length, d_model)
length, batch_size, _ = src.size()
if padding_mask is None:
padding_mask = edge_mask.new_ones((batch_size, length, length)).float()
else:
padding_mask = padding_mask.unsqueeze(1).expand(batch_size, length, length).float()
# shape: (batch_size, src_length, d_model)
output = src.permute(1, 0, 2)
layer_weights = list()
for i in range(self.num_layers):
if len(edge_mask.size()) == 4:
# (nhead * batch_size, src_length, src_length)
output, attention_weights = self.layers[i](output, edge_mask=edge_mask, padding_mask=padding_mask)
layer_weights.append(attention_weights)
else:
# (nhead * batch_size, src_length, src_length)
output, attention_weights = self.layers[i](output, edge_mask=edge_mask[:, i, :, :, :], padding_mask=padding_mask)
layer_weights.append(attention_weights)
if self.norm:
output = self.norm(output)
output = output.permute(1, 0, 2)
if self._output_weights:
# (num_layers, nhead * batch_size, src_length, src_length)
layer_weights = torch.stack(layer_weights, dim=0)
# (nhead, batch_size, num_layers, src_length, src_length)
layer_weights = layer_weights.permute(1, 0, 2, 3).contiguous().reshape(-1, batch_size, self.num_layers, length, length)
# (batch_size, num_layers, nhead, src_length, src_length)
layer_weights = layer_weights.permute(1, 2, 0, 3, 4)
return output, layer_weights
return output
class GNNTransformerEncoderWithMemory(nn.Module):
def __init__(self, encoder_layer: GNNTransformerEncoderWithMemoryLayer,
num_layers: int, norm=None):
super(GNNTransformerEncoderWithMemory, self).__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
def forward(self, memory, memory_edge_mask, memory_padding_mask, src, src_edge_mask, src_padding_mask):
"""
:param memory: (memory_length, batch_size, d_model)
:param memory_edge_mask: (batch_size, src_length, memory_length, memory_nlabels)
:param memory_padding_mask: (batch_size, src_length, memory_length), where True values are positions that should be masked
with float('-inf') and False values will be unchanged.
:param src: (src_length, batch_size, d_model)
:param src_edge_mask: (batch_size, src_length, src_length, nlabels,)
:param src_padding_mask: (batch_size, src_length, src_length), where True values are positions that should be masked
with float('-inf') and False values will be unchanged.
:return:
(src_length, batch_size, d_model)
"""
# shape: (batch_size, src_length, d_model)
output = src.permute(1, 0, 2)
permuted_memory = memory.permute(1, 0, 2)
for i in range(self.num_layers):
output = self.layers[i](permuted_memory, memory_edge_mask, memory_padding_mask, output, src_edge_mask, src_padding_mask)
if self.norm:
output = self.norm(output)
output = output.permute(1, 0, 2)
return output
class GNNTransformerDecoder(nn.Module):
r"""TransformerDecoder is a stack of N decoder layers
Args:
decoder_layer: an instance of the TransformerDecoderLayer() class (required).
num_layers: the number of sub-decoder-layers in the decoder (required).
norm: the layer normalization component (optional).
"""
def __init__(self, decoder_layer, num_layers, norm=None):
super(GNNTransformerDecoder, self).__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
def forward(self, tgt, edge_mask, memory, tgt_padding_mask=None,
memory_mask=None,
memory_key_padding_mask=None):
r"""Pass the inputs (and mask) through the decoder layer in turn.
:param memory_key_padding_mask: (batch_size, src_length)
:param memory_mask: (src_length, src_length)
:param memory: (src_length, batch_size, d_model)
:param tgt: (tgt_length, batch_size, d_model)
:param edge_mask: (batch_size, nlabels, tgt_length, tgt_length)
:param tgt_padding_mask: (batch_size, tgt_length, tgt_length), where True values are positions that should be masked with float('-inf') and False values will be unchanged.
:return:
(src_length, batch_size, d_model)
"""
output = tgt
tgt_length, batch_size, _ = tgt.size()
if tgt_padding_mask is None:
_tgt_padding_mask = tgt.new_ones((batch_size, tgt_length, tgt_length))
else:
_tgt_padding_mask = tgt_padding_mask
for i in range(self.num_layers):
output = self.layers[i](output, memory=memory, tgt_padding_mask=_tgt_padding_mask,
edge_mask=edge_mask, memory_mask=memory_mask,
memory_key_padding_mask=memory_key_padding_mask)
if self.norm:
output = self.norm(output)
return output
class TransformerDecoderLayer(nn.Module):
r"""TransformerDecoderLayer is made up of self-attn, multi-head-attn and feedforward network.
This standard decoder layer is based on the paper "Attention Is All You Need".
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
in a different way during application.
Args:
d_model: the number of expected features in the input (required).
nhead: the number of heads in the multiheadattention models (required).
dim_feedforward: the dimension of the feedforward network model (default=2048).
dropout: the dropout value (default=0.1).
"""
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, kdim=None, vdim=None):
super(TransformerDecoderLayer, self).__init__()
self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = MultiheadAttention(d_model, nhead, dropout=dropout, kdim=kdim, vdim=vdim)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
def forward(self, tgt, memory, tgt_mask=None, memory_mask=None,
tgt_key_padding_mask=None, memory_key_padding_mask=None):
r"""Pass the inputs (and mask) through the decoder layer.
Args:
tgt: the sequence to the decoder layer (required).
            memory: the sequence from the last layer of the encoder (required).
tgt_mask: the mask for the tgt sequence (optional).
memory_mask: the mask for the memory sequence (optional).
tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
memory_key_padding_mask: the mask for the memory keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
tgt2 = self.self_attn(tgt, tgt, tgt, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2 = self.multihead_attn(tgt, memory, memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(F.relu(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
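# Editor's note: an illustrative wiring sketch added for clarity; it is not part
# of the original module. It stacks GNNTransformerEncoderLayer inside
# GNNTransformerEncoder using the vector (relative-relation) attention, the
# variant whose forward returns both an output and attention weights, which is
# what the layer unpacks. All sizes, the self-loop edge labelling and the
# all-zeros padding mask are made up for illustration.
def _gnn_encoder_usage_sketch():
    d_model, nhead, dim_feedforward, nlabels, num_layers = 16, 4, 32, 3, 2
    layer = GNNTransformerEncoderLayer(d_model, nhead, dim_feedforward, nlabels,
                                       is_matrix=False, is_discrete=True)
    encoder = GNNTransformerEncoder(layer, num_layers=num_layers)
    src_length, batch_size = 5, 2
    src = torch.randn(src_length, batch_size, d_model)
    edge_mask = torch.zeros(batch_size, src_length, src_length, nlabels)
    edge_mask[:, torch.arange(src_length), torch.arange(src_length), 0] = 1.0
    # (batch_size, src_length); zeros mean "nothing is padding" in this toy case.
    padding_mask = torch.zeros(batch_size, src_length)
    # Output shape: (src_length, batch_size, d_model)
    return encoder(src, edge_mask, padding_mask=padding_mask)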
| 20,183 | 48.349633 | 179 |
py
|
Unimer
|
Unimer-master/neural_models/modules/grammar_copy_decoder_2.py
|
# coding=utf-8
import torch
import copy
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from overrides import overrides
from allennlp.modules import Embedding
from typing import Tuple, List, Dict
from .. import utils as nn_utils
class LSTMGrammarCopyDecoder(nn.Module):
def __init__(self,
grammar,
ast_class,
lstm_hidden_dim: int,
num_lstm_layers: int,
rule_pad_index: int,
rule_embedding_dim: int,
nonterminal_pad_index: int,
nonterminal_end_index: int,
nonterminal_embedding_dim: int,
source_encoding_dim: int,
dropout: float,
max_target_length: int,
):
super().__init__()
self._grammar = grammar
self._root_rule = grammar.get_production_rule_by_id(grammar.root_rule_id)
self._ast_class = ast_class
self._lstm_hidden_dim = lstm_hidden_dim
self._num_lstm_layers = num_lstm_layers
# Production Rules + PAD Rule
self._rule_pad_index = rule_pad_index
self._num_rules = grammar.num_rules + 1
self._rule_embedding_dim = rule_embedding_dim
print("Rule Pad Index: ", self._rule_pad_index)
# Non-Terminals + PAD Node
self._nonterminal_end_index = nonterminal_end_index
self._nonterminal_pad_index = nonterminal_pad_index
self._num_nonterminals = grammar.num_non_terminals + 2
self._nonterminal_embedding_dim = nonterminal_embedding_dim
print("Non-Terminal Pad Index: ", self._nonterminal_pad_index)
print("Non-Terminal End Index: ", self._nonterminal_end_index)
self._source_encoding_dim = source_encoding_dim
self._max_target_length = max_target_length
self._transform_encodings_key = nn.Linear(source_encoding_dim, self._lstm_hidden_dim)
self._transform_encodings_value = nn.Linear(source_encoding_dim, self._lstm_hidden_dim)
# Input: (Attention Context + Previous Rule Embedding + Current Nonterminal Embedding)
decode_lstm_input_dim = lstm_hidden_dim + rule_embedding_dim + nonterminal_embedding_dim
self._decoder_lstm = nn.LSTM(
input_size=decode_lstm_input_dim,
hidden_size=lstm_hidden_dim,
num_layers=num_lstm_layers,
batch_first=False
)
self._attn_dropout = nn.Dropout(p=dropout)
self._decode_dropout = nn.Dropout(p=dropout)
self._rule_embedder = Embedding(self._num_rules, rule_embedding_dim)
self._nonterminal_embedder = Embedding(self._num_nonterminals, nonterminal_embedding_dim)
self._attention_hidden_layer = nn.Sequential(
nn.Linear(lstm_hidden_dim + lstm_hidden_dim, lstm_hidden_dim),
nn.Tanh(),
)
# Rule Predictions
self._rule_prediction_layer = nn.Sequential(
nn.Linear(lstm_hidden_dim, rule_embedding_dim),
# nn.Tanh()
)
self._rule_prediction_bias = nn.Parameter(
torch.FloatTensor(self._num_rules).zero_())
self._copy_gate_layer = nn.Sequential(
nn.Linear(lstm_hidden_dim, 1),
nn.Sigmoid()
)
self._transform_for_copy_layer = nn.Sequential(
nn.Linear(lstm_hidden_dim, source_encoding_dim)
)
@overrides
def forward(self,
encodings: torch.Tensor,
source_mask: torch.Tensor,
source_token_copy_indices: torch.Tensor,
target_rules: torch.Tensor,
target_nonterminals: torch.Tensor,
target_mask: torch.Tensor,
target_allow_copy_mask: torch.Tensor,
meta_field: List[Dict] = None,
):
"""
:param encodings: (batch_size, length, hidden_dim)
:param source_mask: (batch_size, length)
:param source_token_copy_indices: (batch_size, length, max_linked_rule_num)
:param target_rules: (batch_size, target_length)
:param target_nonterminals: (batch_size, target_length)
:param target_mask: (batch_size, target_length)
:param target_allow_copy_mask: (batch_size, target_length)
"""
if self.training:
output_dict = self.train_decode(encodings, source_mask, source_token_copy_indices,
target_rules, target_nonterminals, target_mask, target_allow_copy_mask)
else:
output_dict = self.eval_decode(
encodings, source_mask, source_token_copy_indices)
return output_dict
def compute_copy_probs(self, encodings, source_mask, attention_vector):
"""
:param encodings: (length, hidden_dim)
:param source_mask: (length,)
:param attention_vector: (hidden_dim)
"""
# Attention
# (1, hidden_dim)
unsqueezed_attention_vector = self._transform_for_copy_layer(attention_vector).unsqueeze(0)
weights = unsqueezed_attention_vector.mm(encodings.permute(1, 0)).squeeze(0)
weights = weights.masked_fill((1 - source_mask).bool(), float('-inf'))
weights = F.softmax(weights, dim=-1)
return weights
def train_decode(self, encodings, source_mask, source_token_copy_indices, target_rules, target_nonterminals, target_mask, target_allow_copy_mask):
source_length = encodings.size(1)
batch_size, target_length = target_rules.size()
prev_attention_context = encodings.new_zeros((batch_size, 1, self._lstm_hidden_dim))
source_encoding_key, source_encoding_value = self._transform_encodings_key(encodings), self._transform_encodings_value(encodings)
h = encodings.new_zeros([self._num_lstm_layers, batch_size, self._lstm_hidden_dim])
c = encodings.new_zeros([self._num_lstm_layers, batch_size, self._lstm_hidden_dim])
decoder_hidden_state = (h, c)
rule_probs = list()
for ts in range(target_length - 1):
# Input
# (batch_size, 1, rule_embedding_size)
prev_rule_embedded = self._rule_embedder(target_rules[:, ts].unsqueeze(1).long())
prev_embedded = prev_rule_embedded
# (batch_size, 1, nonterminal_embedding_size)
curr_nonterminal_embedded = self._nonterminal_embedder(target_nonterminals[:, ts].unsqueeze(1).long())
decoder_inputs = torch.cat([prev_embedded, curr_nonterminal_embedded, prev_attention_context], dim=-1)
# Step
decoder_outputs, context, attention_vector, decoder_hidden_state, attention_weights = self.take_decode_step(
source_encoding_key,
source_encoding_value,
source_mask,
decoder_inputs,
decoder_hidden_state
)
            # (batch_size, 1, lstm_hidden_dim)
prev_attention_context = attention_vector
# Production Rules
# (batch_size, num_rules)
rule_scores = F.linear(
self._rule_prediction_layer(attention_vector.squeeze(1)),
weight=self._rule_embedder.weight,
bias=self._rule_prediction_bias
)
# Copy Gate
# (batch_size, 1)
copy_gate = self._copy_gate_layer(attention_vector.squeeze(1))
curr_rule_probs = list()
for bidx in range(batch_size):
# Keep Valid Rule
nonterminal_id = int(target_nonterminals[bidx, ts])
if nonterminal_id == self._nonterminal_pad_index or nonterminal_id == self._nonterminal_end_index:
active_rule_ids = [0]
else:
active_rule_ids = self._grammar.get_production_rule_ids_by_nonterminal_id(nonterminal_id)
# (num_rules)
active_rule_mask = nn_utils.get_one_hot_mask(self._num_rules, active_rule_ids).to(rule_scores.device)
probs = F.softmax(rule_scores[bidx, :].masked_fill(
(1 - active_rule_mask).bool(), float('-inf')), dim=-1)
if target_allow_copy_mask[bidx, ts] == 1:
# (source_length, max_linked_rule_num)
token_copy_indices = source_token_copy_indices[bidx]
# (source_length, num_rules)
one_hot_token_copy_indices = (torch.sum(
torch.nn.functional.one_hot(token_copy_indices, self._num_rules), dim=1) > 0).float()
if torch.sum((torch.sum(one_hot_token_copy_indices, dim=0) > 0).float() * active_rule_mask.float()) > 0:
# allow soft copy
copy_score_gate = copy_gate.squeeze(-1)[bidx]
# (source_length)
copy_scores = attention_weights[bidx, 0, :]
# copy_scores = self.compute_copy_probs(
# encodings[bidx, :, :], source_mask[bidx, :], attention_vector.squeeze(1)[bidx, :])
# There is a chance that we can copy from source
# num_rules
copy_scores = torch.sum(
copy_scores.unsqueeze(-1) * one_hot_token_copy_indices.float(),
dim=0
)
copy_scores.masked_fill_(
(1 - active_rule_mask).bool(), float('-inf'))
normalized_copy_scores = F.softmax(copy_scores, dim=-1)
# Score
probs = copy_score_gate * normalized_copy_scores + \
(1 - copy_score_gate) * probs
curr_rule_probs.append(probs)
curr_rule_probs = torch.stack(curr_rule_probs, dim=0)
rule_probs.append(curr_rule_probs)
rule_probs = torch.stack(rule_probs, dim=0).permute(1, 0, 2)
# Loss
loss = self.get_loss(rule_probs=rule_probs, target_rules=target_rules[:, 1:].long(), target_mask=target_mask[:, 1:].float())
# Predicted Labels
_, predicted_rules = rule_probs.max(dim=-1)
output_dict = {"loss": loss, "predicted_rules": predicted_rules}
return output_dict
def eval_decode(self, encodings, source_mask, source_token_copy_indices):
batch_size, source_length, _ = encodings.size()
prev_attention_context = encodings.new_zeros((batch_size, 1, self._lstm_hidden_dim))
source_encoding_key, source_encoding_value = self._transform_encodings_key(encodings), self._transform_encodings_value(encodings)
h = encodings.new_zeros([self._num_lstm_layers, batch_size, self._lstm_hidden_dim])
c = encodings.new_zeros([self._num_lstm_layers, batch_size, self._lstm_hidden_dim])
decoder_hidden_state = (h, c)
rule_pad_index_tensor = torch.Tensor([self._rule_pad_index]).long().to(encodings.device)
nonterminal_pad_index_tensor = torch.Tensor([self._nonterminal_pad_index]).long().to(encodings.device)
ast_results, is_complete, recorded_copy_gates, recorded_copy_weights = list(), list(), list(), list()
for i in range(batch_size):
ast_results.append(self._ast_class(root_rule=self._root_rule))
is_complete.append(False)
for ts in range(self._max_target_length):
prev_embedded = list()
curr_nonterminal_embedded = list()
for bidx, ast in enumerate(ast_results):
if is_complete[bidx]:
# PAD
prev_embedded.append(self._rule_embedder(rule_pad_index_tensor))
curr_nonterminal_embedded.append(self._nonterminal_embedder(nonterminal_pad_index_tensor))
else:
last_production_rule = ast.get_last_production_rule()
# Rule
rule_index_tensor = torch.Tensor([last_production_rule.rule_id]).long().to(encodings.device)
prev_embedded.append(self._rule_embedder(rule_index_tensor))
# Curr Non-Terminal
curr_non_terminal_id = self._grammar.get_non_terminal_id(ast.get_curr_non_terminal())
nonterminal_index_tensor = torch.Tensor([curr_non_terminal_id]).long().to(encodings.device)
curr_nonterminal_embedded.append(
self._nonterminal_embedder(nonterminal_index_tensor)
)
# (batch_size, 1, rule_embedding_size)
prev_embedded = torch.stack(prev_embedded, dim=0)
# (batch_size, 1, type_embedding_size)
curr_nonterminal_embedded = torch.stack(curr_nonterminal_embedded, dim=0)
decoder_inputs = torch.cat([prev_embedded, curr_nonterminal_embedded, prev_attention_context], dim=-1)
# Step
decoder_outputs, context, attention_vector, decoder_hidden_state, attention_weights = self.take_decode_step(
source_encoding_key,
source_encoding_value,
source_mask,
decoder_inputs,
decoder_hidden_state
)
prev_attention_context = attention_vector
# Production Rules
# (batch_size, num_rules)
rule_scores = F.linear(
self._rule_prediction_layer(attention_vector.squeeze(1)),
weight=self._rule_embedder.weight,
bias=self._rule_prediction_bias
)
# Copy Gate
# (batch_size, 1)
copy_gate = self._copy_gate_layer(attention_vector.squeeze(1))
recorded_copy_gates.append(copy_gate.squeeze(1))
# (batch_size, source_length)
batch_copy_scores = attention_weights.squeeze(dim=1)
recorded_copy_weights.append(batch_copy_scores)
is_finish = True
for bidx, ast in enumerate(ast_results):
if not is_complete[bidx]:
curr_non_terminal = ast.get_curr_non_terminal()
# Rule
active_rule_ids = self._grammar.get_production_rule_ids_by_nonterminal(curr_non_terminal)
active_rule_mask = nn_utils.get_one_hot_mask(self._num_rules, active_rule_ids).to(rule_scores.device)
brule_scores = rule_scores[bidx, :].masked_fill((1 - active_rule_mask).bool(), float('-inf'))
curr_rule_probs = F.softmax(brule_scores, dim=-1)
if curr_non_terminal in self._grammar.copy_terminal_set:
                        # TODO: examine
# Copy
# (source_length, max_linked_rule_num)
token_copy_indices = source_token_copy_indices[bidx]
# (source_length, num_rules)
one_hot_token_copy_indices = (torch.sum(
torch.nn.functional.one_hot(token_copy_indices, self._num_rules), dim=1) > 0).float()
if torch.sum((torch.sum(one_hot_token_copy_indices, dim=0) > 0).float() * active_rule_mask.float()) > 0:
# allow soft copy
copy_score_gate = copy_gate.squeeze(-1)[bidx]
# (source_length)
copy_scores = attention_weights[bidx, 0, :]
# copy_scores = self.compute_copy_probs(
# encodings[bidx, :, :], source_mask[bidx, :], attention_vector.squeeze(1)[bidx, :])
# There is a chance that we can copy from source
# (num_rules)
copy_scores = torch.sum(
copy_scores.unsqueeze(-1) *
one_hot_token_copy_indices.float(),
dim=0
)
copy_scores.masked_fill_(
(1 - active_rule_mask).bool(), float('-inf'))
normalized_copy_scores = F.softmax(copy_scores, dim=-1)
# Score
curr_rule_probs = copy_score_gate * normalized_copy_scores + \
(1 - copy_score_gate) * curr_rule_probs
rule_id = int(torch.argmax(curr_rule_probs))
production_rule = self._grammar.get_production_rule_by_id(rule_id)
ast.add_rule(production_rule)
if ast.is_complete:
is_complete[bidx] = True
else:
is_finish = False
if is_finish:
break
# Pad For evaluation
predicted_rules = list()
max_length = 0
for ast in ast_results:
rules = ast.get_production_rules()
rule_ids = [rule.rule_id for rule in rules]
predicted_rules.append(np.array(rule_ids, dtype=int))
if len(rules) > max_length:
max_length = len(rules)
# Pad
for i in range(batch_size):
if len(predicted_rules[i]) < max_length:
predicted_rules[i] = np.concatenate(
[predicted_rules[i], np.ones(max_length - len(predicted_rules[i])) * self._rule_pad_index],
axis=0
)
predicted_rules = torch.from_numpy(np.array(predicted_rules, dtype=int)).to(encodings.device)
recorded_copy_gates = torch.stack(recorded_copy_gates, dim=0).transpose(dim0=1, dim1=0)
recorded_copy_weights = torch.stack(recorded_copy_weights, dim=0).permute(1, 0, 2)
output_dict = {
"loss": torch.Tensor([0.0]).to(encodings.device),
"predicted_rules": predicted_rules.long(),
"recorded_copy_gates": recorded_copy_gates,
"recorded_copy_weights": recorded_copy_weights
}
return output_dict
def take_decode_step(self,
source_encoding_key: torch.Tensor,
source_encoding_value: torch.Tensor,
source_mask: torch.Tensor,
decoder_inputs: torch.Tensor,
decoder_hidden_state: Tuple[torch.Tensor, torch.Tensor],
):
"""
:param source_encoding_key: (batch_size, length, hidden_dim)
:param source_encoding_value: (batch_size, length, hidden_dim)
:param source_mask: (batch_size, length)
        :param decoder_inputs: (batch_size, 1, lstm_hidden_dim + rule_embedding_dim + nonterminal_embedding_dim)
        :param decoder_hidden_state: (h, c)
:return
decoder_outputs: (batch_size, 1, lstm_hidden_dim)
context: (batch_size, 1, hidden_dim)
att: (batch_size, 1, lstm_hidden_dim)
decoder_hidden_state: (h, c)
"""
decoder_outputs, (h, c) = self._decoder_lstm(decoder_inputs.permute(1, 0, 2), decoder_hidden_state)
decoder_hidden_state = (h, c)
# (batch_size, 1, lstm_hidden_dim)
decoder_outputs = decoder_outputs.permute(1, 0, 2)
# Attention
# (batch_size, 1, length)
raw_weights = decoder_outputs.bmm(source_encoding_key.permute(0, 2, 1))
weights = raw_weights.masked_fill((1 - source_mask.unsqueeze(1)).bool(), float('-inf'))
weights = F.softmax(weights, dim=-1)
# (batch_size, 1, hidden_dim)
context = weights.bmm(source_encoding_value)
att = self._attention_hidden_layer(torch.cat([decoder_outputs, context], dim=-1))
att = self._attn_dropout(att)
return decoder_outputs, context, att, decoder_hidden_state, raw_weights
def get_loss(self,
rule_probs: torch.FloatTensor,
target_rules: torch.LongTensor,
target_mask: torch.FloatTensor,
):
"""
        :param rule_probs: (batch_size, target_length, num_rules)
        :param target_rules: (batch_size, target_length)
        :param target_mask: (batch_size, target_length)
"""
batch_size, target_length = target_rules.size()
rule_probs = torch.gather(
rule_probs.reshape(-1, self._num_rules),
dim=1,
index=target_rules.reshape(-1).unsqueeze(-1).long()
)
rule_probs = rule_probs.reshape(batch_size, target_length)
rule_log_probs = (rule_probs + 1e-10).log()
rule_log_probs *= target_mask.float()
rule_normalize_factor = target_mask.sum(-1)
rule_normalize_factor[rule_normalize_factor == 0] = 1
rule_loss = rule_log_probs.sum(-1) / rule_normalize_factor.float()
rule_loss = -1 * (rule_loss.sum() / batch_size)
return rule_loss
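# Editor's note: an illustrative sketch added for clarity; it is not part of the
# original module. It reproduces, on made-up numbers, the copy-gate mixture used
# in train_decode/eval_decode above: the final rule distribution is a convex
# combination of the generation softmax and a copy distribution obtained by
# routing attention weights over source tokens to the rules they are linked to.
# (The real code additionally masks rules that are invalid for the current
# nonterminal with -inf before each softmax.)
def _copy_gate_mixture_sketch():
    gen_probs = torch.tensor([0.1, 0.2, 0.3, 0.4])  # (num_rules,)
    # Attention weight received by each source token at this decoding step.
    attention_weights = torch.tensor([0.5, 0.3, 0.2])  # (source_length,)
    # one_hot_token_copy_indices[s, r] == 1 if source token s is linked to rule r.
    one_hot_token_copy_indices = torch.tensor([[0., 1., 0., 0.],
                                               [0., 0., 1., 0.],
                                               [0., 1., 0., 0.]])
    copy_scores = torch.sum(attention_weights.unsqueeze(-1) * one_hot_token_copy_indices, dim=0)
    copy_probs = F.softmax(copy_scores, dim=-1)
    copy_gate = torch.tensor(0.6)  # output of the sigmoid copy-gate layer
    return copy_gate * copy_probs + (1 - copy_gate) * gen_probs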
| 20,773 | 46.429224 | 150 |
py
|
Unimer
|
Unimer-master/neural_models/modules/grammar_copy_decoder.py
|
# coding=utf-8
import torch
import copy
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from overrides import overrides
from allennlp.modules import Embedding
from typing import Tuple, List, Dict
from .. import utils as nn_utils
class LSTMGrammarCopyDecoder(nn.Module):
def __init__(self,
grammar,
ast_class,
lstm_hidden_dim: int,
num_lstm_layers: int,
rule_pad_index: int,
rule_embedding_dim: int,
nonterminal_pad_index: int,
nonterminal_end_index: int,
nonterminal_embedding_dim: int,
source_encoding_dim: int,
dropout: float,
max_target_length: int,
):
super().__init__()
self._grammar = grammar
self._root_rule = grammar.get_production_rule_by_id(grammar.root_rule_id)
self._ast_class = ast_class
self._lstm_hidden_dim = lstm_hidden_dim
self._num_lstm_layers = num_lstm_layers
# Production Rules + PAD Rule
self._rule_pad_index = rule_pad_index
self._num_rules = grammar.num_rules + 1
self._rule_embedding_dim = rule_embedding_dim
print("Rule Pad Index: ", self._rule_pad_index)
# Non-Terminals + PAD Node
self._nonterminal_end_index = nonterminal_end_index
self._nonterminal_pad_index = nonterminal_pad_index
self._num_nonterminals = grammar.num_non_terminals + 2
self._nonterminal_embedding_dim = nonterminal_embedding_dim
print("Non-Terminal Pad Index: ", self._nonterminal_pad_index)
print("Non-Terminal End Index: ", self._nonterminal_end_index)
self._source_encoding_dim = source_encoding_dim
self._max_target_length = max_target_length
self._transform_encodings_key = nn.Linear(source_encoding_dim, self._lstm_hidden_dim)
self._transform_encodings_value = nn.Linear(source_encoding_dim, self._lstm_hidden_dim)
# Input: (Attention Context + Previous Rule Embedding + Current Nonterminal Embedding)
decode_lstm_input_dim = lstm_hidden_dim + rule_embedding_dim + nonterminal_embedding_dim
self._decoder_lstm = nn.LSTM(
input_size=decode_lstm_input_dim,
hidden_size=lstm_hidden_dim,
num_layers=num_lstm_layers,
batch_first=False
)
self._attn_dropout = nn.Dropout(p=dropout)
self._decode_dropout = nn.Dropout(p=dropout)
self._rule_embedder = Embedding(self._num_rules, rule_embedding_dim)
self._nonterminal_embedder = Embedding(self._num_nonterminals, nonterminal_embedding_dim)
self._attention_hidden_layer = nn.Sequential(
nn.Linear(lstm_hidden_dim + lstm_hidden_dim, lstm_hidden_dim),
nn.Tanh(),
)
# Rule Predictions
self._rule_prediction_layer = nn.Sequential(
nn.Linear(lstm_hidden_dim, rule_embedding_dim),
# nn.Tanh()
)
self._rule_prediction_bias = nn.Parameter(
torch.FloatTensor(self._num_rules).zero_())
self._copy_gate_layer = nn.Sequential(
nn.Linear(lstm_hidden_dim, 1),
nn.Sigmoid()
)
self._transform_for_copy_layer = nn.Sequential(
nn.Linear(lstm_hidden_dim, source_encoding_dim)
)
@overrides
def forward(self,
encodings: torch.Tensor,
source_mask: torch.Tensor,
source_token_copy_indices: torch.Tensor,
target_rules: torch.Tensor,
target_nonterminals: torch.Tensor,
target_mask: torch.Tensor,
target_allow_copy_mask: torch.Tensor,
meta_field: List[Dict] = None,
):
"""
:param encodings: (batch_size, length, hidden_dim)
:param source_mask: (batch_size, length)
:param source_token_copy_indices: (batch_size, length, max_linked_rule_num)
:param target_rules: (batch_size, target_length)
:param target_nonterminals: (batch_size, target_length)
:param target_mask: (batch_size, target_length)
:param target_allow_copy_mask: (batch_size, target_length)
"""
if self.training:
output_dict = self.train_decode(encodings, source_mask, source_token_copy_indices,
target_rules, target_nonterminals, target_mask, target_allow_copy_mask)
else:
output_dict = self.eval_decode(
encodings, source_mask, source_token_copy_indices)
return output_dict
def compute_copy_probs(self, encodings, source_mask, attention_vector):
"""
:param encodings: (length, hidden_dim)
:param source_mask: (length,)
:param attention_vector: (hidden_dim)
"""
# Attention
# (1, hidden_dim)
unsqueezed_attention_vector = self._transform_for_copy_layer(attention_vector).unsqueeze(0)
weights = unsqueezed_attention_vector.mm(encodings.permute(1, 0)).squeeze(0)
weights = weights.masked_fill((1 - source_mask).bool(), float('-inf'))
weights = F.softmax(weights, dim=-1)
return weights
def train_decode(self, encodings, source_mask, source_token_copy_indices, target_rules, target_nonterminals, target_mask, target_allow_copy_mask):
source_length = encodings.size(1)
batch_size, target_length = target_rules.size()
prev_attention_context = encodings.new_zeros((batch_size, 1, self._lstm_hidden_dim))
source_encoding_key, source_encoding_value = self._transform_encodings_key(encodings), self._transform_encodings_value(encodings)
h = encodings.new_zeros([self._num_lstm_layers, batch_size, self._lstm_hidden_dim])
c = encodings.new_zeros([self._num_lstm_layers, batch_size, self._lstm_hidden_dim])
decoder_hidden_state = (h, c)
rule_probs = list()
for ts in range(target_length - 1):
# Input
# (batch_size, 1, rule_embedding_size)
prev_rule_embedded = self._rule_embedder(target_rules[:, ts].unsqueeze(1).long())
prev_embedded = prev_rule_embedded
# (batch_size, 1, nonterminal_embedding_size)
curr_nonterminal_embedded = self._nonterminal_embedder(target_nonterminals[:, ts].unsqueeze(1).long())
decoder_inputs = torch.cat([prev_embedded, curr_nonterminal_embedded, prev_attention_context], dim=-1)
# Step
decoder_outputs, context, attention_vector, decoder_hidden_state = self.take_decode_step(
source_encoding_key,
source_encoding_value,
source_mask,
decoder_inputs,
decoder_hidden_state
)
            # (batch_size, 1, lstm_hidden_dim)
prev_attention_context = attention_vector
# Production Rules
# (batch_size, num_rules)
rule_scores = F.linear(
self._rule_prediction_layer(attention_vector.squeeze(1)),
weight=self._rule_embedder.weight,
bias=self._rule_prediction_bias
)
# Copy Gate
# (batch_size, 1)
copy_gate = self._copy_gate_layer(attention_vector.squeeze(1))
curr_rule_probs = list()
for bidx in range(batch_size):
# Keep Valid Rule
nonterminal_id = int(target_nonterminals[bidx, ts])
if nonterminal_id == self._nonterminal_pad_index or nonterminal_id == self._nonterminal_end_index:
active_rule_ids = [0]
else:
active_rule_ids = self._grammar.get_production_rule_ids_by_nonterminal_id(nonterminal_id)
# (num_rules)
active_rule_mask = nn_utils.get_one_hot_mask(self._num_rules, active_rule_ids).to(rule_scores.device)
probs = F.softmax(rule_scores[bidx, :].masked_fill(
(1 - active_rule_mask).bool(), float('-inf')), dim=-1)
if target_allow_copy_mask[bidx, ts] == 1:
# (source_length, max_linked_rule_num)
token_copy_indices = source_token_copy_indices[bidx]
# (source_length, num_rules)
one_hot_token_copy_indices = (torch.sum(
torch.nn.functional.one_hot(token_copy_indices, self._num_rules), dim=1) > 0).float()
if torch.sum((torch.sum(one_hot_token_copy_indices, dim=0) > 0).float() * active_rule_mask.float()) > 0:
# allow soft copy
copy_score_gate = copy_gate.squeeze(-1)[bidx]
# (source_length)
copy_scores = self.compute_copy_probs(
encodings[bidx, :, :], source_mask[bidx, :], attention_vector.squeeze(1)[bidx, :])
# There is a chance that we can copy from source
                        # (num_rules)
copy_scores = torch.sum(
copy_scores.unsqueeze(-1) * one_hot_token_copy_indices.float(),
dim=0
)
copy_scores.masked_fill_(
(1 - active_rule_mask).bool(), float('-inf'))
normalized_copy_scores = F.softmax(copy_scores, dim=-1)
# Score
probs = copy_score_gate * normalized_copy_scores + \
(1 - copy_score_gate) * probs
curr_rule_probs.append(probs)
curr_rule_probs = torch.stack(curr_rule_probs, dim=0)
rule_probs.append(curr_rule_probs)
rule_probs = torch.stack(rule_probs, dim=0).permute(1, 0, 2)
# Loss
loss = self.get_loss(rule_probs=rule_probs, target_rules=target_rules[:, 1:].long(), target_mask=target_mask[:, 1:].float())
# Predicted Labels
_, predicted_rules = rule_probs.max(dim=-1)
output_dict = {"loss": loss, "predicted_rules": predicted_rules}
return output_dict
def eval_decode(self, encodings, source_mask, source_token_copy_indices):
batch_size, source_length, _ = encodings.size()
prev_attention_context = encodings.new_zeros((batch_size, 1, self._lstm_hidden_dim))
source_encoding_key, source_encoding_value = self._transform_encodings_key(encodings), self._transform_encodings_value(encodings)
h = encodings.new_zeros([self._num_lstm_layers, batch_size, self._lstm_hidden_dim])
c = encodings.new_zeros([self._num_lstm_layers, batch_size, self._lstm_hidden_dim])
decoder_hidden_state = (h, c)
rule_pad_index_tensor = torch.Tensor([self._rule_pad_index]).long().to(encodings.device)
nonterminal_pad_index_tensor = torch.Tensor([self._nonterminal_pad_index]).long().to(encodings.device)
ast_results, is_complete, recorded_copy_gates, recorded_copy_weights = list(), list(), list(), list()
for i in range(batch_size):
ast_results.append(self._ast_class(root_rule=self._root_rule))
is_complete.append(False)
for ts in range(self._max_target_length):
prev_embedded = list()
curr_nonterminal_embedded = list()
for bidx, ast in enumerate(ast_results):
if is_complete[bidx]:
# PAD
prev_embedded.append(self._rule_embedder(rule_pad_index_tensor))
curr_nonterminal_embedded.append(self._nonterminal_embedder(nonterminal_pad_index_tensor))
else:
last_production_rule = ast.get_last_production_rule()
# Rule
rule_index_tensor = torch.Tensor([last_production_rule.rule_id]).long().to(encodings.device)
prev_embedded.append(self._rule_embedder(rule_index_tensor))
# Curr Non-Terminal
curr_non_terminal_id = self._grammar.get_non_terminal_id(ast.get_curr_non_terminal())
nonterminal_index_tensor = torch.Tensor([curr_non_terminal_id]).long().to(encodings.device)
curr_nonterminal_embedded.append(
self._nonterminal_embedder(nonterminal_index_tensor)
)
# (batch_size, 1, rule_embedding_size)
prev_embedded = torch.stack(prev_embedded, dim=0)
# (batch_size, 1, type_embedding_size)
curr_nonterminal_embedded = torch.stack(curr_nonterminal_embedded, dim=0)
decoder_inputs = torch.cat([prev_embedded, curr_nonterminal_embedded, prev_attention_context], dim=-1)
# Step
decoder_outputs, context, attention_vector, decoder_hidden_state = self.take_decode_step(
source_encoding_key,
source_encoding_value,
source_mask,
decoder_inputs,
decoder_hidden_state
)
prev_attention_context = attention_vector
# Production Rules
# (batch_size, num_rules)
rule_scores = F.linear(
self._rule_prediction_layer(attention_vector.squeeze(1)),
weight=self._rule_embedder.weight,
bias=self._rule_prediction_bias
)
# Copy Gate
# (batch_size, 1)
copy_gate = self._copy_gate_layer(attention_vector.squeeze(1))
recorded_copy_gates.append(copy_gate.squeeze(1))
# (batch_size, source_length)
batch_copy_scores = copy_gate.new_zeros((batch_size, source_length))
recorded_copy_weights.append(batch_copy_scores)
is_finish = True
for bidx, ast in enumerate(ast_results):
if not is_complete[bidx]:
curr_non_terminal = ast.get_curr_non_terminal()
# Rule
active_rule_ids = self._grammar.get_production_rule_ids_by_nonterminal(curr_non_terminal)
active_rule_mask = nn_utils.get_one_hot_mask(self._num_rules, active_rule_ids).to(rule_scores.device)
brule_scores = rule_scores[bidx, :].masked_fill((1 - active_rule_mask).bool(), float('-inf'))
curr_rule_probs = F.softmax(brule_scores, dim=-1)
if curr_non_terminal in self._grammar.copy_terminal_set:
                        # TODO: examine
# Copy
# (source_length, max_linked_rule_num)
token_copy_indices = source_token_copy_indices[bidx]
# (source_length, num_rules)
one_hot_token_copy_indices = (torch.sum(
torch.nn.functional.one_hot(token_copy_indices, self._num_rules), dim=1) > 0).float()
if torch.sum((torch.sum(one_hot_token_copy_indices, dim=0) > 0).float() * active_rule_mask.float()) > 0:
# allow soft copy
copy_score_gate = copy_gate.squeeze(-1)[bidx]
# (source_length)
copy_scores = self.compute_copy_probs(
encodings[bidx, :, :], source_mask[bidx, :], attention_vector.squeeze(1)[bidx, :])
# For Copy Analysis
batch_copy_scores[bidx, :] = copy_scores
# There is a chance that we can copy from source
# (num_rules)
copy_scores = torch.sum(
copy_scores.unsqueeze(-1) *
one_hot_token_copy_indices.float(),
dim=0
)
copy_scores.masked_fill_(
(1 - active_rule_mask).bool(), float('-inf'))
normalized_copy_scores = F.softmax(copy_scores, dim=-1)
# Score
curr_rule_probs = copy_score_gate * normalized_copy_scores + \
(1 - copy_score_gate) * curr_rule_probs
rule_id = int(torch.argmax(curr_rule_probs))
production_rule = self._grammar.get_production_rule_by_id(rule_id)
ast.add_rule(production_rule)
if ast.is_complete:
is_complete[bidx] = True
else:
is_finish = False
if is_finish:
break
# Pad For evaluation
predicted_rules = list()
max_length = 0
for ast in ast_results:
rules = ast.get_production_rules()
rule_ids = [rule.rule_id for rule in rules]
predicted_rules.append(np.array(rule_ids, dtype=int))
if len(rules) > max_length:
max_length = len(rules)
# Pad
for i in range(batch_size):
if len(predicted_rules[i]) < max_length:
predicted_rules[i] = np.concatenate(
[predicted_rules[i], np.ones(max_length - len(predicted_rules[i])) * self._rule_pad_index],
axis=0
)
predicted_rules = torch.from_numpy(np.array(predicted_rules, dtype=int)).to(encodings.device)
recorded_copy_gates = torch.stack(recorded_copy_gates, dim=0).transpose(dim0=1, dim1=0)
recorded_copy_weights = torch.stack(recorded_copy_weights, dim=0).permute(1, 0, 2)
output_dict = {
"loss": torch.Tensor([0.0]).to(encodings.device),
"predicted_rules": predicted_rules.long(),
"recorded_copy_gates": recorded_copy_gates,
"recorded_copy_weights": recorded_copy_weights
}
return output_dict
def take_decode_step(self,
source_encoding_key: torch.Tensor,
source_encoding_value: torch.Tensor,
source_mask: torch.Tensor,
decoder_inputs: torch.Tensor,
decoder_hidden_state: Tuple[torch.Tensor, torch.Tensor],
):
"""
:param source_encoding_key: (batch_size, length, hidden_dim)
:param source_encoding_value: (batch_size, length, hidden_dim)
:param source_mask: (batch_size, length)
        :param decoder_inputs: (batch_size, 1, lstm_hidden_dim + rule_embedding_dim + nonterminal_embedding_dim)
        :param decoder_hidden_state: (h, c)
        :return:
decoder_outputs: (batch_size, 1, lstm_hidden_dim)
context: (batch_size, 1, hidden_dim)
att: (batch_size, 1, lstm_hidden_dim)
decoder_hidden_state: (h, c)
"""
decoder_outputs, (h, c) = self._decoder_lstm(decoder_inputs.permute(1, 0, 2), decoder_hidden_state)
decoder_hidden_state = (h, c)
# (batch_size, 1, lstm_hidden_dim)
decoder_outputs = decoder_outputs.permute(1, 0, 2)
# Attention
# (batch_size, 1, length)
weights = decoder_outputs.bmm(source_encoding_key.permute(0, 2, 1))
weights = weights.masked_fill((1 - source_mask.unsqueeze(1)).bool(), float('-inf'))
weights = F.softmax(weights, dim=-1)
# (batch_size, 1, hidden_dim)
context = weights.bmm(source_encoding_value)
att = self._attention_hidden_layer(torch.cat([decoder_outputs, context], dim=-1))
att = self._attn_dropout(att)
return decoder_outputs, context, att, decoder_hidden_state
def get_loss(self,
rule_probs: torch.FloatTensor,
target_rules: torch.LongTensor,
target_mask: torch.FloatTensor,
):
"""
:param rule_probs (batch_size, target_length, num_rules)
:param target_mask (batch_size, target_length)
"""
batch_size, target_length = target_rules.size()
rule_probs = torch.gather(
rule_probs.reshape(-1, self._num_rules),
dim=1,
index=target_rules.reshape(-1).unsqueeze(-1).long()
)
rule_probs = rule_probs.reshape(batch_size, target_length)
rule_log_probs = (rule_probs + 1e-10).log()
rule_log_probs *= target_mask.float()
rule_normalize_factor = target_mask.sum(-1)
rule_normalize_factor[rule_normalize_factor == 0] = 1
rule_loss = rule_log_probs.sum(-1) / rule_normalize_factor.float()
rule_loss = -1 * (rule_loss.sum() / batch_size)
return rule_loss
| 20,697 | 46.363844 | 150 |
py
|
Unimer
|
Unimer-master/executions/compare_funql_prolog_denotations.py
|
# coding=utf8
import re
def read_logical_forms(path):
ql_dict = dict()
with open(path, 'r') as f:
for line in f:
line = line.strip()
splits = line.split('\t')
ql_dict[splits[0]] = splits[1]
return ql_dict
def process_prolog_denotation(denotation):
assert denotation[0] == '[' and denotation[-1] == ']'
values = denotation[1:-1].split(',')
processed_values = list()
for v in values:
if len(v) == 0:
continue
processed_values.append(v)
return sorted(processed_values)
def process_funql_denotation(denotation):
assert denotation[0] == '[' and denotation[-1] == ']'
if '(' in denotation:
values = denotation[1:-1].split('),')
else:
values = denotation[1:-1].split(',')
processed_values = list()
for v in values:
if len(v) == 0:
continue
nv = re.sub(r'.*\((.*),\s*[a-z|\_]+\)', r'\1', v)
nv = re.sub(r'.*\((.*),\s*[a-z|\_]+', r'\1', nv)
nv = re.sub(r'.*\((.*)\)', r'\1', nv)
nv = re.sub(r'.*\((.*)', r'\1', nv)
processed_values.append(nv)
return sorted(processed_values)
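# A hedged illustration of the stripping above, with a hypothetical denotation:
# process_funql_denotation('[cityid(austin,tx)]') returns ['austin'], so FunQL
# and Prolog denotations can be compared as sorted lists of bare values.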
def read_denotations(path, denotation_process_func):
questions, denotations = list(), list()
with open(path, 'r') as f:
for line in f:
line = line.strip()
splits = line.split('\t')
questions.append(splits[0])
# print(splits[1])
d = denotation_process_func(splits[1])
denotations.append(d)
return questions, denotations
def compare(prolog_data_path, prolog_path, funql_data_path, funql_path):
prolog_dict, funql_dict = read_logical_forms(
prolog_data_path), read_logical_forms(funql_data_path)
prolog_questions, prolog_denotations = read_denotations(
prolog_path, process_prolog_denotation)
funql_questions, funql_denotations = read_denotations(
funql_path, process_funql_denotation)
total, same = 0, 0
for pq, pd in zip(prolog_questions, prolog_denotations):
pq_1 = pq[: -1].lower().strip()
funql_idx = funql_questions.index(pq_1)
fd = funql_denotations[funql_idx]
if pd == fd:
same += 1
else:
print(pq)
print(prolog_dict[pq])
print(pd)
print(funql_dict[pq_1.strip()])
print(fd)
print("===\n\n")
total += 1
print(total, same)
if __name__ == '__main__':
compare('../data/geo/geo_prolog_test_v2.tsv', './prolog_execution_results.tsv',
'../data/geo/geo_funql_test.tsv', './funql_execution_results.tsv')
| 2,670 | 30.05814 | 83 |
py
|
Unimer
|
Unimer-master/executions/geo/get_sql_denotations.py
|
# coding=utf8
from sql.evaluator import get_result
def evaluate(path):
questions, logical_forms = list(), list()
with open(path, 'r') as f:
for line in f:
line = line.strip()
splits = line.split('\t')
q, l = splits[0], splits[1]
questions.append(q)
logical_forms.append(l)
with open('./sql_execution_results.tsv', 'w') as f:
for question, lf in zip(questions, logical_forms):
print(question)
print(lf)
formated_results = get_result(lf)
results = list()
for fr in formated_results:
key = list(fr.keys())[0]
results.append(fr[key])
print(results)
f.write("%s\t%s\n" % (question, str(results)))
print('===\n\n')
if __name__ == '__main__':
evaluate('../../data/geo/geo_sql_question_based_train.tsv')
| 922 | 27.84375 | 63 |
py
|
Unimer
|
Unimer-master/executions/geo/get_prolog_denotations.py
|
# coding=utf8
import os
import re
import json
import argparse
import subprocess
pattern = re.compile('(\d+)(\[.*\])')
script_template = """
:-compile('%s').
:-compile('%s').
%s
:-halt.
"""
def evaluate(path):
questions, logical_forms = list(), list()
with open(path, 'r') as f:
for line in f:
line = line.strip()
splits = line.split('\t')
q, origin_lf = splits[0], splits[1]
questions.append(q)
# l = re.sub(r"\s*\(\s*", "(", origin_lf)
# l = re.sub(r"\s*\)\s*", ")", l)
# l = re.sub(r"\s*,\s*", ",", l)
# l = re.sub(r"\s*'\s*", "'", l)
# l = re.sub(r'_([a-z]+\()', r'\1', l)
# l = l.replace("_nextto", "next_to")
# l = l.replace("_highpoint", "high_point")
# l = l.replace("_lowpoint", "low_point")
# l = l.replace("\+", 'not')
logical_forms.append(origin_lf)
commands = list()
for idx, lf in enumerate(logical_forms):
if idx == 257:
continue
command = ":-execute_query(%s, U%d),print(%d),print(U%d),nl." % (
lf, idx, idx, idx)
commands.append(command)
commands = '\n'.join(commands)
library_path = os.path.join(os.getcwd(), 'geo', 'prolog')
script = script_template % (
os.path.join(library_path, 'geobase.pl'),
os.path.join(library_path, 'geoquery.pl'),
commands
)
with open('test_prolog.pl', 'w') as f:
f.write(script)
command = 'swipl -l ' + 'test_prolog.pl > prolog_result.log'
subprocess.call(command, shell=True)
# Read result
with open('prolog_result.log', 'r') as f, open('prolog_execution_results.tsv', 'w') as wf:
for line in f:
line = line.strip()
match = pattern.match(line)
assert match is not None
idx = int(match.group(1))
result = match.group(2)
wf.write("%s\t%s\n" % (questions[idx], result))
if __name__ == '__main__':
evaluate('../data/geo/geo_prolog_test_fixed.tsv')
| 2,091 | 28.464789 | 94 |
py
|
Unimer
|
Unimer-master/executions/geo/evaluate_sql.py
|
# coding=utf8
import os
import re
import json
import argparse
from sql.evaluator import compare_sqls
def evaluate(path, timeout=120):
with open(path, 'r') as f:
predictions = json.load(f)
total = len(predictions)
correct = 0
for pidx, p in enumerate(predictions):
truth = p['truth_logical_form']
pred = p['predicted_logical_form']
if compare_sqls(truth, pred):
correct += 1
print("Total: %d, Correct: %d, Accuracy: %f" %
(total, correct, float(correct / total)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--predictions', help='file that stores the prediction results', required=True)
args = parser.parse_args()
evaluate(args.predictions)
| 790 | 24.516129 | 87 |
py
|
Unimer
|
Unimer-master/executions/geo/evaluate_lambda_calculus.py
|
# coding=utf8
import re
import os
import json
import shutil
import argparse
import subprocess
from lambda_calculus.transform_lambda_caculus import transform
import sys
sys.path += ['..', os.path.join('..', '..')]
pattern = re.compile('\(\"([p|t])\",(\d+),Just\s+(.*)\)')
failed_pattern = re.compile('\(\"([p|t])\",(\d+),(Nothing|\"Nothing\")\)')
script_template = r"""
module Main where
import Lib
import Geobase
import Geofunctions
import System.Environment
import System.Timeout
main :: IO ()
main = do
putStrLn "Execute Lambda Calculus"
-- let predicted_result = (count_ (\x -> (and [(river x), (loc x "texas:s")])))
-- let truth_result = (count_ (\x -> (and [(river x), (loc x "texas:s")])))
-- let compare_result = predicted_result == truth_result
%s
"""
def is_valid(lf, grammar):
try:
grammar.parse(lf)
except:
return False
return True
def check_misuse_of_variable(lf, tokenizer):
tokens = tokenizer.tokenize(lf)
defined_variables = list()
for token in tokens:
if token.text.endswith(":e") or token.text.endswith(":i"):
defined_variables.append(token.text)
else:
if token.text.startswith("$"):
# Check if it is defined
for dv in defined_variables:
if token.text == dv[:2]:
break
else:
return False
return True
def calculuate_result(path):
with open(path, 'r') as f:
predictions = json.load(f)
predicted_query_results, truth_query_results = list(), list()
with open('./lambda_calculus/evaluator/lambda_calculus_is_correct_result.log', 'r') as f:
for lidx, line in enumerate(f):
line = line.strip()
if lidx == 0 or len(line) == 0:
continue
match = failed_pattern.match(line)
if match:
is_predict = match.group(1) == 'p'
if is_predict:
predicted_query_results.append((int(match.group(2)), None))
else:
truth_query_results.append((int(match.group(2)), None))
continue
print(line)
match = pattern.match(line)
assert match is not None
is_predict = match.group(1) == 'p'
idx = int(match.group(2))
result = match.group(3)
if is_predict:
predicted_query_results.append((idx, result))
else:
truth_query_results.append((idx, result))
correct_count = 0
for idx, pq_result in predicted_query_results:
if pq_result is None:
continue
# find truth result
for tidx, tq_result in truth_query_results:
if idx == tidx:
if pq_result == tq_result:
correct_count += 1
break
print("Correct Count: %d, Total: %d, Accuracy: %f" %
(correct_count, len(predictions), (correct_count/len(predictions))))
def evaluate(path, grammar, tokenizer, timeout=120):
with open(path, 'r') as f:
predictions = json.load(f)
timeout_limits = 10 * 60 * 1000000
code = list()
result_code = """ let results = ["""
predicted_queries, truth_queries = list(), list()
for pidx, p in enumerate(predictions):
if is_valid(p['predicted_logical_form'], grammar) and \
check_misuse_of_variable(p['predicted_logical_form'], tokenizer):
print(pidx)
print(p['question'])
print(p['truth_logical_form'])
print(p['predicted_logical_form'])
print("==\n\n")
transformed_pred = transform(p['predicted_logical_form'])
transform_truth = transform(p['truth_logical_form'])
if transformed_pred.startswith("[e") and not transform_truth.startswith('[e'):
transform_truth = "[%s]" % transform_truth
if transform_truth.startswith('[e') and not transformed_pred.startswith("[e"):
transformed_pred = "[%s]" % transformed_pred
predicted_queries.append((pidx, transformed_pred))
truth_queries.append((pidx, transform_truth))
for (idx, pq) in predicted_queries:
code.append(""" compare_result_p%d <- (timeout %d (return $! (%s)))""" %
(idx, timeout_limits, pq))
code.append(""" print ("p", %d, compare_result_p%d)""" % (idx, idx))
for (idx, tq) in truth_queries:
code.append(""" compare_result_t%d <- (timeout %d (return $! (%s)))""" %
(idx, timeout_limits, tq))
code.append(""" print ("t", %d, compare_result_t%d)""" % (idx, idx))
code = "\n".join(code)
code = script_template % (code)
with open('Main.hs', 'w') as f:
f.write(code)
# copy file
shutil.copyfile('./Main.hs', './lambda_calculus/evaluator/app/Main.hs')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--predictions', help='file that stores the prediction results', required=True)
parser.add_argument("--timeout",
help='timeout limit for expression', default=120, type=int)
args = parser.parse_args()
from grammars.grammar import get_grammar
grammar = get_grammar('geo', 'lambda')
from grammars.utils import get_logical_form_tokenizer
tokenizer = get_logical_form_tokenizer('geo', 'lambda')
evaluate(args.predictions, grammar, tokenizer, args.timeout)
# calculuate_result(args.predictions)
| 5,659 | 32.892216 | 103 |
py
|
Unimer
|
Unimer-master/executions/geo/evaluate_funql.py
|
# coding=utf8
import os
import re
import json
import argparse
import subprocess
pattern = re.compile("(\d+)'\s+([yn])'")
script_template = """
:-compile('%s').
:-compile('%s').
:-compile('%s').
:-use_module(library(time)).
%s
:-halt.
"""
def evaluate(path, timeout=120):
with open(path, 'r') as f:
predictions = json.load(f)
library_path = os.path.join(os.getcwd(), 'funql')
code = list()
for pidx, p in enumerate(predictions):
truth = p['truth_logical_form']
pred = p['predicted_logical_form']
code.append(
":-catch((call_with_time_limit(%d, execute_funql_query(%s, U%d1)), call_with_time_limit(%d, execute_funql_query(%s, U%d2)), print(%d), (U%d1 == U%d2 -> print(' y') ; print(' n')), nl), time_limit_exceeded, (print(%d), print(' n'), nl))." %
(timeout, pred, pidx, timeout, truth, pidx, pidx, pidx, pidx, pidx))
code = "\n".join(code)
script = script_template % (
os.path.join(library_path, 'geobase.pl'),
os.path.join(library_path, 'geoquery.pl'),
os.path.join(library_path, 'eval.pl'),
code
)
with open('eval_funql.pl', 'w') as f:
f.write(script)
command = 'swipl -l ' + 'eval_funql.pl > funql_result.log'
subprocess.call(command, shell=True)
# Parse Result
count = 0
correct = 0
with open('funql_result.log', 'r') as f:
for line in f:
line = line.strip()
if len(line) > 0:
match = pattern.match(line)
if match:
count += 1
index, is_correct = int(
match.group(1)), match.group(2) == 'y'
predictions[index]['execution_correct'] = is_correct
if is_correct:
correct += 1
print("Total: %d, Correct: %d, Accuracy: %f" %
(len(predictions), correct, float(correct / len(predictions))))
# assert count == len(predictions)
with open(path, 'w') as f:
f.write(json.dumps(predictions, indent=4))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--predictions', help='file that stores the prediction results', required=True)
args = parser.parse_args()
evaluate(args.predictions)
| 2,316 | 30.310811 | 251 |
py
|
Unimer
|
Unimer-master/executions/geo/evaluate_prolog.py
|
# coding=utf8
import re
import os
import json
import argparse
import subprocess
import sys
sys.path += ['..', '../../']
pattern = re.compile("(\d+)'\s+([yn])'")
script_template = """
:-compile('%s').
:-compile('%s').
:-compile('%s').
:-use_module(library(time)).
%s
:-halt.
"""
def tokenize(logical_form):
normalized_lf = logical_form.replace(" ", "::")
replacements = [
('(', ' ( '),
(')', ' ) '),
(',', ' , '),
("\\+", " \\+ "),
]
for a, b in replacements:
normalized_lf = normalized_lf.replace(a, b)
toks = [t if "::" not in t else t.replace(
"::", " ") for t in normalized_lf.split()]
return toks
def fix_variables(logical_form):
# Tokenize Prolog
toks = tokenize(logical_form)
toks = [t.upper() if len(t) == 1 and re.match(
'[a-z]', t) else t for t in toks]
return "".join(toks)
def recover_variable_name(logical_form):
"""Undo the variable name standardization."""
toks = tokenize(logical_form)
cur_var = chr(ord('A') - 1)
new_toks = []
for w in toks:
if w == 'NV' or w == 'nv':
cur_var = chr(ord(cur_var) + 1)
new_toks.append(cur_var)
elif re.match('[V|v]\d+', w):
ind = int(w[1:])
new_toks.append(chr(ord(cur_var) - ind))
else:
new_toks.append(w)
return ''.join(new_toks)
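# Sketch of the recovery above, on a hypothetical standardized form:
# recover_variable_name('answer(NV,largest(NV,state(V0)))') gives
# 'answer(A,largest(B,state(B)))'; each NV introduces the next letter and
# V<n> points n letters back from the most recently introduced variable.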
def is_valid(lf, grammar):
try:
grammar.parse(lf)
except:
return False
return True
def evaluate(path, grammar, is_recover_variable, timeout=120):
with open(path, 'r') as f:
predictions = json.load(f)
library_path = os.path.join(os.getcwd(), 'prolog')
count = len(predictions)
code = list()
grammar_valid_count = 0
for pidx, p in enumerate(predictions):
if is_valid(p['predicted_logical_form'], grammar):
if is_recover_variable:
pred = recover_variable_name(p['predicted_logical_form'])
truth = recover_variable_name(p['truth_logical_form'])
else:
pred = fix_variables(p['predicted_logical_form'])
truth = fix_variables(p['truth_logical_form'])
code.append(
":-catch((call_with_time_limit(%d, execute_query(%s, U%d1)), call_with_time_limit(%d, execute_query(%s, U%d2)), print(%d), (U%d1 == U%d2 -> print(' y') ; print(' n')), nl), time_limit_exceeded, (print(%d), print(' n'), nl))." %
(timeout, pred, pidx, timeout, truth, pidx, pidx, pidx, pidx, pidx))
grammar_valid_count += 1
code = "\n".join(code)
script = script_template % (
os.path.join(library_path, 'geobase.pl'),
os.path.join(library_path, 'geoquery.pl'),
os.path.join(library_path, 'evalp.pl'),
code
)
with open('eval_prolog.pl', 'w') as f:
f.write(script)
command = 'swipl -l ' + 'eval_prolog.pl > prolog_result.log'
subprocess.call(command, shell=True)
# Parse Result
correct = 0
valid_executions = 0
with open('prolog_result.log', 'r') as f:
for line in f:
line = line.strip()
if len(line) > 0:
match = pattern.match(line)
if match:
valid_executions += 1
index, is_correct = int(
match.group(1)), match.group(2) == 'y'
predictions[index]['execution_correct'] = is_correct
if is_correct:
correct += 1
print("Total: %d, Grammar Valid: %d, Valid Executions: %d, Correct: %d, Accuracy: %f" %
(len(predictions), grammar_valid_count, valid_executions, correct, float(correct / len(predictions))))
# with open(path, 'w') as f:
# f.write(json.dumps(predictions))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--predictions', help='file that stores the prediction results', required=True)
parser.add_argument("--recover_variable",
action='store_true', default=False)
parser.add_argument("--timeout",
help='timeout limit for expression', default=120, type=int)
args = parser.parse_args()
from grammars.grammar import get_grammar
grammar = get_grammar('geo', 'prolog')
evaluate(args.predictions, grammar, args.recover_variable, args.timeout)
| 4,435 | 30.913669 | 243 |
py
|
Unimer
|
Unimer-master/executions/geo/get_lambda_calculus_denotations.py
|
# coding=utf8
import os
import re
import json
import shutil
import argparse
import subprocess
from geo.lambda_calculus.transform_lambda_caculus import transform
pattern = re.compile('\((\d+),Just\s+(.*)\)')
failed_pattern = re.compile('\((\d+),Nothing\)')
script_template = r"""
module Main where
import Lib
import Geobase
import Geofunctions
import System.Environment
import System.Timeout
main :: IO ()
main = do
putStrLn "Execute Lambda Calculus"
-- let predicted_result = (count_ (\x -> (and [(river x), (loc x "texas:s")])))
-- let truth_result = (count_ (\x -> (and [(river x), (loc x "texas:s")])))
-- let compare_result = predicted_result == truth_result
%s
"""
def evaluate(path):
questions, logical_forms = list(), list()
with open(path, 'r') as f:
for line in f:
line = line.strip()
splits = line.split('\t')
q, l = splits[0], splits[1]
questions.append(q)
logical_forms.append(l)
# timeout_limits = 5 * 60 * 1000000
# code = list()
# for pidx, p in enumerate(logical_forms):
# code.append(""" compare_result_%d <- (timeout %d (return $! (%s)))""" %
# (pidx, timeout_limits, transform(p)))
# code.append(""" print (%d, compare_result_%d)""" % (pidx, pidx))
# code = "\n".join(code)
# code = script_template % code
# with open('Main.hs', 'w') as f:
# f.write(code)
# # copy file
# shutil.copyfile('./Main.hs', './geo/lambda_calculus/evaluator/app/Main.hs')
# # Change directory
# os.chdir('./geo/lambda_calculus/evaluator')
# # Compile & run
# command = 'stack build'
# subprocess.call(command, shell=True)
# command = 'stack exec evaluator-exe > ./lambda_calculus_result.log'
# subprocess.call(command, shell=True)
# # move file
# shutil.copyfile('./lambda_calculus_result.log',
# '../../lambda_calculus_result.log')
# Read result
failed_count = 0
with open('lambda_calculus_train_result.log', 'r') as f, open('lambda_calculus_train_execution_results.tsv', 'w') as wf:
for lidx, line in enumerate(f):
line = line.strip()
if lidx == 0 or len(line) == 0:
continue
print(line)
# Failed
match = failed_pattern.match(line)
if match:
failed_count += 1
idx = int(match.group(1))
wf.write("%s\t%s\n" % (questions[idx], "Nothing"))
continue
match = pattern.match(line)
assert match is not None
idx = int(match.group(1))
result = match.group(2)
wf.write("%s\t%s\n" % (questions[idx], result))
print("Failed Count: ", failed_count)
if __name__ == '__main__':
evaluate('../data/geo/geo_lambda_calculus_train.tsv')
| 2,902 | 28.323232 | 124 |
py
|
Unimer
|
Unimer-master/executions/geo/__init__.py
| 0 | 0 | 0 |
py
|
|
Unimer
|
Unimer-master/executions/geo/get_funql_denotations.py
|
# coding=utf8
import os
import re
import json
import argparse
import subprocess
pattern = re.compile('(\d+)(\[.*\])')
script_template = """
:-compile('%s').
:-compile('%s').
:-compile('%s').
%s
:-halt.
"""
def evaluate(path):
questions, logical_forms = list(), list()
with open(path, 'r') as f:
for line in f:
line = line.strip()
splits = line.split('\t')
q, l = splits[0], splits[1]
questions.append(q)
logical_forms.append(l)
commands = list()
for idx, lf in enumerate(logical_forms):
command = ":-execute_funql_query(%s, U%d),print(%d),print(U%d),nl." % (
lf, idx, idx, idx)
commands.append(command)
commands = '\n'.join(commands)
library_path = os.path.join(os.getcwd(), 'funql')
script = script_template % (
os.path.join(library_path, 'geobase.pl'),
os.path.join(library_path, 'geoquery.pl'),
os.path.join(library_path, 'eval.pl'),
commands
)
with open('test_funql.pl', 'w') as f:
f.write(script)
command = 'swipl -l ' + 'test_funql.pl > funql_result.log'
subprocess.call(command, shell=True)
# Read result
with open('funql_result.log', 'r') as f, open('funql_execution_results.tsv', 'w') as wf:
for line in f:
line = line.strip()
match = pattern.match(line)
assert match is not None
idx = int(match.group(1))
result = match.group(2)
wf.write("%s\t%s\n" % (questions[idx], result))
if __name__ == '__main__':
evaluate('../../data/geo/geo_funql_train.tsv')
| 1,642 | 25.934426 | 92 |
py
|
Unimer
|
Unimer-master/executions/geo/sql/evaluator.py
|
# coding=utf8
import re
import mysql.connector
from pprint import pprint
db = mysql.connector.connect(
host="localhost",
user="root",
passwd="123456",
database="geo",
auth_plugin='mysql_native_password'
)
def normalize(sql):
s = re.sub(' +', ' ', sql)
s = s.replace('MAX (', 'MAX(')
s = s.replace('MIN (', 'MIN(')
s = s.replace('AVG (', 'AVG(')
s = s.replace('COUNT (', 'COUNT(')
s = s.replace('count (', 'count(')
s = s.replace('SUM (', 'SUM(')
s = s.replace('< =', '<=')
s = s.replace('> =', '>=')
return s
def format_headers(header):
s = header.replace("( ", "(").replace(" )", ")").strip().lower()
return s
def get_result(sql):
_sql = normalize(sql)
cursor = db.cursor()
cursor.execute(_sql)
# print(cursor.description)
headers = cursor.description
results = cursor.fetchall()
formatted_results = list()
for x in results:
r = dict()
for value, header in zip(x, headers):
r[format_headers(header[0])] = value
formatted_results.append(r)
# pprint(formatted_results)
return formatted_results
def compare_sqls(sql_1, sql_2):
try:
sql_1_results = get_result(sql_1)
sql_2_results = get_result(sql_2)
except Exception as e:
return False
if len(sql_1_results) != len(sql_2_results):
return False
for sql_1_row in sql_1_results:
for sql_2_row in sql_2_results:
is_same = True
for key, value in sql_1_row.items():
if key not in sql_2_row or sql_2_row[key] != value:
is_same = False
if is_same:
sql_2_results.remove(sql_2_row)
break
else:
return False
return True
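# Row-bag comparison sketch: the two result sets match only if they have the
# same number of rows and every row of the first can be paired with a distinct,
# not-yet-used row of the second that agrees on all of the first row's columns;
# row order is ignored.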
if __name__ == '__main__':
sql_1 = 'SELECT STATEalias0.POPULATION FROM STATE AS STATEalias0 WHERE STATEalias0.STATE_NAME = "texas" ;'
# sql_2 = "select count(distinct flight_1.flight_id) from flight flight_1 , airport_service airport_service_1 , city city_1 , airport_service airport_service_2 , city city_2 , days days_1 , date_day date_day_1 where flight_1.from_airport = airport_service_1.airport_code and airport_service_1.city_code = city_1.city_code and city_1.city_name = 'san francisco' and (flight_1.to_airport = airport_service_2.airport_code and airport_service_2.city_code = city_2.city_code and city_2.city_name = 'philadelphia' and flight_1.flight_days = days_1.days_code and days_1.day_name = date_day_1.day_name and date_day_1.year = 1991 and date_day_1.month_number = 8 and date_day_1.day_number = 18);"
# print(compare_sqls(sql_1, sql_2))
formatted_results = get_result(sql_1)
pprint(formatted_results)
| 2,741 | 32.439024 | 690 |
py
|
Unimer
|
Unimer-master/executions/geo/lambda_calculus/parse_geobase.py
|
# coding=utf8
import re
def parse_state(path):
with open(path, 'r') as f:
for line in f:
line = line.strip()
if line.startswith('state('):
info = line[len('state('):-2]
# print(info)
infos = info.split(',')
for idx, content in enumerate(infos):
if "'" in content:
infos[idx] = content.replace("'", '"')
new_infos = ["State"]
state_abbre = None
for idx, content in enumerate(infos):
if idx == 6:
new_infos.append('[' + content + ',')
elif idx == 0:
# state_name
n = infos[idx]
if " " in n:
n = n.replace(" ", "_")
n = n[:-1] + ':s"'
new_infos.append(n)
elif idx == 1:
state_abbre = content.replace('"', '')
new_infos.append(content)
elif idx == 2:
# Capital
n = infos[idx]
if " " in n:
n = n.replace(" ", "_")
n = n[:-1] + '_%s:c"' % state_abbre
new_infos.append(n)
elif idx > 6:
new_infos.append(content + ',')
else:
new_infos.append(content)
else:
new_infos[-1] = new_infos[-1][:-1]+']'
# Process city
string = " ".join(new_infos)
cities = string[string.index('[')+1:string.index(']')]
new_cities = list()
for c in cities.split(","):
c = c.strip().replace(" ", "_")
new_cities.append(c[:-1] + '_%s:c"' % state_abbre)
string = string[:string.index('[')+1] + ", ".join(new_cities) + ']'
print(string + ",")
def parse_city(path):
with open(path, 'r') as f:
for line in f:
line = line.strip()
if line.startswith('city('):
info = line[len('city('):-2]
# print(info)
infos = info.split(',')
state_abbre = None
for idx, content in enumerate(infos):
if "'" in content:
infos[idx] = content.replace("'", '"')
content = infos[idx]
if idx == 0:
# state_name
n = content
if " " in n:
n = n.replace(" ", "_")
n = n[:-1] + ':s"'
infos[idx] = n
elif idx == 1:
# abbre
state_abbre = content.replace('"', '')
elif idx == 2:
# city
n = content
if " " in n:
n = n.replace(" ", "_")
n = n[:-1] + '_%s:c"' % state_abbre
infos[idx] = n
infos.insert(0, "City")
print(" ".join(infos) + ",")
def parse_river(path):
with open(path, 'r') as f:
for line in f:
line = line.strip()
if line.startswith('river(') and line.endswith('.'):
info = line[len('river('):-2]
info = info.replace("'", '"')
lindex = info.index('[')
rindex = info.index(']')
infos = info.split(',')
new_infos = ['River', infos[0][:-1].replace(" ", "_") + ':r"', infos[1], info[lindex:rindex+1]]
string = " ".join(new_infos)
states = string[string.index('[')+1:string.index(']')]
new_states = list()
for c in states.split(","):
c = c.strip().replace(" ", "_")
new_states.append(c[:-1] + ':s"')
string = string[:string.index('[')+1] + ", ".join(new_states) + ']'
print(string + ",")
def parse_border(path):
with open(path, 'r') as f:
for line in f:
line = line.strip()
if line.startswith('border(') and line.endswith('.'):
info = line[len('border('):-2]
info = info.replace("'", '"')
lindex = info.index('[')
rindex = info.index(']')
infos = info.split(',')
new_infos = ['Border', infos[0][:-1].replace(" ", "_") + ':s"', infos[1], info[lindex:rindex+1]]
string = " ".join(new_infos)
states = string[string.index('[')+1:string.index(']')]
new_states = list()
for c in states.split(","):
c = c.strip().replace(" ", "_")
new_states.append(c[:-1] + ':s"')
string = string[:string.index('[')+1] + ", ".join(new_states) + ']'
print(" " + string + ",")
def parse_highlow(path):
with open(path, 'r') as f:
for line in f:
line = line.strip()
if line.startswith('highlow(') and line.endswith('.'):
info = line[len('highlow('):-2]
info = info.replace("'", '"')
infos = info.split(',')
infos.insert(0, 'HighLow')
infos[1] = infos[1][:-1].replace(" ", "_") + ':s"'
infos[3] = infos[3][:-1].replace(" ", "_") + ':lo"'
infos[5] = infos[5][:-1].replace(" ", "_") + ':lo"'
print(" ".join(infos) + ",")
def parse_mountain(path):
with open(path, 'r') as f:
for line in f:
line = line.strip()
if line.startswith('mountain(') and line.endswith('.'):
info = line[len('mountain('):-2]
info = info.replace("'", '"')
infos = info.split(',')
infos.insert(0, 'Mountain')
infos[1] = infos[1][:-1].replace(" ", "_") + ':s"'
infos[3] = infos[3][:-1].replace(" ", "_") + ':m"'
print(" ".join(infos) + ",")
def parse_road(path):
with open(path, 'r') as f:
for line in f:
line = line.strip()
if line.startswith('road(') and line.endswith('.'):
info = line[len('road('):-2]
info = info.replace("'", '"')
index = info.index(',')
info = "RoadInfo " + info[:index] + " " + info[index+1:]+","
print(info)
def parse_lake(path):
with open(path, 'r') as f:
for line in f:
line = line.strip()
if line.startswith('lake(') and line.endswith('.'):
info = line[len('lake('):-2]
info = info.replace("'", '"')
lindex = info.index('[')
rindex = info.index(']')
infos = info.split(',')
new_infos = ['Lake', infos[0][:-1].replace(" ", "_") + ':l"', infos[1], info[lindex:rindex+1]]
string = " ".join(new_infos)
states = string[string.index('[')+1:string.index(']')]
new_states = list()
for c in states.split(","):
c = c.strip().replace(" ", "_")
new_states.append(c[:-1] + ':s"')
string = string[:string.index('[')+1] + ", ".join(new_states) + ']'
print(" " + string + ",")
if __name__ == '__main__':
path = '../prolog/geobase.pl'
parse_lake(path)
| 7,848 | 37.665025 | 112 |
py
|
Unimer
|
Unimer-master/executions/geo/lambda_calculus/transform_lambda_caculus.py
|
# coding=utf8
import re
import json
entity_pattern = re.compile('\s([a-z|_|.]+?:[a-z]+)[\s|)]')
def process_logic_expression(haskell_lf, is_and):
if is_and:
target, replace_target, replace_result = "(and:<t*,t>", "and:<t*,t>", "and ["
else:
target, replace_target, replace_result = "(or:<t*,t>", "or:<t*,t>", "or ["
and_count = haskell_lf.count(target)
# print(and_count)
for and_idx in range(and_count):
try:
index = haskell_lf.rindex(target)
        except ValueError:
pass
else:
prefix = haskell_lf[:index]
suffix = haskell_lf[index:].replace(replace_target, replace_result)
assert suffix[0] == '('
idx = 1
stack = [suffix[idx]]
new_suffix = "("
while idx < len(suffix):
if len(stack) == 0:
break
character = suffix[idx]
new_suffix += character
if character == ')':
stack.pop(len(stack) - 1)
if len(stack) == 1:
new_suffix += ','
elif character == '(':
stack.append('(')
idx += 1
new_suffix = new_suffix[:-1] + '])' + suffix[idx:]
if new_suffix.count('[') != new_suffix.count(']'):
print("Not equals")
# assert new_suffix.count('[') == new_suffix.count(']')
haskell_lf = prefix + new_suffix
# print(new_suffix)
haskell_lf = haskell_lf.replace("[ (", "[(").replace(",]", "]")
return haskell_lf
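# A hedged example of the bracketing above, on a hypothetical input:
# process_logic_expression('(and:<t*,t> (state x) (loc x "texas:s"))', is_and=True)
# returns '(and [(state x), (loc x "texas:s")])', i.e. the variadic and/or
# operators become Haskell list arguments.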
def transform(logical_form):
# 1. Replace argument $0, $1, $2, $3, $4 -> x, y, z, m, n
haskell_lf = logical_form.replace('$0', 'x')
haskell_lf = haskell_lf.replace('$1', 'y')
haskell_lf = haskell_lf.replace('$2', 'z')
haskell_lf = haskell_lf.replace('$3', 'm')
haskell_lf = haskell_lf.replace('$4', 'n')
# 2. Replace Predicate
haskell_lf = haskell_lf.replace('state:<s,t>', 'state')
haskell_lf = haskell_lf.replace('capital:<c,t>', 'capital')
haskell_lf = haskell_lf.replace('capital:<s,c>', 'capital2')
haskell_lf = haskell_lf.replace('capital:<s,<c,t>>', 'is_state_capital')
haskell_lf = haskell_lf.replace('capital2:<s,<c,t>>', 'is_state_capital')
haskell_lf = haskell_lf.replace('place:<p,t>', 'place')
haskell_lf = haskell_lf.replace('city:<c,t>', 'city')
haskell_lf = haskell_lf.replace('town:<lo,t>', 'town')
haskell_lf = haskell_lf.replace('river:<r,t>', 'river')
haskell_lf = haskell_lf.replace('lake:<l,t>', 'lake')
haskell_lf = haskell_lf.replace('mountain:<m,t>', 'mountain')
haskell_lf = haskell_lf.replace('high_point:<e,<e,t>>', 'high_point')
haskell_lf = haskell_lf.replace('elevation:<lo,i>', 'elevation')
haskell_lf = haskell_lf.replace('area:<lo,i>', 'area')
haskell_lf = haskell_lf.replace('population:<lo,i>', 'population')
haskell_lf = haskell_lf.replace('population:<lo,<i,t>>', 'is_population')
haskell_lf = haskell_lf.replace('area:<lo,i>', 'area')
haskell_lf = haskell_lf.replace('len:<r,i>', 'len')
haskell_lf = haskell_lf.replace('size:<lo,i>', 'size_')
haskell_lf = haskell_lf.replace('next_to:<lo,<lo,t>>', 'next_to')
haskell_lf = haskell_lf.replace('loc:<lo,<lo,t>>', 'loc')
haskell_lf = haskell_lf.replace('major:<lo,t>', 'major')
haskell_lf = haskell_lf.replace('density:<lo,i>', 'density')
haskell_lf = haskell_lf.replace('density:<lo,<i,t>>', 'is_density')
haskell_lf = haskell_lf.replace('elevation:<lo,<i,t>>', 'is_correct_elevation')
haskell_lf = haskell_lf.replace('the:<<e,t>,e>', 'take_first')
haskell_lf = haskell_lf.replace('argmax:<<e,t>,<<e,i>,e>>', 'argmax_')
haskell_lf = haskell_lf.replace('argmin:<<e,t>,<<e,i>,e>>', 'argmin_')
haskell_lf = haskell_lf.replace('sum:<<e,t>,<<e,i>,i>>', 'sum_')
haskell_lf = haskell_lf.replace('forall:<<e,t>,t>', 'forall_')
haskell_lf = haskell_lf.replace('exists:<<e,t>,t>', 'exists_')
haskell_lf = haskell_lf.replace('count:<<e,t>,i>', 'count_')
haskell_lf = haskell_lf.replace('equals:<e,<e,t>>', 'equals_')
haskell_lf = haskell_lf.replace('in:<lo,<lo,t>>', 'equals_')
haskell_lf = haskell_lf.replace('named:<e,<n,t>>', 'named')
haskell_lf = haskell_lf.replace('not:<t,t>', 'not')
haskell_lf = haskell_lf.replace('<:<i,<i,t>>', 'lower_than')
haskell_lf = haskell_lf.replace('>:<i,<i,t>>', 'larger_than')
haskell_lf = haskell_lf.replace('=:<i,<i,t>>', 'num_equals')
# 3. Replace Lambda
haskell_lf = haskell_lf.replace('lambda x:e', '\\x ->')
haskell_lf = haskell_lf.replace('lambda x:i', '\\x ->')
haskell_lf = haskell_lf.replace('lambda y:e', '\\y ->')
haskell_lf = haskell_lf.replace('lambda z:e', '\\z ->')
haskell_lf = haskell_lf.replace('lambda m:e', '\\m ->')
haskell_lf = haskell_lf.replace('lambda n:e', '\\n ->')
# 4. Replace and:<t*,t>
    haskell_lf = re.sub(r"\s+", " ", haskell_lf)  # collapse runs of whitespace
haskell_lf = process_logic_expression(haskell_lf, is_and=True)
haskell_lf = process_logic_expression(haskell_lf, is_and=False)
# Replace Entity
entities = set(entity_pattern.findall(haskell_lf))
for e in entities:
haskell_lf = haskell_lf.replace(e, '"%s"' % e)
# Add runnning environment for Lambda expression
if haskell_lf.startswith('(\\'):
# Lambda
lambda_variable_types = logical_form.split()[1]
assert lambda_variable_types.endswith(
":e") or lambda_variable_types.endswith(":i")
subjects = "all_entities" if lambda_variable_types.endswith(':e') else "all_numbers"
haskell_lf = "[e | e <- %s, %s e]" % (subjects, haskell_lf)
# Replace Constance Value
haskell_lf = haskell_lf.replace('0:i', '0')
return haskell_lf
if __name__ == '__main__':
path = '../../../data/geo/geo_lambda_calculus_train.tsv'
results = list()
with open(path, 'r') as f:
for line in f:
line = line.strip()
question, logical_form = line.split('\t')
haskell_lf = transform(logical_form)
print(question)
print(logical_form)
print(haskell_lf)
print("===\n\n")
results.append({"question": question, "predicted_logical_form": haskell_lf, "truth_logical_form": haskell_lf})
with open("test_predictions.json", 'w') as f:
f.write(json.dumps(results))
| 6,483 | 42.810811 | 122 |
py
|
Unimer
|
Unimer-master/executions/geo/lambda_calculus/__init__.py
|
# coding=utf8
| 13 | 13 | 13 |
py
|
Unimer
|
Unimer-master/executions/geo/lambda_calculus/create_evaluate_script.py
|
# coding=utf8
import os
import json
script_template = r"""
module Main where
import Lib
import Geobase
import Geofunctions
import System.Environment
import System.Timeout
main :: IO ()
main = do
putStrLn "Execute Lambda Calculus"
-- let predicted_result = (count_ (\x -> (and [(river x), (loc x "texas:s")])))
-- let truth_result = (count_ (\x -> (and [(river x), (loc x "texas:s")])))
-- let compare_result = predicted_result == truth_result
%s
let total = length results
let correct = length . filter (\x -> x == Just True) $ results
putStrLn "Results: "
print total
print correct"""
def read_predictions(filepath):
with open(filepath, 'r') as f:
return json.load(f)
def main(filepath):
predictions = read_predictions(filepath)
timeout_limits = 10 * 60 * 1000000
code = list()
result_code = """ let results = ["""
for pidx, p in enumerate(predictions):
print(pidx)
print(p['question'])
print(p['predicted_logical_form'])
print("==\n\n")
code.append(""" compare_result_%d <- (timeout %d (return $! ((%s) == (%s))))""" % (pidx, timeout_limits, p['predicted_logical_form'], p['truth_logical_form']))
code.append(""" print (%d, compare_result_%d)""" % (pidx, pidx))
result_code += "compare_result_%d," % pidx
result_code = result_code[:-1] + "]"
code.append(result_code)
code = "\n".join(code)
code = script_template % code
with open('Main.hs', 'w') as f:
f.write(code)
if __name__ == '__main__':
main("test_predictions.json")
| 1,605 | 26.689655 | 170 |
py
|
Unimer
|
Unimer-master/executions/atis/evaluate_sql.py
|
# coding=utf8
import os
import re
import json
import argparse
from sql.evaluator import compare_sqls
def evaluate(path, timeout=120):
with open(path, 'r') as f:
predictions = json.load(f)
total = len(predictions)
correct = 0
for pidx, p in enumerate(predictions):
truth = p['truth_logical_form']
pred = p['predicted_logical_form']
print(pidx)
print(truth)
print(pred)
if truth == pred or compare_sqls(truth, pred):
print(True)
correct += 1
else:
print(False)
print("===\n\n")
print("Total: %d, Correct: %d, Accuracy: %f" %
(total, correct, float(correct / total)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--predictions', help='file that stores the prediction results', required=True)
args = parser.parse_args()
evaluate(args.predictions)
| 956 | 24.184211 | 87 |
py
|
Unimer
|
Unimer-master/executions/atis/evaluate_lambda_calculus.py
|
# coding=utf8
import json
import argparse
from lambda_calculus.lc_evaluator import compare_lambda_calculus
def evaluate(path, timeout=600):
with open(path, 'r') as f:
predictions = json.load(f)
total = len(predictions)
correct = 0
for pidx, p in enumerate(predictions):
print(pidx)
print(p['question'])
truth = p['truth_logical_form']
pred = p['predicted_logical_form']
is_correct = False
if truth == pred:
is_correct = True
elif compare_lambda_calculus(truth, pred, time_limit=timeout):
is_correct = True
if is_correct:
correct += 1
print("is_correct: ", is_correct)
print("===\n\n")
print("Total: %d, Correct: %d, Accuracy: %f" %
(total, correct, float(correct / total)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--predictions', help='file that stores the prediction results', required=True)
args = parser.parse_args()
evaluate(args.predictions)
| 1,076 | 26.615385 | 87 |
py
|
Unimer
|
Unimer-master/executions/atis/evaluate_funql.py
|
# coding=utf8
import json
import argparse
from funql.evaluator import compare_funql
def evaluate(path, timeout=600):
with open(path, 'r') as f:
predictions = json.load(f)
total = len(predictions)
correct = 0
for pidx, p in enumerate(predictions):
print(pidx)
print(p['question'])
truth = p['truth_logical_form']
pred = p['predicted_logical_form']
is_correct = False
if truth == pred:
is_correct = True
elif compare_funql(truth, pred, time_limit=timeout):
is_correct = True
if is_correct:
correct += 1
print("is_correct: ", is_correct)
print("===\n\n")
print("Total: %d, Correct: %d, Accuracy: %f" %
(total, correct, float(correct / total)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--predictions', help='file that stores the prediction results', required=True)
args = parser.parse_args()
evaluate(args.predictions)
| 1,042 | 26.447368 | 87 |
py
|
Unimer
|
Unimer-master/executions/atis/__init__.py
| 0 | 0 | 0 |
py
|
|
Unimer
|
Unimer-master/executions/atis/funql/transform.py
|
# coding=utf8
ENTITY_TYPE_MAP = {
"ac": "aircraft_code",
"al": "airline_code",
"ci": "city_name",
"ap": "airport_code",
"fn": "flight_number",
"cl": "class_description",
"ti": "time",
"pd": "day_period",
"mf": "manufacturer",
"mn": "month",
"da": "day",
"i": "integer",
"yr": "year",
"dn": "day_number",
"do": "dollar",
"hr": "hour",
"rc": "meal_code",
"st": "state_name",
"fb": "fare_basis_code",
"me": "meal_description",
"bat": "basis_type",
"dc": "days_code"
}
def tokenize_funql(funql):
normalized_lf = funql.replace(" ", "::")
replacements = [
('(', '( '),
(')', ' ) '),
(',', ' , '),
("\\+", " \\+ "),
]
for a, b in replacements:
normalized_lf = normalized_lf.replace(a, b)
tokens = [t if "::" not in t else t.replace("::", " ") for t in normalized_lf.split()]
return tokens
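# A minimal sketch, on a hypothetical FunQL: tokenize_funql('_from_2(city_name(boston))')
# yields ['_from_2(', 'city_name(', 'boston', ')', ')']; multi-word values survive
# because spaces are temporarily replaced by '::' before splitting.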
def transform(funql):
expression = funql
tokens = tokenize_funql(expression)
print(tokens)
all_entity_types = {"%s(" % value for _, value in ENTITY_TYPE_MAP.items()}
transformed_tokens = list()
tidx = 0
while tidx < len(tokens):
token = tokens[tidx]
if token in all_entity_types and tidx + 2 < len(tokens) and tokens[tidx + 2] == ')':
transformed_tokens.append("build_entity(")
transformed_tokens.append('"%s"' % token[:-1])
transformed_tokens.append(",")
transformed_tokens.append('"%s"' % tokens[tidx + 1])
transformed_tokens.append(")")
tidx += 3
elif token == 'all':
print(transformed_tokens)
print(token, transformed_tokens[-1], tokens[tidx + 1])
assert transformed_tokens[-1].endswith('(') and tokens[tidx + 1] == ')'
transformed_tokens[-1] = "%s_all(" % transformed_tokens[-1][:-1]
tidx += 1
else:
if token.startswith('_'):
token = token[1:]
if token in ['count(', 'min(', 'or(', 'not(', 'max(']:
token = token.replace("(", "_(")
token = token.replace(":_", "_").replace("<_", "less_than_").replace(">_", "larger_than_")
transformed_tokens.append(token)
tidx += 1
expression = "".join(transformed_tokens)
return expression
if __name__ == '__main__':
funql = "answer(_flight(intersection(_to_2(airport_code(dal)),_from_2(_airport(all)))))"
expression = transform(funql)
print(expression)
# data = list()
# with open('../../../data/atis/atis_funql_test.tsv', 'r') as f:
# for line in f:
# line = line.strip()
# data.append(line.split('\t'))
# for idx, (question, funql) in enumerate(data):
# print(idx)
# print(question)
# print(funql)
# expression = transform(funql)
# print(expression)
# print('====\n\n')
| 2,952 | 29.760417 | 102 |
py
|
Unimer
|
Unimer-master/executions/atis/funql/evaluator.py
|
# coding=utf8
import logging
from .query import *
from .transform import transform
def get_result(funql):
python_lf = transform(funql)
return_dict = dict()
try:
results = eval(python_lf)
except:
logging.error("Exception", exc_info=True)
return_dict['is_valid'] = False
else:
return_dict['is_valid'] = True
return_dict['results'] = results
close_connection()
return return_dict
def compare_funql(funql_1, funql_2, time_limit=600):
try:
lc_1_results = get_result(funql_1)
lc_2_results = get_result(funql_2)
except Exception as e:
return False
if type(lc_1_results) != type(lc_2_results):
return False
if isinstance(lc_1_results, list):
if len(lc_1_results) != len(lc_2_results):
return False
for lc_1_row in lc_1_results:
for lc_2_row in lc_2_results:
is_same = True
used_keys = set()
for key, value in lc_1_row.items():
if key not in lc_2_row:
is_same = False
else:
# Key in lc_2_row
# Find key
if key.startswith("<lambda>"):
for k2, v2 in lc_2_row.items():
if k2 not in used_keys and k2.startswith("<lambda>") and value == v2:
used_keys.add(k2)
break
else:
is_same = False
else:
if lc_2_row[key] != value:
is_same = False
if is_same:
lc_2_results.remove(lc_2_row)
break
else:
return False
return True
else:
return lc_1_results == lc_2_results
if __name__ == '__main__':
funql_1 = 'answer(intersection(_from_2(city_name(milwaukee)),_to_2(city_name(phoenix)),_day_2(day(saturday))))'
funql_2 = 'answer(intersection(_to_2(city_name(phoenix)), _from_2(city_name(milwaukee)),_day_2(day(saturday))))'
print(compare_funql(funql_1, funql_2))
| 2,277 | 30.638889 | 116 |
py
|
Unimer
|
Unimer-master/executions/atis/funql/__init__.py
| 0 | 0 | 0 |
py
|
|
Unimer
|
Unimer-master/executions/atis/funql/query.py
|
# coding=utf8
import re
import mysql.connector
from pprint import pprint
from .transform import transform
db = None
def get_connection():
global db
if db and db.is_connected():
return db
else:
db = mysql.connector.connect(
host="localhost",
user="root",
passwd="123456",
database="atis",
auth_plugin='mysql_native_password'
)
return db
def close_connection():
if db is not None and db.is_connected():
db.close()
def format_headers(header):
s = header.replace("( ", "(").replace(" )", ")").strip().lower()
return s
def get_result(sql):
db = get_connection()
_sql = sql
cursor = db.cursor()
cursor.execute(_sql)
# print(cursor.description)
headers = cursor.description
results = cursor.fetchall()
formatted_results = list()
for x in results:
r = dict()
for value, header in zip(x, headers):
r[format_headers(header[0])] = value
formatted_results.append(r)
# pprint(formatted_results)
return formatted_results
"""
Entity Type
"""
def build_entity(entity_type, entity_value):
return {entity_type: entity_value}
def answer(values):
return values
def get_entity_value(entity, key=None):
assert isinstance(entity, dict)
if key:
entity_type = key
entity_value = entity[key]
else:
entity_type = list(entity.keys())[0]
entity_value = entity[entity_type].replace("_", " ")
if entity_value == 'st louis':
entity_value = 'st. louis'
elif entity_value == 'st petersburg':
entity_value = 'st. petersburg'
elif entity_value == 'st paul':
entity_value = 'st. paul'
return entity_type, entity_value
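# Sketch of the normalization above, on a hypothetical entity:
# get_entity_value(build_entity("city_name", "st louis")) yields
# ("city_name", "st. louis"); underscores become spaces and the abbreviated
# "st" city names gain their dot.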
def meal_code_all():
sql = "SELECT distinct meal_code FROM food_service"
return get_result(sql)
def airport_all():
sql = "SELECT distinct airport_code FROM airport"
return get_result(sql)
def aircraft_all():
sql = "SELECT distinct aircraft_code FROM aircraft"
return get_result(sql)
def city_all():
sql = "SELECT distinct city_name FROM city"
return get_result(sql)
def fare_basis_code_all():
sql = "SELECT distinct fare_basis_code FROM fare_basis"
return get_result(sql)
def airline_all():
sql = "SELECT distinct airline_code FROM airline"
return get_result(sql)
def flight_all():
sql = 'SELECT DISTINCT flight_id FROM flight'
return get_result(sql)
def booking_class_t_all():
sql = "SELECT distinct class_description FROM class_of_service"
return get_result(sql)
def class_of_service_all():
sql = 'SELECT DISTINCT class_of_service_1.booking_class FROM class_of_service class_of_service_1'
return get_result(sql)
def ground_transport_all():
sql = "SELECT distinct transport_type FROM ground_service"
return get_result(sql)
def abbrev(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
results = list()
for e in entities:
sql = "SELECT DISTINCT airline_1.airline_code FROM airline airline_1 WHERE airline_1.airline_name like '%" + e['airline_code'] + "%'"
results += get_result(sql)
return results
def capacity(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
results = list()
flight_number_template = "SELECT flight_1.flight_number, aircraft_1.capacity FROM aircraft as aircraft_1 JOIN flight as flight_1 on aircraft_1.aircraft_code = flight_1.aircraft_code_sequence WHERE flight_1.flight_number = %s;"
flight_id_template = "SELECT flight_1.flight_id, aircraft_1.capacity FROM aircraft as aircraft_1 JOIN flight as flight_1 on aircraft_1.aircraft_code = flight_1.aircraft_code_sequence WHERE flight_1.flight_id = %s;"
aircraft_code_template = "SELECT DISTINCT aircraft_1.aircraft_code, aircraft_1.capacity FROM aircraft aircraft_1 WHERE aircraft_1.aircraft_code = '%s'"
for e in entities:
if 'aircraft_code' in e:
entity_type, entity_name = get_entity_value(e, key='aircraft_code')
sql = aircraft_code_template % entity_name
elif 'flight_id' in e:
entity_type, entity_name = get_entity_value(e, key='flight_id')
# flight id
sql = flight_id_template % entity_name
else:
# entity_type == 'flight_number':
entity_type, entity_name = get_entity_value(e, key='flight_number')
sql = flight_number_template % entity_name
results += get_result(sql)
return results
def flight_number(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
flight_id = "(%s)" % ','.join(['"%s"' % e['flight_id'] for e in entities])
sql = "SELECT DISTINCT flight_number FROM flight WHERE flight_id IN %s" % flight_id
results = get_result(sql)
return results
def time_elapsed(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
flight_id = "(%s)" % ','.join(['"%s"' % e['flight_id'] for e in entities])
sql = "SELECT DISTINCT flight_id, time_elapsed FROM flight WHERE flight_id IN %s" % flight_id
return get_result(sql)
def time_elapsed_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
sql = "SELECT DISTINCT flight_id FROM flight WHERE time_elapsed = %s" % entity_value_1
return get_result(sql)
def minimum_connection_time(airport_code):
entity_type_1, entity_value_1 = get_entity_value(airport_code)
sql = "SELECT DISTINCT airport_1.minimum_connect_time FROM airport airport_1 WHERE airport_1.airport_code = '%s'" % (
entity_value_1)
return get_result(sql)
def miles_distant(entity_1, entity_2):
"""
_miles_distant
:entity_type: (airport_code, city_name)
:entity_type: (city_name, city_name)
"""
entity_type_1, entity_value_1 = get_entity_value(entity_1)
entity_type_2, entity_value_2 = get_entity_value(entity_2)
if entity_type_1 == 'airport_code' and entity_type_2 == 'city_name':
sql = "SELECT airport_service.miles_distant FROM airport_service JOIN city ON city.city_code = airport_service.city_code WHERE city.city_name = '%s' AND airport_service.airport_code = '%s'" % (
entity_value_2, entity_value_1)
else:
sql = "SELECT distinct airport_service.miles_distant FROM airport_service JOIN city ON airport_service.city_code = city.city_code WHERE city.city_name = '%s' AND airport_service.airport_code IN (SELECT T1.airport_code FROM airport_service AS T1 JOIN city AS T2 ON T1.city_code = T2.city_code WHERE T2.city_name = '%s');" % (
entity_value_1, entity_value_2)
return get_result(sql)
def minutes_distant(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
key = 'city_name' if 'city_name' in entities[0] else 'airport_code'
values = "(%s)" % ','.join(['"%s"' % e[key] for e in entities])
if key == 'city_name':
sql = "SELECT minutes_distant, city_name FROM airport_service JOIN city ON airport_service.city_code = city.city_code WHERE city.city_name IN %s" % (values)
else:
# airport_code
sql = "SELECT minutes_distant FROM airport_service WHERE airport_code IN %s" % values
results = get_result(sql)
return results
def services_1(airline_code):
entity_type_1, entity_value_1 = get_entity_value(airline_code)
sql = "SELECT city.city_name, flight.to_airport FROM flight JOIN airport_service ON flight.to_airport = airport_service.airport_code JOIN city ON city.city_code = airport_service.city_code WHERE flight.airline_code = '%s'" % (entity_value_1)
results = get_result(sql)
return results
def services_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
if entity_type_1 == 'city_name':
sql = "SELECT flight.airline_code FROM flight JOIN airport_service ON flight.to_airport = airport_service.airport_code JOIN city ON city.city_code = airport_service.city_code WHERE city.city_name = '%s'" % (
entity_value_1)
else:
assert entity_type_1 == 'airport_code'
sql = "SELECT DISTINCT flight.airline_code FROM flight WHERE flight.to_airport = '%s'" % (
entity_value_1,)
results = get_result(sql)
return results
def services(entity_1, entity_2):
entity_type_1, entity_value_1 = get_entity_value(entity_1)
entity_type_2, entity_value_2 = get_entity_value(entity_2)
if entity_type_2 == 'city_name':
sql = "SELECT DISTINCT flight.airline_code, city.city_name FROM flight JOIN airport_service ON flight.to_airport = airport_service.airport_code JOIN city ON city.city_code = airport_service.city_code WHERE flight.airline_code = '%s' AND city.city_name = '%s'" % (
entity_value_1, entity_value_2)
else:
assert entity_type_2 == 'airport_code'
sql = "SELECT DISTINCT flight.airline_code, flight.to_airport FROM flight WHERE flight.airline_code = '%s' AND flight.to_airport = '%s'" % (
entity_value_1, entity_value_2,)
results = get_result(sql)
return results
def airport(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
results = list()
value_set = set()
for entity in entities:
# Airport code
for key in ['from_airport', 'to_airport', 'airport_code']:
if key in entity and entity[key] not in value_set:
results.append({key: entity[key]})
value_set.add(entity[key])
break
return results
def aircraft(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
results = list()
value_set = set()
for entity in entities:
        # Deduplicate on aircraft_code
for key in ['aircraft_code']:
if key in entity and entity[key] not in value_set:
results.append({key: entity[key]})
value_set.add(entity[key])
break
return results
def airline(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
results = list()
value_set = set()
for entity in entities:
        # Deduplicate on airline_code
for key in ['airline_code']:
if key in entity and entity[key] not in value_set:
results.append({key: entity[key]})
value_set.add(entity[key])
break
return results
def city(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
results = list()
value_set = set()
for entity in entities:
        # Deduplicate on city_name
for key in ['city_name']:
if key in entity and entity[key] not in value_set:
results.append({key: entity[key]})
value_set.add(entity[key])
break
return results
def time_zone_code(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
results = list()
value_set = set()
for entity in entities:
        # Deduplicate on time_zone_code
for key in ['time_zone_code']:
if key in entity and entity[key] not in value_set:
results.append({key: entity[key]})
value_set.add(entity[key])
break
return results
def flight(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
results = list()
value_set = set()
for entity in entities:
        # Deduplicate on flight_id
for key in ['flight_id']:
if key in entity and entity[key] not in value_set:
results.append({key: entity[key]})
value_set.add(entity[key])
break
return results
def taxi(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
results = list()
value_set = set()
for entity in entities:
        # Keep entries whose transport_type mentions TAXI
for key in ['transport_type']:
if key in entity and "TAXI" in entity[key]:
results.append({key: entity[key]})
value_set.add(entity[key])
break
return results
def air_taxi_operation(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
results = list()
value_set = set()
for entity in entities:
        # Keep entries whose transport_type is exactly AIR TAXI OPERATION
for key in ['transport_type']:
if key in entity and "AIR TAXI OPERATION" == entity[key]:
results.append({key: entity[key]})
value_set.add(entity[key])
break
return results
def limousine(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
results = list()
value_set = set()
for entity in entities:
        # Keep entries whose transport_type is exactly LIMOUSINE
for key in ['transport_type']:
if key in entity and "LIMOUSINE" == entity[key]:
results.append({key: entity[key]})
value_set.add(entity[key])
break
return results
def rapid_transit(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
results = list()
value_set = set()
for entity in entities:
        # Keep entries whose transport_type is exactly RAPID TRANSIT
for key in ['transport_type']:
if key in entity and "RAPID TRANSIT" == entity[key]:
results.append({key: entity[key]})
value_set.add(entity[key])
break
return results
def rental_car(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
results = list()
value_set = set()
for entity in entities:
        # Keep entries whose transport_type is exactly RENTAL CAR
for key in ['transport_type']:
if key in entity and "RENTAL CAR" == entity[key]:
results.append({key: entity[key]})
value_set.add(entity[key])
break
return results
def ground_transport(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
results = list()
value_set = set()
for entity in entities:
        # Pass through the transport_type of every entity
for key in ['transport_type']:
results.append({key: entity[key]})
value_set.add(entity[key])
break
return results
def turboprop(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
et = 'aircraft_code' if 'aircraft_code' in entities[0] else 'flight_id'
values = "(%s)" % ','.join(['"%s"' % e[et] for e in entities])
if et == 'aircraft_code':
sql = "SELECT aircraft_code FROM aircraft WHERE aircraft_code IN %s AND propulsion = 'TURBOPROP'" % values
else:
sql = "SELECT flight_id FROM flight JOIN aircraft ON flight.aircraft_code_sequence = aircraft.aircraft_code WHERE propulsion = 'TURBOPROP' AND flight_id IN %s" % values
results = get_result(sql)
return results
def jet(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
et = 'aircraft_code' if 'aircraft_code' in entities[0] else 'flight_id'
values = "(%s)" % ','.join(['"%s"' % e[et] for e in entities])
if et == 'aircraft_code':
sql = "SELECT aircraft_code FROM aircraft WHERE aircraft_code IN %s AND propulsion = 'JET'" % values
else:
sql = "SELECT flight_id FROM flight JOIN aircraft ON flight.aircraft_code_sequence = aircraft.aircraft_code WHERE propulsion = 'JET' AND flight_id IN %s" % values
results = get_result(sql)
return results
def economy(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
sql = "SELECT flight_fare.flight_id FROM flight_fare JOIN fare ON flight_fare.fare_id = fare.fare_id JOIN fare_basis ON fare.fare_basis_code = fare_basis.fare_basis_code WHERE fare_basis.economy = 'YES'"
results = get_result(sql)
results = intersection(entities, results)
return results
def connecting(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
flight_id = "(%s)" % ','.join(['"%s"' % e['flight_id'] for e in entities])
sql = 'SELECT DISTINCT flight_id FROM flight WHERE connections > 0 AND flight_id IN %s ' % flight_id
results = get_result(sql)
return results
def discounted(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
flight_id = "(%s)" % ','.join(['"%s"' % e['flight_id'] for e in entities])
sql = "SELECT DISTINCT flight.flight_id FROM flight JOIN flight_fare ON flight.flight_id = flight_fare.flight_id JOIN fare ON fare.fare_id = flight_fare.fare_id JOIN fare_basis ON fare.fare_basis_code = fare_basis.fare_basis_code WHERE fare_basis.discounted = 'YES' AND flight.flight_id IN %s" % flight_id
results = get_result(sql)
return results
def nonstop(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
sql = 'SELECT flight.flight_id FROM flight WHERE flight.stops = 0'
results = get_result(sql)
results = intersection(entities, results)
return results
def daily(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
sql = "SELECT flight_id FROM flight WHERE flight_days = 'daily'"
results = get_result(sql)
results = intersection(entities, results)
return results
def today(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
sql = "SELECT flight.flight_id FROM flight JOIN days on flight.flight_days = days.days_code JOIN date_day ON days.day_name = date_day.day_name WHERE date_day.year = 1991 AND date_day.month_number = 6 AND date_day.day_number = 22"
results = get_result(sql)
results = intersection(entities, results)
return results
def after_day_2(entity):
    # Date arithmetic is not modelled here; fall back to the full flight list.
    return flight_all()
def before_day_2(entity):
    # Date arithmetic is not modelled here; fall back to the full flight list.
    return flight_all()
def tomorrow(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
sql = "SELECT flight.flight_id FROM flight JOIN days on flight.flight_days = days.days_code JOIN date_day ON days.day_name = date_day.day_name WHERE date_day.year = 1991 AND date_day.month_number = 1 AND date_day.day_number = 20"
results = get_result(sql)
results = intersection(entities, results)
return results
def overnight(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
    # Overnight filtering is not modelled; pass the entities through unchanged.
    return entities
def tonight(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
flight_id = "(%s)" % ','.join(['"%s"' % e['flight_id'] for e in entities])
sql = "SELECT DISTINCT flight_id FROM flight WHERE departure_time BETWEEN %d AND %d AND flight_id IN %s" % (
1800, 2359, flight_id)
results = get_result(sql)
return results
def day_number_return_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
sql = "SELECT flight.flight_id FROM flight JOIN flight_fare ON flight.flight_id = flight_fare.flight_id JOIN fare ON flight_fare.fare_id = fare.fare_id JOIN fare_basis ON fare.fare_basis_code = fare_basis.fare_basis_code JOIN days ON fare_basis.basis_days = days.days_code JOIN date_day ON days.day_name = date_day.day_name WHERE date_day.day_number = %s" % (
entity_value_1)
results = get_result(sql)
return results
def tomorrow_arrival(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
flight_id = "(%s)" % ','.join(['"%s"' % e['flight_id'] for e in entities])
sql = "SELECT flight.flight_id FROM flight JOIN days on flight.flight_days = days.days_code JOIN date_day ON days.day_name = date_day.day_name WHERE date_day.year = 1991 AND date_day.month_number = 1 AND date_day.day_number = 20 AND flight.departure_time > flight.arrival_time AND flight.flight_id IN %s" % (
flight_id)
results = get_result(sql)
return results
def day_after_tomorrow(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
sql = "SELECT DISTINCT flight.flight_id FROM flight JOIN days on flight.flight_days = days.days_code JOIN date_day ON days.day_name = date_day.day_name WHERE date_day.year = 1991 AND date_day.month_number = 1 AND date_day.day_number = 21"
results = get_result(sql)
results = intersection(entities, results)
return results
def oneway(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
sql = 'SELECT flight.flight_id FROM flight JOIN flight_fare ON flight.flight_id = flight_fare.flight_id JOIN fare ON flight_fare.fare_id = fare.fare_id WHERE fare.round_trip_required = "NO"'
results = get_result(sql)
results = intersection(entities, results)
return results
def round_trip(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
sql = 'SELECT flight.flight_id FROM flight JOIN flight_fare ON flight.flight_id = flight_fare.flight_id JOIN fare ON flight_fare.fare_id = fare.fare_id WHERE fare.round_trip_required IS NOT NULL'
results = get_result(sql)
results = intersection(entities, results)
return results
def weekday(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
results = list()
for entity in entities:
if 'flight_id' not in entity:
assert 'transport_type' in entity
results.append(entity)
else:
sql = "SELECT distinct day_name FROM flight JOIN days ON flight.flight_days = days.days_code WHERE flight_id = %s AND day_name IN ('MONDAY', 'TUESDAY', 'WEDNESDAY', 'THURSDAY', 'FRIDAY')" % entity['flight_id']
tmp = get_result(sql)
if len(tmp) == 5:
results.append(entity)
return results
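# Note on weekday() above: a flight_id entity is kept only when its flight_days
# expand to all five weekday names (the inner query must return exactly 5 rows);
# transport_type entities are passed through unchanged.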
def airline_2(airline_code):
entity_type_1, entity_value_1 = get_entity_value(airline_code)
sql = "SELECT flight_id FROM flight WHERE airline_code = '%s'" % (
entity_value_1)
return get_result(sql)
def aircraft_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
sql = "SELECT DISTINCT flight.flight_id FROM flight JOIN equipment_sequence ON flight.aircraft_code_sequence = equipment_sequence.aircraft_code_sequence WHERE equipment_sequence.aircraft_code = '%s'" % (
entity_value_1)
return get_result(sql)
def manufacturer_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
sql = "SELECT DISTINCT aircraft.aircraft_code , flight.flight_id FROM flight JOIN aircraft ON flight.aircraft_code_sequence = aircraft.aircraft_code WHERE aircraft.manufacturer = '%s'" % (
entity_value_1)
    results = get_result(sql)
    return results
def meal(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
results = list()
for entity in entities:
sql = "SELECT food_service.meal_description FROM flight JOIN food_service ON flight.meal_code = food_service.meal_code WHERE flight_id = %s" % (
entity['flight_id'])
results += get_result(sql)
return results
def loc_t_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
if entity_type_1 == 'city_name':
sql = "SELECT T.airport_code FROM airport_service AS T JOIN city ON T.city_code = city.city_code WHERE city.city_name = '%s';" % (
entity_value_1)
elif entity_type_1 == 'state_name':
sql = "SELECT T.airport_code FROM airport_service AS T JOIN city ON T.city_code = city.city_code JOIN state ON city.state_code = state.state_code WHERE state.state_name = '%s';" % (
entity_value_1)
else:
assert entity_type_1 == 'time_zone_code'
sql = "SELECT city_name FROM city WHERE time_zone_code = '%s'" % (
entity_value_1)
return get_result(sql)
def loc_t_1(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
if entity_type_1 == 'airport_code':
sql = "SELECT city.city_name FROM airport_service AS T JOIN city ON T.city_code = city.city_code WHERE T.airport_code = '%s';" % (
entity_value_1)
else:
assert entity_type_1 == 'city_name'
sql = "SELECT time_zone_code FROM city WHERE city_name = '%s'" % (
entity_value_1)
results = get_result(sql)
return results
def during_day_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
period_map = {
"morning": [0, 1200],
"afternoon": [1200, 1800],
"early": [0, 800],
"evening": [1800, 2200],
"pm": [1200, 2400],
"late": [601, 1759],
"breakfast": [600, 900],
"late evening": [2000, 2400],
"late night": [2159, 301],
"daytime": [600, 1800]
}
if entity_value_1 == 'late night':
sql = "SELECT DISTINCT flight.flight_id FROM flight JOIN days ON flight.flight_days = days.days_code JOIN date_day ON days.day_name = date_day.day_name WHERE date_day.year = 1991 AND date_day.month_number = 3 AND ( (date_day.day_number = 21 AND flight.departure_time > 2159) OR (date_day.day_number = 22 AND flight.departure_time < 301))"
else:
start, end = period_map[entity_value_1]
sql = "SELECT DISTINCT flight_1.flight_id FROM flight flight_1 WHERE flight_1.departure_time BETWEEN %d AND %d" % (
start, end)
results = get_result(sql)
return results
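# Worked example for during_day_2() (values taken from period_map above): a
# "morning" entity expands to
#   SELECT DISTINCT flight_1.flight_id FROM flight flight_1
#   WHERE flight_1.departure_time BETWEEN 0 AND 1200
# while "late night" is special-cased to cover the wrap-around from 2159 on
# one day to 0301 on the next.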
def during_day_arrival_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
processed_day_period = entity_value_1
period_map = {
"morning": [0, 1200],
"afternoon": [1200, 1800],
"early": [0, 800],
"evening": [1800, 2200],
"pm": [1200, 2400],
"late": [601, 1759],
"breakfast": [600, 900],
"late evening": [2000, 2400],
"daytime": [600, 1800],
"late night": [2159, 301],
'mealtime': [1700, 2000]
}
if processed_day_period == 'late night':
sql = "SELECT DISTINCT flight.flight_id FROM flight JOIN days ON flight.flight_days = days.days_code JOIN date_day ON days.day_name = date_day.day_name WHERE flight.flight_id = %s AND date_day.year = 1991 AND date_day.month_number = 3 AND ( (date_day.day_number = 21 AND flight.arrival_time > 2159) OR (date_day.day_number = 22 AND flight.arrival_time < 301))"
else:
start, end = period_map[processed_day_period]
sql = "SELECT DISTINCT flight_1.flight_id FROM flight flight_1 WHERE flight_1.arrival_time BETWEEN %d AND %d" % (
start, end)
results = get_result(sql)
return results
def day_number_arrival_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
sql = "SELECT DISTINCT flight.flight_id FROM flight JOIN days on flight.flight_days = days.days_code JOIN date_day ON days.day_name = date_day.day_name WHERE date_day.year = 1991 AND ((date_day.day_number = %s AND flight.arrival_time < flight.departure_time) OR (date_day.day_number = %s))" % (
str(int(entity_value_1) - 1), entity_value_1)
results = get_result(sql)
return results
def flight_number_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
sql = "SELECT DISTINCT flight_id FROM flight WHERE flight_number = '%s'" % entity_value_1
results = get_result(sql)
return results
def aircraft_basis_type_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
sql = "SELECT aircraft_code FROM aircraft WHERE basic_type = '%s'" % (
entity_value_1)
results = get_result(sql)
return results
def from_2(entity):
if isinstance(entity, dict):
entity_type_1, entity_value_1 = get_entity_value(entity)
if entity_type_1 == 'airport_code':
sql = "SELECT DISTINCT flight_id FROM flight WHERE flight.from_airport = '%s'" % (
entity_value_1)
else:
# entity_type == 'city_name'
sql = "SELECT DISTINCT flight_1.flight_id FROM flight AS flight_1 JOIN airport_service AS airport_service_1 ON flight_1.from_airport = airport_service_1.airport_code JOIN city AS city_1 ON airport_service_1.city_code = city_1.city_code WHERE city_1.city_name = '%s'" % (
entity_value_1)
results = get_result(sql)
else:
assert isinstance(entity, list)
if len(entity) == 0:
return list()
entity_type_1, entity_value_1 = get_entity_value(entity[0])
values = "(%s)" % ','.join(
['"%s"' % e[entity_type_1] for e in entity])
if entity_type_1 == 'airport_code':
sql = "SELECT DISTINCT flight_id FROM flight WHERE flight.from_airport IN %s" % (
values)
else:
# city_name
sql = "SELECT DISTINCT flight_1.flight_id FROM flight AS flight_1 JOIN airport_service AS airport_service_1 ON flight_1.from_airport = airport_service_1.airport_code JOIN city AS city_1 ON airport_service_1.city_code = city_1.city_code WHERE city_1.city_name IN %s" % (
values)
results = get_result(sql)
return results
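# Usage sketch for from_2() above (example values are illustrative only): a single
# entity or a list of entities is accepted, and the query is keyed on whichever
# column is present:
#   from_2({'airport_code': 'BOS'}) -> flight_ids with from_airport = 'BOS'
#   from_2({'city_name': 'boston'}) -> flight_ids departing any airport that
#                                      serves the city, via airport_service/city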
def from_1(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
    results = list()
    if len(entities) == 0:
        return list()
    flight_id = "(%s)" % ','.join(['"%s"' % e['flight_id'] for e in entities])
sql = "SELECT flight_1.from_airport, city_1.city_name FROM flight AS flight_1 JOIN airport_service AS airport_service_1 ON flight_1.from_airport = airport_service_1.airport_code JOIN city AS city_1 ON airport_service_1.city_code = city_1.city_code WHERE flight_1.flight_id in %s" % (
flight_id)
results = get_result(sql)
return results
def to_2(entity):
"""
_to(x,"mke:_ap"/"indianapolis:_ci")
"""
if isinstance(entity, dict):
entity_type_1, entity_value_1 = get_entity_value(entity)
if entity_type_1 == 'airport_code':
sql = "SELECT DISTINCT flight_id FROM flight WHERE flight.to_airport = '%s'" % (
entity_value_1)
elif entity_type_1 == 'city_name':
sql = "SELECT DISTINCT flight_1.flight_id FROM flight AS flight_1 JOIN airport_service AS airport_service_1 ON flight_1.to_airport = airport_service_1.airport_code JOIN city AS city_1 ON airport_service_1.city_code = city_1.city_code WHERE city_1.city_name = '%s'" % (
entity_value_1)
else:
# entity_type == 'state_name':
sql = "SELECT DISTINCT flight_1.flight_id FROM flight AS flight_1 JOIN airport_service AS airport_service_1 ON flight_1.to_airport = airport_service_1.airport_code JOIN city AS city_1 ON airport_service_1.city_code = city_1.city_code JOIN state ON city_1.state_code = state.state_code WHERE state.state_name = '%s'" % (
entity_value_1)
results = get_result(sql)
else:
assert isinstance(entity, list)
if len(entity) == 0:
return list()
entity_type_1, entity_value_1 = get_entity_value(entity[0])
values = "(%s)" % ','.join(
['"%s"' % e[entity_type_1] for e in entity])
if entity_type_1 == 'airport_code':
sql = "SELECT DISTINCT flight_id FROM flight WHERE flight.to_airport IN %s" % (
values)
else:
# city_name
sql = "SELECT DISTINCT flight_1.flight_id FROM flight AS flight_1 JOIN airport_service AS airport_service_1 ON flight_1.to_airport = airport_service_1.airport_code JOIN city AS city_1 ON airport_service_1.city_code = city_1.city_code WHERE city_1.city_name IN %s" % (
values)
results = get_result(sql)
return results
def to_1(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
results = list()
flight_id = "(%s)" % ','.join(['"%s"' % e['flight_id'] for e in entities])
sql = "SELECT flight_1.to_airport, city_1.city_name FROM flight AS flight_1 JOIN airport_service AS airport_service_1 ON flight_1.to_airport = airport_service_1.airport_code JOIN city AS city_1 ON airport_service_1.city_code = city_1.city_code WHERE flight_1.flight_id in %s" % (
flight_id)
results = get_result(sql)
return results
def airport_1(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
results = list()
if len(entities) == 0:
return list()
city_names = "(%s)" % ','.join(['"%s"' % e['city_name'] for e in entities])
sql = 'SELECT airport_service.airport_code FROM airport_service JOIN city ON city.city_code = airport_service.city_code WHERE city.city_name IN %s' % (
city_names)
results = get_result(sql)
return results
def airline_1(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
results = list()
if len(entities) == 0:
return list()
flight_id = "(%s)" % ','.join(['"%s"' % e['flight_id'] for e in entities])
sql = "SELECT DISTINCT airline_code FROM flight WHERE flight_id IN %s" % (
flight_id)
results = get_result(sql)
return results
def booking_class_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
sql = "SELECT flight_fare.flight_id FROM flight_fare JOIN fare ON flight_fare.fare_id = fare.fare_id JOIN fare_basis ON fare.fare_basis_code = fare_basis.fare_basis_code JOIN class_of_service ON fare_basis.booking_class = class_of_service.booking_class WHERE class_of_service.class_description = '%s'" % entity_value_1
results = get_result(sql)
return results
def booking_class_1(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
results = list()
if len(entities) == 0:
return list()
flight_id = "(%s)" % ','.join(['"%s"' % e['flight_id'] for e in entities])
sql = "SELECT DISTINCT class_of_service.class_description FROM flight_fare JOIN fare ON flight_fare.fare_id = fare.fare_id JOIN fare_basis ON fare.fare_basis_code = fare_basis.fare_basis_code JOIN class_of_service ON fare_basis.booking_class = class_of_service.booking_class WHERE flight_fare.flight_id IN %s" % (
flight_id)
results = get_result(sql)
return results
def from_airport_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
if entity_type_1 == 'city_name':
sql = "SELECT DISTINCT T3.transport_type FROM airport_service AS T1 JOIN city AS T2 ON T1.city_code = T2.city_code JOIN ground_service AS T3 ON T1.airport_code = T3.airport_code WHERE T2.city_name = '%s'" % entity_value_1
else:
assert entity_type_1 == 'airport_code'
sql = "SELECT DISTINCT ground_service_1.transport_type, ground_service_1.airport_code FROM ground_service ground_service_1 WHERE ground_service_1.airport_code = '%s'" % entity_value_1
results = get_result(sql)
return results
def to_city_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
assert entity_type_1 == 'city_name'
sql = "SELECT DISTINCT ground_service_1.transport_type, city_1.city_name FROM ground_service AS ground_service_1 JOIN city AS city_1 ON ground_service_1.city_code = city_1.city_code WHERE city_1.city_name = '%s'" % (
entity_value_1)
results = get_result(sql)
return results
def meal_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
if entity_type_1 == 'meal_code':
sql = "SELECT flight_id FROM flight WHERE meal_code = '%s'" % (entity_value_1)
else:
sql = "SELECT flight_id FROM flight JOIN food_service ON flight.meal_code = food_service.meal_code WHERE food_service.meal_description = '%s'" % (
entity_value_1)
results = get_result(sql)
return results
def meal_code_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
sql = "SELECT flight_id FROM flight WHERE meal_code = '%s'" % (entity_value_1)
results = get_result(sql)
return results
def day_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
sql = "SELECT DISTINCT flight.flight_id FROM flight JOIN days on flight.flight_days = days.days_code WHERE days.day_name = '%s'" % (
entity_value_1)
results = get_result(sql)
return results
def day_return_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
sql = "SELECT DISTINCT flight.flight_id FROM flight JOIN flight_fare ON flight.flight_id = flight_fare.flight_id JOIN fare ON flight_fare.fare_id = fare.fare_id JOIN fare_basis ON fare.fare_basis_code = fare_basis.fare_basis_code JOIN days ON fare_basis.basis_days = days.days_code WHERE days.day_name = '%s'" % (
entity_value_1)
results = get_result(sql)
return results
def year_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
sql = "SELECT DISTINCT flight_id FROM flight JOIN days ON flight.flight_days = days.days_code JOIN date_day ON days.day_name = date_day.day_name WHERE date_day.year = %s" % (
entity_value_1)
results = get_result(sql)
return results
def day_arrival_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
sql = "SELECT DISTINCT flight.flight_id FROM flight JOIN days on flight.flight_days = days.days_code WHERE days.day_name = '%s'" % (
entity_value_1)
results = get_result(sql)
return results
def day_number_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
sql = "SELECT flight.flight_id FROM flight JOIN days on flight.flight_days = days.days_code JOIN date_day ON days.day_name = date_day.day_name WHERE date_day.year = 1991 AND date_day.day_number = %s" % (
entity_value_1)
results = get_result(sql)
return results
def next_days_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
sql = "SELECT DISTINCT flight.flight_id FROM flight JOIN days on flight.flight_days = days.days_code JOIN date_day ON days.day_name = date_day.day_name WHERE date_day.year = 1991 AND date_day.month_number = 1 AND date_day.day_number BETWEEN 20 and %s" % (
int(entity_value_1) + 20 )
results = get_result(sql)
return results
def month_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
month_map = {
"january": 1,
"february": 2,
"march": 3,
"april": 4,
"may": 5,
"june": 6,
"july": 7,
"august": 8,
"september": 9,
"october": 10,
"november": 11,
"december": 12
}
sql = "SELECT flight.flight_id FROM flight JOIN days on flight.flight_days = days.days_code JOIN date_day ON days.day_name = date_day.day_name WHERE date_day.year = 1991 AND date_day.month_number = %s" % (
month_map[entity_value_1])
results = get_result(sql)
return results
def month_arrival_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
month_map = {
"january": 1,
"february": 2,
"march": 3,
"april": 4,
"may": 5,
"june": 6,
"july": 7,
"august": 8,
"september": 9,
"october": 10,
"november": 11,
"december": 12
}
sql = "SELECT DISTINCT flight.flight_id FROM flight JOIN days on flight.flight_days = days.days_code JOIN date_day ON days.day_name = date_day.day_name WHERE date_day.year = 1991 AND date_day.month_number = %s" % (
month_map[entity_value_1])
results = get_result(sql)
return results
def month_return_2(entity):
"""
_month_return(x, "june:_mn")
:entity_type (flight_id, month)
"""
entity_type_1, entity_value_1 = get_entity_value(entity)
month_map = {
"january": 1,
"february": 2,
"march": 3,
"april": 4,
"may": 5,
"june": 6,
"july": 7,
"august": 8,
"september": 9,
"october": 10,
"november": 11,
"december": 12
}
sql = "SELECT DISTINCT flight.flight_id FROM flight JOIN flight_fare ON flight.flight_id = flight_fare.flight_id JOIN fare ON flight_fare.fare_id = fare.fare_id JOIN fare_basis ON fare.fare_basis_code = fare_basis.fare_basis_code JOIN days ON fare_basis.basis_days = days.days_code JOIN date_day ON days.day_name = date_day.day_name WHERE date_day.year = 1991 AND date_day.month_number = %s" % (
month_map[entity_value_1])
results = get_result(sql)
return results
def days_from_today_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
sql = "SELECT flight.flight_id FROM flight JOIN days on flight.flight_days = days.days_code JOIN date_day ON days.day_name = date_day.day_name WHERE date_day.year = 1991 AND date_day.month_number = 5 AND date_day.day_number = %s" % (
int(entity_value_1) + 27)
results = get_result(sql)
return results
def stop_1(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
flight_id = "(%s)" % ','.join(['"%s"' % e['flight_id'] for e in entities])
sql = "SELECT city.city_name, flight_stop.stop_airport FROM flight JOIN flight_stop ON flight.flight_id = flight_stop.flight_id JOIN airport_service ON flight_stop.stop_airport = airport_service.airport_code JOIN city ON city.city_code = airport_service.city_code WHERE flight.flight_id IN %s" % (
flight_id)
results = get_result(sql)
return results
def stop_2(entity):
if isinstance(entity, dict):
entity_type_1, entity_value_1 = get_entity_value(entity)
if entity_type_1 == 'city_name':
sql = "SELECT flight.flight_id FROM flight JOIN flight_stop ON flight.flight_id = flight_stop.flight_id JOIN airport_service ON flight_stop.stop_airport = airport_service.airport_code JOIN city ON city.city_code = airport_service.city_code WHERE city.city_name = '%s'" % (
entity_value_1)
elif entity_type_1 == 'airport_code':
sql = "SELECT flight_stop.flight_id FROM flight_stop WHERE flight_stop.stop_airport = '%s'" % (
entity_value_1)
results = get_result(sql)
else:
assert isinstance(entity, list)
if len(entity) == 0:
return list()
entity_type_1, entity_value_1 = get_entity_value(entity[0])
values = "(%s)" % ','.join(
['"%s"' % e[entity_type_1] for e in entity])
if entity_type_1 == 'city_name':
sql = "SELECT flight.flight_id FROM flight JOIN flight_stop ON flight.flight_id = flight_stop.flight_id JOIN airport_service ON flight_stop.stop_airport = airport_service.airport_code JOIN city ON city.city_code = airport_service.city_code WHERE city.city_name IN %s" % (
values)
elif entity_type_1 == 'airport_code':
sql = "SELECT flight_stop.flight_id FROM flight_stop WHERE flight_stop.stop_airport IN %s" % (
values)
results = get_result(sql)
return results
def stop_arrival_time(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
flight_id = "(%s)" % ','.join(['"%s"' % e['flight_id'] for e in entities])
sql = "SELECT flight_stop.arrival_time, city.city_name FROM flight_stop JOIN airport_service ON flight_stop.stop_airport = airport_service.airport_code JOIN city ON city.city_code = airport_service.city_code WHERE flight_stop.flight_id IN %s" % (flight_id)
return get_result(sql)
def stops(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
flight_id = "(%s)" % ','.join(['"%s"' % e['flight_id'] for e in entities])
sql = "SELECT flight_id, stops FROM flight WHERE flight.flight_id IN %s" % (flight_id)
return get_result(sql)
def stops_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
sql = "SELECT DISTINCT flight_id FROM flight WHERE stops = %s" % (
entity_value_1)
results = get_result(sql)
return results
def class_type_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
sql = "SELECT flight_fare.flight_id FROM flight_fare JOIN fare ON flight_fare.fare_id = fare.fare_id JOIN fare_basis ON fare.fare_basis_code = fare_basis.fare_basis_code WHERE fare_basis.class_type = '%s'" % (
entity_value_1)
results = get_result(sql)
return results
def fare_basis_code_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
sql = "SELECT DISTINCT flight_id FROM flight_fare JOIN fare ON flight_fare.fare_id = fare.fare_id WHERE fare.fare_basis_code = '%s'" % (entity_value_1)
results = get_result(sql)
return results
def fare_basis_code(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
flight_id = "(%s)" % ','.join(['"%s"' % e['flight_id'] for e in entities])
sql = "SELECT DISTINCT fare.fare_basis_code FROM flight_fare JOIN fare ON flight_fare.fare_id = fare.fare_id WHERE flight_id IN %s" % (
flight_id)
results = get_result(sql)
return results
def has_meal(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
flight_id = "(%s)" % ','.join(['"%s"' % e['flight_id'] for e in entities])
sql = "SELECT flight_id FROM flight WHERE meal_code is not NULL AND flight_id IN %s" % (
flight_id)
results = get_result(sql)
return results
def has_stops(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
flight_id = "(%s)" % ','.join(['"%s"' % e['flight_id'] for e in entities])
sql = 'SELECT T1.flight_id FROM flight AS T1 JOIN flight_stop AS T2 ON T1.flight_id = T2.flight_id WHERE T1.flight_id IN %s' % (
flight_id)
results = get_result(sql)
return results
def less_than_fare_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
sql = "SELECT DISTINCT flight.flight_id FROM flight JOIN flight_fare ON flight.flight_id = flight_fare.flight_id JOIN fare ON fare.fare_id = flight_fare.fare_id WHERE fare.one_direction_cost < %s" % (entity_value_1)
results = get_result(sql)
return results
def fare_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
sql = "SELECT DISTINCT flight.flight_id FROM flight JOIN flight_fare ON flight.flight_id = flight_fare.flight_id JOIN fare ON fare.fare_id = flight_fare.fare_id WHERE fare.one_direction_cost = '%s'" % (
entity_value_1)
results = get_result(sql)
return results
def fare(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
flight_id = "(%s)" % ','.join(['"%s"' % e['flight_id'] for e in entities])
sql = "SELECT flight.flight_id, fare.one_direction_cost FROM flight JOIN flight_fare ON flight.flight_id = flight_fare.flight_id JOIN fare ON fare.fare_id = flight_fare.fare_id WHERE flight.flight_id IN %s" % (
flight_id)
results = get_result(sql)
return results
def fare_time(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
flight_id = "(%s)" % ','.join(['"%s"' % e['flight_id'] for e in entities])
sql = "SELECT flight.flight_id, fare.one_direction_cost, flight.departure_time FROM flight JOIN flight_fare ON flight.flight_id = flight_fare.flight_id JOIN fare ON fare.fare_id = flight_fare.fare_id WHERE flight.flight_id IN %s" % (
flight_id)
results = get_result(sql)
return results
def ground_fare(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
transport_types = "(%s)" % ','.join(['"%s"' % e['transport_type'] for e in entities])
sql = "SELECT ground_fare FROM ground_service WHERE transport_type IN %s" % (
transport_types)
results = get_result(sql)
return results
def aircraft_1(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
flight_id = "(%s)" % ','.join(['"%s"' % e['flight_id'] for e in entities])
sql = "SELECT DISTINCT equipment_sequence.aircraft_code FROM flight JOIN equipment_sequence ON flight.aircraft_code_sequence = equipment_sequence.aircraft_code_sequence WHERE flight.flight_id IN %s" % (
flight_id)
results = get_result(sql)
return results
def flight_aircraft(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
flight_id = "(%s)" % ','.join(['"%s"' % e['flight_id'] for e in entities])
sql = "SELECT DISTINCT flight.flight_id, equipment_sequence.aircraft_code FROM flight JOIN equipment_sequence ON flight.aircraft_code_sequence = equipment_sequence.aircraft_code_sequence WHERE flight.flight_id IN %s" % (
flight_id)
results = get_result(sql)
return results
def flight_airline(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
results = list()
flight_id = "(%s)" % ','.join(['"%s"' % e['flight_id'] for e in entities])
sql = "SELECT DISTINCT flight.flight_id, airline_code FROM flight WHERE flight_id IN %s" % (
flight_id)
results = get_result(sql)
return results
def departure_time_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
sql = "SELECT DISTINCT flight_1.flight_id FROM flight flight_1 WHERE flight_1.departure_time = %s" % (
entity_value_1)
results = get_result(sql)
return results
def departure_time(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
flight_id = "(%s)" % ','.join(['"%s"' % e['flight_id'] for e in entities])
sql = "SELECT DISTINCT flight_1.departure_time FROM flight flight_1 WHERE flight_1.flight_id IN %s" % (
flight_id)
results = get_result(sql)
return results
def arrival_time(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
flight_id = "(%s)" % ','.join(['"%s"' % e['flight_id'] for e in entities])
sql = "SELECT DISTINCT flight_1.arrival_time FROM flight flight_1 WHERE flight_1.flight_id IN %s" % (
flight_id)
results = get_result(sql)
return results
def arrival_time_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
sql = "SELECT DISTINCT flight_1.flight_id FROM flight flight_1 WHERE flight_1.arrival_time = %s" % (
entity_value_1)
results = get_result(sql)
return results
def approx_return_time_2(entity):
return approx_arrival_time_2(entity)
def approx_arrival_time_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
processed_arrival_time = entity_value_1
if len(processed_arrival_time) == 4:
if processed_arrival_time[2:] == '00':
start_time = "%d%d" % (int(processed_arrival_time[:2]) - 1, 30)
end_time = "%d%d" % (int(processed_arrival_time[:2]), 30)
elif processed_arrival_time[2:] == '15':
start_time = "%d%d" % (int(processed_arrival_time[:2]) - 1, 45)
end_time = "%d%d" % (int(processed_arrival_time[:2]), 45)
elif processed_arrival_time[2:] == '30':
start_time = "%d00" % (int(processed_arrival_time[:2]))
end_time = "%d00" % (int(processed_arrival_time[:2]) + 1)
else:
assert processed_arrival_time[2:] == '45'
start_time = "%d%d" % (int(processed_arrival_time[:2]), 15)
end_time = "%d%d" % (int(processed_arrival_time[:2]) + 1, 15)
else:
if processed_arrival_time[1:] == '00':
start_time = "%d%d" % (int(processed_arrival_time[:1]) - 1, 30)
end_time = "%d%d" % (int(processed_arrival_time[:1]), 30)
elif processed_arrival_time[1:] == '15':
start_time = "%d%d" % (int(processed_arrival_time[:1]) - 1, 45)
end_time = "%d%d" % (int(processed_arrival_time[:1]), 45)
elif processed_arrival_time[1:] == '30':
start_time = "%d00" % (int(processed_arrival_time[:1]))
end_time = "%d00" % (int(processed_arrival_time[:1]) + 1)
else:
assert processed_arrival_time[1:] == '45'
start_time = "%d%d" % (int(processed_arrival_time[:1]), 15)
end_time = "%d%d" % (int(processed_arrival_time[:1]) + 1, 15)
sql = "SELECT DISTINCT flight_1.flight_id FROM flight flight_1 WHERE flight_1.arrival_time >= %s AND flight_1.arrival_time <= %s" % (
start_time, end_time)
results = get_result(sql)
return results
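# Worked example for approx_arrival_time_2() / approx_return_time_2(): the given
# HHMM time is widened to a window of roughly 30 minutes on either side, e.g.
#   "800"  -> arrival_time >=  730 AND arrival_time <=  830
#   "1800" -> arrival_time >= 1730 AND arrival_time <= 1830
# (example times are illustrative; only :00/:15/:30/:45 inputs are handled).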
def airline_name(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
results = list()
if len(entities) == 0:
return list()
flight_id = "(%s)" % ','.join(
['"%s"' % e['flight_id'] for e in entities])
sql = "SELECT airline_name FROM flight JOIN airline ON flight.airline_code = airline.airline_code WHERE flight.flight_id IN %s" % (
flight_id)
results = get_result(sql)
return results
def flight_fare(argument):
return fare(argument)
def restriction_code(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
flight_id = "(%s)" % ','.join(
['"%s"' % e['flight_id'] for e in entities])
sql = "SELECT restriction.restriction_code FROM flight_fare JOIN fare ON flight_fare.fare_id = fare.fare_id JOIN restriction ON fare.restriction_code = restriction.restriction_code WHERE flight_fare.flight_id IN %s" % (
flight_id)
return get_result(sql)
def approx_departure_time_2(entity):
"""
_approx_departure_time()
"""
entity_type_1, entity_value_1 = get_entity_value(entity)
processed_departure_time = entity_value_1
if len(processed_departure_time) == 4:
if processed_departure_time[2:] == '00':
start_time = "%d%d" % (int(processed_departure_time[:2]) - 1, 30)
end_time = "%d%d" % (int(processed_departure_time[:2]), 30)
elif processed_departure_time[2:] == '15':
start_time = "%d%d" % (int(processed_departure_time[:2]) - 1, 45)
end_time = "%d%d" % (int(processed_departure_time[:2]), 45)
elif processed_departure_time[2:] == '30':
start_time = "%d00" % (int(processed_departure_time[:2]))
end_time = "%d00" % (int(processed_departure_time[:2]) + 1)
else:
assert processed_departure_time[2:] == '45'
start_time = "%d%d" % (int(processed_departure_time[:2]), 15)
end_time = "%d%d" % (int(processed_departure_time[:2]) + 1, 15)
sql = "SELECT DISTINCT flight_1.flight_id FROM flight flight_1 WHERE flight_1.departure_time >= %s AND flight_1.departure_time <= %s" % (
start_time, end_time)
elif len(processed_departure_time) == 3:
if processed_departure_time[1:] == '00':
start_time = "%d%d" % (int(processed_departure_time[:1]) - 1, 30)
end_time = "%d%d" % (int(processed_departure_time[:1]), 30)
elif processed_departure_time[1:] == '15':
start_time = "%d%d" % (int(processed_departure_time[:1]) - 1, 45)
end_time = "%d%d" % (int(processed_departure_time[:1]), 45)
elif processed_departure_time[1:] == '30':
start_time = "%d00" % (int(processed_departure_time[:1]))
end_time = "%d00" % (int(processed_departure_time[:1]) + 1)
else:
assert processed_departure_time[1:] == '45'
start_time = "%d%d" % (int(processed_departure_time[:1]), 15)
end_time = "%d%d" % (int(processed_departure_time[:1]) + 1, 15)
sql = "SELECT DISTINCT flight_1.flight_id FROM flight flight_1 WHERE flight_1.departure_time >= %s AND flight_1.departure_time <= %s" % (
start_time, end_time)
elif processed_departure_time == "0":
start_time = "2330"
end_time = "30"
sql = "SELECT DISTINCT flight_1.flight_id FROM flight flight_1 WHERE (flight_1.departure_time >= %s OR flight_1.departure_time <= %s)" % (
start_time, end_time)
results = get_result(sql)
return results
def larger_than_stops_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
sql = "SELECT DISTINCT flight_id FROM flight WHERE stops > %s" % (
entity_value_1)
results = get_result(sql)
return results
def larger_than_arrival_time_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
sql = "SELECT DISTINCT flight.flight_id FROM flight WHERE arrival_time > %s" % entity_value_1
results = get_result(sql)
return results
def less_than_arrival_time_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
sql = "SELECT DISTINCT flight.flight_id FROM flight WHERE arrival_time < %s" % entity_value_1
results = get_result(sql)
return results
def larger_than_departure_time_2(entity):
if isinstance(entity, dict):
entity_type_1, entity_value_1 = get_entity_value(entity)
sql = "SELECT DISTINCT flight.flight_id FROM flight WHERE departure_time > %s" % entity_value_1
results = get_result(sql)
else:
assert isinstance(entity, list)
flight_ids = "(%s)" % ','.join(
['"%s"' % e['flight_id'] for e in entity])
sql = "SELECT DISTINCT flight.flight_id FROM flight WHERE departure_time > (SELECT MAX(T.departure_time) FROM flight AS T WHERE T.flight_id IN %s)" % flight_ids
results = get_result(sql)
return results
def less_than_departure_time_2(entity):
entity_type_1, entity_value_1 = get_entity_value(entity)
sql = "SELECT DISTINCT flight.flight_id FROM flight WHERE departure_time < %s" % entity_value_1
results = get_result(sql)
return results
def larger_than_capacity_2(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
    # only aircraft_code entities are supported here
valid_key = ''
if 'aircraft_code' in entities[0]:
valid_key = 'aircraft_code'
else:
raise Exception("Invalid key in larger_than_capacity_2")
values = "(%s)" % ','.join(['"%s"' % e[valid_key] for e in entities])
sql = "SELECT DISTINCT aircraft_1.aircraft_code FROM aircraft aircraft_1 WHERE aircraft_1.capacity > (SELECT MAX(T.capacity) FROM aircraft AS T WHERE T.aircraft_code IN %s)" % values
results = get_result(sql)
return results
def intersection(*args):
keys = {}
all_entities = list()
for arg in args:
if len(arg) == 0:
return list()
if isinstance(arg, dict):
if len(keys) == 0:
keys = set(arg.keys())
else:
keys &= set(arg.keys())
else:
assert isinstance(arg, list) and isinstance(arg[0], dict)
all_entities += arg
if len(keys) == 0:
keys = set(arg[0].keys())
else:
keys &= set(arg[0].keys())
if len(keys) == 0:
return list()
valid_key = list(keys)[0]
results = set()
for aidx, arg in enumerate(args):
tmp = set()
if isinstance(arg, list):
for a in arg:
tmp.add(a[valid_key])
else:
tmp.add(arg[valid_key])
if aidx == 0:
results = tmp
else:
results &= tmp
return_results = list()
for r in results:
info = {valid_key: r}
if valid_key == 'transport_type':
for e in all_entities:
if valid_key in e and e[valid_key] == r:
info.update(e)
return_results.append(info)
return return_results
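# Usage sketch for intersection() above: the arguments are intersected on a column
# they all share (flight_id, city_name, ...), e.g.
#   intersection([{'flight_id': 1}, {'flight_id': 2}],
#                [{'flight_id': 2}, {'flight_id': 3}])
#   -> [{'flight_id': 2}]
# transport_type results additionally keep the full matching row.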
def not_(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
    # choose the column to negate on: flight_id, airline_code, aircraft_code or city_name
valid_key = ''
if 'flight_id' in entities[0]:
valid_key = 'flight_id'
elif 'airline_code' in entities[0]:
valid_key = 'airline_code'
elif 'aircraft_code' in entities[0]:
valid_key = 'aircraft_code'
elif 'city_name' in entities[0]:
valid_key = 'city_name'
else:
raise Exception("Invalid key in Not")
values = "(%s)" % ','.join(['"%s"' % e[valid_key] for e in entities])
if valid_key == 'flight_id':
sql = 'SELECT flight_id FROM flight WHERE flight_id NOT IN %s' % values
elif valid_key == 'airline_code':
sql = "SELECT distinct airline_code FROM airline WHERE airline_code NOT IN %s" % values
elif valid_key == 'aircraft_code':
sql = "SELECT distinct aircraft_code FROM aircraft WHERE aircraft_code NOT IN %s" % values
elif valid_key == 'city_name':
sql = "SELECT distinct city_name FROM city WHERE city_name NOT IN %s" % values
results = get_result(sql)
return results
def or_(*args):
keys = {}
for arg in args:
if len(arg) == 0:
return list()
if isinstance(arg, dict):
if len(keys) == 0:
keys = set(arg.keys())
else:
keys &= set(arg.keys())
else:
assert isinstance(arg, list) and isinstance(arg[0], dict)
if len(keys) == 0:
keys = set(arg[0].keys())
else:
keys &= set(arg[0].keys())
if len(keys) == 0:
return list()
valid_key = list(keys)[0]
results = set()
for aidx, arg in enumerate(args):
tmp = set()
if isinstance(arg, list):
for a in arg:
tmp.add(a[valid_key])
else:
tmp.add(arg[valid_key])
if aidx == 0:
results = tmp
else:
results |= tmp
return_results = list()
for r in results:
return_results.append({valid_key: r})
return return_results
def count_(argument):
if isinstance(argument, list):
return len(argument)
if argument is not None:
return 1
return 0
def max_(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
valid_key = None
keys = set()
for e in entities:
keys |= set(e.keys())
keys = keys & {'one_direction_cost', 'arrival_time', 'departure_time'}
    if len(keys) > 0:
        valid_key = list(keys)[0]
        max_value = max([e[valid_key] for e in entities if valid_key in e])
        return max_value
else:
return 0.0
def min_(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
valid_key = None
keys = set()
for e in entities:
keys |= set(e.keys())
keys = keys & {'one_direction_cost', 'arrival_time', 'departure_time'}
if len(keys) > 0:
valid_key = list(keys)[0]
        # Skip entities that do not carry the chosen key instead of raising KeyError.
        min_value = min([e[valid_key] for e in entities if valid_key in e])
return min_value
else:
return 0.0
def argmin_departure_time(argument):
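    # Flights with the earliest departure_time (looked up in the flight table).
    # The three functions below do the same for the latest arrival, the latest
    # departure and the earliest arrival.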
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
flight_id = "(%s)" % ','.join(['"%s"' % e['flight_id'] for e in entities])
sql = "select flight.flight_id, flight.departure_time FROM flight WHERE flight.flight_id IN %s" % flight_id
results = get_result(sql)
min_time = min([r['departure_time'] for r in results])
return_results = [r for r in results if r['departure_time'] == min_time]
return return_results
def argmax_arrival_time(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
flight_id = "(%s)" % ','.join(['"%s"' % e['flight_id'] for e in entities])
sql = "select flight.flight_id, flight.arrival_time FROM flight WHERE flight.flight_id IN %s" % flight_id
results = get_result(sql)
max_time = max([r['arrival_time'] for r in results])
return_results = [r for r in results if r['arrival_time'] == max_time]
return return_results
def argmax_departure_time(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
flight_id = "(%s)" % ','.join(['"%s"' % e['flight_id'] for e in entities])
sql = "select flight.flight_id, flight.departure_time FROM flight WHERE flight.flight_id IN %s" % flight_id
results = get_result(sql)
max_time = max([r['departure_time'] for r in results])
return_results = [r for r in results if r['departure_time'] == max_time]
return return_results
def argmin_arrival_time(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
flight_id = "(%s)" % ','.join(['"%s"' % e['flight_id'] for e in entities])
sql = "select flight.flight_id, flight.arrival_time FROM flight WHERE flight.flight_id IN %s" % flight_id
results = get_result(sql)
min_time = min([r['arrival_time'] for r in results])
return_results = [r for r in results if r['arrival_time'] == min_time]
return return_results
def argmin_fare(argument):
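    # Fares (via `fare`) with the lowest one_direction_cost.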
results = fare(argument)
if len(results) == 0:
return list()
min_fare = min([r['one_direction_cost'] for r in results])
return_results = [
r for r in results if r['one_direction_cost'] == min_fare]
return return_results
def argmax_fare(argument):
    results = fare(argument)
    if len(results) == 0:
        return list()
    max_fare = max([r['one_direction_cost'] for r in results])
    return_results = [
        r for r in results if r['one_direction_cost'] == max_fare]
    return return_results
def argmax_capacity(argument):
results = capacity(argument)
max_capacity = max([r['capacity'] for r in results])
return_results = [
r for r in results if r['capacity'] == max_capacity]
return return_results
def argmin_capacity(argument):
results = capacity(argument)
min_capacity = min([r['capacity'] for r in results])
return_results = [
r for r in results if r['capacity'] == min_capacity]
return return_results
def sum_capacity(argument):
results = capacity(argument)
total_capacity = sum([r['capacity'] for r in results])
return total_capacity
def sum_stops(argument):
results = stops(argument)
total_stops = sum([r['stops'] for r in results])
return total_stops
def argmax_stops(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
flight_id = "(%s)" % ','.join(['"%s"' % e['flight_id'] for e in entities])
sql = "SELECT DISTINCT flight_id, stops FROM flight WHERE flight_id IN %s" % (flight_id)
results = get_result(sql)
max_stops = max([r['stops'] for r in results])
return_results = [
r for r in results if r['stops'] == max_stops]
return return_results
def argmin_stops(argument):
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
flight_id = "(%s)" % ','.join(['"%s"' % e['flight_id'] for e in entities])
sql = "SELECT DISTINCT flight_id, stops FROM flight WHERE flight_id IN %s" % (
flight_id)
results = get_result(sql)
min_stops = min([r['stops'] for r in results])
return_results = [
r for r in results if r['stops'] == min_stops]
return return_results
def argmin_miles_distant_2(argument):
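    # Among the given airports, the airport_service row with the smallest
    # positive miles_distant from its city.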
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
assert 'airport_code' in entities[0]
key = 'airport_code'
values = "(%s)" % ','.join(
['"%s"' % e[key] for e in entities])
sql = "SELECT airport_service.miles_distant, airport_service.airport_code FROM airport_service JOIN city ON city.city_code = airport_service.city_code WHERE airport_service.airport_code IN %s AND airport_service.miles_distant > 0 ORDER BY airport_service.miles_distant ASC LIMIT 1" % values
results = get_result(sql)
return results
def argmin_time_elapsed(argument):
results = time_elapsed(argument)
min_time = min([r['time_elapsed'] for r in results])
return_results = [
r for r in results if r['time_elapsed'] == min_time]
return return_results
def argmax_count(argument):
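    # Airline, among the given airline codes, operating the most distinct flights.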
if isinstance(argument, dict):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
raise Exception("Not Supported Argument Type", argument)
if len(entities) == 0:
return list()
assert 'airline_code' in entities[0]
key = 'airline_code'
values = "(%s)" % ','.join(
['"%s"' % e[key] for e in entities])
sql = "SELECT airline_code FROM flight WHERE airline_code IN %s GROUP BY airline_code order by count(DISTINCT flight_id) LIMIT 1" % values
results = get_result(sql)
return results
def equals(argument_1, argument_2):
    # True iff every entity of argument_1 has a case-insensitive match in argument_2.
    if isinstance(argument_1, list):
        entities_1 = argument_1
    elif isinstance(argument_1, dict):
        entities_1 = [argument_1]
    else:
        raise Exception("Not Supported Argument Type", argument_1)
    if isinstance(argument_2, list):
        entities_2 = argument_2
    elif isinstance(argument_2, dict):
        entities_2 = [argument_2]
    else:
        raise Exception("Not Supported Argument Type", argument_2)
for e1 in entities_1:
is_found = False
for e2 in entities_2:
is_match = True
for k, v in e1.items():
if k not in e2 or e2[k].lower() != v.lower():
is_match = False
if is_match:
is_found = True
break
if not is_found:
return False
return True
def named_1(values):
return values
if __name__ == '__main__':
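    # Smoke test: evaluate one hand-written expression, then replay the
    # interpreter over every (question, FunQL) pair of the training file.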
values = answer(argmin_capacity(aircraft(intersection(not_(turboprop(
aircraft_all())), larger_than_capacity_2(turboprop(aircraft_all()))))))
print(values)
data = list()
with open('../../../data/atis/atis_funql_train.tsv', 'r') as f:
for line in f:
line = line.strip()
data.append(line.split('\t'))
for idx, (question, funql) in enumerate(data):
print(idx)
print(question)
print(funql)
expression = transform(funql)
print(expression)
results = eval(expression)
print(results)
print('====\n\n')
| 80,393 | 34.651441 | 399 |
py
|
Unimer
|
Unimer-master/executions/atis/sql/evaluator.py
|
# coding=utf8
from multiprocessing import Process, Manager
import re
import mysql.connector
from pprint import pprint
class TimeoutException(Exception):
pass
def normalize(sql):
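    # Collapse repeated spaces and tighten 'MAX (', 'COUNT (', '< =' etc. so the
    # query string executes cleanly.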
s = re.sub(' +', ' ', sql)
s = s.replace('MAX (', 'MAX(')
s = s.replace('MIN (', 'MIN(')
s = s.replace('AVG (', 'AVG(')
s = s.replace('COUNT (', 'COUNT(')
s = s.replace('count (', 'count(')
s = s.replace('SUM (', 'SUM(')
s = s.replace('< =', '<=')
s = s.replace('> =', '>=')
return s
def format_headers(header):
s = header.replace("( ", "(").replace(" )", ")").strip().lower()
return s
def get_result(sql, db):
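    # Execute the normalized SQL on an open connection and return the rows as
    # dicts keyed by lower-cased, whitespace-trimmed column headers.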
_sql = normalize(sql)
cursor = db.cursor()
cursor.execute(_sql)
# print(cursor.description)
headers = cursor.description
results = cursor.fetchall()
formatted_results = list()
for x in results:
r = dict()
for value, header in zip(x, headers):
r[format_headers(header[0])] = value
formatted_results.append(r)
# pprint(formatted_results)
return formatted_results
def get_result_process_func(sql, return_dict):
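    # Worker run in a child process: open a dedicated MySQL connection, execute
    # the query and report success and results through the shared return_dict.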
db = mysql.connector.connect(
host="localhost",
user="root",
passwd="123456",
database="atis",
auth_plugin='mysql_native_password'
)
try:
results = get_result(sql, db)
except Exception as e:
print(e)
return_dict['is_valid'] = False
else:
return_dict['is_valid'] = True
return_dict['results'] = results
def get_result_with_time_limit(sql, time):
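    # Run the query in a subprocess and terminate it after `time` seconds,
    # raising TimeoutException if the limit is hit.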
manager = Manager()
return_dict = manager.dict()
p = Process(target=get_result_process_func, args=(sql, return_dict))
p.start()
p.join(time)
if p.is_alive():
p.terminate()
raise TimeoutException("Timeout")
    # The worker may die before writing to return_dict; treat that as a failed run.
    is_valid = return_dict.get('is_valid', False)
if is_valid:
return return_dict['results']
else:
raise Exception("SQL Execution Error")
def compare_sqls(sql_1, sql_2, timeout=300):
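    # Execution-based equivalence: the two queries match iff they return the same
    # number of rows and every row of the first result can be paired with an
    # unused matching row of the second.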
    try:
        sql_1_results = get_result_with_time_limit(sql_1, timeout)
        sql_2_results = get_result_with_time_limit(sql_2, timeout)
    except Exception:
        return False
if len(sql_1_results) != len(sql_2_results):
return False
for sql_1_row in sql_1_results:
for sql_2_row in sql_2_results:
is_same = True
for key, value in sql_1_row.items():
if key not in sql_2_row or sql_2_row[key] != value:
is_same = False
if is_same:
sql_2_results.remove(sql_2_row)
break
else:
return False
return True
if __name__ == '__main__':
sql_1 = "select distinct flight_1.flight_id from flight flight_1 where (flight_1.airline_code = 'aa' and (flight_1.from_airport in (select airport_service_1.airport_code from airport_service airport_service_1 where airport_service_1.city_code in (select city_1.city_code from city city_1 where city_1.city_name = 'miami')) and flight_1.to_airport in (select airport_service_1.airport_code from airport_service airport_service_1 where airport_service_1.city_code in (select city_1.city_code from city city_1 where city_1.city_name = 'chicago'))));"
# print(compare_sqls(sql_1, sql_2))
formatted_results = get_result_with_time_limit(sql_1, 60)
pprint(formatted_results)
| 3,401 | 29.648649 | 551 |
py
|
Unimer
|
Unimer-master/executions/atis/sql/__init__.py
| 0 | 0 | 0 |
py
|