# astropy/photutils : photutils/psf/epsf.py
# Commit: cc9bb4534ab76bac98cb5f374a348a2573d10401
#
# Module-level imports reconstructed from usage in the functions below; the
# exact import paths are assumptions based on the astropy/photutils APIs.
# EPSFModel and the star container classes are defined elsewhere in
# photutils.psf.
import copy
import time
import warnings

import numpy as np
from astropy.nddata.utils import (NoOverlapError, PartialOverlapError,
                                  overlap_slices)
from astropy.utils.exceptions import AstropyUserWarning
from photutils.centroids import centroid_com

try:
    import bottleneck
    HAS_BOTTLENECK = True
except ImportError:
    HAS_BOTTLENECK = False

# --- _py2intround ---
def _py2intround(a):
"""
Round the input to the nearest integer.
If two integers are equally close, rounding is done away from 0.
"""
data = np.asanyarray(a)
value = np.where(data >= 0, np.floor(data + 0.5),
np.ceil(data - 0.5)).astype(int)
    if not hasattr(a, '__iter__'):
        value = value.item()  # np.asscalar was removed in NumPy >= 1.23
    return value

# Source: https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/epsf.py#L845-L859
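
# A minimal usage sketch (uses `np` from the header imports).  Python 3's
# built-in round() applies banker's rounding (round(0.5) == 0), whereas this
# helper reproduces Python 2 behavior and rounds halves away from zero.
assert _py2intround(0.5) == 1
assert _py2intround(-0.5) == -1
assert round(0.5) == 0  # contrast with the builtin
print(_py2intround(np.array([0.5, 1.5, -2.5])))  # -> [ 1  2 -3]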

# --- _interpolate_missing_data ---
def _interpolate_missing_data(data, mask, method='cubic'):
"""
Interpolate missing data as identified by the ``mask`` keyword.
Parameters
----------
data : 2D `~numpy.ndarray`
An array containing the 2D image.
mask : 2D bool `~numpy.ndarray`
A 2D booleen mask array with the same shape as the input
``data``, where a `True` value indicates the corresponding
element of ``data`` is masked. The masked data points are
those that will be interpolated.
method : {'cubic', 'nearest'}, optional
The method of used to interpolate the missing data:
* ``'cubic'``: Masked data are interpolated using 2D cubic
splines. This is the default.
* ``'nearest'``: Masked data are interpolated using
nearest-neighbor interpolation.
Returns
-------
data_interp : 2D `~numpy.ndarray`
The interpolated 2D image.
"""
from scipy import interpolate
data_interp = np.array(data, copy=True)
if len(data_interp.shape) != 2:
raise ValueError('data must be a 2D array.')
if mask.shape != data.shape:
raise ValueError('mask and data must have the same shape.')
y, x = np.indices(data_interp.shape)
xy = np.dstack((x[~mask].ravel(), y[~mask].ravel()))[0]
z = data_interp[~mask].ravel()
if method == 'nearest':
interpol = interpolate.NearestNDInterpolator(xy, z)
elif method == 'cubic':
interpol = interpolate.CloughTocher2DInterpolator(xy, z)
else:
raise ValueError('Unsupported interpolation method.')
xy_missing = np.dstack((x[mask].ravel(), y[mask].ravel()))[0]
data_interp[mask] = interpol(xy_missing)
    return data_interp

# Source: https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/epsf.py#L862-L916
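
# A short usage sketch: repair two masked pixels in a smooth ramp image with
# the (default) cubic method.  The image and mask are illustrative values,
# not taken from the source.
image = np.arange(25, dtype=float).reshape(5, 5)
mask = np.zeros(image.shape, dtype=bool)
mask[2, 2] = mask[1, 3] = True
image[mask] = np.nan  # pretend these pixels are bad

filled = _interpolate_missing_data(image, mask, method='cubic')
print(filled[2, 2], filled[1, 3])  # ~12.0 and ~8.0 on this linear ramp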

# --- EPSFFitter._fit_star ---
def _fit_star(self, epsf, star, fitter, fitter_kwargs,
              fitter_has_fit_info, fit_boxsize):
"""
Fit an ePSF model to a single star.
The input ``epsf`` will usually be modified by the fitting
routine in this function. Make a copy before calling this
function if the original is needed.
"""
if fit_boxsize is not None:
try:
xcenter, ycenter = star.cutout_center
large_slc, small_slc = overlap_slices(star.shape,
fit_boxsize,
(ycenter, xcenter),
mode='strict')
except (PartialOverlapError, NoOverlapError):
warnings.warn('The star at ({0}, {1}) cannot be fit because '
'its fitting region extends beyond the star '
'cutout image.'.format(star.center[0],
star.center[1]),
AstropyUserWarning)
star = copy.deepcopy(star)
star._fit_error_status = 1
return star
data = star.data[large_slc]
weights = star.weights[large_slc]
# define the origin of the fitting region
x0 = large_slc[1].start
y0 = large_slc[0].start
else:
# use the entire cutout image
data = star.data
weights = star.weights
# define the origin of the fitting region
x0 = 0
y0 = 0
scaled_data = data / np.prod(epsf._oversampling)
# define positions in the ePSF oversampled grid
    yy, xx = np.indices(data.shape, dtype=float)  # np.float alias removed in NumPy >= 1.24
xx = (xx - (star.cutout_center[0] - x0)) * epsf._oversampling[0]
yy = (yy - (star.cutout_center[1] - y0)) * epsf._oversampling[1]
# define the initial guesses for fitted flux and shifts
epsf.flux = star.flux
epsf.x_0 = 0.0
epsf.y_0 = 0.0
# create copy to avoid overwriting original oversampling factor
_epsf = epsf.copy()
_epsf._oversampling = np.array([1., 1.])
try:
fitted_epsf = fitter(model=_epsf, x=xx, y=yy, z=scaled_data,
weights=weights, **fitter_kwargs)
except TypeError:
# fitter doesn't support weights
fitted_epsf = fitter(model=_epsf, x=xx, y=yy, z=scaled_data,
**fitter_kwargs)
fit_error_status = 0
if fitter_has_fit_info:
fit_info = copy.copy(fitter.fit_info)
if 'ierr' in fit_info and fit_info['ierr'] not in [1, 2, 3, 4]:
fit_error_status = 2 # fit solution was not found
else:
fit_info = None
# compute the star's fitted position
x_center = (star.cutout_center[0] +
(fitted_epsf.x_0.value / epsf._oversampling[0]))
y_center = (star.cutout_center[1] +
(fitted_epsf.y_0.value / epsf._oversampling[1]))
star = copy.deepcopy(star)
star.cutout_center = (x_center, y_center)
# set the star's flux to the ePSF-fitted flux
star.flux = fitted_epsf.flux.value
star._fit_info = fit_info
star._fit_error_status = fit_error_status
    return star

# Source: https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/epsf.py#L146-L238
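
# A hedged sketch of how this private method is reached through the public
# API.  The star-extraction step and the `epsf`/`nddata` inputs are
# assumptions about your own data; class and call signatures follow the
# photutils docs for the version this code was taken from.
from astropy.table import Table
from photutils.psf import EPSFFitter, extract_stars

stars_tbl = Table(names=('x', 'y'), rows=[(24.5, 30.1), (80.2, 12.7)])
stars = extract_stars(nddata, stars_tbl, size=25)  # nddata: NDData of your image
fitter = EPSFFitter(fit_boxsize=5)
fitted_stars = fitter(epsf, stars)  # __call__ invokes _fit_star per star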

# --- EPSFBuilder._init_img_params ---
def _init_img_params(param):
"""
Initialize 2D image-type parameters that can accept either a
single or two values.
"""
if param is not None:
param = np.atleast_1d(param)
if len(param) == 1:
param = np.repeat(param, 2)
    return param

# Source: https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/epsf.py#L369-L380
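
# Minimal sketch: a scalar broadcasts to both image axes, a pair passes
# through unchanged, and None is returned as-is.
print(_init_img_params(4))       # -> [4 4]
print(_init_img_params((3, 5)))  # -> [3 5]
print(_init_img_params(None))    # -> None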

# --- EPSFBuilder._create_initial_epsf ---
def _create_initial_epsf(self, stars):
"""
Create an initial `EPSFModel` object.
The initial ePSF data are all zeros.
If ``shape`` is not specified, the shape of the ePSF data array
is determined from the shape of the input ``stars`` and the
oversampling factor. If the size is even along any axis, it
will be made odd by adding one. The output ePSF will always
have odd sizes along both axes to ensure a central pixel.
Parameters
----------
stars : `EPSFStars` object
The stars used to build the ePSF.
Returns
-------
epsf : `EPSFModel`
The initial ePSF model.
"""
oversampling = self.oversampling
shape = self.shape
# define the ePSF shape
if shape is not None:
shape = np.atleast_1d(shape).astype(int)
if len(shape) == 1:
shape = np.repeat(shape, 2)
else:
        x_shape = int(np.ceil(stars._max_shape[1] * oversampling[0]))
        y_shape = int(np.ceil(stars._max_shape[0] * oversampling[1]))
        shape = np.array((y_shape, x_shape))

    # ensure odd sizes
    shape = [(i + 1) if i % 2 == 0 else i for i in shape]

    data = np.zeros(shape, dtype=float)  # np.int/np.float aliases removed in NumPy >= 1.24
xcenter = (shape[1] - 1) / 2.
ycenter = (shape[0] - 1) / 2.
epsf = EPSFModel(data=data, origin=(xcenter, ycenter),
normalize=False, oversampling=oversampling)
    return epsf

# Source: https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/epsf.py#L382-L430
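
# A small sketch of the shape rule implemented above: the ePSF array spans
# the largest star cutout scaled by the oversampling factor, and each axis
# is then bumped to the next odd size so a central pixel exists.  The cutout
# size and oversampling values here are assumptions for illustration.
max_shape = np.array([25, 25])   # (ny, nx) of the largest star cutout
oversampling = np.array([4, 4])  # (x, y) oversampling factors
shape = np.ceil(max_shape * oversampling[::-1]).astype(int)
shape = [(i + 1) if i % 2 == 0 else i for i in shape]
print(shape)  # -> [101, 101]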

# --- EPSFBuilder._resample_residual ---
def _resample_residual(self, star, epsf):
"""
Compute a normalized residual image in the oversampled ePSF
grid.
A normalized residual image is calculated by subtracting the
normalized ePSF model from the normalized star at the location
of the star in the undersampled grid. The normalized residual
image is then resampled from the undersampled star grid to the
oversampled ePSF grid.
Parameters
----------
star : `EPSFStar` object
A single star object.
epsf : `EPSFModel` object
The ePSF model.
Returns
-------
image : 2D `~numpy.ndarray`
A 2D image containing the resampled residual image. The
image contains NaNs where there is no data.
"""
# find the integer index of EPSFStar pixels in the oversampled
# ePSF grid
x = epsf._oversampling[0] * star._xidx_centered
y = epsf._oversampling[1] * star._yidx_centered
epsf_xcenter, epsf_ycenter = epsf.origin
xidx = _py2intround(x + epsf_xcenter)
yidx = _py2intround(y + epsf_ycenter)
mask = np.logical_and(np.logical_and(xidx >= 0, xidx < epsf.shape[1]),
np.logical_and(yidx >= 0, yidx < epsf.shape[0]))
xidx = xidx[mask]
yidx = yidx[mask]
# Compute the normalized residual image by subtracting the
# normalized ePSF model from the normalized star at the location
# of the star in the undersampled grid. Then, resample the
# normalized residual image in the oversampled ePSF grid.
# [(star - (epsf * xov * yov)) / (xov * yov)]
# --> [(star / (xov * yov)) - epsf]
stardata = ((star._data_values_normalized / np.prod(epsf._oversampling)) -
epsf.evaluate(x=x, y=y, flux=1.0, x_0=0.0, y_0=0.0,
use_oversampling=False))
resampled_img = np.full(epsf.shape, np.nan)
resampled_img[yidx, xidx] = stardata[mask]
    return resampled_img

# Source: https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/epsf.py#L432-L484
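
# A toy sketch of the index mapping used above: a star pixel's offset from
# the star center (in detector pixels) lands on an oversampled ePSF pixel at
# round(offset * oversampling + epsf_center), using the rounding helper
# defined earlier.  The numbers are illustrative assumptions.
oversampling = 4
epsf_center = 50  # central pixel of a 101-pixel oversampled axis
offsets = np.array([-2.0, -0.25, 0.0, 0.25, 2.0])
print(_py2intround(offsets * oversampling + epsf_center))  # -> [42 49 50 51 58]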

# --- EPSFBuilder._resample_residuals ---
def _resample_residuals(self, stars, epsf):
"""
Compute normalized residual images for all the input stars.
Parameters
----------
stars : `EPSFStars` object
The stars used to build the ePSF.
epsf : `EPSFModel` object
The ePSF model.
Returns
-------
star_imgs : 3D `~numpy.ndarray`
A 3D cube containing the resampled residual images.
"""
shape = (stars.n_good_stars, epsf.shape[0], epsf.shape[1])
star_imgs = np.zeros(shape)
for i, star in enumerate(stars.all_good_stars):
star_imgs[i, :, :] = self._resample_residual(star, epsf)
    return star_imgs

# Source: https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/epsf.py#L486-L509

# --- EPSFBuilder._smooth_epsf ---
def _smooth_epsf(self, epsf_data):
"""
Smooth the ePSF array by convolving it with a kernel.
Parameters
----------
epsf_data : 2D `~numpy.ndarray`
A 2D array containing the ePSF image.
Returns
-------
result : 2D `~numpy.ndarray`
The smoothed (convolved) ePSF data.
"""
from scipy.ndimage import convolve
if self.smoothing_kernel is None:
return epsf_data
elif self.smoothing_kernel == 'quartic':
# from Polynomial2D fit with degree=4 to 5x5 array of
# zeros with 1. at the center
# Polynomial2D(4, c0_0=0.04163265, c1_0=-0.76326531,
# c2_0=0.99081633, c3_0=-0.4, c4_0=0.05,
# c0_1=-0.76326531, c0_2=0.99081633, c0_3=-0.4,
# c0_4=0.05, c1_1=0.32653061, c1_2=-0.08163265,
# c1_3=0., c2_1=-0.08163265, c2_2=0.02040816,
    # c3_1=-0.)
kernel = np.array(
[[+0.041632, -0.080816, 0.078368, -0.080816, +0.041632],
[-0.080816, -0.019592, 0.200816, -0.019592, -0.080816],
[+0.078368, +0.200816, 0.441632, +0.200816, +0.078368],
[-0.080816, -0.019592, 0.200816, -0.019592, -0.080816],
[+0.041632, -0.080816, 0.078368, -0.080816, +0.041632]])
elif self.smoothing_kernel == 'quadratic':
# from Polynomial2D fit with degree=2 to 5x5 array of
# zeros with 1. at the center
# Polynomial2D(2, c0_0=-0.07428571, c1_0=0.11428571,
# c2_0=-0.02857143, c0_1=0.11428571,
# c0_2=-0.02857143, c1_1=-0.)
kernel = np.array(
[[-0.07428311, 0.01142786, 0.03999952, 0.01142786,
-0.07428311],
[+0.01142786, 0.09714283, 0.12571449, 0.09714283,
+0.01142786],
[+0.03999952, 0.12571449, 0.15428215, 0.12571449,
+0.03999952],
[+0.01142786, 0.09714283, 0.12571449, 0.09714283,
+0.01142786],
[-0.07428311, 0.01142786, 0.03999952, 0.01142786,
-0.07428311]])
    elif isinstance(self.smoothing_kernel, np.ndarray):
        kernel = self.smoothing_kernel  # fix: `self.kernel` is not an EPSFBuilder attribute
else:
raise TypeError('Unsupported kernel.')
    return convolve(epsf_data, kernel)

# Source: https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/epsf.py#L511-L571
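
# A quick check of a property the smoothing relies on: each built-in kernel
# sums to 1, so convolution preserves the total ePSF flux (up to edge
# effects).  `quartic` repeats the array defined in the method above; the
# random test image is an assumption.
from scipy.ndimage import convolve

quartic = np.array(
    [[+0.041632, -0.080816, 0.078368, -0.080816, +0.041632],
     [-0.080816, -0.019592, 0.200816, -0.019592, -0.080816],
     [+0.078368, +0.200816, 0.441632, +0.200816, +0.078368],
     [-0.080816, -0.019592, 0.200816, -0.019592, -0.080816],
     [+0.041632, -0.080816, 0.078368, -0.080816, +0.041632]])
print(quartic.sum())  # -> 1.0 (to float precision)

img = np.random.default_rng(0).random((25, 25))
print(img.sum(), convolve(img, quartic).sum())  # nearly equal totals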

# --- EPSFBuilder._recenter_epsf ---
def _recenter_epsf(self, epsf_data, epsf, centroid_func=centroid_com,
                   box_size=5, maxiters=20, center_accuracy=1.0e-4):
"""
Calculate the center of the ePSF data and shift the data so the
ePSF center is at the center of the ePSF data array.
Parameters
----------
epsf_data : 2D `~numpy.ndarray`
A 2D array containing the ePSF image.
epsf : `EPSFModel` object
The ePSF model.
centroid_func : callable, optional
A callable object (e.g. function or class) that is used to
calculate the centroid of a 2D array. The callable must
accept a 2D `~numpy.ndarray`, have a ``mask`` keyword and
optionally an ``error`` keyword. The callable object must
return a tuple of two 1D `~numpy.ndarray`\\s, representing
the x and y centroids. The default is
`~photutils.centroids.centroid_com`.
recentering_boxsize : float or tuple of two floats, optional
The size (in pixels) of the box used to calculate the
centroid of the ePSF during each build iteration. If a
single integer number is provided, then a square box will be
used. If two values are provided, then they should be in
``(ny, nx)`` order. The default is 5.
maxiters : int, optional
The maximum number of recentering iterations to perform.
The default is 20.
center_accuracy : float, optional
The desired accuracy for the centers of stars. The building
iterations will stop if the center of the ePSF changes by
less than ``center_accuracy`` pixels between iterations.
The default is 1.0e-4.
Returns
-------
result : 2D `~numpy.ndarray`
The recentered ePSF data.
"""
# Define an EPSFModel for the input data. This EPSFModel will be
# used to evaluate the model on a shifted pixel grid to place the
# centroid at the array center.
epsf = EPSFModel(data=epsf_data, origin=epsf.origin, normalize=False,
oversampling=epsf.oversampling)
epsf.fill_value = 0.0
xcenter, ycenter = epsf.origin
dx_total = 0
dy_total = 0
    y, x = np.indices(epsf_data.shape, dtype=float)  # np.float alias removed in NumPy >= 1.24
iter_num = 0
center_accuracy_sq = center_accuracy ** 2
center_dist_sq = center_accuracy_sq + 1.e6
center_dist_sq_prev = center_dist_sq + 1
while (iter_num < maxiters and
center_dist_sq >= center_accuracy_sq):
iter_num += 1
# extract a cutout from the ePSF
slices_large, slices_small = overlap_slices(epsf_data.shape,
box_size,
(ycenter, xcenter))
epsf_cutout = epsf_data[slices_large]
mask = ~np.isfinite(epsf_cutout)
# find a new center position
xcenter_new, ycenter_new = centroid_func(epsf_cutout, mask=mask)
xcenter_new += slices_large[1].start
ycenter_new += slices_large[0].start
# calculate the shift
dx = xcenter - xcenter_new
dy = ycenter - ycenter_new
center_dist_sq = dx**2 + dy**2
if center_dist_sq >= center_dist_sq_prev: # don't shift
break
center_dist_sq_prev = center_dist_sq
# Resample the ePSF data to a shifted grid to place the
# centroid in the center of the central pixel. The shift is
# always performed on the input epsf_data.
dx_total += dx # accumulated shifts for the input epsf_data
dy_total += dy
epsf_data = epsf.evaluate(x=x, y=y, flux=1.0,
x_0=xcenter + dx_total,
y_0=ycenter + dy_total,
use_oversampling=False)
    return epsf_data

# Source: https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/epsf.py#L573-L671
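
# A toy sketch of one recentering step: measure the centroid of a Gaussian
# placed off-center in a 25x25 array and compute the shift that would move
# it to the array center at (12, 12).  centroid_com is the real photutils
# function imported in the header; the Gaussian image is an assumption.
yy, xx = np.mgrid[0:25, 0:25]
img = np.exp(-((xx - 12.7)**2 + (yy - 11.8)**2) / (2 * 2.0**2))
xc, yc = centroid_com(img)
dx, dy = 12.0 - xc, 12.0 - yc
print(round(dx, 2), round(dy, 2))  # ~ -0.7, ~ 0.2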

# --- EPSFBuilder._build_epsf_step ---
def _build_epsf_step(self, stars, epsf=None):
"""
A single iteration of improving an ePSF.
Parameters
----------
stars : `EPSFStars` object
The stars used to build the ePSF.
epsf : `EPSFModel` object, optional
The initial ePSF model. If not input, then the ePSF will be
built from scratch.
Returns
-------
epsf : `EPSFModel` object
The updated ePSF.
"""
if len(stars) < 1:
raise ValueError('stars must contain at least one EPSFStar or '
'LinkedEPSFStar object.')
if epsf is None:
# create an initial ePSF (array of zeros)
epsf = self._create_initial_epsf(stars)
else:
# improve the input ePSF
epsf = copy.deepcopy(epsf)
# compute a 3D stack of 2D residual images
residuals = self._resample_residuals(stars, epsf)
self._residuals.append(residuals)
# compute the sigma-clipped median along the 3D stack
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=RuntimeWarning)
warnings.simplefilter('ignore', category=AstropyUserWarning)
residuals = self.sigclip(residuals, axis=0, masked=False,
return_bounds=False)
if HAS_BOTTLENECK:
residuals = bottleneck.nanmedian(residuals, axis=0)
else:
residuals = np.nanmedian(residuals, axis=0)
self._residuals_sigclip.append(residuals)
# interpolate any missing data (np.nan)
mask = ~np.isfinite(residuals)
if np.any(mask):
residuals = _interpolate_missing_data(residuals, mask,
method='cubic')
# fill any remaining nans (outer points) with zeros
residuals[~np.isfinite(residuals)] = 0.
self._residuals_interp.append(residuals)
# add the residuals to the previous ePSF image
new_epsf = epsf.normalized_data + residuals
# smooth the ePSF
new_epsf = self._smooth_epsf(new_epsf)
# recenter the ePSF
new_epsf = self._recenter_epsf(new_epsf, epsf,
centroid_func=self.recentering_func,
box_size=self.recentering_boxsize,
maxiters=self.recentering_maxiters,
center_accuracy=1.0e-4)
# normalize the ePSF data
new_epsf /= np.sum(new_epsf, dtype=np.float64)
# return the new ePSF object
xcenter = (new_epsf.shape[1] - 1) / 2.
ycenter = (new_epsf.shape[0] - 1) / 2.
epsf_new = EPSFModel(data=new_epsf, origin=(xcenter, ycenter),
normalize=False, oversampling=epsf.oversampling)
    return epsf_new

# Source: https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/epsf.py#L673-L755
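
# A compact sketch of the stacking step above: sigma-clip the residual cube
# along the star axis, then take a NaN-aware median.  SigmaClip is the real
# astropy class (EPSFBuilder holds an instance as `self.sigclip`); the toy
# cube and its values are assumptions.
from astropy.stats import SigmaClip

cube = np.random.default_rng(1).normal(0.0, 0.01, size=(10, 7, 7))
cube[3, 2, 2] = 5.0      # one wildly discrepant star pixel
cube[5, 0, 0] = np.nan   # a pixel with no data for one star
clipped = SigmaClip(sigma=3.0, maxiters=10)(cube, axis=0,
                                            masked=False, return_bounds=False)
combined = np.nanmedian(clipped, axis=0)
print(abs(combined[2, 2]) < 0.05)  # True: the 5.0 outlier was rejected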

# --- EPSFBuilder.build_epsf ---
def build_epsf(self, stars, init_epsf=None):
"""
Iteratively build an ePSF from star cutouts.
Parameters
----------
stars : `EPSFStars` object
The stars used to build the ePSF.
init_epsf : `EPSFModel` object, optional
The initial ePSF model. If not input, then the ePSF will be
built from scratch.
Returns
-------
epsf : `EPSFModel` object
The constructed ePSF.
fitted_stars : `EPSFStars` object
The input stars with updated centers and fluxes derived
from fitting the output ``epsf``.
"""
iter_num = 0
center_dist_sq = self.center_accuracy_sq + 1.
centers = stars.cutout_center_flat
n_stars = stars.n_stars
fit_failed = np.zeros(n_stars, dtype=bool)
    dx_dy = np.zeros((n_stars, 2), dtype=float)  # np.float alias removed in NumPy >= 1.24
epsf = init_epsf
dt = 0.
while (iter_num < self.maxiters and
np.max(center_dist_sq) >= self.center_accuracy_sq and
not np.all(fit_failed)):
t_start = time.time()
iter_num += 1
if self.progress_bar:
if iter_num == 1:
dt_str = ' [? s/iter]'
else:
dt_str = ' [{:.1f} s/iter]'.format(dt)
print('PROGRESS: iteration {0:d} (of max {1}){2}'
.format(iter_num, self.maxiters, dt_str), end='\r')
# build/improve the ePSF
epsf = self._build_epsf_step(stars, epsf=epsf)
# fit the new ePSF to the stars to find improved centers
# we catch fit warnings here -- stars with unsuccessful fits
# are excluded from the ePSF build process
with warnings.catch_warnings():
message = '.*The fit may be unsuccessful;.*'
warnings.filterwarnings('ignore', message=message,
category=AstropyUserWarning)
stars = self.fitter(epsf, stars)
# find all stars where the fit failed
fit_failed = np.array([star._fit_error_status > 0
for star in stars.all_stars])
if np.all(fit_failed):
raise ValueError('The ePSF fitting failed for all stars.')
# permanently exclude fitting any star where the fit fails
# after 3 iterations
if iter_num > 3 and np.any(fit_failed):
idx = fit_failed.nonzero()[0]
for i in idx:
stars.all_stars[i]._excluded_from_fit = True
dx_dy = stars.cutout_center_flat - centers
dx_dy = dx_dy[np.logical_not(fit_failed)]
center_dist_sq = np.sum(dx_dy * dx_dy, axis=1, dtype=np.float64)
centers = stars.cutout_center_flat
self._nfit_failed.append(np.count_nonzero(fit_failed))
self._center_dist_sq.append(center_dist_sq)
self._max_center_dist_sq.append(np.max(center_dist_sq))
self._epsf.append(epsf)
dt = time.time() - t_start
return epsf, stars | python | def build_epsf(self, stars, init_epsf=None):
"""
Iteratively build an ePSF from star cutouts.
Parameters
----------
stars : `EPSFStars` object
The stars used to build the ePSF.
init_epsf : `EPSFModel` object, optional
The initial ePSF model. If not input, then the ePSF will be
built from scratch.
Returns
-------
epsf : `EPSFModel` object
The constructed ePSF.
fitted_stars : `EPSFStars` object
The input stars with updated centers and fluxes derived
from fitting the output ``epsf``.
"""
iter_num = 0
center_dist_sq = self.center_accuracy_sq + 1.
centers = stars.cutout_center_flat
n_stars = stars.n_stars
fit_failed = np.zeros(n_stars, dtype=bool)
dx_dy = np.zeros((n_stars, 2), dtype=float)
epsf = init_epsf
dt = 0.
while (iter_num < self.maxiters and
np.max(center_dist_sq) >= self.center_accuracy_sq and
not np.all(fit_failed)):
t_start = time.time()
iter_num += 1
if self.progress_bar:
if iter_num == 1:
dt_str = ' [? s/iter]'
else:
dt_str = ' [{:.1f} s/iter]'.format(dt)
print('PROGRESS: iteration {0:d} (of max {1}){2}'
.format(iter_num, self.maxiters, dt_str), end='\r')
# build/improve the ePSF
epsf = self._build_epsf_step(stars, epsf=epsf)
# fit the new ePSF to the stars to find improved centers
# we catch fit warnings here -- stars with unsuccessful fits
# are excluded from the ePSF build process
with warnings.catch_warnings():
message = '.*The fit may be unsuccessful;.*'
warnings.filterwarnings('ignore', message=message,
category=AstropyUserWarning)
stars = self.fitter(epsf, stars)
# find all stars where the fit failed
fit_failed = np.array([star._fit_error_status > 0
for star in stars.all_stars])
if np.all(fit_failed):
raise ValueError('The ePSF fitting failed for all stars.')
# permanently exclude fitting any star where the fit fails
# after 3 iterations
if iter_num > 3 and np.any(fit_failed):
idx = fit_failed.nonzero()[0]
for i in idx:
stars.all_stars[i]._excluded_from_fit = True
dx_dy = stars.cutout_center_flat - centers
dx_dy = dx_dy[np.logical_not(fit_failed)]
center_dist_sq = np.sum(dx_dy * dx_dy, axis=1, dtype=np.float64)
centers = stars.cutout_center_flat
self._nfit_failed.append(np.count_nonzero(fit_failed))
self._center_dist_sq.append(center_dist_sq)
self._max_center_dist_sq.append(np.max(center_dist_sq))
self._epsf.append(epsf)
dt = time.time() - t_start
return epsf, stars | [
"def",
"build_epsf",
"(",
"self",
",",
"stars",
",",
"init_epsf",
"=",
"None",
")",
":",
"iter_num",
"=",
"0",
"center_dist_sq",
"=",
"self",
".",
"center_accuracy_sq",
"+",
"1.",
"centers",
"=",
"stars",
".",
"cutout_center_flat",
"n_stars",
"=",
"stars",
".",
"n_stars",
"fit_failed",
"=",
"np",
".",
"zeros",
"(",
"n_stars",
",",
"dtype",
"=",
"bool",
")",
"dx_dy",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_stars",
",",
"2",
")",
",",
"dtype",
"=",
"np",
".",
"float",
")",
"epsf",
"=",
"init_epsf",
"dt",
"=",
"0.",
"while",
"(",
"iter_num",
"<",
"self",
".",
"maxiters",
"and",
"np",
".",
"max",
"(",
"center_dist_sq",
")",
">=",
"self",
".",
"center_accuracy_sq",
"and",
"not",
"np",
".",
"all",
"(",
"fit_failed",
")",
")",
":",
"t_start",
"=",
"time",
".",
"time",
"(",
")",
"iter_num",
"+=",
"1",
"if",
"self",
".",
"progress_bar",
":",
"if",
"iter_num",
"==",
"1",
":",
"dt_str",
"=",
"' [? s/iter]'",
"else",
":",
"dt_str",
"=",
"' [{:.1f} s/iter]'",
".",
"format",
"(",
"dt",
")",
"print",
"(",
"'PROGRESS: iteration {0:d} (of max {1}){2}'",
".",
"format",
"(",
"iter_num",
",",
"self",
".",
"maxiters",
",",
"dt_str",
")",
",",
"end",
"=",
"'\\r'",
")",
"# build/improve the ePSF",
"epsf",
"=",
"self",
".",
"_build_epsf_step",
"(",
"stars",
",",
"epsf",
"=",
"epsf",
")",
"# fit the new ePSF to the stars to find improved centers",
"# we catch fit warnings here -- stars with unsuccessful fits",
"# are excluded from the ePSF build process",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"message",
"=",
"'.*The fit may be unsuccessful;.*'",
"warnings",
".",
"filterwarnings",
"(",
"'ignore'",
",",
"message",
"=",
"message",
",",
"category",
"=",
"AstropyUserWarning",
")",
"stars",
"=",
"self",
".",
"fitter",
"(",
"epsf",
",",
"stars",
")",
"# find all stars where the fit failed",
"fit_failed",
"=",
"np",
".",
"array",
"(",
"[",
"star",
".",
"_fit_error_status",
">",
"0",
"for",
"star",
"in",
"stars",
".",
"all_stars",
"]",
")",
"if",
"np",
".",
"all",
"(",
"fit_failed",
")",
":",
"raise",
"ValueError",
"(",
"'The ePSF fitting failed for all stars.'",
")",
"# permanently exclude fitting any star where the fit fails",
"# after 3 iterations",
"if",
"iter_num",
">",
"3",
"and",
"np",
".",
"any",
"(",
"fit_failed",
")",
":",
"idx",
"=",
"fit_failed",
".",
"nonzero",
"(",
")",
"[",
"0",
"]",
"for",
"i",
"in",
"idx",
":",
"stars",
".",
"all_stars",
"[",
"i",
"]",
".",
"_excluded_from_fit",
"=",
"True",
"dx_dy",
"=",
"stars",
".",
"cutout_center_flat",
"-",
"centers",
"dx_dy",
"=",
"dx_dy",
"[",
"np",
".",
"logical_not",
"(",
"fit_failed",
")",
"]",
"center_dist_sq",
"=",
"np",
".",
"sum",
"(",
"dx_dy",
"*",
"dx_dy",
",",
"axis",
"=",
"1",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"centers",
"=",
"stars",
".",
"cutout_center_flat",
"self",
".",
"_nfit_failed",
".",
"append",
"(",
"np",
".",
"count_nonzero",
"(",
"fit_failed",
")",
")",
"self",
".",
"_center_dist_sq",
".",
"append",
"(",
"center_dist_sq",
")",
"self",
".",
"_max_center_dist_sq",
".",
"append",
"(",
"np",
".",
"max",
"(",
"center_dist_sq",
")",
")",
"self",
".",
"_epsf",
".",
"append",
"(",
"epsf",
")",
"dt",
"=",
"time",
".",
"time",
"(",
")",
"-",
"t_start",
"return",
"epsf",
",",
"stars"
] | Iteratively build an ePSF from star cutouts.
Parameters
----------
stars : `EPSFStars` object
The stars used to build the ePSF.
init_epsf : `EPSFModel` object, optional
The initial ePSF model. If not input, then the ePSF will be
built from scratch.
Returns
-------
epsf : `EPSFModel` object
The constructed ePSF.
fitted_stars : `EPSFStars` object
The input stars with updated centers and fluxes derived
from fitting the output ``epsf``. | [
"Iteratively",
"build",
"an",
"ePSF",
"from",
"star",
"cutouts",
"."
] | cc9bb4534ab76bac98cb5f374a348a2573d10401 | https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/epsf.py#L757-L842 | train |
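The loop tokenized above repeats build-and-refit passes until every fitted star center moves by less than ``center_accuracy`` between iterations (or ``maxiters`` is hit, or all fits fail). Stripped of the ePSF machinery, the stopping rule is just a maximum squared center shift; a minimal sketch in which the ``improve`` callable is a hypothetical stand-in for one build-and-refit step:

import numpy as np

def iterate_until_converged(centers, improve, center_accuracy=1e-3,
                            maxiters=30):
    accuracy_sq = center_accuracy ** 2
    dist_sq = np.array([accuracy_sq + 1.])   # force at least one pass
    niter = 0
    while niter < maxiters and np.max(dist_sq) >= accuracy_sq:
        niter += 1
        new_centers = improve(centers)       # one build-and-refit step
        delta = new_centers - centers
        dist_sq = np.sum(delta * delta, axis=1)
        centers = new_centers
    return centers, niter

# toy 'improve' that halves the distance to fixed true centers each call
target = np.array([[10., 10.], [20., 5.]])
centers, niter = iterate_until_converged(
    np.zeros((2, 2)), lambda c: c + 0.5 * (target - c))
print(niter)   # ~15: converges before maxiters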
astropy/photutils | photutils/psf/models.py | FittableImageModel._set_oversampling | def _set_oversampling(self, value):
"""
This is a private method because it's used in the initializer by the
``oversampling`` setter.
"""
try:
value = np.atleast_1d(value).astype(float)
if len(value) == 1:
value = np.repeat(value, 2)
except ValueError:
raise ValueError('Oversampling factors must be float')
if np.any(value <= 0):
raise ValueError('Oversampling factors must be greater than 0')
self._oversampling = value | python | def _set_oversampling(self, value):
"""
This is a private method because it's used in the initializer by the
``oversampling`` setter.
"""
try:
value = np.atleast_1d(value).astype(float)
if len(value) == 1:
value = np.repeat(value, 2)
except ValueError:
raise ValueError('Oversampling factors must be float')
if np.any(value <= 0):
raise ValueError('Oversampling factors must be greater than 0')
self._oversampling = value | [
"def",
"_set_oversampling",
"(",
"self",
",",
"value",
")",
":",
"try",
":",
"value",
"=",
"np",
".",
"atleast_1d",
"(",
"value",
")",
".",
"astype",
"(",
"float",
")",
"if",
"len",
"(",
"value",
")",
"==",
"1",
":",
"value",
"=",
"np",
".",
"repeat",
"(",
"value",
",",
"2",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"'Oversampling factors must be float'",
")",
"if",
"np",
".",
"any",
"(",
"value",
"<=",
"0",
")",
":",
"raise",
"ValueError",
"(",
"'Oversampling factors must be greater than 0'",
")",
"self",
".",
"_oversampling",
"=",
"value"
] | This is a private method because it's used in the initializer by the
``oversampling`` setter. | [
"This",
"is",
"a",
"private",
"method",
"because",
"it",
"s",
"used",
"in",
"the",
"initializer",
"by",
"the",
"oversampling"
] | cc9bb4534ab76bac98cb5f374a348a2573d10401 | https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/models.py#L234-L249 | train |
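The setter tokenized above normalizes ``oversampling`` to a two-element float array: a scalar is broadcast to both axes and non-positive values are rejected. The same behavior, exercised standalone (values are illustrative):

import numpy as np

def normalize_oversampling(value):
    factors = np.atleast_1d(value).astype(float)
    if len(factors) == 1:
        factors = np.repeat(factors, 2)
    if np.any(factors <= 0):
        raise ValueError('Oversampling factors must be greater than 0')
    return factors

print(normalize_oversampling(4))        # [4. 4.]
print(normalize_oversampling((4, 2)))   # [4. 2.]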
astropy/photutils | photutils/psf/models.py | FittableImageModel.evaluate | def evaluate(self, x, y, flux, x_0, y_0, use_oversampling=True):
"""
Evaluate the model on some input variables and provided model
parameters.
Parameters
----------
use_oversampling : bool, optional
Whether to use the oversampling factor to calculate the
model pixel indices. The default is `True`, which means the
input indices will be multipled by this factor.
"""
if use_oversampling:
xi = self._oversampling[0] * (np.asarray(x) - x_0)
yi = self._oversampling[1] * (np.asarray(y) - y_0)
else:
xi = np.asarray(x) - x_0
yi = np.asarray(y) - y_0
xi += self._x_origin
yi += self._y_origin
f = flux * self._normalization_constant
evaluated_model = f * self.interpolator.ev(xi, yi)
if self._fill_value is not None:
# find indices of pixels that are outside the input pixel grid and
# set these pixels to the 'fill_value':
invalid = (((xi < 0) | (xi > self._nx - 1)) |
((yi < 0) | (yi > self._ny - 1)))
evaluated_model[invalid] = self._fill_value
return evaluated_model | python | def evaluate(self, x, y, flux, x_0, y_0, use_oversampling=True):
"""
Evaluate the model on some input variables and provided model
parameters.
Parameters
----------
use_oversampling : bool, optional
Whether to use the oversampling factor to calculate the
model pixel indices. The default is `True`, which means the
input indices will be multipled by this factor.
"""
if use_oversampling:
xi = self._oversampling[0] * (np.asarray(x) - x_0)
yi = self._oversampling[1] * (np.asarray(y) - y_0)
else:
xi = np.asarray(x) - x_0
yi = np.asarray(y) - y_0
xi += self._x_origin
yi += self._y_origin
f = flux * self._normalization_constant
evaluated_model = f * self.interpolator.ev(xi, yi)
if self._fill_value is not None:
# find indices of pixels that are outside the input pixel grid and
# set these pixels to the 'fill_value':
invalid = (((xi < 0) | (xi > self._nx - 1)) |
((yi < 0) | (yi > self._ny - 1)))
evaluated_model[invalid] = self._fill_value
return evaluated_model | [
"def",
"evaluate",
"(",
"self",
",",
"x",
",",
"y",
",",
"flux",
",",
"x_0",
",",
"y_0",
",",
"use_oversampling",
"=",
"True",
")",
":",
"if",
"use_oversampling",
":",
"xi",
"=",
"self",
".",
"_oversampling",
"[",
"0",
"]",
"*",
"(",
"np",
".",
"asarray",
"(",
"x",
")",
"-",
"x_0",
")",
"yi",
"=",
"self",
".",
"_oversampling",
"[",
"1",
"]",
"*",
"(",
"np",
".",
"asarray",
"(",
"y",
")",
"-",
"y_0",
")",
"else",
":",
"xi",
"=",
"np",
".",
"asarray",
"(",
"x",
")",
"-",
"x_0",
"yi",
"=",
"np",
".",
"asarray",
"(",
"y",
")",
"-",
"y_0",
"xi",
"+=",
"self",
".",
"_x_origin",
"yi",
"+=",
"self",
".",
"_y_origin",
"f",
"=",
"flux",
"*",
"self",
".",
"_normalization_constant",
"evaluated_model",
"=",
"f",
"*",
"self",
".",
"interpolator",
".",
"ev",
"(",
"xi",
",",
"yi",
")",
"if",
"self",
".",
"_fill_value",
"is",
"not",
"None",
":",
"# find indices of pixels that are outside the input pixel grid and",
"# set these pixels to the 'fill_value':",
"invalid",
"=",
"(",
"(",
"(",
"xi",
"<",
"0",
")",
"|",
"(",
"xi",
">",
"self",
".",
"_nx",
"-",
"1",
")",
")",
"|",
"(",
"(",
"yi",
"<",
"0",
")",
"|",
"(",
"yi",
">",
"self",
".",
"_ny",
"-",
"1",
")",
")",
")",
"evaluated_model",
"[",
"invalid",
"]",
"=",
"self",
".",
"_fill_value",
"return",
"evaluated_model"
] | Evaluate the model on some input variables and provided model
parameters.
Parameters
----------
use_oversampling : bool, optional
Whether to use the oversampling factor to calculate the
model pixel indices. The default is `True`, which means the
input indices will be multipled by this factor. | [
"Evaluate",
"the",
"model",
"on",
"some",
"input",
"variables",
"and",
"provided",
"model",
"parameters",
"."
] | cc9bb4534ab76bac98cb5f374a348a2573d10401 | https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/models.py#L453-L486 | train |
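In ``evaluate`` above, detector offsets from ``(x_0, y_0)`` are scaled by the oversampling factors onto the model's oversampled grid, shifted by the grid origin, and points that fall off the grid are flagged so they can receive ``fill_value``. A NumPy-only sketch of that mapping; the grid size, origin, and offsets are made-up numbers:

import numpy as np

oversampling = np.array([4., 4.])
nx = ny = 101                    # oversampled grid size
x_origin = y_origin = 50.        # grid position of the model center

y, x = np.mgrid[-15:16, -15:16].astype(float)   # detector pixel offsets
x_0, y_0 = 0.3, -0.2                            # fitted source position

xi = oversampling[0] * (x - x_0) + x_origin
yi = oversampling[1] * (y - y_0) + y_origin
invalid = ((xi < 0) | (xi > nx - 1)) | ((yi < 0) | (yi > ny - 1))
print(invalid.sum())   # nonzero: outer pixels would get fill_value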
astropy/photutils | photutils/psf/models.py | GriddedPSFModel._find_bounds_1d | def _find_bounds_1d(data, x):
"""
Find the index of the lower bound where ``x`` should be inserted
into ``a`` to maintain order.
The index of the upper bound is the index of the lower bound
plus 2. Both bound indices must be within the array.
Parameters
----------
data : 1D `~numpy.ndarray`
The 1D array to search.
x : float
The value to insert.
Returns
-------
index : int
The index of the lower bound.
"""
idx = np.searchsorted(data, x)
if idx == 0:
idx0 = 0
elif idx == len(data): # pragma: no cover
idx0 = idx - 2
else:
idx0 = idx - 1
return idx0 | python | def _find_bounds_1d(data, x):
"""
Find the index of the lower bound where ``x`` should be inserted
into ``a`` to maintain order.
The index of the upper bound is the index of the lower bound
plus 2. Both bound indices must be within the array.
Parameters
----------
data : 1D `~numpy.ndarray`
The 1D array to search.
x : float
The value to insert.
Returns
-------
index : int
The index of the lower bound.
"""
idx = np.searchsorted(data, x)
if idx == 0:
idx0 = 0
elif idx == len(data): # pragma: no cover
idx0 = idx - 2
else:
idx0 = idx - 1
return idx0 | [
"def",
"_find_bounds_1d",
"(",
"data",
",",
"x",
")",
":",
"idx",
"=",
"np",
".",
"searchsorted",
"(",
"data",
",",
"x",
")",
"if",
"idx",
"==",
"0",
":",
"idx0",
"=",
"0",
"elif",
"idx",
"==",
"len",
"(",
"data",
")",
":",
"# pragma: no cover",
"idx0",
"=",
"idx",
"-",
"2",
"else",
":",
"idx0",
"=",
"idx",
"-",
"1",
"return",
"idx0"
] | Find the index of the lower bound where ``x`` should be inserted
into ``a`` to maintain order.
The index of the upper bound is the index of the lower bound
plus 2. Both bound indices must be within the array.
Parameters
----------
data : 1D `~numpy.ndarray`
The 1D array to search.
x : float
The value to insert.
Returns
-------
index : int
The index of the lower bound. | [
"Find",
"the",
"index",
"of",
"the",
"lower",
"bound",
"where",
"x",
"should",
"be",
"inserted",
"into",
"a",
"to",
"maintain",
"order",
"."
] | cc9bb4534ab76bac98cb5f374a348a2573d10401 | https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/models.py#L582-L612 | train |
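``_find_bounds_1d`` above returns the index of the lower bracketing grid point, clamping at both ends so that ``index`` and ``index + 2`` always slice two valid reference positions. Exercised directly on a toy grid:

import numpy as np

def find_bounds_1d(data, x):
    idx = np.searchsorted(data, x)
    if idx == 0:
        return 0
    if idx == len(data):
        return idx - 2
    return idx - 1

grid = np.array([0., 250., 500., 750., 1000.])
for x in (-10., 300., 1000., 1200.):
    i = find_bounds_1d(grid, x)
    print(x, '->', grid[i:i + 2])   # always a 2-element bracket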
astropy/photutils | photutils/psf/models.py | GriddedPSFModel._bilinear_interp | def _bilinear_interp(xyref, zref, xi, yi):
"""
Perform bilinear interpolation of four 2D arrays located at
points on a regular grid.
Parameters
----------
xyref : list of 4 (x, y) pairs
A list of 4 ``(x, y)`` pairs that form a rectangle.
zref : 3D `~numpy.ndarray`
A 3D `~numpy.ndarray` of shape ``(4, nx, ny)``. The first
axis corresponds to ``xyref``, i.e. ``zref[0, :, :]`` is
the 2D array located at ``xyref[0]``.
xi, yi : float
The ``(xi, yi)`` point at which to perform the
interpolation. The ``(xi, yi)`` point must lie within the
rectangle defined by ``xyref``.
Returns
-------
result : 2D `~numpy.ndarray`
The 2D interpolated array.
"""
if len(xyref) != 4:
raise ValueError('xyref must contain only 4 (x, y) pairs')
if zref.shape[0] != 4:
raise ValueError('zref must have a length of 4 on the first '
'axis.')
xyref = [tuple(i) for i in xyref]
idx = sorted(range(len(xyref)), key=xyref.__getitem__)
xyref = sorted(xyref) # sort by x, then y
(x0, y0), (_x0, y1), (x1, _y0), (_x1, _y1) = xyref
if x0 != _x0 or x1 != _x1 or y0 != _y0 or y1 != _y1:
raise ValueError('The refxy points do not form a rectangle.')
if not np.isscalar(xi):
xi = xi[0]
if not np.isscalar(yi):
yi = yi[0]
if not x0 <= xi <= x1 or not y0 <= yi <= y1:
raise ValueError('The (x, y) input is not within the rectangle '
'defined by xyref.')
data = np.asarray(zref)[idx]
weights = np.array([(x1 - xi) * (y1 - yi), (x1 - xi) * (yi - y0),
(xi - x0) * (y1 - yi), (xi - x0) * (yi - y0)])
norm = (x1 - x0) * (y1 - y0)
return np.sum(data * weights[:, None, None], axis=0) / norm | python | def _bilinear_interp(xyref, zref, xi, yi):
"""
Perform bilinear interpolation of four 2D arrays located at
points on a regular grid.
Parameters
----------
xyref : list of 4 (x, y) pairs
A list of 4 ``(x, y)`` pairs that form a rectangle.
zref : 3D `~numpy.ndarray`
A 3D `~numpy.ndarray` of shape ``(4, nx, ny)``. The first
axis corresponds to ``xyref``, i.e. ``zref[0, :, :]`` is
the 2D array located at ``xyref[0]``.
xi, yi : float
The ``(xi, yi)`` point at which to perform the
interpolation. The ``(xi, yi)`` point must lie within the
rectangle defined by ``xyref``.
Returns
-------
result : 2D `~numpy.ndarray`
The 2D interpolated array.
"""
if len(xyref) != 4:
raise ValueError('xyref must contain only 4 (x, y) pairs')
if zref.shape[0] != 4:
raise ValueError('zref must have a length of 4 on the first '
'axis.')
xyref = [tuple(i) for i in xyref]
idx = sorted(range(len(xyref)), key=xyref.__getitem__)
xyref = sorted(xyref) # sort by x, then y
(x0, y0), (_x0, y1), (x1, _y0), (_x1, _y1) = xyref
if x0 != _x0 or x1 != _x1 or y0 != _y0 or y1 != _y1:
raise ValueError('The refxy points do not form a rectangle.')
if not np.isscalar(xi):
xi = xi[0]
if not np.isscalar(yi):
yi = yi[0]
if not x0 <= xi <= x1 or not y0 <= yi <= y1:
raise ValueError('The (x, y) input is not within the rectangle '
'defined by xyref.')
data = np.asarray(zref)[idx]
weights = np.array([(x1 - xi) * (y1 - yi), (x1 - xi) * (yi - y0),
(xi - x0) * (y1 - yi), (xi - x0) * (yi - y0)])
norm = (x1 - x0) * (y1 - y0)
return np.sum(data * weights[:, None, None], axis=0) / norm | [
"def",
"_bilinear_interp",
"(",
"xyref",
",",
"zref",
",",
"xi",
",",
"yi",
")",
":",
"if",
"len",
"(",
"xyref",
")",
"!=",
"4",
":",
"raise",
"ValueError",
"(",
"'xyref must contain only 4 (x, y) pairs'",
")",
"if",
"zref",
".",
"shape",
"[",
"0",
"]",
"!=",
"4",
":",
"raise",
"ValueError",
"(",
"'zref must have a length of 4 on the first '",
"'axis.'",
")",
"xyref",
"=",
"[",
"tuple",
"(",
"i",
")",
"for",
"i",
"in",
"xyref",
"]",
"idx",
"=",
"sorted",
"(",
"range",
"(",
"len",
"(",
"xyref",
")",
")",
",",
"key",
"=",
"xyref",
".",
"__getitem__",
")",
"xyref",
"=",
"sorted",
"(",
"xyref",
")",
"# sort by x, then y",
"(",
"x0",
",",
"y0",
")",
",",
"(",
"_x0",
",",
"y1",
")",
",",
"(",
"x1",
",",
"_y0",
")",
",",
"(",
"_x1",
",",
"_y1",
")",
"=",
"xyref",
"if",
"x0",
"!=",
"_x0",
"or",
"x1",
"!=",
"_x1",
"or",
"y0",
"!=",
"_y0",
"or",
"y1",
"!=",
"_y1",
":",
"raise",
"ValueError",
"(",
"'The refxy points do not form a rectangle.'",
")",
"if",
"not",
"np",
".",
"isscalar",
"(",
"xi",
")",
":",
"xi",
"=",
"xi",
"[",
"0",
"]",
"if",
"not",
"np",
".",
"isscalar",
"(",
"yi",
")",
":",
"yi",
"=",
"yi",
"[",
"0",
"]",
"if",
"not",
"x0",
"<=",
"xi",
"<=",
"x1",
"or",
"not",
"y0",
"<=",
"yi",
"<=",
"y1",
":",
"raise",
"ValueError",
"(",
"'The (x, y) input is not within the rectangle '",
"'defined by xyref.'",
")",
"data",
"=",
"np",
".",
"asarray",
"(",
"zref",
")",
"[",
"idx",
"]",
"weights",
"=",
"np",
".",
"array",
"(",
"[",
"(",
"x1",
"-",
"xi",
")",
"*",
"(",
"y1",
"-",
"yi",
")",
",",
"(",
"x1",
"-",
"xi",
")",
"*",
"(",
"yi",
"-",
"y0",
")",
",",
"(",
"xi",
"-",
"x0",
")",
"*",
"(",
"y1",
"-",
"yi",
")",
",",
"(",
"xi",
"-",
"x0",
")",
"*",
"(",
"yi",
"-",
"y0",
")",
"]",
")",
"norm",
"=",
"(",
"x1",
"-",
"x0",
")",
"*",
"(",
"y1",
"-",
"y0",
")",
"return",
"np",
".",
"sum",
"(",
"data",
"*",
"weights",
"[",
":",
",",
"None",
",",
"None",
"]",
",",
"axis",
"=",
"0",
")",
"/",
"norm"
] | Perform bilinear interpolation of four 2D arrays located at
points on a regular grid.
Parameters
----------
xyref : list of 4 (x, y) pairs
A list of 4 ``(x, y)`` pairs that form a rectangle.
zref : 3D `~numpy.ndarray`
A 3D `~numpy.ndarray` of shape ``(4, nx, ny)``. The first
axis corresponds to ``xyref``, i.e. ``zref[0, :, :]`` is
the 2D array located at ``xyref[0]``.
xi, yi : float
The ``(xi, yi)`` point at which to perform the
interpolation. The ``(xi, yi)`` point must lie within the
rectangle defined by ``xyref``.
Returns
-------
result : 2D `~numpy.ndarray`
The 2D interpolated array. | [
"Perform",
"bilinear",
"interpolation",
"of",
"four",
"2D",
"arrays",
"located",
"at",
"points",
"on",
"a",
"regular",
"grid",
"."
] | cc9bb4534ab76bac98cb5f374a348a2573d10401 | https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/models.py#L651-L706 | train |
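A worked instance of the bilinear blend implemented above: four constant reference planes sit on the corners of a unit cell, and each is weighted by the area of the rectangle opposite its corner. The index-permutation bookkeeping from the function is skipped because this toy ``xyref`` is already sorted:

import numpy as np

xyref = [(0., 0.), (0., 1.), (1., 0.), (1., 1.)]
zref = np.array([np.full((3, 3), v) for v in (0., 1., 2., 3.)])
xi, yi = 0.25, 0.5

(x0, y0), (_, y1), (x1, _), _ = sorted(xyref)
weights = np.array([(x1 - xi) * (y1 - yi), (x1 - xi) * (yi - y0),
                    (xi - x0) * (y1 - yi), (xi - x0) * (yi - y0)])
norm = (x1 - x0) * (y1 - y0)
result = np.sum(zref * weights[:, None, None], axis=0) / norm
print(result[0, 0])   # 0.375*0 + 0.375*1 + 0.125*2 + 0.125*3 = 1.0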
astropy/photutils | photutils/psf/models.py | GriddedPSFModel.evaluate | def evaluate(self, x, y, flux, x_0, y_0):
"""
Evaluate the `GriddedPSFModel` for the input parameters.
"""
# NOTE: this is needed because the PSF photometry routines input
# length-1 values instead of scalars. TODO: fix the photometry
# routines.
if not np.isscalar(x_0):
x_0 = x_0[0]
if not np.isscalar(y_0):
y_0 = y_0[0]
if (x_0 < self._xgrid_min or x_0 > self._xgrid_max or
y_0 < self._ygrid_min or y_0 > self._ygrid_max):
# position is outside of the grid, so simply use the
# closest reference PSF
self._ref_indices = np.argsort(np.hypot(self._grid_xpos - x_0,
self._grid_ypos - y_0))[0]
self._psf_interp = self.data[self._ref_indices, :, :]
else:
# find the four bounding reference PSFs and interpolate
self._ref_indices = self._find_bounding_points(x_0, y_0)
xyref = np.array(self.grid_xypos)[self._ref_indices]
psfs = self.data[self._ref_indices, :, :]
self._psf_interp = self._bilinear_interp(xyref, psfs, x_0, y_0)
# now evaluate the PSF at the (x_0, y_0) subpixel position on
# the input (x, y) values
psfmodel = FittableImageModel(self._psf_interp,
oversampling=self.oversampling)
return psfmodel.evaluate(x, y, flux, x_0, y_0) | python | def evaluate(self, x, y, flux, x_0, y_0):
"""
Evaluate the `GriddedPSFModel` for the input parameters.
"""
# NOTE: this is needed because the PSF photometry routines input
# length-1 values instead of scalars. TODO: fix the photometry
# routines.
if not np.isscalar(x_0):
x_0 = x_0[0]
if not np.isscalar(y_0):
y_0 = y_0[0]
if (x_0 < self._xgrid_min or x_0 > self._xgrid_max or
y_0 < self._ygrid_min or y_0 > self._ygrid_max):
# position is outside of the grid, so simply use the
# closest reference PSF
self._ref_indices = np.argsort(np.hypot(self._grid_xpos - x_0,
self._grid_ypos - y_0))[0]
self._psf_interp = self.data[self._ref_indices, :, :]
else:
# find the four bounding reference PSFs and interpolate
self._ref_indices = self._find_bounding_points(x_0, y_0)
xyref = np.array(self.grid_xypos)[self._ref_indices]
psfs = self.data[self._ref_indices, :, :]
self._psf_interp = self._bilinear_interp(xyref, psfs, x_0, y_0)
# now evaluate the PSF at the (x_0, y_0) subpixel position on
# the input (x, y) values
psfmodel = FittableImageModel(self._psf_interp,
oversampling=self.oversampling)
return psfmodel.evaluate(x, y, flux, x_0, y_0) | [
"def",
"evaluate",
"(",
"self",
",",
"x",
",",
"y",
",",
"flux",
",",
"x_0",
",",
"y_0",
")",
":",
"# NOTE: this is needed because the PSF photometry routines input",
"# length-1 values instead of scalars. TODO: fix the photometry",
"# routines.",
"if",
"not",
"np",
".",
"isscalar",
"(",
"x_0",
")",
":",
"x_0",
"=",
"x_0",
"[",
"0",
"]",
"if",
"not",
"np",
".",
"isscalar",
"(",
"y_0",
")",
":",
"y_0",
"=",
"y_0",
"[",
"0",
"]",
"if",
"(",
"x_0",
"<",
"self",
".",
"_xgrid_min",
"or",
"x_0",
">",
"self",
".",
"_xgrid_max",
"or",
"y_0",
"<",
"self",
".",
"_ygrid_min",
"or",
"y_0",
">",
"self",
".",
"_ygrid_max",
")",
":",
"# position is outside of the grid, so simply use the",
"# closest reference PSF",
"self",
".",
"_ref_indices",
"=",
"np",
".",
"argsort",
"(",
"np",
".",
"hypot",
"(",
"self",
".",
"_grid_xpos",
"-",
"x_0",
",",
"self",
".",
"_grid_ypos",
"-",
"y_0",
")",
")",
"[",
"0",
"]",
"self",
".",
"_psf_interp",
"=",
"self",
".",
"data",
"[",
"self",
".",
"_ref_indices",
",",
":",
",",
":",
"]",
"else",
":",
"# find the four bounding reference PSFs and interpolate",
"self",
".",
"_ref_indices",
"=",
"self",
".",
"_find_bounding_points",
"(",
"x_0",
",",
"y_0",
")",
"xyref",
"=",
"np",
".",
"array",
"(",
"self",
".",
"grid_xypos",
")",
"[",
"self",
".",
"_ref_indices",
"]",
"psfs",
"=",
"self",
".",
"data",
"[",
"self",
".",
"_ref_indices",
",",
":",
",",
":",
"]",
"self",
".",
"_psf_interp",
"=",
"self",
".",
"_bilinear_interp",
"(",
"xyref",
",",
"psfs",
",",
"x_0",
",",
"y_0",
")",
"# now evaluate the PSF at the (x_0, y_0) subpixel position on",
"# the input (x, y) values",
"psfmodel",
"=",
"FittableImageModel",
"(",
"self",
".",
"_psf_interp",
",",
"oversampling",
"=",
"self",
".",
"oversampling",
")",
"return",
"psfmodel",
".",
"evaluate",
"(",
"x",
",",
"y",
",",
"flux",
",",
"x_0",
",",
"y_0",
")"
] | Evaluate the `GriddedPSFModel` for the input parameters. | [
"Evaluate",
"the",
"GriddedPSFModel",
"for",
"the",
"input",
"parameters",
"."
] | cc9bb4534ab76bac98cb5f374a348a2573d10401 | https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/models.py#L708-L742 | train |
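For positions outside the reference grid, ``evaluate`` above falls back to the single nearest reference PSF; that selection is just an argmin over Euclidean distances. A standalone sketch with made-up grid positions:

import numpy as np

grid_xpos = np.array([0., 0., 1000., 1000.])
grid_ypos = np.array([0., 1000., 0., 1000.])
x_0, y_0 = 1200., -50.   # requested position, off the grid

nearest = np.argsort(np.hypot(grid_xpos - x_0, grid_ypos - y_0))[0]
print(nearest)   # 2 -> the reference PSF at (1000, 0)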
astropy/photutils | photutils/psf/models.py | IntegratedGaussianPRF.evaluate | def evaluate(self, x, y, flux, x_0, y_0, sigma):
"""Model function Gaussian PSF model."""
return (flux / 4 *
((self._erf((x - x_0 + 0.5) / (np.sqrt(2) * sigma)) -
self._erf((x - x_0 - 0.5) / (np.sqrt(2) * sigma))) *
(self._erf((y - y_0 + 0.5) / (np.sqrt(2) * sigma)) -
self._erf((y - y_0 - 0.5) / (np.sqrt(2) * sigma))))) | python | def evaluate(self, x, y, flux, x_0, y_0, sigma):
"""Model function Gaussian PSF model."""
return (flux / 4 *
((self._erf((x - x_0 + 0.5) / (np.sqrt(2) * sigma)) -
self._erf((x - x_0 - 0.5) / (np.sqrt(2) * sigma))) *
(self._erf((y - y_0 + 0.5) / (np.sqrt(2) * sigma)) -
self._erf((y - y_0 - 0.5) / (np.sqrt(2) * sigma))))) | [
"def",
"evaluate",
"(",
"self",
",",
"x",
",",
"y",
",",
"flux",
",",
"x_0",
",",
"y_0",
",",
"sigma",
")",
":",
"return",
"(",
"flux",
"/",
"4",
"*",
"(",
"(",
"self",
".",
"_erf",
"(",
"(",
"x",
"-",
"x_0",
"+",
"0.5",
")",
"/",
"(",
"np",
".",
"sqrt",
"(",
"2",
")",
"*",
"sigma",
")",
")",
"-",
"self",
".",
"_erf",
"(",
"(",
"x",
"-",
"x_0",
"-",
"0.5",
")",
"/",
"(",
"np",
".",
"sqrt",
"(",
"2",
")",
"*",
"sigma",
")",
")",
")",
"*",
"(",
"self",
".",
"_erf",
"(",
"(",
"y",
"-",
"y_0",
"+",
"0.5",
")",
"/",
"(",
"np",
".",
"sqrt",
"(",
"2",
")",
"*",
"sigma",
")",
")",
"-",
"self",
".",
"_erf",
"(",
"(",
"y",
"-",
"y_0",
"-",
"0.5",
")",
"/",
"(",
"np",
".",
"sqrt",
"(",
"2",
")",
"*",
"sigma",
")",
")",
")",
")",
")"
] | Model function for the Gaussian PSF model. | [
"Model",
"function",
"Gaussian",
"PSF",
"model",
"."
] | cc9bb4534ab76bac98cb5f374a348a2573d10401 | https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/models.py#L819-L826 | train |
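``evaluate`` above integrates a 2D Gaussian exactly over each unit pixel via the error function, so summing the model over a wide pixel grid recovers the total flux. A self-contained check (scipy's ``erf`` stands in for the model's ``self._erf``):

import numpy as np
from scipy.special import erf

def pixel_integrated_gaussian(x, y, flux, x_0, y_0, sigma):
    s = np.sqrt(2) * sigma
    return (flux / 4
            * (erf((x - x_0 + 0.5) / s) - erf((x - x_0 - 0.5) / s))
            * (erf((y - y_0 + 0.5) / s) - erf((y - y_0 - 0.5) / s)))

y, x = np.mgrid[-10:11, -10:11]
image = pixel_integrated_gaussian(x, y, flux=100., x_0=0.2, y_0=-0.4,
                                  sigma=1.5)
print(image.sum())   # ~100: flux is conserved by construction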
astropy/photutils | photutils/psf/models.py | PRFAdapter.evaluate | def evaluate(self, x, y, flux, x_0, y_0):
"""The evaluation function for PRFAdapter."""
if self.xname is None:
dx = x - x_0
else:
dx = x
setattr(self.psfmodel, self.xname, x_0)
if self.yname is None:
dy = y - y_0
else:
dy = y
setattr(self.psfmodel, self.yname, y_0)
if self.fluxname is None:
return (flux * self._psf_scale_factor *
self._integrated_psfmodel(dx, dy))
else:
setattr(self.psfmodel, self.fluxname, flux * self._psf_scale_factor)
return self._integrated_psfmodel(dx, dy) | python | def evaluate(self, x, y, flux, x_0, y_0):
"""The evaluation function for PRFAdapter."""
if self.xname is None:
dx = x - x_0
else:
dx = x
setattr(self.psfmodel, self.xname, x_0)
if self.yname is None:
dy = y - y_0
else:
dy = y
setattr(self.psfmodel, self.yname, y_0)
if self.fluxname is None:
return (flux * self._psf_scale_factor *
self._integrated_psfmodel(dx, dy))
else:
setattr(self.psfmodel, self.fluxname, flux * self._psf_scale_factor)
return self._integrated_psfmodel(dx, dy) | [
"def",
"evaluate",
"(",
"self",
",",
"x",
",",
"y",
",",
"flux",
",",
"x_0",
",",
"y_0",
")",
":",
"if",
"self",
".",
"xname",
"is",
"None",
":",
"dx",
"=",
"x",
"-",
"x_0",
"else",
":",
"dx",
"=",
"x",
"setattr",
"(",
"self",
".",
"psfmodel",
",",
"self",
".",
"xname",
",",
"x_0",
")",
"if",
"self",
".",
"xname",
"is",
"None",
":",
"dy",
"=",
"y",
"-",
"y_0",
"else",
":",
"dy",
"=",
"y",
"setattr",
"(",
"self",
".",
"psfmodel",
",",
"self",
".",
"yname",
",",
"y_0",
")",
"if",
"self",
".",
"fluxname",
"is",
"None",
":",
"return",
"(",
"flux",
"*",
"self",
".",
"_psf_scale_factor",
"*",
"self",
".",
"_integrated_psfmodel",
"(",
"dx",
",",
"dy",
")",
")",
"else",
":",
"setattr",
"(",
"self",
".",
"psfmodel",
",",
"self",
".",
"yname",
",",
"flux",
"*",
"self",
".",
"_psf_scale_factor",
")",
"return",
"self",
".",
"_integrated_psfmodel",
"(",
"dx",
",",
"dy",
")"
] | The evaluation function for PRFAdapter. | [
"The",
"evaluation",
"function",
"for",
"PRFAdapter",
"."
] | cc9bb4534ab76bac98cb5f374a348a2573d10401 | https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/models.py#L895-L915 | train |
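``PRFAdapter`` turns a continuous PSF into a pixel-response function by integrating the model over each pixel (in photutils, ``self._integrated_psfmodel`` performs this with scipy's ``dblquad``). A minimal stand-in on a symmetric Gaussian; the half-pixel integration bounds are the essential detail:

import numpy as np
from scipy.integrate import dblquad

def gauss2d(y, x, sigma=1.0):
    return np.exp(-(x * x + y * y) / (2 * sigma ** 2)) / (2 * np.pi * sigma ** 2)

def prf_value(dx, dy):
    value, _ = dblquad(gauss2d, dx - 0.5, dx + 0.5,
                       lambda x: dy - 0.5, lambda x: dy + 0.5)
    return value

print(prf_value(0., 0.))   # ~0.147: central-pixel fraction of unit flux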
astropy/photutils | photutils/isophote/isophote.py | _isophote_list_to_table | def _isophote_list_to_table(isophote_list):
"""
Convert an `~photutils.isophote.IsophoteList` instance to
a `~astropy.table.QTable`.
Parameters
----------
isophote_list : list of `~photutils.isophote.Isophote` or a `~photutils.isophote.IsophoteList` instance
A list of isophotes.
Returns
-------
result : `~astropy.table.QTable`
An astropy QTable with the main isophote parameters.
"""
properties = OrderedDict()
properties['sma'] = 'sma'
properties['intens'] = 'intens'
properties['int_err'] = 'intens_err'
properties['eps'] = 'ellipticity'
properties['ellip_err'] = 'ellipticity_err'
properties['pa'] = 'pa'
properties['pa_err'] = 'pa_err'
properties['grad_r_error'] = 'grad_rerr'
properties['ndata'] = 'ndata'
properties['nflag'] = 'flag'
properties['niter'] = 'niter'
properties['stop_code'] = 'stop_code'
isotable = QTable()
for k, v in properties.items():
isotable[v] = np.array([getattr(iso, k) for iso in isophote_list])
if k in ('pa', 'pa_err'):
isotable[v] = isotable[v] * 180. / np.pi * u.deg
return isotable | python | def _isophote_list_to_table(isophote_list):
"""
Convert an `~photutils.isophote.IsophoteList` instance to
a `~astropy.table.QTable`.
Parameters
----------
isophote_list : list of `~photutils.isophote.Isophote` or a `~photutils.isophote.IsophoteList` instance
A list of isophotes.
Returns
-------
result : `~astropy.table.QTable`
An astropy QTable with the main isophote parameters.
"""
properties = OrderedDict()
properties['sma'] = 'sma'
properties['intens'] = 'intens'
properties['int_err'] = 'intens_err'
properties['eps'] = 'ellipticity'
properties['ellip_err'] = 'ellipticity_err'
properties['pa'] = 'pa'
properties['pa_err'] = 'pa_err'
properties['grad_r_error'] = 'grad_rerr'
properties['ndata'] = 'ndata'
properties['nflag'] = 'flag'
properties['niter'] = 'niter'
properties['stop_code'] = 'stop_code'
isotable = QTable()
for k, v in properties.items():
isotable[v] = np.array([getattr(iso, k) for iso in isophote_list])
if k in ('pa', 'pa_err'):
isotable[v] = isotable[v] * 180. / np.pi * u.deg
return isotable | [
"def",
"_isophote_list_to_table",
"(",
"isophote_list",
")",
":",
"properties",
"=",
"OrderedDict",
"(",
")",
"properties",
"[",
"'sma'",
"]",
"=",
"'sma'",
"properties",
"[",
"'intens'",
"]",
"=",
"'intens'",
"properties",
"[",
"'int_err'",
"]",
"=",
"'intens_err'",
"properties",
"[",
"'eps'",
"]",
"=",
"'ellipticity'",
"properties",
"[",
"'ellip_err'",
"]",
"=",
"'ellipticity_err'",
"properties",
"[",
"'pa'",
"]",
"=",
"'pa'",
"properties",
"[",
"'pa_err'",
"]",
"=",
"'pa_err'",
"properties",
"[",
"'grad_r_error'",
"]",
"=",
"'grad_rerr'",
"properties",
"[",
"'ndata'",
"]",
"=",
"'ndata'",
"properties",
"[",
"'nflag'",
"]",
"=",
"'flag'",
"properties",
"[",
"'niter'",
"]",
"=",
"'niter'",
"properties",
"[",
"'stop_code'",
"]",
"=",
"'stop_code'",
"isotable",
"=",
"QTable",
"(",
")",
"for",
"k",
",",
"v",
"in",
"properties",
".",
"items",
"(",
")",
":",
"isotable",
"[",
"v",
"]",
"=",
"np",
".",
"array",
"(",
"[",
"getattr",
"(",
"iso",
",",
"k",
")",
"for",
"iso",
"in",
"isophote_list",
"]",
")",
"if",
"k",
"in",
"(",
"'pa'",
",",
"'pa_err'",
")",
":",
"isotable",
"[",
"v",
"]",
"=",
"isotable",
"[",
"v",
"]",
"*",
"180.",
"/",
"np",
".",
"pi",
"*",
"u",
".",
"deg",
"return",
"isotable"
] | Convert an `~photutils.isophote.IsophoteList` instance to
a `~astropy.table.QTable`.
Parameters
----------
isophote_list : list of `~photutils.isophote.Isophote` or a `~photutils.isophote.IsophoteList` instance
A list of isophotes.
Returns
-------
result : `~astropy.table.QTable`
An astropy QTable with the main isophote parameters. | [
"Convert",
"an",
"~photutils",
".",
"isophote",
".",
"IsophoteList",
"instance",
"to",
"a",
"~astropy",
".",
"table",
".",
"QTable",
"."
] | cc9bb4534ab76bac98cb5f374a348a2573d10401 | https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/isophote/isophote.py#L730-L768 | train |
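The attribute-to-column mapping above generalizes to any list of objects with shared attributes. A toy version with a hypothetical namedtuple standing in for ``Isophote``, including the radians-to-degrees conversion applied to the position-angle columns:

from collections import namedtuple

import astropy.units as u
import numpy as np
from astropy.table import QTable

Iso = namedtuple('Iso', ['sma', 'intens', 'pa'])
isolist = [Iso(1., 100., 0.1), Iso(2., 80., 0.3)]

table = QTable()
for attr, col in (('sma', 'sma'), ('intens', 'intens'), ('pa', 'pa')):
    table[col] = np.array([getattr(iso, attr) for iso in isolist])
    if attr == 'pa':
        table[col] = table[col] * 180. / np.pi * u.deg
print(table)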
astropy/photutils | photutils/isophote/isophote.py | Isophote._compute_fluxes | def _compute_fluxes(self):
"""
Compute integrated flux inside ellipse, as well as inside a
circle defined with the same semimajor axis.
Pixels in a square section enclosing the circle are scanned; the
distance of each pixel to the isophote center is compared both
with the semimajor axis length and with the length of the
ellipse radius vector, and integrals are updated if the pixel
distance is smaller.
"""
# Compute limits of square array that encloses circle.
sma = self.sample.geometry.sma
x0 = self.sample.geometry.x0
y0 = self.sample.geometry.y0
xsize = self.sample.image.shape[1]
ysize = self.sample.image.shape[0]
imin = max(0, int(x0 - sma - 0.5) - 1)
jmin = max(0, int(y0 - sma - 0.5) - 1)
imax = min(xsize, int(x0 + sma + 0.5) + 1)
jmax = min(ysize, int(y0 + sma + 0.5) + 1)
# Integrate
if (jmax-jmin > 1) and (imax-imin) > 1:
y, x = np.mgrid[jmin:jmax, imin:imax]
radius, angle = self.sample.geometry.to_polar(x, y)
radius_e = self.sample.geometry.radius(angle)
midx = (radius <= sma)
values = self.sample.image[y[midx], x[midx]]
tflux_c = np.ma.sum(values)
npix_c = np.ma.count(values)
midx2 = (radius <= radius_e)
values = self.sample.image[y[midx2], x[midx2]]
tflux_e = np.ma.sum(values)
npix_e = np.ma.count(values)
else:
tflux_e = 0.
tflux_c = 0.
npix_e = 0
npix_c = 0
return tflux_e, tflux_c, npix_e, npix_c | python | def _compute_fluxes(self):
"""
Compute integrated flux inside ellipse, as well as inside a
circle defined with the same semimajor axis.
Pixels in a square section enclosing the circle are scanned; the
distance of each pixel to the isophote center is compared both
with the semimajor axis length and with the length of the
ellipse radius vector, and integrals are updated if the pixel
distance is smaller.
"""
# Compute limits of square array that encloses circle.
sma = self.sample.geometry.sma
x0 = self.sample.geometry.x0
y0 = self.sample.geometry.y0
xsize = self.sample.image.shape[1]
ysize = self.sample.image.shape[0]
imin = max(0, int(x0 - sma - 0.5) - 1)
jmin = max(0, int(y0 - sma - 0.5) - 1)
imax = min(xsize, int(x0 + sma + 0.5) + 1)
jmax = min(ysize, int(y0 + sma + 0.5) + 1)
# Integrate
if (jmax-jmin > 1) and (imax-imin) > 1:
y, x = np.mgrid[jmin:jmax, imin:imax]
radius, angle = self.sample.geometry.to_polar(x, y)
radius_e = self.sample.geometry.radius(angle)
midx = (radius <= sma)
values = self.sample.image[y[midx], x[midx]]
tflux_c = np.ma.sum(values)
npix_c = np.ma.count(values)
midx2 = (radius <= radius_e)
values = self.sample.image[y[midx2], x[midx2]]
tflux_e = np.ma.sum(values)
npix_e = np.ma.count(values)
else:
tflux_e = 0.
tflux_c = 0.
npix_e = 0
npix_c = 0
return tflux_e, tflux_c, npix_e, npix_c | [
"def",
"_compute_fluxes",
"(",
"self",
")",
":",
"# Compute limits of square array that encloses circle.",
"sma",
"=",
"self",
".",
"sample",
".",
"geometry",
".",
"sma",
"x0",
"=",
"self",
".",
"sample",
".",
"geometry",
".",
"x0",
"y0",
"=",
"self",
".",
"sample",
".",
"geometry",
".",
"y0",
"xsize",
"=",
"self",
".",
"sample",
".",
"image",
".",
"shape",
"[",
"1",
"]",
"ysize",
"=",
"self",
".",
"sample",
".",
"image",
".",
"shape",
"[",
"0",
"]",
"imin",
"=",
"max",
"(",
"0",
",",
"int",
"(",
"x0",
"-",
"sma",
"-",
"0.5",
")",
"-",
"1",
")",
"jmin",
"=",
"max",
"(",
"0",
",",
"int",
"(",
"y0",
"-",
"sma",
"-",
"0.5",
")",
"-",
"1",
")",
"imax",
"=",
"min",
"(",
"xsize",
",",
"int",
"(",
"x0",
"+",
"sma",
"+",
"0.5",
")",
"+",
"1",
")",
"jmax",
"=",
"min",
"(",
"ysize",
",",
"int",
"(",
"y0",
"+",
"sma",
"+",
"0.5",
")",
"+",
"1",
")",
"# Integrate",
"if",
"(",
"jmax",
"-",
"jmin",
">",
"1",
")",
"and",
"(",
"imax",
"-",
"imin",
")",
">",
"1",
":",
"y",
",",
"x",
"=",
"np",
".",
"mgrid",
"[",
"jmin",
":",
"jmax",
",",
"imin",
":",
"imax",
"]",
"radius",
",",
"angle",
"=",
"self",
".",
"sample",
".",
"geometry",
".",
"to_polar",
"(",
"x",
",",
"y",
")",
"radius_e",
"=",
"self",
".",
"sample",
".",
"geometry",
".",
"radius",
"(",
"angle",
")",
"midx",
"=",
"(",
"radius",
"<=",
"sma",
")",
"values",
"=",
"self",
".",
"sample",
".",
"image",
"[",
"y",
"[",
"midx",
"]",
",",
"x",
"[",
"midx",
"]",
"]",
"tflux_c",
"=",
"np",
".",
"ma",
".",
"sum",
"(",
"values",
")",
"npix_c",
"=",
"np",
".",
"ma",
".",
"count",
"(",
"values",
")",
"midx2",
"=",
"(",
"radius",
"<=",
"radius_e",
")",
"values",
"=",
"self",
".",
"sample",
".",
"image",
"[",
"y",
"[",
"midx2",
"]",
",",
"x",
"[",
"midx2",
"]",
"]",
"tflux_e",
"=",
"np",
".",
"ma",
".",
"sum",
"(",
"values",
")",
"npix_e",
"=",
"np",
".",
"ma",
".",
"count",
"(",
"values",
")",
"else",
":",
"tflux_e",
"=",
"0.",
"tflux_c",
"=",
"0.",
"npix_e",
"=",
"0",
"npix_c",
"=",
"0",
"return",
"tflux_e",
",",
"tflux_c",
",",
"npix_e",
",",
"npix_c"
] | Compute integrated flux inside ellipse, as well as inside a
circle defined with the same semimajor axis.
Pixels in a square section enclosing the circle are scanned; the
distance of each pixel to the isophote center is compared both
with the semimajor axis length and with the length of the
ellipse radius vector, and integrals are updated if the pixel
distance is smaller. | [
"Compute",
"integrated",
"flux",
"inside",
"ellipse",
"as",
"well",
"as",
"inside",
"a",
"circle",
"defined",
"with",
"the",
"same",
"semimajor",
"axis",
"."
] | cc9bb4534ab76bac98cb5f374a348a2573d10401 | https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/isophote/isophote.py#L176-L221 | train |
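The flux integration above reduces to: build a pixel grid just large enough to enclose the aperture, convert pixel positions to polar coordinates about the center, and sum the pixels whose radius falls inside. A circular-only toy version with simplified bounds on synthetic data:

import numpy as np

image = np.ones((101, 101))
x0 = y0 = 50.0
sma = 10.0

jmin, jmax = int(y0 - sma) - 1, int(y0 + sma) + 2
imin, imax = int(x0 - sma) - 1, int(x0 + sma) + 2
y, x = np.mgrid[jmin:jmax, imin:imax]
inside = np.hypot(x - x0, y - y0) <= sma
tflux_c = image[y[inside], x[inside]].sum()
print(tflux_c, np.pi * sma ** 2)   # pixel count tracks the circle area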
astropy/photutils | photutils/isophote/isophote.py | Isophote._compute_deviations | def _compute_deviations(self, sample, n):
"""
Compute deviations from a perfect ellipse, based on the
amplitudes and errors for harmonic "n". Note that we first
subtract the first and second harmonics from the raw data.
"""
try:
coeffs = fit_first_and_second_harmonics(self.sample.values[0],
self.sample.values[2])
coeffs = coeffs[0]
model = first_and_second_harmonic_function(self.sample.values[0],
coeffs)
residual = self.sample.values[2] - model
c = fit_upper_harmonic(residual, sample.values[2], n)
covariance = c[1]
ce = np.diagonal(covariance)
c = c[0]
a = c[1] / self.sma / sample.gradient
b = c[2] / self.sma / sample.gradient
# this comes from the old code. Likely it was based on
# empirical experience with the STSDAS task, so we leave
# it here without too much thought.
gre = self.grad_r_error if self.grad_r_error is not None else 0.64
a_err = abs(a) * np.sqrt((ce[1] / c[1])**2 + gre**2)
b_err = abs(b) * np.sqrt((ce[2] / c[2])**2 + gre**2)
except Exception: # we want to catch everything
a = b = a_err = b_err = None
return a, b, a_err, b_err | python | def _compute_deviations(self, sample, n):
"""
Compute deviations from a perfect ellipse, based on the
amplitudes and errors for harmonic "n". Note that we first
subtract the first and second harmonics from the raw data.
"""
try:
coeffs = fit_first_and_second_harmonics(self.sample.values[0],
self.sample.values[2])
coeffs = coeffs[0]
model = first_and_second_harmonic_function(self.sample.values[0],
coeffs)
residual = self.sample.values[2] - model
c = fit_upper_harmonic(residual, sample.values[2], n)
covariance = c[1]
ce = np.diagonal(covariance)
c = c[0]
a = c[1] / self.sma / sample.gradient
b = c[2] / self.sma / sample.gradient
# this comes from the old code. Likely it was based on
# empirical experience with the STSDAS task, so we leave
# it here without too much thought.
gre = self.grad_r_error if self.grad_r_error is not None else 0.64
a_err = abs(a) * np.sqrt((ce[1] / c[1])**2 + gre**2)
b_err = abs(b) * np.sqrt((ce[2] / c[2])**2 + gre**2)
except Exception: # we want to catch everything
a = b = a_err = b_err = None
return a, b, a_err, b_err | [
"def",
"_compute_deviations",
"(",
"self",
",",
"sample",
",",
"n",
")",
":",
"try",
":",
"coeffs",
"=",
"fit_first_and_second_harmonics",
"(",
"self",
".",
"sample",
".",
"values",
"[",
"0",
"]",
",",
"self",
".",
"sample",
".",
"values",
"[",
"2",
"]",
")",
"coeffs",
"=",
"coeffs",
"[",
"0",
"]",
"model",
"=",
"first_and_second_harmonic_function",
"(",
"self",
".",
"sample",
".",
"values",
"[",
"0",
"]",
",",
"coeffs",
")",
"residual",
"=",
"self",
".",
"sample",
".",
"values",
"[",
"2",
"]",
"-",
"model",
"c",
"=",
"fit_upper_harmonic",
"(",
"residual",
",",
"sample",
".",
"values",
"[",
"2",
"]",
",",
"n",
")",
"covariance",
"=",
"c",
"[",
"1",
"]",
"ce",
"=",
"np",
".",
"diagonal",
"(",
"covariance",
")",
"c",
"=",
"c",
"[",
"0",
"]",
"a",
"=",
"c",
"[",
"1",
"]",
"/",
"self",
".",
"sma",
"/",
"sample",
".",
"gradient",
"b",
"=",
"c",
"[",
"2",
"]",
"/",
"self",
".",
"sma",
"/",
"sample",
".",
"gradient",
"# this comes from the old code. Likely it was based on",
"# empirical experience with the STSDAS task, so we leave",
"# it here without too much thought.",
"gre",
"=",
"self",
".",
"grad_r_error",
"if",
"self",
".",
"grad_r_error",
"is",
"not",
"None",
"else",
"0.64",
"a_err",
"=",
"abs",
"(",
"a",
")",
"*",
"np",
".",
"sqrt",
"(",
"(",
"ce",
"[",
"1",
"]",
"/",
"c",
"[",
"1",
"]",
")",
"**",
"2",
"+",
"gre",
"**",
"2",
")",
"b_err",
"=",
"abs",
"(",
"b",
")",
"*",
"np",
".",
"sqrt",
"(",
"(",
"ce",
"[",
"2",
"]",
"/",
"c",
"[",
"2",
"]",
")",
"**",
"2",
"+",
"gre",
"**",
"2",
")",
"except",
"Exception",
":",
"# we want to catch everything",
"a",
"=",
"b",
"=",
"a_err",
"=",
"b_err",
"=",
"None",
"return",
"a",
",",
"b",
",",
"a_err",
",",
"b_err"
] | Compute deviations from a perfect ellipse, based on the
amplitudes and errors for harmonic "n". Note that we first
subtract the first and second harmonics from the raw data. | [
"Compute",
"deviations",
"from",
"a",
"perfect",
"ellipse",
"based",
"on",
"the",
"amplitudes",
"and",
"errors",
"for",
"harmonic",
"n",
".",
"Note",
"that",
"we",
"first",
"subtract",
"the",
"first",
"and",
"second",
"harmonics",
"from",
"the",
"raw",
"data",
"."
] | cc9bb4534ab76bac98cb5f374a348a2573d10401 | https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/isophote/isophote.py#L223-L257 | train |
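The deviation amplitudes above come from a linear least-squares fit of y(E) ~ y0 + an*sin(n*E) + bn*cos(n*E) to the harmonic-subtracted residuals, which photutils then normalizes by the semimajor axis times the radial gradient. A NumPy-only sketch of the fit itself, for n = 3 on synthetic residuals:

import numpy as np

n = 3
E = np.linspace(0., 2. * np.pi, 360, endpoint=False)
residual = 0.5 + 0.02 * np.sin(n * E) - 0.01 * np.cos(n * E)

design = np.column_stack([np.ones_like(E), np.sin(n * E), np.cos(n * E)])
coeffs, *_ = np.linalg.lstsq(design, residual, rcond=None)
print(coeffs)   # ~[0.5, 0.02, -0.01]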
astropy/photutils | photutils/isophote/isophote.py | Isophote._compute_errors | def _compute_errors(self):
"""
Compute parameter errors based on the diagonal of the covariance
matrix of the four harmonic coefficients for harmonics n=1 and
n=2.
"""
try:
coeffs = fit_first_and_second_harmonics(self.sample.values[0],
self.sample.values[2])
covariance = coeffs[1]
coeffs = coeffs[0]
model = first_and_second_harmonic_function(self.sample.values[0],
coeffs)
residual_rms = np.std(self.sample.values[2] - model)
errors = np.diagonal(covariance) * residual_rms
eps = self.sample.geometry.eps
pa = self.sample.geometry.pa
# parameter errors result from direct projection of
# coefficient errors. These showed to be the error estimators
# that best convey the errors measured in Monte Carlo
# experiments (see Busko 1996; ASPC 101, 139).
ea = abs(errors[2] / self.grad)
eb = abs(errors[1] * (1. - eps) / self.grad)
self.x0_err = np.sqrt((ea * np.cos(pa))**2 + (eb * np.sin(pa))**2)
self.y0_err = np.sqrt((ea * np.sin(pa))**2 + (eb * np.cos(pa))**2)
self.ellip_err = (abs(2. * errors[4] * (1. - eps) / self.sma /
self.grad))
if (abs(eps) > np.finfo(float).resolution):
self.pa_err = (abs(2. * errors[3] * (1. - eps) / self.sma /
self.grad / (1. - (1. - eps)**2)))
else:
self.pa_err = 0.
except Exception: # we want to catch everything
self.x0_err = self.y0_err = self.pa_err = self.ellip_err = 0. | python | def _compute_errors(self):
"""
Compute parameter errors based on the diagonal of the covariance
matrix of the four harmonic coefficients for harmonics n=1 and
n=2.
"""
try:
coeffs = fit_first_and_second_harmonics(self.sample.values[0],
self.sample.values[2])
covariance = coeffs[1]
coeffs = coeffs[0]
model = first_and_second_harmonic_function(self.sample.values[0],
coeffs)
residual_rms = np.std(self.sample.values[2] - model)
errors = np.diagonal(covariance) * residual_rms
eps = self.sample.geometry.eps
pa = self.sample.geometry.pa
# parameter errors result from direct projection of
# coefficient errors. These showed to be the error estimators
# that best convey the errors measured in Monte Carlo
# experiments (see Busko 1996; ASPC 101, 139).
ea = abs(errors[2] / self.grad)
eb = abs(errors[1] * (1. - eps) / self.grad)
self.x0_err = np.sqrt((ea * np.cos(pa))**2 + (eb * np.sin(pa))**2)
self.y0_err = np.sqrt((ea * np.sin(pa))**2 + (eb * np.cos(pa))**2)
self.ellip_err = (abs(2. * errors[4] * (1. - eps) / self.sma /
self.grad))
if (abs(eps) > np.finfo(float).resolution):
self.pa_err = (abs(2. * errors[3] * (1. - eps) / self.sma /
self.grad / (1. - (1. - eps)**2)))
else:
self.pa_err = 0.
except Exception: # we want to catch everything
self.x0_err = self.y0_err = self.pa_err = self.ellip_err = 0. | [
"def",
"_compute_errors",
"(",
"self",
")",
":",
"try",
":",
"coeffs",
"=",
"fit_first_and_second_harmonics",
"(",
"self",
".",
"sample",
".",
"values",
"[",
"0",
"]",
",",
"self",
".",
"sample",
".",
"values",
"[",
"2",
"]",
")",
"covariance",
"=",
"coeffs",
"[",
"1",
"]",
"coeffs",
"=",
"coeffs",
"[",
"0",
"]",
"model",
"=",
"first_and_second_harmonic_function",
"(",
"self",
".",
"sample",
".",
"values",
"[",
"0",
"]",
",",
"coeffs",
")",
"residual_rms",
"=",
"np",
".",
"std",
"(",
"self",
".",
"sample",
".",
"values",
"[",
"2",
"]",
"-",
"model",
")",
"errors",
"=",
"np",
".",
"diagonal",
"(",
"covariance",
")",
"*",
"residual_rms",
"eps",
"=",
"self",
".",
"sample",
".",
"geometry",
".",
"eps",
"pa",
"=",
"self",
".",
"sample",
".",
"geometry",
".",
"pa",
"# parameter errors result from direct projection of",
"# coefficient errors. These showed to be the error estimators",
"# that best convey the errors measured in Monte Carlo",
"# experiments (see Busko 1996; ASPC 101, 139).",
"ea",
"=",
"abs",
"(",
"errors",
"[",
"2",
"]",
"/",
"self",
".",
"grad",
")",
"eb",
"=",
"abs",
"(",
"errors",
"[",
"1",
"]",
"*",
"(",
"1.",
"-",
"eps",
")",
"/",
"self",
".",
"grad",
")",
"self",
".",
"x0_err",
"=",
"np",
".",
"sqrt",
"(",
"(",
"ea",
"*",
"np",
".",
"cos",
"(",
"pa",
")",
")",
"**",
"2",
"+",
"(",
"eb",
"*",
"np",
".",
"sin",
"(",
"pa",
")",
")",
"**",
"2",
")",
"self",
".",
"y0_err",
"=",
"np",
".",
"sqrt",
"(",
"(",
"ea",
"*",
"np",
".",
"sin",
"(",
"pa",
")",
")",
"**",
"2",
"+",
"(",
"eb",
"*",
"np",
".",
"cos",
"(",
"pa",
")",
")",
"**",
"2",
")",
"self",
".",
"ellip_err",
"=",
"(",
"abs",
"(",
"2.",
"*",
"errors",
"[",
"4",
"]",
"*",
"(",
"1.",
"-",
"eps",
")",
"/",
"self",
".",
"sma",
"/",
"self",
".",
"grad",
")",
")",
"if",
"(",
"abs",
"(",
"eps",
")",
">",
"np",
".",
"finfo",
"(",
"float",
")",
".",
"resolution",
")",
":",
"self",
".",
"pa_err",
"=",
"(",
"abs",
"(",
"2.",
"*",
"errors",
"[",
"3",
"]",
"*",
"(",
"1.",
"-",
"eps",
")",
"/",
"self",
".",
"sma",
"/",
"self",
".",
"grad",
"/",
"(",
"1.",
"-",
"(",
"1.",
"-",
"eps",
")",
"**",
"2",
")",
")",
")",
"else",
":",
"self",
".",
"pa_err",
"=",
"0.",
"except",
"Exception",
":",
"# we want to catch everything",
"self",
".",
"x0_err",
"=",
"self",
".",
"y0_err",
"=",
"self",
".",
"pa_err",
"=",
"self",
".",
"ellip_err",
"=",
"0."
] | Compute parameter errors based on the diagonal of the covariance
matrix of the four harmonic coefficients for harmonics n=1 and
n=2. | [
"Compute",
"parameter",
"errors",
"based",
"on",
"the",
"diagonal",
"of",
"the",
"covariance",
"matrix",
"of",
"the",
"four",
"harmonic",
"coefficients",
"for",
"harmonics",
"n",
"=",
"1",
"and",
"n",
"=",
"2",
"."
] | cc9bb4534ab76bac98cb5f374a348a2573d10401 | https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/isophote/isophote.py#L259-L295 | train |
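The center errors above are direct projections of the harmonic-coefficient errors through the intensity gradient and the position angle (the approach the comment attributes to Busko 1996). Evaluated numerically with purely illustrative inputs:

import numpy as np

errors = np.array([0., 0.05, 0.08])     # made-up coefficient errors
grad, eps, pa = -0.5, 0.3, np.deg2rad(40.)

ea = abs(errors[2] / grad)
eb = abs(errors[1] * (1. - eps) / grad)
x0_err = np.hypot(ea * np.cos(pa), eb * np.sin(pa))
y0_err = np.hypot(ea * np.sin(pa), eb * np.cos(pa))
print(x0_err, y0_err)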
astropy/photutils | photutils/isophote/isophote.py | Isophote.fix_geometry | def fix_geometry(self, isophote):
"""
Fix the geometry of a problematic isophote to be identical to
the input isophote.
This method should be called when the fitting goes berserk and
delivers an isophote with bad geometry, such as ellipticity > 1
or another meaningless situation. This is not a problem in
itself when fitting any given isophote, but will create an error
when the affected isophote is used as starting guess for the
next fit.
Parameters
----------
isophote : `~photutils.isophote.Isophote` instance
The isophote from which to take the geometry information.
"""
self.sample.geometry.eps = isophote.sample.geometry.eps
self.sample.geometry.pa = isophote.sample.geometry.pa
self.sample.geometry.x0 = isophote.sample.geometry.x0
self.sample.geometry.y0 = isophote.sample.geometry.y0 | python | def fix_geometry(self, isophote):
"""
Fix the geometry of a problematic isophote to be identical to
the input isophote.
This method should be called when the fitting goes berserk and
delivers an isophote with bad geometry, such as ellipticity > 1
or another meaningless situation. This is not a problem in
itself when fitting any given isophote, but will create an error
when the affected isophote is used as starting guess for the
next fit.
Parameters
----------
isophote : `~photutils.isophote.Isophote` instance
The isophote from which to take the geometry information.
"""
self.sample.geometry.eps = isophote.sample.geometry.eps
self.sample.geometry.pa = isophote.sample.geometry.pa
self.sample.geometry.x0 = isophote.sample.geometry.x0
self.sample.geometry.y0 = isophote.sample.geometry.y0 | [
"def",
"fix_geometry",
"(",
"self",
",",
"isophote",
")",
":",
"self",
".",
"sample",
".",
"geometry",
".",
"eps",
"=",
"isophote",
".",
"sample",
".",
"geometry",
".",
"eps",
"self",
".",
"sample",
".",
"geometry",
".",
"pa",
"=",
"isophote",
".",
"sample",
".",
"geometry",
".",
"pa",
"self",
".",
"sample",
".",
"geometry",
".",
"x0",
"=",
"isophote",
".",
"sample",
".",
"geometry",
".",
"x0",
"self",
".",
"sample",
".",
"geometry",
".",
"y0",
"=",
"isophote",
".",
"sample",
".",
"geometry",
".",
"y0"
] | Fix the geometry of a problematic isophote to be identical to
the input isophote.
This method should be called when the fitting goes berserk and
delivers an isophote with bad geometry, such as ellipticity > 1
or another meaningless situation. This is not a problem in
itself when fitting any given isophote, but will create an error
when the affected isophote is used as starting guess for the
next fit.
Parameters
----------
isophote : `~photutils.isophote.Isophote` instance
The isophote from which to take the geometry information. | [
"Fix",
"the",
"geometry",
"of",
"a",
"problematic",
"isophote",
"to",
"be",
"identical",
"to",
"the",
"input",
"isophote",
"."
] | cc9bb4534ab76bac98cb5f374a348a2573d10401 | https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/isophote/isophote.py#L297-L318 | train |
astropy/photutils | photutils/isophote/isophote.py | IsophoteList.get_closest | def get_closest(self, sma):
"""
Return the `~photutils.isophote.Isophote` instance that has the
closest semimajor axis length to the input semimajor axis.
Parameters
----------
sma : float
The semimajor axis length.
Returns
-------
isophote : `~photutils.isophote.Isophote` instance
The isophote with the closest semimajor axis value.
"""
index = (np.abs(self.sma - sma)).argmin()
return self._list[index] | python | def get_closest(self, sma):
"""
Return the `~photutils.isophote.Isophote` instance that has the
closest semimajor axis length to the input semimajor axis.
Parameters
----------
sma : float
The semimajor axis length.
Returns
-------
isophote : `~photutils.isophote.Isophote` instance
The isophote with the closest semimajor axis value.
"""
index = (np.abs(self.sma - sma)).argmin()
return self._list[index] | [
"def",
"get_closest",
"(",
"self",
",",
"sma",
")",
":",
"index",
"=",
"(",
"np",
".",
"abs",
"(",
"self",
".",
"sma",
"-",
"sma",
")",
")",
".",
"argmin",
"(",
")",
"return",
"self",
".",
"_list",
"[",
"index",
"]"
] | Return the `~photutils.isophote.Isophote` instance that has the
closest semimajor axis length to the input semimajor axis.
Parameters
----------
sma : float
The semimajor axis length.
Returns
-------
isophote : `~photutils.isophote.Isophote` instance
The isophote with the closest semimajor axis value. | [
"Return",
"the",
"~photutils",
".",
"isophote",
".",
"Isophote",
"instance",
"that",
"has",
"the",
"closest",
"semimajor",
"axis",
"length",
"to",
"the",
"input",
"semimajor",
"axis",
"."
] | cc9bb4534ab76bac98cb5f374a348a2573d10401 | https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/isophote/isophote.py#L468-L485 | train |
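``get_closest`` above is a plain nearest-value lookup over the semimajor-axis array. The same argmin idiom in isolation:

import numpy as np

sma = np.array([1., 2., 4., 8., 16.])
target = 5.
index = np.abs(sma - target).argmin()
print(index, sma[index])   # 2 4.0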
astropy/photutils | photutils/utils/interpolation.py | interpolate_masked_data | def interpolate_masked_data(data, mask, error=None, background=None):
"""
Interpolate over masked pixels in data and optional error or
background images.
The values of masked pixels are replaced by the mean value of the
connected neighboring non-masked pixels. This function is intended
for single, isolated masked pixels (e.g. hot/warm pixels).
Parameters
----------
data : array_like or `~astropy.units.Quantity`
The data array.
mask : array_like (bool)
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
error : array_like or `~astropy.units.Quantity`, optional
The pixel-wise Gaussian 1-sigma errors of the input ``data``.
``error`` must have the same shape as ``data``.
background : array_like, or `~astropy.units.Quantity`, optional
The pixel-wise background level of the input ``data``.
``background`` must have the same shape as ``data``.
Returns
-------
data : `~numpy.ndarray` or `~astropy.units.Quantity`
Input ``data`` with interpolated masked pixels.
error : `~numpy.ndarray` or `~astropy.units.Quantity`
Input ``error`` with interpolated masked pixels. `None` if
``error`` is not input.
background : `~numpy.ndarray` or `~astropy.units.Quantity`
Input ``background`` with interpolated masked pixels. `None` if
``background`` is not input.
"""
if data.shape != mask.shape:
raise ValueError('data and mask must have the same shape')
data_out = np.copy(data) # do not alter input data
mask_idx = mask.nonzero()
if mask_idx[0].size == 0:
raise ValueError('All items in data are masked')
for x in zip(*mask_idx):
X = np.array([[max(x[i] - 1, 0), min(x[i] + 1, data.shape[i] - 1)]
for i in range(len(data.shape))])
goodpix = ~mask[X]
if not np.any(goodpix):
warnings.warn('The masked pixel at "{}" is completely '
'surrounded by (connected) masked pixels, '
'thus unable to interpolate'.format(x,),
AstropyUserWarning)
continue
data_out[x] = np.mean(data[X][goodpix])
if background is not None:
if background.shape != data.shape:
raise ValueError('background and data must have the same '
'shape')
background_out = np.copy(background)
background_out[x] = np.mean(background[X][goodpix])
else:
background_out = None
if error is not None:
if error.shape != data.shape:
raise ValueError('error and data must have the same '
'shape')
error_out = np.copy(error)
error_out[x] = np.sqrt(np.mean(error[X][goodpix]**2))
else:
error_out = None
return data_out, error_out, background_out | python | def interpolate_masked_data(data, mask, error=None, background=None):
"""
Interpolate over masked pixels in data and optional error or
background images.
The values of masked pixels are replaced by the mean value of the
connected neighboring non-masked pixels. This function is intended
for single, isolated masked pixels (e.g. hot/warm pixels).
Parameters
----------
data : array_like or `~astropy.units.Quantity`
The data array.
mask : array_like (bool)
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
error : array_like or `~astropy.units.Quantity`, optional
The pixel-wise Gaussian 1-sigma errors of the input ``data``.
``error`` must have the same shape as ``data``.
background : array_like or `~astropy.units.Quantity`, optional
The pixel-wise background level of the input ``data``.
``background`` must have the same shape as ``data``.
Returns
-------
data : `~numpy.ndarray` or `~astropy.units.Quantity`
Input ``data`` with interpolated masked pixels.
error : `~numpy.ndarray` or `~astropy.units.Quantity`
Input ``error`` with interpolated masked pixels. `None` if
``error`` is not provided.
background : `~numpy.ndarray` or `~astropy.units.Quantity`
Input ``background`` with interpolated masked pixels. `None` if
``background`` is not provided.
"""
if data.shape != mask.shape:
raise ValueError('data and mask must have the same shape')
data_out = np.copy(data) # do not alter input data
mask_idx = mask.nonzero()
if mask_idx[0].size == 0:
raise ValueError('All items in data are masked')
for x in zip(*mask_idx):
X = np.array([[max(x[i] - 1, 0), min(x[i] + 1, data.shape[i] - 1)]
for i in range(len(data.shape))])
goodpix = ~mask[X]
if not np.any(goodpix):
warnings.warn('The masked pixel at "{}" is completely '
'surrounded by (connected) masked pixels, '
'thus unable to interpolate'.format(x,),
AstropyUserWarning)
continue
data_out[x] = np.mean(data[X][goodpix])
if background is not None:
if background.shape != data.shape:
raise ValueError('background and data must have the same '
'shape')
background_out = np.copy(background)
background_out[x] = np.mean(background[X][goodpix])
else:
background_out = None
if error is not None:
if error.shape != data.shape:
raise ValueError('error and data must have the same '
'shape')
error_out = np.copy(error)
error_out[x] = np.sqrt(np.mean(error[X][goodpix]**2))
else:
error_out = None
return data_out, error_out, background_out | [
"def",
"interpolate_masked_data",
"(",
"data",
",",
"mask",
",",
"error",
"=",
"None",
",",
"background",
"=",
"None",
")",
":",
"if",
"data",
".",
"shape",
"!=",
"mask",
".",
"shape",
":",
"raise",
"ValueError",
"(",
"'data and mask must have the same shape'",
")",
"data_out",
"=",
"np",
".",
"copy",
"(",
"data",
")",
"# do not alter input data",
"mask_idx",
"=",
"mask",
".",
"nonzero",
"(",
")",
"if",
"mask_idx",
"[",
"0",
"]",
".",
"size",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"'All items in data are masked'",
")",
"for",
"x",
"in",
"zip",
"(",
"*",
"mask_idx",
")",
":",
"X",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"max",
"(",
"x",
"[",
"i",
"]",
"-",
"1",
",",
"0",
")",
",",
"min",
"(",
"x",
"[",
"i",
"]",
"+",
"1",
",",
"data",
".",
"shape",
"[",
"i",
"]",
"-",
"1",
")",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"data",
".",
"shape",
")",
")",
"]",
")",
"goodpix",
"=",
"~",
"mask",
"[",
"X",
"]",
"if",
"not",
"np",
".",
"any",
"(",
"goodpix",
")",
":",
"warnings",
".",
"warn",
"(",
"'The masked pixel at \"{}\" is completely '",
"'surrounded by (connected) masked pixels, '",
"'thus unable to interpolate'",
".",
"format",
"(",
"x",
",",
")",
",",
"AstropyUserWarning",
")",
"continue",
"data_out",
"[",
"x",
"]",
"=",
"np",
".",
"mean",
"(",
"data",
"[",
"X",
"]",
"[",
"goodpix",
"]",
")",
"if",
"background",
"is",
"not",
"None",
":",
"if",
"background",
".",
"shape",
"!=",
"data",
".",
"shape",
":",
"raise",
"ValueError",
"(",
"'background and data must have the same '",
"'shape'",
")",
"background_out",
"=",
"np",
".",
"copy",
"(",
"background",
")",
"background_out",
"[",
"x",
"]",
"=",
"np",
".",
"mean",
"(",
"background",
"[",
"X",
"]",
"[",
"goodpix",
"]",
")",
"else",
":",
"background_out",
"=",
"None",
"if",
"error",
"is",
"not",
"None",
":",
"if",
"error",
".",
"shape",
"!=",
"data",
".",
"shape",
":",
"raise",
"ValueError",
"(",
"'error and data must have the same '",
"'shape'",
")",
"error_out",
"=",
"np",
".",
"copy",
"(",
"error",
")",
"error_out",
"[",
"x",
"]",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"mean",
"(",
"error",
"[",
"X",
"]",
"[",
"goodpix",
"]",
"**",
"2",
")",
")",
"else",
":",
"error_out",
"=",
"None",
"return",
"data_out",
",",
"error_out",
",",
"background_out"
] | Interpolate over masked pixels in data and optional error or
background images.
The values of masked pixels are replaced by the mean value of the
connected neighboring non-masked pixels. This function is intended
for single, isolated masked pixels (e.g. hot/warm pixels).
Parameters
----------
data : array_like or `~astropy.units.Quantity`
The data array.
mask : array_like (bool)
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
error : array_like or `~astropy.units.Quantity`, optional
The pixel-wise Gaussian 1-sigma errors of the input ``data``.
``error`` must have the same shape as ``data``.
background : array_like or `~astropy.units.Quantity`, optional
The pixel-wise background level of the input ``data``.
``background`` must have the same shape as ``data``.
Returns
-------
data : `~numpy.ndarray` or `~astropy.units.Quantity`
Input ``data`` with interpolated masked pixels.
error : `~numpy.ndarray` or `~astropy.units.Quantity`
Input ``error`` with interpolated masked pixels. `None` if
``error`` is not provided.
background : `~numpy.ndarray` or `~astropy.units.Quantity`
Input ``background`` with interpolated masked pixels. `None` if
``background`` is not provided. | [
"Interpolate",
"over",
"masked",
"pixels",
"in",
"data",
"and",
"optional",
"error",
"or",
"background",
"images",
"."
] | cc9bb4534ab76bac98cb5f374a348a2573d10401 | https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/utils/interpolation.py#L289-L370 | train |
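A short, hedged usage sketch for the function above; the import path assumes `interpolate_masked_data` is exported from `photutils.utils` as in this revision, and the data are illustrative.

```python
# One isolated hot pixel gets replaced by the mean of its unmasked
# neighbors; error and background are omitted, so those outputs are None.
import numpy as np
from photutils.utils import interpolate_masked_data

data = np.ones((5, 5))
data[2, 2] = 100.0                      # isolated hot pixel
mask = np.zeros(data.shape, dtype=bool)
mask[2, 2] = True

data_out, error_out, background_out = interpolate_masked_data(data, mask)
print(data_out[2, 2])    # -> 1.0
print(error_out is None)  # -> True
```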
google/pyringe | pyringe/plugins/inject_sentinel.py | SentinelInjectPlugin.ThreadsWithRunningExecServers | def ThreadsWithRunningExecServers(self):
"""Returns a list of tids of inferior threads with open exec servers."""
socket_dir = '/tmp/pyringe_%s' % self.inferior.pid
if os.path.isdir(socket_dir):
return [int(fname[:-9])
for fname in os.listdir(socket_dir)
if fname.endswith('.execsock')]
return [] | python | def ThreadsWithRunningExecServers(self):
"""Returns a list of tids of inferior threads with open exec servers."""
socket_dir = '/tmp/pyringe_%s' % self.inferior.pid
if os.path.isdir(socket_dir):
return [int(fname[:-9])
for fname in os.listdir(socket_dir)
if fname.endswith('.execsock')]
return [] | [
"def",
"ThreadsWithRunningExecServers",
"(",
"self",
")",
":",
"socket_dir",
"=",
"'/tmp/pyringe_%s'",
"%",
"self",
".",
"inferior",
".",
"pid",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"socket_dir",
")",
":",
"return",
"[",
"int",
"(",
"fname",
"[",
":",
"-",
"9",
"]",
")",
"for",
"fname",
"in",
"os",
".",
"listdir",
"(",
"socket_dir",
")",
"if",
"fname",
".",
"endswith",
"(",
"'.execsock'",
")",
"]",
"return",
"[",
"]"
] | Returns a list of tids of inferior threads with open exec servers. | [
"Returns",
"a",
"list",
"of",
"tids",
"of",
"inferior",
"threads",
"with",
"open",
"exec",
"servers",
"."
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/plugins/inject_sentinel.py#L46-L53 | train |
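A sketch of the socket naming convention the method scans, derived from the path handling above; the pid and tid values are made up.

```python
# /tmp/pyringe_<pid>/<tid>.execsock; the [:-9] slice in the method
# strips the 9-character '.execsock' suffix to recover the tid.
import os

pid, tid = 11894, 139683379791616      # illustrative ids
socket_dir = '/tmp/pyringe_%s' % pid
fname = '%s.execsock' % tid
print(os.path.join(socket_dir, fname))
print(int(fname[:-9]) == tid)          # -> True
```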
google/pyringe | pyringe/plugins/inject_sentinel.py | SentinelInjectPlugin.SendToExecSocket | def SendToExecSocket(self, code, tid=None):
"""Inject python code into exec socket."""
response = self._SendToExecSocketRaw(json.dumps(code), tid)
return json.loads(response) | python | def SendToExecSocket(self, code, tid=None):
"""Inject python code into exec socket."""
response = self._SendToExecSocketRaw(json.dumps(code), tid)
return json.loads(response) | [
"def",
"SendToExecSocket",
"(",
"self",
",",
"code",
",",
"tid",
"=",
"None",
")",
":",
"response",
"=",
"self",
".",
"_SendToExecSocketRaw",
"(",
"json",
".",
"dumps",
"(",
"code",
")",
",",
"tid",
")",
"return",
"json",
".",
"loads",
"(",
"response",
")"
] | Inject python code into exec socket. | [
"Inject",
"python",
"code",
"into",
"exec",
"socket",
"."
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/plugins/inject_sentinel.py#L55-L58 | train |
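A hedged sketch of the JSON framing implied by `SendToExecSocket`: the code string is JSON-encoded before transmission and the reply is JSON-decoded. The stand-in server below is illustrative, not pyringe's real exec server, and its reply shape is an assumption.

```python
import json

def fake_exec_server(raw):
    """Stand-in for the inferior-side exec server (hypothetical reply)."""
    code = json.loads(raw)                 # what would be exec'd remotely
    return json.dumps({'executed': code})  # assumed reply shape

reply = fake_exec_server(json.dumps('x = 1 + 1'))
print(json.loads(reply))
```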
google/pyringe | pyringe/plugins/inject_sentinel.py | SentinelInjectPlugin.CloseExecSocket | def CloseExecSocket(self, tid=None):
"""Send closing request to exec socket."""
response = self._SendToExecSocketRaw('__kill__', tid)
if response != '__kill_ack__':
logging.warning('May not have succeeded in closing socket, make sure '
'using execsocks().') | python | def CloseExecSocket(self, tid=None):
"""Send closing request to exec socket."""
response = self._SendToExecSocketRaw('__kill__', tid)
if response != '__kill_ack__':
logging.warning('May not have succeeded in closing socket, make sure '
'using execsocks().') | [
"def",
"CloseExecSocket",
"(",
"self",
",",
"tid",
"=",
"None",
")",
":",
"response",
"=",
"self",
".",
"_SendToExecSocketRaw",
"(",
"'__kill__'",
",",
"tid",
")",
"if",
"response",
"!=",
"'__kill_ack__'",
":",
"logging",
".",
"warning",
"(",
"'May not have succeeded in closing socket, make sure '",
"'using execsocks().'",
")"
] | Send closing request to exec socket. | [
"Send",
"closing",
"request",
"to",
"exec",
"socket",
"."
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/plugins/inject_sentinel.py#L79-L84 | train |
google/pyringe | pyringe/plugins/read_only.py | ReadonlyPlugin.Backtrace | def Backtrace(self, to_string=False):
"""Get a backtrace of the current position."""
if self.inferior.is_running:
res = self.inferior.Backtrace()
if to_string:
return res
print res
else:
logging.error('Not attached to any process.') | python | def Backtrace(self, to_string=False):
"""Get a backtrace of the current position."""
if self.inferior.is_running:
res = self.inferior.Backtrace()
if to_string:
return res
print res
else:
logging.error('Not attached to any process.') | [
"def",
"Backtrace",
"(",
"self",
",",
"to_string",
"=",
"False",
")",
":",
"if",
"self",
".",
"inferior",
".",
"is_running",
":",
"res",
"=",
"self",
".",
"inferior",
".",
"Backtrace",
"(",
")",
"if",
"to_string",
":",
"return",
"res",
"print",
"res",
"else",
":",
"logging",
".",
"error",
"(",
"'Not attached to any process.'",
")"
] | Get a backtrace of the current position. | [
"Get",
"a",
"backtrace",
"of",
"the",
"current",
"position",
"."
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/plugins/read_only.py#L52-L60 | train |
google/pyringe | pyringe/plugins/read_only.py | ReadonlyPlugin.ListThreads | def ListThreads(self):
"""List the currently running python threads.
Returns:
A list of the inferior's thread idents, or None if the debugger is not
attached to any process.
"""
if self.inferior.is_running:
return self.inferior.threads
logging.error('Not attached to any process.')
return [] | python | def ListThreads(self):
"""List the currently running python threads.
Returns:
A list of the inferior's thread idents, or None if the debugger is not
attached to any process.
"""
if self.inferior.is_running:
return self.inferior.threads
logging.error('Not attached to any process.')
return [] | [
"def",
"ListThreads",
"(",
"self",
")",
":",
"if",
"self",
".",
"inferior",
".",
"is_running",
":",
"return",
"self",
".",
"inferior",
".",
"threads",
"logging",
".",
"error",
"(",
"'Not attached to any process.'",
")",
"return",
"[",
"]"
] | List the currently running python threads.
Returns:
A list of the inferior's thread idents, or None if the debugger is not
attached to any process. | [
"List",
"the",
"currently",
"running",
"python",
"threads",
"."
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/plugins/read_only.py#L86-L96 | train |
google/pyringe | pyringe/payload/gdb_service.py | PyFrameObjectPtr.extract_filename | def extract_filename(self):
"""Alternative way of getting the executed file which inspects globals."""
globals_gdbval = self._gdbval['f_globals'].cast(GdbCache.DICT)
global_dict = libpython.PyDictObjectPtr(globals_gdbval)
for key, value in global_dict.iteritems():
if str(key.proxyval(set())) == '__file__':
return str(value.proxyval(set())) | python | def extract_filename(self):
"""Alternative way of getting the executed file which inspects globals."""
globals_gdbval = self._gdbval['f_globals'].cast(GdbCache.DICT)
global_dict = libpython.PyDictObjectPtr(globals_gdbval)
for key, value in global_dict.iteritems():
if str(key.proxyval(set())) == '__file__':
return str(value.proxyval(set())) | [
"def",
"extract_filename",
"(",
"self",
")",
":",
"globals_gdbval",
"=",
"self",
".",
"_gdbval",
"[",
"'f_globals'",
"]",
".",
"cast",
"(",
"GdbCache",
".",
"DICT",
")",
"global_dict",
"=",
"libpython",
".",
"PyDictObjectPtr",
"(",
"globals_gdbval",
")",
"for",
"key",
",",
"value",
"in",
"global_dict",
".",
"iteritems",
"(",
")",
":",
"if",
"str",
"(",
"key",
".",
"proxyval",
"(",
"set",
"(",
")",
")",
")",
"==",
"'__file__'",
":",
"return",
"str",
"(",
"value",
".",
"proxyval",
"(",
"set",
"(",
")",
")",
")"
] | Alternative way of getting the executed file which inspects globals. | [
"Alternative",
"way",
"of",
"getting",
"the",
"executed",
"file",
"which",
"inspects",
"globals",
"."
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/gdb_service.py#L157-L163 | train |
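A plain-Python analogue of the globals scan above, for illustration; the dict stands in for the gdb-side `f_globals` proxy.

```python
def extract_filename(frame_globals):
    # Mirror of the lookup above: find '__file__' among the globals.
    for key, value in frame_globals.items():
        if str(key) == '__file__':
            return str(value)

print(extract_filename({'__name__': '__main__', '__file__': 'test.py'}))
```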
google/pyringe | pyringe/payload/gdb_service.py | GdbService._UnserializableObjectFallback | def _UnserializableObjectFallback(self, obj):
"""Handles sanitizing of unserializable objects for Json.
For instances of heap types, we take the class dict, augment it with the
instance's __dict__, tag it and transmit it over to the RPC client to be
reconstructed there. (Works with both old and new style classes)
Args:
obj: The object to Json-serialize
Returns:
A Json-serializable version of the parameter
"""
if isinstance(obj, libpython.PyInstanceObjectPtr):
# old-style classes use 'classobj'/'instance'
# get class attribute dictionary
in_class = obj.pyop_field('in_class')
result_dict = in_class.pyop_field('cl_dict').proxyval(set())
# let libpython.py do the work of getting the instance dict
instanceproxy = obj.proxyval(set())
result_dict.update(instanceproxy.attrdict)
result_dict['__pyringe_type_name__'] = instanceproxy.cl_name
result_dict['__pyringe_address__'] = instanceproxy.address
return result_dict
if isinstance(obj, libpython.HeapTypeObjectPtr):
# interestingly enough, HeapTypeObjectPtr seems to handle all pointers to
# heap type PyObjects, not only pointers to PyHeapTypeObject. This
# corresponds to new-style class instances. However, as all instances of
# new-style classes are simple PyObject pointers to the interpreter,
# libpython.py tends to give us HeapTypeObjectPtrs for things we can't
# handle properly.
try:
# get class attribute dictionary
type_ptr = obj.field('ob_type')
tp_dict = type_ptr.cast(GdbCache.TYPE)['tp_dict'].cast(GdbCache.DICT)
result_dict = libpython.PyDictObjectPtr(tp_dict).proxyval(set())
except gdb.error:
# There was probably a type mismatch triggered by wrong assumptions in
# libpython.py
result_dict = {}
try:
# get instance attributes
result_dict.update(obj.get_attr_dict().proxyval(set()))
result_dict['__pyringe_type_name__'] = obj.safe_tp_name()
result_dict['__pyringe_address__'] = long(obj._gdbval) # pylint: disable=protected-access
return result_dict
except TypeError:
# This happens in the case where we're not really looking at a heap type
# instance. There isn't really anything we can do, so we fall back to
# the default handling.
pass
# Default handler -- this does not result in proxy objects or fancy dicts,
# but most of the time, we end up emitting strings of the format
# '<object at remote 0x345a235>'
try:
proxy = obj.proxyval(set())
# json doesn't accept non-strings as keys, so we're helping along
if isinstance(proxy, dict):
return {str(key): val for key, val in proxy.iteritems()}
return proxy
except AttributeError:
return str(obj) | python | def _UnserializableObjectFallback(self, obj):
"""Handles sanitizing of unserializable objects for Json.
For instances of heap types, we take the class dict, augment it with the
instance's __dict__, tag it and transmit it over to the RPC client to be
reconstructed there. (Works with both old and new style classes)
Args:
obj: The object to Json-serialize
Returns:
A Json-serializable version of the parameter
"""
if isinstance(obj, libpython.PyInstanceObjectPtr):
# old-style classes use 'classobj'/'instance'
# get class attribute dictionary
in_class = obj.pyop_field('in_class')
result_dict = in_class.pyop_field('cl_dict').proxyval(set())
# let libpython.py do the work of getting the instance dict
instanceproxy = obj.proxyval(set())
result_dict.update(instanceproxy.attrdict)
result_dict['__pyringe_type_name__'] = instanceproxy.cl_name
result_dict['__pyringe_address__'] = instanceproxy.address
return result_dict
if isinstance(obj, libpython.HeapTypeObjectPtr):
# interestingly enough, HeapTypeObjectPtr seems to handle all pointers to
# heap type PyObjects, not only pointers to PyHeapTypeObject. This
# corresponds to new-style class instances. However, as all instances of
# new-style classes are simple PyObject pointers to the interpreter,
# libpython.py tends to give us HeapTypeObjectPtrs for things we can't
# handle properly.
try:
# get class attribute dictionary
type_ptr = obj.field('ob_type')
tp_dict = type_ptr.cast(GdbCache.TYPE)['tp_dict'].cast(GdbCache.DICT)
result_dict = libpython.PyDictObjectPtr(tp_dict).proxyval(set())
except gdb.error:
# There was probably a type mismatch triggered by wrong assumptions in
# libpython.py
result_dict = {}
try:
# get instance attributes
result_dict.update(obj.get_attr_dict().proxyval(set()))
result_dict['__pyringe_type_name__'] = obj.safe_tp_name()
result_dict['__pyringe_address__'] = long(obj._gdbval) # pylint: disable=protected-access
return result_dict
except TypeError:
# This happens in the case where we're not really looking at a heap type
# instance. There isn't really anything we can do, so we fall back to
# the default handling.
pass
# Default handler -- this does not result in proxy objects or fancy dicts,
# but most of the time, we end up emitting strings of the format
# '<object at remote 0x345a235>'
try:
proxy = obj.proxyval(set())
# json doesn't accept non-strings as keys, so we're helping along
if isinstance(proxy, dict):
return {str(key): val for key, val in proxy.iteritems()}
return proxy
except AttributeError:
return str(obj) | [
"def",
"_UnserializableObjectFallback",
"(",
"self",
",",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"libpython",
".",
"PyInstanceObjectPtr",
")",
":",
"# old-style classes use 'classobj'/'instance'",
"# get class attribute dictionary",
"in_class",
"=",
"obj",
".",
"pyop_field",
"(",
"'in_class'",
")",
"result_dict",
"=",
"in_class",
".",
"pyop_field",
"(",
"'cl_dict'",
")",
".",
"proxyval",
"(",
"set",
"(",
")",
")",
"# let libpython.py do the work of getting the instance dict",
"instanceproxy",
"=",
"obj",
".",
"proxyval",
"(",
"set",
"(",
")",
")",
"result_dict",
".",
"update",
"(",
"instanceproxy",
".",
"attrdict",
")",
"result_dict",
"[",
"'__pyringe_type_name__'",
"]",
"=",
"instanceproxy",
".",
"cl_name",
"result_dict",
"[",
"'__pyringe_address__'",
"]",
"=",
"instanceproxy",
".",
"address",
"return",
"result_dict",
"if",
"isinstance",
"(",
"obj",
",",
"libpython",
".",
"HeapTypeObjectPtr",
")",
":",
"# interestingly enough, HeapTypeObjectPtr seems to handle all pointers to",
"# heap type PyObjects, not only pointers to PyHeapTypeObject. This",
"# corresponds to new-style class instances. However, as all instances of",
"# new-style classes are simple PyObject pointers to the interpreter,",
"# libpython.py tends to give us HeapTypeObjectPtrs for things we can't",
"# handle properly.",
"try",
":",
"# get class attribute dictionary",
"type_ptr",
"=",
"obj",
".",
"field",
"(",
"'ob_type'",
")",
"tp_dict",
"=",
"type_ptr",
".",
"cast",
"(",
"GdbCache",
".",
"TYPE",
")",
"[",
"'tp_dict'",
"]",
".",
"cast",
"(",
"GdbCache",
".",
"DICT",
")",
"result_dict",
"=",
"libpython",
".",
"PyDictObjectPtr",
"(",
"tp_dict",
")",
".",
"proxyval",
"(",
"set",
"(",
")",
")",
"except",
"gdb",
".",
"error",
":",
"# There was probably a type mismatch triggered by wrong assumptions in",
"# libpython.py",
"result_dict",
"=",
"{",
"}",
"try",
":",
"# get instance attributes",
"result_dict",
".",
"update",
"(",
"obj",
".",
"get_attr_dict",
"(",
")",
".",
"proxyval",
"(",
"set",
"(",
")",
")",
")",
"result_dict",
"[",
"'__pyringe_type_name__'",
"]",
"=",
"obj",
".",
"safe_tp_name",
"(",
")",
"result_dict",
"[",
"'__pyringe_address__'",
"]",
"=",
"long",
"(",
"obj",
".",
"_gdbval",
")",
"# pylint: disable=protected-access",
"return",
"result_dict",
"except",
"TypeError",
":",
"# This happens in the case where we're not really looking at a heap type",
"# instance. There isn't really anything we can do, so we fall back to",
"# the default handling.",
"pass",
"# Default handler -- this does not result in proxy objects or fancy dicts,",
"# but most of the time, we end up emitting strings of the format",
"# '<object at remote 0x345a235>'",
"try",
":",
"proxy",
"=",
"obj",
".",
"proxyval",
"(",
"set",
"(",
")",
")",
"# json doesn't accept non-strings as keys, so we're helping along",
"if",
"isinstance",
"(",
"proxy",
",",
"dict",
")",
":",
"return",
"{",
"str",
"(",
"key",
")",
":",
"val",
"for",
"key",
",",
"val",
"in",
"proxy",
".",
"iteritems",
"(",
")",
"}",
"return",
"proxy",
"except",
"AttributeError",
":",
"return",
"str",
"(",
"obj",
")"
] | Handles sanitizing of unserializable objects for Json.
For instances of heap types, we take the class dict, augment it with the
instance's __dict__, tag it and transmit it over to the RPC client to be
reconstructed there. (Works with both old and new style classes)
Args:
obj: The object to Json-serialize
Returns:
A Json-serializable version of the parameter | [
"Handles",
"sanitizing",
"of",
"unserializable",
"objects",
"for",
"Json",
"."
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/gdb_service.py#L208-L271 | train |
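The method above reads like a `json.dumps(default=...)` hook for gdb-side proxies. A minimal analogue for ordinary Python objects, with tag names mirroring the ones the service emits; using `id()` as a stand-in for the C-level address is an assumption for illustration only.

```python
import json

class Point(object):
    def __init__(self, x, y):
        self.x, self.y = x, y

def fallback(obj):
    # Augment the instance dict with the same tags the service adds.
    result = dict(obj.__dict__)
    result['__pyringe_type_name__'] = type(obj).__name__
    result['__pyringe_address__'] = id(obj)  # stand-in for the real address
    return result

print(json.dumps(Point(1, 2), default=fallback))
```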
google/pyringe | pyringe/payload/gdb_service.py | GdbService._AcceptRPC | def _AcceptRPC(self):
"""Reads RPC request from stdin and processes it, writing result to stdout.
Returns:
True as long as execution is to be continued, False otherwise.
Raises:
RpcException: if no function was specified in the RPC or no such API
function exists.
"""
request = self._ReadObject()
if request['func'] == '__kill__':
self.ClearBreakpoints()
self._WriteObject('__kill_ack__')
return False
if 'func' not in request or request['func'].startswith('_'):
raise RpcException('Not a valid public API function.')
rpc_result = getattr(self, request['func'])(*request['args'])
self._WriteObject(rpc_result)
return True | python | def _AcceptRPC(self):
"""Reads RPC request from stdin and processes it, writing result to stdout.
Returns:
True as long as execution is to be continued, False otherwise.
Raises:
RpcException: if no function was specified in the RPC or no such API
function exists.
"""
request = self._ReadObject()
if request['func'] == '__kill__':
self.ClearBreakpoints()
self._WriteObject('__kill_ack__')
return False
if 'func' not in request or request['func'].startswith('_'):
raise RpcException('Not a valid public API function.')
rpc_result = getattr(self, request['func'])(*request['args'])
self._WriteObject(rpc_result)
return True | [
"def",
"_AcceptRPC",
"(",
"self",
")",
":",
"request",
"=",
"self",
".",
"_ReadObject",
"(",
")",
"if",
"request",
"[",
"'func'",
"]",
"==",
"'__kill__'",
":",
"self",
".",
"ClearBreakpoints",
"(",
")",
"self",
".",
"_WriteObject",
"(",
"'__kill_ack__'",
")",
"return",
"False",
"if",
"'func'",
"not",
"in",
"request",
"or",
"request",
"[",
"'func'",
"]",
".",
"startswith",
"(",
"'_'",
")",
":",
"raise",
"RpcException",
"(",
"'Not a valid public API function.'",
")",
"rpc_result",
"=",
"getattr",
"(",
"self",
",",
"request",
"[",
"'func'",
"]",
")",
"(",
"*",
"request",
"[",
"'args'",
"]",
")",
"self",
".",
"_WriteObject",
"(",
"rpc_result",
")",
"return",
"True"
] | Reads RPC request from stdin and processes it, writing result to stdout.
Returns:
True as long as execution is to be continued, False otherwise.
Raises:
RpcException: if no function was specified in the RPC or no such API
function exists. | [
"Reads",
"RPC",
"request",
"from",
"stdin",
"and",
"processes",
"it",
"writing",
"result",
"to",
"stdout",
"."
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/gdb_service.py#L293-L311 | train |
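The request framing `_AcceptRPC` expects, sketched from the handler above: a JSON object with `func` naming a public API method and `args` as a positional list. The pid and command below are illustrative.

```python
import json

rpc = json.dumps({'func': 'ExecuteRaw',
                  'args': [[11894, None, None], 'info threads']})
kill = json.dumps({'func': '__kill__', 'args': []})
print(rpc)
print(kill)
```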
google/pyringe | pyringe/payload/gdb_service.py | GdbService._UnpackGdbVal | def _UnpackGdbVal(self, gdb_value):
"""Unpacks gdb.Value objects and returns the best-matched python object."""
val_type = gdb_value.type.code
if val_type == gdb.TYPE_CODE_INT or val_type == gdb.TYPE_CODE_ENUM:
return int(gdb_value)
if val_type == gdb.TYPE_CODE_VOID:
return None
if val_type == gdb.TYPE_CODE_PTR:
return long(gdb_value)
if val_type == gdb.TYPE_CODE_ARRAY:
# This is probably a string
return str(gdb_value)
# I'm out of ideas, let's return it as a string
return str(gdb_value) | python | def _UnpackGdbVal(self, gdb_value):
"""Unpacks gdb.Value objects and returns the best-matched python object."""
val_type = gdb_value.type.code
if val_type == gdb.TYPE_CODE_INT or val_type == gdb.TYPE_CODE_ENUM:
return int(gdb_value)
if val_type == gdb.TYPE_CODE_VOID:
return None
if val_type == gdb.TYPE_CODE_PTR:
return long(gdb_value)
if val_type == gdb.TYPE_CODE_ARRAY:
# This is probably a string
return str(gdb_value)
# I'm out of ideas, let's return it as a string
return str(gdb_value) | [
"def",
"_UnpackGdbVal",
"(",
"self",
",",
"gdb_value",
")",
":",
"val_type",
"=",
"gdb_value",
".",
"type",
".",
"code",
"if",
"val_type",
"==",
"gdb",
".",
"TYPE_CODE_INT",
"or",
"val_type",
"==",
"gdb",
".",
"TYPE_CODE_ENUM",
":",
"return",
"int",
"(",
"gdb_value",
")",
"if",
"val_type",
"==",
"gdb",
".",
"TYPE_CODE_VOID",
":",
"return",
"None",
"if",
"val_type",
"==",
"gdb",
".",
"TYPE_CODE_PTR",
":",
"return",
"long",
"(",
"gdb_value",
")",
"if",
"val_type",
"==",
"gdb",
".",
"TYPE_CODE_ARRAY",
":",
"# This is probably a string",
"return",
"str",
"(",
"gdb_value",
")",
"# I'm out of ideas, let's return it as a string",
"return",
"str",
"(",
"gdb_value",
")"
] | Unpacks gdb.Value objects and returns the best-matched python object. | [
"Unpacks",
"gdb",
".",
"Value",
"objects",
"and",
"returns",
"the",
"best",
"-",
"matched",
"python",
"object",
"."
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/gdb_service.py#L313-L326 | train |
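A plain-Python analogue of the dispatch above. The real `gdb.TYPE_CODE_*` constants only exist inside gdb's embedded interpreter, so the values below are made-up stand-ins.

```python
TYPE_CODE_INT, TYPE_CODE_PTR, TYPE_CODE_ARRAY = 8, 16, 2  # stand-in values

def unpack(raw, code):
    if code == TYPE_CODE_INT:
        return int(raw)
    if code == TYPE_CODE_PTR:
        return int(raw, 0)   # hex pointer string -> integer
    return str(raw)          # fallback, as in the method above

print(unpack('42', TYPE_CODE_INT))             # -> 42
print(unpack('0x7f0a637fe700', TYPE_CODE_PTR))
```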
google/pyringe | pyringe/payload/gdb_service.py | GdbService.EnsureGdbPosition | def EnsureGdbPosition(self, pid, tid, frame_depth):
"""Make sure our position matches the request.
Args:
pid: The process ID of the target process
tid: The python thread ident of the target thread
frame_depth: The 'depth' of the requested frame in the frame stack
Raises:
PositionUnavailableException: If the requested process, thread or frame
can't be found or accessed.
"""
position = [pid, tid, frame_depth]
if not pid:
return
if not self.IsAttached():
try:
self.Attach(position)
except gdb.error as exc:
raise PositionUnavailableException(exc.message)
if gdb.selected_inferior().pid != pid:
self.Detach()
try:
self.Attach(position)
except gdb.error as exc:
raise PositionUnavailableException(exc.message)
if tid:
tstate_head = GdbCache.INTERP_HEAD['tstate_head']
for tstate in self._IterateChainedList(tstate_head, 'next'):
if tid == tstate['thread_id']:
self.selected_tstate = tstate
break
else:
raise PositionUnavailableException('Thread %s does not exist.' %
str(tid))
stack_head = self.selected_tstate['frame']
if frame_depth is not None:
frames = list(self._IterateChainedList(stack_head, 'f_back'))
frames.reverse()
try:
self.selected_frame = frames[frame_depth]
except IndexError:
raise PositionUnavailableException('Stack is not %s frames deep' %
str(frame_depth + 1)) | python | def EnsureGdbPosition(self, pid, tid, frame_depth):
"""Make sure our position matches the request.
Args:
pid: The process ID of the target process
tid: The python thread ident of the target thread
frame_depth: The 'depth' of the requested frame in the frame stack
Raises:
PositionUnavailableException: If the requested process, thread or frame
can't be found or accessed.
"""
position = [pid, tid, frame_depth]
if not pid:
return
if not self.IsAttached():
try:
self.Attach(position)
except gdb.error as exc:
raise PositionUnavailableException(exc.message)
if gdb.selected_inferior().pid != pid:
self.Detach()
try:
self.Attach(position)
except gdb.error as exc:
raise PositionUnavailableException(exc.message)
if tid:
tstate_head = GdbCache.INTERP_HEAD['tstate_head']
for tstate in self._IterateChainedList(tstate_head, 'next'):
if tid == tstate['thread_id']:
self.selected_tstate = tstate
break
else:
raise PositionUnavailableException('Thread %s does not exist.' %
str(tid))
stack_head = self.selected_tstate['frame']
if frame_depth is not None:
frames = list(self._IterateChainedList(stack_head, 'f_back'))
frames.reverse()
try:
self.selected_frame = frames[frame_depth]
except IndexError:
raise PositionUnavailableException('Stack is not %s frames deep' %
str(frame_depth + 1)) | [
"def",
"EnsureGdbPosition",
"(",
"self",
",",
"pid",
",",
"tid",
",",
"frame_depth",
")",
":",
"position",
"=",
"[",
"pid",
",",
"tid",
",",
"frame_depth",
"]",
"if",
"not",
"pid",
":",
"return",
"if",
"not",
"self",
".",
"IsAttached",
"(",
")",
":",
"try",
":",
"self",
".",
"Attach",
"(",
"position",
")",
"except",
"gdb",
".",
"error",
"as",
"exc",
":",
"raise",
"PositionUnavailableException",
"(",
"exc",
".",
"message",
")",
"if",
"gdb",
".",
"selected_inferior",
"(",
")",
".",
"pid",
"!=",
"pid",
":",
"self",
".",
"Detach",
"(",
")",
"try",
":",
"self",
".",
"Attach",
"(",
"position",
")",
"except",
"gdb",
".",
"error",
"as",
"exc",
":",
"raise",
"PositionUnavailableException",
"(",
"exc",
".",
"message",
")",
"if",
"tid",
":",
"tstate_head",
"=",
"GdbCache",
".",
"INTERP_HEAD",
"[",
"'tstate_head'",
"]",
"for",
"tstate",
"in",
"self",
".",
"_IterateChainedList",
"(",
"tstate_head",
",",
"'next'",
")",
":",
"if",
"tid",
"==",
"tstate",
"[",
"'thread_id'",
"]",
":",
"self",
".",
"selected_tstate",
"=",
"tstate",
"break",
"else",
":",
"raise",
"PositionUnavailableException",
"(",
"'Thread %s does not exist.'",
"%",
"str",
"(",
"tid",
")",
")",
"stack_head",
"=",
"self",
".",
"selected_tstate",
"[",
"'frame'",
"]",
"if",
"frame_depth",
"is",
"not",
"None",
":",
"frames",
"=",
"list",
"(",
"self",
".",
"_IterateChainedList",
"(",
"stack_head",
",",
"'f_back'",
")",
")",
"frames",
".",
"reverse",
"(",
")",
"try",
":",
"self",
".",
"selected_frame",
"=",
"frames",
"[",
"frame_depth",
"]",
"except",
"IndexError",
":",
"raise",
"PositionUnavailableException",
"(",
"'Stack is not %s frames deep'",
"%",
"str",
"(",
"frame_depth",
"+",
"1",
")",
")"
] | Make sure our position matches the request.
Args:
pid: The process ID of the target process
tid: The python thread ident of the target thread
frame_depth: The 'depth' of the requested frame in the frame stack
Raises:
PositionUnavailableException: If the requested process, thread or frame
can't be found or accessed. | [
"Make",
"sure",
"our",
"position",
"matches",
"the",
"request",
"."
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/gdb_service.py#L335-L378 | train |
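The `position` convention the service methods share, sketched with made-up values: `[pid, tid, frame_depth]`, where trailing `None`s mean "any thread" / "no particular frame".

```python
whole_process = [11894, None, None]            # enough for attach/detach
deepest_frame = [11894, 139683379791616, 0]    # pid, thread ident, frame 0
for position in (whole_process, deepest_frame):
    print(position)
```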
google/pyringe | pyringe/payload/gdb_service.py | GdbService.IsSymbolFileSane | def IsSymbolFileSane(self, position):
"""Performs basic sanity check by trying to look up a bunch of symbols."""
pos = [position[0], None, None]
self.EnsureGdbPosition(*pos)
try:
if GdbCache.DICT and GdbCache.TYPE and GdbCache.INTERP_HEAD:
# pylint: disable=pointless-statement
tstate = GdbCache.INTERP_HEAD['tstate_head']
tstate['thread_id']
frame = tstate['frame']
frame_attrs = ['f_back',
'f_locals',
'f_localsplus',
'f_globals',
'f_builtins',
'f_lineno',
'f_lasti']
for attr_name in frame_attrs:
# This lookup shouldn't throw an exception
frame[attr_name]
code = frame['f_code']
code_attrs = ['co_name',
'co_filename',
'co_nlocals',
'co_varnames',
'co_lnotab',
'co_firstlineno']
for attr_name in code_attrs:
# Same as above, just checking whether the lookup succeeds.
code[attr_name]
# if we've gotten this far, we should be fine, as it means gdb managed
# to look up values for all of these. They might still be null, the
# symbol file might still be bogus, but making gdb check for null values
# and letting it run into access violations is the best we can do. We
# haven't checked any of the python types (dict, etc.), but this symbol
# file seems to be useful for some things, so let's give it our seal of
# approval.
return True
except gdb.error:
return False
# looks like the initial GdbCache refresh failed. That's no good.
return False | python | def IsSymbolFileSane(self, position):
"""Performs basic sanity check by trying to look up a bunch of symbols."""
pos = [position[0], None, None]
self.EnsureGdbPosition(*pos)
try:
if GdbCache.DICT and GdbCache.TYPE and GdbCache.INTERP_HEAD:
# pylint: disable=pointless-statement
tstate = GdbCache.INTERP_HEAD['tstate_head']
tstate['thread_id']
frame = tstate['frame']
frame_attrs = ['f_back',
'f_locals',
'f_localsplus',
'f_globals',
'f_builtins',
'f_lineno',
'f_lasti']
for attr_name in frame_attrs:
# This lookup shouldn't throw an exception
frame[attr_name]
code = frame['f_code']
code_attrs = ['co_name',
'co_filename',
'co_nlocals',
'co_varnames',
'co_lnotab',
'co_firstlineno']
for attr_name in code_attrs:
# Same as above, just checking whether the lookup succeeds.
code[attr_name]
# if we've gotten this far, we should be fine, as it means gdb managed
# to look up values for all of these. They might still be null, the
# symbol file might still be bogus, but making gdb check for null values
# and letting it run into access violations is the best we can do. We
# haven't checked any of the python types (dict, etc.), but this symbol
# file seems to be useful for some things, so let's give it our seal of
# approval.
return True
except gdb.error:
return False
# looks like the initial GdbCache refresh failed. That's no good.
return False | [
"def",
"IsSymbolFileSane",
"(",
"self",
",",
"position",
")",
":",
"pos",
"=",
"[",
"position",
"[",
"0",
"]",
",",
"None",
",",
"None",
"]",
"self",
".",
"EnsureGdbPosition",
"(",
"*",
"pos",
")",
"try",
":",
"if",
"GdbCache",
".",
"DICT",
"and",
"GdbCache",
".",
"TYPE",
"and",
"GdbCache",
".",
"INTERP_HEAD",
":",
"# pylint: disable=pointless-statement",
"tstate",
"=",
"GdbCache",
".",
"INTERP_HEAD",
"[",
"'tstate_head'",
"]",
"tstate",
"[",
"'thread_id'",
"]",
"frame",
"=",
"tstate",
"[",
"'frame'",
"]",
"frame_attrs",
"=",
"[",
"'f_back'",
",",
"'f_locals'",
",",
"'f_localsplus'",
",",
"'f_globals'",
",",
"'f_builtins'",
",",
"'f_lineno'",
",",
"'f_lasti'",
"]",
"for",
"attr_name",
"in",
"frame_attrs",
":",
"# This lookup shouldn't throw an exception",
"frame",
"[",
"attr_name",
"]",
"code",
"=",
"frame",
"[",
"'f_code'",
"]",
"code_attrs",
"=",
"[",
"'co_name'",
",",
"'co_filename'",
",",
"'co_nlocals'",
",",
"'co_varnames'",
",",
"'co_lnotab'",
",",
"'co_firstlineno'",
"]",
"for",
"attr_name",
"in",
"code_attrs",
":",
"# Same as above, just checking whether the lookup succeeds.",
"code",
"[",
"attr_name",
"]",
"# if we've gotten this far, we should be fine, as it means gdb managed",
"# to look up values for all of these. They might still be null, the",
"# symbol file might still be bogus, but making gdb check for null values",
"# and letting it run into access violations is the best we can do. We",
"# haven't checked any of the python types (dict, etc.), but this symbol",
"# file seems to be useful for some things, so let's give it our seal of",
"# approval.",
"return",
"True",
"except",
"gdb",
".",
"error",
":",
"return",
"False",
"# looks like the initial GdbCache refresh failed. That's no good.",
"return",
"False"
] | Performs basic sanity check by trying to look up a bunch of symbols. | [
"Performs",
"basic",
"sanity",
"check",
"by",
"trying",
"to",
"look",
"up",
"a",
"bunch",
"of",
"symbols",
"."
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/gdb_service.py#L392-L433 | train |
google/pyringe | pyringe/payload/gdb_service.py | GdbService.Detach | def Detach(self):
"""Detaches from the inferior. If not attached, this is a no-op."""
# We have to work around the python APIs weirdness :\
if not self.IsAttached():
return None
# Gdb doesn't drain any pending SIGINTs it may have sent to the inferior
# when it simply detaches. We can drain them by letting the inferior continue,
# and gdb will intercept any SIGINT that's still to-be-delivered; as soon as
# we do so however, we may lose control of gdb (if we're running in
# synchronous mode). So we queue an interruption and continue gdb right
# afterwards, it will waitpid() for its inferior and collect all signals
# that may have been queued.
pid = gdb.selected_inferior().pid
self.Interrupt([pid, None, None])
self.Continue([pid, None, None])
result = gdb.execute('detach', to_string=True)
if not result:
return None
return result | python | def Detach(self):
"""Detaches from the inferior. If not attached, this is a no-op."""
# We have to work around the python APIs weirdness :\
if not self.IsAttached():
return None
# Gdb doesn't drain any pending SIGINTs it may have sent to the inferior
# when it simply detaches. We can drain them by letting the inferior continue,
# and gdb will intercept any SIGINT that's still to-be-delivered; as soon as
# we do so however, we may lose control of gdb (if we're running in
# synchronous mode). So we queue an interruption and continue gdb right
# afterwards, it will waitpid() for its inferior and collect all signals
# that may have been queued.
pid = gdb.selected_inferior().pid
self.Interrupt([pid, None, None])
self.Continue([pid, None, None])
result = gdb.execute('detach', to_string=True)
if not result:
return None
return result | [
"def",
"Detach",
"(",
"self",
")",
":",
"# We have to work around the python APIs weirdness :\\",
"if",
"not",
"self",
".",
"IsAttached",
"(",
")",
":",
"return",
"None",
"# Gdb doesn't drain any pending SIGINTs it may have sent to the inferior",
"# when it simply detaches. We can do this by letting the inferior continue,",
"# and gdb will intercept any SIGINT that's still to-be-delivered; as soon as",
"# we do so however, we may lose control of gdb (if we're running in",
"# synchronous mode). So we queue an interruption and continue gdb right",
"# afterwards, it will waitpid() for its inferior and collect all signals",
"# that may have been queued.",
"pid",
"=",
"gdb",
".",
"selected_inferior",
"(",
")",
".",
"pid",
"self",
".",
"Interrupt",
"(",
"[",
"pid",
",",
"None",
",",
"None",
"]",
")",
"self",
".",
"Continue",
"(",
"[",
"pid",
",",
"None",
",",
"None",
"]",
")",
"result",
"=",
"gdb",
".",
"execute",
"(",
"'detach'",
",",
"to_string",
"=",
"True",
")",
"if",
"not",
"result",
":",
"return",
"None",
"return",
"result"
] | Detaches from the inferior. If not attached, this is a no-op. | [
"Detaches",
"from",
"the",
"inferior",
".",
"If",
"not",
"attached",
"this",
"is",
"a",
"no",
"-",
"op",
"."
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/gdb_service.py#L449-L467 | train |
google/pyringe | pyringe/payload/gdb_service.py | GdbService.Call | def Call(self, position, function_call):
"""Perform a function call in the inferior.
WARNING: Since Gdb's concept of threads can't be directly identified with
python threads, the function call will be made from what has to be assumed
is an arbitrary thread. This *will* interrupt the inferior. Continuing it
after the call is the responsibility of the caller.
Args:
position: the context of the inferior to call the function from.
function_call: A string corresponding to a function call. Format:
'foo(0,0)'
Returns:
The return value of the called function.
"""
self.EnsureGdbPosition(position[0], None, None)
if not gdb.selected_thread().is_stopped():
self.Interrupt(position)
result_value = gdb.parse_and_eval(function_call)
return self._UnpackGdbVal(result_value) | python | def Call(self, position, function_call):
"""Perform a function call in the inferior.
WARNING: Since Gdb's concept of threads can't be directly identified with
python threads, the function call will be made from what has to be assumed
is an arbitrary thread. This *will* interrupt the inferior. Continuing it
after the call is the responsibility of the caller.
Args:
position: the context of the inferior to call the function from.
function_call: A string corresponding to a function call. Format:
'foo(0,0)'
Returns:
The return value of the called function.
"""
self.EnsureGdbPosition(position[0], None, None)
if not gdb.selected_thread().is_stopped():
self.Interrupt(position)
result_value = gdb.parse_and_eval(function_call)
return self._UnpackGdbVal(result_value) | [
"def",
"Call",
"(",
"self",
",",
"position",
",",
"function_call",
")",
":",
"self",
".",
"EnsureGdbPosition",
"(",
"position",
"[",
"0",
"]",
",",
"None",
",",
"None",
")",
"if",
"not",
"gdb",
".",
"selected_thread",
"(",
")",
".",
"is_stopped",
"(",
")",
":",
"self",
".",
"Interrupt",
"(",
"position",
")",
"result_value",
"=",
"gdb",
".",
"parse_and_eval",
"(",
"function_call",
")",
"return",
"self",
".",
"_UnpackGdbVal",
"(",
"result_value",
")"
] | Perform a function call in the inferior.
WARNING: Since Gdb's concept of threads can't be directly identified with
python threads, the function call will be made from what has to be assumed
is an arbitrary thread. This *will* interrupt the inferior. Continuing it
after the call is the responsibility of the caller.
Args:
position: the context of the inferior to call the function from.
function_call: A string corresponding to a function call. Format:
'foo(0,0)'
Returns:
The return value of the called function. | [
"Perform",
"a",
"function",
"call",
"in",
"the",
"inferior",
"."
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/gdb_service.py#L492-L511 | train |
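Building a `function_call` string of the `'foo(0,0)'` form the docstring describes; the symbol and arguments below are purely illustrative.

```python
symbol, args = 'foo', (0, 0)
function_call = '%s(%s)' % (symbol, ','.join(str(a) for a in args))
print(function_call)  # -> foo(0,0)
```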
google/pyringe | pyringe/payload/gdb_service.py | GdbService.ExecuteRaw | def ExecuteRaw(self, position, command):
"""Send a command string to gdb."""
self.EnsureGdbPosition(position[0], None, None)
return gdb.execute(command, to_string=True) | python | def ExecuteRaw(self, position, command):
"""Send a command string to gdb."""
self.EnsureGdbPosition(position[0], None, None)
return gdb.execute(command, to_string=True) | [
"def",
"ExecuteRaw",
"(",
"self",
",",
"position",
",",
"command",
")",
":",
"self",
".",
"EnsureGdbPosition",
"(",
"position",
"[",
"0",
"]",
",",
"None",
",",
"None",
")",
"return",
"gdb",
".",
"execute",
"(",
"command",
",",
"to_string",
"=",
"True",
")"
] | Send a command string to gdb. | [
"Send",
"a",
"command",
"string",
"to",
"gdb",
"."
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/gdb_service.py#L513-L516 | train |
google/pyringe | pyringe/payload/gdb_service.py | GdbService._GetGdbThreadMapping | def _GetGdbThreadMapping(self, position):
"""Gets a mapping from python tid to gdb thread num.
There's no way to get the thread ident from a gdb thread. We only get the
"ID of the thread, as assigned by GDB", which is completely useless for
everything except talking to gdb. So in order to translate between these
two, we have to execute 'info threads' and parse its output. Note that this
may only work on linux, and only when python was compiled to use pthreads.
It may work elsewhere, but we won't guarantee it.
Args:
position: array of pid, tid, framedepth specifying the requested position.
Returns:
A dictionary of the form {python_tid: gdb_threadnum}.
"""
if len(gdb.selected_inferior().threads()) == 1:
# gdb's output for info threads changes and only displays PID. We cheat.
return {position[1]: 1}
# example:
# 8 Thread 0x7f0a637fe700 (LWP 11894) "test.py" 0x00007f0a69563e63 in
# select () from /usr/lib64/libc.so.6
thread_line_regexp = r'\s*\**\s*([0-9]+)\s+[a-zA-Z]+\s+([x0-9a-fA-F]+)\s.*'
output = gdb.execute('info threads', to_string=True)
matches = [re.match(thread_line_regexp, line) for line
in output.split('\n')[1:]]
return {int(match.group(2), 16): int(match.group(1))
for match in matches if match} | python | def _GetGdbThreadMapping(self, position):
"""Gets a mapping from python tid to gdb thread num.
There's no way to get the thread ident from a gdb thread. We only get the
"ID of the thread, as assigned by GDB", which is completely useless for
everything except talking to gdb. So in order to translate between these
two, we have to execute 'info threads' and parse its output. Note that this
may only work on linux, and only when python was compiled to use pthreads.
It may work elsewhere, but we won't guarantee it.
Args:
position: array of pid, tid, framedepth specifying the requested position.
Returns:
A dictionary of the form {python_tid: gdb_threadnum}.
"""
if len(gdb.selected_inferior().threads()) == 1:
# gdb's output for info threads changes and only displays PID. We cheat.
return {position[1]: 1}
# example:
# 8 Thread 0x7f0a637fe700 (LWP 11894) "test.py" 0x00007f0a69563e63 in
# select () from /usr/lib64/libc.so.6
thread_line_regexp = r'\s*\**\s*([0-9]+)\s+[a-zA-Z]+\s+([x0-9a-fA-F]+)\s.*'
output = gdb.execute('info threads', to_string=True)
matches = [re.match(thread_line_regexp, line) for line
in output.split('\n')[1:]]
return {int(match.group(2), 16): int(match.group(1))
for match in matches if match} | [
"def",
"_GetGdbThreadMapping",
"(",
"self",
",",
"position",
")",
":",
"if",
"len",
"(",
"gdb",
".",
"selected_inferior",
"(",
")",
".",
"threads",
"(",
")",
")",
"==",
"1",
":",
"# gdb's output for info threads changes and only displays PID. We cheat.",
"return",
"{",
"position",
"[",
"1",
"]",
":",
"1",
"}",
"# example:",
"# 8 Thread 0x7f0a637fe700 (LWP 11894) \"test.py\" 0x00007f0a69563e63 in",
"# select () from /usr/lib64/libc.so.6",
"thread_line_regexp",
"=",
"r'\\s*\\**\\s*([0-9]+)\\s+[a-zA-Z]+\\s+([x0-9a-fA-F]+)\\s.*'",
"output",
"=",
"gdb",
".",
"execute",
"(",
"'info threads'",
",",
"to_string",
"=",
"True",
")",
"matches",
"=",
"[",
"re",
".",
"match",
"(",
"thread_line_regexp",
",",
"line",
")",
"for",
"line",
"in",
"output",
".",
"split",
"(",
"'\\n'",
")",
"[",
"1",
":",
"]",
"]",
"return",
"{",
"int",
"(",
"match",
".",
"group",
"(",
"2",
")",
",",
"16",
")",
":",
"int",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
"for",
"match",
"in",
"matches",
"if",
"match",
"}"
] | Gets a mapping from python tid to gdb thread num.
There's no way to get the thread ident from a gdb thread. We only get the
"ID of the thread, as assigned by GDB", which is completely useless for
everything except talking to gdb. So in order to translate between these
two, we have to execute 'info threads' and parse its output. Note that this
may only work on linux, and only when python was compiled to use pthreads.
It may work elsewhere, but we won't guarantee it.
Args:
position: array of pid, tid, framedepth specifying the requested position.
Returns:
A dictionary of the form {python_tid: gdb_threadnum}. | [
"Gets",
"a",
"mapping",
"from",
"python",
"tid",
"to",
"gdb",
"thread",
"num",
"."
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/gdb_service.py#L518-L545 | train |
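Exercising the thread-line regexp above against the sample `info threads` line quoted in its comment; this reproduces the `{python_tid: gdb_threadnum}` pairing the method returns.

```python
import re

thread_line_regexp = r'\s*\**\s*([0-9]+)\s+[a-zA-Z]+\s+([x0-9a-fA-F]+)\s.*'
line = ('  8    Thread 0x7f0a637fe700 (LWP 11894) "test.py" '
        '0x00007f0a69563e63 in select () from /usr/lib64/libc.so.6')
match = re.match(thread_line_regexp, line)
print({int(match.group(2), 16): int(match.group(1))})  # {tid: 8}
```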
google/pyringe | pyringe/payload/gdb_service.py | GdbService._Inject | def _Inject(self, position, call):
"""Injects evaluation of 'call' in a safe location in the inferior.
Due to the way these injected function calls work, gdb must not be killed
until the call has returned. If that happens, the inferior will be sent
SIGTRAP upon attempting to return from the dummy frame gdb constructs for
us, and will most probably crash.
Args:
position: array of pid, tid, framedepth specifying the requested position.
call: Any expression gdb can evaluate. Usually a function call.
Raises:
RuntimeError: if gdb is not being run in synchronous exec mode.
"""
self.EnsureGdbPosition(position[0], position[1], None)
self.ClearBreakpoints()
self._AddThreadSpecificBreakpoint(position)
gdb.parse_and_eval('%s = 1' % GdbCache.PENDINGCALLS_TO_DO)
gdb.parse_and_eval('%s = 1' % GdbCache.PENDINGBUSY)
try:
# We're "armed", risk the blocking call to Continue
self.Continue(position)
# Breakpoint was hit!
if not gdb.selected_thread().is_stopped():
# This should not happen. Depending on how gdb is being used, the
# semantics of self.Continue change, so I'd rather leave this check in
# here, in case we ever *do* end up changing to async mode.
raise RuntimeError('Gdb is not acting as expected, is it being run in '
'async mode?')
finally:
gdb.parse_and_eval('%s = 0' % GdbCache.PENDINGBUSY)
self.Call(position, call) | python | def _Inject(self, position, call):
"""Injects evaluation of 'call' in a safe location in the inferior.
Due to the way these injected function calls work, gdb must not be killed
until the call has returned. If that happens, the inferior will be sent
SIGTRAP upon attempting to return from the dummy frame gdb constructs for
us, and will most probably crash.
Args:
position: array of pid, tid, framedepth specifying the requested position.
call: Any expression gdb can evaluate. Usually a function call.
Raises:
RuntimeError: if gdb is not being run in synchronous exec mode.
"""
self.EnsureGdbPosition(position[0], position[1], None)
self.ClearBreakpoints()
self._AddThreadSpecificBreakpoint(position)
gdb.parse_and_eval('%s = 1' % GdbCache.PENDINGCALLS_TO_DO)
gdb.parse_and_eval('%s = 1' % GdbCache.PENDINGBUSY)
try:
# We're "armed", risk the blocking call to Continue
self.Continue(position)
# Breakpoint was hit!
if not gdb.selected_thread().is_stopped():
# This should not happen. Depending on how gdb is being used, the
# semantics of self.Continue change, so I'd rather leave this check in
# here, in case we ever *do* end up changing to async mode.
raise RuntimeError('Gdb is not acting as expected, is it being run in '
'async mode?')
finally:
gdb.parse_and_eval('%s = 0' % GdbCache.PENDINGBUSY)
self.Call(position, call) | [
"def",
"_Inject",
"(",
"self",
",",
"position",
",",
"call",
")",
":",
"self",
".",
"EnsureGdbPosition",
"(",
"position",
"[",
"0",
"]",
",",
"position",
"[",
"1",
"]",
",",
"None",
")",
"self",
".",
"ClearBreakpoints",
"(",
")",
"self",
".",
"_AddThreadSpecificBreakpoint",
"(",
"position",
")",
"gdb",
".",
"parse_and_eval",
"(",
"'%s = 1'",
"%",
"GdbCache",
".",
"PENDINGCALLS_TO_DO",
")",
"gdb",
".",
"parse_and_eval",
"(",
"'%s = 1'",
"%",
"GdbCache",
".",
"PENDINGBUSY",
")",
"try",
":",
"# We're \"armed\", risk the blocking call to Continue",
"self",
".",
"Continue",
"(",
"position",
")",
"# Breakpoint was hit!",
"if",
"not",
"gdb",
".",
"selected_thread",
"(",
")",
".",
"is_stopped",
"(",
")",
":",
"# This should not happen. Depending on how gdb is being used, the",
"# semantics of self.Continue change, so I'd rather leave this check in",
"# here, in case we ever *do* end up changing to async mode.",
"raise",
"RuntimeError",
"(",
"'Gdb is not acting as expected, is it being run in '",
"'async mode?'",
")",
"finally",
":",
"gdb",
".",
"parse_and_eval",
"(",
"'%s = 0'",
"%",
"GdbCache",
".",
"PENDINGBUSY",
")",
"self",
".",
"Call",
"(",
"position",
",",
"call",
")"
] | Injects evaluation of 'call' in a safe location in the inferior.
Due to the way these injected function calls work, gdb must not be killed
until the call has returned. If that happens, the inferior will be sent
SIGTRAP upon attempting to return from the dummy frame gdb constructs for
us, and will most probably crash.
Args:
position: array of pid, tid, framedepth specifying the requested position.
call: Any expression gdb can evaluate. Usually a function call.
Raises:
RuntimeError: if gdb is not being run in synchronous exec mode. | [
"Injects",
"evaluation",
"of",
"call",
"in",
"a",
"safe",
"location",
"in",
"the",
"inferior",
"."
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/gdb_service.py#L557-L587 | train |
google/pyringe | pyringe/payload/gdb_service.py | GdbService._BacktraceFromFramePtr | def _BacktraceFromFramePtr(self, frame_ptr):
"""Assembles and returns what looks exactly like python's backtraces."""
# expects frame_ptr to be a gdb.Value
frame_objs = [PyFrameObjectPtr(frame) for frame
in self._IterateChainedList(frame_ptr, 'f_back')]
# We want to output tracebacks in the same format python uses, so we have to
# reverse the stack
frame_objs.reverse()
tb_strings = ['Traceback (most recent call last):']
for frame in frame_objs:
line_string = (' File "%s", line %s, in %s' %
(frame.filename(),
str(frame.current_line_num()),
frame.co_name.proxyval(set())))
tb_strings.append(line_string)
line_string = ' %s' % frame.current_line().strip()
tb_strings.append(line_string)
return '\n'.join(tb_strings) | python | def _BacktraceFromFramePtr(self, frame_ptr):
"""Assembles and returns what looks exactly like python's backtraces."""
# expects frame_ptr to be a gdb.Value
frame_objs = [PyFrameObjectPtr(frame) for frame
in self._IterateChainedList(frame_ptr, 'f_back')]
# We want to output tracebacks in the same format python uses, so we have to
# reverse the stack
frame_objs.reverse()
tb_strings = ['Traceback (most recent call last):']
for frame in frame_objs:
line_string = (' File "%s", line %s, in %s' %
(frame.filename(),
str(frame.current_line_num()),
frame.co_name.proxyval(set())))
tb_strings.append(line_string)
line_string = ' %s' % frame.current_line().strip()
tb_strings.append(line_string)
return '\n'.join(tb_strings) | [
"def",
"_BacktraceFromFramePtr",
"(",
"self",
",",
"frame_ptr",
")",
":",
"# expects frame_ptr to be a gdb.Value",
"frame_objs",
"=",
"[",
"PyFrameObjectPtr",
"(",
"frame",
")",
"for",
"frame",
"in",
"self",
".",
"_IterateChainedList",
"(",
"frame_ptr",
",",
"'f_back'",
")",
"]",
"# We want to output tracebacks in the same format python uses, so we have to",
"# reverse the stack",
"frame_objs",
".",
"reverse",
"(",
")",
"tb_strings",
"=",
"[",
"'Traceback (most recent call last):'",
"]",
"for",
"frame",
"in",
"frame_objs",
":",
"line_string",
"=",
"(",
"' File \"%s\", line %s, in %s'",
"%",
"(",
"frame",
".",
"filename",
"(",
")",
",",
"str",
"(",
"frame",
".",
"current_line_num",
"(",
")",
")",
",",
"frame",
".",
"co_name",
".",
"proxyval",
"(",
"set",
"(",
")",
")",
")",
")",
"tb_strings",
".",
"append",
"(",
"line_string",
")",
"line_string",
"=",
"' %s'",
"%",
"frame",
".",
"current_line",
"(",
")",
".",
"strip",
"(",
")",
"tb_strings",
".",
"append",
"(",
"line_string",
")",
"return",
"'\\n'",
".",
"join",
"(",
"tb_strings",
")"
] | Assembles and returns what looks exactly like python's backtraces. | [
"Assembles",
"and",
"returns",
"what",
"looks",
"exactly",
"like",
"python",
"s",
"backtraces",
"."
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/gdb_service.py#L597-L615 | train |
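The exact layout the assembler above emits, mirrored with placeholder frame data (file names, line numbers, and source lines are invented):

```python
frames = [('/tmp/test.py', 12, '<module>', 'main()'),
          ('/tmp/test.py', 7, 'main', 'time.sleep(60)')]
tb_strings = ['Traceback (most recent call last):']
for filename, lineno, name, src in frames:
    tb_strings.append('  File "%s", line %s, in %s' % (filename, lineno, name))
    tb_strings.append('    %s' % src)
print('\n'.join(tb_strings))
```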
google/pyringe | pyringe/inferior.py | GdbProxy.Kill | def Kill(self):
"""Send death pill to Gdb and forcefully kill it if that doesn't work."""
try:
if self.is_running:
self.Detach()
if self._Execute('__kill__') == '__kill_ack__':
# acknowledged, let's give it some time to die in peace
time.sleep(0.1)
except (TimeoutError, ProxyError):
logging.debug('Termination request not acknowledged, killing gdb.')
if self.is_running:
# death pill didn't seem to work. We don't want the inferior to get killed
# the next time it hits a dangling breakpoint, so we send a SIGINT to gdb,
# which makes it disable instruction breakpoints for the time being.
os.kill(self._process.pid, signal.SIGINT)
# Since SIGINT has higher priority (with signal number 2) than SIGTERM
# (signal 15), SIGTERM cannot preempt the signal handler for SIGINT.
self._process.terminate()
self._process.wait()
self._errfile_r.close()
self._outfile_r.close() | python | def Kill(self):
"""Send death pill to Gdb and forcefully kill it if that doesn't work."""
try:
if self.is_running:
self.Detach()
if self._Execute('__kill__') == '__kill_ack__':
# acknowledged, let's give it some time to die in peace
time.sleep(0.1)
except (TimeoutError, ProxyError):
logging.debug('Termination request not acknowledged, killing gdb.')
if self.is_running:
# death pill didn't seem to work. We don't want the inferior to get killed
# the next time it hits a dangling breakpoint, so we send a SIGINT to gdb,
# which makes it disable instruction breakpoints for the time being.
os.kill(self._process.pid, signal.SIGINT)
# Since SIGINT has higher priority (with signal number 2) than SIGTERM
# (signal 15), SIGTERM cannot preempt the signal handler for SIGINT.
self._process.terminate()
self._process.wait()
self._errfile_r.close()
self._outfile_r.close() | [
"def",
"Kill",
"(",
"self",
")",
":",
"try",
":",
"if",
"self",
".",
"is_running",
":",
"self",
".",
"Detach",
"(",
")",
"if",
"self",
".",
"_Execute",
"(",
"'__kill__'",
")",
"==",
"'__kill_ack__'",
":",
"# acknowledged, let's give it some time to die in peace",
"time",
".",
"sleep",
"(",
"0.1",
")",
"except",
"(",
"TimeoutError",
",",
"ProxyError",
")",
":",
"logging",
".",
"debug",
"(",
"'Termination request not acknowledged, killing gdb.'",
")",
"if",
"self",
".",
"is_running",
":",
"# death pill didn't seem to work. We don't want the inferior to get killed",
"# the next time it hits a dangling breakpoint, so we send a SIGINT to gdb,",
"# which makes it disable instruction breakpoints for the time being.",
"os",
".",
"kill",
"(",
"self",
".",
"_process",
".",
"pid",
",",
"signal",
".",
"SIGINT",
")",
"# Since SIGINT has higher priority (with signal number 2) than SIGTERM",
"# (signal 15), SIGTERM cannot preempt the signal handler for SIGINT.",
"self",
".",
"_process",
".",
"terminate",
"(",
")",
"self",
".",
"_process",
".",
"wait",
"(",
")",
"self",
".",
"_errfile_r",
".",
"close",
"(",
")",
"self",
".",
"_outfile_r",
".",
"close",
"(",
")"
] | Send death pill to Gdb and forcefully kill it if that doesn't work. | [
"Send",
"death",
"pill",
"to",
"Gdb",
"and",
"forcefully",
"kill",
"it",
"if",
"that",
"doesn",
"t",
"work",
"."
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/inferior.py#L202-L222 | train |
google/pyringe | pyringe/inferior.py | GdbProxy.Version | def Version():
"""Gets the version of gdb as a 3-tuple.
The gdb devs seem to think it's a good idea to make --version
output multiple lines of welcome text instead of just the actual version,
so we ignore everything it outputs after the first line.
Returns:
The installed version of gdb in the form
(<major>, <minor or None>, <micro or None>)
gdb 7.7 would hence show up as version (7,7)
"""
output = subprocess.check_output(['gdb', '--version']).split('\n')[0]
# Example output (Arch linux):
# GNU gdb (GDB) 7.7
# Example output (Debian sid):
# GNU gdb (GDB) 7.6.2 (Debian 7.6.2-1)
# Example output (Debian wheezy):
# GNU gdb (GDB) 7.4.1-debian
# Example output (centos 2.6.32):
# GNU gdb (GDB) Red Hat Enterprise Linux (7.2-56.el6)
# As we've seen in the examples above, versions may be named very liberally
# So we assume every part of that string may be the "real" version string
# and try to parse them all. This too isn't perfect (later strings will
# overwrite information gathered from previous ones), but it should be
# flexible enough for everything out there.
major = None
minor = None
micro = None
for potential_versionstring in output.split():
version = re.split('[^0-9]', potential_versionstring)
try:
major = int(version[0])
except (IndexError, ValueError):
pass
try:
minor = int(version[1])
except (IndexError, ValueError):
pass
try:
micro = int(version[2])
except (IndexError, ValueError):
pass
return (major, minor, micro) | python | def Version():
"""Gets the version of gdb as a 3-tuple.
The gdb devs seem to think it's a good idea to make --version
output multiple lines of welcome text instead of just the actual version,
so we ignore everything it outputs after the first line.
Returns:
The installed version of gdb in the form
(<major>, <minor or None>, <micro or None>)
gdb 7.7 would hence show up as version (7,7)
"""
output = subprocess.check_output(['gdb', '--version']).split('\n')[0]
# Example output (Arch linux):
# GNU gdb (GDB) 7.7
# Example output (Debian sid):
# GNU gdb (GDB) 7.6.2 (Debian 7.6.2-1)
# Example output (Debian wheezy):
# GNU gdb (GDB) 7.4.1-debian
# Example output (centos 2.6.32):
# GNU gdb (GDB) Red Hat Enterprise Linux (7.2-56.el6)
# As we've seen in the examples above, versions may be named very liberally
# So we assume every part of that string may be the "real" version string
# and try to parse them all. This too isn't perfect (later strings will
# overwrite information gathered from previous ones), but it should be
# flexible enough for everything out there.
major = None
minor = None
micro = None
for potential_versionstring in output.split():
version = re.split('[^0-9]', potential_versionstring)
try:
major = int(version[0])
except (IndexError, ValueError):
pass
try:
minor = int(version[1])
except (IndexError, ValueError):
pass
try:
micro = int(version[2])
except (IndexError, ValueError):
pass
return (major, minor, micro) | [
"def",
"Version",
"(",
")",
":",
"output",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"'gdb'",
",",
"'--version'",
"]",
")",
".",
"split",
"(",
"'\\n'",
")",
"[",
"0",
"]",
"# Example output (Arch linux):",
"# GNU gdb (GDB) 7.7",
"# Example output (Debian sid):",
"# GNU gdb (GDB) 7.6.2 (Debian 7.6.2-1)",
"# Example output (Debian wheezy):",
"# GNU gdb (GDB) 7.4.1-debian",
"# Example output (centos 2.6.32):",
"# GNU gdb (GDB) Red Hat Enterprise Linux (7.2-56.el6)",
"# As we've seen in the examples above, versions may be named very liberally",
"# So we assume every part of that string may be the \"real\" version string",
"# and try to parse them all. This too isn't perfect (later strings will",
"# overwrite information gathered from previous ones), but it should be",
"# flexible enough for everything out there.",
"major",
"=",
"None",
"minor",
"=",
"None",
"micro",
"=",
"None",
"for",
"potential_versionstring",
"in",
"output",
".",
"split",
"(",
")",
":",
"version",
"=",
"re",
".",
"split",
"(",
"'[^0-9]'",
",",
"potential_versionstring",
")",
"try",
":",
"major",
"=",
"int",
"(",
"version",
"[",
"0",
"]",
")",
"except",
"(",
"IndexError",
",",
"ValueError",
")",
":",
"pass",
"try",
":",
"minor",
"=",
"int",
"(",
"version",
"[",
"1",
"]",
")",
"except",
"(",
"IndexError",
",",
"ValueError",
")",
":",
"pass",
"try",
":",
"micro",
"=",
"int",
"(",
"version",
"[",
"2",
"]",
")",
"except",
"(",
"IndexError",
",",
"ValueError",
")",
":",
"pass",
"return",
"(",
"major",
",",
"minor",
",",
"micro",
")"
] | Gets the version of gdb as a 3-tuple.
The gdb devs seem to think it's a good idea to make --version
output multiple lines of welcome text instead of just the actual version,
so we ignore everything it outputs after the first line.
Returns:
The installed version of gdb in the form
(<major>, <minor or None>, <micro or None>)
gdb 7.7 would hence show up as version (7,7) | [
"Gets",
"the",
"version",
"of",
"gdb",
"as",
"a",
"3",
"-",
"tuple",
"."
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/inferior.py#L229-L272 | train |
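
A quick sketch of the tolerant parsing strategy above: re.split('[^0-9]', ...) breaks each whitespace-separated token on every single non-digit character, so empty strings and letters show up among the parts, and the IndexError/ValueError guards simply skip anything that is not an integer. The helper below is an illustration, not pyringe code:

import re

def best_effort_ints(token):
    # Keep any part that parses as an int; silently skip the rest
    # (empty strings between adjacent non-digits, letters, ...).
    out = []
    for part in re.split('[^0-9]', token):
        try:
            out.append(int(part))
        except ValueError:
            pass
    return out

print(best_effort_ints('7.7'))           # [7, 7]
print(best_effort_ints('7.4.1-debian'))  # [7, 4, 1]
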
google/pyringe | pyringe/inferior.py | GdbProxy._JsonDecodeDict | def _JsonDecodeDict(self, data):
"""Json object decode hook that automatically converts unicode objects."""
rv = {}
for key, value in data.iteritems():
if isinstance(key, unicode):
key = self._TryStr(key)
if isinstance(value, unicode):
value = self._TryStr(value)
elif isinstance(value, list):
value = self._JsonDecodeList(value)
rv[key] = value
if '__pyringe_type_name__' in data:
# We're looking at a proxyobject
rv = ProxyObject(rv)
return rv | python | def _JsonDecodeDict(self, data):
"""Json object decode hook that automatically converts unicode objects."""
rv = {}
for key, value in data.iteritems():
if isinstance(key, unicode):
key = self._TryStr(key)
if isinstance(value, unicode):
value = self._TryStr(value)
elif isinstance(value, list):
value = self._JsonDecodeList(value)
rv[key] = value
if '__pyringe_type_name__' in data:
# We're looking at a proxyobject
rv = ProxyObject(rv)
return rv | [
"def",
"_JsonDecodeDict",
"(",
"self",
",",
"data",
")",
":",
"rv",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"data",
".",
"iteritems",
"(",
")",
":",
"if",
"isinstance",
"(",
"key",
",",
"unicode",
")",
":",
"key",
"=",
"self",
".",
"_TryStr",
"(",
"key",
")",
"if",
"isinstance",
"(",
"value",
",",
"unicode",
")",
":",
"value",
"=",
"self",
".",
"_TryStr",
"(",
"value",
")",
"elif",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"value",
"=",
"self",
".",
"_JsonDecodeList",
"(",
"value",
")",
"rv",
"[",
"key",
"]",
"=",
"value",
"if",
"'__pyringe_type_name__'",
"in",
"data",
":",
"# We're looking at a proxyobject",
"rv",
"=",
"ProxyObject",
"(",
"rv",
")",
"return",
"rv"
] | Json object decode hook that automatically converts unicode objects. | [
"Json",
"object",
"decode",
"hook",
"that",
"automatically",
"converts",
"unicode",
"objects",
"."
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/inferior.py#L305-L319 | train |
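
The hook above plugs into json.loads via object_hook, which hands every decoded JSON object (innermost first) to the callable and substitutes whatever it returns. A minimal self-contained sketch of the mechanism, with a tuple standing in for ProxyObject:

import json

def decode_dict(d):
    # Called once per JSON object, innermost first.
    if '__pyringe_type_name__' in d:
        return ('proxy', d)  # stand-in for ProxyObject(d)
    return d

doc = '{"a": 1, "b": {"__pyringe_type_name__": "Foo"}}'
print(json.loads(doc, object_hook=decode_dict))
# {'a': 1, 'b': ('proxy', {'__pyringe_type_name__': 'Foo'})}
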
google/pyringe | pyringe/inferior.py | GdbProxy._Execute | def _Execute(self, funcname, *args, **kwargs):
"""Send an RPC request to the gdb-internal python.
Blocks for 3 seconds by default and returns any results.
Args:
funcname: the name of the function to call.
*args: the function's arguments.
**kwargs: Only the key 'wait_for_completion' is inspected, which decides
whether to wait forever for completion or just 3 seconds.
Returns:
The result of the function call.
"""
wait_for_completion = kwargs.get('wait_for_completion', False)
rpc_dict = {'func': funcname, 'args': args}
self._Send(json.dumps(rpc_dict))
timeout = TIMEOUT_FOREVER if wait_for_completion else TIMEOUT_DEFAULT
result_string = self._Recv(timeout)
try:
result = json.loads(result_string, object_hook=self._JsonDecodeDict)
if isinstance(result, unicode):
result = self._TryStr(result)
elif isinstance(result, list):
result = self._JsonDecodeList(result)
except ValueError:
raise ValueError('Response JSON invalid: ' + str(result_string))
except TypeError:
raise ValueError('Response JSON invalid: ' + str(result_string))
return result | python | def _Execute(self, funcname, *args, **kwargs):
"""Send an RPC request to the gdb-internal python.
Blocks for 3 seconds by default and returns any results.
Args:
funcname: the name of the function to call.
*args: the function's arguments.
**kwargs: Only the key 'wait_for_completion' is inspected, which decides
whether to wait forever for completion or just 3 seconds.
Returns:
The result of the function call.
"""
wait_for_completion = kwargs.get('wait_for_completion', False)
rpc_dict = {'func': funcname, 'args': args}
self._Send(json.dumps(rpc_dict))
timeout = TIMEOUT_FOREVER if wait_for_completion else TIMEOUT_DEFAULT
result_string = self._Recv(timeout)
try:
result = json.loads(result_string, object_hook=self._JsonDecodeDict)
if isinstance(result, unicode):
result = self._TryStr(result)
elif isinstance(result, list):
result = self._JsonDecodeList(result)
except ValueError:
raise ValueError('Response JSON invalid: ' + str(result_string))
except TypeError:
raise ValueError('Response JSON invalid: ' + str(result_string))
return result | [
"def",
"_Execute",
"(",
"self",
",",
"funcname",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"wait_for_completion",
"=",
"kwargs",
".",
"get",
"(",
"'wait_for_completion'",
",",
"False",
")",
"rpc_dict",
"=",
"{",
"'func'",
":",
"funcname",
",",
"'args'",
":",
"args",
"}",
"self",
".",
"_Send",
"(",
"json",
".",
"dumps",
"(",
"rpc_dict",
")",
")",
"timeout",
"=",
"TIMEOUT_FOREVER",
"if",
"wait_for_completion",
"else",
"TIMEOUT_DEFAULT",
"result_string",
"=",
"self",
".",
"_Recv",
"(",
"timeout",
")",
"try",
":",
"result",
"=",
"json",
".",
"loads",
"(",
"result_string",
",",
"object_hook",
"=",
"self",
".",
"_JsonDecodeDict",
")",
"if",
"isinstance",
"(",
"result",
",",
"unicode",
")",
":",
"result",
"=",
"self",
".",
"_TryStr",
"(",
"result",
")",
"elif",
"isinstance",
"(",
"result",
",",
"list",
")",
":",
"result",
"=",
"self",
".",
"_JsonDecodeList",
"(",
"result",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"'Response JSON invalid: '",
"+",
"str",
"(",
"result_string",
")",
")",
"except",
"TypeError",
":",
"raise",
"ValueError",
"(",
"'Response JSON invalid: '",
"+",
"str",
"(",
"result_string",
")",
")",
"return",
"result"
] | Send an RPC request to the gdb-internal python.
Blocks for 3 seconds by default and returns any results.
Args:
funcname: the name of the function to call.
*args: the function's arguments.
**kwargs: Only the key 'wait_for_completion' is inspected, which decides
whether to wait forever for completion or just 3 seconds.
Returns:
The result of the function call. | [
"Send",
"an",
"RPC",
"request",
"to",
"the",
"gdb",
"-",
"internal",
"python",
"."
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/inferior.py#L325-L355 | train |
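
For reference, the request half of the wire protocol above is just a JSON object with 'func' and 'args' keys; a call such as _Execute('__kill__') would serialize like this (sketch):

import json

rpc_dict = {'func': '__kill__', 'args': ()}
print(json.dumps(rpc_dict))  # {"func": "__kill__", "args": []}
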
google/pyringe | pyringe/inferior.py | GdbProxy._Recv | def _Recv(self, timeout):
"""Receive output from gdb.
This reads gdb's stdout and stderr streams, returns a single line of gdb's
stdout or rethrows any exceptions thrown from within gdb as well as it can.
Args:
timeout: floating point number of seconds after which to abort.
A value of None or TIMEOUT_FOREVER means "there is no timeout", i.e.
this might block forever.
Raises:
ProxyError: All exceptions received from the gdb service are generically
reraised as this.
TimeoutError: Raised if no answer is received from gdb within the
specified time.
Returns:
The current contents of gdb's stdout buffer, read until the next newline,
or `None`, should the read fail or timeout.
"""
buf = ''
# The messiness of this stems from the "duck-typiness" of this function.
# The timeout parameter of poll has different semantics depending on whether
# it's <=0, >0, or None. Yay.
wait_for_line = timeout is TIMEOUT_FOREVER
deadline = time.time() + (timeout if not wait_for_line else 0)
def TimeLeft():
return max(1000 * (deadline - time.time()), 0)
continue_reading = True
while continue_reading:
poll_timeout = None if wait_for_line else TimeLeft()
fd_list = [event[0] for event in self._poller.poll(poll_timeout)
if event[1] & (select.POLLIN | select.POLLPRI)]
if not wait_for_line and TimeLeft() == 0:
continue_reading = False
if self._outfile_r.fileno() in fd_list:
buf += self._outfile_r.readline()
if buf.endswith('\n'):
return buf
# GDB-internal exception passing
if self._errfile_r.fileno() in fd_list:
exc = self._errfile_r.readline()
if exc:
exc_text = '\n-----------------------------------\n'
exc_text += 'Error occurred within GdbService:\n'
try:
exc_text += json.loads(exc)
except ValueError:
# whatever we got back wasn't valid JSON.
# This usually means we've run into an exception before the special
# exception handling was turned on. The first line we read up there
# will have been "Traceback (most recent call last):". Obviously, we
# want the rest, too, so we wait a bit and read it.
deadline = time.time() + 0.5
while self.is_running and TimeLeft() > 0:
exc += self._errfile_r.read()
try:
exc_text += json.loads(exc)
except ValueError:
exc_text = exc
raise ProxyError(exc_text)
# timeout
raise TimeoutError() | python | def _Recv(self, timeout):
"""Receive output from gdb.
This reads gdb's stdout and stderr streams, returns a single line of gdb's
stdout or rethrows any exceptions thrown from within gdb as well as it can.
Args:
timeout: floating point number of seconds after which to abort.
A value of None or TIMEOUT_FOREVER means "there is no timeout", i.e.
this might block forever.
Raises:
ProxyError: All exceptions received from the gdb service are generically
reraised as this.
TimeoutError: Raised if no answer is received from gdb within the
specified time.
Returns:
The current contents of gdb's stdout buffer, read until the next newline,
or `None`, should the read fail or timeout.
"""
buf = ''
# The messiness of this stems from the "duck-typiness" of this function.
# The timeout parameter of poll has different semantics depending on whether
# it's <=0, >0, or None. Yay.
wait_for_line = timeout is TIMEOUT_FOREVER
deadline = time.time() + (timeout if not wait_for_line else 0)
def TimeLeft():
return max(1000 * (deadline - time.time()), 0)
continue_reading = True
while continue_reading:
poll_timeout = None if wait_for_line else TimeLeft()
fd_list = [event[0] for event in self._poller.poll(poll_timeout)
if event[1] & (select.POLLIN | select.POLLPRI)]
if not wait_for_line and TimeLeft() == 0:
continue_reading = False
if self._outfile_r.fileno() in fd_list:
buf += self._outfile_r.readline()
if buf.endswith('\n'):
return buf
# GDB-internal exception passing
if self._errfile_r.fileno() in fd_list:
exc = self._errfile_r.readline()
if exc:
exc_text = '\n-----------------------------------\n'
exc_text += 'Error occurred within GdbService:\n'
try:
exc_text += json.loads(exc)
except ValueError:
# whatever we got back wasn't valid JSON.
# This usually means we've run into an exception before the special
# exception handling was turned on. The first line we read up there
# will have been "Traceback (most recent call last):". Obviously, we
# want the rest, too, so we wait a bit and read it.
deadline = time.time() + 0.5
while self.is_running and TimeLeft() > 0:
exc += self._errfile_r.read()
try:
exc_text += json.loads(exc)
except ValueError:
exc_text = exc
raise ProxyError(exc_text)
# timeout
raise TimeoutError() | [
"def",
"_Recv",
"(",
"self",
",",
"timeout",
")",
":",
"buf",
"=",
"''",
"# The messiness of this stems from the \"duck-typiness\" of this function.",
"# The timeout parameter of poll has different semantics depending on whether",
"# it's <=0, >0, or None. Yay.",
"wait_for_line",
"=",
"timeout",
"is",
"TIMEOUT_FOREVER",
"deadline",
"=",
"time",
".",
"time",
"(",
")",
"+",
"(",
"timeout",
"if",
"not",
"wait_for_line",
"else",
"0",
")",
"def",
"TimeLeft",
"(",
")",
":",
"return",
"max",
"(",
"1000",
"*",
"(",
"deadline",
"-",
"time",
".",
"time",
"(",
")",
")",
",",
"0",
")",
"continue_reading",
"=",
"True",
"while",
"continue_reading",
":",
"poll_timeout",
"=",
"None",
"if",
"wait_for_line",
"else",
"TimeLeft",
"(",
")",
"fd_list",
"=",
"[",
"event",
"[",
"0",
"]",
"for",
"event",
"in",
"self",
".",
"_poller",
".",
"poll",
"(",
"poll_timeout",
")",
"if",
"event",
"[",
"1",
"]",
"&",
"(",
"select",
".",
"POLLIN",
"|",
"select",
".",
"POLLPRI",
")",
"]",
"if",
"not",
"wait_for_line",
"and",
"TimeLeft",
"(",
")",
"==",
"0",
":",
"continue_reading",
"=",
"False",
"if",
"self",
".",
"_outfile_r",
".",
"fileno",
"(",
")",
"in",
"fd_list",
":",
"buf",
"+=",
"self",
".",
"_outfile_r",
".",
"readline",
"(",
")",
"if",
"buf",
".",
"endswith",
"(",
"'\\n'",
")",
":",
"return",
"buf",
"# GDB-internal exception passing",
"if",
"self",
".",
"_errfile_r",
".",
"fileno",
"(",
")",
"in",
"fd_list",
":",
"exc",
"=",
"self",
".",
"_errfile_r",
".",
"readline",
"(",
")",
"if",
"exc",
":",
"exc_text",
"=",
"'\\n-----------------------------------\\n'",
"exc_text",
"+=",
"'Error occurred within GdbService:\\n'",
"try",
":",
"exc_text",
"+=",
"json",
".",
"loads",
"(",
"exc",
")",
"except",
"ValueError",
":",
"# whatever we got back wasn't valid JSON.",
"# This usually means we've run into an exception before the special",
"# exception handling was turned on. The first line we read up there",
"# will have been \"Traceback (most recent call last):\". Obviously, we",
"# want the rest, too, so we wait a bit and read it.",
"deadline",
"=",
"time",
".",
"time",
"(",
")",
"+",
"0.5",
"while",
"self",
".",
"is_running",
"and",
"TimeLeft",
"(",
")",
">",
"0",
":",
"exc",
"+=",
"self",
".",
"_errfile_r",
".",
"read",
"(",
")",
"try",
":",
"exc_text",
"+=",
"json",
".",
"loads",
"(",
"exc",
")",
"except",
"ValueError",
":",
"exc_text",
"=",
"exc",
"raise",
"ProxyError",
"(",
"exc_text",
")",
"# timeout",
"raise",
"TimeoutError",
"(",
")"
] | Receive output from gdb.
This reads gdb's stdout and stderr streams, returns a single line of gdb's
stdout or rethrows any exceptions thrown from within gdb as well as it can.
Args:
timeout: floating point number of seconds after which to abort.
A value of None or TIMEOUT_FOREVER means "there is no timeout", i.e.
this might block forever.
Raises:
ProxyError: All exceptions received from the gdb service are generically
reraised as this.
TimeoutError: Raised if no answer is received from gdb within the
specified time.
Returns:
The current contents of gdb's stdout buffer, read until the next newline,
or `None`, should the read fail or timeout. | [
"Receive",
"output",
"from",
"gdb",
"."
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/inferior.py#L361-L429 | train |
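
The "duck-typiness" the comment above complains about is the millisecond timeout of select.poll: None blocks indefinitely, 0 returns immediately, and a positive value waits at most that long. A tiny Unix-only sketch:

import select

poller = select.poll()
print(poller.poll(0))    # [] -- non-blocking; nothing is registered
print(poller.poll(100))  # [] -- waits up to 100 ms, then gives up
# poller.poll(None) would block until a registered fd becomes ready.
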
google/pyringe | pyringe/inferior.py | Inferior.needsattached | def needsattached(func):
"""Decorator to prevent commands from being used when not attached."""
@functools.wraps(func)
def wrap(self, *args, **kwargs):
if not self.attached:
raise PositionError('Not attached to any process.')
return func(self, *args, **kwargs)
return wrap | python | def needsattached(func):
"""Decorator to prevent commands from being used when not attached."""
@functools.wraps(func)
def wrap(self, *args, **kwargs):
if not self.attached:
raise PositionError('Not attached to any process.')
return func(self, *args, **kwargs)
return wrap | [
"def",
"needsattached",
"(",
"func",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"wrap",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"attached",
":",
"raise",
"PositionError",
"(",
"'Not attached to any process.'",
")",
"return",
"func",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrap"
] | Decorator to prevent commands from being used when not attached. | [
"Decorator",
"to",
"prevent",
"commands",
"from",
"being",
"used",
"when",
"not",
"attached",
"."
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/inferior.py#L458-L466 | train |
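
Usage sketch of the guard above, with stand-in definitions so it runs on its own; functools.wraps keeps the wrapped method's name and docstring intact:

import functools

class PositionError(Exception):
    pass

def needsattached(func):  # stand-in copy of the decorator above
    @functools.wraps(func)
    def wrap(self, *args, **kwargs):
        if not self.attached:
            raise PositionError('Not attached to any process.')
        return func(self, *args, **kwargs)
    return wrap

class Demo(object):
    attached = False

    @needsattached
    def Backtrace(self):
        return 'talking to gdb'

try:
    Demo().Backtrace()
except PositionError as err:
    print(err)  # Not attached to any process.
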
google/pyringe | pyringe/inferior.py | Inferior.Reinit | def Reinit(self, pid, auto_symfile_loading=True):
"""Reinitializes the object with a new pid.
Since all modes might need access to this object at any time, this object
needs to be long-lived. To make this clear in the API, this shorthand is
supplied.
Args:
pid: the pid of the target process
auto_symfile_loading: whether the symbol file should automatically be
loaded by gdb.
"""
self.ShutDownGdb()
self.__init__(pid, auto_symfile_loading, architecture=self.arch) | python | def Reinit(self, pid, auto_symfile_loading=True):
"""Reinitializes the object with a new pid.
Since all modes might need access to this object at any time, this object
needs to be long-lived. To make this clear in the API, this shorthand is
supplied.
Args:
pid: the pid of the target process
auto_symfile_loading: whether the symbol file should automatically be
loaded by gdb.
"""
self.ShutDownGdb()
self.__init__(pid, auto_symfile_loading, architecture=self.arch) | [
"def",
"Reinit",
"(",
"self",
",",
"pid",
",",
"auto_symfile_loading",
"=",
"True",
")",
":",
"self",
".",
"ShutDownGdb",
"(",
")",
"self",
".",
"__init__",
"(",
"pid",
",",
"auto_symfile_loading",
",",
"architecture",
"=",
"self",
".",
"arch",
")"
] | Reinitializes the object with a new pid.
Since all modes might need access to this object at any time, this object
needs to be long-lived. To make this clear in the API, this shorthand is
supplied.
Args:
pid: the pid of the target process
auto_symfile_loading: whether the symbol file should automatically be
loaded by gdb. | [
"Reinitializes",
"the",
"object",
"with",
"a",
"new",
"pid",
"."
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/inferior.py#L472-L484 | train |
google/pyringe | pyringe/plugins/inject.py | InjectPlugin.InjectString | def InjectString(self, codestring, wait_for_completion=True):
"""Try to inject python code into current thread.
Args:
codestring: Python snippet to execute in inferior. (may contain newlines)
wait_for_completion: Block until execution of snippet has completed.
"""
if self.inferior.is_running and self.inferior.gdb.IsAttached():
try:
self.inferior.gdb.InjectString(
self.inferior.position,
codestring,
wait_for_completion=wait_for_completion)
except RuntimeError:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback)
else:
logging.error('Not attached to any process.') | python | def InjectString(self, codestring, wait_for_completion=True):
"""Try to inject python code into current thread.
Args:
codestring: Python snippet to execute in inferior. (may contain newlines)
wait_for_completion: Block until execution of snippet has completed.
"""
if self.inferior.is_running and self.inferior.gdb.IsAttached():
try:
self.inferior.gdb.InjectString(
self.inferior.position,
codestring,
wait_for_completion=wait_for_completion)
except RuntimeError:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback)
else:
logging.error('Not attached to any process.') | [
"def",
"InjectString",
"(",
"self",
",",
"codestring",
",",
"wait_for_completion",
"=",
"True",
")",
":",
"if",
"self",
".",
"inferior",
".",
"is_running",
"and",
"self",
".",
"inferior",
".",
"gdb",
".",
"IsAttached",
"(",
")",
":",
"try",
":",
"self",
".",
"inferior",
".",
"gdb",
".",
"InjectString",
"(",
"self",
".",
"inferior",
".",
"position",
",",
"codestring",
",",
"wait_for_completion",
"=",
"wait_for_completion",
")",
"except",
"RuntimeError",
":",
"exc_type",
",",
"exc_value",
",",
"exc_traceback",
"=",
"sys",
".",
"exc_info",
"(",
")",
"traceback",
".",
"print_exception",
"(",
"exc_type",
",",
"exc_value",
",",
"exc_traceback",
")",
"else",
":",
"logging",
".",
"error",
"(",
"'Not attached to any process.'",
")"
] | Try to inject python code into current thread.
Args:
codestring: Python snippet to execute in inferior. (may contain newlines)
wait_for_completion: Block until execution of snippet has completed. | [
"Try",
"to",
"inject",
"python",
"code",
"into",
"current",
"thread",
"."
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/plugins/inject.py#L40-L57 | train |
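
The except branch above uses the classic sys.exc_info() triple with traceback.print_exception to report the failure without unwinding the REPL; a self-contained sketch of that idiom:

import sys
import traceback

try:
    raise RuntimeError('injection failed (demo)')
except RuntimeError:
    exc_type, exc_value, exc_tb = sys.exc_info()
    traceback.print_exception(exc_type, exc_value, exc_tb)
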
google/pyringe | pyringe/payload/libpython.py | PyObjectPtr.field | def field(self, name):
'''
Get the gdb.Value for the given field within the PyObject, coping with
some python 2 versus python 3 differences.
Various libpython types are defined using the "PyObject_HEAD" and
"PyObject_VAR_HEAD" macros.
In Python 2, these are defined so that "ob_type" and (for a var
object) "ob_size" are fields of the type in question.
In Python 3, this is defined as an embedded PyVarObject type thus:
PyVarObject ob_base;
so that the "ob_size" field is located inside the "ob_base" field, and
the "ob_type" is most easily accessed by casting back to a (PyObject*).
'''
if self.is_null():
raise NullPyObjectPtr(self)
if name == 'ob_type':
pyo_ptr = self._gdbval.cast(PyObjectPtr.get_gdb_type())
return pyo_ptr.dereference()[name]
if name == 'ob_size':
try:
# Python 2:
return self._gdbval.dereference()[name]
except RuntimeError:
# Python 3:
return self._gdbval.dereference()['ob_base'][name]
# General case: look it up inside the object:
return self._gdbval.dereference()[name] | python | def field(self, name):
'''
Get the gdb.Value for the given field within the PyObject, coping with
some python 2 versus python 3 differences.
Various libpython types are defined using the "PyObject_HEAD" and
"PyObject_VAR_HEAD" macros.
In Python 2, these are defined so that "ob_type" and (for a var
object) "ob_size" are fields of the type in question.
In Python 3, this is defined as an embedded PyVarObject type thus:
PyVarObject ob_base;
so that the "ob_size" field is located inside the "ob_base" field, and
the "ob_type" is most easily accessed by casting back to a (PyObject*).
'''
if self.is_null():
raise NullPyObjectPtr(self)
if name == 'ob_type':
pyo_ptr = self._gdbval.cast(PyObjectPtr.get_gdb_type())
return pyo_ptr.dereference()[name]
if name == 'ob_size':
try:
# Python 2:
return self._gdbval.dereference()[name]
except RuntimeError:
# Python 3:
return self._gdbval.dereference()['ob_base'][name]
# General case: look it up inside the object:
return self._gdbval.dereference()[name] | [
"def",
"field",
"(",
"self",
",",
"name",
")",
":",
"if",
"self",
".",
"is_null",
"(",
")",
":",
"raise",
"NullPyObjectPtr",
"(",
"self",
")",
"if",
"name",
"==",
"'ob_type'",
":",
"pyo_ptr",
"=",
"self",
".",
"_gdbval",
".",
"cast",
"(",
"PyObjectPtr",
".",
"get_gdb_type",
"(",
")",
")",
"return",
"pyo_ptr",
".",
"dereference",
"(",
")",
"[",
"name",
"]",
"if",
"name",
"==",
"'ob_size'",
":",
"try",
":",
"# Python 2:",
"return",
"self",
".",
"_gdbval",
".",
"dereference",
"(",
")",
"[",
"name",
"]",
"except",
"RuntimeError",
":",
"# Python 3:",
"return",
"self",
".",
"_gdbval",
".",
"dereference",
"(",
")",
"[",
"'ob_base'",
"]",
"[",
"name",
"]",
"# General case: look it up inside the object:",
"return",
"self",
".",
"_gdbval",
".",
"dereference",
"(",
")",
"[",
"name",
"]"
] | Get the gdb.Value for the given field within the PyObject, coping with
some python 2 versus python 3 differences.
Various libpython types are defined using the "PyObject_HEAD" and
"PyObject_VAR_HEAD" macros.
In Python 2, these are defined so that "ob_type" and (for a var
object) "ob_size" are fields of the type in question.
In Python 3, this is defined as an embedded PyVarObject type thus:
PyVarObject ob_base;
so that the "ob_size" field is located inside the "ob_base" field, and
the "ob_type" is most easily accessed by casting back to a (PyObject*). | [
"Get",
"the",
"gdb",
".",
"Value",
"for",
"the",
"given",
"field",
"within",
"the",
"PyObject",
"coping",
"with",
"some",
"python",
"2",
"versus",
"python",
"3",
"differences",
"."
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/libpython.py#L131-L163 | train |
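
The ob_size branch above is plain EAFP: try the flat Python 2 layout first, then fall back to the ob_base-nested Python 3 layout. A generic sketch of the pattern, with dicts standing in for gdb.Value dereferences (the real code catches RuntimeError from gdb rather than KeyError):

def lookup(record, name):
    try:
        return record[name]              # Python-2-style flat layout
    except KeyError:
        return record['ob_base'][name]   # Python-3-style nested layout

py2_obj = {'ob_size': 3}
py3_obj = {'ob_base': {'ob_size': 3}}
print(lookup(py2_obj, 'ob_size'), lookup(py3_obj, 'ob_size'))  # 3 3
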
google/pyringe | pyringe/payload/libpython.py | PyObjectPtr.write_repr | def write_repr(self, out, visited):
'''
Write a string representation of the value scraped from the inferior
process to "out", a file-like object.
'''
# Default implementation: generate a proxy value and write its repr
# However, this could involve a lot of work for complicated objects,
# so for derived classes we specialize this
return out.write(repr(self.proxyval(visited))) | python | def write_repr(self, out, visited):
'''
Write a string representation of the value scraped from the inferior
process to "out", a file-like object.
'''
# Default implementation: generate a proxy value and write its repr
# However, this could involve a lot of work for complicated objects,
# so for derived classes we specialize this
return out.write(repr(self.proxyval(visited))) | [
"def",
"write_repr",
"(",
"self",
",",
"out",
",",
"visited",
")",
":",
"# Default implementation: generate a proxy value and write its repr",
"# However, this could involve a lot of work for complicated objects,",
"# so for derived classes we specialize this",
"return",
"out",
".",
"write",
"(",
"repr",
"(",
"self",
".",
"proxyval",
"(",
"visited",
")",
")",
")"
] | Write a string representation of the value scraped from the inferior
process to "out", a file-like object. | [
"Write",
"a",
"string",
"representation",
"of",
"the",
"value",
"scraped",
"from",
"the",
"inferior",
"process",
"to",
"out",
"a",
"file",
"-",
"like",
"object",
"."
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/libpython.py#L262-L270 | train |
google/pyringe | pyringe/payload/libpython.py | PyObjectPtr.from_pyobject_ptr | def from_pyobject_ptr(cls, gdbval):
'''
Try to locate the appropriate derived class dynamically, and cast
the pointer accordingly.
'''
try:
p = PyObjectPtr(gdbval)
cls = cls.subclass_from_type(p.type())
return cls(gdbval, cast_to=cls.get_gdb_type())
except RuntimeError:
# Handle any kind of error e.g. NULL ptrs by simply using the base
# class
pass
return cls(gdbval) | python | def from_pyobject_ptr(cls, gdbval):
'''
Try to locate the appropriate derived class dynamically, and cast
the pointer accordingly.
'''
try:
p = PyObjectPtr(gdbval)
cls = cls.subclass_from_type(p.type())
return cls(gdbval, cast_to=cls.get_gdb_type())
except RuntimeError:
# Handle any kind of error e.g. NULL ptrs by simply using the base
# class
pass
return cls(gdbval) | [
"def",
"from_pyobject_ptr",
"(",
"cls",
",",
"gdbval",
")",
":",
"try",
":",
"p",
"=",
"PyObjectPtr",
"(",
"gdbval",
")",
"cls",
"=",
"cls",
".",
"subclass_from_type",
"(",
"p",
".",
"type",
"(",
")",
")",
"return",
"cls",
"(",
"gdbval",
",",
"cast_to",
"=",
"cls",
".",
"get_gdb_type",
"(",
")",
")",
"except",
"RuntimeError",
":",
"# Handle any kind of error e.g. NULL ptrs by simply using the base",
"# class",
"pass",
"return",
"cls",
"(",
"gdbval",
")"
] | Try to locate the appropriate derived class dynamically, and cast
the pointer accordingly. | [
"Try",
"to",
"locate",
"the",
"appropriate",
"derived",
"class",
"dynamically",
"and",
"cast",
"the",
"pointer",
"accordingly",
"."
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/libpython.py#L340-L353 | train |
google/pyringe | pyringe/payload/libpython.py | HeapTypeObjectPtr.proxyval | def proxyval(self, visited):
'''
Support for new-style classes.
Currently we just locate the dictionary using a transliteration to
python of _PyObject_GetDictPtr, ignoring descriptors
'''
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('<...>')
visited.add(self.as_address())
pyop_attr_dict = self.get_attr_dict()
if pyop_attr_dict:
attr_dict = pyop_attr_dict.proxyval(visited)
else:
attr_dict = {}
tp_name = self.safe_tp_name()
# New-style class:
return InstanceProxy(tp_name, attr_dict, long(self._gdbval)) | python | def proxyval(self, visited):
'''
Support for new-style classes.
Currently we just locate the dictionary using a transliteration to
python of _PyObject_GetDictPtr, ignoring descriptors
'''
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('<...>')
visited.add(self.as_address())
pyop_attr_dict = self.get_attr_dict()
if pyop_attr_dict:
attr_dict = pyop_attr_dict.proxyval(visited)
else:
attr_dict = {}
tp_name = self.safe_tp_name()
# New-style class:
return InstanceProxy(tp_name, attr_dict, long(self._gdbval)) | [
"def",
"proxyval",
"(",
"self",
",",
"visited",
")",
":",
"# Guard against infinite loops:",
"if",
"self",
".",
"as_address",
"(",
")",
"in",
"visited",
":",
"return",
"ProxyAlreadyVisited",
"(",
"'<...>'",
")",
"visited",
".",
"add",
"(",
"self",
".",
"as_address",
"(",
")",
")",
"pyop_attr_dict",
"=",
"self",
".",
"get_attr_dict",
"(",
")",
"if",
"pyop_attr_dict",
":",
"attr_dict",
"=",
"pyop_attr_dict",
".",
"proxyval",
"(",
"visited",
")",
"else",
":",
"attr_dict",
"=",
"{",
"}",
"tp_name",
"=",
"self",
".",
"safe_tp_name",
"(",
")",
"# New-style class:",
"return",
"InstanceProxy",
"(",
"tp_name",
",",
"attr_dict",
",",
"long",
"(",
"self",
".",
"_gdbval",
")",
")"
] | Support for new-style classes.
Currently we just locate the dictionary using a transliteration to
python of _PyObject_GetDictPtr, ignoring descriptors | [
"Support",
"for",
"new",
"-",
"style",
"classes",
"."
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/libpython.py#L459-L479 | train |
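
The as_address()/visited bookkeeping above is the standard guard for walking possibly-cyclic object graphs. A minimal sketch of the same idea over plain Python objects:

def walk(obj, visited=None):
    if visited is None:
        visited = set()
    if id(obj) in visited:
        return '<...>'  # same placeholder ProxyAlreadyVisited carries
    visited.add(id(obj))
    if isinstance(obj, dict):
        return {k: walk(v, visited) for k, v in obj.items()}
    return obj

d = {'name': 'demo'}
d['self'] = d       # introduce a cycle
print(walk(d))      # {'name': 'demo', 'self': '<...>'}
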
google/pyringe | pyringe/payload/libpython.py | PyCodeObjectPtr.addr2line | def addr2line(self, addrq):
'''
Get the line number for a given bytecode offset
Analogous to PyCode_Addr2Line; translated from pseudocode in
Objects/lnotab_notes.txt
'''
co_lnotab = self.pyop_field('co_lnotab').proxyval(set())
# Initialize lineno to co_firstlineno as per PyCode_Addr2Line
# not 0, as lnotab_notes.txt has it:
lineno = int_from_int(self.field('co_firstlineno'))
addr = 0
for addr_incr, line_incr in zip(co_lnotab[::2], co_lnotab[1::2]):
addr += ord(addr_incr)
if addr > addrq:
return lineno
lineno += ord(line_incr)
return lineno | python | def addr2line(self, addrq):
'''
Get the line number for a given bytecode offset
Analogous to PyCode_Addr2Line; translated from pseudocode in
Objects/lnotab_notes.txt
'''
co_lnotab = self.pyop_field('co_lnotab').proxyval(set())
# Initialize lineno to co_firstlineno as per PyCode_Addr2Line
# not 0, as lnotab_notes.txt has it:
lineno = int_from_int(self.field('co_firstlineno'))
addr = 0
for addr_incr, line_incr in zip(co_lnotab[::2], co_lnotab[1::2]):
addr += ord(addr_incr)
if addr > addrq:
return lineno
lineno += ord(line_incr)
return lineno | [
"def",
"addr2line",
"(",
"self",
",",
"addrq",
")",
":",
"co_lnotab",
"=",
"self",
".",
"pyop_field",
"(",
"'co_lnotab'",
")",
".",
"proxyval",
"(",
"set",
"(",
")",
")",
"# Initialize lineno to co_firstlineno as per PyCode_Addr2Line",
"# not 0, as lnotab_notes.txt has it:",
"lineno",
"=",
"int_from_int",
"(",
"self",
".",
"field",
"(",
"'co_firstlineno'",
")",
")",
"addr",
"=",
"0",
"for",
"addr_incr",
",",
"line_incr",
"in",
"zip",
"(",
"co_lnotab",
"[",
":",
":",
"2",
"]",
",",
"co_lnotab",
"[",
"1",
":",
":",
"2",
"]",
")",
":",
"addr",
"+=",
"ord",
"(",
"addr_incr",
")",
"if",
"addr",
">",
"addrq",
":",
"return",
"lineno",
"lineno",
"+=",
"ord",
"(",
"line_incr",
")",
"return",
"lineno"
] | Get the line number for a given bytecode offset
Analogous to PyCode_Addr2Line; translated from pseudocode in
Objects/lnotab_notes.txt | [
"Get",
"the",
"line",
"number",
"for",
"a",
"given",
"bytecode",
"offset"
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/libpython.py#L592-L611 | train |
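
A worked, self-contained version of the lnotab walk above, using plain integer pairs instead of a co_lnotab string scraped from the inferior (this assumes the pre-3.6 lnotab encoding described in Objects/lnotab_notes.txt):

def addr2line(firstlineno, lnotab_pairs, addrq):
    lineno = firstlineno
    addr = 0
    for addr_incr, line_incr in lnotab_pairs:
        addr += addr_incr
        if addr > addrq:
            return lineno
        lineno += line_incr
    return lineno

# Bytecode offsets 0-5 map to line 10, offsets 6-11 to line 11:
print(addr2line(10, [(6, 1), (6, 1)], 4))  # 10
print(addr2line(10, [(6, 1), (6, 1)], 8))  # 11
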
google/pyringe | pyringe/payload/libpython.py | PyFrameObjectPtr.current_line | def current_line(self):
'''Get the text of the current source line as a string, with a trailing
newline character'''
if self.is_optimized_out():
return '(frame information optimized out)'
with open(self.filename(), 'r') as f:
all_lines = f.readlines()
# Convert from 1-based current_line_num to 0-based list offset:
return all_lines[self.current_line_num()-1] | python | def current_line(self):
'''Get the text of the current source line as a string, with a trailing
newline character'''
if self.is_optimized_out():
return '(frame information optimized out)'
with open(self.filename(), 'r') as f:
all_lines = f.readlines()
# Convert from 1-based current_line_num to 0-based list offset:
return all_lines[self.current_line_num()-1] | [
"def",
"current_line",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_optimized_out",
"(",
")",
":",
"return",
"'(frame information optimized out)'",
"with",
"open",
"(",
"self",
".",
"filename",
"(",
")",
",",
"'r'",
")",
"as",
"f",
":",
"all_lines",
"=",
"f",
".",
"readlines",
"(",
")",
"# Convert from 1-based current_line_num to 0-based list offset:",
"return",
"all_lines",
"[",
"self",
".",
"current_line_num",
"(",
")",
"-",
"1",
"]"
] | Get the text of the current source line as a string, with a trailing
newline character | [
"Get",
"the",
"text",
"of",
"the",
"current",
"source",
"line",
"as",
"a",
"string",
"with",
"a",
"trailing",
"newline",
"character"
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/libpython.py#L889-L897 | train |
google/pyringe | pyringe/payload/libpython.py | Frame.select | def select(self):
'''If supported, select this frame and return True; return False if unsupported
Not all builds have a gdb.Frame.select method; seems to be present on Fedora 12
onwards, but absent on Ubuntu buildbot'''
if not hasattr(self._gdbframe, 'select'):
print ('Unable to select frame: '
'this build of gdb does not expose a gdb.Frame.select method')
return False
self._gdbframe.select()
return True | python | def select(self):
'''If supported, select this frame and return True; return False if unsupported
Not all builds have a gdb.Frame.select method; seems to be present on Fedora 12
onwards, but absent on Ubuntu buildbot'''
if not hasattr(self._gdbframe, 'select'):
print ('Unable to select frame: '
'this build of gdb does not expose a gdb.Frame.select method')
return False
self._gdbframe.select()
return True | [
"def",
"select",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
".",
"_gdbframe",
",",
"'select'",
")",
":",
"print",
"(",
"'Unable to select frame: '",
"'this build of gdb does not expose a gdb.Frame.select method'",
")",
"return",
"False",
"self",
".",
"_gdbframe",
".",
"select",
"(",
")",
"return",
"True"
] | If supported, select this frame and return True; return False if unsupported
Not all builds have a gdb.Frame.select method; seems to be present on Fedora 12
onwards, but absent on Ubuntu buildbot | [
"If",
"supported",
"select",
"this",
"frame",
"and",
"return",
"True",
";",
"return",
"False",
"if",
"unsupported"
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/libpython.py#L1173-L1183 | train |
google/pyringe | pyringe/payload/libpython.py | Frame.get_index | def get_index(self):
'''Calculate index of frame, starting at 0 for the newest frame within
this thread'''
index = 0
# Go down until you reach the newest frame:
iter_frame = self
while iter_frame.newer():
index += 1
iter_frame = iter_frame.newer()
return index | python | def get_index(self):
'''Calculate index of frame, starting at 0 for the newest frame within
this thread'''
index = 0
# Go down until you reach the newest frame:
iter_frame = self
while iter_frame.newer():
index += 1
iter_frame = iter_frame.newer()
return index | [
"def",
"get_index",
"(",
"self",
")",
":",
"index",
"=",
"0",
"# Go down until you reach the newest frame:",
"iter_frame",
"=",
"self",
"while",
"iter_frame",
".",
"newer",
"(",
")",
":",
"index",
"+=",
"1",
"iter_frame",
"=",
"iter_frame",
".",
"newer",
"(",
")",
"return",
"index"
] | Calculate index of frame, starting at 0 for the newest frame within
this thread | [
"Calculate",
"index",
"of",
"frame",
"starting",
"at",
"0",
"for",
"the",
"newest",
"frame",
"within",
"this",
"thread"
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/libpython.py#L1185-L1194 | train |
google/pyringe | pyringe/payload/libpython.py | Frame.is_evalframeex | def is_evalframeex(self):
'''Is this a PyEval_EvalFrameEx frame?'''
if self._gdbframe.name() == 'PyEval_EvalFrameEx':
'''
I believe we also need to filter on the inline
struct frame_id.inline_depth, only regarding frames with
an inline depth of 0 as actually being this function
So we reject those with type gdb.INLINE_FRAME
'''
if self._gdbframe.type() == gdb.NORMAL_FRAME:
# We have a PyEval_EvalFrameEx frame:
return True
return False | python | def is_evalframeex(self):
'''Is this a PyEval_EvalFrameEx frame?'''
if self._gdbframe.name() == 'PyEval_EvalFrameEx':
'''
I believe we also need to filter on the inline
struct frame_id.inline_depth, only regarding frames with
an inline depth of 0 as actually being this function
So we reject those with type gdb.INLINE_FRAME
'''
if self._gdbframe.type() == gdb.NORMAL_FRAME:
# We have a PyEval_EvalFrameEx frame:
return True
return False | [
"def",
"is_evalframeex",
"(",
"self",
")",
":",
"if",
"self",
".",
"_gdbframe",
".",
"name",
"(",
")",
"==",
"'PyEval_EvalFrameEx'",
":",
"'''\n I believe we also need to filter on the inline\n struct frame_id.inline_depth, only regarding frames with\n an inline depth of 0 as actually being this function\n\n So we reject those with type gdb.INLINE_FRAME\n '''",
"if",
"self",
".",
"_gdbframe",
".",
"type",
"(",
")",
"==",
"gdb",
".",
"NORMAL_FRAME",
":",
"# We have a PyEval_EvalFrameEx frame:",
"return",
"True",
"return",
"False"
] | Is this a PyEval_EvalFrameEx frame? | [
"Is",
"this",
"a",
"PyEval_EvalFrameEx",
"frame?"
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/libpython.py#L1196-L1210 | train |
google/pyringe | pyringe/payload/libpython.py | Frame.get_selected_python_frame | def get_selected_python_frame(cls):
'''Try to obtain the Frame for the python code in the selected frame,
or None'''
frame = cls.get_selected_frame()
while frame:
if frame.is_evalframeex():
return frame
frame = frame.older()
# Not found:
return None | python | def get_selected_python_frame(cls):
'''Try to obtain the Frame for the python code in the selected frame,
or None'''
frame = cls.get_selected_frame()
while frame:
if frame.is_evalframeex():
return frame
frame = frame.older()
# Not found:
return None | [
"def",
"get_selected_python_frame",
"(",
"cls",
")",
":",
"frame",
"=",
"cls",
".",
"get_selected_frame",
"(",
")",
"while",
"frame",
":",
"if",
"frame",
".",
"is_evalframeex",
"(",
")",
":",
"return",
"frame",
"frame",
"=",
"frame",
".",
"older",
"(",
")",
"# Not found:",
"return",
"None"
] | Try to obtain the Frame for the python code in the selected frame,
or None | [
"Try",
"to",
"obtain",
"the",
"Frame",
"for",
"the",
"python",
"code",
"in",
"the",
"selected",
"frame",
"or",
"None"
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/libpython.py#L1240-L1251 | train |
google/pyringe | pyringe/repl.py | DebuggingConsole.ListCommands | def ListCommands(self):
"""Print a list of currently available commands and their descriptions."""
print 'Available commands:'
commands = dict(self.commands)
for plugin in self.plugins:
commands.update(plugin.commands)
for com in sorted(commands):
if not com.startswith('_'):
self.PrintHelpTextLine(com, commands[com]) | python | def ListCommands(self):
"""Print a list of currently available commands and their descriptions."""
print 'Available commands:'
commands = dict(self.commands)
for plugin in self.plugins:
commands.update(plugin.commands)
for com in sorted(commands):
if not com.startswith('_'):
self.PrintHelpTextLine(com, commands[com]) | [
"def",
"ListCommands",
"(",
"self",
")",
":",
"print",
"'Available commands:'",
"commands",
"=",
"dict",
"(",
"self",
".",
"commands",
")",
"for",
"plugin",
"in",
"self",
".",
"plugins",
":",
"commands",
".",
"update",
"(",
"plugin",
".",
"commands",
")",
"for",
"com",
"in",
"sorted",
"(",
"commands",
")",
":",
"if",
"not",
"com",
".",
"startswith",
"(",
"'_'",
")",
":",
"self",
".",
"PrintHelpTextLine",
"(",
"com",
",",
"commands",
"[",
"com",
"]",
")"
] | Print a list of currently available commands and their descriptions. | [
"Print",
"a",
"list",
"of",
"currently",
"available",
"commands",
"and",
"their",
"descriptions",
"."
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/repl.py#L102-L110 | train |
google/pyringe | pyringe/repl.py | DebuggingConsole.StatusLine | def StatusLine(self):
"""Generate the colored line indicating plugin status."""
pid = self.inferior.pid
curthread = None
threadnum = 0
if pid:
if not self.inferior.is_running:
logging.warning('Inferior is not running.')
self.Detach()
pid = None
else:
try:
# get a gdb running if it wasn't already.
if not self.inferior.attached:
self.inferior.StartGdb()
curthread = self.inferior.current_thread
threadnum = len(self.inferior.threads)
except (inferior.ProxyError,
inferior.TimeoutError,
inferior.PositionError) as err:
# This is not the kind of thing we want to be held up by
logging.debug('Error while getting information in status line:%s'
% err.message)
pass
status = ('==> pid:[%s] #threads:[%s] current thread:[%s]' %
(pid, threadnum, curthread))
return status | python | def StatusLine(self):
"""Generate the colored line indicating plugin status."""
pid = self.inferior.pid
curthread = None
threadnum = 0
if pid:
if not self.inferior.is_running:
logging.warning('Inferior is not running.')
self.Detach()
pid = None
else:
try:
# get a gdb running if it wasn't already.
if not self.inferior.attached:
self.inferior.StartGdb()
curthread = self.inferior.current_thread
threadnum = len(self.inferior.threads)
except (inferior.ProxyError,
inferior.TimeoutError,
inferior.PositionError) as err:
# This is not the kind of thing we want to be held up by
logging.debug('Error while getting information in status line:%s'
% err.message)
pass
status = ('==> pid:[%s] #threads:[%s] current thread:[%s]' %
(pid, threadnum, curthread))
return status | [
"def",
"StatusLine",
"(",
"self",
")",
":",
"pid",
"=",
"self",
".",
"inferior",
".",
"pid",
"curthread",
"=",
"None",
"threadnum",
"=",
"0",
"if",
"pid",
":",
"if",
"not",
"self",
".",
"inferior",
".",
"is_running",
":",
"logging",
".",
"warning",
"(",
"'Inferior is not running.'",
")",
"self",
".",
"Detach",
"(",
")",
"pid",
"=",
"None",
"else",
":",
"try",
":",
"# get a gdb running if it wasn't already.",
"if",
"not",
"self",
".",
"inferior",
".",
"attached",
":",
"self",
".",
"inferior",
".",
"StartGdb",
"(",
")",
"curthread",
"=",
"self",
".",
"inferior",
".",
"current_thread",
"threadnum",
"=",
"len",
"(",
"self",
".",
"inferior",
".",
"threads",
")",
"except",
"(",
"inferior",
".",
"ProxyError",
",",
"inferior",
".",
"TimeoutError",
",",
"inferior",
".",
"PositionError",
")",
"as",
"err",
":",
"# This is not the kind of thing we want to be held up by",
"logging",
".",
"debug",
"(",
"'Error while getting information in status line:%s'",
"%",
"err",
".",
"message",
")",
"pass",
"status",
"=",
"(",
"'==> pid:[%s] #threads:[%s] current thread:[%s]'",
"%",
"(",
"pid",
",",
"threadnum",
",",
"curthread",
")",
")",
"return",
"status"
] | Generate the colored line indicating plugin status. | [
"Generate",
"the",
"colored",
"line",
"indicating",
"plugin",
"status",
"."
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/repl.py#L121-L147 | train |
google/pyringe | pyringe/repl.py | DebuggingConsole.Attach | def Attach(self, pid):
"""Attach to the process with the given pid."""
if self.inferior.is_running:
answer = raw_input('Already attached to process ' +
str(self.inferior.pid) +
'. Detach? [y]/n ')
if answer and answer != 'y' and answer != 'yes':
return None
self.Detach()
# Whatever position we had before will not make any sense now
for plugin in self.plugins:
plugin.position = None
self.inferior.Reinit(pid) | python | def Attach(self, pid):
"""Attach to the process with the given pid."""
if self.inferior.is_running:
answer = raw_input('Already attached to process ' +
str(self.inferior.pid) +
'. Detach? [y]/n ')
if answer and answer != 'y' and answer != 'yes':
return None
self.Detach()
# Whatever position we had before will not make any sense now
for plugin in self.plugins:
plugin.position = None
self.inferior.Reinit(pid) | [
"def",
"Attach",
"(",
"self",
",",
"pid",
")",
":",
"if",
"self",
".",
"inferior",
".",
"is_running",
":",
"answer",
"=",
"raw_input",
"(",
"'Already attached to process '",
"+",
"str",
"(",
"self",
".",
"inferior",
".",
"pid",
")",
"+",
"'. Detach? [y]/n '",
")",
"if",
"answer",
"and",
"answer",
"!=",
"'y'",
"and",
"answer",
"!=",
"'yes'",
":",
"return",
"None",
"self",
".",
"Detach",
"(",
")",
"# Whatever position we had before will not make any sense now",
"for",
"plugin",
"in",
"self",
".",
"plugins",
":",
"plugin",
".",
"position",
"=",
"None",
"self",
".",
"inferior",
".",
"Reinit",
"(",
"pid",
")"
] | Attach to the process with the given pid. | [
"Attach",
"to",
"the",
"process",
"with",
"the",
"given",
"pid",
"."
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/repl.py#L149-L161 | train |
google/pyringe | pyringe/plugins/gdb_shell.py | GdbPlugin.StartGdb | def StartGdb(self):
"""Hands control over to a new gdb process."""
if self.inferior.is_running:
self.inferior.ShutDownGdb()
program_arg = 'program %d ' % self.inferior.pid
else:
program_arg = ''
os.system('gdb ' + program_arg + ' '.join(self.gdb_args))
reset_position = raw_input('Reset debugger position? [y]/n ')
if not reset_position or reset_position == 'y' or reset_position == 'yes':
self.position = None | python | def StartGdb(self):
"""Hands control over to a new gdb process."""
if self.inferior.is_running:
self.inferior.ShutDownGdb()
program_arg = 'program %d ' % self.inferior.pid
else:
program_arg = ''
os.system('gdb ' + program_arg + ' '.join(self.gdb_args))
reset_position = raw_input('Reset debugger position? [y]/n ')
if not reset_position or reset_position == 'y' or reset_position == 'yes':
self.position = None | [
"def",
"StartGdb",
"(",
"self",
")",
":",
"if",
"self",
".",
"inferior",
".",
"is_running",
":",
"self",
".",
"inferior",
".",
"ShutDownGdb",
"(",
")",
"program_arg",
"=",
"'program %d '",
"%",
"self",
".",
"inferior",
".",
"pid",
"else",
":",
"program_arg",
"=",
"''",
"os",
".",
"system",
"(",
"'gdb '",
"+",
"program_arg",
"+",
"' '",
".",
"join",
"(",
"self",
".",
"gdb_args",
")",
")",
"reset_position",
"=",
"raw_input",
"(",
"'Reset debugger position? [y]/n '",
")",
"if",
"not",
"reset_position",
"or",
"reset_position",
"==",
"'y'",
"or",
"reset_position",
"==",
"'yes'",
":",
"self",
".",
"position",
"=",
"None"
] | Hands control over to a new gdb process. | [
"Hands",
"control",
"over",
"to",
"a",
"new",
"gdb",
"process",
"."
] | 76dff5d1ac29cd5e7bf32677654a83291a15ad8a | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/plugins/gdb_shell.py#L38-L48 | train |
WojciechMula/pyahocorasick | py/pyahocorasick.py | Trie.__get_node | def __get_node(self, word):
"""
Private function retrieving the final node of the trie
for a given word
Returns the node, or None if the trie doesn't contain the word.
"""
node = self.root
for c in word:
try:
node = node.children[c]
except KeyError:
return None
return node | python | def __get_node(self, word):
"""
Private function retrieving the final node of the trie
for a given word
Returns the node, or None if the trie doesn't contain the word.
"""
node = self.root
for c in word:
try:
node = node.children[c]
except KeyError:
return None
return node | [
"def",
"__get_node",
"(",
"self",
",",
"word",
")",
":",
"node",
"=",
"self",
".",
"root",
"for",
"c",
"in",
"word",
":",
"try",
":",
"node",
"=",
"node",
".",
"children",
"[",
"c",
"]",
"except",
"KeyError",
":",
"return",
"None",
"return",
"node"
] | Private function retrieving the final node of the trie
for a given word
Returns the node, or None if the trie doesn't contain the word. | [
"Private",
"function",
"retrieving",
"the",
"final",
"node",
"of",
"the",
"trie",
"for",
"a",
"given",
"word"
] | 53842f783fbe3fa77d53cde1ac251b23c3cbed02 | https://github.com/WojciechMula/pyahocorasick/blob/53842f783fbe3fa77d53cde1ac251b23c3cbed02/py/pyahocorasick.py#L55-L70 | train |
WojciechMula/pyahocorasick | py/pyahocorasick.py | Trie.get | def get(self, word, default=nil):
"""
Retrieves output value associated with word.
If the word is not present, returns the default value,
and if no default is given, raises KeyError.
"""
node = self.__get_node(word)
output = nil
if node:
output = node.output
if output is nil:
if default is nil:
raise KeyError("no key '%s'" % word)
else:
return default
else:
return output | python | def get(self, word, default=nil):
"""
Retrieves output value associated with word.
If the word is not present, returns the default value,
and if no default is given, raises KeyError.
"""
node = self.__get_node(word)
output = nil
if node:
output = node.output
if output is nil:
if default is nil:
raise KeyError("no key '%s'" % word)
else:
return default
else:
return output | [
"def",
"get",
"(",
"self",
",",
"word",
",",
"default",
"=",
"nil",
")",
":",
"node",
"=",
"self",
".",
"__get_node",
"(",
"word",
")",
"output",
"=",
"nil",
"if",
"node",
":",
"output",
"=",
"node",
".",
"output",
"if",
"output",
"is",
"nil",
":",
"if",
"default",
"is",
"nil",
":",
"raise",
"KeyError",
"(",
"\"no key '%s'\"",
"%",
"word",
")",
"else",
":",
"return",
"default",
"else",
":",
"return",
"output"
] | Retrieves output value associated with word.
If the word is not present, returns the default value,
and if no default is given, raises KeyError. | [
"Retrieves",
"output",
"value",
"associated",
"with",
"word",
"."
] | 53842f783fbe3fa77d53cde1ac251b23c3cbed02 | https://github.com/WojciechMula/pyahocorasick/blob/53842f783fbe3fa77d53cde1ac251b23c3cbed02/py/pyahocorasick.py#L73-L92 | train |
WojciechMula/pyahocorasick | py/pyahocorasick.py | Trie.items | def items(self):
"""
Generator returning all keys and values stored in a trie.
"""
L = []
def aux(node, s):
s = s + node.char
if node.output is not nil:
L.append((s, node.output))
for child in node.children.values():
if child is not node:
aux(child, s)
aux(self.root, '')
return iter(L) | python | def items(self):
"""
Generator returning all keys and values stored in a trie.
"""
L = []
def aux(node, s):
s = s + node.char
if node.output is not nil:
L.append((s, node.output))
for child in node.children.values():
if child is not node:
aux(child, s)
aux(self.root, '')
return iter(L) | [
"def",
"items",
"(",
"self",
")",
":",
"L",
"=",
"[",
"]",
"def",
"aux",
"(",
"node",
",",
"s",
")",
":",
"s",
"=",
"s",
"+",
"node",
".",
"char",
"if",
"node",
".",
"output",
"is",
"not",
"nil",
":",
"L",
".",
"append",
"(",
"(",
"s",
",",
"node",
".",
"output",
")",
")",
"for",
"child",
"in",
"node",
".",
"children",
".",
"values",
"(",
")",
":",
"if",
"child",
"is",
"not",
"node",
":",
"aux",
"(",
"child",
",",
"s",
")",
"aux",
"(",
"self",
".",
"root",
",",
"''",
")",
"return",
"iter",
"(",
"L",
")"
] | Generator returning all keys and values stored in a trie. | [
"Generator",
"returning",
"all",
"keys",
"and",
"values",
"stored",
"in",
"a",
"trie",
"."
] | 53842f783fbe3fa77d53cde1ac251b23c3cbed02 | https://github.com/WojciechMula/pyahocorasick/blob/53842f783fbe3fa77d53cde1ac251b23c3cbed02/py/pyahocorasick.py#L113-L129 | train |
WojciechMula/pyahocorasick | py/pyahocorasick.py | Trie.add_word | def add_word(self, word, value):
"""
Adds word and associated value.
If word already exists, its value is replaced.
"""
if not word:
return
node = self.root
for c in word:
try:
node = node.children[c]
except KeyError:
n = TrieNode(c)
node.children[c] = n
node = n
node.output = value | python | def add_word(self, word, value):
"""
Adds word and associated value.
If word already exists, its value is replaced.
"""
if not word:
return
node = self.root
for c in word:
try:
node = node.children[c]
except KeyError:
n = TrieNode(c)
node.children[c] = n
node = n
node.output = value | [
"def",
"add_word",
"(",
"self",
",",
"word",
",",
"value",
")",
":",
"if",
"not",
"word",
":",
"return",
"node",
"=",
"self",
".",
"root",
"for",
"c",
"in",
"word",
":",
"try",
":",
"node",
"=",
"node",
".",
"children",
"[",
"c",
"]",
"except",
"KeyError",
":",
"n",
"=",
"TrieNode",
"(",
"c",
")",
"node",
".",
"children",
"[",
"c",
"]",
"=",
"n",
"node",
"=",
"n",
"node",
".",
"output",
"=",
"value"
] | Adds word and associated value.
If word already exists, its value is replaced. | [
"Adds",
"word",
"and",
"associated",
"value",
"."
] | 53842f783fbe3fa77d53cde1ac251b23c3cbed02 | https://github.com/WojciechMula/pyahocorasick/blob/53842f783fbe3fa77d53cde1ac251b23c3cbed02/py/pyahocorasick.py#L151-L169 | train |
WojciechMula/pyahocorasick | py/pyahocorasick.py | Trie.exists | def exists(self, word):
"""
Checks if whole word is present in the trie.
"""
node = self.__get_node(word)
if node:
return bool(node.output != nil)
else:
return False | python | def exists(self, word):
"""
Checks if whole word is present in the trie.
"""
node = self.__get_node(word)
if node:
return bool(node.output != nil)
else:
return False | [
"def",
"exists",
"(",
"self",
",",
"word",
")",
":",
"node",
"=",
"self",
".",
"__get_node",
"(",
"word",
")",
"if",
"node",
":",
"return",
"bool",
"(",
"node",
".",
"output",
"!=",
"nil",
")",
"else",
":",
"return",
"False"
] | Checks if whole word is present in the trie. | [
"Checks",
"if",
"whole",
"word",
"is",
"present",
"in",
"the",
"trie",
"."
] | 53842f783fbe3fa77d53cde1ac251b23c3cbed02 | https://github.com/WojciechMula/pyahocorasick/blob/53842f783fbe3fa77d53cde1ac251b23c3cbed02/py/pyahocorasick.py#L180-L189 | train |
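Taken together, the Trie records above (add_word, get, exists, items) form a small dict-like API. A minimal usage sketch, assuming the module's no-argument Trie() constructor, which these records do not show:

trie = Trie()
trie.add_word("he", 1)
trie.add_word("her", 2)
print(trie.exists("he"))       # True
print(trie.get("her"))         # 2
print(trie.get("hers", None))  # None: the default instead of KeyError
print(sorted(trie.items()))    # [('he', 1), ('her', 2)]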
WojciechMula/pyahocorasick | py/pyahocorasick.py | Trie.make_automaton | def make_automaton(self):
"""
Converts trie to Aho-Corasick automaton.
"""
queue = deque()
# 1.
for i in range(256):
c = chr(i)
if c in self.root.children:
node = self.root.children[c]
node.fail = self.root # f(s) = 0
queue.append(node)
else:
self.root.children[c] = self.root
# 2.
while queue:
r = queue.popleft()
for node in r.children.values():
queue.append(node)
state = r.fail
while node.char not in state.children:
state = state.fail
node.fail = state.children.get(node.char, self.root) | python | def make_automaton(self):
"""
Converts trie to Aho-Corasick automaton.
"""
queue = deque()
# 1.
for i in range(256):
c = chr(i)
if c in self.root.children:
node = self.root.children[c]
node.fail = self.root # f(s) = 0
queue.append(node)
else:
self.root.children[c] = self.root
# 2.
while queue:
r = queue.popleft()
for node in r.children.values():
queue.append(node)
state = r.fail
while node.char not in state.children:
state = state.fail
node.fail = state.children.get(node.char, self.root) | [
"def",
"make_automaton",
"(",
"self",
")",
":",
"queue",
"=",
"deque",
"(",
")",
"# 1.",
"for",
"i",
"in",
"range",
"(",
"256",
")",
":",
"c",
"=",
"chr",
"(",
"i",
")",
"if",
"c",
"in",
"self",
".",
"root",
".",
"children",
":",
"node",
"=",
"self",
".",
"root",
".",
"children",
"[",
"c",
"]",
"node",
".",
"fail",
"=",
"self",
".",
"root",
"# f(s) = 0",
"queue",
".",
"append",
"(",
"node",
")",
"else",
":",
"self",
".",
"root",
".",
"children",
"[",
"c",
"]",
"=",
"self",
".",
"root",
"# 2.",
"while",
"queue",
":",
"r",
"=",
"queue",
".",
"popleft",
"(",
")",
"for",
"node",
"in",
"r",
".",
"children",
".",
"values",
"(",
")",
":",
"queue",
".",
"append",
"(",
"node",
")",
"state",
"=",
"r",
".",
"fail",
"while",
"node",
".",
"char",
"not",
"in",
"state",
".",
"children",
":",
"state",
"=",
"state",
".",
"fail",
"node",
".",
"fail",
"=",
"state",
".",
"children",
".",
"get",
"(",
"node",
".",
"char",
",",
"self",
".",
"root",
")"
] | Converts trie to Aho-Corasick automaton. | [
"Converts",
"trie",
"to",
"Aho",
"-",
"Corasick",
"automaton",
"."
] | 53842f783fbe3fa77d53cde1ac251b23c3cbed02 | https://github.com/WojciechMula/pyahocorasick/blob/53842f783fbe3fa77d53cde1ac251b23c3cbed02/py/pyahocorasick.py#L200-L226 | train |
WojciechMula/pyahocorasick | py/pyahocorasick.py | Trie.iter_long | def iter_long(self, string):
"""
Generator performs a modified Aho-Corasick search string algorithm,
which matches only the longest word.
"""
state = self.root
last = None
index = 0
while index < len(string):
c = string[index]
if c in state.children:
state = state.children[c]
if state.output is not nil:
# save the last node on the path
last = (state.output, index)
index += 1
else:
if last:
# return the saved match
yield last
# and start over, as we don't want overlapped results
# Note: this leads to quadratic complexity in the worst case
index = last[1] + 1
state = self.root
last = None
else:
# if no output, perform classic Aho-Corasick algorithm
while c not in state.children:
state = state.fail
# corner case
if last:
yield last | python | def iter_long(self, string):
"""
Generator performs a modified Aho-Corasick search string algorithm,
which matches only the longest word.
"""
state = self.root
last = None
index = 0
while index < len(string):
c = string[index]
if c in state.children:
state = state.children[c]
if state.output is not nil:
# save the last node on the path
last = (state.output, index)
index += 1
else:
if last:
# return the saved match
yield last
# and start over, as we don't want overlapped results
# Note: this leads to quadratic complexity in the worst case
index = last[1] + 1
state = self.root
last = None
else:
# if no output, perform classic Aho-Corasick algorithm
while c not in state.children:
state = state.fail
# corner case
if last:
yield last | [
"def",
"iter_long",
"(",
"self",
",",
"string",
")",
":",
"state",
"=",
"self",
".",
"root",
"last",
"=",
"None",
"index",
"=",
"0",
"while",
"index",
"<",
"len",
"(",
"string",
")",
":",
"c",
"=",
"string",
"[",
"index",
"]",
"if",
"c",
"in",
"state",
".",
"children",
":",
"state",
"=",
"state",
".",
"children",
"[",
"c",
"]",
"if",
"state",
".",
"output",
"is",
"not",
"nil",
":",
"# save the last node on the path",
"last",
"=",
"(",
"state",
".",
"output",
",",
"index",
")",
"index",
"+=",
"1",
"else",
":",
"if",
"last",
":",
"# return the saved match",
"yield",
"last",
"# and start over, as we don't want overlapped results",
"# Note: this leads to quadratic complexity in the worst case",
"index",
"=",
"last",
"[",
"1",
"]",
"+",
"1",
"state",
"=",
"self",
".",
"root",
"last",
"=",
"None",
"else",
":",
"# if no output, perform classic Aho-Corasick algorithm",
"while",
"c",
"not",
"in",
"state",
".",
"children",
":",
"state",
"=",
"state",
".",
"fail",
"# corner case",
"if",
"last",
":",
"yield",
"last"
] | Generator performs a modified Aho-Corasick search string algorithm,
which matches only the longest word. | [
"Generator",
"performs",
"a",
"modified",
"Aho",
"-",
"Corasick",
"search",
"string",
"algorithm",
"which",
"maches",
"only",
"the",
"longest",
"word",
"."
] | 53842f783fbe3fa77d53cde1ac251b23c3cbed02 | https://github.com/WojciechMula/pyahocorasick/blob/53842f783fbe3fa77d53cde1ac251b23c3cbed02/py/pyahocorasick.py#L254-L292 | train |
WojciechMula/pyahocorasick | py/pyahocorasick.py | Trie.find_all | def find_all(self, string, callback):
"""
Wrapper on iter method, callback gets an iterator result
"""
for index, output in self.iter(string):
callback(index, output) | python | def find_all(self, string, callback):
"""
Wrapper on iter method, callback gets an iterator result
"""
for index, output in self.iter(string):
callback(index, output) | [
"def",
"find_all",
"(",
"self",
",",
"string",
",",
"callback",
")",
":",
"for",
"index",
",",
"output",
"in",
"self",
".",
"iter",
"(",
"string",
")",
":",
"callback",
"(",
"index",
",",
"output",
")"
] | Wrapper on iter method, callback gets an iterator result | [
"Wrapper",
"on",
"iter",
"method",
"callback",
"gets",
"an",
"iterator",
"result"
] | 53842f783fbe3fa77d53cde1ac251b23c3cbed02 | https://github.com/WojciechMula/pyahocorasick/blob/53842f783fbe3fa77d53cde1ac251b23c3cbed02/py/pyahocorasick.py#L294-L299 | train |
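Once the words are loaded, make_automaton() wires the fail links and the searches above become usable. A hedged sketch (find_all delegates to an iter method whose body is not shown in these records):

trie = Trie()
trie.add_word("he", "he")
trie.add_word("hers", "hers")
trie.make_automaton()  # must run before searching

# iter_long yields (output, end_index) for the longest non-overlapping match
for output, index in trie.iter_long("ushers"):
    print(index, output)  # 5 hers

# find_all forwards each (index, output) pair from iter to the callback,
# shorter overlapping matches included
trie.find_all("ushers", lambda index, output: print(index, output))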
WojciechMula/pyahocorasick | setup.py | get_long_description | def get_long_description():
"""
Strip the content index from the long description.
"""
import codecs
with codecs.open('README.rst', encoding='UTF-8') as f:
readme = [line for line in f if not line.startswith('.. contents::')]
return ''.join(readme) | python | def get_long_description():
"""
Strip the content index from the long description.
"""
import codecs
with codecs.open('README.rst', encoding='UTF-8') as f:
readme = [line for line in f if not line.startswith('.. contents::')]
return ''.join(readme) | [
"def",
"get_long_description",
"(",
")",
":",
"import",
"codecs",
"with",
"codecs",
".",
"open",
"(",
"'README.rst'",
",",
"encoding",
"=",
"'UTF-8'",
")",
"as",
"f",
":",
"readme",
"=",
"[",
"line",
"for",
"line",
"in",
"f",
"if",
"not",
"line",
".",
"startswith",
"(",
"'.. contents::'",
")",
"]",
"return",
"''",
".",
"join",
"(",
"readme",
")"
] | Strip the content index from the long description. | [
"Strip",
"the",
"content",
"index",
"from",
"the",
"long",
"description",
"."
] | 53842f783fbe3fa77d53cde1ac251b23c3cbed02 | https://github.com/WojciechMula/pyahocorasick/blob/53842f783fbe3fa77d53cde1ac251b23c3cbed02/setup.py#L19-L26 | train |
jreese/markdown-pp | MarkdownPP/Modules/YoutubeEmbed.py | YoutubeEmbed._add_play_button | def _add_play_button(self, image_url, image_path):
"""Try to add a play button to the screenshot."""
try:
from PIL import Image
from tempfile import NamedTemporaryFile
import urllib
try:
urlretrieve = urllib.request.urlretrieve
except ImportError:
urlretrieve = urllib.urlretrieve
# create temporary files for image operations
with NamedTemporaryFile(suffix=".jpg") as screenshot_img:
with NamedTemporaryFile(suffix=".jpg") as button_img:
# grab screenshot and button image
urlretrieve(image_url, screenshot_img.name)
urlretrieve(play_button_url, button_img.name)
# layer the images using PIL and save
with Image.open(screenshot_img.name) as background:
with Image.open(button_img.name) as foreground:
background.paste(foreground, (90, 65), foreground)
background.save(image_path)
except ImportError as e:
print(e)
except Exception as e:
print('Unable to add play button to YouTube '
'screenshot (%s). Using the screenshot '
'on its own instead.' % e) | python | def _add_play_button(self, image_url, image_path):
"""Try to add a play button to the screenshot."""
try:
from PIL import Image
from tempfile import NamedTemporaryFile
import urllib
try:
urlretrieve = urllib.request.urlretrieve
except ImportError:
urlretrieve = urllib.urlretrieve
# create temporary files for image operations
with NamedTemporaryFile(suffix=".jpg") as screenshot_img:
with NamedTemporaryFile(suffix=".jpg") as button_img:
# grab screenshot and button image
urlretrieve(image_url, screenshot_img.name)
urlretrieve(play_button_url, button_img.name)
# layer the images using PIL and save
with Image.open(screenshot_img.name) as background:
with Image.open(button_img.name) as foreground:
background.paste(foreground, (90, 65), foreground)
background.save(image_path)
except ImportError as e:
print(e)
except Exception as e:
print('Unable to add play button to YouTube '
'screenshot (%s). Using the screenshot '
'on its own instead.' % e) | [
"def",
"_add_play_button",
"(",
"self",
",",
"image_url",
",",
"image_path",
")",
":",
"try",
":",
"from",
"PIL",
"import",
"Image",
"from",
"tempfile",
"import",
"NamedTemporaryFile",
"import",
"urllib",
"try",
":",
"urlretrieve",
"=",
"urllib",
".",
"request",
".",
"urlretrieve",
"except",
"ImportError",
":",
"urlretrieve",
"=",
"urllib",
".",
"urlretrieve",
"# create temporary files for image operations",
"with",
"NamedTemporaryFile",
"(",
"suffix",
"=",
"\".jpg\"",
")",
"as",
"screenshot_img",
":",
"with",
"NamedTemporaryFile",
"(",
"suffix",
"=",
"\".jpg\"",
")",
"as",
"button_img",
":",
"# grab screenshot and button image",
"urlretrieve",
"(",
"image_url",
",",
"screenshot_img",
".",
"name",
")",
"urlretrieve",
"(",
"play_button_url",
",",
"button_img",
".",
"name",
")",
"# layer the images using PIL and save",
"with",
"Image",
".",
"open",
"(",
"screenshot_img",
".",
"name",
")",
"as",
"background",
":",
"with",
"Image",
".",
"open",
"(",
"button_img",
".",
"name",
")",
"as",
"foreground",
":",
"background",
".",
"paste",
"(",
"foreground",
",",
"(",
"90",
",",
"65",
")",
",",
"foreground",
")",
"background",
".",
"save",
"(",
"image_path",
")",
"except",
"ImportError",
"as",
"e",
":",
"print",
"(",
"e",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"'Unable to add play button to YouTube '",
"'screenshot (%s). Using the screenshot '",
"'on its own instead.'",
"%",
"e",
")"
] | Try to add a play button to the screenshot. | [
"Try",
"to",
"add",
"a",
"play",
"button",
"to",
"the",
"screenshot",
"."
] | fba644c08176abef4ea5ad36b5b60d32379bddac | https://github.com/jreese/markdown-pp/blob/fba644c08176abef4ea5ad36b5b60d32379bddac/MarkdownPP/Modules/YoutubeEmbed.py#L71-L101 | train |
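The layering step above is plain Pillow: paste() with the foreground image passed again as the third argument uses its alpha channel as the mask. A standalone sketch with placeholder file names (the button image must carry an alpha channel, e.g. an RGBA PNG, for the mask to work):

from PIL import Image

with Image.open("screenshot.jpg") as background:
    with Image.open("play_button.png") as foreground:
        # third argument = transparency mask taken from the button itself
        background.paste(foreground, (90, 65), foreground)
        background.save("thumbnail.jpg")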
jreese/markdown-pp | MarkdownPP/Processor.py | Processor.process | def process(self):
"""
This method handles the actual processing of Modules and Transforms
"""
self.modules.sort(key=lambda x: x.priority)
for module in self.modules:
transforms = module.transform(self.data)
transforms.sort(key=lambda x: x.linenum, reverse=True)
for transform in transforms:
linenum = transform.linenum
if isinstance(transform.data, basestring):
transform.data = [transform.data]
if transform.oper == "prepend":
self.data[linenum:linenum] = transform.data
elif transform.oper == "append":
self.data[linenum+1:linenum+1] = transform.data
elif transform.oper == "swap":
self.data[linenum:linenum+1] = transform.data
elif transform.oper == "drop":
self.data[linenum:linenum+1] = []
elif transform.oper == "noop":
pass | python | def process(self):
"""
This method handles the actual processing of Modules and Transforms
"""
self.modules.sort(key=lambda x: x.priority)
for module in self.modules:
transforms = module.transform(self.data)
transforms.sort(key=lambda x: x.linenum, reverse=True)
for transform in transforms:
linenum = transform.linenum
if isinstance(transform.data, basestring):
transform.data = [transform.data]
if transform.oper == "prepend":
self.data[linenum:linenum] = transform.data
elif transform.oper == "append":
self.data[linenum+1:linenum+1] = transform.data
elif transform.oper == "swap":
self.data[linenum:linenum+1] = transform.data
elif transform.oper == "drop":
self.data[linenum:linenum+1] = []
elif transform.oper == "noop":
pass | [
"def",
"process",
"(",
"self",
")",
":",
"self",
".",
"modules",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"priority",
")",
"for",
"module",
"in",
"self",
".",
"modules",
":",
"transforms",
"=",
"module",
".",
"transform",
"(",
"self",
".",
"data",
")",
"transforms",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"linenum",
",",
"reverse",
"=",
"True",
")",
"for",
"transform",
"in",
"transforms",
":",
"linenum",
"=",
"transform",
".",
"linenum",
"if",
"isinstance",
"(",
"transform",
".",
"data",
",",
"basestring",
")",
":",
"transform",
".",
"data",
"=",
"[",
"transform",
".",
"data",
"]",
"if",
"transform",
".",
"oper",
"==",
"\"prepend\"",
":",
"self",
".",
"data",
"[",
"linenum",
":",
"linenum",
"]",
"=",
"transform",
".",
"data",
"elif",
"transform",
".",
"oper",
"==",
"\"append\"",
":",
"self",
".",
"data",
"[",
"linenum",
"+",
"1",
":",
"linenum",
"+",
"1",
"]",
"=",
"transform",
".",
"data",
"elif",
"transform",
".",
"oper",
"==",
"\"swap\"",
":",
"self",
".",
"data",
"[",
"linenum",
":",
"linenum",
"+",
"1",
"]",
"=",
"transform",
".",
"data",
"elif",
"transform",
".",
"oper",
"==",
"\"drop\"",
":",
"self",
".",
"data",
"[",
"linenum",
":",
"linenum",
"+",
"1",
"]",
"=",
"[",
"]",
"elif",
"transform",
".",
"oper",
"==",
"\"noop\"",
":",
"pass"
] | This method handles the actual processing of Modules and Transforms | [
"This",
"method",
"handles",
"the",
"actual",
"processing",
"of",
"Modules",
"and",
"Transforms"
] | fba644c08176abef4ea5ad36b5b60d32379bddac | https://github.com/jreese/markdown-pp/blob/fba644c08176abef4ea5ad36b5b60d32379bddac/MarkdownPP/Processor.py#L42-L71 | train |
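Each branch of process() is a list splice on self.data. The snippet below demonstrates the "swap" and "drop" cases with a stand-in namedtuple that mirrors only the attributes the method reads (oper, linenum, data); it is not MarkdownPP's actual Transform class:

from collections import namedtuple

Transform = namedtuple("Transform", "oper linenum data")

data = ["a", "b", "c"]
swap = Transform(oper="swap", linenum=1, data=["B1", "B2"])
data[swap.linenum:swap.linenum + 1] = swap.data  # the "swap" branch above
print(data)  # ['a', 'B1', 'B2', 'c']

drop = Transform(oper="drop", linenum=0, data=None)
data[drop.linenum:drop.linenum + 1] = []         # the "drop" branch above
print(data)  # ['B1', 'B2', 'c']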
jpvanhal/inflection | inflection.py | _irregular | def _irregular(singular, plural):
"""
A convenience function to add appropriate rules to plurals and singular
for irregular words.
:param singular: irregular word in singular form
:param plural: irregular word in plural form
"""
def caseinsensitive(string):
return ''.join('[' + char + char.upper() + ']' for char in string)
if singular[0].upper() == plural[0].upper():
PLURALS.insert(0, (
r"(?i)({}){}$".format(singular[0], singular[1:]),
r'\1' + plural[1:]
))
PLURALS.insert(0, (
r"(?i)({}){}$".format(plural[0], plural[1:]),
r'\1' + plural[1:]
))
SINGULARS.insert(0, (
r"(?i)({}){}$".format(plural[0], plural[1:]),
r'\1' + singular[1:]
))
else:
PLURALS.insert(0, (
r"{}{}$".format(singular[0].upper(),
caseinsensitive(singular[1:])),
plural[0].upper() + plural[1:]
))
PLURALS.insert(0, (
r"{}{}$".format(singular[0].lower(),
caseinsensitive(singular[1:])),
plural[0].lower() + plural[1:]
))
PLURALS.insert(0, (
r"{}{}$".format(plural[0].upper(), caseinsensitive(plural[1:])),
plural[0].upper() + plural[1:]
))
PLURALS.insert(0, (
r"{}{}$".format(plural[0].lower(), caseinsensitive(plural[1:])),
plural[0].lower() + plural[1:]
))
SINGULARS.insert(0, (
r"{}{}$".format(plural[0].upper(), caseinsensitive(plural[1:])),
singular[0].upper() + singular[1:]
))
SINGULARS.insert(0, (
r"{}{}$".format(plural[0].lower(), caseinsensitive(plural[1:])),
singular[0].lower() + singular[1:]
)) | python | def _irregular(singular, plural):
"""
A convenience function to add appropriate rules to plurals and singular
for irregular words.
:param singular: irregular word in singular form
:param plural: irregular word in plural form
"""
def caseinsensitive(string):
return ''.join('[' + char + char.upper() + ']' for char in string)
if singular[0].upper() == plural[0].upper():
PLURALS.insert(0, (
r"(?i)({}){}$".format(singular[0], singular[1:]),
r'\1' + plural[1:]
))
PLURALS.insert(0, (
r"(?i)({}){}$".format(plural[0], plural[1:]),
r'\1' + plural[1:]
))
SINGULARS.insert(0, (
r"(?i)({}){}$".format(plural[0], plural[1:]),
r'\1' + singular[1:]
))
else:
PLURALS.insert(0, (
r"{}{}$".format(singular[0].upper(),
caseinsensitive(singular[1:])),
plural[0].upper() + plural[1:]
))
PLURALS.insert(0, (
r"{}{}$".format(singular[0].lower(),
caseinsensitive(singular[1:])),
plural[0].lower() + plural[1:]
))
PLURALS.insert(0, (
r"{}{}$".format(plural[0].upper(), caseinsensitive(plural[1:])),
plural[0].upper() + plural[1:]
))
PLURALS.insert(0, (
r"{}{}$".format(plural[0].lower(), caseinsensitive(plural[1:])),
plural[0].lower() + plural[1:]
))
SINGULARS.insert(0, (
r"{}{}$".format(plural[0].upper(), caseinsensitive(plural[1:])),
singular[0].upper() + singular[1:]
))
SINGULARS.insert(0, (
r"{}{}$".format(plural[0].lower(), caseinsensitive(plural[1:])),
singular[0].lower() + singular[1:]
)) | [
"def",
"_irregular",
"(",
"singular",
",",
"plural",
")",
":",
"def",
"caseinsensitive",
"(",
"string",
")",
":",
"return",
"''",
".",
"join",
"(",
"'['",
"+",
"char",
"+",
"char",
".",
"upper",
"(",
")",
"+",
"']'",
"for",
"char",
"in",
"string",
")",
"if",
"singular",
"[",
"0",
"]",
".",
"upper",
"(",
")",
"==",
"plural",
"[",
"0",
"]",
".",
"upper",
"(",
")",
":",
"PLURALS",
".",
"insert",
"(",
"0",
",",
"(",
"r\"(?i)({}){}$\"",
".",
"format",
"(",
"singular",
"[",
"0",
"]",
",",
"singular",
"[",
"1",
":",
"]",
")",
",",
"r'\\1'",
"+",
"plural",
"[",
"1",
":",
"]",
")",
")",
"PLURALS",
".",
"insert",
"(",
"0",
",",
"(",
"r\"(?i)({}){}$\"",
".",
"format",
"(",
"plural",
"[",
"0",
"]",
",",
"plural",
"[",
"1",
":",
"]",
")",
",",
"r'\\1'",
"+",
"plural",
"[",
"1",
":",
"]",
")",
")",
"SINGULARS",
".",
"insert",
"(",
"0",
",",
"(",
"r\"(?i)({}){}$\"",
".",
"format",
"(",
"plural",
"[",
"0",
"]",
",",
"plural",
"[",
"1",
":",
"]",
")",
",",
"r'\\1'",
"+",
"singular",
"[",
"1",
":",
"]",
")",
")",
"else",
":",
"PLURALS",
".",
"insert",
"(",
"0",
",",
"(",
"r\"{}{}$\"",
".",
"format",
"(",
"singular",
"[",
"0",
"]",
".",
"upper",
"(",
")",
",",
"caseinsensitive",
"(",
"singular",
"[",
"1",
":",
"]",
")",
")",
",",
"plural",
"[",
"0",
"]",
".",
"upper",
"(",
")",
"+",
"plural",
"[",
"1",
":",
"]",
")",
")",
"PLURALS",
".",
"insert",
"(",
"0",
",",
"(",
"r\"{}{}$\"",
".",
"format",
"(",
"singular",
"[",
"0",
"]",
".",
"lower",
"(",
")",
",",
"caseinsensitive",
"(",
"singular",
"[",
"1",
":",
"]",
")",
")",
",",
"plural",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"+",
"plural",
"[",
"1",
":",
"]",
")",
")",
"PLURALS",
".",
"insert",
"(",
"0",
",",
"(",
"r\"{}{}$\"",
".",
"format",
"(",
"plural",
"[",
"0",
"]",
".",
"upper",
"(",
")",
",",
"caseinsensitive",
"(",
"plural",
"[",
"1",
":",
"]",
")",
")",
",",
"plural",
"[",
"0",
"]",
".",
"upper",
"(",
")",
"+",
"plural",
"[",
"1",
":",
"]",
")",
")",
"PLURALS",
".",
"insert",
"(",
"0",
",",
"(",
"r\"{}{}$\"",
".",
"format",
"(",
"plural",
"[",
"0",
"]",
".",
"lower",
"(",
")",
",",
"caseinsensitive",
"(",
"plural",
"[",
"1",
":",
"]",
")",
")",
",",
"plural",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"+",
"plural",
"[",
"1",
":",
"]",
")",
")",
"SINGULARS",
".",
"insert",
"(",
"0",
",",
"(",
"r\"{}{}$\"",
".",
"format",
"(",
"plural",
"[",
"0",
"]",
".",
"upper",
"(",
")",
",",
"caseinsensitive",
"(",
"plural",
"[",
"1",
":",
"]",
")",
")",
",",
"singular",
"[",
"0",
"]",
".",
"upper",
"(",
")",
"+",
"singular",
"[",
"1",
":",
"]",
")",
")",
"SINGULARS",
".",
"insert",
"(",
"0",
",",
"(",
"r\"{}{}$\"",
".",
"format",
"(",
"plural",
"[",
"0",
"]",
".",
"lower",
"(",
")",
",",
"caseinsensitive",
"(",
"plural",
"[",
"1",
":",
"]",
")",
")",
",",
"singular",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"+",
"singular",
"[",
"1",
":",
"]",
")",
")"
] | A convenience function to add appropriate rules to plurals and singular
for irregular words.
:param singular: irregular word in singular form
:param plural: irregular word in plural form | [
"A",
"convenience",
"function",
"to",
"add",
"appropriate",
"rules",
"to",
"plurals",
"and",
"singular",
"for",
"irregular",
"words",
"."
] | ad195ab72b193b57bb4cf68396c4cd8a62f1fe6c | https://github.com/jpvanhal/inflection/blob/ad195ab72b193b57bb4cf68396c4cd8a62f1fe6c/inflection.py#L89-L139 | train |
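Because inflection.py registers pairs such as person/people through _irregular() at import time, the rule lists resolve them in both directions and preserve the case of the first letter. A quick demonstration against the installed package (singularize belongs to the same module, though it is not shown in these records):

import inflection

print(inflection.pluralize("person"))    # 'people'
print(inflection.singularize("people"))  # 'person'
print(inflection.pluralize("Person"))    # 'People'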
jpvanhal/inflection | inflection.py | camelize | def camelize(string, uppercase_first_letter=True):
"""
Convert strings to CamelCase.
Examples::
>>> camelize("device_type")
"DeviceType"
>>> camelize("device_type", False)
"deviceType"
:func:`camelize` can be thought of as an inverse of :func:`underscore`,
although there are some cases where that does not hold::
>>> camelize(underscore("IOError"))
"IoError"
:param uppercase_first_letter: if set to `True` :func:`camelize` converts
strings to UpperCamelCase. If set to `False` :func:`camelize` produces
lowerCamelCase. Defaults to `True`.
"""
if uppercase_first_letter:
return re.sub(r"(?:^|_)(.)", lambda m: m.group(1).upper(), string)
else:
return string[0].lower() + camelize(string)[1:] | python | def camelize(string, uppercase_first_letter=True):
"""
Convert strings to CamelCase.
Examples::
>>> camelize("device_type")
"DeviceType"
>>> camelize("device_type", False)
"deviceType"
:func:`camelize` can be thought of as an inverse of :func:`underscore`,
although there are some cases where that does not hold::
>>> camelize(underscore("IOError"))
"IoError"
:param uppercase_first_letter: if set to `True` :func:`camelize` converts
strings to UpperCamelCase. If set to `False` :func:`camelize` produces
lowerCamelCase. Defaults to `True`.
"""
if uppercase_first_letter:
return re.sub(r"(?:^|_)(.)", lambda m: m.group(1).upper(), string)
else:
return string[0].lower() + camelize(string)[1:] | [
"def",
"camelize",
"(",
"string",
",",
"uppercase_first_letter",
"=",
"True",
")",
":",
"if",
"uppercase_first_letter",
":",
"return",
"re",
".",
"sub",
"(",
"r\"(?:^|_)(.)\"",
",",
"lambda",
"m",
":",
"m",
".",
"group",
"(",
"1",
")",
".",
"upper",
"(",
")",
",",
"string",
")",
"else",
":",
"return",
"string",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"+",
"camelize",
"(",
"string",
")",
"[",
"1",
":",
"]"
] | Convert strings to CamelCase.
Examples::
>>> camelize("device_type")
"DeviceType"
>>> camelize("device_type", False)
"deviceType"
:func:`camelize` can be thought of as an inverse of :func:`underscore`,
although there are some cases where that does not hold::
>>> camelize(underscore("IOError"))
"IoError"
:param uppercase_first_letter: if set to `True` :func:`camelize` converts
strings to UpperCamelCase. If set to `False` :func:`camelize` produces
lowerCamelCase. Defaults to `True`. | [
"Convert",
"strings",
"to",
"CamelCase",
"."
] | ad195ab72b193b57bb4cf68396c4cd8a62f1fe6c | https://github.com/jpvanhal/inflection/blob/ad195ab72b193b57bb4cf68396c4cd8a62f1fe6c/inflection.py#L142-L166 | train |
jpvanhal/inflection | inflection.py | parameterize | def parameterize(string, separator='-'):
"""
Replace special characters in a string so that it may be used as part of a
'pretty' URL.
Example::
>>> parameterize(u"Donald E. Knuth")
'donald-e-knuth'
"""
string = transliterate(string)
# Turn unwanted chars into the separator
string = re.sub(r"(?i)[^a-z0-9\-_]+", separator, string)
if separator:
re_sep = re.escape(separator)
# No more than one of the separator in a row.
string = re.sub(r'%s{2,}' % re_sep, separator, string)
# Remove leading/trailing separator.
string = re.sub(r"(?i)^{sep}|{sep}$".format(sep=re_sep), '', string)
return string.lower() | python | def parameterize(string, separator='-'):
"""
Replace special characters in a string so that it may be used as part of a
'pretty' URL.
Example::
>>> parameterize(u"Donald E. Knuth")
'donald-e-knuth'
"""
string = transliterate(string)
# Turn unwanted chars into the separator
string = re.sub(r"(?i)[^a-z0-9\-_]+", separator, string)
if separator:
re_sep = re.escape(separator)
# No more than one of the separator in a row.
string = re.sub(r'%s{2,}' % re_sep, separator, string)
# Remove leading/trailing separator.
string = re.sub(r"(?i)^{sep}|{sep}$".format(sep=re_sep), '', string)
return string.lower() | [
"def",
"parameterize",
"(",
"string",
",",
"separator",
"=",
"'-'",
")",
":",
"string",
"=",
"transliterate",
"(",
"string",
")",
"# Turn unwanted chars into the separator",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"(?i)[^a-z0-9\\-_]+\"",
",",
"separator",
",",
"string",
")",
"if",
"separator",
":",
"re_sep",
"=",
"re",
".",
"escape",
"(",
"separator",
")",
"# No more than one of the separator in a row.",
"string",
"=",
"re",
".",
"sub",
"(",
"r'%s{2,}'",
"%",
"re_sep",
",",
"separator",
",",
"string",
")",
"# Remove leading/trailing separator.",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"(?i)^{sep}|{sep}$\"",
".",
"format",
"(",
"sep",
"=",
"re_sep",
")",
",",
"''",
",",
"string",
")",
"return",
"string",
".",
"lower",
"(",
")"
] | Replace special characters in a string so that it may be used as part of a
'pretty' URL.
Example::
>>> parameterize(u"Donald E. Knuth")
'donald-e-knuth' | [
"Replace",
"special",
"characters",
"in",
"a",
"string",
"so",
"that",
"it",
"may",
"be",
"used",
"as",
"part",
"of",
"a",
"pretty",
"URL",
"."
] | ad195ab72b193b57bb4cf68396c4cd8a62f1fe6c | https://github.com/jpvanhal/inflection/blob/ad195ab72b193b57bb4cf68396c4cd8a62f1fe6c/inflection.py#L258-L279 | train |
jpvanhal/inflection | inflection.py | pluralize | def pluralize(word):
"""
Return the plural form of a word.
Examples::
>>> pluralize("post")
"posts"
>>> pluralize("octopus")
"octopi"
>>> pluralize("sheep")
"sheep"
>>> pluralize("CamelOctopus")
"CamelOctopi"
"""
if not word or word.lower() in UNCOUNTABLES:
return word
else:
for rule, replacement in PLURALS:
if re.search(rule, word):
return re.sub(rule, replacement, word)
return word | python | def pluralize(word):
"""
Return the plural form of a word.
Examples::
>>> pluralize("post")
"posts"
>>> pluralize("octopus")
"octopi"
>>> pluralize("sheep")
"sheep"
>>> pluralize("CamelOctopus")
"CamelOctopi"
"""
if not word or word.lower() in UNCOUNTABLES:
return word
else:
for rule, replacement in PLURALS:
if re.search(rule, word):
return re.sub(rule, replacement, word)
return word | [
"def",
"pluralize",
"(",
"word",
")",
":",
"if",
"not",
"word",
"or",
"word",
".",
"lower",
"(",
")",
"in",
"UNCOUNTABLES",
":",
"return",
"word",
"else",
":",
"for",
"rule",
",",
"replacement",
"in",
"PLURALS",
":",
"if",
"re",
".",
"search",
"(",
"rule",
",",
"word",
")",
":",
"return",
"re",
".",
"sub",
"(",
"rule",
",",
"replacement",
",",
"word",
")",
"return",
"word"
] | Return the plural form of a word.
Examples::
>>> pluralize("post")
"posts"
>>> pluralize("octopus")
"octopi"
>>> pluralize("sheep")
"sheep"
>>> pluralize("CamelOctopus")
"CamelOctopi" | [
"Return",
"the",
"plural",
"form",
"of",
"a",
"word",
"."
] | ad195ab72b193b57bb4cf68396c4cd8a62f1fe6c | https://github.com/jpvanhal/inflection/blob/ad195ab72b193b57bb4cf68396c4cd8a62f1fe6c/inflection.py#L282-L304 | train |
jpvanhal/inflection | inflection.py | underscore | def underscore(word):
"""
Make an underscored, lowercase form from the expression in the string.
Example::
>>> underscore("DeviceType")
"device_type"
As a rule of thumb you can think of :func:`underscore` as the inverse of
:func:`camelize`, though there are cases where that does not hold::
>>> camelize(underscore("IOError"))
"IoError"
"""
word = re.sub(r"([A-Z]+)([A-Z][a-z])", r'\1_\2', word)
word = re.sub(r"([a-z\d])([A-Z])", r'\1_\2', word)
word = word.replace("-", "_")
return word.lower() | python | def underscore(word):
"""
Make an underscored, lowercase form from the expression in the string.
Example::
>>> underscore("DeviceType")
"device_type"
As a rule of thumb you can think of :func:`underscore` as the inverse of
:func:`camelize`, though there are cases where that does not hold::
>>> camelize(underscore("IOError"))
"IoError"
"""
word = re.sub(r"([A-Z]+)([A-Z][a-z])", r'\1_\2', word)
word = re.sub(r"([a-z\d])([A-Z])", r'\1_\2', word)
word = word.replace("-", "_")
return word.lower() | [
"def",
"underscore",
"(",
"word",
")",
":",
"word",
"=",
"re",
".",
"sub",
"(",
"r\"([A-Z]+)([A-Z][a-z])\"",
",",
"r'\\1_\\2'",
",",
"word",
")",
"word",
"=",
"re",
".",
"sub",
"(",
"r\"([a-z\\d])([A-Z])\"",
",",
"r'\\1_\\2'",
",",
"word",
")",
"word",
"=",
"word",
".",
"replace",
"(",
"\"-\"",
",",
"\"_\"",
")",
"return",
"word",
".",
"lower",
"(",
")"
] | Make an underscored, lowercase form from the expression in the string.
Example::
>>> underscore("DeviceType")
"device_type"
As a rule of thumb you can think of :func:`underscore` as the inverse of
:func:`camelize`, though there are cases where that does not hold::
>>> camelize(underscore("IOError"))
"IoError" | [
"Make",
"an",
"underscored",
"lowercase",
"form",
"from",
"the",
"expression",
"in",
"the",
"string",
"."
] | ad195ab72b193b57bb4cf68396c4cd8a62f1fe6c | https://github.com/jpvanhal/inflection/blob/ad195ab72b193b57bb4cf68396c4cd8a62f1fe6c/inflection.py#L395-L414 | train |
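The examples embedded in the docstrings above run as-is against the installed inflection package; a quick round-trip demonstration:

import inflection

print(inflection.camelize("device_type"))           # 'DeviceType'
print(inflection.camelize("device_type", False))    # 'deviceType'
print(inflection.underscore("DeviceType"))          # 'device_type'
print(inflection.parameterize(u"Donald E. Knuth"))  # 'donald-e-knuth'
# The camelize/underscore round trip is not always exact:
print(inflection.camelize(inflection.underscore("IOError")))  # 'IoError'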
libvips/pyvips | pyvips/vobject.py | VipsObject.print_all | def print_all(msg):
"""Print all objects.
Print a table of all active libvips objects. Handy for debugging.
"""
gc.collect()
logger.debug(msg)
vips_lib.vips_object_print_all()
logger.debug('')
"""Print all objects.
Print a table of all active libvips objects. Handy for debugging.
"""
gc.collect()
logger.debug(msg)
vips_lib.vips_object_print_all()
logger.debug('')
"def",
"print_all",
"(",
"msg",
")",
":",
"gc",
".",
"collect",
"(",
")",
"logger",
".",
"debug",
"(",
"msg",
")",
"vips_lib",
".",
"vips_object_print_all",
"(",
")",
"logger",
".",
"debug",
"(",
")"
] | Print all objects.
Print a table of all active libvips objects. Handy for debugging. | [
"Print",
"all",
"objects",
"."
] | f4d9334d2e3085b4b058129f14ac17a7872b109b | https://github.com/libvips/pyvips/blob/f4d9334d2e3085b4b058129f14ac17a7872b109b/pyvips/vobject.py#L22-L32 | train |
libvips/pyvips | pyvips/vobject.py | VipsObject.get_typeof | def get_typeof(self, name):
"""Get the GType of a GObject property.
This function returns 0 if the property does not exist.
"""
# logger.debug('VipsObject.get_typeof: self = %s, name = %s',
# str(self), name)
pspec = self._get_pspec(name)
if pspec is None:
# need to clear any error, this is horrible
Error('')
return 0
return pspec.value_type | python | def get_typeof(self, name):
"""Get the GType of a GObject property.
This function returns 0 if the property does not exist.
"""
# logger.debug('VipsObject.get_typeof: self = %s, name = %s',
# str(self), name)
pspec = self._get_pspec(name)
if pspec is None:
# need to clear any error, this is horrible
Error('')
return 0
return pspec.value_type | [
"def",
"get_typeof",
"(",
"self",
",",
"name",
")",
":",
"# logger.debug('VipsObject.get_typeof: self = %s, name = %s',",
"# str(self), name)",
"pspec",
"=",
"self",
".",
"_get_pspec",
"(",
"name",
")",
"if",
"pspec",
"is",
"None",
":",
"# need to clear any error, this is horrible",
"Error",
"(",
"''",
")",
"return",
"0",
"return",
"pspec",
".",
"value_type"
] | Get the GType of a GObject property.
This function returns 0 if the property does not exist. | [
"Get",
"the",
"GType",
"of",
"a",
"GObject",
"property",
"."
] | f4d9334d2e3085b4b058129f14ac17a7872b109b | https://github.com/libvips/pyvips/blob/f4d9334d2e3085b4b058129f14ac17a7872b109b/pyvips/vobject.py#L52-L68 | train |
libvips/pyvips | pyvips/vobject.py | VipsObject.get_blurb | def get_blurb(self, name):
"""Get the blurb for a GObject property."""
c_str = gobject_lib.g_param_spec_get_blurb(self._get_pspec(name))
return _to_string(c_str) | python | def get_blurb(self, name):
"""Get the blurb for a GObject property."""
c_str = gobject_lib.g_param_spec_get_blurb(self._get_pspec(name))
return _to_string(c_str) | [
"def",
"get_blurb",
"(",
"self",
",",
"name",
")",
":",
"c_str",
"=",
"gobject_lib",
".",
"g_param_spec_get_blurb",
"(",
"self",
".",
"_get_pspec",
"(",
"name",
")",
")",
"return",
"_to_string",
"(",
"c_str",
")"
] | Get the blurb for a GObject property. | [
"Get",
"the",
"blurb",
"for",
"a",
"GObject",
"property",
"."
] | f4d9334d2e3085b4b058129f14ac17a7872b109b | https://github.com/libvips/pyvips/blob/f4d9334d2e3085b4b058129f14ac17a7872b109b/pyvips/vobject.py#L70-L74 | train |
libvips/pyvips | pyvips/vobject.py | VipsObject.get | def get(self, name):
"""Get a GObject property.
The value of the property is converted to a Python value.
"""
logger.debug('VipsObject.get: name = %s', name)
pspec = self._get_pspec(name)
if pspec is None:
raise Error('Property not found.')
gtype = pspec.value_type
gv = pyvips.GValue()
gv.set_type(gtype)
go = ffi.cast('GObject *', self.pointer)
gobject_lib.g_object_get_property(go, _to_bytes(name), gv.pointer)
return gv.get() | python | def get(self, name):
"""Get a GObject property.
The value of the property is converted to a Python value.
"""
logger.debug('VipsObject.get: name = %s', name)
pspec = self._get_pspec(name)
if pspec is None:
raise Error('Property not found.')
gtype = pspec.value_type
gv = pyvips.GValue()
gv.set_type(gtype)
go = ffi.cast('GObject *', self.pointer)
gobject_lib.g_object_get_property(go, _to_bytes(name), gv.pointer)
return gv.get() | [
"def",
"get",
"(",
"self",
",",
"name",
")",
":",
"logger",
".",
"debug",
"(",
"'VipsObject.get: name = %s'",
",",
"name",
")",
"pspec",
"=",
"self",
".",
"_get_pspec",
"(",
"name",
")",
"if",
"pspec",
"is",
"None",
":",
"raise",
"Error",
"(",
"'Property not found.'",
")",
"gtype",
"=",
"pspec",
".",
"value_type",
"gv",
"=",
"pyvips",
".",
"GValue",
"(",
")",
"gv",
".",
"set_type",
"(",
"gtype",
")",
"go",
"=",
"ffi",
".",
"cast",
"(",
"'GObject *'",
",",
"self",
".",
"pointer",
")",
"gobject_lib",
".",
"g_object_get_property",
"(",
"go",
",",
"_to_bytes",
"(",
"name",
")",
",",
"gv",
".",
"pointer",
")",
"return",
"gv",
".",
"get",
"(",
")"
] | Get a GObject property.
The value of the property is converted to a Python value. | [
"Get",
"a",
"GObject",
"property",
"."
] | f4d9334d2e3085b4b058129f14ac17a7872b109b | https://github.com/libvips/pyvips/blob/f4d9334d2e3085b4b058129f14ac17a7872b109b/pyvips/vobject.py#L76-L95 | train |
libvips/pyvips | pyvips/vobject.py | VipsObject.set | def set(self, name, value):
"""Set a GObject property.
The value is converted to the property type, if possible.
"""
logger.debug('VipsObject.set: name = %s, value = %s', name, value)
gtype = self.get_typeof(name)
gv = pyvips.GValue()
gv.set_type(gtype)
gv.set(value)
go = ffi.cast('GObject *', self.pointer)
gobject_lib.g_object_set_property(go, _to_bytes(name), gv.pointer) | python | def set(self, name, value):
"""Set a GObject property.
The value is converted to the property type, if possible.
"""
logger.debug('VipsObject.set: name = %s, value = %s', name, value)
gtype = self.get_typeof(name)
gv = pyvips.GValue()
gv.set_type(gtype)
gv.set(value)
go = ffi.cast('GObject *', self.pointer)
gobject_lib.g_object_set_property(go, _to_bytes(name), gv.pointer) | [
"def",
"set",
"(",
"self",
",",
"name",
",",
"value",
")",
":",
"logger",
".",
"debug",
"(",
"'VipsObject.set: name = %s, value = %s'",
",",
"name",
",",
"value",
")",
"gtype",
"=",
"self",
".",
"get_typeof",
"(",
"name",
")",
"gv",
"=",
"pyvips",
".",
"GValue",
"(",
")",
"gv",
".",
"set_type",
"(",
"gtype",
")",
"gv",
".",
"set",
"(",
"value",
")",
"go",
"=",
"ffi",
".",
"cast",
"(",
"'GObject *'",
",",
"self",
".",
"pointer",
")",
"gobject_lib",
".",
"g_object_set_property",
"(",
"go",
",",
"_to_bytes",
"(",
"name",
")",
",",
"gv",
".",
"pointer",
")"
] | Set a GObject property.
The value is converted to the property type, if possible. | [
"Set",
"a",
"GObject",
"property",
"."
] | f4d9334d2e3085b4b058129f14ac17a7872b109b | https://github.com/libvips/pyvips/blob/f4d9334d2e3085b4b058129f14ac17a7872b109b/pyvips/vobject.py#L97-L112 | train |
libvips/pyvips | pyvips/vobject.py | VipsObject.set_string | def set_string(self, string_options):
"""Set a series of properties using a string.
For example::
'fred=12, tile'
'[fred=12]'
"""
vo = ffi.cast('VipsObject *', self.pointer)
cstr = _to_bytes(string_options)
result = vips_lib.vips_object_set_from_string(vo, cstr)
return result == 0 | python | def set_string(self, string_options):
"""Set a series of properties using a string.
For example::
'fred=12, tile'
'[fred=12]'
"""
vo = ffi.cast('VipsObject *', self.pointer)
cstr = _to_bytes(string_options)
result = vips_lib.vips_object_set_from_string(vo, cstr)
return result == 0 | [
"def",
"set_string",
"(",
"self",
",",
"string_options",
")",
":",
"vo",
"=",
"ffi",
".",
"cast",
"(",
"'VipsObject *'",
",",
"self",
".",
"pointer",
")",
"cstr",
"=",
"_to_bytes",
"(",
"string_options",
")",
"result",
"=",
"vips_lib",
".",
"vips_object_set_from_string",
"(",
"vo",
",",
"cstr",
")",
"return",
"result",
"==",
"0"
] | Set a series of properties using a string.
For example::
'fred=12, tile'
'[fred=12]' | [
"Set",
"a",
"series",
"of",
"properties",
"using",
"a",
"string",
"."
] | f4d9334d2e3085b4b058129f14ac17a7872b109b | https://github.com/libvips/pyvips/blob/f4d9334d2e3085b4b058129f14ac17a7872b109b/pyvips/vobject.py#L114-L128 | train |
libvips/pyvips | pyvips/vobject.py | VipsObject.get_description | def get_description(self):
"""Get the description of a GObject."""
vo = ffi.cast('VipsObject *', self.pointer)
return _to_string(vips_lib.vips_object_get_description(vo)) | python | def get_description(self):
"""Get the description of a GObject."""
vo = ffi.cast('VipsObject *', self.pointer)
return _to_string(vips_lib.vips_object_get_description(vo)) | [
"def",
"get_description",
"(",
"self",
")",
":",
"vo",
"=",
"ffi",
".",
"cast",
"(",
"'VipsObject *'",
",",
"self",
".",
"pointer",
")",
"return",
"_to_string",
"(",
"vips_lib",
".",
"vips_object_get_description",
"(",
"vo",
")",
")"
] | Get the description of a GObject. | [
"Get",
"the",
"description",
"of",
"a",
"GObject",
"."
] | f4d9334d2e3085b4b058129f14ac17a7872b109b | https://github.com/libvips/pyvips/blob/f4d9334d2e3085b4b058129f14ac17a7872b109b/pyvips/vobject.py#L130-L134 | train |
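pyvips.Image builds on VipsObject, so the get()/get_typeof() pattern in the records above is also exposed on images. A hedged sketch that needs a working libvips installation:

import pyvips

image = pyvips.Image.black(16, 16)
print(image.get_typeof("width"))             # non-zero GType for an existing property
print(image.get_typeof("no-such-property"))  # 0, per the get_typeof contract above
print(image.get("width"))                    # 16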
libvips/pyvips | pyvips/voperation.py | Operation.generate_sphinx_all | def generate_sphinx_all():
"""Generate sphinx documentation.
This generates a .rst file for all auto-generated image methods. Use it
to regenerate the docs with something like::
$ python -c \
"import pyvips; pyvips.Operation.generate_sphinx_all()" > x
And copy-paste the file contents into doc/vimage.rst in the appropriate
place.
"""
# generate list of all nicknames we can generate docstrings for
all_nicknames = []
def add_nickname(gtype, a, b):
nickname = nickname_find(gtype)
try:
Operation.generate_sphinx(nickname)
all_nicknames.append(nickname)
except Error:
pass
type_map(gtype, add_nickname)
return ffi.NULL
type_map(type_from_name('VipsOperation'), add_nickname)
all_nicknames.sort()
# remove operations we have to wrap by hand
exclude = ['scale', 'ifthenelse', 'bandjoin', 'bandrank']
all_nicknames = [x for x in all_nicknames if x not in exclude]
# Output summary table
print('.. class:: pyvips.Image\n')
print(' .. rubric:: Methods\n')
print(' .. autosummary::')
print(' :nosignatures:\n')
for nickname in all_nicknames:
print(' ~{0}'.format(nickname))
print()
# Output docs
print()
for nickname in all_nicknames:
docstr = Operation.generate_sphinx(nickname)
docstr = docstr.replace('\n', '\n ')
print(' ' + docstr) | python | def generate_sphinx_all():
"""Generate sphinx documentation.
This generates a .rst file for all auto-generated image methods. Use it
to regenerate the docs with something like::
$ python -c \
"import pyvips; pyvips.Operation.generate_sphinx_all()" > x
And copy-paste the file contents into doc/vimage.rst in the appropriate
place.
"""
# generate list of all nicknames we can generate docstrings for
all_nicknames = []
def add_nickname(gtype, a, b):
nickname = nickname_find(gtype)
try:
Operation.generate_sphinx(nickname)
all_nicknames.append(nickname)
except Error:
pass
type_map(gtype, add_nickname)
return ffi.NULL
type_map(type_from_name('VipsOperation'), add_nickname)
all_nicknames.sort()
# remove operations we have to wrap by hand
exclude = ['scale', 'ifthenelse', 'bandjoin', 'bandrank']
all_nicknames = [x for x in all_nicknames if x not in exclude]
# Output summary table
print('.. class:: pyvips.Image\n')
print(' .. rubric:: Methods\n')
print(' .. autosummary::')
print(' :nosignatures:\n')
for nickname in all_nicknames:
print(' ~{0}'.format(nickname))
print()
# Output docs
print()
for nickname in all_nicknames:
docstr = Operation.generate_sphinx(nickname)
docstr = docstr.replace('\n', '\n ')
print(' ' + docstr) | [
"def",
"generate_sphinx_all",
"(",
")",
":",
"# generate list of all nicknames we can generate docstrings for",
"all_nicknames",
"=",
"[",
"]",
"def",
"add_nickname",
"(",
"gtype",
",",
"a",
",",
"b",
")",
":",
"nickname",
"=",
"nickname_find",
"(",
"gtype",
")",
"try",
":",
"Operation",
".",
"generate_sphinx",
"(",
"nickname",
")",
"all_nicknames",
".",
"append",
"(",
"nickname",
")",
"except",
"Error",
":",
"pass",
"type_map",
"(",
"gtype",
",",
"add_nickname",
")",
"return",
"ffi",
".",
"NULL",
"type_map",
"(",
"type_from_name",
"(",
"'VipsOperation'",
")",
",",
"add_nickname",
")",
"all_nicknames",
".",
"sort",
"(",
")",
"# remove operations we have to wrap by hand",
"exclude",
"=",
"[",
"'scale'",
",",
"'ifthenelse'",
",",
"'bandjoin'",
",",
"'bandrank'",
"]",
"all_nicknames",
"=",
"[",
"x",
"for",
"x",
"in",
"all_nicknames",
"if",
"x",
"not",
"in",
"exclude",
"]",
"# Output summary table",
"print",
"(",
"'.. class:: pyvips.Image\\n'",
")",
"print",
"(",
"' .. rubric:: Methods\\n'",
")",
"print",
"(",
"' .. autosummary::'",
")",
"print",
"(",
"' :nosignatures:\\n'",
")",
"for",
"nickname",
"in",
"all_nicknames",
":",
"print",
"(",
"' ~{0}'",
".",
"format",
"(",
"nickname",
")",
")",
"print",
"(",
")",
"# Output docs",
"print",
"(",
")",
"for",
"nickname",
"in",
"all_nicknames",
":",
"docstr",
"=",
"Operation",
".",
"generate_sphinx",
"(",
"nickname",
")",
"docstr",
"=",
"docstr",
".",
"replace",
"(",
"'\\n'",
",",
"'\\n '",
")",
"print",
"(",
"' '",
"+",
"docstr",
")"
] | Generate sphinx documentation.
This generates a .rst file for all auto-generated image methods. Use it
to regenerate the docs with something like::
$ python -c \
"import pyvips; pyvips.Operation.generate_sphinx_all()" > x
And copy-paste the file contents into doc/vimage.rst in the appropriate
place. | [
"Generate",
"sphinx",
"documentation",
"."
] | f4d9334d2e3085b4b058129f14ac17a7872b109b | https://github.com/libvips/pyvips/blob/f4d9334d2e3085b4b058129f14ac17a7872b109b/pyvips/voperation.py#L482-L535 | train |
libvips/pyvips | pyvips/vregion.py | Region.new | def new(image):
"""Make a region on an image.
Returns:
A new :class:`.Region`.
Raises:
:class:`.Error`
"""
pointer = vips_lib.vips_region_new(image.pointer)
if pointer == ffi.NULL:
raise Error('unable to make region')
return pyvips.Region(pointer) | python | def new(image):
"""Make a region on an image.
Returns:
A new :class:`.Region`.
Raises:
:class:`.Error`
"""
pointer = vips_lib.vips_region_new(image.pointer)
if pointer == ffi.NULL:
raise Error('unable to make region')
return pyvips.Region(pointer) | [
"def",
"new",
"(",
"image",
")",
":",
"pointer",
"=",
"vips_lib",
".",
"vips_region_new",
"(",
"image",
".",
"pointer",
")",
"if",
"pointer",
"==",
"ffi",
".",
"NULL",
":",
"raise",
"Error",
"(",
"'unable to make region'",
")",
"return",
"pyvips",
".",
"Region",
"(",
"pointer",
")"
] | Make a region on an image.
Returns:
A new :class:`.Region`.
Raises:
:class:`.Error` | [
"Make",
"a",
"region",
"on",
"an",
"image",
"."
] | f4d9334d2e3085b4b058129f14ac17a7872b109b | https://github.com/libvips/pyvips/blob/f4d9334d2e3085b4b058129f14ac17a7872b109b/pyvips/vregion.py#L21-L36 | train |
libvips/pyvips | pyvips/vregion.py | Region.fetch | def fetch(self, x, y, w, h):
"""Fill a region with pixel data.
Pixels are filled with data!
Returns:
Pixel data.
Raises:
:class:`.Error`
"""
if not at_least_libvips(8, 8):
raise Error('libvips too old')
psize = ffi.new('size_t *')
pointer = vips_lib.vips_region_fetch(self.pointer, x, y, w, h, psize)
if pointer == ffi.NULL:
raise Error('unable to fetch from region')
pointer = ffi.gc(pointer, glib_lib.g_free)
return ffi.buffer(pointer, psize[0]) | python | def fetch(self, x, y, w, h):
"""Fill a region with pixel data.
Pixels are filled with data!
Returns:
Pixel data.
Raises:
:class:`.Error`
"""
if not at_least_libvips(8, 8):
raise Error('libvips too old')
psize = ffi.new('size_t *')
pointer = vips_lib.vips_region_fetch(self.pointer, x, y, w, h, psize)
if pointer == ffi.NULL:
raise Error('unable to fetch from region')
pointer = ffi.gc(pointer, glib_lib.g_free)
return ffi.buffer(pointer, psize[0]) | [
"def",
"fetch",
"(",
"self",
",",
"x",
",",
"y",
",",
"w",
",",
"h",
")",
":",
"if",
"not",
"at_least_libvips",
"(",
"8",
",",
"8",
")",
":",
"raise",
"Error",
"(",
"'libvips too old'",
")",
"psize",
"=",
"ffi",
".",
"new",
"(",
"'size_t *'",
")",
"pointer",
"=",
"vips_lib",
".",
"vips_region_fetch",
"(",
"self",
".",
"pointer",
",",
"x",
",",
"y",
",",
"w",
",",
"h",
",",
"psize",
")",
"if",
"pointer",
"==",
"ffi",
".",
"NULL",
":",
"raise",
"Error",
"(",
"'unable to fetch from region'",
")",
"pointer",
"=",
"ffi",
".",
"gc",
"(",
"pointer",
",",
"glib_lib",
".",
"g_free",
")",
"return",
"ffi",
".",
"buffer",
"(",
"pointer",
",",
"psize",
"[",
"0",
"]",
")"
] | Fill a region with pixel data.
Pixels are filled with data!
Returns:
Pixel data.
Raises:
:class:`.Error` | [
"Fill",
"a",
"region",
"with",
"pixel",
"data",
"."
] | f4d9334d2e3085b4b058129f14ac17a7872b109b | https://github.com/libvips/pyvips/blob/f4d9334d2e3085b4b058129f14ac17a7872b109b/pyvips/vregion.py#L52-L74 | train |
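A hedged end-to-end sketch for the two Region records above; fetch() needs libvips 8.8 or later, per the version guard in the code:

import pyvips

image = pyvips.Image.black(64, 64)   # one band, 8-bit
region = pyvips.Region.new(image)
pixels = region.fetch(0, 0, 16, 16)  # raw pixel buffer for that rectangle
print(len(pixels))                   # 256 = 16 * 16 * 1 band * 1 byte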
libvips/pyvips | pyvips/gvalue.py | GValue.gtype_to_python | def gtype_to_python(gtype):
"""Map a gtype to the name of the Python type we use to represent it.
"""
fundamental = gobject_lib.g_type_fundamental(gtype)
if gtype in GValue._gtype_to_python:
return GValue._gtype_to_python[gtype]
if fundamental in GValue._gtype_to_python:
return GValue._gtype_to_python[fundamental]
return '<unknown type>' | python | def gtype_to_python(gtype):
"""Map a gtype to the name of the Python type we use to represent it.
"""
fundamental = gobject_lib.g_type_fundamental(gtype)
if gtype in GValue._gtype_to_python:
return GValue._gtype_to_python[gtype]
if fundamental in GValue._gtype_to_python:
return GValue._gtype_to_python[fundamental]
return '<unknown type>' | [
"def",
"gtype_to_python",
"(",
"gtype",
")",
":",
"fundamental",
"=",
"gobject_lib",
".",
"g_type_fundamental",
"(",
"gtype",
")",
"if",
"gtype",
"in",
"GValue",
".",
"_gtype_to_python",
":",
"return",
"GValue",
".",
"_gtype_to_python",
"[",
"gtype",
"]",
"if",
"fundamental",
"in",
"GValue",
".",
"_gtype_to_python",
":",
"return",
"GValue",
".",
"_gtype_to_python",
"[",
"fundamental",
"]",
"return",
"'<unknown type>'"
] | Map a gtype to the name of the Python type we use to represent it. | [
"Map",
"a",
"gtype",
"to",
"the",
"name",
"of",
"the",
"Python",
"type",
"we",
"use",
"to",
"represent",
"it",
"."
] | f4d9334d2e3085b4b058129f14ac17a7872b109b | https://github.com/libvips/pyvips/blob/f4d9334d2e3085b4b058129f14ac17a7872b109b/pyvips/gvalue.py#L75-L86 | train |
libvips/pyvips | pyvips/gvalue.py | GValue.to_enum | def to_enum(gtype, value):
"""Turn a string into an enum value ready to be passed into libvips.
"""
if isinstance(value, basestring if _is_PY2 else str):
enum_value = vips_lib.vips_enum_from_nick(b'pyvips', gtype,
_to_bytes(value))
if enum_value < 0:
raise Error('no value {0} in gtype {1} ({2})'.
format(value, type_name(gtype), gtype))
else:
enum_value = value
return enum_value | python | def to_enum(gtype, value):
"""Turn a string into an enum value ready to be passed into libvips.
"""
if isinstance(value, basestring if _is_PY2 else str):
enum_value = vips_lib.vips_enum_from_nick(b'pyvips', gtype,
_to_bytes(value))
if enum_value < 0:
raise Error('no value {0} in gtype {1} ({2})'.
format(value, type_name(gtype), gtype))
else:
enum_value = value
return enum_value | [
"def",
"to_enum",
"(",
"gtype",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"basestring",
"if",
"_is_PY2",
"else",
"str",
")",
":",
"enum_value",
"=",
"vips_lib",
".",
"vips_enum_from_nick",
"(",
"b'pyvips'",
",",
"gtype",
",",
"_to_bytes",
"(",
"value",
")",
")",
"if",
"enum_value",
"<",
"0",
":",
"raise",
"Error",
"(",
"'no value {0} in gtype {1} ({2})'",
".",
"format",
"(",
"value",
",",
"type_name",
"(",
"gtype",
")",
",",
"gtype",
")",
")",
"else",
":",
"enum_value",
"=",
"value",
"return",
"enum_value"
] | Turn a string into an enum value ready to be passed into libvips. | [
"Turn",
"a",
"string",
"into",
"an",
"enum",
"value",
"ready",
"to",
"be",
"passed",
"into",
"libvips",
"."
] | f4d9334d2e3085b4b058129f14ac17a7872b109b | https://github.com/libvips/pyvips/blob/f4d9334d2e3085b4b058129f14ac17a7872b109b/pyvips/gvalue.py#L89-L103 | train |
libvips/pyvips | pyvips/gvalue.py | GValue.from_enum | def from_enum(gtype, enum_value):
"""Turn an int back into an enum string.
"""
pointer = vips_lib.vips_enum_nick(gtype, enum_value)
if pointer == ffi.NULL:
raise Error('value not in enum')
return _to_string(pointer) | python | def from_enum(gtype, enum_value):
"""Turn an int back into an enum string.
"""
pointer = vips_lib.vips_enum_nick(gtype, enum_value)
if pointer == ffi.NULL:
raise Error('value not in enum')
return _to_string(pointer) | [
"def",
"from_enum",
"(",
"gtype",
",",
"enum_value",
")",
":",
"pointer",
"=",
"vips_lib",
".",
"vips_enum_nick",
"(",
"gtype",
",",
"enum_value",
")",
"if",
"pointer",
"==",
"ffi",
".",
"NULL",
":",
"raise",
"Error",
"(",
"'value not in enum'",
")",
"return",
"_to_string",
"(",
"pointer",
")"
] | Turn an int back into an enum string. | [
"Turn",
"an",
"int",
"back",
"into",
"an",
"enum",
"string",
"."
] | f4d9334d2e3085b4b058129f14ac17a7872b109b | https://github.com/libvips/pyvips/blob/f4d9334d2e3085b4b058129f14ac17a7872b109b/pyvips/gvalue.py#L106-L115 | train |
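The GValue records above compose into simple round-trips. A hedged sketch (set_type and get are GValue methods referenced by the VipsObject records earlier, though their bodies are not shown here):

import pyvips
from pyvips import GValue, type_from_name

gv = GValue()
gv.set_type(GValue.gint_type)
gv.set(42)
print(gv.get())  # 42

# Enum nicks convert both ways:
gtype = type_from_name("VipsInterpretation")
print(GValue.from_enum(gtype, GValue.to_enum(gtype, "srgb")))  # 'srgb'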
libvips/pyvips | pyvips/gvalue.py | GValue.set | def set(self, value):
"""Set a GValue.
The value is converted to the type of the GValue, if possible, and
assigned.
"""
# logger.debug('GValue.set: value = %s', value)
gtype = self.gvalue.g_type
fundamental = gobject_lib.g_type_fundamental(gtype)
if gtype == GValue.gbool_type:
gobject_lib.g_value_set_boolean(self.gvalue, value)
elif gtype == GValue.gint_type:
gobject_lib.g_value_set_int(self.gvalue, int(value))
elif gtype == GValue.guint64_type:
gobject_lib.g_value_set_uint64(self.gvalue, value)
elif gtype == GValue.gdouble_type:
gobject_lib.g_value_set_double(self.gvalue, value)
elif fundamental == GValue.genum_type:
gobject_lib.g_value_set_enum(self.gvalue,
GValue.to_enum(gtype, value))
elif fundamental == GValue.gflags_type:
gobject_lib.g_value_set_flags(self.gvalue, value)
elif gtype == GValue.gstr_type:
gobject_lib.g_value_set_string(self.gvalue, _to_bytes(value))
elif gtype == GValue.refstr_type:
vips_lib.vips_value_set_ref_string(self.gvalue, _to_bytes(value))
elif fundamental == GValue.gobject_type:
gobject_lib.g_value_set_object(self.gvalue, value.pointer)
elif gtype == GValue.array_int_type:
if isinstance(value, numbers.Number):
value = [value]
array = ffi.new('int[]', value)
vips_lib.vips_value_set_array_int(self.gvalue, array, len(value))
elif gtype == GValue.array_double_type:
if isinstance(value, numbers.Number):
value = [value]
array = ffi.new('double[]', value)
vips_lib.vips_value_set_array_double(self.gvalue, array,
len(value))
elif gtype == GValue.array_image_type:
if isinstance(value, pyvips.Image):
value = [value]
vips_lib.vips_value_set_array_image(self.gvalue, len(value))
array = vips_lib.vips_value_get_array_image(self.gvalue, ffi.NULL)
for i, image in enumerate(value):
gobject_lib.g_object_ref(image.pointer)
array[i] = image.pointer
elif gtype == GValue.blob_type:
# we need to set the blob to a copy of the string that vips_lib
# can own
memory = glib_lib.g_malloc(len(value))
ffi.memmove(memory, value, len(value))
# this is horrible!
#
# * in API mode, we must have 8.6+ and use set_blob_free to
# attach the metadata to avoid leaks
# * pre-8.6, we just pass a NULL free pointer and live with the
# leak
#
# this is because in API mode you can't pass a builtin (what
# vips_lib.g_free() becomes) as a parameter to ffi.callback(), and
# vips_value_set_blob() needs a callback for arg 2
#
# additionally, you can't make a py def which calls g_free() and
# then use the py def in the callback, since libvips will trigger
# these functions during cleanup, and py will have shut down by
# then and you'll get a segv
if at_least_libvips(8, 6):
vips_lib.vips_value_set_blob_free(self.gvalue,
memory, len(value))
else:
if pyvips.API_mode:
vips_lib.vips_value_set_blob(self.gvalue,
ffi.NULL, memory, len(value))
else:
vips_lib.vips_value_set_blob(self.gvalue,
glib_lib.g_free,
memory, len(value))
else:
raise Error('unsupported gtype for set {0}, fundamental {1}'.
format(type_name(gtype), type_name(fundamental))) | python | def set(self, value):
"""Set a GValue.
The value is converted to the type of the GValue, if possible, and
assigned.
"""
# logger.debug('GValue.set: value = %s', value)
gtype = self.gvalue.g_type
fundamental = gobject_lib.g_type_fundamental(gtype)
if gtype == GValue.gbool_type:
gobject_lib.g_value_set_boolean(self.gvalue, value)
elif gtype == GValue.gint_type:
gobject_lib.g_value_set_int(self.gvalue, int(value))
elif gtype == GValue.guint64_type:
gobject_lib.g_value_set_uint64(self.gvalue, value)
elif gtype == GValue.gdouble_type:
gobject_lib.g_value_set_double(self.gvalue, value)
elif fundamental == GValue.genum_type:
gobject_lib.g_value_set_enum(self.gvalue,
GValue.to_enum(gtype, value))
elif fundamental == GValue.gflags_type:
gobject_lib.g_value_set_flags(self.gvalue, value)
elif gtype == GValue.gstr_type:
gobject_lib.g_value_set_string(self.gvalue, _to_bytes(value))
elif gtype == GValue.refstr_type:
vips_lib.vips_value_set_ref_string(self.gvalue, _to_bytes(value))
elif fundamental == GValue.gobject_type:
gobject_lib.g_value_set_object(self.gvalue, value.pointer)
elif gtype == GValue.array_int_type:
if isinstance(value, numbers.Number):
value = [value]
array = ffi.new('int[]', value)
vips_lib.vips_value_set_array_int(self.gvalue, array, len(value))
elif gtype == GValue.array_double_type:
if isinstance(value, numbers.Number):
value = [value]
array = ffi.new('double[]', value)
vips_lib.vips_value_set_array_double(self.gvalue, array,
len(value))
elif gtype == GValue.array_image_type:
if isinstance(value, pyvips.Image):
value = [value]
vips_lib.vips_value_set_array_image(self.gvalue, len(value))
array = vips_lib.vips_value_get_array_image(self.gvalue, ffi.NULL)
for i, image in enumerate(value):
gobject_lib.g_object_ref(image.pointer)
array[i] = image.pointer
elif gtype == GValue.blob_type:
# we need to set the blob to a copy of the string that vips_lib
# can own
memory = glib_lib.g_malloc(len(value))
ffi.memmove(memory, value, len(value))
# this is horrible!
#
# * in API mode, we must have 8.6+ and use set_blob_free to
# attach the metadata to avoid leaks
# * pre-8.6, we just pass a NULL free pointer and live with the
# leak
#
# this is because in API mode you can't pass a builtin (what
# vips_lib.g_free() becomes) as a parameter to ffi.callback(), and
# vips_value_set_blob() needs a callback for arg 2
#
# additionally, you can't make a py def which calls g_free() and
# then use the py def in the callback, since libvips will trigger
# these functions during cleanup, and py will have shut down by
# then and you'll get a segv
if at_least_libvips(8, 6):
vips_lib.vips_value_set_blob_free(self.gvalue,
memory, len(value))
else:
if pyvips.API_mode:
vips_lib.vips_value_set_blob(self.gvalue,
ffi.NULL, memory, len(value))
else:
vips_lib.vips_value_set_blob(self.gvalue,
glib_lib.g_free,
memory, len(value))
else:
raise Error('unsupported gtype for set {0}, fundamental {1}'.
format(type_name(gtype), type_name(fundamental))) | [
"def",
"set",
"(",
"self",
",",
"value",
")",
":",
"# logger.debug('GValue.set: value = %s', value)",
"gtype",
"=",
"self",
".",
"gvalue",
".",
"g_type",
"fundamental",
"=",
"gobject_lib",
".",
"g_type_fundamental",
"(",
"gtype",
")",
"if",
"gtype",
"==",
"GValue",
".",
"gbool_type",
":",
"gobject_lib",
".",
"g_value_set_boolean",
"(",
"self",
".",
"gvalue",
",",
"value",
")",
"elif",
"gtype",
"==",
"GValue",
".",
"gint_type",
":",
"gobject_lib",
".",
"g_value_set_int",
"(",
"self",
".",
"gvalue",
",",
"int",
"(",
"value",
")",
")",
"elif",
"gtype",
"==",
"GValue",
".",
"guint64_type",
":",
"gobject_lib",
".",
"g_value_set_uint64",
"(",
"self",
".",
"gvalue",
",",
"value",
")",
"elif",
"gtype",
"==",
"GValue",
".",
"gdouble_type",
":",
"gobject_lib",
".",
"g_value_set_double",
"(",
"self",
".",
"gvalue",
",",
"value",
")",
"elif",
"fundamental",
"==",
"GValue",
".",
"genum_type",
":",
"gobject_lib",
".",
"g_value_set_enum",
"(",
"self",
".",
"gvalue",
",",
"GValue",
".",
"to_enum",
"(",
"gtype",
",",
"value",
")",
")",
"elif",
"fundamental",
"==",
"GValue",
".",
"gflags_type",
":",
"gobject_lib",
".",
"g_value_set_flags",
"(",
"self",
".",
"gvalue",
",",
"value",
")",
"elif",
"gtype",
"==",
"GValue",
".",
"gstr_type",
":",
"gobject_lib",
".",
"g_value_set_string",
"(",
"self",
".",
"gvalue",
",",
"_to_bytes",
"(",
"value",
")",
")",
"elif",
"gtype",
"==",
"GValue",
".",
"refstr_type",
":",
"vips_lib",
".",
"vips_value_set_ref_string",
"(",
"self",
".",
"gvalue",
",",
"_to_bytes",
"(",
"value",
")",
")",
"elif",
"fundamental",
"==",
"GValue",
".",
"gobject_type",
":",
"gobject_lib",
".",
"g_value_set_object",
"(",
"self",
".",
"gvalue",
",",
"value",
".",
"pointer",
")",
"elif",
"gtype",
"==",
"GValue",
".",
"array_int_type",
":",
"if",
"isinstance",
"(",
"value",
",",
"numbers",
".",
"Number",
")",
":",
"value",
"=",
"[",
"value",
"]",
"array",
"=",
"ffi",
".",
"new",
"(",
"'int[]'",
",",
"value",
")",
"vips_lib",
".",
"vips_value_set_array_int",
"(",
"self",
".",
"gvalue",
",",
"array",
",",
"len",
"(",
"value",
")",
")",
"elif",
"gtype",
"==",
"GValue",
".",
"array_double_type",
":",
"if",
"isinstance",
"(",
"value",
",",
"numbers",
".",
"Number",
")",
":",
"value",
"=",
"[",
"value",
"]",
"array",
"=",
"ffi",
".",
"new",
"(",
"'double[]'",
",",
"value",
")",
"vips_lib",
".",
"vips_value_set_array_double",
"(",
"self",
".",
"gvalue",
",",
"array",
",",
"len",
"(",
"value",
")",
")",
"elif",
"gtype",
"==",
"GValue",
".",
"array_image_type",
":",
"if",
"isinstance",
"(",
"value",
",",
"pyvips",
".",
"Image",
")",
":",
"value",
"=",
"[",
"value",
"]",
"vips_lib",
".",
"vips_value_set_array_image",
"(",
"self",
".",
"gvalue",
",",
"len",
"(",
"value",
")",
")",
"array",
"=",
"vips_lib",
".",
"vips_value_get_array_image",
"(",
"self",
".",
"gvalue",
",",
"ffi",
".",
"NULL",
")",
"for",
"i",
",",
"image",
"in",
"enumerate",
"(",
"value",
")",
":",
"gobject_lib",
".",
"g_object_ref",
"(",
"image",
".",
"pointer",
")",
"array",
"[",
"i",
"]",
"=",
"image",
".",
"pointer",
"elif",
"gtype",
"==",
"GValue",
".",
"blob_type",
":",
"# we need to set the blob to a copy of the string that vips_lib",
"# can own",
"memory",
"=",
"glib_lib",
".",
"g_malloc",
"(",
"len",
"(",
"value",
")",
")",
"ffi",
".",
"memmove",
"(",
"memory",
",",
"value",
",",
"len",
"(",
"value",
")",
")",
"# this is horrible!",
"#",
"# * in API mode, we must have 8.6+ and use set_blob_free to",
"# attach the metadata to avoid leaks",
"# * pre-8.6, we just pass a NULL free pointer and live with the",
"# leak",
"#",
"# this is because in API mode you can't pass a builtin (what",
"# vips_lib.g_free() becomes) as a parameter to ffi.callback(), and",
"# vips_value_set_blob() needs a callback for arg 2",
"#",
"# additionally, you can't make a py def which calls g_free() and",
"# then use the py def in the callback, since libvips will trigger",
"# these functions during cleanup, and py will have shut down by",
"# then and you'll get a segv",
"if",
"at_least_libvips",
"(",
"8",
",",
"6",
")",
":",
"vips_lib",
".",
"vips_value_set_blob_free",
"(",
"self",
".",
"gvalue",
",",
"memory",
",",
"len",
"(",
"value",
")",
")",
"else",
":",
"if",
"pyvips",
".",
"API_mode",
":",
"vips_lib",
".",
"vips_value_set_blob",
"(",
"self",
".",
"gvalue",
",",
"ffi",
".",
"NULL",
",",
"memory",
",",
"len",
"(",
"value",
")",
")",
"else",
":",
"vips_lib",
".",
"vips_value_set_blob",
"(",
"self",
".",
"gvalue",
",",
"glib_lib",
".",
"g_free",
",",
"memory",
",",
"len",
"(",
"value",
")",
")",
"else",
":",
"raise",
"Error",
"(",
"'unsupported gtype for set {0}, fundamental {1}'",
".",
"format",
"(",
"type_name",
"(",
"gtype",
")",
",",
"type_name",
"(",
"fundamental",
")",
")",
")"
] | Set a GValue.
The value is converted to the type of the GValue, if possible, and
assigned. | [
"Set",
"a",
"GValue",
"."
] | f4d9334d2e3085b4b058129f14ac17a7872b109b | https://github.com/libvips/pyvips/blob/f4d9334d2e3085b4b058129f14ac17a7872b109b/pyvips/gvalue.py#L139-L228 | train |
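A short sketch of GValue.set, assuming the set_type initialiser this class defines alongside set::

    import pyvips

    gv = pyvips.GValue()
    gv.set_type(pyvips.GValue.gint_type)   # initialise as a G_TYPE_INT
    gv.set(12)                             # stored via g_value_set_int

    arr = pyvips.GValue()
    arr.set_type(pyvips.GValue.array_double_type)
    arr.set([1.5, 2.5, 3.5])               # a bare number would be wrapped in a list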
libvips/pyvips | pyvips/gvalue.py | GValue.get | def get(self):
"""Get the contents of a GValue.
The contents of the GValue are read out as a Python type.
"""
# logger.debug('GValue.get: self = %s', self)
gtype = self.gvalue.g_type
fundamental = gobject_lib.g_type_fundamental(gtype)
result = None
if gtype == GValue.gbool_type:
result = bool(gobject_lib.g_value_get_boolean(self.gvalue))
elif gtype == GValue.gint_type:
result = gobject_lib.g_value_get_int(self.gvalue)
elif gtype == GValue.guint64_type:
result = gobject_lib.g_value_get_uint64(self.gvalue)
elif gtype == GValue.gdouble_type:
result = gobject_lib.g_value_get_double(self.gvalue)
elif fundamental == GValue.genum_type:
return GValue.from_enum(gtype,
gobject_lib.g_value_get_enum(self.gvalue))
elif fundamental == GValue.gflags_type:
result = gobject_lib.g_value_get_flags(self.gvalue)
elif gtype == GValue.gstr_type:
pointer = gobject_lib.g_value_get_string(self.gvalue)
if pointer != ffi.NULL:
result = _to_string(pointer)
elif gtype == GValue.refstr_type:
psize = ffi.new('size_t *')
pointer = vips_lib.vips_value_get_ref_string(self.gvalue, psize)
# psize[0] will be number of bytes in string, but just assume it's
# NULL-terminated
result = _to_string(pointer)
elif gtype == GValue.image_type:
# g_value_get_object() will not add a ref ... that is
# held by the gvalue
go = gobject_lib.g_value_get_object(self.gvalue)
vi = ffi.cast('VipsImage *', go)
# we want a ref that will last with the life of the vimage:
# this ref is matched by the unref that's attached to finalize
# by Image()
gobject_lib.g_object_ref(go)
result = pyvips.Image(vi)
elif gtype == GValue.array_int_type:
pint = ffi.new('int *')
array = vips_lib.vips_value_get_array_int(self.gvalue, pint)
result = []
for i in range(0, pint[0]):
result.append(array[i])
elif gtype == GValue.array_double_type:
pint = ffi.new('int *')
array = vips_lib.vips_value_get_array_double(self.gvalue, pint)
result = []
for i in range(0, pint[0]):
result.append(array[i])
elif gtype == GValue.array_image_type:
pint = ffi.new('int *')
array = vips_lib.vips_value_get_array_image(self.gvalue, pint)
result = []
for i in range(0, pint[0]):
vi = array[i]
gobject_lib.g_object_ref(vi)
image = pyvips.Image(vi)
result.append(image)
elif gtype == GValue.blob_type:
psize = ffi.new('size_t *')
array = vips_lib.vips_value_get_blob(self.gvalue, psize)
buf = ffi.cast('char*', array)
result = ffi.unpack(buf, psize[0])
else:
raise Error('unsupported gtype for get {0}'.
format(type_name(gtype)))
return result | python | def get(self):
"""Get the contents of a GValue.
The contents of the GValue are read out as a Python type.
"""
# logger.debug('GValue.get: self = %s', self)
gtype = self.gvalue.g_type
fundamental = gobject_lib.g_type_fundamental(gtype)
result = None
if gtype == GValue.gbool_type:
result = bool(gobject_lib.g_value_get_boolean(self.gvalue))
elif gtype == GValue.gint_type:
result = gobject_lib.g_value_get_int(self.gvalue)
elif gtype == GValue.guint64_type:
result = gobject_lib.g_value_get_uint64(self.gvalue)
elif gtype == GValue.gdouble_type:
result = gobject_lib.g_value_get_double(self.gvalue)
elif fundamental == GValue.genum_type:
return GValue.from_enum(gtype,
gobject_lib.g_value_get_enum(self.gvalue))
elif fundamental == GValue.gflags_type:
result = gobject_lib.g_value_get_flags(self.gvalue)
elif gtype == GValue.gstr_type:
pointer = gobject_lib.g_value_get_string(self.gvalue)
if pointer != ffi.NULL:
result = _to_string(pointer)
elif gtype == GValue.refstr_type:
psize = ffi.new('size_t *')
pointer = vips_lib.vips_value_get_ref_string(self.gvalue, psize)
# psize[0] will be number of bytes in string, but just assume it's
# NULL-terminated
result = _to_string(pointer)
elif gtype == GValue.image_type:
# g_value_get_object() will not add a ref ... that is
# held by the gvalue
go = gobject_lib.g_value_get_object(self.gvalue)
vi = ffi.cast('VipsImage *', go)
# we want a ref that will last with the life of the vimage:
# this ref is matched by the unref that's attached to finalize
# by Image()
gobject_lib.g_object_ref(go)
result = pyvips.Image(vi)
elif gtype == GValue.array_int_type:
pint = ffi.new('int *')
array = vips_lib.vips_value_get_array_int(self.gvalue, pint)
result = []
for i in range(0, pint[0]):
result.append(array[i])
elif gtype == GValue.array_double_type:
pint = ffi.new('int *')
array = vips_lib.vips_value_get_array_double(self.gvalue, pint)
result = []
for i in range(0, pint[0]):
result.append(array[i])
elif gtype == GValue.array_image_type:
pint = ffi.new('int *')
array = vips_lib.vips_value_get_array_image(self.gvalue, pint)
result = []
for i in range(0, pint[0]):
vi = array[i]
gobject_lib.g_object_ref(vi)
image = pyvips.Image(vi)
result.append(image)
elif gtype == GValue.blob_type:
psize = ffi.new('size_t *')
array = vips_lib.vips_value_get_blob(self.gvalue, psize)
buf = ffi.cast('char*', array)
result = ffi.unpack(buf, psize[0])
else:
raise Error('unsupported gtype for get {0}'.
format(type_name(gtype)))
return result | [
"def",
"get",
"(",
"self",
")",
":",
"# logger.debug('GValue.get: self = %s', self)",
"gtype",
"=",
"self",
".",
"gvalue",
".",
"g_type",
"fundamental",
"=",
"gobject_lib",
".",
"g_type_fundamental",
"(",
"gtype",
")",
"result",
"=",
"None",
"if",
"gtype",
"==",
"GValue",
".",
"gbool_type",
":",
"result",
"=",
"bool",
"(",
"gobject_lib",
".",
"g_value_get_boolean",
"(",
"self",
".",
"gvalue",
")",
")",
"elif",
"gtype",
"==",
"GValue",
".",
"gint_type",
":",
"result",
"=",
"gobject_lib",
".",
"g_value_get_int",
"(",
"self",
".",
"gvalue",
")",
"elif",
"gtype",
"==",
"GValue",
".",
"guint64_type",
":",
"result",
"=",
"gobject_lib",
".",
"g_value_get_uint64",
"(",
"self",
".",
"gvalue",
")",
"elif",
"gtype",
"==",
"GValue",
".",
"gdouble_type",
":",
"result",
"=",
"gobject_lib",
".",
"g_value_get_double",
"(",
"self",
".",
"gvalue",
")",
"elif",
"fundamental",
"==",
"GValue",
".",
"genum_type",
":",
"return",
"GValue",
".",
"from_enum",
"(",
"gtype",
",",
"gobject_lib",
".",
"g_value_get_enum",
"(",
"self",
".",
"gvalue",
")",
")",
"elif",
"fundamental",
"==",
"GValue",
".",
"gflags_type",
":",
"result",
"=",
"gobject_lib",
".",
"g_value_get_flags",
"(",
"self",
".",
"gvalue",
")",
"elif",
"gtype",
"==",
"GValue",
".",
"gstr_type",
":",
"pointer",
"=",
"gobject_lib",
".",
"g_value_get_string",
"(",
"self",
".",
"gvalue",
")",
"if",
"pointer",
"!=",
"ffi",
".",
"NULL",
":",
"result",
"=",
"_to_string",
"(",
"pointer",
")",
"elif",
"gtype",
"==",
"GValue",
".",
"refstr_type",
":",
"psize",
"=",
"ffi",
".",
"new",
"(",
"'size_t *'",
")",
"pointer",
"=",
"vips_lib",
".",
"vips_value_get_ref_string",
"(",
"self",
".",
"gvalue",
",",
"psize",
")",
"# psize[0] will be number of bytes in string, but just assume it's",
"# NULL-terminated",
"result",
"=",
"_to_string",
"(",
"pointer",
")",
"elif",
"gtype",
"==",
"GValue",
".",
"image_type",
":",
"# g_value_get_object() will not add a ref ... that is",
"# held by the gvalue",
"go",
"=",
"gobject_lib",
".",
"g_value_get_object",
"(",
"self",
".",
"gvalue",
")",
"vi",
"=",
"ffi",
".",
"cast",
"(",
"'VipsImage *'",
",",
"go",
")",
"# we want a ref that will last with the life of the vimage:",
"# this ref is matched by the unref that's attached to finalize",
"# by Image()",
"gobject_lib",
".",
"g_object_ref",
"(",
"go",
")",
"result",
"=",
"pyvips",
".",
"Image",
"(",
"vi",
")",
"elif",
"gtype",
"==",
"GValue",
".",
"array_int_type",
":",
"pint",
"=",
"ffi",
".",
"new",
"(",
"'int *'",
")",
"array",
"=",
"vips_lib",
".",
"vips_value_get_array_int",
"(",
"self",
".",
"gvalue",
",",
"pint",
")",
"result",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"pint",
"[",
"0",
"]",
")",
":",
"result",
".",
"append",
"(",
"array",
"[",
"i",
"]",
")",
"elif",
"gtype",
"==",
"GValue",
".",
"array_double_type",
":",
"pint",
"=",
"ffi",
".",
"new",
"(",
"'int *'",
")",
"array",
"=",
"vips_lib",
".",
"vips_value_get_array_double",
"(",
"self",
".",
"gvalue",
",",
"pint",
")",
"result",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"pint",
"[",
"0",
"]",
")",
":",
"result",
".",
"append",
"(",
"array",
"[",
"i",
"]",
")",
"elif",
"gtype",
"==",
"GValue",
".",
"array_image_type",
":",
"pint",
"=",
"ffi",
".",
"new",
"(",
"'int *'",
")",
"array",
"=",
"vips_lib",
".",
"vips_value_get_array_image",
"(",
"self",
".",
"gvalue",
",",
"pint",
")",
"result",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"pint",
"[",
"0",
"]",
")",
":",
"vi",
"=",
"array",
"[",
"i",
"]",
"gobject_lib",
".",
"g_object_ref",
"(",
"vi",
")",
"image",
"=",
"pyvips",
".",
"Image",
"(",
"vi",
")",
"result",
".",
"append",
"(",
"image",
")",
"elif",
"gtype",
"==",
"GValue",
".",
"blob_type",
":",
"psize",
"=",
"ffi",
".",
"new",
"(",
"'size_t *'",
")",
"array",
"=",
"vips_lib",
".",
"vips_value_get_blob",
"(",
"self",
".",
"gvalue",
",",
"psize",
")",
"buf",
"=",
"ffi",
".",
"cast",
"(",
"'char*'",
",",
"array",
")",
"result",
"=",
"ffi",
".",
"unpack",
"(",
"buf",
",",
"psize",
"[",
"0",
"]",
")",
"else",
":",
"raise",
"Error",
"(",
"'unsupported gtype for get {0}'",
".",
"format",
"(",
"type_name",
"(",
"gtype",
")",
")",
")",
"return",
"result"
] | Get the contents of a GValue.
The contents of the GValue are read out as a Python type. | [
"Get",
"the",
"contents",
"of",
"a",
"GValue",
"."
] | f4d9334d2e3085b4b058129f14ac17a7872b109b | https://github.com/libvips/pyvips/blob/f4d9334d2e3085b4b058129f14ac17a7872b109b/pyvips/gvalue.py#L230-L314 | train |
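set and get roundtrip, again assuming the class's set_type initialiser::

    import pyvips

    gv = pyvips.GValue()
    gv.set_type(pyvips.GValue.gstr_type)
    gv.set('hello')
    assert gv.get() == 'hello'             # read back as a Python unicode str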
libvips/pyvips | examples/cod.py | to_polar | def to_polar(image):
"""Transform image coordinates to polar.
The image is transformed so that it is wrapped around a point in the
centre. Vertical straight lines become circles or segments of circles,
horizontal straight lines become radial spokes.
"""
# xy image, origin in the centre, scaled to fit image to a circle
xy = pyvips.Image.xyz(image.width, image.height)
xy -= [image.width / 2.0, image.height / 2.0]
scale = min(image.width, image.height) / float(image.width)
xy *= 2.0 / scale
index = xy.polar()
# scale vertical axis to 360 degrees
index *= [1, image.height / 360.0]
return image.mapim(index) | python | def to_polar(image):
"""Transform image coordinates to polar.
The image is transformed so that it is wrapped around a point in the
centre. Vertical straight lines become circles or segments of circles,
horizontal straight lines become radial spokes.
"""
# xy image, origin in the centre, scaled to fit image to a circle
xy = pyvips.Image.xyz(image.width, image.height)
xy -= [image.width / 2.0, image.height / 2.0]
scale = min(image.width, image.height) / float(image.width)
xy *= 2.0 / scale
index = xy.polar()
# scale vertical axis to 360 degrees
index *= [1, image.height / 360.0]
return image.mapim(index) | [
"def",
"to_polar",
"(",
"image",
")",
":",
"# xy image, origin in the centre, scaled to fit image to a circle",
"xy",
"=",
"pyvips",
".",
"Image",
".",
"xyz",
"(",
"image",
".",
"width",
",",
"image",
".",
"height",
")",
"xy",
"-=",
"[",
"image",
".",
"width",
"/",
"2.0",
",",
"image",
".",
"height",
"/",
"2.0",
"]",
"scale",
"=",
"min",
"(",
"image",
".",
"width",
",",
"image",
".",
"height",
")",
"/",
"float",
"(",
"image",
".",
"width",
")",
"xy",
"*=",
"2.0",
"/",
"scale",
"index",
"=",
"xy",
".",
"polar",
"(",
")",
"# scale vertical axis to 360 degrees",
"index",
"*=",
"[",
"1",
",",
"image",
".",
"height",
"/",
"360.0",
"]",
"return",
"image",
".",
"mapim",
"(",
"index",
")"
] | Transform image coordinates to polar.
The image is transformed so that it is wrapped around a point in the
centre. Vertical straight lines become circles or segments of circles,
horizontal straight lines become radial spokes. | [
"Transform",
"image",
"coordinates",
"to",
"polar",
"."
] | f4d9334d2e3085b4b058129f14ac17a7872b109b | https://github.com/libvips/pyvips/blob/f4d9334d2e3085b4b058129f14ac17a7872b109b/examples/cod.py#L12-L30 | train |
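A hedged usage sketch for to_polar ('input.jpg' and 'polar.jpg' are placeholder filenames)::

    import pyvips

    image = pyvips.Image.new_from_file('input.jpg')
    to_polar(image).write_to_file('polar.jpg')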
libvips/pyvips | examples/cod.py | to_rectangular | def to_rectangular(image):
"""Transform image coordinates to rectangular.
The image is transformed so that it is unwrapped from a point in the
centre. Circles or segments of circles become vertical straight lines,
radial lines become horizontal lines.
"""
# xy image, vertical scaled to 360 degrees
xy = pyvips.Image.xyz(image.width, image.height)
xy *= [1, 360.0 / image.height]
index = xy.rect()
# scale to image rect
scale = min(image.width, image.height) / float(image.width)
index *= scale / 2.0
index += [image.width / 2.0, image.height / 2.0]
return image.mapim(index) | python | def to_rectangular(image):
"""Transform image coordinates to rectangular.
The image is transformed so that it is unwrapped from a point in the
centre. Circles or segments of circles become vertical straight lines,
radial lines become horizontal lines.
"""
# xy image, vertical scaled to 360 degrees
xy = pyvips.Image.xyz(image.width, image.height)
xy *= [1, 360.0 / image.height]
index = xy.rect()
# scale to image rect
scale = min(image.width, image.height) / float(image.width)
index *= scale / 2.0
index += [image.width / 2.0, image.height / 2.0]
return image.mapim(index) | [
"def",
"to_rectangular",
"(",
"image",
")",
":",
"# xy image, vertical scaled to 360 degrees",
"xy",
"=",
"pyvips",
".",
"Image",
".",
"xyz",
"(",
"image",
".",
"width",
",",
"image",
".",
"height",
")",
"xy",
"*=",
"[",
"1",
",",
"360.0",
"/",
"image",
".",
"height",
"]",
"index",
"=",
"xy",
".",
"rect",
"(",
")",
"# scale to image rect",
"scale",
"=",
"min",
"(",
"image",
".",
"width",
",",
"image",
".",
"height",
")",
"/",
"float",
"(",
"image",
".",
"width",
")",
"index",
"*=",
"scale",
"/",
"2.0",
"index",
"+=",
"[",
"image",
".",
"width",
"/",
"2.0",
",",
"image",
".",
"height",
"/",
"2.0",
"]",
"return",
"image",
".",
"mapim",
"(",
"index",
")"
] | Transform image coordinates to rectangular.
The image is transformed so that it is unwrapped from a point in the
centre. Circles or segments of circles become vertical straight lines,
radial lines become horizontal lines. | [
"Transform",
"image",
"coordinates",
"to",
"rectangular",
"."
] | f4d9334d2e3085b4b058129f14ac17a7872b109b | https://github.com/libvips/pyvips/blob/f4d9334d2e3085b4b058129f14ac17a7872b109b/examples/cod.py#L33-L51 | train |
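to_rectangular undoes to_polar, up to resampling error; a roundtrip sketch with placeholder filenames::

    import pyvips

    image = pyvips.Image.new_from_file('input.jpg')
    to_rectangular(to_polar(image)).write_to_file('roundtrip.jpg')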
libvips/pyvips | pyvips/error.py | _to_string | def _to_string(x):
"""Convert to a unicode string.
If x is a byte string, assume it is utf-8 and decode to a Python unicode
string. You must call this on text strings you get back from libvips.
"""
if x == ffi.NULL:
x = 'NULL'
else:
x = ffi.string(x)
if isinstance(x, byte_type):
x = x.decode('utf-8')
return x | python | def _to_string(x):
"""Convert to a unicode string.
If x is a byte string, assume it is utf-8 and decode to a Python unicode
string. You must call this on text strings you get back from libvips.
"""
if x == ffi.NULL:
x = 'NULL'
else:
x = ffi.string(x)
if isinstance(x, byte_type):
x = x.decode('utf-8')
return x | [
"def",
"_to_string",
"(",
"x",
")",
":",
"if",
"x",
"==",
"ffi",
".",
"NULL",
":",
"x",
"=",
"'NULL'",
"else",
":",
"x",
"=",
"ffi",
".",
"string",
"(",
"x",
")",
"if",
"isinstance",
"(",
"x",
",",
"byte_type",
")",
":",
"x",
"=",
"x",
".",
"decode",
"(",
"'utf-8'",
")",
"return",
"x"
] | Convert to a unicode string.
If x is a byte string, assume it is utf-8 and decode to a Python unicode
string. You must call this on text strings you get back from libvips. | [
"Convert",
"to",
"a",
"unicode",
"string",
"."
] | f4d9334d2e3085b4b058129f14ac17a7872b109b | https://github.com/libvips/pyvips/blob/f4d9334d2e3085b4b058129f14ac17a7872b109b/pyvips/error.py#L33-L47 | train |
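The decode behaviour _to_string implements, illustrated with plain Python bytes rather than a real ffi pointer::

    raw = b'caf\xc3\xa9'   # utf-8 bytes, as libvips would return them
    text = raw.decode('utf-8') if isinstance(raw, bytes) else raw
    assert text == 'café'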
libvips/pyvips | pyvips/vinterpolate.py | Interpolate.new | def new(name):
"""Make a new interpolator by name.
Make a new interpolator from the libvips class nickname. For example::
inter = pyvips.Interpolator.new('bicubic')
You can get a list of all supported interpolators from the command-line
with::
$ vips -l interpolate
See for example :meth:`.affine`.
"""
# logger.debug('VipsInterpolate.new: name = %s', name)
vi = vips_lib.vips_interpolate_new(_to_bytes(name))
if vi == ffi.NULL:
raise Error('no such interpolator {0}'.format(name))
return Interpolate(vi) | python | def new(name):
"""Make a new interpolator by name.
Make a new interpolator from the libvips class nickname. For example::
inter = pyvips.Interpolator.new('bicubic')
You can get a list of all supported interpolators from the command-line
with::
$ vips -l interpolate
See for example :meth:`.affine`.
"""
# logger.debug('VipsInterpolate.new: name = %s', name)
vi = vips_lib.vips_interpolate_new(_to_bytes(name))
if vi == ffi.NULL:
raise Error('no such interpolator {0}'.format(name))
return Interpolate(vi) | [
"def",
"new",
"(",
"name",
")",
":",
"# logger.debug('VipsInterpolate.new: name = %s', name)",
"vi",
"=",
"vips_lib",
".",
"vips_interpolate_new",
"(",
"_to_bytes",
"(",
"name",
")",
")",
"if",
"vi",
"==",
"ffi",
".",
"NULL",
":",
"raise",
"Error",
"(",
"'no such interpolator {0}'",
".",
"format",
"(",
"name",
")",
")",
"return",
"Interpolate",
"(",
"vi",
")"
] | Make a new interpolator by name.
Make a new interpolator from the libvips class nickname. For example::
inter = pyvips.Interpolator.new('bicubic')
You can get a list of all supported interpolators from the command-line
with::
$ vips -l interpolate
See for example :meth:`.affine`. | [
"Make",
"a",
"new",
"interpolator",
"by",
"name",
"."
] | f4d9334d2e3085b4b058129f14ac17a7872b109b | https://github.com/libvips/pyvips/blob/f4d9334d2e3085b4b058129f14ac17a7872b109b/pyvips/vinterpolate.py#L21-L44 | train |
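A usage sketch; note the class is exported as pyvips.Interpolate, although the docstring above writes pyvips.Interpolator ('input.jpg' and 'big.jpg' are placeholder filenames)::

    import pyvips

    inter = pyvips.Interpolate.new('bicubic')
    image = pyvips.Image.new_from_file('input.jpg')

    # scale up 2x with bicubic interpolation
    image.affine([2, 0, 0, 2], interpolate=inter).write_to_file('big.jpg')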
libvips/pyvips | pyvips/vimage.py | _run_cmplx | def _run_cmplx(fn, image):
"""Run a complex function on a non-complex image.
The image needs to be complex, or have an even number of bands. The input
can be int, the output is always float or double.
"""
original_format = image.format
if image.format != 'complex' and image.format != 'dpcomplex':
if image.bands % 2 != 0:
raise Error('not an even number of bands')
if image.format != 'float' and image.format != 'double':
image = image.cast('float')
if image.format == 'double':
new_format = 'dpcomplex'
else:
new_format = 'complex'
image = image.copy(format=new_format, bands=image.bands / 2)
image = fn(image)
if original_format != 'complex' and original_format != 'dpcomplex':
if image.format == 'dpcomplex':
new_format = 'double'
else:
new_format = 'float'
image = image.copy(format=new_format, bands=image.bands * 2)
return image | python | def _run_cmplx(fn, image):
"""Run a complex function on a non-complex image.
The image needs to be complex, or have an even number of bands. The input
can be int, the output is always float or double.
"""
original_format = image.format
if image.format != 'complex' and image.format != 'dpcomplex':
if image.bands % 2 != 0:
raise Error('not an even number of bands')
if image.format != 'float' and image.format != 'double':
image = image.cast('float')
if image.format == 'double':
new_format = 'dpcomplex'
else:
new_format = 'complex'
image = image.copy(format=new_format, bands=image.bands / 2)
image = fn(image)
if original_format != 'complex' and original_format != 'dpcomplex':
if image.format == 'dpcomplex':
new_format = 'double'
else:
new_format = 'float'
image = image.copy(format=new_format, bands=image.bands * 2)
return image | [
"def",
"_run_cmplx",
"(",
"fn",
",",
"image",
")",
":",
"original_format",
"=",
"image",
".",
"format",
"if",
"image",
".",
"format",
"!=",
"'complex'",
"and",
"image",
".",
"format",
"!=",
"'dpcomplex'",
":",
"if",
"image",
".",
"bands",
"%",
"2",
"!=",
"0",
":",
"raise",
"Error",
"(",
"'not an even number of bands'",
")",
"if",
"image",
".",
"format",
"!=",
"'float'",
"and",
"image",
".",
"format",
"!=",
"'double'",
":",
"image",
"=",
"image",
".",
"cast",
"(",
"'float'",
")",
"if",
"image",
".",
"format",
"==",
"'double'",
":",
"new_format",
"=",
"'dpcomplex'",
"else",
":",
"new_format",
"=",
"'complex'",
"image",
"=",
"image",
".",
"copy",
"(",
"format",
"=",
"new_format",
",",
"bands",
"=",
"image",
".",
"bands",
"/",
"2",
")",
"image",
"=",
"fn",
"(",
"image",
")",
"if",
"original_format",
"!=",
"'complex'",
"and",
"original_format",
"!=",
"'dpcomplex'",
":",
"if",
"image",
".",
"format",
"==",
"'dpcomplex'",
":",
"new_format",
"=",
"'double'",
"else",
":",
"new_format",
"=",
"'float'",
"image",
"=",
"image",
".",
"copy",
"(",
"format",
"=",
"new_format",
",",
"bands",
"=",
"image",
".",
"bands",
"*",
"2",
")",
"return",
"image"
] | Run a complex function on a non-complex image.
The image needs to be complex, or have an even number of bands. The input
can be int, the output is always float or double. | [
"Run",
"a",
"complex",
"function",
"on",
"a",
"non",
"-",
"complex",
"image",
"."
] | f4d9334d2e3085b4b058129f14ac17a7872b109b | https://github.com/libvips/pyvips/blob/f4d9334d2e3085b4b058129f14ac17a7872b109b/pyvips/vimage.py#L50-L82 | train |
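A sketch of how this wrapper is meant to be called. The inner x.complex('polar') call is an assumption based on libvips's vips_complex operation, which pyvips exposes as a generated method::

    import pyvips

    # two bands of (x, y) coordinates, centred on the image middle;
    # subtracting a constant promotes the image to float
    xy = pyvips.Image.xyz(64, 64) - 32

    # pack the two bands into one complex band, run the polar transform,
    # then unpack back to two real bands
    polar = _run_cmplx(lambda x: x.complex('polar'), xy)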