# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains models representing polynomials and polynomial series.
"""
# pylint: disable=invalid-name
from math import comb
import numpy as np
from astropy.utils import check_broadcast, indent
from .core import FittableModel, Model
from .functional_models import Shift
from .parameters import Parameter
from .utils import _validate_domain_window, poly_map_domain
__all__ = [
"Chebyshev1D",
"Chebyshev2D",
"Hermite1D",
"Hermite2D",
"InverseSIP",
"Legendre1D",
"Legendre2D",
"Polynomial1D",
"Polynomial2D",
"SIP",
"OrthoPolynomialBase",
"PolynomialModel",
]
class PolynomialBase(FittableModel):
"""
Base class for all polynomial-like models with an arbitrary number of
parameters in the form of coefficients.
In this case Parameter instances are returned through the class's
``__getattr__`` rather than through class descriptors.
"""
# Default _param_names list; this will be filled in by the implementation's
# __init__
_param_names = ()
linear = True
col_fit_deriv = False
@property
def param_names(self):
"""Coefficient names generated based on the model's polynomial degree
and number of dimensions.
Subclasses should implement this to return parameter names in the
desired format.
On most `Model` classes this is a class attribute, but for polynomial
models it is an instance attribute since each polynomial model instance
can have different parameters depending on the degree of the polynomial
and the number of dimensions, for example.
"""
return self._param_names
class PolynomialModel(PolynomialBase):
"""
Base class for polynomial models.
Its main purpose is to determine how many coefficients are needed
based on the polynomial order and dimension and to provide their
default values, names and ordering.
"""
def __init__(
self, degree, n_models=None, model_set_axis=None, name=None, meta=None, **params
):
self._degree = degree
self._order = self.get_num_coeff(self.n_inputs)
self._param_names = self._generate_coeff_names(self.n_inputs)
if n_models:
if model_set_axis is None:
model_set_axis = 0
minshape = (1,) * model_set_axis + (n_models,)
else:
minshape = ()
for param_name in self._param_names:
self._parameters_[param_name] = Parameter(
param_name, default=np.zeros(minshape)
)
super().__init__(
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
@property
def degree(self):
"""Degree of polynomial."""
return self._degree
def get_num_coeff(self, ndim):
"""
Return the number of coefficients in one parameter set.
"""
if self.degree < 0:
raise ValueError("Degree of polynomial must be positive or null")
# deg+1 is used to account for the difference between iraf using
# degree and numpy using exact degree
if ndim != 1:
nmixed = comb(self.degree, ndim)
else:
nmixed = 0
numc = self.degree * ndim + nmixed + 1
return numc
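    # Illustrative check (not from the original source): for degree=3 in two
    # dimensions this gives 3*2 + comb(3, 2) + 1 = 10 coefficients, i.e. the
    # number of monomials x**i * y**j with i + j <= 3.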
def _invlex(self):
c = []
lencoeff = self.degree + 1
for i in range(lencoeff):
for j in range(lencoeff):
if i + j <= self.degree:
c.append((j, i))
return c[::-1]
def _generate_coeff_names(self, ndim):
names = []
if ndim == 1:
for n in range(self._order):
names.append(f"c{n}")
else:
for i in range(self.degree + 1):
names.append(f"c{i}_{0}")
for i in range(1, self.degree + 1):
names.append(f"c{0}_{i}")
for i in range(1, self.degree):
for j in range(1, self.degree):
if i + j < self.degree + 1:
names.append(f"c{i}_{j}")
return tuple(names)
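    # Illustrative note (not from the original source): for a 2D model of
    # degree 2 the generated ordering is
    # ('c0_0', 'c1_0', 'c2_0', 'c0_1', 'c0_2', 'c1_1').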
class _PolyDomainWindow1D(PolynomialModel):
"""
This class sets ``domain`` and ``window`` of 1D polynomials.
"""
def __init__(
self,
degree,
domain=None,
window=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
super().__init__(
degree, n_models, model_set_axis, name=name, meta=meta, **params
)
self._set_default_domain_window(domain, window)
@property
def window(self):
return self._window
@window.setter
def window(self, val):
self._window = _validate_domain_window(val)
@property
def domain(self):
return self._domain
@domain.setter
def domain(self, val):
self._domain = _validate_domain_window(val)
def _set_default_domain_window(self, domain, window):
"""
This method sets the ``domain`` and ``window`` attributes on 1D subclasses.
"""
self._default_domain_window = {"domain": None, "window": (-1, 1)}
self.window = window or (-1, 1)
self.domain = domain
def __repr__(self):
return self._format_repr(
[self.degree],
kwargs={"domain": self.domain, "window": self.window},
defaults=self._default_domain_window,
)
def __str__(self):
return self._format_str(
[("Degree", self.degree), ("Domain", self.domain), ("Window", self.window)],
self._default_domain_window,
)
class OrthoPolynomialBase(PolynomialBase):
"""
This is a base class for the 2D Chebyshev and Legendre models.
The polynomials implemented here require a maximum degree in x and y.
    For explanation of ``x_domain``, ``y_domain``, ``x_window`` and ``y_window``
    see :ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
x_degree : int
degree in x
y_degree : int
degree in y
x_domain : tuple or None, optional
domain of the x independent variable
x_window : tuple or None, optional
range of the x independent variable
y_domain : tuple or None, optional
domain of the y independent variable
y_window : tuple or None, optional
range of the y independent variable
**params : dict
{keyword: value} pairs, representing {parameter_name: value}
"""
n_inputs = 2
n_outputs = 1
def __init__(
self,
x_degree,
y_degree,
x_domain=None,
x_window=None,
y_domain=None,
y_window=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
self.x_degree = x_degree
self.y_degree = y_degree
self._order = self.get_num_coeff()
        # Set the ``x/y_domain`` and ``x/y_window`` attributes in subclasses.
self._default_domain_window = {
"x_window": (-1, 1),
"y_window": (-1, 1),
"x_domain": None,
"y_domain": None,
}
self.x_window = x_window or self._default_domain_window["x_window"]
self.y_window = y_window or self._default_domain_window["y_window"]
self.x_domain = x_domain
self.y_domain = y_domain
self._param_names = self._generate_coeff_names()
if n_models:
if model_set_axis is None:
model_set_axis = 0
minshape = (1,) * model_set_axis + (n_models,)
else:
minshape = ()
for param_name in self._param_names:
self._parameters_[param_name] = Parameter(
param_name, default=np.zeros(minshape)
)
super().__init__(
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
@property
def x_domain(self):
return self._x_domain
@x_domain.setter
def x_domain(self, val):
self._x_domain = _validate_domain_window(val)
@property
def y_domain(self):
return self._y_domain
@y_domain.setter
def y_domain(self, val):
self._y_domain = _validate_domain_window(val)
@property
def x_window(self):
return self._x_window
@x_window.setter
def x_window(self, val):
self._x_window = _validate_domain_window(val)
@property
def y_window(self):
return self._y_window
@y_window.setter
def y_window(self, val):
self._y_window = _validate_domain_window(val)
def __repr__(self):
return self._format_repr(
[self.x_degree, self.y_degree],
kwargs={
"x_domain": self.x_domain,
"y_domain": self.y_domain,
"x_window": self.x_window,
"y_window": self.y_window,
},
defaults=self._default_domain_window,
)
def __str__(self):
return self._format_str(
[
("X_Degree", self.x_degree),
("Y_Degree", self.y_degree),
("X_Domain", self.x_domain),
("Y_Domain", self.y_domain),
("X_Window", self.x_window),
("Y_Window", self.y_window),
],
self._default_domain_window,
)
def get_num_coeff(self):
"""
Determine how many coefficients are needed.
Returns
-------
numc : int
number of coefficients
"""
if self.x_degree < 0 or self.y_degree < 0:
raise ValueError("Degree of polynomial must be positive or null")
return (self.x_degree + 1) * (self.y_degree + 1)
def _invlex(self):
# TODO: This is a very slow way to do this; fix it and related methods
# like _alpha
c = []
xvar = np.arange(self.x_degree + 1)
yvar = np.arange(self.y_degree + 1)
for j in yvar:
for i in xvar:
c.append((i, j))
return np.array(c[::-1])
def invlex_coeff(self, coeffs):
invlex_coeffs = []
xvar = np.arange(self.x_degree + 1)
yvar = np.arange(self.y_degree + 1)
for j in yvar:
for i in xvar:
name = f"c{i}_{j}"
coeff = coeffs[self.param_names.index(name)]
invlex_coeffs.append(coeff)
return np.array(invlex_coeffs[::-1])
def _alpha(self):
invlexdeg = self._invlex()
invlexdeg[:, 1] = invlexdeg[:, 1] + self.x_degree + 1
nx = self.x_degree + 1
ny = self.y_degree + 1
alpha = np.zeros((ny * nx + 3, ny + nx))
for n in range(len(invlexdeg)):
alpha[n][invlexdeg[n]] = [1, 1]
alpha[-2, 0] = 1
alpha[-3, nx] = 1
return alpha
def imhorner(self, x, y, coeff):
_coeff = list(coeff)
_coeff.extend([0, 0, 0])
alpha = self._alpha()
r0 = _coeff[0]
nalpha = len(alpha)
karr = np.diff(alpha, axis=0)
kfunc = self._fcache(x, y)
x_terms = self.x_degree + 1
y_terms = self.y_degree + 1
nterms = x_terms + y_terms
for n in range(1, nterms + 1 + 3):
setattr(self, "r" + str(n), 0.0)
for n in range(1, nalpha):
k = karr[n - 1].nonzero()[0].max() + 1
rsum = 0
for i in range(1, k + 1):
rsum = rsum + getattr(self, "r" + str(i))
val = kfunc[k - 1] * (r0 + rsum)
setattr(self, "r" + str(k), val)
r0 = _coeff[n]
for i in range(1, k):
setattr(self, "r" + str(i), 0.0)
result = r0
for i in range(1, nterms + 1 + 3):
result = result + getattr(self, "r" + str(i))
return result
def _generate_coeff_names(self):
names = []
for j in range(self.y_degree + 1):
for i in range(self.x_degree + 1):
names.append(f"c{i}_{j}")
return tuple(names)
def _fcache(self, x, y):
"""
        Compute and store the individual functions.

        To be implemented by subclasses.
"""
raise NotImplementedError("Subclasses should implement this")
def evaluate(self, x, y, *coeffs):
if self.x_domain is not None:
x = poly_map_domain(x, self.x_domain, self.x_window)
if self.y_domain is not None:
y = poly_map_domain(y, self.y_domain, self.y_window)
invcoeff = self.invlex_coeff(coeffs)
return self.imhorner(x, y, invcoeff)
def prepare_inputs(self, x, y, **kwargs):
inputs, broadcasted_shapes = super().prepare_inputs(x, y, **kwargs)
x, y = inputs
if x.shape != y.shape:
raise ValueError("Expected input arrays to have the same shape")
return (x, y), broadcasted_shapes
class Chebyshev1D(_PolyDomainWindow1D):
r"""
Univariate Chebyshev series.
It is defined as:
.. math::
P(x) = \sum_{i=0}^{i=n}C_{i} * T_{i}(x)
where ``T_i(x)`` is the corresponding Chebyshev polynomial of the 1st kind.
    For explanation of ``domain`` and ``window`` see
    :ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
degree : int
degree of the series
domain : tuple or None, optional
window : tuple or None, optional
If None, it is set to (-1, 1)
Fitters will remap the domain to this window.
**params : dict
keyword : value pairs, representing parameter_name: value
Notes
-----
This model does not support the use of units/quantities, because each term
in the sum of Chebyshev polynomials is a polynomial in x - since the
coefficients within each Chebyshev polynomial are fixed, we can't use
quantities for x since the units would not be compatible. For example, the
third Chebyshev polynomial (T2) is 2x^2-1, but if x was specified with
units, 2x^2 and -1 would have incompatible units.
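
    Examples
    --------
    A minimal usage sketch (illustrative; values hand-checked)::

        from astropy.modeling import models

        p = models.Chebyshev1D(2, c0=1, c1=2, c2=3)
        # T0(0.5) = 1, T1(0.5) = 0.5, T2(0.5) = 2*0.5**2 - 1 = -0.5
        p(0.5)  # = 1*1 + 2*0.5 + 3*(-0.5) = 0.5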
"""
n_inputs = 1
n_outputs = 1
_separable = True
def __init__(
self,
degree,
domain=None,
window=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
super().__init__(
degree,
domain=domain,
window=window,
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
def fit_deriv(self, x, *params):
"""
Computes the Vandermonde matrix.
Parameters
----------
x : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype)
v[0] = 1
if self.degree > 0:
x2 = 2 * x
v[1] = x
for i in range(2, self.degree + 1):
v[i] = v[i - 1] * x2 - v[i - 2]
return np.rollaxis(v, 0, v.ndim)
def prepare_inputs(self, x, **kwargs):
inputs, broadcasted_shapes = super().prepare_inputs(x, **kwargs)
x = inputs[0]
return (x,), broadcasted_shapes
def evaluate(self, x, *coeffs):
if self.domain is not None:
x = poly_map_domain(x, self.domain, self.window)
return self.clenshaw(x, coeffs)
@staticmethod
def clenshaw(x, coeffs):
"""Evaluates the polynomial using Clenshaw's algorithm."""
if len(coeffs) == 1:
c0 = coeffs[0]
c1 = 0
elif len(coeffs) == 2:
c0 = coeffs[0]
c1 = coeffs[1]
else:
x2 = 2 * x
c0 = coeffs[-2]
c1 = coeffs[-1]
for i in range(3, len(coeffs) + 1):
tmp = c0
c0 = coeffs[-i] - c1
c1 = tmp + c1 * x2
return c0 + c1 * x
class Hermite1D(_PolyDomainWindow1D):
r"""
Univariate Hermite series.
It is defined as:
.. math::
P(x) = \sum_{i=0}^{i=n}C_{i} * H_{i}(x)
where ``H_i(x)`` is the corresponding Hermite polynomial ("Physicist's kind").
    For explanation of ``domain`` and ``window`` see
:ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
degree : int
degree of the series
domain : tuple or None, optional
window : tuple or None, optional
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
**params : dict
keyword : value pairs, representing parameter_name: value
Notes
-----
This model does not support the use of units/quantities, because each term
in the sum of Hermite polynomials is a polynomial in x - since the
coefficients within each Hermite polynomial are fixed, we can't use
quantities for x since the units would not be compatible. For example, the
third Hermite polynomial (H2) is 4x^2-2, but if x was specified with units,
4x^2 and -2 would have incompatible units.
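
    Examples
    --------
    A minimal usage sketch (illustrative; values hand-checked)::

        from astropy.modeling import models

        p = models.Hermite1D(2, c0=1, c1=1, c2=1)
        # H0(0.5) = 1, H1(0.5) = 2*0.5 = 1, H2(0.5) = 4*0.5**2 - 2 = -1
        p(0.5)  # = 1 + 1 - 1 = 1.0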
"""
n_inputs = 1
n_outputs = 1
_separable = True
def __init__(
self,
degree,
domain=None,
window=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
super().__init__(
degree,
domain,
window,
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
def fit_deriv(self, x, *params):
"""
Computes the Vandermonde matrix.
Parameters
----------
x : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype)
v[0] = 1
if self.degree > 0:
x2 = 2 * x
            v[1] = x2
for i in range(2, self.degree + 1):
v[i] = x2 * v[i - 1] - 2 * (i - 1) * v[i - 2]
return np.rollaxis(v, 0, v.ndim)
def prepare_inputs(self, x, **kwargs):
inputs, broadcasted_shapes = super().prepare_inputs(x, **kwargs)
x = inputs[0]
return (x,), broadcasted_shapes
def evaluate(self, x, *coeffs):
if self.domain is not None:
x = poly_map_domain(x, self.domain, self.window)
return self.clenshaw(x, coeffs)
@staticmethod
def clenshaw(x, coeffs):
x2 = x * 2
if len(coeffs) == 1:
c0 = coeffs[0]
c1 = 0
elif len(coeffs) == 2:
c0 = coeffs[0]
c1 = coeffs[1]
else:
nd = len(coeffs)
c0 = coeffs[-2]
c1 = coeffs[-1]
for i in range(3, len(coeffs) + 1):
temp = c0
nd = nd - 1
c0 = coeffs[-i] - c1 * (2 * (nd - 1))
c1 = temp + c1 * x2
return c0 + c1 * x2
class Hermite2D(OrthoPolynomialBase):
r"""
Bivariate Hermite series.
It is defined as
.. math:: P_{nm}(x,y) = \sum_{n,m=0}^{n=d,m=d}C_{nm} H_n(x) H_m(y)
where ``H_n(x)`` and ``H_m(y)`` are Hermite polynomials.
For explanation of ``x_domain``, ``y_domain``, ``x_window`` and ``y_window``
see :ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
x_degree : int
degree in x
y_degree : int
degree in y
x_domain : tuple or None, optional
domain of the x independent variable
y_domain : tuple or None, optional
domain of the y independent variable
x_window : tuple or None, optional
range of the x independent variable
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
y_window : tuple or None, optional
range of the y independent variable
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
**params : dict
keyword: value pairs, representing parameter_name: value
Notes
-----
This model does not support the use of units/quantities, because each term
in the sum of Hermite polynomials is a polynomial in x and/or y - since the
coefficients within each Hermite polynomial are fixed, we can't use
quantities for x and/or y since the units would not be compatible. For
example, the third Hermite polynomial (H2) is 4x^2-2, but if x was
specified with units, 4x^2 and -2 would have incompatible units.
"""
_separable = False
def __init__(
self,
x_degree,
y_degree,
x_domain=None,
x_window=None,
y_domain=None,
y_window=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
super().__init__(
x_degree,
y_degree,
x_domain=x_domain,
y_domain=y_domain,
x_window=x_window,
y_window=y_window,
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
def _fcache(self, x, y):
"""
Calculate the individual Hermite functions once and store them in a
dictionary to be reused.
"""
x_terms = self.x_degree + 1
y_terms = self.y_degree + 1
kfunc = {}
kfunc[0] = np.ones(x.shape)
kfunc[1] = 2 * x.copy()
kfunc[x_terms] = np.ones(y.shape)
kfunc[x_terms + 1] = 2 * y.copy()
for n in range(2, x_terms):
kfunc[n] = 2 * x * kfunc[n - 1] - 2 * (n - 1) * kfunc[n - 2]
for n in range(x_terms + 2, x_terms + y_terms):
kfunc[n] = 2 * y * kfunc[n - 1] - 2 * (n - 1) * kfunc[n - 2]
return kfunc
def fit_deriv(self, x, y, *params):
"""
Derivatives with respect to the coefficients.
This is an array with Hermite polynomials:
.. math::
H_{x_0}H_{y_0}, H_{x_1}H_{y_0}...H_{x_n}H_{y_0}...H_{x_n}H_{y_m}
Parameters
----------
x : ndarray
input
y : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
if x.shape != y.shape:
raise ValueError("x and y must have the same shape")
x = x.flatten()
y = y.flatten()
x_deriv = self._hermderiv1d(x, self.x_degree + 1).T
y_deriv = self._hermderiv1d(y, self.y_degree + 1).T
ij = []
for i in range(self.y_degree + 1):
for j in range(self.x_degree + 1):
ij.append(x_deriv[j] * y_deriv[i])
v = np.array(ij)
return v.T
def _hermderiv1d(self, x, deg):
"""
Derivative of 1D Hermite series.
"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
d = np.empty((deg + 1, len(x)), dtype=x.dtype)
d[0] = x * 0 + 1
if deg > 0:
x2 = 2 * x
d[1] = x2
for i in range(2, deg + 1):
d[i] = x2 * d[i - 1] - 2 * (i - 1) * d[i - 2]
return np.rollaxis(d, 0, d.ndim)
class Legendre1D(_PolyDomainWindow1D):
r"""
Univariate Legendre series.
It is defined as:
.. math::
P(x) = \sum_{i=0}^{i=n}C_{i} * L_{i}(x)
where ``L_i(x)`` is the corresponding Legendre polynomial.
    For explanation of ``domain`` and ``window`` see
:ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
degree : int
degree of the series
domain : tuple or None, optional
window : tuple or None, optional
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
**params : dict
keyword: value pairs, representing parameter_name: value
Notes
-----
This model does not support the use of units/quantities, because each term
in the sum of Legendre polynomials is a polynomial in x - since the
coefficients within each Legendre polynomial are fixed, we can't use
quantities for x since the units would not be compatible. For example, the
third Legendre polynomial (P2) is 1.5x^2-0.5, but if x was specified with
units, 1.5x^2 and -0.5 would have incompatible units.
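
    Examples
    --------
    A minimal usage sketch (illustrative; values hand-checked)::

        from astropy.modeling import models

        p = models.Legendre1D(2, c0=1, c1=2, c2=3)
        # P0(0.5) = 1, P1(0.5) = 0.5, P2(0.5) = 1.5*0.5**2 - 0.5 = -0.125
        p(0.5)  # = 1 + 1 - 0.375 = 1.625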
"""
n_inputs = 1
n_outputs = 1
_separable = True
def __init__(
self,
degree,
domain=None,
window=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
super().__init__(
degree,
domain,
window,
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
def prepare_inputs(self, x, **kwargs):
inputs, broadcasted_shapes = super().prepare_inputs(x, **kwargs)
x = inputs[0]
return (x,), broadcasted_shapes
def evaluate(self, x, *coeffs):
if self.domain is not None:
x = poly_map_domain(x, self.domain, self.window)
return self.clenshaw(x, coeffs)
def fit_deriv(self, x, *params):
"""
Computes the Vandermonde matrix.
Parameters
----------
x : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype)
v[0] = 1
if self.degree > 0:
v[1] = x
for i in range(2, self.degree + 1):
v[i] = (v[i - 1] * x * (2 * i - 1) - v[i - 2] * (i - 1)) / i
return np.rollaxis(v, 0, v.ndim)
@staticmethod
def clenshaw(x, coeffs):
if len(coeffs) == 1:
c0 = coeffs[0]
c1 = 0
elif len(coeffs) == 2:
c0 = coeffs[0]
c1 = coeffs[1]
else:
nd = len(coeffs)
c0 = coeffs[-2]
c1 = coeffs[-1]
for i in range(3, len(coeffs) + 1):
tmp = c0
nd = nd - 1
c0 = coeffs[-i] - (c1 * (nd - 1)) / nd
c1 = tmp + (c1 * x * (2 * nd - 1)) / nd
return c0 + c1 * x
class Polynomial1D(_PolyDomainWindow1D):
r"""
1D Polynomial model.
It is defined as:
.. math::
P = \sum_{i=0}^{i=n}C_{i} * x^{i}
    For explanation of ``domain`` and ``window`` see
:ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
degree : int
degree of the series
domain : tuple or None, optional
If None, it is set to (-1, 1)
window : tuple or None, optional
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
**params : dict
keyword: value pairs, representing parameter_name: value
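
    Examples
    --------
    A minimal usage sketch (illustrative; the default domain/window pair is
    an identity mapping)::

        from astropy.modeling import models

        p = models.Polynomial1D(2, c0=1, c1=2, c2=3)
        p(0.5)  # = 1 + 2*0.5 + 3*0.5**2 = 2.75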
"""
n_inputs = 1
n_outputs = 1
_separable = True
def __init__(
self,
degree,
domain=None,
window=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
super().__init__(
degree,
domain,
window,
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
# Set domain separately because it's different from
# the orthogonal polynomials.
self._default_domain_window = {
"domain": (-1, 1),
"window": (-1, 1),
}
self.domain = domain or self._default_domain_window["domain"]
self.window = window or self._default_domain_window["window"]
def prepare_inputs(self, x, **kwargs):
inputs, broadcasted_shapes = super().prepare_inputs(x, **kwargs)
x = inputs[0]
return (x,), broadcasted_shapes
def evaluate(self, x, *coeffs):
if self.domain is not None:
x = poly_map_domain(x, self.domain, self.window)
return self.horner(x, coeffs)
def fit_deriv(self, x, *params):
"""
Computes the Vandermonde matrix.
Parameters
----------
x : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
v = np.empty((self.degree + 1,) + x.shape, dtype=float)
v[0] = 1
if self.degree > 0:
v[1] = x
for i in range(2, self.degree + 1):
v[i] = v[i - 1] * x
return np.rollaxis(v, 0, v.ndim)
@staticmethod
def horner(x, coeffs):
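        # Horner evaluation of sum_i coeffs[i] * x**i; the single-coefficient
        # case is broadcast with np.ones_like so the output shape matches x.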
if len(coeffs) == 1:
c0 = coeffs[-1] * np.ones_like(x, subok=False)
else:
c0 = coeffs[-1]
for i in range(2, len(coeffs) + 1):
c0 = coeffs[-i] + c0 * x
return c0
@property
def input_units(self):
if self.degree == 0 or self.c1.unit is None:
return None
else:
return {self.inputs[0]: self.c0.unit / self.c1.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
mapping = {}
for i in range(self.degree + 1):
par = getattr(self, f"c{i}")
mapping[par.name] = (
outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]] ** i
)
return mapping
class Polynomial2D(PolynomialModel):
r"""
2D Polynomial model.
Represents a general polynomial of degree n:
.. math::
P(x,y) = c_{00} + c_{10}x + ...+ c_{n0}x^n + c_{01}y + ...+ c_{0n}y^n
+ c_{11}xy + c_{12}xy^2 + ... + c_{1(n-1)}xy^{n-1}+ ... + c_{(n-1)1}x^{n-1}y
For explanation of ``x_domain``, ``y_domain``, ``x_window`` and ``y_window``
see :ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
degree : int
Polynomial degree: largest sum of exponents (:math:`i + j`) of
variables in each monomial term of the form :math:`x^i y^j`. The
number of terms in a 2D polynomial of degree ``n`` is given by binomial
coefficient :math:`C(n + 2, 2) = (n + 2)! / (2!\,n!) = (n + 1)(n + 2) / 2`.
x_domain : tuple or None, optional
domain of the x independent variable
If None, it is set to (-1, 1)
y_domain : tuple or None, optional
domain of the y independent variable
If None, it is set to (-1, 1)
x_window : tuple or None, optional
range of the x independent variable
If None, it is set to (-1, 1)
Fitters will remap the x_domain to x_window
y_window : tuple or None, optional
range of the y independent variable
If None, it is set to (-1, 1)
Fitters will remap the y_domain to y_window
**params : dict
keyword: value pairs, representing parameter_name: value
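
    Examples
    --------
    A minimal usage sketch (illustrative; unspecified coefficients default
    to zero)::

        from astropy.modeling import models

        p = models.Polynomial2D(degree=2, c0_0=1, c1_0=2, c0_1=3, c1_1=4)
        p(1.0, 2.0)  # = 1 + 2*1 + 3*2 + 4*1*2 = 17.0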
"""
n_inputs = 2
n_outputs = 1
_separable = False
def __init__(
self,
degree,
x_domain=None,
y_domain=None,
x_window=None,
y_window=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
super().__init__(
degree,
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
self._default_domain_window = {
"x_domain": (-1, 1),
"y_domain": (-1, 1),
"x_window": (-1, 1),
"y_window": (-1, 1),
}
self.x_domain = x_domain or self._default_domain_window["x_domain"]
self.y_domain = y_domain or self._default_domain_window["y_domain"]
self.x_window = x_window or self._default_domain_window["x_window"]
self.y_window = y_window or self._default_domain_window["y_window"]
def prepare_inputs(self, x, y, **kwargs):
inputs, broadcasted_shapes = super().prepare_inputs(x, y, **kwargs)
x, y = inputs
return (x, y), broadcasted_shapes
def evaluate(self, x, y, *coeffs):
if self.x_domain is not None:
x = poly_map_domain(x, self.x_domain, self.x_window)
if self.y_domain is not None:
y = poly_map_domain(y, self.y_domain, self.y_window)
invcoeff = self.invlex_coeff(coeffs)
result = self.multivariate_horner(x, y, invcoeff)
# Special case for degree==0 to ensure that the shape of the output is
# still as expected by the broadcasting rules, even though the x and y
# inputs are not used in the evaluation
if self.degree == 0:
output_shape = check_broadcast(np.shape(coeffs[0]), x.shape)
if output_shape:
new_result = np.empty(output_shape)
new_result[:] = result
result = new_result
return result
def __repr__(self):
return self._format_repr(
[self.degree],
kwargs={
"x_domain": self.x_domain,
"y_domain": self.y_domain,
"x_window": self.x_window,
"y_window": self.y_window,
},
defaults=self._default_domain_window,
)
def __str__(self):
return self._format_str(
[
("Degree", self.degree),
("X_Domain", self.x_domain),
("Y_Domain", self.y_domain),
("X_Window", self.x_window),
("Y_Window", self.y_window),
],
self._default_domain_window,
)
def fit_deriv(self, x, y, *params):
"""
Computes the Vandermonde matrix.
Parameters
----------
x : ndarray
input
y : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
if x.ndim == 2:
x = x.flatten()
if y.ndim == 2:
y = y.flatten()
if x.size != y.size:
raise ValueError("Expected x and y to be of equal size")
designx = x[:, None] ** np.arange(self.degree + 1)
designy = y[:, None] ** np.arange(1, self.degree + 1)
designmixed = []
for i in range(1, self.degree):
for j in range(1, self.degree):
if i + j <= self.degree:
designmixed.append((x**i) * (y**j))
designmixed = np.array(designmixed).T
if designmixed.any():
v = np.hstack([designx, designy, designmixed])
else:
v = np.hstack([designx, designy])
return v
def invlex_coeff(self, coeffs):
invlex_coeffs = []
lencoeff = range(self.degree + 1)
for i in lencoeff:
for j in lencoeff:
if i + j <= self.degree:
name = f"c{j}_{i}"
coeff = coeffs[self.param_names.index(name)]
invlex_coeffs.append(coeff)
return invlex_coeffs[::-1]
def multivariate_horner(self, x, y, coeffs):
"""
Multivariate Horner's scheme.
Parameters
----------
x, y : array
coeffs : array
Coefficients in inverse lexical order.
"""
alpha = self._invlex()
r0 = coeffs[0]
r1 = r0 * 0.0
r2 = r0 * 0.0
karr = np.diff(alpha, axis=0)
for n in range(len(karr)):
if karr[n, 1] != 0:
r2 = y * (r0 + r1 + r2)
r1 = np.zeros_like(coeffs[0], subok=False)
else:
r1 = x * (r0 + r1)
r0 = coeffs[n + 1]
return r0 + r1 + r2
@property
def input_units(self):
if self.degree == 0 or (self.c1_0.unit is None and self.c0_1.unit is None):
return None
return {
self.inputs[0]: self.c0_0.unit / self.c1_0.unit,
self.inputs[1]: self.c0_0.unit / self.c0_1.unit,
}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
mapping = {}
for i in range(self.degree + 1):
for j in range(self.degree + 1):
if i + j > 2:
continue
par = getattr(self, f"c{i}_{j}")
mapping[par.name] = (
outputs_unit[self.outputs[0]]
/ inputs_unit[self.inputs[0]] ** i
/ inputs_unit[self.inputs[1]] ** j
)
return mapping
@property
def x_domain(self):
return self._x_domain
@x_domain.setter
def x_domain(self, val):
self._x_domain = _validate_domain_window(val)
@property
def y_domain(self):
return self._y_domain
@y_domain.setter
def y_domain(self, val):
self._y_domain = _validate_domain_window(val)
@property
def x_window(self):
return self._x_window
@x_window.setter
def x_window(self, val):
self._x_window = _validate_domain_window(val)
@property
def y_window(self):
return self._y_window
@y_window.setter
def y_window(self, val):
self._y_window = _validate_domain_window(val)
class Chebyshev2D(OrthoPolynomialBase):
r"""
    Bivariate Chebyshev series.
It is defined as
    .. math:: P_{nm}(x,y) = \sum_{n,m=0}^{n=d,m=d}C_{nm} T_n(x) T_m(y)
where ``T_n(x)`` and ``T_m(y)`` are Chebyshev polynomials of the first kind.
For explanation of ``x_domain``, ``y_domain``, ``x_window`` and ``y_window``
see :ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
x_degree : int
degree in x
y_degree : int
degree in y
x_domain : tuple or None, optional
domain of the x independent variable
y_domain : tuple or None, optional
domain of the y independent variable
x_window : tuple or None, optional
range of the x independent variable
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
y_window : tuple or None, optional
range of the y independent variable
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
**params : dict
keyword: value pairs, representing parameter_name: value
Notes
-----
This model does not support the use of units/quantities, because each term
in the sum of Chebyshev polynomials is a polynomial in x and/or y - since
the coefficients within each Chebyshev polynomial are fixed, we can't use
quantities for x and/or y since the units would not be compatible. For
example, the third Chebyshev polynomial (T2) is 2x^2-1, but if x was
specified with units, 2x^2 and -1 would have incompatible units.
"""
_separable = False
def __init__(
self,
x_degree,
y_degree,
x_domain=None,
x_window=None,
y_domain=None,
y_window=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
super().__init__(
x_degree,
y_degree,
x_domain=x_domain,
y_domain=y_domain,
x_window=x_window,
y_window=y_window,
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
def _fcache(self, x, y):
"""
Calculate the individual Chebyshev functions once and store them in a
dictionary to be reused.
"""
x_terms = self.x_degree + 1
y_terms = self.y_degree + 1
kfunc = {}
kfunc[0] = np.ones(x.shape)
kfunc[1] = x.copy()
kfunc[x_terms] = np.ones(y.shape)
kfunc[x_terms + 1] = y.copy()
for n in range(2, x_terms):
kfunc[n] = 2 * x * kfunc[n - 1] - kfunc[n - 2]
for n in range(x_terms + 2, x_terms + y_terms):
kfunc[n] = 2 * y * kfunc[n - 1] - kfunc[n - 2]
return kfunc
def fit_deriv(self, x, y, *params):
"""
Derivatives with respect to the coefficients.
This is an array with Chebyshev polynomials:
.. math::
T_{x_0}T_{y_0}, T_{x_1}T_{y_0}...T_{x_n}T_{y_0}...T_{x_n}T_{y_m}
Parameters
----------
x : ndarray
input
y : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
if x.shape != y.shape:
raise ValueError("x and y must have the same shape")
x = x.flatten()
y = y.flatten()
x_deriv = self._chebderiv1d(x, self.x_degree + 1).T
y_deriv = self._chebderiv1d(y, self.y_degree + 1).T
ij = []
for i in range(self.y_degree + 1):
for j in range(self.x_degree + 1):
ij.append(x_deriv[j] * y_deriv[i])
v = np.array(ij)
return v.T
def _chebderiv1d(self, x, deg):
"""
Derivative of 1D Chebyshev series.
"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
d = np.empty((deg + 1, len(x)), dtype=x.dtype)
d[0] = x * 0 + 1
if deg > 0:
x2 = 2 * x
d[1] = x
for i in range(2, deg + 1):
d[i] = d[i - 1] * x2 - d[i - 2]
return np.rollaxis(d, 0, d.ndim)
class Legendre2D(OrthoPolynomialBase):
r"""
Bivariate Legendre series.
Defined as:
    .. math:: P_{nm}(x,y) = \sum_{n,m=0}^{n=d,m=d}C_{nm} L_n(x) L_m(y)
where ``L_n(x)`` and ``L_m(y)`` are Legendre polynomials.
For explanation of ``x_domain``, ``y_domain``, ``x_window`` and ``y_window``
see :ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
x_degree : int
degree in x
y_degree : int
degree in y
x_domain : tuple or None, optional
domain of the x independent variable
y_domain : tuple or None, optional
domain of the y independent variable
x_window : tuple or None, optional
range of the x independent variable
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
y_window : tuple or None, optional
range of the y independent variable
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
**params : dict
keyword: value pairs, representing parameter_name: value
Notes
-----
    This model does not support the use of units/quantities, because each term
    in the sum of Legendre polynomials is a polynomial in x and/or y - since
    the coefficients within each Legendre polynomial are fixed, we can't use
    quantities for x and/or y since the units would not be compatible. For
    example, the third Legendre polynomial (P2) is 1.5x^2-0.5, but if x was
    specified with units, 1.5x^2 and -0.5 would have incompatible units.
"""
_separable = False
def __init__(
self,
x_degree,
y_degree,
x_domain=None,
x_window=None,
y_domain=None,
y_window=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
super().__init__(
x_degree,
y_degree,
x_domain=x_domain,
y_domain=y_domain,
x_window=x_window,
y_window=y_window,
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
def _fcache(self, x, y):
"""
Calculate the individual Legendre functions once and store them in a
dictionary to be reused.
"""
x_terms = self.x_degree + 1
y_terms = self.y_degree + 1
kfunc = {}
kfunc[0] = np.ones(x.shape)
kfunc[1] = x.copy()
kfunc[x_terms] = np.ones(y.shape)
kfunc[x_terms + 1] = y.copy()
for n in range(2, x_terms):
kfunc[n] = (
(2 * (n - 1) + 1) * x * kfunc[n - 1] - (n - 1) * kfunc[n - 2]
) / n
for n in range(2, y_terms):
kfunc[n + x_terms] = (
(2 * (n - 1) + 1) * y * kfunc[n + x_terms - 1]
- (n - 1) * kfunc[n + x_terms - 2]
) / (n)
return kfunc
def fit_deriv(self, x, y, *params):
"""Derivatives with respect to the coefficients.
This is an array with Legendre polynomials:
Lx0Ly0 Lx1Ly0...LxnLy0...LxnLym
Parameters
----------
x : ndarray
input
y : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
if x.shape != y.shape:
raise ValueError("x and y must have the same shape")
x = x.flatten()
y = y.flatten()
x_deriv = self._legendderiv1d(x, self.x_degree + 1).T
y_deriv = self._legendderiv1d(y, self.y_degree + 1).T
ij = []
for i in range(self.y_degree + 1):
for j in range(self.x_degree + 1):
ij.append(x_deriv[j] * y_deriv[i])
v = np.array(ij)
return v.T
def _legendderiv1d(self, x, deg):
"""Derivative of 1D Legendre polynomial."""
x = np.array(x, dtype=float, copy=False, ndmin=1)
d = np.empty((deg + 1,) + x.shape, dtype=x.dtype)
d[0] = x * 0 + 1
if deg > 0:
d[1] = x
for i in range(2, deg + 1):
d[i] = (d[i - 1] * x * (2 * i - 1) - d[i - 2] * (i - 1)) / i
return np.rollaxis(d, 0, d.ndim)
class _SIP1D(PolynomialBase):
"""
This implements the Simple Imaging Polynomial Model (SIP) in 1D.
It's unlikely it will be used in 1D so this class is private
and SIP should be used instead.
"""
n_inputs = 2
n_outputs = 1
_separable = False
def __init__(
self,
order,
coeff_prefix,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
self.order = order
self.coeff_prefix = coeff_prefix
self._param_names = self._generate_coeff_names(coeff_prefix)
if n_models:
if model_set_axis is None:
model_set_axis = 0
minshape = (1,) * model_set_axis + (n_models,)
else:
minshape = ()
for param_name in self._param_names:
self._parameters_[param_name] = Parameter(
param_name, default=np.zeros(minshape)
)
super().__init__(
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
def __repr__(self):
return self._format_repr(args=[self.order, self.coeff_prefix])
def __str__(self):
return self._format_str(
[("Order", self.order), ("Coeff. Prefix", self.coeff_prefix)]
)
def evaluate(self, x, y, *coeffs):
# TODO: Rewrite this so that it uses a simpler method of determining
# the matrix based on the number of given coefficients.
mcoef = self._coeff_matrix(self.coeff_prefix, coeffs)
return self._eval_sip(x, y, mcoef)
def get_num_coeff(self, ndim):
"""
Return the number of coefficients in one param set.
"""
if self.order < 2 or self.order > 9:
raise ValueError("Degree of polynomial must be 2< deg < 9")
nmixed = comb(self.order, ndim)
# remove 3 terms because SIP deg >= 2
numc = self.order * ndim + nmixed - 2
return numc
def _generate_coeff_names(self, coeff_prefix):
names = []
for i in range(2, self.order + 1):
names.append(f"{coeff_prefix}_{i}_{0}")
for i in range(2, self.order + 1):
names.append(f"{coeff_prefix}_{0}_{i}")
for i in range(1, self.order):
for j in range(1, self.order):
if i + j < self.order + 1:
names.append(f"{coeff_prefix}_{i}_{j}")
return tuple(names)
def _coeff_matrix(self, coeff_prefix, coeffs):
mat = np.zeros((self.order + 1, self.order + 1))
for i in range(2, self.order + 1):
attr = f"{coeff_prefix}_{i}_{0}"
mat[i, 0] = coeffs[self.param_names.index(attr)]
for i in range(2, self.order + 1):
attr = f"{coeff_prefix}_{0}_{i}"
mat[0, i] = coeffs[self.param_names.index(attr)]
for i in range(1, self.order):
for j in range(1, self.order):
if i + j < self.order + 1:
attr = f"{coeff_prefix}_{i}_{j}"
mat[i, j] = coeffs[self.param_names.index(attr)]
return mat
def _eval_sip(self, x, y, coef):
x = np.asarray(x, dtype=np.float64)
y = np.asarray(y, dtype=np.float64)
if self.coeff_prefix == "A":
result = np.zeros(x.shape)
else:
result = np.zeros(y.shape)
for i in range(coef.shape[0]):
for j in range(coef.shape[1]):
if 1 < i + j < self.order + 1:
result = result + coef[i, j] * x**i * y**j
return result
class SIP(Model):
"""
Simple Imaging Polynomial (SIP) model.
The SIP convention is used to represent distortions in FITS image headers.
See [1]_ for a description of the SIP convention.
Parameters
----------
crpix : list or (2,) ndarray
CRPIX values
a_order : int
SIP polynomial order for first axis
b_order : int
SIP order for second axis
a_coeff : dict
SIP coefficients for first axis
b_coeff : dict
SIP coefficients for the second axis
ap_order : int
order for the inverse transformation (AP coefficients)
bp_order : int
order for the inverse transformation (BP coefficients)
ap_coeff : dict
coefficients for the inverse transform
bp_coeff : dict
coefficients for the inverse transform
References
----------
.. [1] `David Shupe, et al, ADASS, ASP Conference Series, Vol. 347, 2005
<https://ui.adsabs.harvard.edu/abs/2005ASPC..347..491S>`_
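
    Examples
    --------
    An illustrative sketch; the coefficient values below are made up::

        from astropy.modeling import models

        sip = models.SIP(crpix=[512, 512], a_order=2, b_order=2,
                         a_coeff={"A_2_0": 1e-6}, b_coeff={"B_0_2": 1e-6})
        # Returns the distortion terms f(u, v) and g(u, v) evaluated at
        # u = x - CRPIX1, v = y - CRPIX2.
        dx, dy = sip(513.0, 514.0)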
"""
n_inputs = 2
n_outputs = 2
_separable = False
def __init__(
self,
crpix,
a_order,
b_order,
        a_coeff=None,
        b_coeff=None,
        ap_order=None,
        bp_order=None,
        ap_coeff=None,
        bp_coeff=None,
        n_models=None,
        model_set_axis=None,
        name=None,
        meta=None,
    ):
        # Use ``None`` defaults so instances do not share mutable default
        # dicts.
        a_coeff = {} if a_coeff is None else a_coeff
        b_coeff = {} if b_coeff is None else b_coeff
        ap_coeff = {} if ap_coeff is None else ap_coeff
        bp_coeff = {} if bp_coeff is None else bp_coeff
        self._crpix = crpix
self._a_order = a_order
self._b_order = b_order
self._a_coeff = a_coeff
self._b_coeff = b_coeff
self._ap_order = ap_order
self._bp_order = bp_order
self._ap_coeff = ap_coeff
self._bp_coeff = bp_coeff
self.shift_a = Shift(-crpix[0])
self.shift_b = Shift(-crpix[1])
self.sip1d_a = _SIP1D(
a_order,
coeff_prefix="A",
n_models=n_models,
model_set_axis=model_set_axis,
**a_coeff,
)
self.sip1d_b = _SIP1D(
b_order,
coeff_prefix="B",
n_models=n_models,
model_set_axis=model_set_axis,
**b_coeff,
)
super().__init__(
n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta
)
self._inputs = ("u", "v")
self._outputs = ("x", "y")
def __repr__(self):
return (
f"<{self.__class__.__name__}"
f"({[self.shift_a, self.shift_b, self.sip1d_a, self.sip1d_b]!r})>"
)
def __str__(self):
parts = [f"Model: {self.__class__.__name__}"]
for model in [self.shift_a, self.shift_b, self.sip1d_a, self.sip1d_b]:
parts.append(indent(str(model), width=4))
parts.append("")
return "\n".join(parts)
@property
def inverse(self):
if self._ap_order is not None and self._bp_order is not None:
return InverseSIP(
self._ap_order, self._bp_order, self._ap_coeff, self._bp_coeff
)
else:
raise NotImplementedError("SIP inverse coefficients are not available.")
def evaluate(self, x, y):
u = self.shift_a.evaluate(x, *self.shift_a.param_sets)
v = self.shift_b.evaluate(y, *self.shift_b.param_sets)
f = self.sip1d_a.evaluate(u, v, *self.sip1d_a.param_sets)
g = self.sip1d_b.evaluate(u, v, *self.sip1d_b.param_sets)
return f, g
class InverseSIP(Model):
"""
Inverse Simple Imaging Polynomial.
Parameters
----------
ap_order : int
order for the inverse transformation (AP coefficients)
bp_order : int
order for the inverse transformation (BP coefficients)
ap_coeff : dict
coefficients for the inverse transform
bp_coeff : dict
coefficients for the inverse transform
"""
n_inputs = 2
n_outputs = 2
_separable = False
def __init__(
self,
ap_order,
bp_order,
        ap_coeff=None,
        bp_coeff=None,
        n_models=None,
        model_set_axis=None,
        name=None,
        meta=None,
    ):
        # Use ``None`` defaults so the ``setdefault`` calls below do not
        # mutate shared default dicts.
        ap_coeff = {} if ap_coeff is None else ap_coeff
        bp_coeff = {} if bp_coeff is None else bp_coeff
self._ap_order = ap_order
self._bp_order = bp_order
self._ap_coeff = ap_coeff
self._bp_coeff = bp_coeff
# define the 0th term in order to use Polynomial2D
ap_coeff.setdefault("AP_0_0", 0)
bp_coeff.setdefault("BP_0_0", 0)
ap_coeff_params = {k.replace("AP_", "c"): v for k, v in ap_coeff.items()}
bp_coeff_params = {k.replace("BP_", "c"): v for k, v in bp_coeff.items()}
self.sip1d_ap = Polynomial2D(
degree=ap_order, model_set_axis=model_set_axis, **ap_coeff_params
)
self.sip1d_bp = Polynomial2D(
degree=bp_order, model_set_axis=model_set_axis, **bp_coeff_params
)
super().__init__(
n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta
)
def __repr__(self):
return f"<{self.__class__.__name__}({[self.sip1d_ap, self.sip1d_bp]!r})>"
def __str__(self):
parts = [f"Model: {self.__class__.__name__}"]
for model in [self.sip1d_ap, self.sip1d_bp]:
parts.append(indent(str(model), width=4))
parts.append("")
return "\n".join(parts)
def evaluate(self, x, y):
x1 = self.sip1d_ap.evaluate(x, y, *self.sip1d_ap.param_sets)
y1 = self.sip1d_bp.evaluate(x, y, *self.sip1d_bp.param_sets)
return x1, y1
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Models that have physical origins.
"""
# pylint: disable=invalid-name, no-member
import warnings
import numpy as np
from astropy import constants as const
from astropy import units as u
from astropy.utils.exceptions import AstropyUserWarning
from .core import Fittable1DModel
from .parameters import InputParameterError, Parameter
__all__ = ["BlackBody", "Drude1D", "Plummer1D", "NFW"]
class BlackBody(Fittable1DModel):
"""
Blackbody model using the Planck function.
Parameters
----------
temperature : `~astropy.units.Quantity` ['temperature']
Blackbody temperature.
    scale : float or `~astropy.units.Quantity` ['dimensionless']
        Scale factor. If dimensionless, input units will be assumed
        to be in Hz and output units in erg / (cm ** 2 * s * Hz * sr).
        If not dimensionless, must be equivalent to either
        erg / (cm ** 2 * s * Hz * sr) or erg / (cm ** 2 * s * AA * sr),
        in which case the result will be returned in the requested units and
        the scale will be stripped of units (with the float value applied).
Notes
-----
Model formula:
.. math:: B_{\\nu}(T) = A \\frac{2 h \\nu^{3} / c^{2}}{exp(h \\nu / k T) - 1}
Examples
--------
>>> from astropy.modeling import models
>>> from astropy import units as u
>>> bb = models.BlackBody(temperature=5000*u.K)
>>> bb(6000 * u.AA) # doctest: +FLOAT_CMP
<Quantity 1.53254685e-05 erg / (cm2 Hz s sr)>
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import BlackBody
from astropy import units as u
from astropy.visualization import quantity_support
bb = BlackBody(temperature=5778*u.K)
wav = np.arange(1000, 110000) * u.AA
flux = bb(wav)
with quantity_support():
plt.figure()
plt.semilogx(wav, flux)
plt.axvline(bb.nu_max.to(u.AA, equivalencies=u.spectral()).value, ls='--')
plt.show()
"""
# We parametrize this model with a temperature and a scale.
temperature = Parameter(
default=5000.0, min=0, unit=u.K, description="Blackbody temperature"
)
scale = Parameter(default=1.0, min=0, description="Scale factor")
# We allow values without units to be passed when evaluating the model, and
# in this case the input x values are assumed to be frequencies in Hz or wavelengths
# in AA (depending on the choice of output units controlled by units on scale
# and stored in self._output_units during init).
_input_units_allow_dimensionless = True
# We enable the spectral equivalency by default for the spectral axis
input_units_equivalencies = {"x": u.spectral()}
# Store the native units returned by B_nu equation
_native_units = u.erg / (u.cm**2 * u.s * u.Hz * u.sr)
# Store the base native output units. If scale is not dimensionless, it
# must be equivalent to one of these. If equivalent to SLAM, then
# input_units will expect AA for 'x', otherwise Hz.
_native_output_units = {
"SNU": u.erg / (u.cm**2 * u.s * u.Hz * u.sr),
"SLAM": u.erg / (u.cm**2 * u.s * u.AA * u.sr),
}
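    # Illustrative note (not from the original source): passing, e.g.,
    # ``scale=1.0 * u.Jy / u.sr`` (equivalent to SNU) makes ``input_units``
    # expect Hz and the output come back in Jy / sr, while the stored
    # ``scale`` parameter itself is unitless.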
def __init__(self, *args, **kwargs):
scale = kwargs.get("scale", None)
# Support scale with non-dimensionless unit by stripping the unit and
# storing as self._output_units.
if hasattr(scale, "unit") and not scale.unit.is_equivalent(
u.dimensionless_unscaled
):
output_units = scale.unit
if not output_units.is_equivalent(
self._native_units, u.spectral_density(1 * u.AA)
):
raise ValueError(
"scale units not dimensionless or in "
f"surface brightness: {output_units}"
)
kwargs["scale"] = scale.value
self._output_units = output_units
else:
self._output_units = self._native_units
return super().__init__(*args, **kwargs)
def evaluate(self, x, temperature, scale):
"""Evaluate the model.
Parameters
----------
x : float, `~numpy.ndarray`, or `~astropy.units.Quantity` ['frequency']
Frequency at which to compute the blackbody. If no units are given,
this defaults to Hz (or AA if `scale` was initialized with units
equivalent to erg / (cm ** 2 * s * AA * sr)).
temperature : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
Temperature of the blackbody. If no units are given, this defaults
to Kelvin.
scale : float, `~numpy.ndarray`, or `~astropy.units.Quantity` ['dimensionless']
Desired scale for the blackbody.
Returns
-------
y : number or ndarray
Blackbody spectrum. The units are determined from the units of
``scale``.
.. note::
Use `numpy.errstate` to suppress Numpy warnings, if desired.
.. warning::
Output values might contain ``nan`` and ``inf``.
Raises
------
ValueError
Invalid temperature.
ZeroDivisionError
Wavelength is zero (when converting to frequency).
"""
if not isinstance(temperature, u.Quantity):
in_temp = u.Quantity(temperature, u.K)
else:
in_temp = temperature
if not isinstance(x, u.Quantity):
# then we assume it has input_units which depends on the
# requested output units (either Hz or AA)
in_x = u.Quantity(x, self.input_units["x"])
else:
in_x = x
# Convert to units for calculations, also force double precision
with u.add_enabled_equivalencies(u.spectral() + u.temperature()):
freq = u.Quantity(in_x, u.Hz, dtype=np.float64)
temp = u.Quantity(in_temp, u.K)
# Check if input values are physically possible
if np.any(temp < 0):
raise ValueError(f"Temperature should be positive: {temp}")
if not np.all(np.isfinite(freq)) or np.any(freq <= 0):
warnings.warn(
"Input contains invalid wavelength/frequency value(s)",
AstropyUserWarning,
)
log_boltz = const.h * freq / (const.k_B * temp)
boltzm1 = np.expm1(log_boltz)
# Calculate blackbody flux
bb_nu = 2.0 * const.h * freq**3 / (const.c**2 * boltzm1) / u.sr
if self.scale.unit is not None:
# Will be dimensionless at this point, but may not be dimensionless_unscaled
if not hasattr(scale, "unit"):
# during fitting, scale will be passed without units
# but we still need to convert from the input dimensionless
# to dimensionless unscaled
scale = scale * self.scale.unit
scale = scale.to(u.dimensionless_unscaled).value
# NOTE: scale is already stripped of any input units
y = scale * bb_nu.to(self._output_units, u.spectral_density(freq))
# If the temperature parameter has no unit, we should return a unitless
# value. This occurs for instance during fitting, since we drop the
# units temporarily.
if hasattr(temperature, "unit"):
return y
return y.value
@property
def input_units(self):
# The input units are those of the 'x' value, which will depend on the
# units compatible with the expected output units.
if self._output_units.is_equivalent(self._native_output_units["SNU"]):
return {self.inputs[0]: u.Hz}
else:
# only other option is equivalent with SLAM
return {self.inputs[0]: u.AA}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"temperature": u.K}
@property
def bolometric_flux(self):
"""Bolometric flux."""
if self.scale.unit is not None:
# Will be dimensionless at this point, but may not be dimensionless_unscaled
scale = self.scale.quantity.to(u.dimensionless_unscaled)
else:
scale = self.scale.value
# bolometric flux in the native units of the planck function
native_bolflux = scale * const.sigma_sb * self.temperature**4 / np.pi
# return in more "astro" units
return native_bolflux.to(u.erg / (u.cm**2 * u.s))
@property
def lambda_max(self):
"""Peak wavelength when the curve is expressed as power density."""
return const.b_wien / self.temperature
@property
def nu_max(self):
"""Peak frequency when the curve is expressed as power density."""
return 2.8214391 * const.k_B * self.temperature / const.h
class Drude1D(Fittable1DModel):
"""
    Drude model based on the behavior of electrons in materials (esp. metals).
Parameters
----------
amplitude : float
Peak value
x_0 : float
Position of the peak
fwhm : float
Full width at half maximum
Model formula:
    .. math:: f(x) = A \\frac{(fwhm/x_0)^2}{(x/x_0 - x_0/x)^2 + (fwhm/x_0)^2}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Drude1D
fig, ax = plt.subplots()
# generate the curves and plot them
x = np.arange(7.5 , 12.5 , 0.1)
dmodel = Drude1D(amplitude=1.0, fwhm=1.0, x_0=10.0)
ax.plot(x, dmodel(x))
ax.set_xlabel('x')
ax.set_ylabel('F(x)')
plt.show()
"""
amplitude = Parameter(default=1.0, description="Peak Value")
x_0 = Parameter(default=1.0, description="Position of the peak")
fwhm = Parameter(default=1.0, description="Full width at half maximum")
@staticmethod
def evaluate(x, amplitude, x_0, fwhm):
"""
One dimensional Drude model function.
"""
return (
amplitude
* ((fwhm / x_0) ** 2)
/ ((x / x_0 - x_0 / x) ** 2 + (fwhm / x_0) ** 2)
)
@staticmethod
def fit_deriv(x, amplitude, x_0, fwhm):
"""
Drude1D model function derivatives.
"""
d_amplitude = (fwhm / x_0) ** 2 / ((x / x_0 - x_0 / x) ** 2 + (fwhm / x_0) ** 2)
d_x_0 = (
-2
* amplitude
* d_amplitude
* (
(1 / x_0)
+ d_amplitude
* (x_0**2 / fwhm**2)
* (
(-x / x_0 - 1 / x) * (x / x_0 - x_0 / x)
- (2 * fwhm**2 / x_0**3)
)
)
)
d_fwhm = (2 * amplitude * d_amplitude / fwhm) * (1 - d_amplitude)
return [d_amplitude, d_x_0, d_fwhm]
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"fwhm": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
@property
def return_units(self):
if self.amplitude.unit is None:
return None
return {self.outputs[0]: self.amplitude.unit}
@x_0.validator
def x_0(self, val):
"""Ensure `x_0` is not 0."""
if np.any(val == 0):
raise InputParameterError("0 is not an allowed value for x_0")
def bounding_box(self, factor=50):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of FWHM used to define the limits.
"""
x0 = self.x_0
dx = factor * self.fwhm
return (x0 - dx, x0 + dx)
class Plummer1D(Fittable1DModel):
r"""One dimensional Plummer density profile model.
Parameters
----------
mass : float
Total mass of cluster.
r_plum : float
Scale parameter which sets the size of the cluster core.
Notes
-----
Model formula:
.. math::
\rho(r)=\frac{3M}{4\pi a^3}(1+\frac{r^2}{a^2})^{-5/2}
References
----------
.. [1] https://ui.adsabs.harvard.edu/abs/1911MNRAS..71..460P
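
    Examples
    --------
    A minimal sketch (unitless values chosen for illustration)::

        from astropy.modeling.models import Plummer1D

        plummer = Plummer1D(mass=1.0, r_plum=1.0)
        plummer(0.0)  # central density 3*mass / (4*pi*r_plum**3) ~ 0.2387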
"""
mass = Parameter(default=1.0, description="Total mass of cluster")
r_plum = Parameter(
default=1.0,
description="Scale parameter which sets the size of the cluster core",
)
@staticmethod
def evaluate(x, mass, r_plum):
"""
Evaluate plummer density profile model.
"""
return (
(3 * mass) / (4 * np.pi * r_plum**3) * (1 + (x / r_plum) ** 2) ** (-5 / 2)
)
@staticmethod
def fit_deriv(x, mass, r_plum):
"""
Plummer1D model derivatives.
"""
d_mass = 3 / ((4 * np.pi * r_plum**3) * (((x / r_plum) ** 2 + 1) ** (5 / 2)))
d_r_plum = (6 * mass * x**2 - 9 * mass * r_plum**2) / (
(4 * np.pi * r_plum**6) * (1 + (x / r_plum) ** 2) ** (7 / 2)
)
return [d_mass, d_r_plum]
@property
def input_units(self):
if self.mass.unit is None and self.r_plum.unit is None:
return None
else:
return {self.inputs[0]: self.r_plum.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"mass": outputs_unit[self.outputs[0]] * inputs_unit[self.inputs[0]] ** 3,
"r_plum": inputs_unit[self.inputs[0]],
}
class NFW(Fittable1DModel):
r"""
    Navarro–Frenk–White (NFW) profile: a model for the radial distribution of dark matter.
Parameters
----------
mass : float or `~astropy.units.Quantity` ['mass']
Mass of NFW peak within specified overdensity radius.
concentration : float
Concentration of the NFW profile.
redshift : float
Redshift of the NFW profile.
massfactor : tuple or str
Mass overdensity factor and type for provided profiles:
Tuple version:
("virial",) : virial radius
("critical", N) : radius where density is N times that of the critical density
("mean", N) : radius where density is N times that of the mean density
String version:
"virial" : virial radius
"Nc" : radius where density is N times that of the critical density (e.g. "200c")
"Nm" : radius where density is N times that of the mean density (e.g. "500m")
cosmo : :class:`~astropy.cosmology.Cosmology`
Background cosmology for density calculation. If None, the default cosmology will be used.
Notes
-----
Model formula:
    .. math:: \rho(r)=\frac{\delta_c\rho_{c}}{(r/r_s)(1+r/r_s)^2}
References
----------
.. [1] https://arxiv.org/pdf/astro-ph/9508025
.. [2] https://en.wikipedia.org/wiki/Navarro%E2%80%93Frenk%E2%80%93White_profile
.. [3] https://en.wikipedia.org/wiki/Virial_mass
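
    Examples
    --------
    An illustrative sketch; the parameter values are arbitrary and the
    default cosmology is assumed::

        from astropy import units as u
        from astropy.modeling.models import NFW

        nfw = NFW(mass=2e12 * u.M_sun, concentration=8.5, redshift=0.63,
                  massfactor=("critical", 200))
        density = nfw(2.0 * u.Mpc)  # mass density at r = 2 Mpc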
"""
# Model Parameters
# NFW Profile mass
mass = Parameter(
default=1.0,
min=1.0,
unit=u.M_sun,
description="Peak mass within specified overdensity radius",
)
# NFW profile concentration
concentration = Parameter(default=1.0, min=1.0, description="Concentration")
# NFW Profile redshift
redshift = Parameter(default=0.0, min=0.0, description="Redshift")
# We allow values without units to be passed when evaluating the model, and
# in this case the input r values are assumed to be lengths / positions in kpc.
_input_units_allow_dimensionless = True
def __init__(
self,
mass=u.Quantity(mass.default, mass.unit),
concentration=concentration.default,
redshift=redshift.default,
massfactor=("critical", 200),
cosmo=None,
**kwargs,
):
# Set default cosmology
if cosmo is None:
# LOCAL
from astropy.cosmology import default_cosmology
cosmo = default_cosmology.get()
# Set mass overdensity type and factor
self._density_delta(massfactor, cosmo, redshift)
# Establish mass units for density calculation (default solar masses)
if not isinstance(mass, u.Quantity):
in_mass = u.Quantity(mass, u.M_sun)
else:
in_mass = mass
# Obtain scale radius
self._radius_s(mass, concentration)
# Obtain scale density
self._density_s(mass, concentration)
super().__init__(
mass=in_mass, concentration=concentration, redshift=redshift, **kwargs
)
def evaluate(self, r, mass, concentration, redshift):
"""
One dimensional NFW profile function.
Parameters
----------
r : float or `~astropy.units.Quantity` ['length']
Radial position of density to be calculated for the NFW profile.
mass : float or `~astropy.units.Quantity` ['mass']
Mass of NFW peak within specified overdensity radius.
concentration : float
Concentration of the NFW profile.
redshift : float
Redshift of the NFW profile.
Returns
-------
density : float or `~astropy.units.Quantity` ['density']
NFW profile mass density at location ``r``. The density units are:
[``mass`` / ``r`` ^3]
Notes
-----
.. warning::
Output values might contain ``nan`` and ``inf``.
"""
# Create radial version of input with dimension
if hasattr(r, "unit"):
in_r = r
else:
in_r = u.Quantity(r, u.kpc)
# Define reduced radius (r / r_{\\rm s})
# also update scale radius
radius_reduced = in_r / self._radius_s(mass, concentration).to(in_r.unit)
# Density distribution
# \rho (r)=\frac{\rho_0}{\frac{r}{R_s}\left(1~+~\frac{r}{R_s}\right)^2}
# also update scale density
density = self._density_s(mass, concentration) / (
radius_reduced * (u.Quantity(1.0) + radius_reduced) ** 2
)
if hasattr(mass, "unit"):
return density
else:
return density.value
def _density_delta(self, massfactor, cosmo, redshift):
"""
Calculate density delta.
"""
# Set mass overdensity type and factor
if isinstance(massfactor, tuple):
# Tuple options
# ("virial") : virial radius
# ("critical", N) : radius where density is N that of the critical density
# ("mean", N) : radius where density is N that of the mean density
if massfactor[0].lower() == "virial":
# Virial Mass
delta = None
masstype = massfactor[0].lower()
elif massfactor[0].lower() == "critical":
# Critical or Mean Overdensity Mass
delta = float(massfactor[1])
masstype = "c"
elif massfactor[0].lower() == "mean":
# Critical or Mean Overdensity Mass
delta = float(massfactor[1])
masstype = "m"
else:
raise ValueError(
f"Massfactor '{massfactor[0]}' not one of 'critical', "
"'mean', or 'virial'"
)
else:
try:
# String options
# virial : virial radius
# Nc : radius where density is N that of the critical density
# Nm : radius where density is N that of the mean density
if massfactor.lower() == "virial":
# Virial Mass
delta = None
masstype = massfactor.lower()
elif massfactor[-1].lower() == "c" or massfactor[-1].lower() == "m":
# Critical or Mean Overdensity Mass
delta = float(massfactor[0:-1])
masstype = massfactor[-1].lower()
else:
raise ValueError(
f"Massfactor {massfactor} string not of the form "
"'#m', '#c', or 'virial'"
)
except (AttributeError, TypeError):
raise TypeError(f"Massfactor {massfactor} not a tuple or string")
# Set density from masstype specification
if masstype == "virial":
Om_c = cosmo.Om(redshift) - 1.0
d_c = 18.0 * np.pi**2 + 82.0 * Om_c - 39.0 * Om_c**2
self.density_delta = d_c * cosmo.critical_density(redshift)
elif masstype == "c":
self.density_delta = delta * cosmo.critical_density(redshift)
elif masstype == "m":
self.density_delta = (
delta * cosmo.critical_density(redshift) * cosmo.Om(redshift)
)
return self.density_delta
@staticmethod
def A_NFW(y):
r"""
Dimensionless volume integral of the NFW profile, used as an intermediate step in some
calculations for this model.
Notes
-----
Model formula:
.. math:: A_{NFW} = [\ln(1+y) - \frac{y}{1+y}]
"""
return np.log(1.0 + y) - (y / (1.0 + y))
def _density_s(self, mass, concentration):
"""
Calculate scale density of the NFW profile.
"""
# Enforce default units
if not isinstance(mass, u.Quantity):
in_mass = u.Quantity(mass, u.M_sun)
else:
in_mass = mass
# Calculate scale density
# M_{200} = 4\pi \rho_{s} R_{s}^3 \left[\ln(1+c) - \frac{c}{1+c}\right].
self.density_s = in_mass / (
4.0
* np.pi
* self._radius_s(in_mass, concentration) ** 3
* self.A_NFW(concentration)
)
return self.density_s
@property
def rho_scale(self):
r"""
Scale density of the NFW profile. Often written in the literature as :math:`\rho_s`.
"""
return self.density_s
def _radius_s(self, mass, concentration):
"""
Calculate scale radius of the NFW profile.
"""
# Enforce default units
if not isinstance(mass, u.Quantity):
in_mass = u.Quantity(mass, u.M_sun)
else:
in_mass = mass
# Delta Mass is related to delta radius by
# M_{200}=\frac{4}{3}\pi r_{200}^3 200 \rho_{c}
# And delta radius is related to the NFW scale radius by
# c = R / r_{\\rm s}
self.radius_s = (
((3.0 * in_mass) / (4.0 * np.pi * self.density_delta)) ** (1.0 / 3.0)
) / concentration
# Set radial units to kiloparsec by default (unit will be rescaled by units of radius
# in evaluate)
return self.radius_s.to(u.kpc)
@property
def r_s(self):
"""
Scale radius of the NFW profile.
"""
return self.radius_s
@property
def r_virial(self):
"""
Mass factor defined virial radius of the NFW profile (R200c for M200c, Rvir for Mvir, etc.).
"""
return self.r_s * self.concentration
@property
def r_max(self):
"""
Radius of maximum circular velocity.
"""
return self.r_s * 2.16258
@property
def v_max(self):
"""
Maximum circular velocity.
"""
return self.circular_velocity(self.r_max)
def circular_velocity(self, r):
r"""
Circular velocities of the NFW profile.
Parameters
----------
r : float or `~astropy.units.Quantity` ['length']
Radial position of velocity to be calculated for the NFW profile.
Returns
-------
velocity : float or `~astropy.units.Quantity` ['speed']
NFW profile circular velocity at location ``r``. The velocity units are:
[km / s]
Notes
-----
Model formula:
.. math:: v_{circ}(r)^2 = \frac{1}{x}\frac{\ln(1+cx)-(cx)/(1+cx)}{\ln(1+c)-c/(1+c)}
.. math:: x = r/r_s
.. warning::
Output values might contain ``nan`` and ``inf``.
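Examples
--------
A minimal sketch; the halo parameters are illustrative only::

    import astropy.units as u
    from astropy.modeling.models import NFW

    n = NFW(mass=2.0e12 * u.M_sun, concentration=8.5, redshift=0.63)
    print(n.circular_velocity(50 * u.kpc))  # velocity in km / s
    print(n.r_max, n.v_max)  # radius and value of the peak circular velocity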
"""
# Enforce default units (if parameters are without units)
if hasattr(r, "unit"):
in_r = r
else:
in_r = u.Quantity(r, u.kpc)
# Mass factor defined velocity (i.e. V200c for M200c, Vvir for Mvir)
v_profile = np.sqrt(
self.mass
* const.G.to(in_r.unit**3 / (self.mass.unit * u.s**2))
/ self.r_virial
)
# Define reduced radius (r / r_{\\rm s})
reduced_radius = in_r / self.r_virial.to(in_r.unit)
# Circular velocity given by:
# v^2=\frac{1}{x}\frac{\ln(1+cx)-(cx)/(1+cx)}{\ln(1+c)-c/(1+c)}
# where x=r/r_{200}
velocity = np.sqrt(
(v_profile**2 * self.A_NFW(self.concentration * reduced_radius))
/ (reduced_radius * self.A_NFW(self.concentration))
)
return velocity.to(u.km / u.s)
@property
def input_units(self):
# The units for the 'r' variable should be a length (default kpc)
return {self.inputs[0]: u.kpc}
@property
def return_units(self):
# The units for the 'density' variable should be a matter density (default M_sun / kpc^3)
if self.mass.unit is None:
return {self.outputs[0]: u.M_sun / self.input_units[self.inputs[0]] ** 3}
else:
return {
self.outputs[0]: self.mass.unit / self.input_units[self.inputs[0]] ** 3
}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"mass": u.M_sun, "concentration": None, "redshift": None}
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Statistic functions used in `~astropy.modeling.fitting`.
"""
# pylint: disable=invalid-name
import numpy as np
__all__ = ["leastsquare", "leastsquare_1d", "leastsquare_2d", "leastsquare_3d"]
def leastsquare(measured_vals, updated_model, weights, *x):
"""Least square statistic, with optional weights, in N-dimensions.
Parameters
----------
measured_vals : ndarray or sequence
Measured data values. Will be cast to array whose
shape must match the array-cast of the evaluated model.
updated_model : :class:`~astropy.modeling.Model` instance
Model with parameters set by the current iteration of the optimizer.
When evaluated on ``x``, it must return an array matching the shape of ``measured_vals``.
weights : ndarray or None
Array of weights to apply to each residual.
*x : ndarray
Independent variables on which to evaluate the model.
Returns
-------
res : float
The sum of least squares.
See Also
--------
:func:`~astropy.modeling.statistic.leastsquare_1d`
:func:`~astropy.modeling.statistic.leastsquare_2d`
:func:`~astropy.modeling.statistic.leastsquare_3d`
Notes
-----
Models in :mod:`~astropy.modeling` have broadcasting rules that try to
match inputs with outputs with Model shapes. Numpy arrays have flexible
broadcasting rules, so mismatched shapes can often be made compatible. To
ensure data matches the model we must perform shape comparison and leverage
the Numpy arithmetic functions. This can obfuscate arithmetic computation
overrides, like with Quantities. Implement a custom statistic for more
direct control.
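Examples
--------
A minimal sketch; the model and the constant offset are illustrative only::

    import numpy as np
    from astropy.modeling import models
    from astropy.modeling.statistic import leastsquare

    x = np.linspace(0.0, 10.0, 50)
    model = models.Polynomial1D(degree=1, c0=1.0, c1=2.0)
    data = model(x) + 0.1  # constant residual of 0.1
    # 50 points with squared residual 0.01 each -> 0.5
    print(leastsquare(data, model, None, x))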
"""
model_vals = updated_model(*x)
if np.shape(model_vals) != np.shape(measured_vals):
raise ValueError(
f"Shape mismatch between model ({np.shape(model_vals)}) "
f"and measured ({np.shape(measured_vals)})"
)
if weights is None:
weights = 1.0
return np.sum(np.square(weights * np.subtract(model_vals, measured_vals)))
# -------------------------------------------------------------------
def leastsquare_1d(measured_vals, updated_model, weights, x):
"""
Least square statistic with optional weights.
Safer than the general :func:`~astropy.modeling.statistic.leastsquare`
for 1D models by avoiding numpy methods that support broadcasting.
Parameters
----------
measured_vals : ndarray
Measured data values.
updated_model : `~astropy.modeling.Model`
Model with parameters set by the current iteration of the optimizer.
weights : ndarray or None
Array of weights to apply to each residual.
x : ndarray
Independent variable "x" on which to evaluate the model.
Returns
-------
res : float
The sum of least squares.
See Also
--------
:func:`~astropy.modeling.statistic.leastsquare`
"""
model_vals = updated_model(x)
if weights is None:
return np.sum((model_vals - measured_vals) ** 2)
return np.sum((weights * (model_vals - measured_vals)) ** 2)
def leastsquare_2d(measured_vals, updated_model, weights, x, y):
"""
Least square statistic with optional weights.
Safer than the general :func:`~astropy.modeling.statistic.leastsquare`
for 2D models by avoiding numpy methods that support broadcasting.
Parameters
----------
measured_vals : ndarray
Measured data values.
updated_model : `~astropy.modeling.Model`
Model with parameters set by the current iteration of the optimizer.
weights : ndarray or None
Array of weights to apply to each residual.
x : ndarray
Independent variable "x" on which to evaluate the model.
y : ndarray
Independent variable "y" on which to evaluate the model.
Returns
-------
res : float
The sum of least squares.
See Also
--------
:func:`~astropy.modeling.statistic.leastsquare`
"""
model_vals = updated_model(x, y)
if weights is None:
return np.sum((model_vals - measured_vals) ** 2)
return np.sum((weights * (model_vals - measured_vals)) ** 2)
def leastsquare_3d(measured_vals, updated_model, weights, x, y, z):
"""
Least square statistic with optional weights.
Safer than the general :func:`~astropy.modeling.statistic.leastsquare`
for 3D models by avoiding numpy methods that support broadcasting.
Parameters
----------
measured_vals : ndarray
Measured data values.
updated_model : `~astropy.modeling.Model`
Model with parameters set by the current iteration of the optimizer.
weights : ndarray or None
Array of weights to apply to each residual.
x : ndarray
Independent variable "x" on which to evaluate the model.
y : ndarray
Independent variable "y" on which to evaluate the model.
z : ndarray
Independent variable "z" on which to evaluate the model.
Returns
-------
res : float
The sum of least squares.
See Also
--------
:func:`~astropy.modeling.statistic.leastsquare`
"""
model_vals = updated_model(x, y, z)
if weights is None:
return np.sum((model_vals - measured_vals) ** 2)
return np.sum((weights * (model_vals - measured_vals)) ** 2)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Power law model variants.
"""
# pylint: disable=invalid-name
import numpy as np
from astropy.units import Magnitude, Quantity, UnitsError, dimensionless_unscaled, mag
from .core import Fittable1DModel
from .parameters import InputParameterError, Parameter
__all__ = [
"PowerLaw1D",
"BrokenPowerLaw1D",
"SmoothlyBrokenPowerLaw1D",
"ExponentialCutoffPowerLaw1D",
"LogParabola1D",
"Schechter1D",
]
class PowerLaw1D(Fittable1DModel):
"""
One dimensional power law model.
Parameters
----------
amplitude : float
Model amplitude at the reference point
x_0 : float
Reference point
alpha : float
Power law index
See Also
--------
BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D, LogParabola1D
Notes
-----
Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha` for ``alpha``):
.. math:: f(x) = A (x / x_0) ^ {-\\alpha}
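Examples
--------
A minimal evaluation sketch; the parameter values are illustrative only::

    from astropy.modeling.models import PowerLaw1D

    pl = PowerLaw1D(amplitude=10.0, x_0=1.0, alpha=2.0)
    # f(2) = 10 * (2 / 1)**-2 = 2.5
    print(pl(2.0))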
"""
amplitude = Parameter(default=1, description="Peak value at the reference point")
x_0 = Parameter(default=1, description="Reference point")
alpha = Parameter(default=1, description="Power law index")
@staticmethod
def evaluate(x, amplitude, x_0, alpha):
"""One dimensional power law model function."""
xx = x / x_0
return amplitude * xx ** (-alpha)
@staticmethod
def fit_deriv(x, amplitude, x_0, alpha):
"""One dimensional power law derivative with respect to parameters."""
xx = x / x_0
d_amplitude = xx ** (-alpha)
d_x_0 = amplitude * alpha * d_amplitude / x_0
d_alpha = -amplitude * d_amplitude * np.log(xx)
return [d_amplitude, d_x_0, d_alpha]
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class BrokenPowerLaw1D(Fittable1DModel):
"""
One dimensional power law model with a break.
Parameters
----------
amplitude : float
Model amplitude at the break point.
x_break : float
Break point.
alpha_1 : float
Power law index for x < x_break.
alpha_2 : float
Power law index for x > x_break.
See Also
--------
PowerLaw1D, ExponentialCutoffPowerLaw1D, LogParabola1D
Notes
-----
Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha_1`
for ``alpha_1`` and :math:`\\alpha_2` for ``alpha_2``):
.. math::
f(x) = \\left \\{
\\begin{array}{ll}
A (x / x_{break}) ^ {-\\alpha_1} & : x < x_{break} \\\\
A (x / x_{break}) ^ {-\\alpha_2} & : x > x_{break} \\\\
\\end{array}
\\right.
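Examples
--------
A minimal evaluation sketch; the parameter values are illustrative only::

    from astropy.modeling.models import BrokenPowerLaw1D

    bpl = BrokenPowerLaw1D(amplitude=1.0, x_break=5.0, alpha_1=1.0, alpha_2=3.0)
    # Slope -1 below the break, -3 above it
    print(bpl(2.5), bpl(10.0))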
"""
amplitude = Parameter(default=1, description="Peak value at break point")
x_break = Parameter(default=1, description="Break point")
alpha_1 = Parameter(default=1, description="Power law index before break point")
alpha_2 = Parameter(default=1, description="Power law index after break point")
@staticmethod
def evaluate(x, amplitude, x_break, alpha_1, alpha_2):
"""One dimensional broken power law model function."""
alpha = np.where(x < x_break, alpha_1, alpha_2)
xx = x / x_break
return amplitude * xx ** (-alpha)
@staticmethod
def fit_deriv(x, amplitude, x_break, alpha_1, alpha_2):
"""One dimensional broken power law derivative with respect to parameters."""
alpha = np.where(x < x_break, alpha_1, alpha_2)
xx = x / x_break
d_amplitude = xx ** (-alpha)
d_x_break = amplitude * alpha * d_amplitude / x_break
d_alpha = -amplitude * d_amplitude * np.log(xx)
d_alpha_1 = np.where(x < x_break, d_alpha, 0)
d_alpha_2 = np.where(x >= x_break, d_alpha, 0)
return [d_amplitude, d_x_break, d_alpha_1, d_alpha_2]
@property
def input_units(self):
if self.x_break.unit is None:
return None
return {self.inputs[0]: self.x_break.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_break": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class SmoothlyBrokenPowerLaw1D(Fittable1DModel):
"""One dimensional smoothly broken power law model.
Parameters
----------
amplitude : float
Model amplitude at the break point.
x_break : float
Break point.
alpha_1 : float
Power law index for ``x << x_break``.
alpha_2 : float
Power law index for ``x >> x_break``.
delta : float
Smoothness parameter.
See Also
--------
BrokenPowerLaw1D
Notes
-----
Model formula (with :math:`A` for ``amplitude``, :math:`x_b` for
``x_break``, :math:`\\alpha_1` for ``alpha_1``,
:math:`\\alpha_2` for ``alpha_2`` and :math:`\\Delta` for
``delta``):
.. math::
f(x) = A \\left( \\frac{x}{x_b} \\right) ^ {-\\alpha_1}
\\left\\{
\\frac{1}{2}
\\left[
1 + \\left( \\frac{x}{x_b}\\right)^{1 / \\Delta}
\\right]
\\right\\}^{(\\alpha_1 - \\alpha_2) \\Delta}
The change of slope occurs between the values :math:`x_1`
and :math:`x_2` such that:
.. math::
\\log_{10} \\frac{x_2}{x_b} = \\log_{10} \\frac{x_b}{x_1}
\\sim \\Delta
At values :math:`x \\lesssim x_1` and :math:`x \\gtrsim x_2` the
model is approximately a simple power law with index
:math:`\\alpha_1` and :math:`\\alpha_2` respectively. The two
power laws are smoothly joined at values :math:`x_1 < x < x_2`,
hence the :math:`\\Delta` parameter sets the "smoothness" of the
slope change.
The ``delta`` parameter is bounded to values greater than 1e-3
(corresponding to :math:`x_2 / x_1 \\gtrsim 1.002`) to avoid
overflow errors.
The ``amplitude`` parameter is bounded to positive values since
this model is typically used to represent positive quantities.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling import models
x = np.logspace(0.7, 2.3, 500)
f = models.SmoothlyBrokenPowerLaw1D(amplitude=1, x_break=20,
alpha_1=-2, alpha_2=2)
plt.figure()
plt.title("amplitude=1, x_break=20, alpha_1=-2, alpha_2=2")
f.delta = 0.5
plt.loglog(x, f(x), '--', label='delta=0.5')
f.delta = 0.3
plt.loglog(x, f(x), '-.', label='delta=0.3')
f.delta = 0.1
plt.loglog(x, f(x), label='delta=0.1')
plt.axis([x.min(), x.max(), 0.1, 1.1])
plt.legend(loc='lower center')
plt.grid(True)
plt.show()
"""
amplitude = Parameter(
default=1, min=0, description="Peak value at break point", mag=True
)
x_break = Parameter(default=1, description="Break point")
alpha_1 = Parameter(default=-2, description="Power law index before break point")
alpha_2 = Parameter(default=2, description="Power law index after break point")
delta = Parameter(default=1, min=1.0e-3, description="Smoothness Parameter")
@amplitude.validator
def amplitude(self, value):
if np.any(value <= 0):
raise InputParameterError("amplitude parameter must be > 0")
@delta.validator
def delta(self, value):
if np.any(value < 0.001):
raise InputParameterError("delta parameter must be >= 0.001")
@staticmethod
def evaluate(x, amplitude, x_break, alpha_1, alpha_2, delta):
"""One dimensional smoothly broken power law model function."""
# Pre-calculate `x/x_b`
xx = x / x_break
# Initialize the return value
f = np.zeros_like(xx, subok=False)
if isinstance(amplitude, Quantity):
return_unit = amplitude.unit
amplitude = amplitude.value
else:
return_unit = None
# The quantity `t = (x / x_b)^(1 / delta)` can become quite
# large. To avoid overflow errors we will start by calculating
# its natural logarithm:
logt = np.log(xx) / delta
# When `t >> 1` or `t << 1` we don't actually need to compute
# the `t` value since the main formula (see docstring) can be
# significantly simplified by neglecting `1` or `t`
# respectively. In the following we will check whether `t` is
# much greater, much smaller, or comparable to 1 by comparing
# the `logt` value with an appropriate threshold.
threshold = 30 # corresponding to exp(30) ~ 1e13
i = logt > threshold
if i.max():
# In this case the main formula reduces to a simple power
# law with index `alpha_2`.
f[i] = (
amplitude * xx[i] ** (-alpha_2) / (2.0 ** ((alpha_1 - alpha_2) * delta))
)
i = logt < -threshold
if i.max():
# In this case the main formula reduces to a simple power
# law with index `alpha_1`.
f[i] = (
amplitude * xx[i] ** (-alpha_1) / (2.0 ** ((alpha_1 - alpha_2) * delta))
)
i = np.abs(logt) <= threshold
if i.max():
# In this case the `t` value is "comparable" to 1, hence
# we will evaluate the whole formula.
t = np.exp(logt[i])
r = (1.0 + t) / 2.0
f[i] = amplitude * xx[i] ** (-alpha_1) * r ** ((alpha_1 - alpha_2) * delta)
if return_unit:
return Quantity(f, unit=return_unit, copy=False, subok=True)
return f
@staticmethod
def fit_deriv(x, amplitude, x_break, alpha_1, alpha_2, delta):
"""One dimensional smoothly broken power law derivative with respect
to parameters.
"""
# Pre-calculate `x_b` and `x/x_b` and `logt` (see comments in
# SmoothlyBrokenPowerLaw1D.evaluate)
xx = x / x_break
logt = np.log(xx) / delta
# Initialize the return values
f = np.zeros_like(xx)
d_amplitude = np.zeros_like(xx)
d_x_break = np.zeros_like(xx)
d_alpha_1 = np.zeros_like(xx)
d_alpha_2 = np.zeros_like(xx)
d_delta = np.zeros_like(xx)
threshold = 30 # (see comments in SmoothlyBrokenPowerLaw1D.evaluate)
i = logt > threshold
if i.max():
f[i] = (
amplitude * xx[i] ** (-alpha_2) / (2.0 ** ((alpha_1 - alpha_2) * delta))
)
d_amplitude[i] = f[i] / amplitude
d_x_break[i] = f[i] * alpha_2 / x_break
d_alpha_1[i] = f[i] * (-delta * np.log(2))
d_alpha_2[i] = f[i] * (-np.log(xx[i]) + delta * np.log(2))
d_delta[i] = f[i] * (-(alpha_1 - alpha_2) * np.log(2))
i = logt < -threshold
if i.max():
f[i] = (
amplitude * xx[i] ** (-alpha_1) / (2.0 ** ((alpha_1 - alpha_2) * delta))
)
d_amplitude[i] = f[i] / amplitude
d_x_break[i] = f[i] * alpha_1 / x_break
d_alpha_1[i] = f[i] * (-np.log(xx[i]) - delta * np.log(2))
d_alpha_2[i] = f[i] * delta * np.log(2)
d_delta[i] = f[i] * (-(alpha_1 - alpha_2) * np.log(2))
i = np.abs(logt) <= threshold
if i.max():
t = np.exp(logt[i])
r = (1.0 + t) / 2.0
f[i] = amplitude * xx[i] ** (-alpha_1) * r ** ((alpha_1 - alpha_2) * delta)
d_amplitude[i] = f[i] / amplitude
d_x_break[i] = (
f[i] * (alpha_1 - (alpha_1 - alpha_2) * t / 2.0 / r) / x_break
)
d_alpha_1[i] = f[i] * (-np.log(xx[i]) + delta * np.log(r))
d_alpha_2[i] = f[i] * (-delta * np.log(r))
d_delta[i] = (
f[i]
* (alpha_1 - alpha_2)
* (np.log(r) - t / (1.0 + t) / delta * np.log(xx[i]))
)
return [d_amplitude, d_x_break, d_alpha_1, d_alpha_2, d_delta]
@property
def input_units(self):
if self.x_break.unit is None:
return None
return {self.inputs[0]: self.x_break.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_break": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class ExponentialCutoffPowerLaw1D(Fittable1DModel):
"""
One dimensional power law model with an exponential cutoff.
Parameters
----------
amplitude : float
Model amplitude
x_0 : float
Reference point
alpha : float
Power law index
x_cutoff : float
Cutoff point
See Also
--------
PowerLaw1D, BrokenPowerLaw1D, LogParabola1D
Notes
-----
Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha` for ``alpha``):
.. math:: f(x) = A (x / x_0) ^ {-\\alpha} \\exp (-x / x_{cutoff})
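Examples
--------
A minimal evaluation sketch; the parameter values are illustrative only::

    import numpy as np
    from astropy.modeling.models import ExponentialCutoffPowerLaw1D

    ecpl = ExponentialCutoffPowerLaw1D(amplitude=1.0, x_0=1.0,
                                       alpha=1.5, x_cutoff=10.0)
    # f(5) = 5**-1.5 * exp(-5 / 10)
    print(ecpl(5.0), 5.0**-1.5 * np.exp(-0.5))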
"""
amplitude = Parameter(default=1, description="Peak value of model")
x_0 = Parameter(default=1, description="Reference point")
alpha = Parameter(default=1, description="Power law index")
x_cutoff = Parameter(default=1, description="Cutoff point")
@staticmethod
def evaluate(x, amplitude, x_0, alpha, x_cutoff):
"""One dimensional exponential cutoff power law model function."""
xx = x / x_0
return amplitude * xx ** (-alpha) * np.exp(-x / x_cutoff)
@staticmethod
def fit_deriv(x, amplitude, x_0, alpha, x_cutoff):
"""
One dimensional exponential cutoff power law derivative with respect to parameters.
"""
xx = x / x_0
xc = x / x_cutoff
d_amplitude = xx ** (-alpha) * np.exp(-xc)
d_x_0 = alpha * amplitude * d_amplitude / x_0
d_alpha = -amplitude * d_amplitude * np.log(xx)
d_x_cutoff = amplitude * x * d_amplitude / x_cutoff**2
return [d_amplitude, d_x_0, d_alpha, d_x_cutoff]
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"x_cutoff": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class LogParabola1D(Fittable1DModel):
"""
One dimensional log parabola model (sometimes called curved power law).
Parameters
----------
amplitude : float
Model amplitude
x_0 : float
Reference point
alpha : float
Power law index
beta : float
Power law curvature
See Also
--------
PowerLaw1D, BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D
Notes
-----
Model formula (with :math:`A` for ``amplitude`` and
:math:`\\alpha` for ``alpha`` and :math:`\\beta` for ``beta``):
.. math:: f(x) = A \\left(
\\frac{x}{x_{0}}\\right)^{- \\alpha - \\beta \\log{\\left (\\frac{x}{x_{0}}
\\right )}}
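Examples
--------
A minimal evaluation sketch; the parameter values are illustrative only::

    from astropy.modeling.models import LogParabola1D

    lp = LogParabola1D(amplitude=1.0, x_0=1.0, alpha=2.0, beta=0.1)
    # At x = x_0 the log term vanishes, so f(x_0) equals the amplitude
    print(lp(1.0))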
"""
amplitude = Parameter(default=1, description="Peak value of model")
x_0 = Parameter(default=1, description="Reference point")
alpha = Parameter(default=1, description="Power law index")
beta = Parameter(default=0, description="Power law curvature")
@staticmethod
def evaluate(x, amplitude, x_0, alpha, beta):
"""One dimensional log parabola model function."""
xx = x / x_0
exponent = -alpha - beta * np.log(xx)
return amplitude * xx**exponent
@staticmethod
def fit_deriv(x, amplitude, x_0, alpha, beta):
"""One dimensional log parabola derivative with respect to parameters."""
xx = x / x_0
log_xx = np.log(xx)
exponent = -alpha - beta * log_xx
d_amplitude = xx**exponent
d_beta = -amplitude * d_amplitude * log_xx**2
d_x_0 = amplitude * d_amplitude * (beta * log_xx / x_0 - exponent / x_0)
d_alpha = -amplitude * d_amplitude * log_xx
return [d_amplitude, d_x_0, d_alpha, d_beta]
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Schechter1D(Fittable1DModel):
r"""
Schechter luminosity function (`Schechter 1976
<https://ui.adsabs.harvard.edu/abs/1976ApJ...203..297S/abstract>`_),
parameterized in terms of magnitudes.
Parameters
----------
phi_star : float
The normalization factor in units of number density.
m_star : float
The characteristic magnitude where the power-law form of the
function cuts off.
alpha : float
The power law index, also known as the faint-end slope. Must not
have units.
See Also
--------
PowerLaw1D, ExponentialCutoffPowerLaw1D, BrokenPowerLaw1D
Notes
-----
Model formula (with :math:`\phi^{*}` for ``phi_star``, :math:`M^{*}`
for ``m_star``, and :math:`\alpha` for ``alpha``):
.. math::
n(M) \ dM = (0.4 \ln 10) \ \phi^{*} \
[{10^{0.4 (M^{*} - M)}}]^{\alpha + 1} \
\exp{[-10^{0.4 (M^{*} - M)}]} \ dM
``phi_star`` is the normalization factor in units of number density.
``m_star`` is the characteristic magnitude where the power-law form
of the function cuts off into the exponential form. ``alpha`` is
the power-law index, defining the faint-end slope of the luminosity
function.
Examples
--------
.. plot::
:include-source:
from astropy.modeling.models import Schechter1D
import astropy.units as u
import matplotlib.pyplot as plt
import numpy as np
phi_star = 4.3e-4 * (u.Mpc ** -3)
m_star = -20.26
alpha = -1.98
model = Schechter1D(phi_star, m_star, alpha)
mag = np.linspace(-25, -17)
fig, ax = plt.subplots()
ax.plot(mag, model(mag))
ax.set_yscale('log')
ax.set_xlim(-22.6, -17)
ax.set_ylim(1.e-7, 1.e-2)
ax.set_xlabel('$M_{UV}$')
ax.set_ylabel('$\phi$ [mag$^{-1}$ Mpc$^{-3}]$')
References
----------
.. [1] Schechter 1976; ApJ 203, 297
(https://ui.adsabs.harvard.edu/abs/1976ApJ...203..297S/abstract)
.. [2] `Luminosity function <https://en.wikipedia.org/wiki/Luminosity_function_(astronomy)>`_
"""
phi_star = Parameter(
default=1.0, description="Normalization factor in units of number density"
)
m_star = Parameter(default=-20.0, description="Characteristic magnitude", mag=True)
alpha = Parameter(default=-1.0, description="Faint-end slope")
@staticmethod
def _factor(magnitude, m_star):
factor_exp = magnitude - m_star
if isinstance(factor_exp, Quantity):
if factor_exp.unit == mag:
factor_exp = Magnitude(factor_exp.value, unit=mag)
return factor_exp.to(dimensionless_unscaled)
else:
raise UnitsError(
"The units of magnitude and m_star must be a magnitude"
)
else:
return 10 ** (-0.4 * factor_exp)
def evaluate(self, mag, phi_star, m_star, alpha):
"""Schechter luminosity function model function."""
factor = self._factor(mag, m_star)
return 0.4 * np.log(10) * phi_star * factor ** (alpha + 1) * np.exp(-factor)
def fit_deriv(self, mag, phi_star, m_star, alpha):
"""
Schechter luminosity function derivative with respect to
parameters.
"""
factor = self._factor(mag, m_star)
d_phi_star = 0.4 * np.log(10) * factor ** (alpha + 1) * np.exp(-factor)
func = phi_star * d_phi_star
d_m_star = (alpha + 1) * 0.4 * np.log(10) * func - (
0.4 * np.log(10) * func * factor
)
d_alpha = func * np.log(factor)
return [d_phi_star, d_m_star, d_alpha]
@property
def input_units(self):
if self.m_star.unit is None:
return None
return {self.inputs[0]: self.m_star.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"m_star": inputs_unit[self.inputs[0]],
"phi_star": outputs_unit[self.outputs[0]],
}
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import os
import queue
import select
import socket
import threading
import time
import uuid
import warnings
import xmlrpc.client as xmlrpc
from urllib.parse import urlunparse
from astropy import log
from .constants import SAMP_STATUS_OK, __profile_version__
from .errors import SAMPHubError, SAMPProxyError, SAMPWarning
from .lockfile_helpers import create_lock_file, read_lockfile
from .standard_profile import ThreadingXMLRPCServer
from .utils import ServerProxyPool, _HubAsClient, internet_on
from .web_profile import WebProfileXMLRPCServer, web_profile_text_dialog
__all__ = ["SAMPHubServer", "WebProfileDialog"]
__doctest_skip__ = [".", "SAMPHubServer.*"]
class SAMPHubServer:
"""
SAMP Hub Server.
Parameters
----------
secret : str, optional
The secret code to use for the SAMP lockfile. If none is specified,
the :func:`uuid.uuid1` function is used to generate one.
addr : str, optional
Listening address (or IP). This defaults to 127.0.0.1 if the internet
is not reachable, otherwise it defaults to the host name.
port : int, optional
Listening XML-RPC server socket port. If left set to 0 (the default),
the operating system will select a free port.
lockfile : str, optional
Custom lockfile name.
timeout : int, optional
Hub inactivity timeout. If ``timeout > 0`` then the Hub automatically
stops after an inactivity period longer than ``timeout`` seconds. By
default ``timeout`` is set to 0 (Hub never expires).
client_timeout : int, optional
Client inactivity timeout. If ``client_timeout > 0`` then the Hub
automatically unregisters clients that have been inactive for a
period longer than ``client_timeout`` seconds. By default
``client_timeout`` is set to 0 (clients never expire).
mode : str, optional
Defines the Hub running mode. If ``mode`` is ``'single'`` then the Hub
runs using the standard ``.samp`` lock-file, having a single instance
for user desktop session. Otherwise, if ``mode`` is ``'multiple'``,
then the Hub runs using a non-standard lock-file, placed in
``.samp-1`` directory, of the form ``samp-hub-<UUID>``, where
``<UUID>`` is a unique UUID assigned to the hub.
label : str, optional
A string used to label the Hub with a human readable name. This string
is written in the lock-file assigned to the ``hub.label`` token.
web_profile : bool, optional
Enables or disables the Web Profile support.
web_profile_dialog : class, optional
Allows a class instance to be specified using ``web_profile_dialog``
to replace the terminal-based message with e.g. a GUI pop-up. Two
`queue.Queue` instances will be added to the instance as attributes
``queue_request`` and ``queue_result``. When a request is received via
the ``queue_request`` queue, the pop-up should be displayed, and a
value of `True` or `False` should be added to ``queue_result``
depending on whether the user accepted or refused the connection.
web_port : int, optional
The port to use for web SAMP. This should not be changed except for
testing purposes, since web SAMP should always use port 21012.
pool_size : int, optional
The number of socket connections opened to communicate with the
clients.
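Examples
--------
A minimal sketch of running a standalone hub without the Web Profile::

    from astropy.samp import SAMPHubServer

    hub = SAMPHubServer(web_profile=False)
    hub.start()
    # ... clients may now register and exchange messages ...
    hub.stop()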
"""
def __init__(
self,
secret=None,
addr=None,
port=0,
lockfile=None,
timeout=0,
client_timeout=0,
mode="single",
label="",
web_profile=True,
web_profile_dialog=None,
web_port=21012,
pool_size=20,
):
# Generate random ID for the hub
self._id = str(uuid.uuid1())
# General settings
self._is_running = False
self._customlockfilename = lockfile
self._lockfile = None
self._addr = addr
self._port = port
self._mode = mode
self._label = label
self._timeout = timeout
self._client_timeout = client_timeout
self._pool_size = pool_size
# Web profile specific attributes
self._web_profile = web_profile
self._web_profile_dialog = web_profile_dialog
self._web_port = web_port
self._web_profile_server = None
self._web_profile_callbacks = {}
self._web_profile_requests_queue = None
self._web_profile_requests_result = None
self._web_profile_requests_semaphore = None
self._host_name = "127.0.0.1"
if internet_on():
try:
self._host_name = socket.getfqdn()
socket.getaddrinfo(self._addr or self._host_name, self._port or 0)
except OSError:
self._host_name = "127.0.0.1"
# Threading stuff
self._thread_lock = threading.Lock()
self._thread_run = None
self._thread_hub_timeout = None
self._thread_client_timeout = None
self._launched_threads = []
# Variables for timeout testing:
self._last_activity_time = None
self._client_activity_time = {}
# Hub message id counter, used to create hub msg ids
self._hub_msg_id_counter = 0
# Hub secret code
self._hub_secret_code_customized = secret
self._hub_secret = self._create_secret_code()
# Hub public id (as SAMP client)
self._hub_public_id = ""
# Client ids
# {private_key: (public_id, timestamp)}
self._private_keys = {}
# Metadata per client
# {private_key: metadata}
self._metadata = {}
# List of subscribed clients per MType
# {mtype: private_key list}
self._mtype2ids = {}
# List of subscribed MTypes per client
# {private_key: mtype list}
self._id2mtypes = {}
# List of XML-RPC addresses per client
# {public_id: (XML-RPC address, ServerProxyPool instance)}
self._xmlrpc_endpoints = {}
# Synchronous message id heap
self._sync_msg_ids_heap = {}
# Public ids counter
self._client_id_counter = -1
@property
def id(self):
"""
The unique hub ID.
"""
return self._id
def _register_standard_api(self, server):
# Standard Profile only operations
server.register_function(self._ping, "samp.hub.ping")
server.register_function(
self._set_xmlrpc_callback, "samp.hub.setXmlrpcCallback"
)
# Standard API operations
server.register_function(self._register, "samp.hub.register")
server.register_function(self._unregister, "samp.hub.unregister")
server.register_function(self._declare_metadata, "samp.hub.declareMetadata")
server.register_function(self._get_metadata, "samp.hub.getMetadata")
server.register_function(
self._declare_subscriptions, "samp.hub.declareSubscriptions"
)
server.register_function(self._get_subscriptions, "samp.hub.getSubscriptions")
server.register_function(
self._get_registered_clients, "samp.hub.getRegisteredClients"
)
server.register_function(
self._get_subscribed_clients, "samp.hub.getSubscribedClients"
)
server.register_function(self._notify, "samp.hub.notify")
server.register_function(self._notify_all, "samp.hub.notifyAll")
server.register_function(self._call, "samp.hub.call")
server.register_function(self._call_all, "samp.hub.callAll")
server.register_function(self._call_and_wait, "samp.hub.callAndWait")
server.register_function(self._reply, "samp.hub.reply")
def _register_web_profile_api(self, server):
# Web Profile methods like Standard Profile
server.register_function(self._ping, "samp.webhub.ping")
server.register_function(self._unregister, "samp.webhub.unregister")
server.register_function(self._declare_metadata, "samp.webhub.declareMetadata")
server.register_function(self._get_metadata, "samp.webhub.getMetadata")
server.register_function(
self._declare_subscriptions, "samp.webhub.declareSubscriptions"
)
server.register_function(
self._get_subscriptions, "samp.webhub.getSubscriptions"
)
server.register_function(
self._get_registered_clients, "samp.webhub.getRegisteredClients"
)
server.register_function(
self._get_subscribed_clients, "samp.webhub.getSubscribedClients"
)
server.register_function(self._notify, "samp.webhub.notify")
server.register_function(self._notify_all, "samp.webhub.notifyAll")
server.register_function(self._call, "samp.webhub.call")
server.register_function(self._call_all, "samp.webhub.callAll")
server.register_function(self._call_and_wait, "samp.webhub.callAndWait")
server.register_function(self._reply, "samp.webhub.reply")
# Methods particularly for Web Profile
server.register_function(self._web_profile_register, "samp.webhub.register")
server.register_function(
self._web_profile_allowReverseCallbacks, "samp.webhub.allowReverseCallbacks"
)
server.register_function(
self._web_profile_pullCallbacks, "samp.webhub.pullCallbacks"
)
def _start_standard_server(self):
self._server = ThreadingXMLRPCServer(
(self._addr or self._host_name, self._port or 0),
log,
logRequests=False,
allow_none=True,
)
prot = "http"
self._port = self._server.socket.getsockname()[1]
addr = f"{self._addr or self._host_name}:{self._port}"
self._url = urlunparse((prot, addr, "", "", "", ""))
self._server.register_introspection_functions()
self._register_standard_api(self._server)
def _start_web_profile_server(self):
self._web_profile_requests_queue = queue.Queue(1)
self._web_profile_requests_result = queue.Queue(1)
self._web_profile_requests_semaphore = queue.Queue(1)
if self._web_profile_dialog is not None:
# TODO: Some sort of duck-typing on the web_profile_dialog object
self._web_profile_dialog.queue_request = self._web_profile_requests_queue
self._web_profile_dialog.queue_result = self._web_profile_requests_result
try:
self._web_profile_server = WebProfileXMLRPCServer(
("localhost", self._web_port), log, logRequests=False, allow_none=True
)
self._web_port = self._web_profile_server.socket.getsockname()[1]
self._web_profile_server.register_introspection_functions()
self._register_web_profile_api(self._web_profile_server)
log.info("Hub set to run with Web Profile support enabled.")
except OSError:
log.warning(
    f"Port {self._web_port} already in use. Impossible to run the "
    "Hub with Web Profile support."
)
self._web_profile = False
# Cleanup
self._web_profile_requests_queue = None
self._web_profile_requests_result = None
self._web_profile_requests_semaphore = None
def _launch_thread(self, group=None, target=None, name=None, args=None):
# Remove inactive threads
remove = []
for t in self._launched_threads:
if not t.is_alive():
remove.append(t)
for t in remove:
self._launched_threads.remove(t)
# Start new thread
t = threading.Thread(group=group, target=target, name=name, args=args)
t.start()
# Add to list of launched threads
self._launched_threads.append(t)
def _join_launched_threads(self, timeout=None):
for t in self._launched_threads:
t.join(timeout=timeout)
def _timeout_test_hub(self):
if self._timeout == 0:
return
last = time.time()
while self._is_running:
time.sleep(0.05) # keep this small to check _is_running often
now = time.time()
if now - last > 1.0:
with self._thread_lock:
if self._last_activity_time is not None:
if now - self._last_activity_time >= self._timeout:
warnings.warn(
"Timeout expired, Hub is shutting down!", SAMPWarning
)
self.stop()
return
last = now
def _timeout_test_client(self):
if self._client_timeout == 0:
return
last = time.time()
while self._is_running:
time.sleep(0.05) # keep this small to check _is_running often
now = time.time()
if now - last > 1.0:
# Iterate over a copy of the keys, since _unregister() below
# mutates the _client_activity_time dictionary
for private_key in list(self._client_activity_time):
if (
now - self._client_activity_time[private_key]
> self._client_timeout
and private_key != self._hub_private_key
):
warnings.warn(
f"Client {private_key} timeout expired!", SAMPWarning
)
self._notify_disconnection(private_key)
self._unregister(private_key)
last = now
def _hub_as_client_request_handler(self, method, args):
if method == "samp.client.receiveCall":
return self._receive_call(*args)
elif method == "samp.client.receiveNotification":
return self._receive_notification(*args)
elif method == "samp.client.receiveResponse":
return self._receive_response(*args)
elif method == "samp.app.ping":
return self._ping(*args)
def _setup_hub_as_client(self):
hub_metadata = {
"samp.name": "Astropy SAMP Hub",
"samp.description.text": self._label,
"author.name": "The Astropy Collaboration",
"samp.documentation.url": "https://docs.astropy.org/en/stable/samp",
"samp.icon.url": self._url + "/samp/icon",
}
result = self._register(self._hub_secret)
self._hub_public_id = result["samp.self-id"]
self._hub_private_key = result["samp.private-key"]
self._set_xmlrpc_callback(self._hub_private_key, self._url)
self._declare_metadata(self._hub_private_key, hub_metadata)
self._declare_subscriptions(
self._hub_private_key, {"samp.app.ping": {}, "x-samp.query.by-meta": {}}
)
def start(self, wait=False):
"""
Start the current SAMP Hub instance and create the lock file. Hub
start-up can be blocking or non blocking depending on the ``wait``
parameter.
Parameters
----------
wait : bool
If `True` then the Hub process is joined with the caller, blocking
the code flow. Usually `True` option is used to run a stand-alone
Hub in an executable script. If `False` (default), then the Hub
process runs in a separated thread. `False` is usually used in a
Python shell.
"""
if self._is_running:
raise SAMPHubError("Hub is already running")
if self._lockfile is not None:
raise SAMPHubError("Hub is not running but lockfile is set")
if self._web_profile:
self._start_web_profile_server()
self._start_standard_server()
self._lockfile = create_lock_file(
lockfilename=self._customlockfilename,
mode=self._mode,
hub_id=self.id,
hub_params=self.params,
)
self._update_last_activity_time()
self._setup_hub_as_client()
self._start_threads()
log.info("Hub started")
if wait and self._is_running:
self._thread_run.join()
self._thread_run = None
@property
def params(self):
"""
The hub parameters (which are written to the logfile).
"""
params = {}
# Keys required by standard profile
params["samp.secret"] = self._hub_secret
params["samp.hub.xmlrpc.url"] = self._url
params["samp.profile.version"] = __profile_version__
# Custom keys
params["hub.id"] = self.id
params["hub.label"] = self._label or f"Hub {self.id}"
return params
def _start_threads(self):
self._thread_run = threading.Thread(target=self._serve_forever)
self._thread_run.daemon = True
if self._timeout > 0:
self._thread_hub_timeout = threading.Thread(
target=self._timeout_test_hub, name="Hub timeout test"
)
self._thread_hub_timeout.daemon = True
else:
self._thread_hub_timeout = None
if self._client_timeout > 0:
self._thread_client_timeout = threading.Thread(
target=self._timeout_test_client, name="Client timeout test"
)
self._thread_client_timeout.daemon = True
else:
self._thread_client_timeout = None
self._is_running = True
self._thread_run.start()
if self._thread_hub_timeout is not None:
self._thread_hub_timeout.start()
if self._thread_client_timeout is not None:
self._thread_client_timeout.start()
def _create_secret_code(self):
if self._hub_secret_code_customized is not None:
return self._hub_secret_code_customized
else:
return str(uuid.uuid1())
def stop(self):
"""
Stop the current SAMP Hub instance and delete the lock file.
"""
if not self._is_running:
return
log.info("Hub is stopping...")
self._notify_shutdown()
self._is_running = False
if self._lockfile and os.path.isfile(self._lockfile):
lockfiledict = read_lockfile(self._lockfile)
if lockfiledict["samp.secret"] == self._hub_secret:
os.remove(self._lockfile)
self._lockfile = None
# Reset variables
# TODO: What happens if not all threads are stopped after timeout?
self._join_all_threads(timeout=10.0)
self._hub_msg_id_counter = 0
self._hub_secret = self._create_secret_code()
self._hub_public_id = ""
self._metadata = {}
self._private_keys = {}
self._mtype2ids = {}
self._id2mtypes = {}
self._xmlrpc_endpoints = {}
self._last_activity_time = None
log.info("Hub stopped.")
def _join_all_threads(self, timeout=None):
# In some cases, ``stop`` may be called from some of the sub-threads,
# so we just need to make sure that we don't try and shut down the
# calling thread.
current_thread = threading.current_thread()
if self._thread_run is not current_thread:
self._thread_run.join(timeout=timeout)
if not self._thread_run.is_alive():
self._thread_run = None
if (
self._thread_hub_timeout is not None
and self._thread_hub_timeout is not current_thread
):
self._thread_hub_timeout.join(timeout=timeout)
if not self._thread_hub_timeout.is_alive():
self._thread_hub_timeout = None
if (
self._thread_client_timeout is not None
and self._thread_client_timeout is not current_thread
):
self._thread_client_timeout.join(timeout=timeout)
if not self._thread_client_timeout.is_alive():
self._thread_client_timeout = None
self._join_launched_threads(timeout=timeout)
@property
def is_running(self):
"""Return an information concerning the Hub running status.
Returns
-------
running : bool
Is the hub running?
"""
return self._is_running
def _serve_forever(self):
while self._is_running:
try:
read_ready = select.select([self._server.socket], [], [], 0.01)[0]
except OSError as exc:
warnings.warn(
f"Call to select() in SAMPHubServer failed: {exc}", SAMPWarning
)
else:
if read_ready:
self._server.handle_request()
if self._web_profile:
# We now check if there are any connection requests from the
# web profile, and if so, we initialize the pop-up.
if self._web_profile_dialog is None:
try:
request = self._web_profile_requests_queue.get_nowait()
except queue.Empty:
pass
else:
web_profile_text_dialog(
request, self._web_profile_requests_result
)
# We now check for requests over the web profile socket, and we
# also update the pop-up in case there are any changes.
try:
read_ready = select.select(
[self._web_profile_server.socket], [], [], 0.01
)[0]
except OSError as exc:
warnings.warn(
f"Call to select() in SAMPHubServer failed: {exc}", SAMPWarning
)
else:
if read_ready:
self._web_profile_server.handle_request()
self._server.server_close()
if self._web_profile_server is not None:
self._web_profile_server.server_close()
def _notify_shutdown(self):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.shutdown")
for mtype in msubs:
if mtype in self._mtype2ids:
for key in self._mtype2ids[mtype]:
self._notify_(
self._hub_private_key,
self._private_keys[key][0],
{"samp.mtype": "samp.hub.event.shutdown", "samp.params": {}},
)
def _notify_register(self, private_key):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.register")
for mtype in msubs:
if mtype in self._mtype2ids:
public_id = self._private_keys[private_key][0]
for key in self._mtype2ids[mtype]:
# if key != private_key:
self._notify(
self._hub_private_key,
self._private_keys[key][0],
{
"samp.mtype": "samp.hub.event.register",
"samp.params": {"id": public_id},
},
)
def _notify_unregister(self, private_key):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.unregister")
for mtype in msubs:
if mtype in self._mtype2ids:
public_id = self._private_keys[private_key][0]
for key in self._mtype2ids[mtype]:
if key != private_key:
self._notify(
self._hub_private_key,
self._private_keys[key][0],
{
"samp.mtype": "samp.hub.event.unregister",
"samp.params": {"id": public_id},
},
)
def _notify_metadata(self, private_key):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.metadata")
for mtype in msubs:
if mtype in self._mtype2ids:
public_id = self._private_keys[private_key][0]
for key in self._mtype2ids[mtype]:
# if key != private_key:
self._notify(
self._hub_private_key,
self._private_keys[key][0],
{
"samp.mtype": "samp.hub.event.metadata",
"samp.params": {
"id": public_id,
"metadata": self._metadata[private_key],
},
},
)
def _notify_subscriptions(self, private_key):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.subscriptions")
for mtype in msubs:
if mtype in self._mtype2ids:
public_id = self._private_keys[private_key][0]
for key in self._mtype2ids[mtype]:
self._notify(
self._hub_private_key,
self._private_keys[key][0],
{
"samp.mtype": "samp.hub.event.subscriptions",
"samp.params": {
"id": public_id,
"subscriptions": self._id2mtypes[private_key],
},
},
)
def _notify_disconnection(self, private_key):
def _xmlrpc_call_disconnect(endpoint, private_key, hub_public_id, message):
endpoint.samp.client.receiveNotification(
private_key, hub_public_id, message
)
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.disconnect")
public_id = self._private_keys[private_key][0]
endpoint = self._xmlrpc_endpoints[public_id][1]
for mtype in msubs:
if mtype in self._mtype2ids and private_key in self._mtype2ids[mtype]:
log.debug(f"notify disconnection to {public_id}")
self._launch_thread(
target=_xmlrpc_call_disconnect,
args=(
endpoint,
private_key,
self._hub_public_id,
{
"samp.mtype": "samp.hub.disconnect",
"samp.params": {"reason": "Timeout expired!"},
},
),
)
def _ping(self):
self._update_last_activity_time()
log.debug("ping")
return "1"
def _query_by_metadata(self, key, value):
public_id_list = []
for private_id in self._metadata:
if key in self._metadata[private_id]:
if self._metadata[private_id][key] == value:
public_id_list.append(self._private_keys[private_id][0])
return public_id_list
def _set_xmlrpc_callback(self, private_key, xmlrpc_addr):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if private_key == self._hub_private_key:
public_id = self._private_keys[private_key][0]
self._xmlrpc_endpoints[public_id] = (
xmlrpc_addr,
_HubAsClient(self._hub_as_client_request_handler),
)
return ""
# Dictionary stored with the public id
log.debug(f"set_xmlrpc_callback: {private_key} {xmlrpc_addr}")
server_proxy_pool = ServerProxyPool(
self._pool_size, xmlrpc.ServerProxy, xmlrpc_addr, allow_none=1
)
public_id = self._private_keys[private_key][0]
self._xmlrpc_endpoints[public_id] = (xmlrpc_addr, server_proxy_pool)
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
return ""
def _perform_standard_register(self):
with self._thread_lock:
private_key, public_id = self._get_new_ids()
self._private_keys[private_key] = (public_id, time.time())
self._update_last_activity_time(private_key)
self._notify_register(private_key)
log.debug(f"register: private-key = {private_key} and self-id = {public_id}")
return {
"samp.self-id": public_id,
"samp.private-key": private_key,
"samp.hub-id": self._hub_public_id,
}
def _register(self, secret):
self._update_last_activity_time()
if secret == self._hub_secret:
return self._perform_standard_register()
else:
# return {"samp.self-id": "", "samp.private-key": "", "samp.hub-id": ""}
raise SAMPProxyError(7, "Bad secret code")
def _get_new_ids(self):
private_key = str(uuid.uuid1())
self._client_id_counter += 1
public_id = "cli#hub"
if self._client_id_counter > 0:
public_id = f"cli#{self._client_id_counter}"
return private_key, public_id
def _unregister(self, private_key):
self._update_last_activity_time()
public_key = ""
self._notify_unregister(private_key)
with self._thread_lock:
if private_key in self._private_keys:
public_key = self._private_keys[private_key][0]
del self._private_keys[private_key]
else:
return ""
if private_key in self._metadata:
del self._metadata[private_key]
if private_key in self._id2mtypes:
del self._id2mtypes[private_key]
for mtype in self._mtype2ids.keys():
if private_key in self._mtype2ids[mtype]:
self._mtype2ids[mtype].remove(private_key)
if public_key in self._xmlrpc_endpoints:
del self._xmlrpc_endpoints[public_key]
if private_key in self._client_activity_time:
del self._client_activity_time[private_key]
if self._web_profile:
if private_key in self._web_profile_callbacks:
del self._web_profile_callbacks[private_key]
self._web_profile_server.remove_client(private_key)
log.debug(f"unregister {public_key} ({private_key})")
return ""
def _declare_metadata(self, private_key, metadata):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
log.debug(
"declare_metadata: private-key = {} metadata = {}".format(
private_key, str(metadata)
)
)
self._metadata[private_key] = metadata
self._notify_metadata(private_key)
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
return ""
def _get_metadata(self, private_key, client_id):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
client_private_key = self._public_id_to_private_key(client_id)
log.debug(
f"get_metadata: private-key = {private_key} client-id = {client_id}"
)
if client_private_key is not None:
if client_private_key in self._metadata:
log.debug(f"--> metadata = {self._metadata[client_private_key]}")
return self._metadata[client_private_key]
else:
return {}
else:
raise SAMPProxyError(6, "Invalid client ID")
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
def _declare_subscriptions(self, private_key, mtypes):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
log.debug(
"declare_subscriptions: private-key = {} mtypes = {}".format(
private_key, str(mtypes)
)
)
# remove subscription to previous mtypes
if private_key in self._id2mtypes:
prev_mtypes = self._id2mtypes[private_key]
for mtype in prev_mtypes:
try:
self._mtype2ids[mtype].remove(private_key)
except ValueError: # private_key is not in list
pass
self._id2mtypes[private_key] = copy.deepcopy(mtypes)
# remove duplicated MType for wildcard overwriting
original_mtypes = copy.deepcopy(mtypes)
for mtype in original_mtypes:
if mtype.endswith("*"):
for mtype2 in original_mtypes:
if mtype2.startswith(mtype[:-1]) and mtype2 != mtype:
if mtype2 in mtypes:
del mtypes[mtype2]
log.debug(
"declare_subscriptions: subscriptions accepted from {} => {}".format(
private_key, str(mtypes)
)
)
for mtype in mtypes:
if mtype in self._mtype2ids:
if private_key not in self._mtype2ids[mtype]:
self._mtype2ids[mtype].append(private_key)
else:
self._mtype2ids[mtype] = [private_key]
self._notify_subscriptions(private_key)
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
return ""
def _get_subscriptions(self, private_key, client_id):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
client_private_key = self._public_id_to_private_key(client_id)
if client_private_key is not None:
if client_private_key in self._id2mtypes:
log.debug(
"get_subscriptions: client-id = {} mtypes = {}".format(
client_id, str(self._id2mtypes[client_private_key])
)
)
return self._id2mtypes[client_private_key]
else:
log.debug(
f"get_subscriptions: client-id = {client_id} mtypes = missing"
)
return {}
else:
raise SAMPProxyError(6, "Invalid client ID")
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
def _get_registered_clients(self, private_key):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
reg_clients = []
for pkey in self._private_keys.keys():
if pkey != private_key:
reg_clients.append(self._private_keys[pkey][0])
log.debug(
"get_registered_clients: private_key = {} clients = {}".format(
private_key, reg_clients
)
)
return reg_clients
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
def _get_subscribed_clients(self, private_key, mtype):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
sub_clients = {}
for pkey in self._private_keys.keys():
if pkey != private_key and self._is_subscribed(pkey, mtype):
sub_clients[self._private_keys[pkey][0]] = {}
log.debug(
"get_subscribed_clients: private_key = {} mtype = {} "
"clients = {}".format(private_key, mtype, sub_clients)
)
return sub_clients
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
@staticmethod
def get_mtype_subtypes(mtype):
"""
Return a list containing all the possible wildcarded subtypes of MType.
Parameters
----------
mtype : str
MType to be parsed.
Returns
-------
types : list
List of subtypes
Examples
--------
>>> from astropy.samp import SAMPHubServer
>>> SAMPHubServer.get_mtype_subtypes("samp.app.ping")
['samp.app.ping', 'samp.app.*', 'samp.*', '*']
"""
subtypes = []
msubs = mtype.split(".")
indexes = list(range(len(msubs)))
indexes.reverse()
indexes.append(-1)
for i in indexes:
tmp_mtype = ".".join(msubs[: i + 1])
if tmp_mtype != mtype:
if tmp_mtype != "":
tmp_mtype = tmp_mtype + ".*"
else:
tmp_mtype = "*"
subtypes.append(tmp_mtype)
return subtypes
def _is_subscribed(self, private_key, mtype):
subscribed = False
msubs = SAMPHubServer.get_mtype_subtypes(mtype)
for msub in msubs:
if msub in self._mtype2ids:
if private_key in self._mtype2ids[msub]:
subscribed = True
return subscribed
def _notify(self, private_key, recipient_id, message):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if not (
self._is_subscribed(
self._public_id_to_private_key(recipient_id), message["samp.mtype"]
)
):
raise SAMPProxyError(
2,
"Client {} not subscribed to MType {}".format(
recipient_id, message["samp.mtype"]
),
)
self._launch_thread(
target=self._notify_, args=(private_key, recipient_id, message)
)
return {}
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
def _notify_(self, sender_private_key, recipient_public_id, message):
if sender_private_key not in self._private_keys:
return
sender_public_id = self._private_keys[sender_private_key][0]
try:
log.debug(
"notify {} from {} to {}".format(
message["samp.mtype"], sender_public_id, recipient_public_id
)
)
recipient_private_key = self._public_id_to_private_key(recipient_public_id)
arg_params = (sender_public_id, message)
samp_method_name = "receiveNotification"
self._retry_method(
recipient_private_key, recipient_public_id, samp_method_name, arg_params
)
except Exception as exc:
warnings.warn(
"{} notification from client {} to client {} failed [{}]".format(
message["samp.mtype"], sender_public_id, recipient_public_id, exc
),
SAMPWarning,
)
def _notify_all(self, private_key, message):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if "samp.mtype" not in message:
raise SAMPProxyError(3, "samp.mtype keyword is missing")
recipient_ids = self._notify_all_(private_key, message)
return recipient_ids
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
def _notify_all_(self, sender_private_key, message):
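        # Expand the MType into its wildcard subtypes so that clients
        # subscribed to e.g. "samp.app.*" also receive "samp.app.ping".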
recipient_ids = []
msubs = SAMPHubServer.get_mtype_subtypes(message["samp.mtype"])
for mtype in msubs:
if mtype in self._mtype2ids:
for key in self._mtype2ids[mtype]:
if key != sender_private_key:
_recipient_id = self._private_keys[key][0]
recipient_ids.append(_recipient_id)
self._launch_thread(
target=self._notify,
args=(sender_private_key, _recipient_id, message),
)
return recipient_ids
def _call(self, private_key, recipient_id, msg_tag, message):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if not (
self._is_subscribed(
self._public_id_to_private_key(recipient_id), message["samp.mtype"]
)
):
raise SAMPProxyError(
2,
"Client {} not subscribed to MType {}".format(
recipient_id, message["samp.mtype"]
),
)
public_id = self._private_keys[private_key][0]
msg_id = self._get_new_hub_msg_id(public_id, msg_tag)
self._launch_thread(
target=self._call_,
args=(private_key, public_id, recipient_id, msg_id, message),
)
return msg_id
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
def _call_(
self, sender_private_key, sender_public_id, recipient_public_id, msg_id, message
):
if sender_private_key not in self._private_keys:
return
try:
log.debug(
"call {} from {} to {} ({})".format(
msg_id.split(";;")[0],
sender_public_id,
recipient_public_id,
message["samp.mtype"],
)
)
recipient_private_key = self._public_id_to_private_key(recipient_public_id)
arg_params = (sender_public_id, msg_id, message)
            samp_method_name = "receiveCall"
            self._retry_method(
                recipient_private_key, recipient_public_id, samp_method_name, arg_params
)
except Exception as exc:
warnings.warn(
"{} call {} from client {} to client {} failed [{},{}]".format(
message["samp.mtype"],
msg_id.split(";;")[0],
sender_public_id,
recipient_public_id,
type(exc),
exc,
),
SAMPWarning,
)
def _call_all(self, private_key, msg_tag, message):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if "samp.mtype" not in message:
raise SAMPProxyError(
3,
f"samp.mtype keyword is missing in message tagged as {msg_tag}",
)
public_id = self._private_keys[private_key][0]
msg_id = self._call_all_(private_key, public_id, msg_tag, message)
return msg_id
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
def _call_all_(self, sender_private_key, sender_public_id, msg_tag, message):
msg_id = {}
msubs = SAMPHubServer.get_mtype_subtypes(message["samp.mtype"])
for mtype in msubs:
if mtype in self._mtype2ids:
for key in self._mtype2ids[mtype]:
if key != sender_private_key:
_msg_id = self._get_new_hub_msg_id(sender_public_id, msg_tag)
receiver_public_id = self._private_keys[key][0]
msg_id[receiver_public_id] = _msg_id
self._launch_thread(
target=self._call_,
args=(
sender_private_key,
sender_public_id,
receiver_public_id,
_msg_id,
message,
),
)
return msg_id
def _call_and_wait(self, private_key, recipient_id, message, timeout):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
timeout = int(timeout)
now = time.time()
response = {}
msg_id = self._call(private_key, recipient_id, "samp::sync::call", message)
self._sync_msg_ids_heap[msg_id] = None
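            # _reply_ stores the recipient's response under msg_id in
            # self._sync_msg_ids_heap, so poll that shared map until the
            # response appears or the timeout expires.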
while self._is_running:
if 0 < timeout <= time.time() - now:
del self._sync_msg_ids_heap[msg_id]
raise SAMPProxyError(1, "Timeout expired!")
if self._sync_msg_ids_heap[msg_id] is not None:
response = copy.deepcopy(self._sync_msg_ids_heap[msg_id])
del self._sync_msg_ids_heap[msg_id]
break
time.sleep(0.01)
return response
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
def _reply(self, private_key, msg_id, response):
"""
The main method that gets called for replying. This starts up an
asynchronous reply thread and returns.
"""
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
self._launch_thread(
target=self._reply_, args=(private_key, msg_id, response)
)
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
return {}
def _reply_(self, responder_private_key, msg_id, response):
if responder_private_key not in self._private_keys or not msg_id:
return
responder_public_id = self._private_keys[responder_private_key][0]
counter, hub_public_id, recipient_public_id, recipient_msg_tag = msg_id.split(
";;", 3
)
try:
log.debug(
f"reply {counter} from {responder_public_id} to {recipient_public_id}"
)
if recipient_msg_tag == "samp::sync::call":
if msg_id in self._sync_msg_ids_heap.keys():
self._sync_msg_ids_heap[msg_id] = response
else:
recipient_private_key = self._public_id_to_private_key(
recipient_public_id
)
arg_params = (responder_public_id, recipient_msg_tag, response)
samp_method_name = "receiveResponse"
self._retry_method(
recipient_private_key,
recipient_public_id,
samp_method_name,
arg_params,
)
except Exception as exc:
warnings.warn(
"{} reply from client {} to client {} failed [{}]".format(
recipient_msg_tag, responder_public_id, recipient_public_id, exc
),
SAMPWarning,
)
def _retry_method(
self, recipient_private_key, recipient_public_id, samp_method_name, arg_params
):
"""
This method is used to retry a SAMP call several times.
Parameters
----------
recipient_private_key
The private key of the receiver of the call
        recipient_public_id
            The public ID of the receiver of the call
samp_method_name : str
The name of the SAMP method to call
arg_params : tuple
Any additional arguments to be passed to the SAMP method
"""
if recipient_private_key is None:
raise SAMPHubError("Invalid client ID")
from . import conf
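        # Retry the call up to conf.n_retries times, pausing briefly
        # between attempts; if every attempt fails, raise SAMPHubError below.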
for attempt in range(conf.n_retries):
if not self._is_running:
time.sleep(0.01)
continue
try:
if (
self._web_profile
and recipient_private_key in self._web_profile_callbacks
):
# Web Profile
callback = {
"samp.methodName": samp_method_name,
"samp.params": arg_params,
}
self._web_profile_callbacks[recipient_private_key].put(callback)
else:
# Standard Profile
hub = self._xmlrpc_endpoints[recipient_public_id][1]
getattr(hub.samp.client, samp_method_name)(
recipient_private_key, *arg_params
)
except xmlrpc.Fault as exc:
log.debug(
"{} XML-RPC endpoint error (attempt {}): {}".format(
recipient_public_id, attempt + 1, exc.faultString
)
)
time.sleep(0.01)
else:
return
# If we are here, then the above attempts failed
error_message = (
samp_method_name + " failed after " + str(conf.n_retries) + " attempts"
)
raise SAMPHubError(error_message)
def _public_id_to_private_key(self, public_id):
for private_key in self._private_keys.keys():
if self._private_keys[private_key][0] == public_id:
return private_key
return None
def _get_new_hub_msg_id(self, sender_public_id, sender_msg_id):
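        # Hub message IDs have the form
        # "msg#<counter>;;<hub-public-id>;;<sender-public-id>;;<sender-msg-tag>";
        # _reply_ splits these fields back out to route the response to the
        # original caller.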
with self._thread_lock:
self._hub_msg_id_counter += 1
return "msg#{};;{};;{};;{}".format(
self._hub_msg_id_counter,
self._hub_public_id,
sender_public_id,
sender_msg_id,
)
def _update_last_activity_time(self, private_key=None):
with self._thread_lock:
self._last_activity_time = time.time()
if private_key is not None:
self._client_activity_time[private_key] = time.time()
def _receive_notification(self, private_key, sender_id, message):
return ""
def _receive_call(self, private_key, sender_id, msg_id, message):
if private_key == self._hub_private_key:
if "samp.mtype" in message and message["samp.mtype"] == "samp.app.ping":
self._reply(
self._hub_private_key,
msg_id,
{"samp.status": SAMP_STATUS_OK, "samp.result": {}},
)
elif "samp.mtype" in message and (
message["samp.mtype"] == "x-samp.query.by-meta"
or message["samp.mtype"] == "samp.query.by-meta"
):
ids_list = self._query_by_metadata(
message["samp.params"]["key"], message["samp.params"]["value"]
)
self._reply(
self._hub_private_key,
msg_id,
{"samp.status": SAMP_STATUS_OK, "samp.result": {"ids": ids_list}},
)
return ""
else:
return ""
def _receive_response(self, private_key, responder_id, msg_tag, response):
return ""
def _web_profile_register(
self, identity_info, client_address=("unknown", 0), origin="unknown"
):
self._update_last_activity_time()
if client_address[0] not in ["localhost", "127.0.0.1"]:
raise SAMPProxyError(403, "Request of registration rejected by the Hub.")
if not origin:
origin = "unknown"
if isinstance(identity_info, dict):
# an old version of the protocol provided just a string with the app name
if "samp.name" not in identity_info:
raise SAMPProxyError(
403,
"Request of registration rejected "
"by the Hub (application name not "
"provided).",
)
# Red semaphore for the other threads
self._web_profile_requests_semaphore.put("wait")
# Set the request to be displayed for the current thread
self._web_profile_requests_queue.put((identity_info, client_address, origin))
# Get the popup dialogue response
response = self._web_profile_requests_result.get()
# OK, semaphore green
self._web_profile_requests_semaphore.get()
if response:
register_map = self._perform_standard_register()
translator_url = "http://localhost:{}/translator/{}?ref=".format(
self._web_port, register_map["samp.private-key"]
)
register_map["samp.url-translator"] = translator_url
self._web_profile_server.add_client(register_map["samp.private-key"])
return register_map
else:
raise SAMPProxyError(403, "Request of registration rejected by the user.")
def _web_profile_allowReverseCallbacks(self, private_key, allow):
self._update_last_activity_time()
if private_key in self._private_keys:
if allow == "0":
if private_key in self._web_profile_callbacks:
del self._web_profile_callbacks[private_key]
else:
self._web_profile_callbacks[private_key] = queue.Queue()
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
return ""
def _web_profile_pullCallbacks(self, private_key, timeout_secs):
self._update_last_activity_time()
if private_key in self._private_keys:
callback = []
callback_queue = self._web_profile_callbacks[private_key]
try:
while self._is_running:
item_queued = callback_queue.get_nowait()
callback.append(item_queued)
except queue.Empty:
pass
return callback
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
class WebProfileDialog:
"""
A base class to make writing Web Profile GUI consent dialogs
easier.
The concrete class must:
1) Poll ``handle_queue`` periodically, using the timer services
of the GUI's event loop. This function will call
``self.show_dialog`` when a request requires authorization.
``self.show_dialog`` will be given the arguments:
- ``samp_name``: The name of the application making the request.
- ``details``: A dictionary of details about the client
making the request.
- ``client``: A hostname, port pair containing the client
address.
- ``origin``: A string containing the origin of the
request.
2) Call ``consent`` or ``reject`` based on the user's response to
the dialog.
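    Examples
    --------
    A minimal, hypothetical console-based subclass (only ``show_dialog``
    is new; ``handle_queue``, ``consent`` and ``reject`` come from this
    base class)::
        class ConsoleProfileDialog(WebProfileDialog):
            def show_dialog(self, samp_name, details, client, origin):
                answer = input(f"Allow {samp_name} from {origin}? [y/N] ")
                if answer.lower() == "y":
                    self.consent()
                else:
                    self.reject()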
"""
def handle_queue(self):
try:
request = self.queue_request.get_nowait()
except queue.Empty: # queue is set but empty
pass
except AttributeError: # queue has not been set yet
pass
else:
if isinstance(request[0], str): # To support the old protocol version
samp_name = request[0]
else:
samp_name = request[0]["samp.name"]
self.show_dialog(samp_name, request[0], request[1], request[2])
def consent(self):
self.queue_result.put(True)
def reject(self):
self.queue_result.put(False)
|
1b7711ff0b710dd7ce8849f75d3014665038bed8e6bc5c79545a762f49338468 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# TODO: this file should be refactored to use a more thread-safe and
# race-condition-safe lockfile mechanism.
import datetime
import os
import stat
import warnings
import xmlrpc.client as xmlrpc
from contextlib import suppress
from urllib.parse import urlparse
from astropy import log
from astropy.config.paths import _find_home
from astropy.utils.data import get_readable_fileobj
from .errors import SAMPHubError, SAMPWarning
def read_lockfile(lockfilename):
"""
Read in the lockfile given by ``lockfilename`` into a dictionary.
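    Examples
    --------
    A lockfile is a plain text file of ``key=value`` pairs with ``#``
    comment lines; a hypothetical, abridged example::
        # SAMP lockfile written on ...
        samp.secret=mysecret
        samp.hub.xmlrpc.url=http://127.0.0.1:12345
    would be returned as::
        {'samp.secret': 'mysecret',
         'samp.hub.xmlrpc.url': 'http://127.0.0.1:12345'}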
"""
# lockfilename may be a local file or a remote URL, but
# get_readable_fileobj takes care of this.
lockfiledict = {}
with get_readable_fileobj(lockfilename) as f:
for line in f:
if not line.startswith("#"):
kw, val = line.split("=")
lockfiledict[kw.strip()] = val.strip()
return lockfiledict
def write_lockfile(lockfilename, lockfiledict):
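    # Create the file and restrict its permissions to owner read/write
    # before writing the content, since the lockfile carries the
    # samp.secret token.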
lockfile = open(lockfilename, "w")
lockfile.close()
os.chmod(lockfilename, stat.S_IREAD + stat.S_IWRITE)
lockfile = open(lockfilename, "w")
now_iso = datetime.datetime.now().isoformat()
lockfile.write(f"# SAMP lockfile written on {now_iso}\n")
lockfile.write("# Standard Profile required keys\n")
for key, value in lockfiledict.items():
lockfile.write(f"{key}={value}\n")
lockfile.close()
def create_lock_file(lockfilename=None, mode=None, hub_id=None, hub_params=None):
# Remove lock-files of dead hubs
remove_garbage_lock_files()
lockfiledir = ""
# CHECK FOR SAMP_HUB ENVIRONMENT VARIABLE
if "SAMP_HUB" in os.environ:
        # For the time being, assume only the Standard Profile is supported.
if os.environ["SAMP_HUB"].startswith("std-lockurl:"):
lockfilename = os.environ["SAMP_HUB"][len("std-lockurl:") :]
lockfile_parsed = urlparse(lockfilename)
if lockfile_parsed[0] != "file":
warnings.warn(
"Unable to start a Hub with lockfile {}. "
"Start-up process aborted.".format(lockfilename),
SAMPWarning,
)
return False
else:
lockfilename = lockfile_parsed[2]
else:
# If it is a fresh Hub instance
if lockfilename is None:
log.debug("Running mode: " + mode)
if mode == "single":
lockfilename = os.path.join(_find_home(), ".samp")
else:
lockfiledir = os.path.join(_find_home(), ".samp-1")
# If missing create .samp-1 directory
try:
os.mkdir(lockfiledir)
except OSError:
pass # directory already exists
finally:
os.chmod(lockfiledir, stat.S_IREAD + stat.S_IWRITE + stat.S_IEXEC)
lockfilename = os.path.join(lockfiledir, f"samp-hub-{hub_id}")
else:
log.debug("Running mode: multiple")
hub_is_running, lockfiledict = check_running_hub(lockfilename)
if hub_is_running:
warnings.warn(
"Another SAMP Hub is already running. Start-up process aborted.",
SAMPWarning,
)
return False
log.debug("Lock-file: " + lockfilename)
write_lockfile(lockfilename, hub_params)
return lockfilename
def get_main_running_hub():
"""
Get either the hub given by the environment variable SAMP_HUB, or the one
given by the lockfile .samp in the user home directory.
"""
hubs = get_running_hubs()
if not hubs:
raise SAMPHubError("Unable to find a running SAMP Hub.")
# CHECK FOR SAMP_HUB ENVIRONMENT VARIABLE
if "SAMP_HUB" in os.environ:
        # For the time being, assume only the Standard Profile is supported.
if os.environ["SAMP_HUB"].startswith("std-lockurl:"):
lockfilename = os.environ["SAMP_HUB"][len("std-lockurl:") :]
else:
raise SAMPHubError("SAMP Hub profile not supported.")
else:
lockfilename = os.path.join(_find_home(), ".samp")
return hubs[lockfilename]
def get_running_hubs():
"""
Return a dictionary containing the lock-file contents of all the currently
running hubs (single and/or multiple mode).
The dictionary format is:
``{<lock-file>: {<token-name>: <token-string>, ...}, ...}``
where ``{<lock-file>}`` is the lock-file name, ``{<token-name>}`` and
``{<token-string>}`` are the lock-file tokens (name and content).
Returns
-------
running_hubs : dict
Lock-file contents of all the currently running hubs.
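    Examples
    --------
    A hypothetical return value for a single running hub (tokens
    abridged)::
        {'/home/user/.samp': {'samp.secret': '...',
                              'samp.hub.xmlrpc.url': 'http://...'}}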
"""
hubs = {}
lockfilename = ""
# HUB SINGLE INSTANCE MODE
# CHECK FOR SAMP_HUB ENVIRONMENT VARIABLE
if "SAMP_HUB" in os.environ:
        # For the time being, assume only the Standard Profile is supported.
if os.environ["SAMP_HUB"].startswith("std-lockurl:"):
lockfilename = os.environ["SAMP_HUB"][len("std-lockurl:") :]
else:
lockfilename = os.path.join(_find_home(), ".samp")
hub_is_running, lockfiledict = check_running_hub(lockfilename)
if hub_is_running:
hubs[lockfilename] = lockfiledict
# HUB MULTIPLE INSTANCE MODE
lockfiledir = ""
lockfiledir = os.path.join(_find_home(), ".samp-1")
if os.path.isdir(lockfiledir):
for filename in os.listdir(lockfiledir):
if filename.startswith("samp-hub"):
lockfilename = os.path.join(lockfiledir, filename)
hub_is_running, lockfiledict = check_running_hub(lockfilename)
if hub_is_running:
hubs[lockfilename] = lockfiledict
return hubs
def check_running_hub(lockfilename):
"""
Test whether a hub identified by ``lockfilename`` is running or not.
Parameters
----------
lockfilename : str
Lock-file name (path + file name) of the Hub to be tested.
Returns
-------
is_running : bool
Whether the hub is running
hub_params : dict
If the hub is running this contains the parameters from the lockfile
"""
is_running = False
lockfiledict = {}
# Check whether a lockfile already exists
try:
lockfiledict = read_lockfile(lockfilename)
except OSError:
return is_running, lockfiledict
if "samp.hub.xmlrpc.url" in lockfiledict:
try:
proxy = xmlrpc.ServerProxy(
lockfiledict["samp.hub.xmlrpc.url"].replace("\\", ""), allow_none=1
)
proxy.samp.hub.ping()
is_running = True
except xmlrpc.ProtocolError:
# There is a protocol error (e.g. for authentication required),
# but the server is alive
is_running = True
except OSError:
pass
return is_running, lockfiledict
def remove_garbage_lock_files():
lockfilename = ""
# HUB SINGLE INSTANCE MODE
lockfilename = os.path.join(_find_home(), ".samp")
hub_is_running, lockfiledict = check_running_hub(lockfilename)
if not hub_is_running:
# If lockfilename belongs to a dead hub, then it is deleted
if os.path.isfile(lockfilename):
with suppress(OSError):
os.remove(lockfilename)
# HUB MULTIPLE INSTANCE MODE
lockfiledir = os.path.join(_find_home(), ".samp-1")
if os.path.isdir(lockfiledir):
for filename in os.listdir(lockfiledir):
if filename.startswith("samp-hub"):
lockfilename = os.path.join(lockfiledir, filename)
hub_is_running, lockfiledict = check_running_hub(lockfilename)
if not hub_is_running:
# If lockfilename belongs to a dead hub, then it is deleted
if os.path.isfile(lockfilename):
with suppress(OSError):
os.remove(lockfilename)
|
4e201e9f0c789de06c7fb3773017e357699b805ae4dc547e495c0c08962ea156 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import argparse
import copy
import sys
import time
from astropy import __version__, log
from .hub import SAMPHubServer
__all__ = ["hub_script"]
def hub_script(timeout=0):
"""
This main function is executed by the ``samp_hub`` command line tool.
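    Examples
    --------
    Typical shell invocations (the options are described below)::
        samp_hub                 # start a Hub with default settings
        samp_hub -w              # disable the Web Profile
        samp_hub -t 600 -c 120   # Hub/client inactivity timeouts in seconds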
"""
parser = argparse.ArgumentParser(prog="samp_hub " + __version__)
parser.add_argument(
"-k", "--secret", dest="secret", metavar="CODE", help="custom secret code."
)
parser.add_argument(
"-d", "--addr", dest="addr", metavar="ADDR", help="listening address (or IP)."
)
parser.add_argument(
"-p",
"--port",
dest="port",
metavar="PORT",
type=int,
help="listening port number.",
)
parser.add_argument(
"-f", "--lockfile", dest="lockfile", metavar="FILE", help="custom lockfile."
)
parser.add_argument(
"-w",
"--no-web-profile",
dest="web_profile",
action="store_false",
help="run the Hub disabling the Web Profile.",
default=True,
)
parser.add_argument(
"-P",
"--pool-size",
dest="pool_size",
metavar="SIZE",
type=int,
help="the socket connections pool size.",
default=20,
)
    timeout_group = parser.add_argument_group(
        "Timeout group",
        "Special options to set up the Hub and client inactivity timeouts, "
        "that is, the Hub or client inactivity time interval after which "
        "the Hub shuts down or unregisters the client. A notification of "
        "the samp.hub.disconnect MType is sent to clients that are "
        "forcibly unregistered because their timeout expired.",
    )
)
timeout_group.add_argument(
"-t",
"--timeout",
dest="timeout",
metavar="SECONDS",
help=(
"set the Hub inactivity timeout in SECONDS. By default it "
"is set to 0, that is the Hub never expires."
),
type=int,
default=0,
)
timeout_group.add_argument(
"-c",
"--client-timeout",
dest="client_timeout",
metavar="SECONDS",
help=(
"set the client inactivity timeout in SECONDS. By default it "
"is set to 0, that is the client never expires."
),
type=int,
default=0,
)
parser.add_argument_group(timeout_group)
    log_group = parser.add_argument_group(
        "Logging options",
        "Additional options to customize the logging output. By default "
        "the SAMP Hub uses the standard output and standard error devices "
        "to print out INFO level logging messages. The options below make "
        "it possible to change the logging level and to redirect the "
        "logging messages to output files.",
    )
log_group.add_argument(
"-L",
"--log-level",
dest="loglevel",
metavar="LEVEL",
help="set the Hub instance log level (OFF, ERROR, WARNING, INFO, DEBUG).",
type=str,
choices=["OFF", "ERROR", "WARNING", "INFO", "DEBUG"],
default="INFO",
)
log_group.add_argument(
"-O",
"--log-output",
dest="logout",
metavar="FILE",
help="set the output file for the log messages.",
default="",
)
parser.add_argument_group(log_group)
    adv_group = parser.add_argument_group(
        "Advanced group",
        "Advanced options intended to facilitate administrative tasks and "
        "to allow new non-standard Hub behaviors. In particular the "
        "--label option assigns a value to the hub.label token, naming "
        "the Hub instance. "
        "The special --multi option starts a Hub in multi-instance mode. "
        "Multi-instance mode is a non-standard Hub behavior that enables "
        "multiple simultaneously running Hubs. Multi-instance hubs place "
        "their non-standard lock-files within the <home directory>/.samp-1 "
        "directory, naming them with the format "
        "samp-hub-<PID>-<ID>, where PID is the Hub process ID and ID is an "
        "internal integer ID.",
    )
adv_group.add_argument(
"-l",
"--label",
dest="label",
metavar="LABEL",
help="assign a LABEL to the Hub.",
default="",
)
adv_group.add_argument(
"-m",
"--multi",
dest="mode",
help=(
"run the Hub in multi-instance mode generating a custom "
"lockfile with a random name."
),
action="store_const",
const="multiple",
default="single",
)
parser.add_argument_group(adv_group)
options = parser.parse_args()
try:
if options.loglevel in ("OFF", "ERROR", "WARNING", "DEBUG", "INFO"):
log.setLevel(options.loglevel)
if options.logout != "":
context = log.log_to_file(options.logout)
else:
class dummy_context:
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
pass
context = dummy_context()
with context:
args = copy.deepcopy(options.__dict__)
del args["loglevel"]
del args["logout"]
hub = SAMPHubServer(**args)
hub.start(False)
if not timeout:
while hub.is_running:
time.sleep(0.01)
else:
time.sleep(timeout)
hub.stop()
except KeyboardInterrupt:
try:
hub.stop()
except NameError:
pass
except OSError as e:
print(f"[SAMP] Error: I/O error({e.errno}): {e.strerror}")
sys.exit(1)
except SystemExit:
pass
|
40e84d2cf0653036bae08f61cce55446b3b76b2497c7def3ebc99fa17abfa9f0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Defines custom errors and exceptions used in `astropy.samp`.
"""
import xmlrpc.client as xmlrpc
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ["SAMPWarning", "SAMPHubError", "SAMPClientError", "SAMPProxyError"]
class SAMPWarning(AstropyUserWarning):
"""
SAMP-specific Astropy warning class.
"""
class SAMPHubError(Exception):
"""
SAMP Hub exception.
"""
class SAMPClientError(Exception):
"""
SAMP Client exceptions.
"""
class SAMPProxyError(xmlrpc.Fault):
"""
SAMP Proxy Hub exception.
"""
|
d1db8e1a31564f4032d940b80a705870388b9dac9f5d0e585bbd6a4754b1216f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from .client import SAMPClient
from .hub_proxy import SAMPHubProxy
__all__ = ["SAMPIntegratedClient"]
__doctest_skip__ = ["SAMPIntegratedClient.*"]
class SAMPIntegratedClient:
"""
A Simple SAMP client.
    This class is meant to simplify client usage by providing a proxy class
    that merges the :class:`~astropy.samp.SAMPClient` and
    :class:`~astropy.samp.SAMPHubProxy` functionalities into a
    simplified API.
Parameters
----------
name : str, optional
Client name (corresponding to ``samp.name`` metadata keyword).
description : str, optional
Client description (corresponding to ``samp.description.text`` metadata
keyword).
metadata : dict, optional
Client application metadata in the standard SAMP format.
addr : str, optional
Listening address (or IP). This defaults to 127.0.0.1 if the internet
is not reachable, otherwise it defaults to the host name.
port : int, optional
Listening XML-RPC server socket port. If left set to 0 (the default),
the operating system will select a free port.
callable : bool, optional
Whether the client can receive calls and notifications. If set to
`False`, then the client can send notifications and calls, but can not
receive any.
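    Examples
    --------
    A minimal usage sketch (requires a running SAMP Hub; the client name
    and the returned IDs below are illustrative):
    >>> from astropy.samp import SAMPIntegratedClient
    >>> client = SAMPIntegratedClient(name="my_client")
    >>> client.connect()
    >>> client.get_registered_clients()
    ['hub', 'c1']
    >>> client.disconnect()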
"""
def __init__(
self,
name=None,
description=None,
metadata=None,
addr=None,
port=0,
callable=True,
):
self.hub = SAMPHubProxy()
self.client_arguments = {
"name": name,
"description": description,
"metadata": metadata,
"addr": addr,
"port": port,
"callable": callable,
}
"""
Collected arguments that should be passed on to the SAMPClient below.
The SAMPClient used to be instantiated in __init__; however, this
caused problems with disconnecting and reconnecting to the HUB.
The client_arguments is used to maintain backwards compatibility.
"""
self.client = None
"The client will be instantiated upon connect()."
# GENERAL
@property
def is_connected(self):
"""
Testing method to verify the client connection with a running Hub.
Returns
-------
is_connected : bool
True if the client is connected to a Hub, False otherwise.
"""
return self.hub.is_connected and self.client.is_running
def connect(self, hub=None, hub_params=None, pool_size=20):
"""
Connect with the current or specified SAMP Hub, start and register the
client.
Parameters
----------
hub : `~astropy.samp.SAMPHubServer`, optional
The hub to connect to.
hub_params : dict, optional
Optional dictionary containing the lock-file content of the Hub
with which to connect. This dictionary has the form
``{<token-name>: <token-string>, ...}``.
pool_size : int, optional
The number of socket connections opened to communicate with the
Hub.
"""
self.hub.connect(hub, hub_params, pool_size)
# The client has to be instantiated here and not in __init__() because
# this allows disconnecting and reconnecting to the HUB. Nonetheless,
# the client_arguments are set in __init__() because the
# instantiation of the client used to happen there and this retains
# backwards compatibility.
self.client = SAMPClient(self.hub, **self.client_arguments)
self.client.start()
self.client.register()
def disconnect(self):
"""
Unregister the client from the current SAMP Hub, stop the client and
disconnect from the Hub.
"""
if self.is_connected:
try:
self.client.unregister()
finally:
if self.client.is_running:
self.client.stop()
self.hub.disconnect()
# HUB
def ping(self):
"""
Proxy to ``ping`` SAMP Hub method (Standard Profile only).
"""
return self.hub.ping()
def declare_metadata(self, metadata):
"""
Proxy to ``declareMetadata`` SAMP Hub method.
"""
return self.client.declare_metadata(metadata)
def get_metadata(self, client_id):
"""
Proxy to ``getMetadata`` SAMP Hub method.
"""
return self.hub.get_metadata(self.get_private_key(), client_id)
def get_subscriptions(self, client_id):
"""
Proxy to ``getSubscriptions`` SAMP Hub method.
"""
return self.hub.get_subscriptions(self.get_private_key(), client_id)
def get_registered_clients(self):
"""
Proxy to ``getRegisteredClients`` SAMP Hub method.
This returns all the registered clients, excluding the current client.
"""
return self.hub.get_registered_clients(self.get_private_key())
def get_subscribed_clients(self, mtype):
"""
Proxy to ``getSubscribedClients`` SAMP Hub method.
"""
return self.hub.get_subscribed_clients(self.get_private_key(), mtype)
def _format_easy_msg(self, mtype, params):
msg = {}
if "extra_kws" in params:
extra = params["extra_kws"]
del params["extra_kws"]
msg = {"samp.mtype": mtype, "samp.params": params}
msg.update(extra)
else:
msg = {"samp.mtype": mtype, "samp.params": params}
return msg
def notify(self, recipient_id, message):
"""
Proxy to ``notify`` SAMP Hub method.
"""
return self.hub.notify(self.get_private_key(), recipient_id, message)
def enotify(self, recipient_id, mtype, **params):
"""
Easy to use version of :meth:`~astropy.samp.integrated_client.SAMPIntegratedClient.notify`.
        This is a proxy to the ``notify`` method that allows the
        notification message to be sent in a simplified way.
        Note that the reserved ``extra_kws`` keyword is a dictionary with the
special meaning of being used to add extra keywords, in addition to
the standard ``samp.mtype`` and ``samp.params``, to the message sent.
Parameters
----------
recipient_id : str
Recipient ID
mtype : str
the MType to be notified
params : dict or set of str
Variable keyword set which contains the list of parameters for the
specified MType.
Examples
--------
>>> from astropy.samp import SAMPIntegratedClient
>>> cli = SAMPIntegratedClient()
>>> ...
>>> cli.enotify("samp.msg.progress", msgid = "xyz", txt = "initialization",
... percent = "10", extra_kws = {"my.extra.info": "just an example"})
"""
return self.notify(recipient_id, self._format_easy_msg(mtype, params))
def notify_all(self, message):
"""
Proxy to ``notifyAll`` SAMP Hub method.
"""
return self.hub.notify_all(self.get_private_key(), message)
def enotify_all(self, mtype, **params):
"""
Easy to use version of :meth:`~astropy.samp.integrated_client.SAMPIntegratedClient.notify_all`.
        This is a proxy to the ``notifyAll`` method that allows the
        notification message to be sent in a simplified way.
        Note that the reserved ``extra_kws`` keyword is a dictionary with the
special meaning of being used to add extra keywords, in addition to
the standard ``samp.mtype`` and ``samp.params``, to the message sent.
Parameters
----------
mtype : str
MType to be notified.
params : dict or set of str
Variable keyword set which contains the list of parameters for
the specified MType.
Examples
--------
>>> from astropy.samp import SAMPIntegratedClient
>>> cli = SAMPIntegratedClient()
>>> ...
>>> cli.enotify_all("samp.msg.progress", txt = "initialization",
... percent = "10",
... extra_kws = {"my.extra.info": "just an example"})
"""
return self.notify_all(self._format_easy_msg(mtype, params))
def call(self, recipient_id, msg_tag, message):
"""
Proxy to ``call`` SAMP Hub method.
"""
return self.hub.call(self.get_private_key(), recipient_id, msg_tag, message)
def ecall(self, recipient_id, msg_tag, mtype, **params):
"""
Easy to use version of :meth:`~astropy.samp.integrated_client.SAMPIntegratedClient.call`.
        This is a proxy to the ``call`` method that allows a call message
        to be sent in a simplified way.
        Note that the reserved ``extra_kws`` keyword is a dictionary with the
special meaning of being used to add extra keywords, in addition to
the standard ``samp.mtype`` and ``samp.params``, to the message sent.
Parameters
----------
recipient_id : str
Recipient ID
msg_tag : str
Message tag to use
mtype : str
MType to be sent
        params : dict or set of str
Variable keyword set which contains the list of parameters for
the specified MType.
Examples
--------
>>> from astropy.samp import SAMPIntegratedClient
>>> cli = SAMPIntegratedClient()
>>> ...
>>> msgid = cli.ecall("abc", "xyz", "samp.msg.progress",
... txt = "initialization", percent = "10",
... extra_kws = {"my.extra.info": "just an example"})
"""
return self.call(recipient_id, msg_tag, self._format_easy_msg(mtype, params))
def call_all(self, msg_tag, message):
"""
Proxy to ``callAll`` SAMP Hub method.
"""
return self.hub.call_all(self.get_private_key(), msg_tag, message)
def ecall_all(self, msg_tag, mtype, **params):
"""
Easy to use version of :meth:`~astropy.samp.integrated_client.SAMPIntegratedClient.call_all`.
        This is a proxy to the ``callAll`` method that allows the call
        message to be sent in a simplified way.
        Note that the reserved ``extra_kws`` keyword is a dictionary with the
special meaning of being used to add extra keywords, in addition to
the standard ``samp.mtype`` and ``samp.params``, to the message sent.
Parameters
----------
msg_tag : str
Message tag to use
mtype : str
MType to be sent
        params : dict or set of str
Variable keyword set which contains the list of parameters for
the specified MType.
Examples
--------
>>> from astropy.samp import SAMPIntegratedClient
>>> cli = SAMPIntegratedClient()
>>> ...
>>> msgid = cli.ecall_all("xyz", "samp.msg.progress",
... txt = "initialization", percent = "10",
... extra_kws = {"my.extra.info": "just an example"})
"""
        return self.call_all(msg_tag, self._format_easy_msg(mtype, params))
def call_and_wait(self, recipient_id, message, timeout):
"""
Proxy to ``callAndWait`` SAMP Hub method.
"""
return self.hub.call_and_wait(
self.get_private_key(), recipient_id, message, timeout
)
def ecall_and_wait(self, recipient_id, mtype, timeout, **params):
"""
Easy to use version of :meth:`~astropy.samp.integrated_client.SAMPIntegratedClient.call_and_wait`.
        This is a proxy to the ``callAndWait`` method that allows the call
        message to be sent in a simplified way.
        Note that the reserved ``extra_kws`` keyword is a dictionary with the
special meaning of being used to add extra keywords, in addition to
the standard ``samp.mtype`` and ``samp.params``, to the message sent.
Parameters
----------
recipient_id : str
Recipient ID
mtype : str
MType to be sent
timeout : str
Call timeout in seconds
        params : dict or set of str
Variable keyword set which contains the list of parameters for
the specified MType.
Examples
--------
>>> from astropy.samp import SAMPIntegratedClient
>>> cli = SAMPIntegratedClient()
>>> ...
>>> cli.ecall_and_wait("xyz", "samp.msg.progress", "5",
... txt = "initialization", percent = "10",
... extra_kws = {"my.extra.info": "just an example"})
"""
return self.call_and_wait(
recipient_id, self._format_easy_msg(mtype, params), timeout
)
def reply(self, msg_id, response):
"""
Proxy to ``reply`` SAMP Hub method.
"""
return self.hub.reply(self.get_private_key(), msg_id, response)
def _format_easy_response(self, status, result, error):
msg = {"samp.status": status}
if result is not None:
msg.update({"samp.result": result})
if error is not None:
msg.update({"samp.error": error})
return msg
def ereply(self, msg_id, status, result=None, error=None):
"""
Easy to use version of :meth:`~astropy.samp.integrated_client.SAMPIntegratedClient.reply`.
        This is a proxy to the ``reply`` method that allows a reply
        message to be sent in a simplified way.
Parameters
----------
msg_id : str
Message ID to which reply.
status : str
Content of the ``samp.status`` response keyword.
result : dict
Content of the ``samp.result`` response keyword.
error : dict
Content of the ``samp.error`` response keyword.
Examples
--------
>>> from astropy.samp import SAMPIntegratedClient, SAMP_STATUS_ERROR
>>> cli = SAMPIntegratedClient()
>>> ...
>>> cli.ereply("abd", SAMP_STATUS_ERROR, result={},
... error={"samp.errortxt": "Test error message"})
"""
return self.reply(msg_id, self._format_easy_response(status, result, error))
# CLIENT
def receive_notification(self, private_key, sender_id, message):
return self.client.receive_notification(private_key, sender_id, message)
receive_notification.__doc__ = SAMPClient.receive_notification.__doc__
def receive_call(self, private_key, sender_id, msg_id, message):
return self.client.receive_call(private_key, sender_id, msg_id, message)
receive_call.__doc__ = SAMPClient.receive_call.__doc__
def receive_response(self, private_key, responder_id, msg_tag, response):
return self.client.receive_response(
private_key, responder_id, msg_tag, response
)
receive_response.__doc__ = SAMPClient.receive_response.__doc__
def bind_receive_message(self, mtype, function, declare=True, metadata=None):
        self.client.bind_receive_message(
            mtype, function, declare=declare, metadata=metadata
        )
bind_receive_message.__doc__ = SAMPClient.bind_receive_message.__doc__
def bind_receive_notification(self, mtype, function, declare=True, metadata=None):
self.client.bind_receive_notification(mtype, function, declare, metadata)
bind_receive_notification.__doc__ = SAMPClient.bind_receive_notification.__doc__
def bind_receive_call(self, mtype, function, declare=True, metadata=None):
self.client.bind_receive_call(mtype, function, declare, metadata)
bind_receive_call.__doc__ = SAMPClient.bind_receive_call.__doc__
def bind_receive_response(self, msg_tag, function):
self.client.bind_receive_response(msg_tag, function)
bind_receive_response.__doc__ = SAMPClient.bind_receive_response.__doc__
def unbind_receive_notification(self, mtype, declare=True):
self.client.unbind_receive_notification(mtype, declare)
unbind_receive_notification.__doc__ = SAMPClient.unbind_receive_notification.__doc__
def unbind_receive_call(self, mtype, declare=True):
self.client.unbind_receive_call(mtype, declare)
unbind_receive_call.__doc__ = SAMPClient.unbind_receive_call.__doc__
def unbind_receive_response(self, msg_tag):
self.client.unbind_receive_response(msg_tag)
unbind_receive_response.__doc__ = SAMPClient.unbind_receive_response.__doc__
def declare_subscriptions(self, subscriptions=None):
self.client.declare_subscriptions(subscriptions)
declare_subscriptions.__doc__ = SAMPClient.declare_subscriptions.__doc__
def get_private_key(self):
return self.client.get_private_key()
get_private_key.__doc__ = SAMPClient.get_private_key.__doc__
def get_public_id(self):
return self.client.get_public_id()
get_public_id.__doc__ = SAMPClient.get_public_id.__doc__
|
e075854903974245a2b8fdcc9dbe9025bcd3252d1c832135de975f7c5eeb6530 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import socketserver
import sys
import traceback
import warnings
import xmlrpc.client as xmlrpc
from xmlrpc.server import SimpleXMLRPCRequestHandler, SimpleXMLRPCServer
from .constants import SAMP_ICON
from .errors import SAMPWarning
__all__ = []
class SAMPSimpleXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
"""
XMLRPC handler of Standard Profile requests.
"""
def do_GET(self):
if self.path == "/samp/icon":
self.send_response(200, "OK")
self.send_header("Content-Type", "image/png")
self.end_headers()
self.wfile.write(SAMP_ICON)
def do_POST(self):
"""
Handles the HTTP POST request.
Attempts to interpret all HTTP POST requests as XML-RPC calls,
which are forwarded to the server's ``_dispatch`` method for
handling.
"""
# Check that the path is legal
if not self.is_rpc_path_valid():
self.report_404()
return
try:
# Get arguments by reading body of request.
# We read this in chunks to avoid straining
# socket.read(); around the 10 or 15Mb mark, some platforms
# begin to have problems (bug #792570).
max_chunk_size = 10 * 1024 * 1024
size_remaining = int(self.headers["content-length"])
L = []
while size_remaining:
chunk_size = min(size_remaining, max_chunk_size)
L.append(self.rfile.read(chunk_size))
size_remaining -= len(L[-1])
data = b"".join(L)
params, method = xmlrpc.loads(data)
if method == "samp.webhub.register":
params = list(params)
params.append(self.client_address)
if "Origin" in self.headers:
params.append(self.headers.get("Origin"))
else:
params.append("unknown")
params = tuple(params)
data = xmlrpc.dumps(params, methodname=method)
elif method in (
"samp.hub.notify",
"samp.hub.notifyAll",
"samp.hub.call",
"samp.hub.callAll",
"samp.hub.callAndWait",
):
user = "unknown"
if method == "samp.hub.callAndWait":
params[2]["host"] = self.address_string()
params[2]["user"] = user
else:
params[-1]["host"] = self.address_string()
params[-1]["user"] = user
data = xmlrpc.dumps(params, methodname=method)
data = self.decode_request_content(data)
if data is None:
return # response has been sent
# In previous versions of SimpleXMLRPCServer, _dispatch
# could be overridden in this class, instead of in
# SimpleXMLRPCDispatcher. To maintain backwards compatibility,
# check to see if a subclass implements _dispatch and dispatch
# using that method if present.
response = self.server._marshaled_dispatch(
data, getattr(self, "_dispatch", None), self.path
)
except Exception as e:
# This should only happen if the module is buggy
# internal error, report as HTTP server error
self.send_response(500)
# Send information about the exception if requested
if (
hasattr(self.server, "_send_traceback_header")
and self.server._send_traceback_header
):
self.send_header("X-exception", str(e))
trace = traceback.format_exc()
trace = str(trace.encode("ASCII", "backslashreplace"), "ASCII")
self.send_header("X-traceback", trace)
self.send_header("Content-length", "0")
self.end_headers()
else:
# got a valid XML RPC response
self.send_response(200)
self.send_header("Content-type", "text/xml")
if self.encode_threshold is not None:
if len(response) > self.encode_threshold:
q = self.accept_encodings().get("gzip", 0)
if q:
try:
response = xmlrpc.gzip_encode(response)
self.send_header("Content-Encoding", "gzip")
except NotImplementedError:
pass
self.send_header("Content-length", str(len(response)))
self.end_headers()
self.wfile.write(response)
class ThreadingXMLRPCServer(socketserver.ThreadingMixIn, SimpleXMLRPCServer):
"""
Asynchronous multithreaded XMLRPC server.
"""
def __init__(
self,
addr,
log=None,
requestHandler=SAMPSimpleXMLRPCRequestHandler,
logRequests=True,
allow_none=True,
encoding=None,
):
self.log = log
SimpleXMLRPCServer.__init__(
self, addr, requestHandler, logRequests, allow_none, encoding
)
def handle_error(self, request, client_address):
if self.log is None:
socketserver.BaseServer.handle_error(self, request, client_address)
else:
warnings.warn(
"Exception happened during processing of request from {}: {}".format(
client_address, sys.exc_info()[1]
),
SAMPWarning,
)
|
b4d7bba433bbc8d953adde59e5aa9b763b6f84bea1441e4c1878acd4c094f7ec | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import os
import select
import socket
import threading
import warnings
from urllib.parse import urlunparse
from .constants import SAMP_STATUS_OK, SAMP_STATUS_WARNING
from .errors import SAMPClientError, SAMPWarning
from .hub import SAMPHubServer
from .standard_profile import ThreadingXMLRPCServer
from .utils import get_num_args, internet_on
__all__ = ["SAMPClient"]
class SAMPClient:
"""
Utility class which provides facilities to create and manage a SAMP
compliant XML-RPC server that acts as SAMP callable client application.
Parameters
----------
hub : :class:`~astropy.samp.SAMPHubProxy`
An instance of :class:`~astropy.samp.SAMPHubProxy` to be
used for messaging with the SAMP Hub.
name : str, optional
Client name (corresponding to ``samp.name`` metadata keyword).
description : str, optional
Client description (corresponding to ``samp.description.text`` metadata
keyword).
metadata : dict, optional
Client application metadata in the standard SAMP format.
addr : str, optional
Listening address (or IP). This defaults to 127.0.0.1 if the internet
is not reachable, otherwise it defaults to the host name.
port : int, optional
Listening XML-RPC server socket port. If left set to 0 (the default),
the operating system will select a free port.
callable : bool, optional
Whether the client can receive calls and notifications. If set to
`False`, then the client can send notifications and calls, but can not
receive any.
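    Examples
    --------
    A hypothetical sketch of low-level usage (normally wrapped by
    `~astropy.samp.SAMPIntegratedClient`; requires a running Hub):
    >>> from astropy.samp import SAMPClient, SAMPHubProxy
    >>> proxy = SAMPHubProxy()
    >>> proxy.connect()
    >>> client = SAMPClient(proxy, name="my_client")
    >>> client.start()
    >>> client.register()
    >>> client.unregister()
    >>> client.stop()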
"""
# TODO: define what is meant by callable
def __init__(
self,
hub,
name=None,
description=None,
metadata=None,
addr=None,
port=0,
callable=True,
):
# GENERAL
self._is_running = False
self._is_registered = False
if metadata is None:
metadata = {}
if name is not None:
metadata["samp.name"] = name
if description is not None:
metadata["samp.description.text"] = description
self._metadata = metadata
self._addr = addr
self._port = port
self._xmlrpcAddr = None
self._callable = callable
# HUB INTERACTION
self.client = None
self._public_id = None
self._private_key = None
self._hub_id = None
self._notification_bindings = {}
self._call_bindings = {
"samp.app.ping": [self._ping, {}],
"client.env.get": [self._client_env_get, {}],
}
self._response_bindings = {}
self._host_name = "127.0.0.1"
if internet_on():
try:
self._host_name = socket.getfqdn()
socket.getaddrinfo(self._addr or self._host_name, self._port or 0)
except OSError:
self._host_name = "127.0.0.1"
self.hub = hub
if self._callable:
self._thread = threading.Thread(target=self._serve_forever)
self._thread.daemon = True
self.client = ThreadingXMLRPCServer(
(self._addr or self._host_name, self._port),
logRequests=False,
allow_none=True,
)
self.client.register_introspection_functions()
self.client.register_function(
self.receive_notification, "samp.client.receiveNotification"
)
self.client.register_function(self.receive_call, "samp.client.receiveCall")
self.client.register_function(
self.receive_response, "samp.client.receiveResponse"
)
# If the port was set to zero, then the operating system has
# selected a free port. We now check what this port number is.
if self._port == 0:
self._port = self.client.socket.getsockname()[1]
protocol = "http"
self._xmlrpcAddr = urlunparse(
(
protocol,
f"{self._addr or self._host_name}:{self._port}",
"",
"",
"",
"",
)
)
def start(self):
"""
Start the client in a separate thread (non-blocking).
This only has an effect if ``callable`` was set to `True` when
initializing the client.
"""
if self._callable:
self._is_running = True
self._run_client()
def stop(self, timeout=10.0):
"""
Stop the client.
Parameters
----------
timeout : float
Timeout after which to give up if the client cannot be cleanly
shut down.
"""
# Setting _is_running to False causes the loop in _serve_forever to
# exit. The thread should then stop running. We wait for the thread to
# terminate until the timeout, then we continue anyway.
self._is_running = False
if self._callable and self._thread.is_alive():
self._thread.join(timeout)
if self._thread.is_alive():
raise SAMPClientError(
f"Client was not shut down successfully (timeout={timeout}s)"
)
@property
def is_running(self):
"""
Whether the client is currently running.
"""
return self._is_running
@property
def is_registered(self):
"""
Whether the client is currently registered.
"""
return self._is_registered
def _run_client(self):
if self._callable:
self._thread.start()
def _serve_forever(self):
while self._is_running:
try:
read_ready = select.select([self.client.socket], [], [], 0.1)[0]
except OSError as exc:
warnings.warn(
f"Call to select in SAMPClient failed: {exc}", SAMPWarning
)
else:
if read_ready:
self.client.handle_request()
self.client.server_close()
def _ping(self, private_key, sender_id, msg_id, msg_mtype, msg_params, message):
reply = {"samp.status": SAMP_STATUS_OK, "samp.result": {}}
self.hub.reply(private_key, msg_id, reply)
def _client_env_get(
self, private_key, sender_id, msg_id, msg_mtype, msg_params, message
):
if msg_params["name"] in os.environ:
reply = {
"samp.status": SAMP_STATUS_OK,
"samp.result": {"value": os.environ[msg_params["name"]]},
}
else:
reply = {
"samp.status": SAMP_STATUS_WARNING,
"samp.result": {"value": ""},
"samp.error": {"samp.errortxt": "Environment variable not defined."},
}
self.hub.reply(private_key, msg_id, reply)
def _handle_notification(self, private_key, sender_id, message):
if private_key == self.get_private_key() and "samp.mtype" in message:
msg_mtype = message["samp.mtype"]
del message["samp.mtype"]
msg_params = message["samp.params"]
del message["samp.params"]
msubs = SAMPHubServer.get_mtype_subtypes(msg_mtype)
for mtype in msubs:
if mtype in self._notification_bindings:
bound_func = self._notification_bindings[mtype][0]
if get_num_args(bound_func) == 5:
bound_func(
private_key, sender_id, msg_mtype, msg_params, message
)
else:
bound_func(
private_key, sender_id, None, msg_mtype, msg_params, message
)
return ""
def receive_notification(self, private_key, sender_id, message):
"""
Standard callable client ``receive_notification`` method.
This method is automatically handled when the
:meth:`~astropy.samp.client.SAMPClient.bind_receive_notification`
method is used to bind distinct operations to MTypes. In case of a
customized callable client implementation that inherits from the
:class:`~astropy.samp.SAMPClient` class this method should be
overwritten.
.. note:: When overwritten, this method must always return
a string result (even empty).
Parameters
----------
private_key : str
Client private key.
sender_id : str
Sender public ID.
message : dict
Received message.
Returns
-------
confirmation : str
Any confirmation string.
"""
return self._handle_notification(private_key, sender_id, message)
def _handle_call(self, private_key, sender_id, msg_id, message):
if private_key == self.get_private_key() and "samp.mtype" in message:
msg_mtype = message["samp.mtype"]
del message["samp.mtype"]
msg_params = message["samp.params"]
del message["samp.params"]
msubs = SAMPHubServer.get_mtype_subtypes(msg_mtype)
for mtype in msubs:
if mtype in self._call_bindings:
self._call_bindings[mtype][0](
private_key, sender_id, msg_id, msg_mtype, msg_params, message
)
return ""
def receive_call(self, private_key, sender_id, msg_id, message):
"""
Standard callable client ``receive_call`` method.
This method is automatically handled when the
:meth:`~astropy.samp.client.SAMPClient.bind_receive_call` method is
used to bind distinct operations to MTypes. In case of a customized
callable client implementation that inherits from the
:class:`~astropy.samp.SAMPClient` class this method should be
overwritten.
.. note:: When overwritten, this method must always return
a string result (even empty).
Parameters
----------
private_key : str
Client private key.
sender_id : str
Sender public ID.
msg_id : str
Message ID received.
message : dict
Received message.
Returns
-------
confirmation : str
Any confirmation string.
"""
return self._handle_call(private_key, sender_id, msg_id, message)
def _handle_response(self, private_key, responder_id, msg_tag, response):
if private_key == self.get_private_key() and msg_tag in self._response_bindings:
self._response_bindings[msg_tag](
private_key, responder_id, msg_tag, response
)
return ""
def receive_response(self, private_key, responder_id, msg_tag, response):
"""
Standard callable client ``receive_response`` method.
This method is automatically handled when the
:meth:`~astropy.samp.client.SAMPClient.bind_receive_response` method
is used to bind distinct operations to MTypes. In case of a customized
callable client implementation that inherits from the
:class:`~astropy.samp.SAMPClient` class this method should be
overwritten.
.. note:: When overwritten, this method must always return
a string result (even empty).
Parameters
----------
private_key : str
Client private key.
responder_id : str
Responder public ID.
msg_tag : str
Response message tag.
response : dict
Received response.
Returns
-------
confirmation : str
Any confirmation string.
"""
return self._handle_response(private_key, responder_id, msg_tag, response)
def bind_receive_message(self, mtype, function, declare=True, metadata=None):
"""
Bind a specific MType to a function or class method, being intended for
a call or a notification.
The function must be of the form::
def my_function_or_method(<self,> private_key, sender_id, msg_id,
mtype, params, extra)
where ``private_key`` is the client private-key, ``sender_id`` is the
notification sender ID, ``msg_id`` is the Hub message-id (calls only,
        otherwise it is `None`), ``mtype`` is the message MType, ``params`` is the
message parameter set (content of ``"samp.params"``) and ``extra`` is a
dictionary containing any extra message map entry. The client is
automatically declared subscribed to the MType by default.
Parameters
----------
mtype : str
MType to be caught.
function : callable
Application function to be used when ``mtype`` is received.
declare : bool, optional
Specify whether the client must be automatically declared as
subscribed to the MType (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
metadata : dict, optional
Dictionary containing additional metadata to declare associated
with the MType subscribed to (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
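        Examples
        --------
        A hypothetical handler bound to VOTable-load messages (``client``
        is assumed to be a registered, callable `SAMPClient`):
        >>> def on_table(private_key, sender_id, msg_id, mtype, params, extra):
        ...     print("Table available at", params["url"])
        >>> client.bind_receive_message("table.load.votable", on_table)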
"""
self.bind_receive_call(mtype, function, declare=declare, metadata=metadata)
self.bind_receive_notification(
mtype, function, declare=declare, metadata=metadata
)
def bind_receive_notification(self, mtype, function, declare=True, metadata=None):
"""
Bind a specific MType notification to a function or class method.
The function must be of the form::
def my_function_or_method(<self,> private_key, sender_id, mtype,
params, extra)
where ``private_key`` is the client private-key, ``sender_id`` is the
notification sender ID, ``mtype`` is the message MType, ``params`` is
the notified message parameter set (content of ``"samp.params"``) and
``extra`` is a dictionary containing any extra message map entry. The
client is automatically declared subscribed to the MType by default.
Parameters
----------
mtype : str
MType to be caught.
function : callable
Application function to be used when ``mtype`` is received.
declare : bool, optional
Specify whether the client must be automatically declared as
subscribed to the MType (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
metadata : dict, optional
Dictionary containing additional metadata to declare associated
with the MType subscribed to (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
"""
if self._callable:
if not metadata:
metadata = {}
self._notification_bindings[mtype] = [function, metadata]
if declare:
self._declare_subscriptions()
else:
raise SAMPClientError("Client not callable.")
def bind_receive_call(self, mtype, function, declare=True, metadata=None):
"""
Bind a specific MType call to a function or class method.
The function must be of the form::
def my_function_or_method(<self,> private_key, sender_id, msg_id,
mtype, params, extra)
where ``private_key`` is the client private-key, ``sender_id`` is the
notification sender ID, ``msg_id`` is the Hub message-id, ``mtype`` is
the message MType, ``params`` is the message parameter set (content of
``"samp.params"``) and ``extra`` is a dictionary containing any extra
message map entry. The client is automatically declared subscribed to
the MType by default.
Parameters
----------
mtype : str
MType to be caught.
function : callable
Application function to be used when ``mtype`` is received.
declare : bool, optional
Specify whether the client must be automatically declared as
subscribed to the MType (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
metadata : dict, optional
Dictionary containing additional metadata to declare associated
with the MType subscribed to (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
"""
if self._callable:
if not metadata:
metadata = {}
self._call_bindings[mtype] = [function, metadata]
if declare:
self._declare_subscriptions()
else:
raise SAMPClientError("Client not callable.")
def bind_receive_response(self, msg_tag, function):
"""
Bind a specific msg-tag response to a function or class method.
The function must be of the form::
def my_function_or_method(<self,> private_key, responder_id,
msg_tag, response)
where ``private_key`` is the client private-key, ``responder_id`` is
the message responder ID, ``msg_tag`` is the message-tag provided at
call time and ``response`` is the response received.
Parameters
----------
msg_tag : str
Message-tag to be caught.
function : callable
Application function to be used when ``msg_tag`` is received.
"""
if self._callable:
self._response_bindings[msg_tag] = function
else:
raise SAMPClientError("Client not callable.")
def unbind_receive_notification(self, mtype, declare=True):
"""
Remove from the notifications binding table the specified MType and
unsubscribe the client from it (if required).
Parameters
----------
mtype : str
MType to be removed.
declare : bool
Specify whether the client must be automatically declared as
unsubscribed from the MType (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
"""
if self._callable:
del self._notification_bindings[mtype]
if declare:
self._declare_subscriptions()
else:
raise SAMPClientError("Client not callable.")
def unbind_receive_call(self, mtype, declare=True):
"""
Remove from the calls binding table the specified MType and unsubscribe
the client from it (if required).
Parameters
----------
mtype : str
MType to be removed.
declare : bool
Specify whether the client must be automatically declared as
unsubscribed from the MType (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
"""
if self._callable:
del self._call_bindings[mtype]
if declare:
self._declare_subscriptions()
else:
raise SAMPClientError("Client not callable.")
def unbind_receive_response(self, msg_tag):
"""
Remove from the responses binding table the specified message-tag.
Parameters
----------
msg_tag : str
Message-tag to be removed.
"""
if self._callable:
del self._response_bindings[msg_tag]
else:
raise SAMPClientError("Client not callable.")
def declare_subscriptions(self, subscriptions=None):
"""
Declares the MTypes the client wishes to subscribe to, implicitly
defined with the MType binding methods
:meth:`~astropy.samp.client.SAMPClient.bind_receive_notification`
and :meth:`~astropy.samp.client.SAMPClient.bind_receive_call`.
An optional ``subscriptions`` map can be added to the final map passed
to the :meth:`~astropy.samp.hub_proxy.SAMPHubProxy.declare_subscriptions`
method.
Parameters
----------
subscriptions : dict, optional
Dictionary containing the list of MTypes to subscribe to, with the
same format of the ``subscriptions`` map passed to the
:meth:`~astropy.samp.hub_proxy.SAMPHubProxy.declare_subscriptions`
method.
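
        Examples
        --------
        A minimal sketch, assuming ``client`` is callable and already
        registered (the MType pattern is illustrative)::

            client.declare_subscriptions({"samp.app.*": {}})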
"""
if self._callable:
self._declare_subscriptions(subscriptions)
else:
raise SAMPClientError("Client not callable.")
def register(self):
"""
Register the client to the SAMP Hub.
"""
if self.hub.is_connected:
if self._private_key is not None:
raise SAMPClientError("Client already registered")
result = self.hub.register(self.hub.lockfile["samp.secret"])
if result["samp.self-id"] == "":
raise SAMPClientError(
"Registration failed - samp.self-id was not set by the hub."
)
if result["samp.private-key"] == "":
raise SAMPClientError(
"Registration failed - samp.private-key was not set by the hub."
)
self._public_id = result["samp.self-id"]
self._private_key = result["samp.private-key"]
self._hub_id = result["samp.hub-id"]
if self._callable:
self._set_xmlrpc_callback()
self._declare_subscriptions()
if self._metadata != {}:
self.declare_metadata()
self._is_registered = True
else:
raise SAMPClientError(
"Unable to register to the SAMP Hub. Hub proxy not connected."
)
def unregister(self):
"""
Unregister the client from the SAMP Hub.
"""
if self.hub.is_connected:
self._is_registered = False
self.hub.unregister(self._private_key)
self._hub_id = None
self._public_id = None
self._private_key = None
else:
raise SAMPClientError(
"Unable to unregister from the SAMP Hub. Hub proxy not connected."
)
def _set_xmlrpc_callback(self):
if self.hub.is_connected and self._private_key is not None:
self.hub.set_xmlrpc_callback(self._private_key, self._xmlrpcAddr)
def _declare_subscriptions(self, subscriptions=None):
if self.hub.is_connected and self._private_key is not None:
mtypes_dict = {}
# Collect notification mtypes and metadata
for mtype in self._notification_bindings.keys():
mtypes_dict[mtype] = copy.deepcopy(
self._notification_bindings[mtype][1]
)
# Collect notification mtypes and metadata
for mtype in self._call_bindings.keys():
mtypes_dict[mtype] = copy.deepcopy(self._call_bindings[mtype][1])
# Add optional subscription map
if subscriptions:
mtypes_dict.update(copy.deepcopy(subscriptions))
self.hub.declare_subscriptions(self._private_key, mtypes_dict)
else:
raise SAMPClientError(
"Unable to declare subscriptions. Hub "
"unreachable or not connected or client "
"not registered."
)
def declare_metadata(self, metadata=None):
"""
Declare the client application metadata supported.
Parameters
----------
metadata : dict, optional
Dictionary containing the client application metadata as defined in
the SAMP definition document. If omitted, then no metadata are
declared.
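
        Examples
        --------
        A minimal sketch (the metadata values are illustrative)::

            client.declare_metadata({"samp.name": "MyTool",
                                     "samp.description.text": "An example"})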
"""
if self.hub.is_connected and self._private_key is not None:
if metadata is not None:
self._metadata.update(metadata)
self.hub.declare_metadata(self._private_key, self._metadata)
else:
raise SAMPClientError(
"Unable to declare metadata. Hub "
"unreachable or not connected or client "
"not registered."
)
def get_private_key(self):
"""
Return the client private key used for the Standard Profile
communications obtained at registration time (``samp.private-key``).
Returns
-------
key : str
Client private key.
"""
return self._private_key
def get_public_id(self):
"""
Return public client ID obtained at registration time
(``samp.self-id``).
Returns
-------
id : str
Client public ID.
"""
return self._public_id
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Utility functions and classes.
"""
import inspect
import queue
import traceback
import xmlrpc.client as xmlrpc
from io import StringIO
from urllib.request import urlopen
from .constants import SAMP_STATUS_ERROR, SAMP_STATUS_OK
from .errors import SAMPProxyError
def internet_on():
from . import conf
if not conf.use_internet:
return False
else:
try:
urlopen("http://google.com", timeout=1.0)
return True
except Exception:
return False
__all__ = ["SAMPMsgReplierWrapper"]
__doctest_skip__ = ["."]
def getattr_recursive(variable, attribute):
"""
Get attributes recursively.
"""
if "." in attribute:
top, remaining = attribute.split(".", 1)
return getattr_recursive(getattr(variable, top), remaining)
else:
return getattr(variable, attribute)
class _ServerProxyPoolMethod:
# some magic to bind an XML-RPC method to an RPC server.
# supports "nested" methods (e.g. examples.getStateName)
def __init__(self, proxies, name):
self.__proxies = proxies
self.__name = name
def __getattr__(self, name):
return _ServerProxyPoolMethod(self.__proxies, f"{self.__name}.{name}")
def __call__(self, *args, **kwrds):
proxy = self.__proxies.get()
function = getattr_recursive(proxy, self.__name)
try:
response = function(*args, **kwrds)
except xmlrpc.Fault as exc:
raise SAMPProxyError(exc.faultCode, exc.faultString)
finally:
self.__proxies.put(proxy)
return response
class ServerProxyPool:
"""
A thread-safe pool of `xmlrpc.ServerProxy` objects.
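
    Examples
    --------
    A minimal sketch (the URL is illustrative). Attribute access is
    dispatched through a proxy taken from the pool and returned to it
    afterwards::

        pool = ServerProxyPool(5, xmlrpc.ServerProxy,
                               "http://127.0.0.1:8080", allow_none=1)
        pool.samp.hub.ping()
        pool.shutdown()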
"""
def __init__(self, size, proxy_class, *args, **keywords):
self._proxies = queue.Queue(size)
for i in range(size):
self._proxies.put(proxy_class(*args, **keywords))
def __getattr__(self, name):
# magic method dispatcher
return _ServerProxyPoolMethod(self._proxies, name)
def shutdown(self):
"""Shut down the proxy pool by closing all active connections."""
while True:
try:
proxy = self._proxies.get_nowait()
except queue.Empty:
break
            # An undocumented but apparently supported way to call methods on
            # a ServerProxy that are not dispatched to the remote server
proxy("close")
class SAMPMsgReplierWrapper:
"""
    Function decorator that automatically grabs errors and returned maps
    (if any) from a function bound to a SAMP call (or notification).
Parameters
----------
cli : :class:`~astropy.samp.SAMPIntegratedClient` or :class:`~astropy.samp.SAMPClient`
SAMP client instance. Decorator initialization, accepting the instance
of the client that receives the call or notification.
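
    Examples
    --------
    A minimal sketch (``cli`` is assumed to be an already-registered client;
    the MType and handler are illustrative). The wrapper sends the reply for
    you, using the returned map as ``samp.result``::

        @SAMPMsgReplierWrapper(cli)
        def echo(private_key, sender_id, msg_id, mtype, params, extra):
            return {"echo": params}

        cli.bind_receive_call("test.echo", echo)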
"""
def __init__(self, cli):
self.cli = cli
def __call__(self, f):
def wrapped_f(*args):
if get_num_args(f) == 5 or args[2] is None: # notification
f(*args)
else: # call
try:
result = f(*args)
if result:
                        # Reply with an OK status, forwarding the handler's
                        # returned map as ``samp.result``.
                        self.cli.hub.reply(
                            self.cli.get_private_key(),
                            args[2],
                            {"samp.status": SAMP_STATUS_OK, "samp.result": result},
                        )
except Exception:
err = StringIO()
traceback.print_exc(file=err)
txt = err.getvalue()
self.cli.hub.reply(
self.cli.get_private_key(),
args[2],
{"samp.status": SAMP_STATUS_ERROR, "samp.result": {"txt": txt}},
)
return wrapped_f
class _HubAsClient:
def __init__(self, handler):
self._handler = handler
def __getattr__(self, name):
# magic method dispatcher
return _HubAsClientMethod(self._handler, name)
class _HubAsClientMethod:
def __init__(self, send, name):
self.__send = send
self.__name = name
def __getattr__(self, name):
return _HubAsClientMethod(self.__send, f"{self.__name}.{name}")
def __call__(self, *args):
return self.__send(self.__name, args)
def get_num_args(f):
"""
Find the number of arguments a function or method takes (excluding ``self``).
"""
if inspect.ismethod(f):
return f.__func__.__code__.co_argcount - 1
elif inspect.isfunction(f):
return f.__code__.co_argcount
else:
raise TypeError("f should be a function or a method")
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import xmlrpc.client as xmlrpc
from .errors import SAMPHubError
from .lockfile_helpers import get_main_running_hub
from .utils import ServerProxyPool
__all__ = ["SAMPHubProxy"]
class SAMPHubProxy:
"""
Proxy class to simplify the client interaction with a SAMP hub (via the
standard profile).
"""
def __init__(self):
self.proxy = None
self._connected = False
@property
def is_connected(self):
"""
Whether the hub proxy is currently connected to a hub.
"""
return self._connected
def connect(self, hub=None, hub_params=None, pool_size=20):
"""
Connect to the current SAMP Hub.
Parameters
----------
hub : `~astropy.samp.SAMPHubServer`, optional
The hub to connect to.
hub_params : dict, optional
Optional dictionary containing the lock-file content of the Hub
with which to connect. This dictionary has the form
``{<token-name>: <token-string>, ...}``.
pool_size : int, optional
The number of socket connections opened to communicate with the
Hub.
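
        Examples
        --------
        A minimal sketch, assuming a hub is already running on this
        machine::

            proxy = SAMPHubProxy()
            proxy.connect()
            proxy.ping()
            proxy.disconnect()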
"""
self._connected = False
self.lockfile = {}
if hub is not None and hub_params is not None:
raise ValueError("Cannot specify both hub and hub_params")
if hub_params is None:
if hub is not None:
if not hub.is_running:
raise SAMPHubError("Hub is not running")
else:
hub_params = hub.params
else:
hub_params = get_main_running_hub()
try:
url = hub_params["samp.hub.xmlrpc.url"].replace("\\", "")
self.proxy = ServerProxyPool(
pool_size, xmlrpc.ServerProxy, url, allow_none=1
)
self.ping()
self.lockfile = copy.deepcopy(hub_params)
self._connected = True
except xmlrpc.ProtocolError as p:
# 401 Unauthorized
if p.errcode == 401:
raise SAMPHubError(
"Unauthorized access. Basic Authentication required or failed."
)
else:
raise SAMPHubError(f"Protocol Error {p.errcode}: {p.errmsg}")
def disconnect(self):
"""
Disconnect from the current SAMP Hub.
"""
if self.proxy is not None:
self.proxy.shutdown()
self.proxy = None
self._connected = False
self.lockfile = {}
@property
def _samp_hub(self):
"""
Property to abstract away the path to the hub, which allows this class
to be used for other profiles.
"""
return self.proxy.samp.hub
def ping(self):
"""
Proxy to ``ping`` SAMP Hub method (Standard Profile only).
"""
return self._samp_hub.ping()
def set_xmlrpc_callback(self, private_key, xmlrpc_addr):
"""
Proxy to ``setXmlrpcCallback`` SAMP Hub method (Standard Profile only).
"""
return self._samp_hub.setXmlrpcCallback(private_key, xmlrpc_addr)
def register(self, secret):
"""
Proxy to ``register`` SAMP Hub method.
"""
return self._samp_hub.register(secret)
def unregister(self, private_key):
"""
Proxy to ``unregister`` SAMP Hub method.
"""
return self._samp_hub.unregister(private_key)
def declare_metadata(self, private_key, metadata):
"""
Proxy to ``declareMetadata`` SAMP Hub method.
"""
return self._samp_hub.declareMetadata(private_key, metadata)
def get_metadata(self, private_key, client_id):
"""
Proxy to ``getMetadata`` SAMP Hub method.
"""
return self._samp_hub.getMetadata(private_key, client_id)
def declare_subscriptions(self, private_key, subscriptions):
"""
Proxy to ``declareSubscriptions`` SAMP Hub method.
"""
return self._samp_hub.declareSubscriptions(private_key, subscriptions)
def get_subscriptions(self, private_key, client_id):
"""
Proxy to ``getSubscriptions`` SAMP Hub method.
"""
return self._samp_hub.getSubscriptions(private_key, client_id)
def get_registered_clients(self, private_key):
"""
Proxy to ``getRegisteredClients`` SAMP Hub method.
"""
return self._samp_hub.getRegisteredClients(private_key)
def get_subscribed_clients(self, private_key, mtype):
"""
Proxy to ``getSubscribedClients`` SAMP Hub method.
"""
return self._samp_hub.getSubscribedClients(private_key, mtype)
def notify(self, private_key, recipient_id, message):
"""
Proxy to ``notify`` SAMP Hub method.
"""
return self._samp_hub.notify(private_key, recipient_id, message)
def notify_all(self, private_key, message):
"""
Proxy to ``notifyAll`` SAMP Hub method.
"""
return self._samp_hub.notifyAll(private_key, message)
def call(self, private_key, recipient_id, msg_tag, message):
"""
Proxy to ``call`` SAMP Hub method.
"""
return self._samp_hub.call(private_key, recipient_id, msg_tag, message)
def call_all(self, private_key, msg_tag, message):
"""
Proxy to ``callAll`` SAMP Hub method.
"""
return self._samp_hub.callAll(private_key, msg_tag, message)
def call_and_wait(self, private_key, recipient_id, message, timeout):
"""
Proxy to ``callAndWait`` SAMP Hub method.
"""
return self._samp_hub.callAndWait(private_key, recipient_id, message, timeout)
def reply(self, private_key, msg_id, response):
"""
Proxy to ``reply`` SAMP Hub method.
"""
return self._samp_hub.reply(private_key, msg_id, response)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Sundry function and class decorators."""
import functools
import inspect
import textwrap
import threading
import types
import warnings
from inspect import signature
from .exceptions import (
AstropyDeprecationWarning,
AstropyPendingDeprecationWarning,
AstropyUserWarning,
)
__all__ = [
"classproperty",
"deprecated",
"deprecated_attribute",
"deprecated_renamed_argument",
"format_doc",
"lazyproperty",
"sharedmethod",
]
_NotFound = object()
def deprecated(
since,
message="",
name="",
alternative="",
pending=False,
obj_type=None,
warning_type=AstropyDeprecationWarning,
):
"""
Used to mark a function or class as deprecated.
To mark an attribute as deprecated, use `deprecated_attribute`.
Parameters
----------
since : str
The release at which this API became deprecated. This is
required.
message : str, optional
Override the default deprecation message. The format
specifier ``func`` may be used for the name of the function,
and ``alternative`` may be used in the deprecation message
to insert the name of an alternative to the deprecated
function. ``obj_type`` may be used to insert a friendly name
for the type of object being deprecated.
name : str, optional
The name of the deprecated function or class; if not provided
the name is automatically determined from the passed in
function or class, though this is useful in the case of
renamed functions, where the new function is just assigned to
the name of the deprecated function. For example::
def new_function():
...
oldFunction = new_function
alternative : str, optional
An alternative function or class name that the user may use in
place of the deprecated object. The deprecation warning will
tell the user about this alternative if provided.
pending : bool, optional
        If True, uses an AstropyPendingDeprecationWarning instead of
        ``warning_type``.
obj_type : str, optional
The type of this object, if the automatically determined one
needs to be overridden.
warning_type : Warning
Warning to be issued.
Default is `~astropy.utils.exceptions.AstropyDeprecationWarning`.
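
    Examples
    --------
    A minimal sketch (the function names are illustrative)::

        @deprecated("1.4", alternative="new_square")
        def square(x):
            return x ** 2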
"""
method_types = (classmethod, staticmethod, types.MethodType)
def deprecate_doc(old_doc, message):
"""
Returns a given docstring with a deprecation message prepended
to it.
"""
if not old_doc:
old_doc = ""
old_doc = textwrap.dedent(old_doc).strip("\n")
new_doc = f"\n.. deprecated:: {since}\n {message.strip()}\n\n" + old_doc
if not old_doc:
# This is to prevent a spurious 'unexpected unindent' warning from
# docutils when the original docstring was blank.
new_doc += r"\ "
return new_doc
def get_function(func):
"""
Given a function or classmethod (or other function wrapper type), get
the function object.
"""
if isinstance(func, method_types):
func = func.__func__
return func
def deprecate_function(func, message, warning_type=warning_type):
"""
Returns a wrapped function that displays ``warning_type``
when it is called.
"""
if isinstance(func, method_types):
func_wrapper = type(func)
else:
func_wrapper = lambda f: f
func = get_function(func)
def deprecated_func(*args, **kwargs):
if pending:
category = AstropyPendingDeprecationWarning
else:
category = warning_type
warnings.warn(message, category, stacklevel=2)
return func(*args, **kwargs)
# If this is an extension function, we can't call
# functools.wraps on it, but we normally don't care.
# This crazy way to get the type of a wrapper descriptor is
# straight out of the Python 3.3 inspect module docs.
if type(func) is not type(str.__dict__["__add__"]):
deprecated_func = functools.wraps(func)(deprecated_func)
deprecated_func.__doc__ = deprecate_doc(deprecated_func.__doc__, message)
return func_wrapper(deprecated_func)
def deprecate_class(cls, message, warning_type=warning_type):
"""
Update the docstring and wrap the ``__init__`` in-place (or ``__new__``
if the class or any of the bases overrides ``__new__``) so it will give
a deprecation warning when an instance is created.
This won't work for extension classes because these can't be modified
in-place and the alternatives don't work in the general case:
- Using a new class that looks and behaves like the original doesn't
work because the __new__ method of extension types usually makes sure
that it's the same class or a subclass.
        - Subclassing the class and returning the subclass can lead to problems
with pickle and will look weird in the Sphinx docs.
"""
cls.__doc__ = deprecate_doc(cls.__doc__, message)
if cls.__new__ is object.__new__:
cls.__init__ = deprecate_function(
get_function(cls.__init__), message, warning_type
)
else:
cls.__new__ = deprecate_function(
get_function(cls.__new__), message, warning_type
)
return cls
def deprecate(
obj,
message=message,
name=name,
alternative=alternative,
pending=pending,
warning_type=warning_type,
):
if obj_type is None:
if isinstance(obj, type):
obj_type_name = "class"
elif inspect.isfunction(obj):
obj_type_name = "function"
elif inspect.ismethod(obj) or isinstance(obj, method_types):
obj_type_name = "method"
else:
obj_type_name = "object"
else:
obj_type_name = obj_type
if not name:
name = get_function(obj).__name__
altmessage = ""
if not message or type(message) is type(deprecate):
if pending:
message = (
"The {func} {obj_type} will be deprecated in a future version."
)
else:
message = (
"The {func} {obj_type} is deprecated and may "
"be removed in a future version."
)
if alternative:
altmessage = f"\n Use {alternative} instead."
message = (
message.format(
**{
"func": name,
"name": name,
"alternative": alternative,
"obj_type": obj_type_name,
}
)
) + altmessage
if isinstance(obj, type):
return deprecate_class(obj, message, warning_type)
else:
return deprecate_function(obj, message, warning_type)
if type(message) is type(deprecate):
return deprecate(message)
return deprecate
def deprecated_attribute(
name,
since,
message=None,
alternative=None,
pending=False,
warning_type=AstropyDeprecationWarning,
):
"""
Used to mark a public attribute as deprecated. This creates a
property that will warn when the given attribute name is accessed.
To prevent the warning (i.e. for internal code), use the private
name for the attribute by prepending an underscore
(i.e. ``self._name``), or set an alternative explicitly.
Parameters
----------
name : str
The name of the deprecated attribute.
since : str
The release at which this API became deprecated. This is
required.
message : str, optional
Override the default deprecation message. The format
specifier ``name`` may be used for the name of the attribute,
and ``alternative`` may be used in the deprecation message
to insert the name of an alternative to the deprecated
function.
alternative : str, optional
An alternative attribute that the user may use in place of the
deprecated attribute. The deprecation warning will tell the
user about this alternative if provided.
pending : bool, optional
        If True, uses an AstropyPendingDeprecationWarning instead of
``warning_type``.
warning_type : Warning
Warning to be issued.
Default is `~astropy.utils.exceptions.AstropyDeprecationWarning`.
Examples
--------
::
class MyClass:
# Mark the old_name as deprecated
old_name = deprecated_attribute("old_name", "0.1")
def method(self):
self._old_name = 42
class MyClass2:
old_name = deprecated_attribute(
"old_name", "1.2", alternative="new_name"
)
def method(self):
self.new_name = 24
"""
private_name = alternative or "_" + name
specific_deprecated = deprecated(
since,
name=name,
obj_type="attribute",
message=message,
alternative=alternative,
pending=pending,
warning_type=warning_type,
)
@specific_deprecated
def get(self):
return getattr(self, private_name)
@specific_deprecated
def set(self, val):
setattr(self, private_name, val)
@specific_deprecated
def delete(self):
delattr(self, private_name)
return property(get, set, delete)
def deprecated_renamed_argument(
old_name,
new_name,
since,
arg_in_kwargs=False,
relax=False,
pending=False,
warning_type=AstropyDeprecationWarning,
alternative="",
message="",
):
"""Deprecate a _renamed_ or _removed_ function argument.
The decorator assumes that the argument with the ``old_name`` was removed
from the function signature and the ``new_name`` replaced it at the
**same position** in the signature. If the ``old_name`` argument is
given when calling the decorated function the decorator will catch it and
issue a deprecation warning and pass it on as ``new_name`` argument.
Parameters
----------
old_name : str or sequence of str
The old name of the argument.
new_name : str or sequence of str or None
The new name of the argument. Set this to `None` to remove the
argument ``old_name`` instead of renaming it.
since : str or number or sequence of str or number
The release at which the old argument became deprecated.
arg_in_kwargs : bool or sequence of bool, optional
If the argument is not a named argument (for example it
was meant to be consumed by ``**kwargs``) set this to
``True``. Otherwise the decorator will throw an Exception
if the ``new_name`` cannot be found in the signature of
the decorated function.
Default is ``False``.
relax : bool or sequence of bool, optional
If ``False`` a ``TypeError`` is raised if both ``new_name`` and
``old_name`` are given. If ``True`` the value for ``new_name`` is used
and a Warning is issued.
Default is ``False``.
pending : bool or sequence of bool, optional
If ``True`` this will hide the deprecation warning and ignore the
corresponding ``relax`` parameter value.
Default is ``False``.
warning_type : Warning
Warning to be issued.
Default is `~astropy.utils.exceptions.AstropyDeprecationWarning`.
alternative : str, optional
An alternative function or class name that the user may use in
place of the deprecated object if ``new_name`` is None. The deprecation
warning will tell the user about this alternative if provided.
message : str, optional
A custom warning message. If provided then ``since`` and
``alternative`` options will have no effect.
Raises
------
TypeError
If the new argument name cannot be found in the function
signature and arg_in_kwargs was False or if it is used to
deprecate the name of the ``*args``-, ``**kwargs``-like arguments.
At runtime such an Error is raised if both the new_name
and old_name were specified when calling the function and
"relax=False".
Notes
-----
The decorator should be applied to a function where the **name**
of an argument was changed but it applies the same logic.
.. warning::
If ``old_name`` is a list or tuple the ``new_name`` and ``since`` must
also be a list or tuple with the same number of entries. ``relax`` and
``arg_in_kwarg`` can be a single bool (applied to all) or also a
list/tuple with the same number of entries like ``new_name``, etc.
Examples
--------
The deprecation warnings are not shown in the following examples.
To deprecate a positional or keyword argument::
>>> from astropy.utils.decorators import deprecated_renamed_argument
>>> @deprecated_renamed_argument('sig', 'sigma', '1.0')
... def test(sigma):
... return sigma
>>> test(2)
2
>>> test(sigma=2)
2
>>> test(sig=2) # doctest: +SKIP
2
To deprecate an argument caught inside the ``**kwargs`` the
``arg_in_kwargs`` has to be set::
>>> @deprecated_renamed_argument('sig', 'sigma', '1.0',
... arg_in_kwargs=True)
... def test(**kwargs):
... return kwargs['sigma']
>>> test(sigma=2)
2
>>> test(sig=2) # doctest: +SKIP
2
By default providing the new and old keyword will lead to an Exception. If
a Warning is desired set the ``relax`` argument::
>>> @deprecated_renamed_argument('sig', 'sigma', '1.0', relax=True)
... def test(sigma):
... return sigma
>>> test(sig=2) # doctest: +SKIP
2
It is also possible to replace multiple arguments. The ``old_name``,
``new_name`` and ``since`` have to be `tuple` or `list` and contain the
same number of entries::
>>> @deprecated_renamed_argument(['a', 'b'], ['alpha', 'beta'],
... ['1.0', 1.2])
... def test(alpha, beta):
... return alpha, beta
>>> test(a=2, b=3) # doctest: +SKIP
(2, 3)
In this case ``arg_in_kwargs`` and ``relax`` can be a single value (which
is applied to all renamed arguments) or must also be a `tuple` or `list`
with values for each of the arguments.
"""
cls_iter = (list, tuple)
if isinstance(old_name, cls_iter):
n = len(old_name)
# Assume that new_name and since are correct (tuple/list with the
# appropriate length) in the spirit of the "consenting adults". But the
# optional parameters may not be set, so if these are not iterables
# wrap them.
if not isinstance(arg_in_kwargs, cls_iter):
arg_in_kwargs = [arg_in_kwargs] * n
if not isinstance(relax, cls_iter):
relax = [relax] * n
if not isinstance(pending, cls_iter):
pending = [pending] * n
if not isinstance(message, cls_iter):
message = [message] * n
else:
# To allow a uniform approach later on, wrap all arguments in lists.
n = 1
old_name = [old_name]
new_name = [new_name]
since = [since]
arg_in_kwargs = [arg_in_kwargs]
relax = [relax]
pending = [pending]
message = [message]
def decorator(function):
# The named arguments of the function.
arguments = signature(function).parameters
keys = list(arguments.keys())
position = [None] * n
for i in range(n):
# Determine the position of the argument.
if arg_in_kwargs[i]:
pass
else:
if new_name[i] is None:
param = arguments[old_name[i]]
elif new_name[i] in arguments:
param = arguments[new_name[i]]
# In case the argument is not found in the list of arguments
# the only remaining possibility is that it should be caught
# by some kind of **kwargs argument.
# This case has to be explicitly specified, otherwise throw
# an exception!
else:
raise TypeError(
f'"{new_name[i]}" was not specified in the function '
"signature. If it was meant to be part of "
'"**kwargs" then set "arg_in_kwargs" to "True"'
)
# There are several possibilities now:
# 1.) Positional or keyword argument:
if param.kind == param.POSITIONAL_OR_KEYWORD:
if new_name[i] is None:
position[i] = keys.index(old_name[i])
else:
position[i] = keys.index(new_name[i])
# 2.) Keyword only argument:
elif param.kind == param.KEYWORD_ONLY:
# These cannot be specified by position.
position[i] = None
# 3.) positional-only argument, varargs, varkwargs or some
# unknown type:
else:
raise TypeError(
f'cannot replace argument "{new_name[i]}" '
f"of kind {repr(param.kind)}."
)
@functools.wraps(function)
def wrapper(*args, **kwargs):
for i in range(n):
msg = message[i] or (
f'"{old_name[i]}" was deprecated in '
f"version {since[i]} and will be removed "
"in a future version. "
)
# The only way to have oldkeyword inside the function is
# that it is passed as kwarg because the oldkeyword
# parameter was renamed to newkeyword.
if old_name[i] in kwargs:
value = kwargs.pop(old_name[i])
# Display the deprecation warning only when it's not
# pending.
if not pending[i]:
if not message[i]:
if new_name[i] is not None:
msg += f'Use argument "{new_name[i]}" instead.'
elif alternative:
msg += f"\n Use {alternative} instead."
warnings.warn(msg, warning_type, stacklevel=2)
# Check if the newkeyword was given as well.
newarg_in_args = position[i] is not None and len(args) > position[i]
newarg_in_kwargs = new_name[i] in kwargs
if newarg_in_args or newarg_in_kwargs:
if not pending[i]:
                            # If both are given, print a Warning if relax is
                            # True or raise an Exception if relax is False.
if relax[i]:
warnings.warn(
f'"{old_name[i]}" and "{new_name[i]}" '
"keywords were set. "
f'Using the value of "{new_name[i]}".',
AstropyUserWarning,
)
else:
raise TypeError(
f'cannot specify both "{old_name[i]}" and '
f'"{new_name[i]}".'
)
else:
# Pass the value of the old argument with the
# name of the new argument to the function
if new_name[i] is not None:
kwargs[new_name[i]] = value
# If old argument has no replacement, cast it back.
# https://github.com/astropy/astropy/issues/9914
else:
kwargs[old_name[i]] = value
# Deprecated keyword without replacement is given as
# positional argument.
elif (
not pending[i]
and not new_name[i]
and position[i]
and len(args) > position[i]
):
if alternative and not message[i]:
msg += f"\n Use {alternative} instead."
warnings.warn(msg, warning_type, stacklevel=2)
return function(*args, **kwargs)
return wrapper
return decorator
# TODO: This can still be made to work for setters by implementing an
# accompanying metaclass that supports it; we just don't need that right this
# second
class classproperty(property):
"""
Similar to `property`, but allows class-level properties. That is,
a property whose getter is like a `classmethod`.
The wrapped method may explicitly use the `classmethod` decorator (which
    must come before this decorator), or the `classmethod` may be omitted
(it is implicit through use of this decorator).
.. note::
classproperty only works for *read-only* properties. It does not
currently allow writeable/deletable properties, due to subtleties of how
Python descriptors work. In order to implement such properties on a class
a metaclass for that class must be implemented.
Parameters
----------
fget : callable
The function that computes the value of this property (in particular,
the function when this is used as a decorator) a la `property`.
doc : str, optional
The docstring for the property--by default inherited from the getter
function.
lazy : bool, optional
If True, caches the value returned by the first call to the getter
function, so that it is only called once (used for lazy evaluation
of an attribute). This is analogous to `lazyproperty`. The ``lazy``
argument can also be used when `classproperty` is used as a decorator
(see the third example below). When used in the decorator syntax this
*must* be passed in as a keyword argument.
Examples
--------
::
>>> class Foo:
... _bar_internal = 1
... @classproperty
... def bar(cls):
... return cls._bar_internal + 1
...
>>> Foo.bar
2
>>> foo_instance = Foo()
>>> foo_instance.bar
2
>>> foo_instance._bar_internal = 2
>>> foo_instance.bar # Ignores instance attributes
2
As previously noted, a `classproperty` is limited to implementing
read-only attributes::
>>> class Foo:
... _bar_internal = 1
... @classproperty
... def bar(cls):
... return cls._bar_internal
... @bar.setter
... def bar(cls, value):
... cls._bar_internal = value
...
Traceback (most recent call last):
...
NotImplementedError: classproperty can only be read-only; use a
metaclass to implement modifiable class-level properties
When the ``lazy`` option is used, the getter is only called once::
>>> class Foo:
... @classproperty(lazy=True)
... def bar(cls):
... print("Performing complicated calculation")
... return 1
...
>>> Foo.bar
Performing complicated calculation
1
>>> Foo.bar
1
If a subclass inherits a lazy `classproperty` the property is still
re-evaluated for the subclass::
>>> class FooSub(Foo):
... pass
...
>>> FooSub.bar
Performing complicated calculation
1
>>> FooSub.bar
1
"""
def __new__(cls, fget=None, doc=None, lazy=False):
if fget is None:
# Being used as a decorator--return a wrapper that implements
# decorator syntax
def wrapper(func):
return cls(func, lazy=lazy)
return wrapper
return super().__new__(cls)
def __init__(self, fget, doc=None, lazy=False):
self._lazy = lazy
if lazy:
self._lock = threading.RLock() # Protects _cache
self._cache = {}
fget = self._wrap_fget(fget)
super().__init__(fget=fget, doc=doc)
# There is a buglet in Python where self.__doc__ doesn't
# get set properly on instances of property subclasses if
# the doc argument was used rather than taking the docstring
# from fget
# Related Python issue: https://bugs.python.org/issue24766
if doc is not None:
self.__doc__ = doc
def __get__(self, obj, objtype):
if self._lazy:
val = self._cache.get(objtype, _NotFound)
if val is _NotFound:
with self._lock:
# Check if another thread initialised before we locked.
val = self._cache.get(objtype, _NotFound)
if val is _NotFound:
val = self.fget.__wrapped__(objtype)
self._cache[objtype] = val
else:
# The base property.__get__ will just return self here;
# instead we pass objtype through to the original wrapped
# function (which takes the class as its sole argument)
val = self.fget.__wrapped__(objtype)
return val
def getter(self, fget):
return super().getter(self._wrap_fget(fget))
def setter(self, fset):
raise NotImplementedError(
"classproperty can only be read-only; use a metaclass to "
"implement modifiable class-level properties"
)
def deleter(self, fdel):
raise NotImplementedError(
"classproperty can only be read-only; use a metaclass to "
"implement modifiable class-level properties"
)
@staticmethod
def _wrap_fget(orig_fget):
if isinstance(orig_fget, classmethod):
orig_fget = orig_fget.__func__
# Using stock functools.wraps instead of the fancier version
# found later in this module, which is overkill for this purpose
@functools.wraps(orig_fget)
def fget(obj):
return orig_fget(obj.__class__)
return fget
# Adapted from the recipe at
# http://code.activestate.com/recipes/363602-lazy-property-evaluation
class lazyproperty(property):
"""
Works similarly to property(), but computes the value only once.
    This essentially memoizes the value of the property by storing the result
of its computation in the ``__dict__`` of the object instance. This is
useful for computing the value of some property that should otherwise be
invariant. For example::
>>> class LazyTest:
... @lazyproperty
... def complicated_property(self):
... print('Computing the value for complicated_property...')
... return 42
...
>>> lt = LazyTest()
>>> lt.complicated_property
Computing the value for complicated_property...
42
>>> lt.complicated_property
42
As the example shows, the second time ``complicated_property`` is accessed,
the ``print`` statement is not executed. Only the return value from the
    first access of ``complicated_property`` is returned.
By default, a setter and deleter are used which simply overwrite and
delete, respectively, the value stored in ``__dict__``. Any user-specified
setter or deleter is executed before executing these default actions.
    The one exception is that the default setter is not run if a user-supplied
    setter has already stored the new value in ``__dict__`` and returned that
    (non-``None``) value.
"""
def __init__(self, fget, fset=None, fdel=None, doc=None):
super().__init__(fget, fset, fdel, doc)
self._key = self.fget.__name__
self._lock = threading.RLock()
def __get__(self, obj, owner=None):
try:
obj_dict = obj.__dict__
val = obj_dict.get(self._key, _NotFound)
if val is _NotFound:
with self._lock:
# Check if another thread beat us to it.
val = obj_dict.get(self._key, _NotFound)
if val is _NotFound:
val = self.fget(obj)
obj_dict[self._key] = val
return val
except AttributeError:
if obj is None:
return self
raise
def __set__(self, obj, val):
obj_dict = obj.__dict__
if self.fset:
ret = self.fset(obj, val)
if ret is not None and obj_dict.get(self._key) is ret:
# By returning the value set the setter signals that it
# took over setting the value in obj.__dict__; this
# mechanism allows it to override the input value
return
obj_dict[self._key] = val
def __delete__(self, obj):
if self.fdel:
self.fdel(obj)
obj.__dict__.pop(self._key, None) # Delete if present
class sharedmethod(classmethod):
"""
This is a method decorator that allows both an instancemethod and a
`classmethod` to share the same name.
When using `sharedmethod` on a method defined in a class's body, it
may be called on an instance, or on a class. In the former case it
behaves like a normal instance method (a reference to the instance is
automatically passed as the first ``self`` argument of the method)::
>>> class Example:
... @sharedmethod
... def identify(self, *args):
... print('self was', self)
... print('additional args were', args)
...
>>> ex = Example()
>>> ex.identify(1, 2)
self was <astropy.utils.decorators.Example object at 0x...>
additional args were (1, 2)
In the latter case, when the `sharedmethod` is called directly from a
class, it behaves like a `classmethod`::
>>> Example.identify(3, 4)
self was <class 'astropy.utils.decorators.Example'>
additional args were (3, 4)
This also supports a more advanced usage, where the `classmethod`
implementation can be written separately. If the class's *metaclass*
has a method of the same name as the `sharedmethod`, the version on
the metaclass is delegated to::
>>> class ExampleMeta(type):
... def identify(self):
... print('this implements the {0}.identify '
... 'classmethod'.format(self.__name__))
...
>>> class Example(metaclass=ExampleMeta):
... @sharedmethod
... def identify(self):
... print('this implements the instancemethod')
...
>>> Example().identify()
this implements the instancemethod
>>> Example.identify()
this implements the Example.identify classmethod
"""
def __get__(self, obj, objtype=None):
if obj is None:
mcls = type(objtype)
clsmeth = getattr(mcls, self.__func__.__name__, None)
if callable(clsmeth):
func = clsmeth
else:
func = self.__func__
return self._make_method(func, objtype)
else:
return self._make_method(self.__func__, obj)
@staticmethod
def _make_method(func, instance):
return types.MethodType(func, instance)
def format_doc(docstring, *args, **kwargs):
"""
Replaces the docstring of the decorated object and then formats it.
The formatting works like :meth:`str.format` and if the decorated object
already has a docstring this docstring can be included in the new
documentation if you use the ``{__doc__}`` placeholder.
Its primary use is for reusing a *long* docstring in multiple functions
when it is the same or only slightly different between them.
Parameters
----------
docstring : str or object or None
The docstring that will replace the docstring of the decorated
object. If it is an object like a function or class it will
take the docstring of this object. If it is a string it will use the
        string itself. One special case: if the string is ``None``, then
        it will use the decorated function's docstring and format it.
args :
passed to :meth:`str.format`.
kwargs :
passed to :meth:`str.format`. If the function has a (not empty)
docstring the original docstring is added to the kwargs with the
keyword ``'__doc__'``.
Raises
------
ValueError
If the ``docstring`` (or interpreted docstring if it was ``None``
or not a string) is empty.
IndexError, KeyError
If a placeholder in the (interpreted) ``docstring`` was not filled. see
:meth:`str.format` for more information.
Notes
-----
Using this decorator allows, for example Sphinx, to parse the
correct docstring.
Examples
--------
Replacing the current docstring is very easy::
>>> from astropy.utils.decorators import format_doc
>>> @format_doc('''Perform num1 + num2''')
... def add(num1, num2):
... return num1+num2
...
>>> help(add) # doctest: +SKIP
Help on function add in module __main__:
<BLANKLINE>
add(num1, num2)
Perform num1 + num2
sometimes instead of replacing you only want to add to it::
>>> doc = '''
... {__doc__}
... Parameters
... ----------
... num1, num2 : Numbers
... Returns
... -------
... result: Number
... '''
>>> @format_doc(doc)
... def add(num1, num2):
... '''Perform addition.'''
... return num1+num2
...
>>> help(add) # doctest: +SKIP
Help on function add in module __main__:
<BLANKLINE>
add(num1, num2)
Perform addition.
Parameters
----------
num1, num2 : Numbers
Returns
-------
result : Number
in case one might want to format it further::
>>> doc = '''
... Perform {0}.
... Parameters
... ----------
... num1, num2 : Numbers
... Returns
... -------
... result: Number
... result of num1 {op} num2
... {__doc__}
... '''
>>> @format_doc(doc, 'addition', op='+')
... def add(num1, num2):
... return num1+num2
...
>>> @format_doc(doc, 'subtraction', op='-')
... def subtract(num1, num2):
... '''Notes: This one has additional notes.'''
... return num1-num2
...
>>> help(add) # doctest: +SKIP
Help on function add in module __main__:
<BLANKLINE>
add(num1, num2)
Perform addition.
Parameters
----------
num1, num2 : Numbers
Returns
-------
result : Number
result of num1 + num2
>>> help(subtract) # doctest: +SKIP
Help on function subtract in module __main__:
<BLANKLINE>
subtract(num1, num2)
Perform subtraction.
Parameters
----------
num1, num2 : Numbers
Returns
-------
result : Number
result of num1 - num2
Notes : This one has additional notes.
These methods can be combined; even taking the docstring from another
object is possible as docstring attribute. You just have to specify the
object::
>>> @format_doc(add)
... def another_add(num1, num2):
... return num1 + num2
...
>>> help(another_add) # doctest: +SKIP
Help on function another_add in module __main__:
<BLANKLINE>
another_add(num1, num2)
Perform addition.
Parameters
----------
num1, num2 : Numbers
Returns
-------
result : Number
result of num1 + num2
But be aware that this decorator *only* formats the given docstring not
the strings passed as ``args`` or ``kwargs`` (not even the original
docstring)::
>>> @format_doc(doc, 'addition', op='+')
... def yet_another_add(num1, num2):
... '''This one is good for {0}.'''
... return num1 + num2
...
>>> help(yet_another_add) # doctest: +SKIP
Help on function yet_another_add in module __main__:
<BLANKLINE>
yet_another_add(num1, num2)
Perform addition.
Parameters
----------
num1, num2 : Numbers
Returns
-------
result : Number
result of num1 + num2
This one is good for {0}.
To work around it you could specify the docstring to be ``None``::
>>> @format_doc(None, 'addition')
... def last_add_i_swear(num1, num2):
... '''This one is good for {0}.'''
... return num1 + num2
...
>>> help(last_add_i_swear) # doctest: +SKIP
Help on function last_add_i_swear in module __main__:
<BLANKLINE>
last_add_i_swear(num1, num2)
This one is good for addition.
    Using it with ``None`` as the docstring makes it possible to apply the
    decorator twice to an object: first to parse the new docstring and then
    to parse the original docstring or the ``args`` and ``kwargs``.
"""
def set_docstring(obj):
if docstring is None:
# None means: use the objects __doc__
doc = obj.__doc__
# Delete documentation in this case so we don't end up with
# awkwardly self-inserted docs.
obj.__doc__ = None
elif isinstance(docstring, str):
# String: use the string that was given
doc = docstring
else:
# Something else: Use the __doc__ of this
doc = docstring.__doc__
if not doc:
# In case the docstring is empty it's probably not what was wanted.
raise ValueError(
"docstring must be a string or containing a "
"docstring that is not empty."
)
# If the original has a not-empty docstring append it to the format
# kwargs.
kwargs["__doc__"] = obj.__doc__ or ""
obj.__doc__ = doc.format(*args, **kwargs)
return obj
return set_docstring
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Utilities for generating new Python code at runtime."""
import inspect
import itertools
import keyword
import os
import re
import textwrap
from .introspection import find_current_module
__all__ = ["make_function_with_signature"]
_ARGNAME_RE = re.compile(r"^[A-Za-z][A-Za-z_]*")
"""
Regular expression used by `make_function_with_signature`, which limits
the allowed argument names for the created function. Only valid Python
variable names in the ASCII range and not beginning with '_' are
currently allowed.
"""
def make_function_with_signature(
func, args=(), kwargs={}, varargs=None, varkwargs=None, name=None
):
"""
Make a new function from an existing function but with the desired
signature.
The desired signature must of course be compatible with the arguments
actually accepted by the input function.
The ``args`` are strings that should be the names of the positional
arguments. ``kwargs`` can map names of keyword arguments to their
default values. It may be either a ``dict`` or a list of ``(keyword,
default)`` tuples.
If ``varargs`` is a string it is added to the positional arguments as
``*<varargs>``. Likewise ``varkwargs`` can be the name for a variable
keyword argument placeholder like ``**<varkwargs>``.
If not specified the name of the new function is taken from the original
function. Otherwise, the ``name`` argument can be used to specify a new
name.
Note, the names may only be valid Python variable names.
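
    Examples
    --------
    A minimal sketch (the names are illustrative)::

        def _impl(*args, **kwargs):
            return args, kwargs

        add = make_function_with_signature(
            _impl, args=("a", "b"), kwargs={"c": 0}, name="add"
        )
        add(1, 2, c=3)  # -> ((1, 2), {'c': 3})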
"""
pos_args = []
key_args = []
if isinstance(kwargs, dict):
iter_kwargs = kwargs.items()
else:
iter_kwargs = iter(kwargs)
# Check that all the argument names are valid
for item in itertools.chain(args, iter_kwargs):
if isinstance(item, tuple):
argname = item[0]
key_args.append(item)
else:
argname = item
pos_args.append(item)
if keyword.iskeyword(argname) or not _ARGNAME_RE.match(argname):
raise SyntaxError(f"invalid argument name: {argname}")
for item in (varargs, varkwargs):
if item is not None:
if keyword.iskeyword(item) or not _ARGNAME_RE.match(item):
raise SyntaxError(f"invalid argument name: {item}")
def_signature = [", ".join(pos_args)]
if varargs:
def_signature.append(f", *{varargs}")
call_signature = def_signature[:]
if name is None:
name = func.__name__
global_vars = {f"__{name}__func": func}
local_vars = {}
# Make local variables to handle setting the default args
for idx, item in enumerate(key_args):
key, value = item
default_var = f"_kwargs{idx}"
local_vars[default_var] = value
def_signature.append(f", {key}={default_var}")
call_signature.append(", {0}={0}".format(key))
if varkwargs:
def_signature.append(f", **{varkwargs}")
call_signature.append(f", **{varkwargs}")
def_signature = "".join(def_signature).lstrip(", ")
call_signature = "".join(call_signature).lstrip(", ")
mod = find_current_module(2)
frm = inspect.currentframe().f_back
if mod:
filename = mod.__file__
modname = mod.__name__
if filename.endswith(".pyc"):
filename = os.path.splitext(filename)[0] + ".py"
else:
filename = "<string>"
modname = "__main__"
# Subtract 2 from the line number since the length of the template itself
# is two lines. Therefore we have to subtract those off in order for the
# pointer in tracebacks from __{name}__func to point to the right spot.
lineno = frm.f_lineno - 2
    # The lstrip is in case there were *no* positional arguments (a rare case
    # in any context where this will actually be used)...
template = textwrap.dedent(
"""{0}\
def {name}({sig1}):
return __{name}__func({sig2})
""".format(
"\n" * lineno, name=name, sig1=def_signature, sig2=call_signature
)
)
code = compile(template, filename, "single")
eval(code, global_vars, local_vars)
new_func = local_vars[name]
new_func.__module__ = modname
new_func.__doc__ = func.__doc__
return new_func
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Utilities for console input and output.
"""
import codecs
import locale
import math
import multiprocessing
import os
import re
import struct
import sys
import threading
import time
# concurrent.futures imports moved inside functions using them to avoid
# import failure when running in pyodide/Emscripten
try:
import fcntl
import signal
import termios
_CAN_RESIZE_TERMINAL = True
except ImportError:
_CAN_RESIZE_TERMINAL = False
from astropy import conf
from .decorators import classproperty
from .misc import isiterable
__all__ = [
"isatty",
"color_print",
"human_time",
"human_file_size",
"ProgressBar",
"Spinner",
"print_code_line",
"ProgressBarOrSpinner",
"terminal_size",
]
_DEFAULT_ENCODING = "utf-8"
class _IPython:
"""Singleton class given access to IPython streams, etc."""
@classproperty
def get_ipython(cls):
try:
from IPython import get_ipython
except ImportError:
pass
return get_ipython
@classproperty
def OutStream(cls):
if not hasattr(cls, "_OutStream"):
cls._OutStream = None
try:
cls.get_ipython()
except NameError:
return None
try:
from ipykernel.iostream import OutStream
except ImportError:
try:
from IPython.zmq.iostream import OutStream
except ImportError:
from IPython import version_info
if version_info[0] >= 4:
return None
try:
from IPython.kernel.zmq.iostream import OutStream
except ImportError:
return None
cls._OutStream = OutStream
return cls._OutStream
@classproperty
def ipyio(cls):
if not hasattr(cls, "_ipyio"):
try:
from IPython.utils import io
except ImportError:
cls._ipyio = None
else:
cls._ipyio = io
return cls._ipyio
@classmethod
def get_stream(cls, stream):
return getattr(cls.ipyio, stream)
def _get_stdout(stderr=False):
"""
This utility function contains the logic to determine what streams to use
by default for standard out/err.
Typically this will just return `sys.stdout`, but it contains additional
logic for use in IPython on Windows to determine the correct stream to use
    (usually ``IPython.utils.io.stdout`` but only if sys.stdout is a TTY).
"""
if stderr:
stream = "stderr"
else:
stream = "stdout"
sys_stream = getattr(sys, stream)
return sys_stream
def isatty(file):
"""
Returns `True` if ``file`` is a tty.
Most built-in Python file-like objects have an `isatty` member,
but some user-defined types may not, so this assumes those are not
ttys.
"""
if (
multiprocessing.current_process().name != "MainProcess"
or threading.current_thread().name != "MainThread"
):
return False
if hasattr(file, "isatty"):
return file.isatty()
if _IPython.OutStream is None or (not isinstance(file, _IPython.OutStream)):
return False
# File is an IPython OutStream. Check whether:
# - File name is 'stdout'; or
# - File wraps a Console
if getattr(file, "name", None) == "stdout":
return True
if hasattr(file, "stream"):
        # FIXME: pyreadline has had no new release since 2015; drop it when
        # the IPython minversion is 5.x.
# On Windows, in IPython 2 the standard I/O streams will wrap
# pyreadline.Console objects if pyreadline is available; this should
# be considered a TTY.
try:
from pyreadline.console import Console as PyreadlineConsole
except ImportError:
return False
return isinstance(file.stream, PyreadlineConsole)
return False
def terminal_size(file=None):
"""
Returns a tuple (height, width) containing the height and width of
the terminal.
    This function will look for the width and height in multiple areas
    before falling back on the width and height in astropy's
    configuration.
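
    Examples
    --------
    ::

        height, width = terminal_size()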
"""
if file is None:
file = _get_stdout()
try:
s = struct.pack("HHHH", 0, 0, 0, 0)
x = fcntl.ioctl(file, termios.TIOCGWINSZ, s)
(lines, width, xpixels, ypixels) = struct.unpack("HHHH", x)
if lines > 12:
lines -= 6
if width > 10:
width -= 1
if lines <= 0 or width <= 0:
raise Exception("unable to get terminal size")
return (lines, width)
except Exception:
try:
# see if POSIX standard variables will work
return (int(os.environ.get("LINES")), int(os.environ.get("COLUMNS")))
except TypeError:
# fall back on configuration variables, or if not
# set, (25, 80)
lines = conf.max_lines
width = conf.max_width
if lines is None:
lines = 25
if width is None:
width = 80
return lines, width
def _color_text(text, color):
"""Returns a string wrapped in ANSI color codes for coloring the text in a terminal.
::
colored_text = color_text('Here is a message', 'blue')
    This won't actually affect the text until it is printed to the
    terminal.
Parameters
----------
text : str
The string to return, bounded by the color codes.
color : str
An ANSI terminal color name. Must be one of:
black, red, green, brown, blue, magenta, cyan, lightgrey,
default, darkgrey, lightred, lightgreen, yellow, lightblue,
lightmagenta, lightcyan, white, or '' (the empty string).
"""
color_mapping = {
"black": "0;30",
"red": "0;31",
"green": "0;32",
"brown": "0;33",
"blue": "0;34",
"magenta": "0;35",
"cyan": "0;36",
"lightgrey": "0;37",
"default": "0;39",
"darkgrey": "1;30",
"lightred": "1;31",
"lightgreen": "1;32",
"yellow": "1;33",
"lightblue": "1;34",
"lightmagenta": "1;35",
"lightcyan": "1;36",
"white": "1;37",
}
if sys.platform == "win32" and _IPython.OutStream is None:
# On Windows do not colorize text unless in IPython
return text
color_code = color_mapping.get(color, "0;39")
return f"\033[{color_code}m{text}\033[0m"
def _decode_preferred_encoding(s):
"""Decode the supplied byte string using the preferred encoding
for the locale (`locale.getpreferredencoding`) or, if the default encoding
is invalid, fall back first on utf-8, then on latin-1 if the message cannot
be decoded with utf-8.
"""
enc = locale.getpreferredencoding()
try:
try:
return s.decode(enc)
except LookupError:
enc = _DEFAULT_ENCODING
return s.decode(enc)
except UnicodeDecodeError:
return s.decode("latin-1")
def _write_with_fallback(s, write, fileobj):
"""Write the supplied string with the given write function like
``write(s)``, but use a writer for the locale's preferred encoding in case
    of a UnicodeEncodeError. Failing that, attempt to write with 'utf-8' or
'latin-1'.
"""
try:
write(s)
return write
except UnicodeEncodeError:
# Let's try the next approach...
pass
enc = locale.getpreferredencoding()
try:
Writer = codecs.getwriter(enc)
except LookupError:
Writer = codecs.getwriter(_DEFAULT_ENCODING)
f = Writer(fileobj)
write = f.write
try:
write(s)
return write
except UnicodeEncodeError:
Writer = codecs.getwriter("latin-1")
f = Writer(fileobj)
write = f.write
# If this doesn't work let the exception bubble up; I'm out of ideas
write(s)
return write
def color_print(*args, end="\n", **kwargs):
"""
    Prints colors and styles to the terminal using ANSI escape
    sequences.
::
color_print('This is the color ', 'default', 'GREEN', 'green')
Parameters
----------
positional args : str
The positional arguments come in pairs (*msg*, *color*), where
*msg* is the string to display and *color* is the color to
display it in.
*color* is an ANSI terminal color name. Must be one of:
black, red, green, brown, blue, magenta, cyan, lightgrey,
default, darkgrey, lightred, lightgreen, yellow, lightblue,
lightmagenta, lightcyan, white, or '' (the empty string).
file : writable file-like, optional
Where to write to. Defaults to `sys.stdout`. If file is not
a tty (as determined by calling its `isatty` member, if one
exists), no coloring will be included.
end : str, optional
The ending of the message. Defaults to ``\\n``. The end will
be printed after resetting any color or font state.
"""
file = kwargs.get("file", _get_stdout())
write = file.write
if isatty(file) and conf.use_color:
for i in range(0, len(args), 2):
msg = args[i]
if i + 1 == len(args):
color = ""
else:
color = args[i + 1]
if color:
msg = _color_text(msg, color)
# Some file objects support writing unicode sensibly on some Python
# versions; if this fails try creating a writer using the locale's
# preferred encoding. If that fails too give up.
write = _write_with_fallback(msg, write, file)
write(end)
else:
for i in range(0, len(args), 2):
msg = args[i]
write(msg)
write(end)
def strip_ansi_codes(s):
"""
Remove ANSI color codes from the string.
"""
return re.sub("\033\\[([0-9]+)(;[0-9]+)*m", "", s)
def human_time(seconds):
"""
Returns a human-friendly time string that is always exactly 6
characters long.
Depending on the number of seconds given, can be one of::
1w 3d
2d 4h
1h 5m
1m 4s
15s
Will be in color if console coloring is turned on.
Parameters
----------
seconds : int
The number of seconds to represent
Returns
-------
time : str
A human-friendly representation of the given number of seconds
that is always exactly 6 characters.
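
    Examples
    --------
    ::

        >>> human_time(90)
        ' 1m30s'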
"""
units = [
("y", 60 * 60 * 24 * 7 * 52),
("w", 60 * 60 * 24 * 7),
("d", 60 * 60 * 24),
("h", 60 * 60),
("m", 60),
("s", 1),
]
seconds = int(seconds)
if seconds < 60:
return f" {seconds:2d}s"
for i in range(len(units) - 1):
unit1, limit1 = units[i]
unit2, limit2 = units[i + 1]
if seconds >= limit1:
return "{:2d}{}{:2d}{}".format(
seconds // limit1, unit1, (seconds % limit1) // limit2, unit2
)
return " ~inf"
def human_file_size(size):
"""
Returns a human-friendly string representing a file size
that is 2-4 characters long.
For example, depending on the number of bytes given, can be one
of::
256b
64k
1.1G
Parameters
----------
size : int
The size of the file (in bytes)
Returns
-------
size : str
A human-friendly representation of the size of the file
"""
if hasattr(size, "unit"):
# Import units only if necessary because the import takes a
# significant time [#4649]
from astropy import units as u
size = u.Quantity(size, u.byte).value
suffixes = " kMGTPEZY"
if size == 0:
num_scale = 0
else:
num_scale = int(math.floor(math.log(size) / math.log(1000)))
if num_scale > 7:
suffix = "?"
else:
suffix = suffixes[num_scale]
num_scale = int(math.pow(1000, num_scale))
value = size / num_scale
str_value = str(value)
if suffix == " ":
str_value = str_value[: str_value.index(".")]
elif str_value[2] == ".":
str_value = str_value[:2]
else:
str_value = str_value[:3]
return f"{str_value:>3s}{suffix}"
class _mapfunc:
"""
A function wrapper to support ProgressBar.map().
"""
def __init__(self, func):
self._func = func
def __call__(self, i_arg):
i, arg = i_arg
return i, self._func(arg)
class ProgressBar:
"""
A class to display a progress bar in the terminal.
It is designed to be used either with the ``with`` statement::
with ProgressBar(len(items)) as bar:
for item in enumerate(items):
bar.update()
or as a generator::
for item in ProgressBar(items):
item.process()
"""
def __init__(self, total_or_items, ipython_widget=False, file=None):
"""
Parameters
----------
total_or_items : int or sequence
If an int, the number of increments in the process being
tracked. If a sequence, the items to iterate over.
ipython_widget : bool, optional
If `True`, the progress bar will display as an IPython
notebook widget.
file : writable file-like, optional
The file to write the progress bar to. Defaults to
`sys.stdout`. If ``file`` is not a tty (as determined by
calling its `isatty` member, if any, or special case hacks
to detect the IPython console), the progress bar will be
completely silent.
"""
if file is None:
file = _get_stdout()
if not ipython_widget and not isatty(file):
self.update = self._silent_update
self._silent = True
else:
self._silent = False
if isiterable(total_or_items):
self._items = iter(total_or_items)
self._total = len(total_or_items)
else:
try:
self._total = int(total_or_items)
except TypeError:
raise TypeError("First argument must be int or sequence")
else:
self._items = iter(range(self._total))
self._file = file
self._start_time = time.time()
self._human_total = human_file_size(self._total)
self._ipython_widget = ipython_widget
self._signal_set = False
if not ipython_widget:
self._should_handle_resize = _CAN_RESIZE_TERMINAL and self._file.isatty()
self._handle_resize()
if self._should_handle_resize:
signal.signal(signal.SIGWINCH, self._handle_resize)
self._signal_set = True
self.update(0)
def _handle_resize(self, signum=None, frame=None):
terminal_width = terminal_size(self._file)[1]
self._bar_length = terminal_width - 37
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if not self._silent:
if exc_type is None:
self.update(self._total)
self._file.write("\n")
self._file.flush()
if self._signal_set:
signal.signal(signal.SIGWINCH, signal.SIG_DFL)
def __iter__(self):
return self
def __next__(self):
try:
rv = next(self._items)
except StopIteration:
self.__exit__(None, None, None)
raise
else:
self.update()
return rv
def update(self, value=None):
"""
Update progress bar via the console or notebook accordingly.
"""
# Update self.value
if value is None:
value = self._current_value + 1
self._current_value = value
# Choose the appropriate environment
if self._ipython_widget:
self._update_ipython_widget(value)
else:
self._update_console(value)
def _update_console(self, value=None):
"""
Update the progress bar to the given value (out of the total
given to the constructor).
"""
if self._total == 0:
frac = 1.0
else:
frac = float(value) / float(self._total)
file = self._file
write = file.write
if frac > 1:
bar_fill = int(self._bar_length)
else:
bar_fill = int(float(self._bar_length) * frac)
write("\r|")
color_print("=" * bar_fill, "blue", file=file, end="")
if bar_fill < self._bar_length:
color_print(">", "green", file=file, end="")
write("-" * (self._bar_length - bar_fill - 1))
write("|")
if value >= self._total:
t = time.time() - self._start_time
prefix = " "
elif value <= 0:
t = None
prefix = ""
else:
t = ((time.time() - self._start_time) * (1.0 - frac)) / frac
prefix = " ETA "
write(f" {human_file_size(value):>4s}/{self._human_total:>4s}")
write(f" ({frac:>6.2%})")
write(prefix)
if t is not None:
write(human_time(t))
self._file.flush()
def _update_ipython_widget(self, value=None):
"""
Update the progress bar to the given value (out of a total
given to the constructor).
This method is for use in the IPython notebook 2+.
"""
# Create and display an empty progress bar widget,
# if none exists.
if not hasattr(self, "_widget"):
# Import only if an IPython widget, i.e., widget in iPython NB
from IPython import version_info
if version_info[0] < 4:
from IPython.html import widgets
self._widget = widgets.FloatProgressWidget()
else:
_IPython.get_ipython()
from ipywidgets import widgets
self._widget = widgets.FloatProgress()
from IPython.display import display
display(self._widget)
self._widget.value = 0
# Calculate percent completion, and update progress bar
frac = value / self._total
self._widget.value = frac * 100
self._widget.description = f" ({frac:>6.2%})"
def _silent_update(self, value=None):
pass
@classmethod
def map(
cls,
function,
items,
multiprocess=False,
file=None,
step=100,
ipython_widget=False,
multiprocessing_start_method=None,
):
"""Map function over items while displaying a progress bar with percentage complete.
The map operation may run in arbitrary order on the items, but the results are
returned in sequential order.
::
def work(i):
print(i)
ProgressBar.map(work, range(50))
Parameters
----------
function : function
Function to call for each step
items : sequence
Sequence where each element is a tuple of arguments to pass to
*function*.
multiprocess : bool, int, optional
If `True`, use the `multiprocessing` module to distribute each task
to a different processor core. If a number greater than 1, then use
that number of cores.
ipython_widget : bool, optional
If `True`, the progress bar will display as an IPython
notebook widget.
file : writable file-like, optional
The file to write the progress bar to. Defaults to
`sys.stdout`. If ``file`` is not a tty (as determined by
calling its `isatty` member, if any), the progress bar will
be completely silent.
step : int, optional
Update the progress bar at least every *step* steps (default: 100).
If ``multiprocess`` is `True`, this will affect the size
of the chunks of ``items`` that are submitted as separate tasks
to the process pool. A large step size may make the job
complete faster if ``items`` is very long.
multiprocessing_start_method : str, optional
Useful primarily for testing; if in doubt leave it as the default.
When using multiprocessing, certain anomalies occur when starting
processes with the "spawn" method (the only option on Windows);
other anomalies occur with the "fork" method (the default on
Linux).
"""
if multiprocess:
function = _mapfunc(function)
items = list(enumerate(items))
results = cls.map_unordered(
function,
items,
multiprocess=multiprocess,
file=file,
step=step,
ipython_widget=ipython_widget,
multiprocessing_start_method=multiprocessing_start_method,
)
if multiprocess:
_, results = zip(*sorted(results))
results = list(results)
return results
@classmethod
def map_unordered(
cls,
function,
items,
multiprocess=False,
file=None,
step=100,
ipython_widget=False,
multiprocessing_start_method=None,
):
"""Map function over items, reporting the progress.
Does a `map` operation while displaying a progress bar with
percentage complete. The map operation may run in arbitrary order
on the items, and the results may be returned in arbitrary order.
::
def work(i):
print(i)
ProgressBar.map_unordered(work, range(50))
Parameters
----------
function : function
Function to call for each step
items : sequence
Sequence where each element is a tuple of arguments to pass to
*function*.
multiprocess : bool, int, optional
If `True`, use the `multiprocessing` module to distribute each task
to a different processor core. If a number greater than 1, then use
that number of cores.
ipython_widget : bool, optional
If `True`, the progress bar will display as an IPython
notebook widget.
file : writable file-like, optional
The file to write the progress bar to. Defaults to
`sys.stdout`. If ``file`` is not a tty (as determined by
calling its `isatty` member, if any), the progress bar will
be completely silent.
step : int, optional
Update the progress bar at least every *step* steps (default: 100).
If ``multiprocess`` is `True`, this will affect the size
of the chunks of ``items`` that are submitted as separate tasks
to the process pool. A large step size may make the job
complete faster if ``items`` is very long.
multiprocessing_start_method : str, optional
Useful primarily for testing; if in doubt leave it as the default.
When using multiprocessing, certain anomalies occur when starting
processes with the "spawn" method (the only option on Windows);
other anomalies occur with the "fork" method (the default on
Linux).
"""
# concurrent.futures import here to avoid import failure when running
# in pyodide/Emscripten
from concurrent.futures import ProcessPoolExecutor, as_completed
results = []
if file is None:
file = _get_stdout()
with cls(len(items), ipython_widget=ipython_widget, file=file) as bar:
if bar._ipython_widget:
chunksize = step
else:
default_step = max(int(float(len(items)) / bar._bar_length), 1)
chunksize = min(default_step, step)
if not multiprocess or multiprocess < 1:
for i, item in enumerate(items):
results.append(function(item))
if (i % chunksize) == 0:
bar.update(i)
else:
ctx = multiprocessing.get_context(multiprocessing_start_method)
kwargs = dict(mp_context=ctx)
with ProcessPoolExecutor(
max_workers=(
int(multiprocess) if multiprocess is not True else None
),
**kwargs,
) as p:
for i, f in enumerate(
as_completed(p.submit(function, item) for item in items)
):
bar.update(i)
results.append(f.result())
return results
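# Usage sketch for ``ProgressBar`` (hypothetical demo): both the context
# manager form and the iterator form shown in the class docstring.
def _demo_progress_bar(items):  # pragma: no cover
    with ProgressBar(len(items)) as bar:
        for _ in items:
            bar.update()
    for item in ProgressBar(items):
        pass  # process item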
class Spinner:
"""
A class to display a spinner in the terminal.
It is designed to be used with the ``with`` statement::
with Spinner("Reticulating splines", "green") as s:
for item in enumerate(items):
s.update()
"""
_default_unicode_chars = "◓◑◒◐"
_default_ascii_chars = "-/|\\"
def __init__(self, msg, color="default", file=None, step=1, chars=None):
"""
Parameters
----------
msg : str
The message to print
color : str, optional
An ANSI terminal color name. Must be one of: black, red,
green, brown, blue, magenta, cyan, lightgrey, default,
darkgrey, lightred, lightgreen, yellow, lightblue,
lightmagenta, lightcyan, white.
file : writable file-like, optional
The file to write the spinner to. Defaults to
`sys.stdout`. If ``file`` is not a tty (as determined by
calling its `isatty` member, if any, or special case hacks
to detect the IPython console), the spinner will be
completely silent.
step : int, optional
Only update the spinner every *step* steps
chars : str, optional
The character sequence to use for the spinner
"""
if file is None:
file = _get_stdout()
self._msg = msg
self._color = color
self._file = file
self._step = step
if chars is None:
if conf.unicode_output:
chars = self._default_unicode_chars
else:
chars = self._default_ascii_chars
self._chars = chars
self._silent = not isatty(file)
if self._silent:
self._iter = self._silent_iterator()
else:
self._iter = self._iterator()
def _iterator(self):
chars = self._chars
index = 0
file = self._file
write = file.write
flush = file.flush
try_fallback = True
while True:
write("\r")
color_print(self._msg, self._color, file=file, end="")
write(" ")
try:
if try_fallback:
write = _write_with_fallback(chars[index], write, file)
else:
write(chars[index])
except UnicodeError:
# If even _write_with_fallback failed for any reason just give
# up on trying to use the unicode characters
chars = self._default_ascii_chars
write(chars[index])
try_fallback = False # No good will come of using this again
flush()
yield
for i in range(self._step):
yield
index = (index + 1) % len(chars)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
file = self._file
write = file.write
flush = file.flush
if not self._silent:
write("\r")
color_print(self._msg, self._color, file=file, end="")
if exc_type is None:
color_print(" [Done]", "green", file=file)
else:
color_print(" [Failed]", "red", file=file)
flush()
def __iter__(self):
return self
def __next__(self):
next(self._iter)
def update(self, value=None):
"""Update the spin wheel in the terminal.
Parameters
----------
value : int, optional
Ignored (present just for compatibility with `ProgressBar.update`).
"""
next(self)
def _silent_iterator(self):
color_print(self._msg, self._color, file=self._file, end="")
self._file.flush()
while True:
yield
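# Usage sketch for ``Spinner`` (hypothetical demo), mirroring the class
# docstring example.
def _demo_spinner(items):  # pragma: no cover
    with Spinner("Reticulating splines", "green") as s:
        for _ in items:
            s.update()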
class ProgressBarOrSpinner:
"""
A class that displays either a `ProgressBar` or `Spinner`
depending on whether the total size of the operation is
known or not.
It is designed to be used with the ``with`` statement::
if file.has_length():
length = file.get_length()
else:
length = None
bytes_read = 0
with ProgressBarOrSpinner(length) as bar:
while file.read(blocksize):
bytes_read += blocksize
bar.update(bytes_read)
"""
def __init__(self, total, msg, color="default", file=None):
"""
Parameters
----------
total : int or None
If an int, the number of increments in the process being
tracked and a `ProgressBar` is displayed. If `None`, a
`Spinner` is displayed.
msg : str
The message to display above the `ProgressBar` or
alongside the `Spinner`.
color : str, optional
The color of ``msg``, if any. Must be an ANSI terminal
color name. Must be one of: black, red, green, brown,
blue, magenta, cyan, lightgrey, default, darkgrey,
lightred, lightgreen, yellow, lightblue, lightmagenta,
lightcyan, white.
file : writable file-like, optional
The file to write to. Defaults to `sys.stdout`. If
``file`` is not a tty (as determined by calling its `isatty`
member, if any), only ``msg`` will be displayed: the
`ProgressBar` or `Spinner` will be silent.
"""
if file is None:
file = _get_stdout()
if total is None or not isatty(file):
self._is_spinner = True
self._obj = Spinner(msg, color=color, file=file)
else:
self._is_spinner = False
color_print(msg, color, file=file)
self._obj = ProgressBar(total, file=file)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
return self._obj.__exit__(exc_type, exc_value, traceback)
def update(self, value):
"""
Update the progress bar to the given value (out of the total
given to the constructor).
"""
self._obj.update(value)
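# Usage sketch for ``ProgressBarOrSpinner`` (hypothetical demo): pass the
# total when it is known, or None to fall back to a spinner.
def _demo_progress_bar_or_spinner(total, blocks):  # pragma: no cover
    nbytes = 0
    with ProgressBarOrSpinner(total, "Downloading") as bar:
        for block in blocks:
            nbytes += len(block)
            bar.update(nbytes)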
def print_code_line(line, col=None, file=None, tabwidth=8, width=70):
"""
Prints a line of source code, highlighting a particular character
position in the line. Useful for displaying the context of error
messages.
If the line is more than ``width`` characters, the line is truncated
accordingly and '…' characters are inserted at the front and/or
end.
It looks like this::
there_is_a_syntax_error_here :
^
Parameters
----------
line : unicode
The line of code to display
col : int, optional
The character in the line to highlight. ``col`` must be less
than ``len(line)``.
file : writable file-like, optional
Where to write to. Defaults to `sys.stdout`.
tabwidth : int, optional
The number of spaces per tab (``'\\t'``) character. Default
is 8. All tabs will be converted to spaces to ensure that the
caret lines up with the correct column.
width : int, optional
The width of the display, beyond which the line will be
truncated. Defaults to 70 (this matches the default in the
standard library's `textwrap` module).
"""
if file is None:
file = _get_stdout()
if conf.unicode_output:
ellipsis = "…"
else:
ellipsis = "..."
write = file.write
if col is not None:
if col >= len(line):
raise ValueError("col must be less the the line length.")
ntabs = line[:col].count("\t")
col += ntabs * (tabwidth - 1)
line = line.rstrip("\n")
line = line.replace("\t", " " * tabwidth)
if col is not None and col > width:
new_col = min(width // 2, len(line) - col)
offset = col - new_col
line = line[offset + len(ellipsis) :]
width -= len(ellipsis)
new_col = col
col -= offset
color_print(ellipsis, "darkgrey", file=file, end="")
if len(line) > width:
write(line[: width - len(ellipsis)])
color_print(ellipsis, "darkgrey", file=file)
else:
write(line)
write("\n")
if col is not None:
write(" " * col)
color_print("^", "red", file=file)
# The following four Getch* classes implement unbuffered character reading from
# stdin on Windows, linux, MacOSX. This is taken directly from ActiveState
# Code Recipes:
# http://code.activestate.com/recipes/134892-getch-like-unbuffered-character-reading-from-stdin/
#
class Getch:
"""Get a single character from standard input without screen echo.
Returns
-------
char : str (one character)
"""
def __init__(self):
try:
self.impl = _GetchWindows()
except ImportError:
try:
self.impl = _GetchMacCarbon()
except (ImportError, AttributeError):
self.impl = _GetchUnix()
def __call__(self):
return self.impl()
class _GetchUnix:
def __init__(self):
import sys # noqa: F401
# import termios now or else you'll get the Unix
# version on the Mac
import termios # noqa: F401
import tty # noqa: F401
def __call__(self):
import sys
import termios
import tty
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class _GetchWindows:
def __init__(self):
import msvcrt # noqa: F401
def __call__(self):
import msvcrt
return msvcrt.getch()
class _GetchMacCarbon:
"""
A function which returns the current ASCII key that is down;
if no ASCII key is down, the null string is returned. The
page http://www.mactech.com/macintosh-c/chap02-1.html was
very helpful in figuring out how to do this.
"""
def __init__(self):
import Carbon
Carbon.Evt # see if it has this (in Unix, it doesn't)
def __call__(self):
import Carbon
if Carbon.Evt.EventAvail(0x0008)[0] == 0: # 0x0008 is the keyDownMask
return ""
else:
#
# The event contains the following info:
# (what,msg,when,where,mod)=Carbon.Evt.GetNextEvent(0x0008)[1]
#
# The message (msg) contains the ASCII char which is
# extracted with the 0x000000FF charCodeMask; this
# number is converted to an ASCII character with chr() and
# returned
#
(what, msg, when, where, mod) = Carbon.Evt.GetNextEvent(0x0008)[1]
return chr(msg & 0x000000FF)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Functions related to Python runtime introspection."""
import collections
import importlib
import inspect
import os
import sys
import types
from importlib import metadata
from packaging.version import Version
from astropy.utils.decorators import deprecated_renamed_argument
__all__ = ["resolve_name", "minversion", "find_current_module", "isinstancemethod"]
__doctest_skip__ = ["find_current_module"]
if sys.version_info[:2] >= (3, 10):
from importlib.metadata import packages_distributions
else:
def packages_distributions():
"""
Return a mapping of top-level packages to their distributions.
Note: copied from https://github.com/python/importlib_metadata/pull/287.
"""
pkg_to_dist = collections.defaultdict(list)
for dist in metadata.distributions():
for pkg in (dist.read_text("top_level.txt") or "").split():
pkg_to_dist[pkg].append(dist.metadata["Name"])
return dict(pkg_to_dist)
def resolve_name(name, *additional_parts):
"""Resolve a name like ``module.object`` to an object and return it.
This ends up working like ``from module import object`` but is easier
to deal with than the `__import__` builtin and supports digging into
submodules.
Parameters
----------
name : `str`
A dotted path to a Python object--that is, the name of a function,
class, or other object in a module with the full path to that module,
including parent modules, separated by dots. Also known as the fully
qualified name of the object.
additional_parts : iterable, optional
If more than one positional argument is given, the additional
arguments are automatically dotted together with ``name``.
Examples
--------
>>> resolve_name('astropy.utils.introspection.resolve_name')
<function resolve_name at 0x...>
>>> resolve_name('astropy', 'utils', 'introspection', 'resolve_name')
<function resolve_name at 0x...>
Raises
------
`ImportError`
If the module or named object is not found.
"""
additional_parts = ".".join(additional_parts)
if additional_parts:
name = name + "." + additional_parts
parts = name.split(".")
if len(parts) == 1:
# No dots in the name--just a straight up module import
cursor = 1
fromlist = []
else:
cursor = len(parts) - 1
fromlist = [parts[-1]]
module_name = parts[:cursor]
while cursor > 0:
try:
ret = __import__(".".join(module_name), fromlist=fromlist)
break
except ImportError:
if cursor == 0:
raise
cursor -= 1
module_name = parts[:cursor]
fromlist = [parts[cursor]]
ret = ""
for part in parts[cursor:]:
try:
ret = getattr(ret, part)
except AttributeError:
raise ImportError(name)
return ret
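# Usage sketch (illustrative): the two call styles from the docstring resolve
# to the same object.
def _demo_resolve_name():  # pragma: no cover
    f1 = resolve_name("astropy.utils.introspection.resolve_name")
    f2 = resolve_name("astropy", "utils", "introspection", "resolve_name")
    assert f1 is f2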
@deprecated_renamed_argument("version_path", None, "5.0")
def minversion(module, version, inclusive=True, version_path="__version__"):
"""
Returns `True` if the specified Python module satisfies a minimum version
requirement, and `False` if not.
.. deprecated::
``version_path`` is not used anymore and is deprecated in
``astropy`` 5.0.
Parameters
----------
module : module or `str`
An imported module of which to check the version, or the name of
that module (in which case an import of that module is attempted--
if this fails `False` is returned).
version : `str`
The version as a string that this module must have at a minimum (e.g.
``'0.12'``).
inclusive : `bool`
The specified version meets the requirement inclusively (i.e. ``>=``)
as opposed to strictly greater than (default: `True`).
Examples
--------
>>> import astropy
>>> minversion(astropy, '0.4.4')
True
"""
if isinstance(module, types.ModuleType):
module_name = module.__name__
module_version = getattr(module, "__version__", None)
elif isinstance(module, str):
module_name = module
module_version = None
try:
module = resolve_name(module_name)
except ImportError:
return False
else:
raise ValueError(
"module argument must be an actual imported "
"module, or the import name of the module; "
f"got {repr(module)}"
)
if module_version is None:
try:
module_version = metadata.version(module_name)
except metadata.PackageNotFoundError:
# Maybe the distribution name is different from package name.
# Calling packages_distributions is costly so we do it only
# if necessary, as only a few packages don't have the same
# distribution name.
dist_names = packages_distributions()
module_version = metadata.version(dist_names[module_name][0])
if inclusive:
return Version(module_version) >= Version(version)
else:
return Version(module_version) > Version(version)
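# Usage sketch (illustrative): either an imported module object or its import
# name may be passed.
def _demo_minversion():  # pragma: no cover
    import astropy
    assert minversion(astropy, "0.4.4")
    assert minversion("numpy", "1.0")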
def find_current_module(depth=1, finddiff=False):
"""
Determines the module/package from which this function is called.
This function has two modes, determined by the ``finddiff`` option. It
will either simply go the requested number of frames up the call
stack (if ``finddiff`` is False), or it will go up the call stack until
it reaches a module that is *not* in a specified set.
Parameters
----------
depth : int
Specifies how far back to go in the call stack (0-indexed, so that
passing in 0 gives back `astropy.utils.introspection`).
finddiff : bool or list
If False, the returned ``mod`` will just be ``depth`` frames up from
the current frame. Otherwise, the function will start at a frame
``depth`` up from current, and continue up the call stack to the
first module that is *different* from those in the provided list.
In this case, ``finddiff`` can be a list of modules or modules
names. Alternatively, it can be True, which will use the module
``depth`` call stack frames up as the module the returned module
must be different from.
Returns
-------
mod : module or None
The module object or None if the package cannot be found. The name of
the module is available as the ``__name__`` attribute of the returned
object (if it isn't None).
Raises
------
ValueError
If ``finddiff`` is a list with an invalid entry.
Examples
--------
The examples below assume that there are two modules in a package named
``pkg``. ``mod1.py``::
def find1():
from astropy.utils import find_current_module
print(find_current_module(1).__name__)
def find2():
from astropy.utils import find_current_module
cmod = find_current_module(2)
if cmod is None:
print('None')
else:
print(cmod.__name__)
def find_diff():
from astropy.utils import find_current_module
print(find_current_module(0, True).__name__)
``mod2.py``::
def find():
from .mod1 import find2
find2()
With these modules in place, the following occurs::
>>> from pkg import mod1, mod2
>>> from astropy.utils import find_current_module
>>> mod1.find1()
pkg.mod1
>>> mod1.find2()
None
>>> mod2.find()
pkg.mod2
>>> find_current_module(0)
<module 'astropy.utils.introspection' from 'astropy/utils/introspection.py'>
>>> mod1.find_diff()
pkg.mod1
"""
frm = inspect.currentframe()
for i in range(depth):
frm = frm.f_back
if frm is None:
return None
if finddiff:
currmod = _get_module_from_frame(frm)
if finddiff is True:
diffmods = [currmod]
else:
diffmods = []
for fd in finddiff:
if inspect.ismodule(fd):
diffmods.append(fd)
elif isinstance(fd, str):
diffmods.append(importlib.import_module(fd))
elif fd is True:
diffmods.append(currmod)
else:
raise ValueError("invalid entry in finddiff")
while frm:
frmb = frm.f_back
modb = _get_module_from_frame(frmb)
if modb not in diffmods:
return modb
frm = frmb
else:
return _get_module_from_frame(frm)
def _get_module_from_frame(frm):
"""Uses inspect.getmodule() to get the module that the current frame's
code is running in.
However, this does not work reliably for code imported from a zip file,
so this provides a fallback mechanism for that case which is less
reliable in general, but more reliable than inspect.getmodule() for this
particular case.
"""
mod = inspect.getmodule(frm)
if mod is not None:
return mod
# Check to see if we're importing from a bundle file. First ensure that
# __file__ is available in globals; this is cheap to check to bail out
# immediately if this fails
if "__file__" in frm.f_globals and "__name__" in frm.f_globals:
filename = frm.f_globals["__file__"]
# Using __file__ from the frame's globals and getting it into the form
# of an absolute path name with .py at the end works pretty well for
# looking up the module using the same means as inspect.getmodule
if filename[-4:].lower() in (".pyc", ".pyo"):
filename = filename[:-4] + ".py"
filename = os.path.realpath(os.path.abspath(filename))
if filename in inspect.modulesbyfile:
return sys.modules.get(inspect.modulesbyfile[filename])
# On Windows, inspect.modulesbyfile appears to have filenames stored
# in lowercase, so we check for this case too.
if filename.lower() in inspect.modulesbyfile:
return sys.modules.get(inspect.modulesbyfile[filename.lower()])
# Otherwise there are still some even trickier things that might be possible
# to track down the module, but we'll leave those out unless we find a case
# where it's really necessary. So return None if the module is not found.
return None
def find_mod_objs(modname, onlylocals=False):
"""Returns all the public attributes of a module referenced by name.
.. note::
The returned list does *not* include subpackages or modules of
``modname``, nor does it include private attributes (those that
begin with '_' or are not in `__all__`).
Parameters
----------
modname : str
The name of the module to search.
onlylocals : bool or list of str
If `True`, only attributes that are either members of ``modname`` OR
one of its modules or subpackages will be included. If it is a list
of strings, those specify the possible packages that will be
considered "local".
Returns
-------
localnames : list of str
A list of the names of the attributes as they are named in the
module ``modname``.
fqnames : list of str
A list of the full qualified names of the attributes (e.g.,
``astropy.utils.introspection.find_mod_objs``). For attributes that are
simple variables, this is based on the local name, but for functions or
classes it can be different if they are actually defined elsewhere and
just referenced in ``modname``.
objs : list of objects
A list of the actual attributes themselves (in the same order as
the other returned lists)
"""
mod = resolve_name(modname)
if hasattr(mod, "__all__"):
pkgitems = [(k, mod.__dict__[k]) for k in mod.__all__]
else:
pkgitems = [(k, mod.__dict__[k]) for k in dir(mod) if k[0] != "_"]
# filter out modules and pull the names and objs out
ismodule = inspect.ismodule
localnames = [k for k, v in pkgitems if not ismodule(v)]
objs = [v for k, v in pkgitems if not ismodule(v)]
# fully qualified names can be determined from the object's module
fqnames = []
for obj, lnm in zip(objs, localnames):
if hasattr(obj, "__module__") and hasattr(obj, "__name__"):
fqnames.append(obj.__module__ + "." + obj.__name__)
else:
fqnames.append(modname + "." + lnm)
if onlylocals:
if onlylocals is True:
onlylocals = [modname]
valids = [any(fqn.startswith(nm) for nm in onlylocals) for fqn in fqnames]
localnames = [e for i, e in enumerate(localnames) if valids[i]]
fqnames = [e for i, e in enumerate(fqnames) if valids[i]]
objs = [e for i, e in enumerate(objs) if valids[i]]
return localnames, fqnames, objs
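# Usage sketch (illustrative): the three returned lists are parallel.
def _demo_find_mod_objs():  # pragma: no cover
    localnames, fqnames, objs = find_mod_objs("astropy.utils.introspection")
    assert len(localnames) == len(fqnames) == len(objs)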
# Note: I would have preferred to call this is_instancemethod, but this naming is
# for consistency with other functions in the `inspect` module
def isinstancemethod(cls, obj):
"""
Returns `True` if the given object is an instance method of the class
it is defined on (as opposed to a `staticmethod` or a `classmethod`).
This requires both the class the object is a member of as well as the
object itself in order to make this determination.
Parameters
----------
cls : `type`
The class on which this method was defined.
obj : `object`
A member of the provided class (the membership is not checked directly,
but this function will always return `False` if the given object is not
a member of the given class).
Examples
--------
>>> class MetaClass(type):
... def a_classmethod(cls): pass
...
>>> class MyClass(metaclass=MetaClass):
... def an_instancemethod(self): pass
...
... @classmethod
... def another_classmethod(cls): pass
...
... @staticmethod
... def a_staticmethod(): pass
...
>>> isinstancemethod(MyClass, MyClass.a_classmethod)
False
>>> isinstancemethod(MyClass, MyClass.another_classmethod)
False
>>> isinstancemethod(MyClass, MyClass.a_staticmethod)
False
>>> isinstancemethod(MyClass, MyClass.an_instancemethod)
True
"""
return _isinstancemethod(cls, obj)
def _isinstancemethod(cls, obj):
if not isinstance(obj, types.FunctionType):
return False
# Unfortunately it seems the easiest way to get to the original
# staticmethod object is to look in the class's __dict__, though we
# also need to look up the MRO in case the method is not in the given
# class's dict
name = obj.__name__
for basecls in cls.mro(): # This includes cls
if name in basecls.__dict__:
return not isinstance(basecls.__dict__[name], staticmethod)
# This shouldn't happen, though this is the most sensible response if
# it does.
raise AttributeError(name)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains helper functions and classes for handling metadata.
"""
import warnings
from collections import OrderedDict
from collections.abc import Mapping
from copy import deepcopy
from functools import wraps
import numpy as np
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.misc import dtype_bytes_or_chars
__all__ = [
"MergeConflictError",
"MergeConflictWarning",
"MERGE_STRATEGIES",
"common_dtype",
"MergePlus",
"MergeNpConcatenate",
"MergeStrategy",
"MergeStrategyMeta",
"enable_merge_strategies",
"merge",
"MetaData",
"MetaAttribute",
]
class MergeConflictError(TypeError):
pass
class MergeConflictWarning(AstropyWarning):
pass
MERGE_STRATEGIES = []
def common_dtype(arrs):
"""
Use numpy to find the common dtype for a list of ndarrays.
Only allow arrays within the following fundamental numpy data types:
``np.bool_``, ``np.object_``, ``np.number``, ``np.character``, ``np.void``
Parameters
----------
arrs : list of ndarray
Arrays for which to find the common dtype
Returns
-------
dtype_str : str
String representation of dtype (dtype ``str`` attribute)
"""
def dtype(arr):
return getattr(arr, "dtype", np.dtype("O"))
np_types = (np.bool_, np.object_, np.number, np.character, np.void)
uniq_types = {
tuple(issubclass(dtype(arr).type, np_type) for np_type in np_types)
for arr in arrs
}
if len(uniq_types) > 1:
# Embed into the exception the actual list of incompatible types.
incompat_types = [dtype(arr).name for arr in arrs]
tme = MergeConflictError(f"Arrays have incompatible types {incompat_types}")
tme._incompat_types = incompat_types
raise tme
arrs = [np.empty(1, dtype=dtype(arr)) for arr in arrs]
# For string-type arrays need to explicitly fill in non-zero
# values or the final arr_common = .. step is unpredictable.
for i, arr in enumerate(arrs):
if arr.dtype.kind in ("S", "U"):
arrs[i] = [
("0" if arr.dtype.kind == "U" else b"0")
* dtype_bytes_or_chars(arr.dtype)
]
arr_common = np.array([arr[0] for arr in arrs])
return (
arr_common.dtype.str
if arr_common.dtype.names is None
else arr_common.dtype.descr
)
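# Usage sketch (illustrative): compatible numeric arrays yield a common dtype
# string ("<f8" here on little-endian platforms); mixing character and numeric
# kinds raises MergeConflictError instead.
def _demo_common_dtype():  # pragma: no cover
    arrs = [np.array([1], dtype=np.int32), np.array([1.0])]
    assert common_dtype(arrs) == "<f8"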
class MergeStrategyMeta(type):
"""
Metaclass that registers MergeStrategy subclasses into the
MERGE_STRATEGIES registry.
"""
def __new__(mcls, name, bases, members):
cls = super().__new__(mcls, name, bases, members)
# Wrap ``merge`` classmethod to catch any exception and re-raise as
# MergeConflictError.
if "merge" in members and isinstance(members["merge"], classmethod):
orig_merge = members["merge"].__func__
@wraps(orig_merge)
def merge(cls, left, right):
try:
return orig_merge(cls, left, right)
except Exception as err:
raise MergeConflictError(err)
cls.merge = classmethod(merge)
# Register merging class (except for base MergeStrategy class)
if "types" in members:
types = members["types"]
if isinstance(types, tuple):
types = [types]
for left, right in reversed(types):
MERGE_STRATEGIES.insert(0, (left, right, cls))
return cls
class MergeStrategy(metaclass=MergeStrategyMeta):
"""
Base class for defining a strategy for merging metadata from two
sources, left and right, into a single output.
The primary functionality for the class is the ``merge(cls, left, right)``
class method. This takes ``left`` and ``right`` side arguments and
returns a single merged output.
The first class attribute is ``types``. This is defined as a list of
(left_types, right_types) tuples that indicate for which input types the
merge strategy applies. In determining whether to apply this merge
strategy to a pair of (left, right) objects, a test is done:
``isinstance(left, left_types) and isinstance(right, right_types)``. For
example::
types = [(np.ndarray, np.ndarray), # Two ndarrays
(np.ndarray, (list, tuple)), # ndarray and (list or tuple)
((list, tuple), np.ndarray)] # (list or tuple) and ndarray
As a convenience, ``types`` can be defined as a single two-tuple instead of
a list of two-tuples, e.g. ``types = (np.ndarray, np.ndarray)``.
The other class attribute is ``enabled``, which defaults to ``False`` in
the base class. By defining a subclass of ``MergeStrategy`` the new merge
strategy is automatically registered to be available for use in
merging. However, by default the new merge strategy is *not enabled*. This
prevents inadvertently changing the behavior of unrelated code that is
performing metadata merge operations.
In most cases (particularly in library code that others might use) it is
recommended to leave custom strategies disabled and use the
`~astropy.utils.metadata.enable_merge_strategies` context manager to locally
enable the desired strategies. However, if one is confident that the
new strategy will not produce unexpected behavior, then one can globally
enable it by setting the ``enabled`` class attribute to ``True``.
Examples
--------
Here we define a custom merge strategy that takes an int or float on
the left and right sides and returns a list with the two values.
>>> from astropy.utils.metadata import MergeStrategy
>>> class MergeNumbersAsList(MergeStrategy):
... types = ((int, float), (int, float)) # (left_types, right_types)
...
... @classmethod
... def merge(cls, left, right):
... return [left, right]
"""
# Set ``enabled = True`` to globally enable applying this merge strategy.
# This is not generally recommended.
enabled = False
# types = [(left_types, right_types), ...]
class MergePlus(MergeStrategy):
"""
Merge ``left`` and ``right`` objects using the plus operator. This
merge strategy is globally enabled by default.
"""
types = [(list, list), (tuple, tuple)]
enabled = True
@classmethod
def merge(cls, left, right):
return left + right
class MergeNpConcatenate(MergeStrategy):
"""
Merge ``left`` and ``right`` objects using np.concatenate. This
merge strategy is globally enabled by default.
This will upcast a list or tuple to np.ndarray and the output is
always ndarray.
"""
types = [
(np.ndarray, np.ndarray),
(np.ndarray, (list, tuple)),
((list, tuple), np.ndarray),
]
enabled = True
@classmethod
def merge(cls, left, right):
left, right = np.asanyarray(left), np.asanyarray(right)
common_dtype([left, right]) # Ensure left and right have compatible dtype
return np.concatenate([left, right])
def _both_isinstance(left, right, cls):
return isinstance(left, cls) and isinstance(right, cls)
def _not_equal(left, right):
try:
return bool(left != right)
except Exception:
return True
class _EnableMergeStrategies:
def __init__(self, *merge_strategies):
self.merge_strategies = merge_strategies
self.orig_enabled = {}
for left_type, right_type, merge_strategy in MERGE_STRATEGIES:
if issubclass(merge_strategy, merge_strategies):
self.orig_enabled[merge_strategy] = merge_strategy.enabled
merge_strategy.enabled = True
def __enter__(self):
pass
def __exit__(self, type, value, tb):
for merge_strategy, enabled in self.orig_enabled.items():
merge_strategy.enabled = enabled
def enable_merge_strategies(*merge_strategies):
"""
Context manager to temporarily enable one or more custom metadata merge
strategies.
Examples
--------
Here we define a custom merge strategy that takes an int or float on
the left and right sides and returns a list with the two values.
>>> from astropy.utils.metadata import MergeStrategy
>>> class MergeNumbersAsList(MergeStrategy):
... types = ((int, float), # left side types
... (int, float)) # right side types
... @classmethod
... def merge(cls, left, right):
... return [left, right]
By defining this class the merge strategy is automatically registered to be
available for use in merging. However, by default new merge strategies are
*not enabled*. This prevents inadvertently changing the behavior of
unrelated code that is performing metadata merge operations.
In order to use the new merge strategy, use this context manager as in the
following example::
>>> from astropy.table import Table, vstack
>>> from astropy.utils.metadata import enable_merge_strategies
>>> t1 = Table([[1]], names=['a'])
>>> t2 = Table([[2]], names=['a'])
>>> t1.meta = {'m': 1}
>>> t2.meta = {'m': 2}
>>> with enable_merge_strategies(MergeNumbersAsList):
... t12 = vstack([t1, t2])
>>> t12.meta['m']
[1, 2]
One can supply further merge strategies as additional arguments to the
context manager.
As a convenience, the enabling operation is actually done by checking
whether the registered strategies are subclasses of the context manager
arguments. This means one can define a related set of merge strategies and
then enable them all at once by enabling the base class. As a trivial
example, *all* registered merge strategies can be enabled with::
>>> with enable_merge_strategies(MergeStrategy):
... t12 = vstack([t1, t2])
Parameters
----------
*merge_strategies : `~astropy.utils.metadata.MergeStrategy`
Merge strategies that will be enabled.
"""
return _EnableMergeStrategies(*merge_strategies)
def _warn_str_func(key, left, right):
out = (
f"Cannot merge meta key {key!r} types {type(left)!r}"
f" and {type(right)!r}, choosing {key}={right!r}"
)
return out
def _error_str_func(key, left, right):
out = f"Cannot merge meta key {key!r} types {type(left)!r} and {type(right)!r}"
return out
def merge(
left,
right,
merge_func=None,
metadata_conflicts="warn",
warn_str_func=_warn_str_func,
error_str_func=_error_str_func,
):
"""
Merge the ``left`` and ``right`` metadata objects.
This is a simplistic and limited implementation at this point.
"""
if not _both_isinstance(left, right, dict):
raise MergeConflictError("Can only merge two dict-based objects")
out = deepcopy(left)
for key, val in right.items():
# If no conflict then insert val into out dict and continue
if key not in out:
out[key] = deepcopy(val)
continue
# There is a conflict that must be resolved
if _both_isinstance(left[key], right[key], dict):
out[key] = merge(
left[key], right[key], merge_func, metadata_conflicts=metadata_conflicts
)
else:
try:
if merge_func is None:
for left_type, right_type, merge_cls in MERGE_STRATEGIES:
if not merge_cls.enabled:
continue
if isinstance(left[key], left_type) and isinstance(
right[key], right_type
):
out[key] = merge_cls.merge(left[key], right[key])
break
else:
raise MergeConflictError
else:
out[key] = merge_func(left[key], right[key])
except MergeConflictError:
# Pick the metadata item that is not None; if both are not
# None and they are equal, there is no conflict, and if
# they differ, there is a conflict and we pick the one
# on the right (or raise an error).
if left[key] is None:
# This may not seem necessary since out[key] gets set to
# right[key], but not all objects support != which is
# needed for one of the if clauses.
out[key] = right[key]
elif right[key] is None:
out[key] = left[key]
elif _not_equal(left[key], right[key]):
if metadata_conflicts == "warn":
warnings.warn(
warn_str_func(key, left[key], right[key]),
MergeConflictWarning,
)
elif metadata_conflicts == "error":
raise MergeConflictError(
error_str_func(key, left[key], right[key])
)
elif metadata_conflicts != "silent":
raise ValueError(
"metadata_conflicts argument must be one "
'of "silent", "warn", or "error"'
)
out[key] = right[key]
else:
out[key] = right[key]
return out
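# Usage sketch (illustrative): nested dicts merge recursively, and enabled
# strategies such as MergePlus handle matching leaf types.
def _demo_merge():  # pragma: no cover
    left = {"a": 1, "sub": {"x": [1]}}
    right = {"b": 2, "sub": {"x": [2]}}
    out = merge(left, right)
    assert out == {"a": 1, "b": 2, "sub": {"x": [1, 2]}}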
class MetaData:
"""
A descriptor for classes that have a ``meta`` property.
This can be set to any valid `~collections.abc.Mapping`.
Parameters
----------
doc : `str`, optional
Documentation for the attribute of the class.
Default is ``""``.
.. versionadded:: 1.2
copy : `bool`, optional
If ``True`` the value is deepcopied before setting, otherwise it
is saved as reference.
Default is ``True``.
.. versionadded:: 1.2
"""
def __init__(self, doc="", copy=True):
self.__doc__ = doc
self.copy = copy
def __get__(self, instance, owner):
if instance is None:
return self
if not hasattr(instance, "_meta"):
instance._meta = OrderedDict()
return instance._meta
def __set__(self, instance, value):
if value is None:
instance._meta = OrderedDict()
else:
if isinstance(value, Mapping):
if self.copy:
instance._meta = deepcopy(value)
else:
instance._meta = value
else:
raise TypeError("meta attribute must be dict-like")
class MetaAttribute:
"""
Descriptor to define custom attribute which gets stored in the object
``meta`` dict and can have a defined default.
This descriptor is intended to provide a convenient way to add attributes
to a subclass of a complex class such as ``Table`` or ``NDData``.
This requires that the object has an attribute ``meta`` which is a
dict-like object. The value of the MetaAttribute will be stored in a
new dict meta['__attributes__'] that is created when required.
Classes that define MetaAttributes are encouraged to support initializing
the attributes via the class ``__init__``. For example::
for attr in list(kwargs):
descr = getattr(self.__class__, attr, None)
if isinstance(descr, MetaAttribute):
setattr(self, attr, kwargs.pop(attr))
The name of a ``MetaAttribute`` cannot be the same as any of the following:
- Keyword argument in the owner class ``__init__``
- Method or attribute of the "parent class", where the parent class is
taken to be ``owner.__mro__[1]``.
:param default: default value
"""
def __init__(self, default=None):
self.default = default
def __get__(self, instance, owner):
# When called without an instance, return self to allow access
# to descriptor attributes.
if instance is None:
return self
# If default is None and value has not been set already then return None
without touching meta['__attributes__'] at all. This helps e.g.
# with the Table._hidden_columns attribute so it doesn't auto-create
# meta['__attributes__'] always.
if self.default is None and self.name not in instance.meta.get(
"__attributes__", {}
):
return None
# Get the __attributes__ dict and create if not there already.
attributes = instance.meta.setdefault("__attributes__", {})
try:
value = attributes[self.name]
except KeyError:
if self.default is not None:
attributes[self.name] = deepcopy(self.default)
# Return either specified default or None
value = attributes.get(self.name)
return value
def __set__(self, instance, value):
# Get the __attributes__ dict and create if not there already.
attributes = instance.meta.setdefault("__attributes__", {})
attributes[self.name] = value
def __delete__(self, instance):
# Remove this attribute from meta['__attributes__'] if it exists.
if "__attributes__" in instance.meta:
attrs = instance.meta["__attributes__"]
if self.name in attrs:
del attrs[self.name]
# If this was the last attribute then remove the meta key as well
if not attrs:
del instance.meta["__attributes__"]
def __set_name__(self, owner, name):
import inspect
params = [
param.name
for param in inspect.signature(owner).parameters.values()
if param.kind
not in (inspect.Parameter.VAR_KEYWORD, inspect.Parameter.VAR_POSITIONAL)
]
# Reject names from existing params or best guess at parent class
if name in params or hasattr(owner.__mro__[1], name):
raise ValueError(f"{name} not allowed as {self.__class__.__name__}")
self.name = name
def __repr__(self):
return f"<{self.__class__.__name__} name={self.name} default={self.default}>"
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains errors/exceptions and warnings of general use for
astropy. Exceptions that are specific to a given subpackage should *not* be
here, but rather in the particular subpackage.
"""
# TODO: deprecate these. This cannot be trivially done with
# astropy.utils.decorators.deprecate, since that module needs the exceptions
# here, leading to circular import problems.
from erfa import ErfaError, ErfaWarning # noqa: F401
__all__ = [
"AstropyWarning",
"AstropyUserWarning",
"AstropyDeprecationWarning",
"AstropyPendingDeprecationWarning",
"AstropyBackwardsIncompatibleChangeWarning",
"DuplicateRepresentationWarning",
"NoValue",
]
class AstropyWarning(Warning):
"""
The base warning class from which all Astropy warnings should inherit.
Any warning inheriting from this class is handled by the Astropy logger.
"""
class AstropyUserWarning(UserWarning, AstropyWarning):
"""
The primary warning class for Astropy.
Use this if you do not need a specific sub-class.
"""
class AstropyDeprecationWarning(AstropyWarning):
"""
A warning class to indicate a deprecated feature.
"""
class AstropyPendingDeprecationWarning(PendingDeprecationWarning, AstropyWarning):
"""
A warning class to indicate a soon-to-be deprecated feature.
"""
class AstropyBackwardsIncompatibleChangeWarning(AstropyWarning):
"""
A warning class indicating a change in astropy that is incompatible
with previous versions.
The suggested procedure is to issue this warning for the version in
which the change occurs, and remove it for all following versions.
"""
class DuplicateRepresentationWarning(AstropyWarning):
"""
A warning class indicating a representation name was already registered.
"""
class _NoValue:
"""Special keyword value.
This class may be used as the default value assigned to a
deprecated keyword in order to check if it has been given a user
defined value.
"""
def __repr__(self):
return "astropy.utils.exceptions.NoValue"
NoValue = _NoValue()
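# Usage sketch (illustrative, hypothetical function): ``NoValue`` as a
# sentinel default to detect whether a deprecated keyword was passed
# explicitly.
def _demo_deprecated_kwarg(value=NoValue):  # pragma: no cover
    import warnings
    if value is not NoValue:
        warnings.warn("'value' is deprecated", AstropyDeprecationWarning)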
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module contains functions and methods that relate to the DataInfo class
which provides a container for informational attributes as well as summary info
methods.
A DataInfo object is attached to the Quantity, SkyCoord, and Time classes in
astropy. Here it allows those classes to be used in Tables and uniformly carry
table column attributes such as name, format, dtype, meta, and description.
"""
# Note: these functions and classes are tested extensively in astropy table
# tests via their use in providing mixin column info, and in
# astropy/tests/test_info for providing table and column info summary data.
import os
import re
import sys
import warnings
import weakref
from collections import OrderedDict
from contextlib import contextmanager
from copy import deepcopy
from functools import partial
from io import StringIO
import numpy as np
from . import metadata
__all__ = [
"data_info_factory",
"dtype_info_name",
"BaseColumnInfo",
"DataInfo",
"MixinInfo",
"ParentDtypeInfo",
]
# Tuple of filterwarnings kwargs to ignore when calling info
IGNORE_WARNINGS = (
dict(
category=RuntimeWarning,
message=(
"All-NaN|"
"Mean of empty slice|Degrees of freedom <= 0|"
"invalid value encountered in sqrt"
),
),
)
@contextmanager
def serialize_context_as(context):
"""Set context for serialization.
This will allow downstream code to understand the context in which a column
is being serialized. Objects like Time or SkyCoord will have different
default serialization representations depending on context.
Parameters
----------
context : str
Context name, e.g. 'fits', 'hdf5', 'parquet', 'ecsv', 'yaml'
"""
old_context = BaseColumnInfo._serialize_context
BaseColumnInfo._serialize_context = context
try:
yield
finally:
BaseColumnInfo._serialize_context = old_context
def dtype_info_name(dtype):
"""Return a human-oriented string name of the ``dtype`` arg.
This can be used by astropy methods that present type information about
a data object.
The output is mostly equivalent to ``dtype.name`` which takes the form
<type_name>[B] where <type_name> is like ``int`` or ``bool`` and [B] is an
optional number of bits which gets included only for numeric types.
The output is shown below for ``bytes`` and ``str`` types, with <N> being
the number of characters. This representation corresponds to the Python
type that matches the dtype::
Numpy S<N> U<N>
Python bytes<N> str<N>
Parameters
----------
dtype : str, `~numpy.dtype`, type
Input as an object that can be converted via :class:`numpy.dtype`.
Returns
-------
dtype_info_name : str
String name of ``dtype``
"""
dtype = np.dtype(dtype)
if dtype.names is not None:
info_names = ", ".join(dtype_info_name(dt[0]) for dt in dtype.fields.values())
return f"({info_names})"
if dtype.subdtype is not None:
dtype, shape = dtype.subdtype
else:
shape = ()
if dtype.kind in ("S", "U"):
type_name = "bytes" if dtype.kind == "S" else "str"
length = re.search(r"(\d+)", dtype.str).group(1)
out = type_name + length
else:
out = dtype.name
if shape:
out += f"[{','.join(str(n) for n in shape)}]"
return out
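# Spot checks for ``dtype_info_name`` (illustrative).
def _demo_dtype_info_name():  # pragma: no cover
    assert dtype_info_name(np.dtype("U4")) == "str4"
    assert dtype_info_name("S2") == "bytes2"
    assert dtype_info_name(np.float64) == "float64"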
def data_info_factory(names, funcs):
"""
Factory to create a function that can be used as an ``option``
for outputting data object summary information.
Examples
--------
>>> from astropy.utils.data_info import data_info_factory
>>> from astropy.table import Column
>>> c = Column([4., 3., 2., 1.])
>>> mystats = data_info_factory(names=['min', 'median', 'max'],
... funcs=[np.min, np.median, np.max])
>>> c.info(option=mystats)
min = 1
median = 2.5
max = 4
n_bad = 0
length = 4
Parameters
----------
names : list
List of information attribute names
funcs : list
List of functions that compute the corresponding information attribute
Returns
-------
func : function
Function that can be used as a data info option
"""
def func(dat):
outs = []
for name, func in zip(names, funcs):
try:
if isinstance(func, str):
out = getattr(dat, func)()
else:
out = func(dat)
except Exception:
outs.append("--")
else:
try:
outs.append(f"{out:g}")
except (TypeError, ValueError):
outs.append(str(out))
return OrderedDict(zip(names, outs))
return func
def _get_obj_attrs_map(obj, attrs):
"""
Get the values for object ``attrs`` and return as a dict. This
ignores any attributes that are None. In the context of serializing
the supported core astropy classes this conversion will succeed and
results in more succinct and less python-specific YAML.
"""
out = {}
for attr in attrs:
val = getattr(obj, attr, None)
if val is not None:
out[attr] = val
return out
def _get_data_attribute(dat, attr=None):
"""
Get a data object attribute for the ``attributes`` info summary method.
"""
if attr == "class":
val = type(dat).__name__
elif attr == "dtype":
val = dtype_info_name(dat.info.dtype)
elif attr == "shape":
datshape = dat.shape[1:]
val = datshape if datshape else ""
else:
val = getattr(dat.info, attr)
if val is None:
val = ""
return str(val)
class InfoAttribute:
def __init__(self, attr, default=None):
self.attr = attr
self.default = default
def __get__(self, instance, owner_cls):
if instance is None:
return self
return instance._attrs.get(self.attr, self.default)
def __set__(self, instance, value):
if instance is None:
# This is an unbound descriptor on the class
raise ValueError("cannot set unbound descriptor")
instance._attrs[self.attr] = value
class ParentAttribute:
def __init__(self, attr):
self.attr = attr
def __get__(self, instance, owner_cls):
if instance is None:
return self
return getattr(instance._parent, self.attr)
def __set__(self, instance, value):
if instance is None:
# This is an unbound descriptor on the class
raise ValueError("cannot set unbound descriptor")
setattr(instance._parent, self.attr, value)
class DataInfoMeta(type):
def __new__(mcls, name, bases, dct):
# Ensure that we do not gain a __dict__, which would mean
# arbitrary attributes could be set.
dct.setdefault("__slots__", [])
return super().__new__(mcls, name, bases, dct)
def __init__(cls, name, bases, dct):
super().__init__(name, bases, dct)
# Define default getters/setters for attributes, if needed.
for attr in cls.attr_names:
if attr not in dct:
# If not defined explicitly for this class, did any of
# its superclasses define it, and, if so, was this an
# automatically defined look-up-on-parent attribute?
cls_attr = getattr(cls, attr, None)
if attr in cls.attrs_from_parent:
# If the attribute is supposed to be stored on the parent,
# and that is stated by this class yet it was not the case
# on the superclass, override it.
if "attrs_from_parent" in dct and not isinstance(
cls_attr, ParentAttribute
):
setattr(cls, attr, ParentAttribute(attr))
elif not cls_attr or isinstance(cls_attr, ParentAttribute):
# If the attribute is not meant to be stored on the parent,
# and if it was not defined already or was previously defined
# as an attribute on the parent, define a regular
# look-up-on-info attribute
setattr(
cls, attr, InfoAttribute(attr, cls._attr_defaults.get(attr))
)
class DataInfo(metaclass=DataInfoMeta):
"""
Descriptor that data classes use to add an ``info`` attribute for storing
data attributes in a uniform and portable way. Note that it *must* be
called ``info`` so that the DataInfo() object can be stored in the
``instance`` using the ``info`` key. Because owner_cls.x is a descriptor,
Python doesn't use __dict__['x'] normally, and the descriptor can safely
store stuff there. Thanks to
https://nbviewer.jupyter.org/urls/gist.github.com/ChrisBeaumont/5758381/raw/descriptor_writeup.ipynb
for this trick that works for non-hashable classes.
Parameters
----------
bound : bool
If True this is a descriptor attribute in a class definition, else it
is a DataInfo() object that is bound to a data object instance. Default is False.
"""
_stats = ["mean", "std", "min", "max"]
attrs_from_parent = set()
attr_names = {"name", "unit", "dtype", "format", "description", "meta"}
_attr_defaults = {"dtype": np.dtype("O")}
_attrs_no_copy = set()
_info_summary_attrs = ("dtype", "shape", "unit", "format", "description", "class")
__slots__ = ["_parent_cls", "_parent_ref", "_attrs"]
# This specifies the list of object attributes which must be stored in
# order to re-create the object after serialization. This is independent
# of normal `info` attributes like name or description. Subclasses will
# generally either define this statically (QuantityInfo) or dynamically
# (SkyCoordInfo). These attributes may be scalars or arrays. If arrays
# that match the object length they will be serialized as an independent
# column.
_represent_as_dict_attrs = ()
# This specifies attributes which are to be provided to the class
# initializer as ordered args instead of keyword args. This is needed
# for Quantity subclasses where the keyword for data varies (e.g.
# between Quantity and Angle).
_construct_from_dict_args = ()
# This specifies the name of an attribute which is the "primary" data.
# Then when representing as columns
# (table.serialize._represent_mixin_as_column) the output for this
# attribute will be written with the just name of the mixin instead of the
# usual "<name>.<attr>".
_represent_as_dict_primary_data = None
def __init__(self, bound=False):
# If bound to a data object instance then create the dict of attributes
# which stores the info attribute values. Default of None for "unset"
# except for dtype where the default is object.
if bound:
self._attrs = {}
@property
def _parent(self):
try:
parent = self._parent_ref()
except AttributeError:
return None
if parent is None:
raise AttributeError(
"""\
failed to access "info" attribute on a temporary object.
It looks like you have done something like ``col[3:5].info`` or
``col.quantity.info``, i.e. you accessed ``info`` from a temporary slice
object that only exists momentarily. This has failed because the reference to
that temporary object is now lost. Instead force a permanent reference (e.g.
``c = col[3:5]`` followed by ``c.info``)."""
)
return parent
def __get__(self, instance, owner_cls):
if instance is None:
# This is an unbound descriptor on the class
self._parent_cls = owner_cls
return self
info = instance.__dict__.get("info")
if info is None:
info = instance.__dict__["info"] = self.__class__(bound=True)
# We set _parent_ref on every call, since if one makes copies of
# instances, 'info' will be copied as well, which will lose the
# reference.
info._parent_ref = weakref.ref(instance)
return info
def __set__(self, instance, value):
if instance is None:
# This is an unbound descriptor on the class
raise ValueError("cannot set unbound descriptor")
if isinstance(value, DataInfo):
info = instance.__dict__["info"] = self.__class__(bound=True)
attr_names = info.attr_names
if value.__class__ is self.__class__:
                # For the same class, attributes are guaranteed to be stored
                # in _attrs, so we can speed things up by not going through
                # the defaults.  Doing this before the set difference in the
                # loop below also helps speed.
attr_names = attr_names & set(value._attrs) # NOT in-place!
else:
# For different classes, copy over the attributes in common.
attr_names = attr_names & (value.attr_names - value._attrs_no_copy)
for attr in attr_names - info.attrs_from_parent - info._attrs_no_copy:
info._attrs[attr] = deepcopy(getattr(value, attr))
else:
raise TypeError("info must be set with a DataInfo instance")
def __getstate__(self):
return self._attrs
def __setstate__(self, state):
self._attrs = state
def _represent_as_dict(self, attrs=None):
"""Get the values for the parent ``attrs`` and return as a dict.
By default, uses '_represent_as_dict_attrs'.
"""
if attrs is None:
attrs = self._represent_as_dict_attrs
return _get_obj_attrs_map(self._parent, attrs)
def _construct_from_dict(self, map):
args = [map.pop(attr) for attr in self._construct_from_dict_args]
return self._parent_cls(*args, **map)
info_summary_attributes = staticmethod(
data_info_factory(
names=_info_summary_attrs,
funcs=[
partial(_get_data_attribute, attr=attr) for attr in _info_summary_attrs
],
)
)
    # Use numpy's nan-aware statistics (nanmean, nanstd, ...) so that NaN
    # entries do not skew the summary.
info_summary_stats = staticmethod(
data_info_factory(
names=_stats, funcs=[getattr(np, "nan" + stat) for stat in _stats]
)
)
def __call__(self, option="attributes", out=""):
"""
Write summary information about data object to the ``out`` filehandle.
By default this prints to standard output via sys.stdout.
The ``option`` argument specifies what type of information
to include. This can be a string, a function, or a list of
strings or functions. Built-in options are:
- ``attributes``: data object attributes like ``dtype`` and ``format``
        - ``stats``: basic statistics: mean, std, min, and max
If a function is specified then that function will be called with the
data object as its single argument. The function must return an
OrderedDict containing the information attributes.
If a list is provided then the information attributes will be
appended for each of the options, in order.
Examples
--------
>>> from astropy.table import Column
>>> c = Column([1, 2], unit='m', dtype='int32')
>>> c.info()
dtype = int32
unit = m
class = Column
n_bad = 0
length = 2
>>> c.info(['attributes', 'stats'])
dtype = int32
unit = m
class = Column
mean = 1.5
std = 0.5
min = 1
max = 2
n_bad = 0
length = 2
Parameters
----------
option : str, callable, list of (str or callable)
Info option, defaults to 'attributes'.
out : file-like, None
Output destination, defaults to sys.stdout. If None then the
OrderedDict with information attributes is returned
Returns
-------
info : `~collections.OrderedDict` or None
`~collections.OrderedDict` if out==None else None
"""
if out == "":
out = sys.stdout
dat = self._parent
info = OrderedDict()
name = dat.info.name
if name is not None:
info["name"] = name
options = option if isinstance(option, (list, tuple)) else [option]
for option in options:
if isinstance(option, str):
if hasattr(self, "info_summary_" + option):
option = getattr(self, "info_summary_" + option)
else:
raise ValueError(f"{option=} is not an allowed information type")
with warnings.catch_warnings():
for ignore_kwargs in IGNORE_WARNINGS:
warnings.filterwarnings("ignore", **ignore_kwargs)
info.update(option(dat))
if hasattr(dat, "mask"):
n_bad = np.count_nonzero(dat.mask)
else:
try:
n_bad = np.count_nonzero(np.isinf(dat) | np.isnan(dat))
except Exception:
n_bad = 0
info["n_bad"] = n_bad
try:
info["length"] = len(dat)
except (TypeError, IndexError):
pass
if out is None:
return info
for key, val in info.items():
if val != "":
out.write(f"{key} = {val}" + os.linesep)
def __repr__(self):
if self._parent is None:
return super().__repr__()
out = StringIO()
self.__call__(out=out)
return out.getvalue()
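

# Editor's note: a hedged usage sketch (the ``_Demo`` class is hypothetical;
# real users of this machinery are classes like astropy's Column).  The
# descriptor must be assigned to an attribute literally named ``info``,
# because the bound instance is cached under ``instance.__dict__['info']``.
def _example_data_info():
    class _Demo(np.ndarray):
        info = DataInfo()

    arr = np.arange(3.0).view(_Demo)
    arr.info.name = "flux"  # stored in the bound DataInfo's _attrs dict
    return arr.info.name    # -> "flux"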
class BaseColumnInfo(DataInfo):
"""Base info class for anything that can be a column in an astropy Table.
There are at least two classes that inherit from this:
ColumnInfo: for native astropy Column / MaskedColumn objects
MixinInfo: for mixin column objects
Note that this class is defined here so that mixins can use it
without importing the table package.
"""
attr_names = DataInfo.attr_names | {"parent_table", "indices"}
_attrs_no_copy = {"parent_table", "indices"}
# Context for serialization. This can be set temporarily via
# ``serialize_context_as(context)`` context manager to allow downstream
# code to understand the context in which a column is being serialized.
# Typical values are 'fits', 'hdf5', 'parquet', 'ecsv', 'yaml'. Objects
# like Time or SkyCoord will have different default serialization
# representations depending on context.
_serialize_context = None
__slots__ = ["_format_funcs", "_copy_indices"]
@property
def parent_table(self):
value = self._attrs.get("parent_table")
if callable(value):
value = value()
return value
@parent_table.setter
def parent_table(self, parent_table):
if parent_table is None:
self._attrs.pop("parent_table", None)
else:
parent_table = weakref.ref(parent_table)
self._attrs["parent_table"] = parent_table
def __init__(self, bound=False):
super().__init__(bound=bound)
# If bound to a data object instance then add a _format_funcs dict
# for caching functions for print formatting.
if bound:
self._format_funcs = {}
def __set__(self, instance, value):
# For Table columns do not set `info` when the instance is a scalar.
try:
if not instance.shape:
return
except AttributeError:
pass
super().__set__(instance, value)
def iter_str_vals(self):
"""
This is a mixin-safe version of Column.iter_str_vals.
"""
col = self._parent
if self.parent_table is None:
from astropy.table.column import FORMATTER as formatter
else:
formatter = self.parent_table.formatter
_pformat_col_iter = formatter._pformat_col_iter
yield from _pformat_col_iter(col, -1, False, False, {})
@property
def indices(self):
# Implementation note: the auto-generation as an InfoAttribute cannot
# be used here, since on access, one should not just return the
        # default (an empty list in this case), but set _attrs['indices'] so that
# if the list is appended to, it is registered here.
return self._attrs.setdefault("indices", [])
@indices.setter
def indices(self, indices):
self._attrs["indices"] = indices
def adjust_indices(self, index, value, col_len):
"""
Adjust info indices after column modification.
Parameters
----------
index : slice, int, list, or ndarray
Element(s) of column to modify. This parameter can
be a single row number, a list of row numbers, an
ndarray of row numbers, a boolean ndarray (a mask),
or a column slice.
value : int, list, or ndarray
New value(s) to insert
col_len : int
Length of the column
"""
if not self.indices:
return
if isinstance(index, slice):
# run through each key in slice
t = index.indices(col_len)
keys = list(range(*t))
elif isinstance(index, np.ndarray) and index.dtype.kind == "b":
# boolean mask
keys = np.where(index)[0]
else: # single int
keys = [index]
value = np.atleast_1d(value) # turn array(x) into array([x])
if value.size == 1:
# repeat single value
value = list(value) * len(keys)
for key, val in zip(keys, value):
for col_index in self.indices:
col_index.replace(key, self.name, val)
def slice_indices(self, col_slice, item, col_len):
"""
Given a sliced object, modify its indices
to correctly represent the slice.
Parameters
----------
col_slice : `~astropy.table.Column` or mixin
Sliced object. If not a column, it must be a valid mixin, see
https://docs.astropy.org/en/stable/table/mixin_columns.html
item : slice, list, or ndarray
Slice used to create col_slice
col_len : int
Length of original object
"""
from astropy.table.sorted_array import SortedArray
if not getattr(self, "_copy_indices", True):
# Necessary because MaskedArray will perform a shallow copy
col_slice.info.indices = []
return col_slice
elif isinstance(item, slice):
col_slice.info.indices = [x[item] for x in self.indices]
elif self.indices:
if isinstance(item, np.ndarray) and item.dtype.kind == "b":
# boolean mask
item = np.where(item)[0]
# Empirical testing suggests that recreating a BST/RBT index is
# more effective than relabelling when less than ~60% of
# the total number of rows are involved, and is in general
# more effective for SortedArray.
small = len(item) <= 0.6 * col_len
col_slice.info.indices = []
for index in self.indices:
if small or isinstance(index, SortedArray):
new_index = index.get_slice(col_slice, item)
else:
new_index = deepcopy(index)
new_index.replace_rows(item)
col_slice.info.indices.append(new_index)
return col_slice
@staticmethod
def merge_cols_attributes(cols, metadata_conflicts, name, attrs):
"""
Utility method to merge and validate the attributes ``attrs`` for the
input table columns ``cols``.
Note that ``dtype`` and ``shape`` attributes are handled specially.
These should not be passed in ``attrs`` but will always be in the
returned dict of merged attributes.
Parameters
----------
cols : list
List of input Table column objects
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
attrs : list
List of attribute names to be merged
Returns
-------
attrs : dict
Of merged attributes.
"""
from astropy.table.np_utils import TableMergeError
def warn_str_func(key, left, right):
out = (
f"In merged column '{name}' the '{key}' attribute does not match "
f"({left} != {right}). Using {right} for merged output"
)
return out
def getattrs(col):
return {
attr: getattr(col.info, attr)
for attr in attrs
if getattr(col.info, attr, None) is not None
}
out = getattrs(cols[0])
for col in cols[1:]:
out = metadata.merge(
out,
getattrs(col),
metadata_conflicts=metadata_conflicts,
warn_str_func=warn_str_func,
)
        # Output dtype is the superset of all dtypes of the input columns
out["dtype"] = metadata.common_dtype(cols)
# Make sure all input shapes are the same
uniq_shapes = {col.shape[1:] for col in cols}
if len(uniq_shapes) != 1:
raise TableMergeError("columns have different shapes")
out["shape"] = uniq_shapes.pop()
# "Merged" output name is the supplied name
if name is not None:
out["name"] = name
return out
def get_sortable_arrays(self):
"""
Return a list of arrays which can be lexically sorted to represent
the order of the parent column.
The base method raises NotImplementedError and must be overridden.
Returns
-------
arrays : list of ndarray
"""
raise NotImplementedError(f"column {self.name} is not sortable")
class MixinInfo(BaseColumnInfo):
@property
def name(self):
return self._attrs.get("name")
@name.setter
def name(self, name):
# For mixin columns that live within a table, rename the column in the
# table when setting the name attribute. This mirrors the same
# functionality in the BaseColumn class.
if self.parent_table is not None:
new_name = None if name is None else str(name)
self.parent_table.columns._rename_column(self.name, new_name)
self._attrs["name"] = name
@property
def groups(self):
# This implementation for mixin columns essentially matches the Column
# property definition. `groups` is a read-only property here and
# depends on the parent table of the column having `groups`. This will
# allow aggregating mixins as long as they support those operations.
from astropy.table import groups
return self._attrs.setdefault("groups", groups.ColumnGroups(self._parent))
class ParentDtypeInfo(MixinInfo):
"""Mixin that gets info.dtype from parent."""
attrs_from_parent = {"dtype"} # dtype and unit taken from parent
|
84baf3fcf99d2bee1f06d2d241019adf614f5a41c0cad7f3e36ae9ad086a7d36 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""The ShapedLikeNDArray mixin class and shape-related functions."""
import abc
from itertools import zip_longest
import numpy as np
__all__ = [
"NDArrayShapeMethods",
"ShapedLikeNDArray",
"check_broadcast",
"IncompatibleShapeError",
"unbroadcast",
]
class NDArrayShapeMethods:
"""Mixin class to provide shape-changing methods.
The class proper is assumed to have some underlying data, which are arrays
or array-like structures. It must define a ``shape`` property, which gives
the shape of those data, as well as an ``_apply`` method that creates a new
instance in which a `~numpy.ndarray` method has been applied to those.
Furthermore, for consistency with `~numpy.ndarray`, it is recommended to
define a setter for the ``shape`` property, which, like the
`~numpy.ndarray.shape` property allows in-place reshaping the internal data
(and, unlike the ``reshape`` method raises an exception if this is not
possible).
This class only provides the shape-changing methods and is meant in
particular for `~numpy.ndarray` subclasses that need to keep track of
other arrays. For other classes, `~astropy.utils.shapes.ShapedLikeNDArray`
is recommended.
"""
# Note to developers: if new methods are added here, be sure to check that
# they work properly with the classes that use this, such as Time and
# BaseRepresentation, i.e., look at their ``_apply`` methods and add
# relevant tests. This is particularly important for methods that imply
# copies rather than views of data (see the special-case treatment of
# 'flatten' in Time).
def __getitem__(self, item):
return self._apply("__getitem__", item)
def copy(self, *args, **kwargs):
"""Return an instance containing copies of the internal data.
Parameters are as for :meth:`~numpy.ndarray.copy`.
"""
return self._apply("copy", *args, **kwargs)
def reshape(self, *args, **kwargs):
"""Returns an instance containing the same data with a new shape.
Parameters are as for :meth:`~numpy.ndarray.reshape`. Note that it is
not always possible to change the shape of an array without copying the
data (see :func:`~numpy.reshape` documentation). If you want an error
        to be raised if the data is copied, you should assign the new shape to
the shape attribute (note: this may not be implemented for all classes
using ``NDArrayShapeMethods``).
"""
return self._apply("reshape", *args, **kwargs)
def ravel(self, *args, **kwargs):
"""Return an instance with the array collapsed into one dimension.
Parameters are as for :meth:`~numpy.ndarray.ravel`. Note that it is
not always possible to unravel an array without copying the data.
        If you want an error to be raised if the data is copied, you should
        assign shape ``(-1,)`` to the shape attribute.
"""
return self._apply("ravel", *args, **kwargs)
def flatten(self, *args, **kwargs):
"""Return a copy with the array collapsed into one dimension.
Parameters are as for :meth:`~numpy.ndarray.flatten`.
"""
return self._apply("flatten", *args, **kwargs)
def transpose(self, *args, **kwargs):
"""Return an instance with the data transposed.
Parameters are as for :meth:`~numpy.ndarray.transpose`. All internal
data are views of the data of the original.
"""
return self._apply("transpose", *args, **kwargs)
@property
def T(self):
"""Return an instance with the data transposed.
Parameters are as for :attr:`~numpy.ndarray.T`. All internal
data are views of the data of the original.
"""
if self.ndim < 2:
return self
else:
return self.transpose()
def swapaxes(self, *args, **kwargs):
"""Return an instance with the given axes interchanged.
Parameters are as for :meth:`~numpy.ndarray.swapaxes`:
``axis1, axis2``. All internal data are views of the data of the
original.
"""
return self._apply("swapaxes", *args, **kwargs)
def diagonal(self, *args, **kwargs):
"""Return an instance with the specified diagonals.
Parameters are as for :meth:`~numpy.ndarray.diagonal`. All internal
data are views of the data of the original.
"""
return self._apply("diagonal", *args, **kwargs)
def squeeze(self, *args, **kwargs):
"""Return an instance with single-dimensional shape entries removed.
Parameters are as for :meth:`~numpy.ndarray.squeeze`. All internal
data are views of the data of the original.
"""
return self._apply("squeeze", *args, **kwargs)
def take(self, indices, axis=None, out=None, mode="raise"):
"""Return a new instance formed from the elements at the given indices.
Parameters are as for :meth:`~numpy.ndarray.take`, except that,
obviously, no output array can be given.
"""
if out is not None:
            raise NotImplementedError("cannot pass 'out' argument to 'take'.")
return self._apply("take", indices, axis=axis, mode=mode)
class ShapedLikeNDArray(NDArrayShapeMethods, metaclass=abc.ABCMeta):
"""Mixin class to provide shape-changing methods.
The class proper is assumed to have some underlying data, which are arrays
or array-like structures. It must define a ``shape`` property, which gives
the shape of those data, as well as an ``_apply`` method that creates a new
instance in which a `~numpy.ndarray` method has been applied to those.
Furthermore, for consistency with `~numpy.ndarray`, it is recommended to
define a setter for the ``shape`` property, which, like the
`~numpy.ndarray.shape` property allows in-place reshaping the internal data
(and, unlike the ``reshape`` method raises an exception if this is not
possible).
This class also defines default implementations for ``ndim`` and ``size``
properties, calculating those from the ``shape``. These can be overridden
by subclasses if there are faster ways to obtain those numbers.
"""
# Note to developers: if new methods are added here, be sure to check that
# they work properly with the classes that use this, such as Time and
# BaseRepresentation, i.e., look at their ``_apply`` methods and add
# relevant tests. This is particularly important for methods that imply
# copies rather than views of data (see the special-case treatment of
# 'flatten' in Time).
@property
@abc.abstractmethod
def shape(self):
"""The shape of the underlying data."""
@abc.abstractmethod
def _apply(method, *args, **kwargs):
"""Create a new instance, with ``method`` applied to underlying data.
The method is any of the shape-changing methods for `~numpy.ndarray`
(``reshape``, ``swapaxes``, etc.), as well as those picking particular
elements (``__getitem__``, ``take``, etc.). It will be applied to the
underlying arrays (e.g., ``jd1`` and ``jd2`` in `~astropy.time.Time`),
with the results used to create a new instance.
Parameters
----------
method : str
Method to be applied to the instance's internal data arrays.
args : tuple
Any positional arguments for ``method``.
kwargs : dict
Any keyword arguments for ``method``.
"""
@property
def ndim(self):
"""The number of dimensions of the instance and underlying arrays."""
return len(self.shape)
@property
def size(self):
"""The size of the object, as calculated from its shape."""
size = 1
for sh in self.shape:
size *= sh
return size
@property
def isscalar(self):
return self.shape == ()
def __len__(self):
if self.isscalar:
raise TypeError(f"Scalar {self.__class__.__name__!r} object has no len()")
return self.shape[0]
def __bool__(self):
"""Any instance should evaluate to True, except when it is empty."""
return self.size > 0
def __getitem__(self, item):
try:
return self._apply("__getitem__", item)
except IndexError:
if self.isscalar:
raise TypeError(
f"scalar {self.__class.__name__!r} object is not subscriptable."
)
else:
raise
def __iter__(self):
if self.isscalar:
raise TypeError(
f"scalar {self.__class__.__name__!r} object is not iterable."
)
# We cannot just write a generator here, since then the above error
# would only be raised once we try to use the iterator, rather than
# upon its definition using iter(self).
def self_iter():
for idx in range(len(self)):
yield self[idx]
return self_iter()
# Functions that change shape or essentially do indexing.
_APPLICABLE_FUNCTIONS = {
np.moveaxis,
np.rollaxis,
np.atleast_1d,
np.atleast_2d,
np.atleast_3d,
np.expand_dims,
np.broadcast_to,
np.flip,
np.fliplr,
np.flipud,
np.rot90,
np.roll,
np.delete,
}
# Functions that themselves defer to a method. Those are all
# defined in np.core.fromnumeric, but exclude alen as well as
# sort and partition, which make copies before calling the method.
_METHOD_FUNCTIONS = {
getattr(np, name): {
"amax": "max",
"amin": "min",
"around": "round",
"round_": "round",
"alltrue": "all",
"sometrue": "any",
}.get(name, name)
for name in np.core.fromnumeric.__all__
if name not in ["alen", "sort", "partition"]
}
# Add np.copy, which we may as well let defer to our method.
_METHOD_FUNCTIONS[np.copy] = "copy"
# Could be made to work with a bit of effort:
# np.where, np.compress, np.extract,
# np.diag_indices_from, np.triu_indices_from, np.tril_indices_from
# np.tile, np.repeat (need .repeat method)
# TODO: create a proper implementation.
# Furthermore, some arithmetic functions such as np.mean, np.median,
# could work for Time, and many more for TimeDelta, so those should
# override __array_function__.
def __array_function__(self, function, types, args, kwargs):
"""Wrap numpy functions that make sense."""
if function in self._APPLICABLE_FUNCTIONS:
if function is np.broadcast_to:
# Ensure that any ndarray subclasses used are
# properly propagated.
kwargs.setdefault("subok", True)
elif (
function in {np.atleast_1d, np.atleast_2d, np.atleast_3d}
and len(args) > 1
):
return tuple(function(arg, **kwargs) for arg in args)
if self is not args[0]:
return NotImplemented
return self._apply(function, *args[1:], **kwargs)
# For functions that defer to methods, use the corresponding
# method/attribute if we have it. Otherwise, fall through.
if self is args[0] and function in self._METHOD_FUNCTIONS:
method = getattr(self, self._METHOD_FUNCTIONS[function], None)
if method is not None:
if callable(method):
return method(*args[1:], **kwargs)
else:
# For np.shape, etc., just return the attribute.
return method
# Fall-back, just pass the arguments on since perhaps the function
# works already (see above).
return function.__wrapped__(*args, **kwargs)
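

# Editor's note: a minimal concrete subclass sketch (hypothetical, for
# illustration only).  Implementing just ``shape`` and ``_apply`` is enough
# for all the reshaping/indexing methods above to operate on several arrays
# in lockstep; the callable branch mirrors how __array_function__ passes
# whole numpy functions on to _apply.
def _example_shaped_like_ndarray():
    class _Pair(ShapedLikeNDArray):
        def __init__(self, a, b):
            self._a = np.asarray(a)
            self._b = np.asarray(b)

        @property
        def shape(self):
            return self._a.shape

        def _apply(self, method, *args, **kwargs):
            if callable(method):  # e.g. np.flip, via __array_function__
                return self.__class__(
                    method(self._a, *args, **kwargs),
                    method(self._b, *args, **kwargs),
                )
            return self.__class__(
                getattr(self._a, method)(*args, **kwargs),
                getattr(self._b, method)(*args, **kwargs),
            )

    p = _Pair(np.arange(6), np.arange(6) * 10)
    assert p.reshape(2, 3).shape == (2, 3)  # both arrays reshaped together
    assert p[1:4].shape == (3,)             # slicing goes through _apply too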
class IncompatibleShapeError(ValueError):
def __init__(self, shape_a, shape_a_idx, shape_b, shape_b_idx):
super().__init__(shape_a, shape_a_idx, shape_b, shape_b_idx)
def check_broadcast(*shapes):
"""
Determines whether two or more Numpy arrays can be broadcast with each
other based on their shape tuple alone.
Parameters
----------
*shapes : tuple
All shapes to include in the comparison. If only one shape is given it
is passed through unmodified. If no shapes are given returns an empty
`tuple`.
Returns
-------
broadcast : `tuple`
If all shapes are mutually broadcastable, returns a tuple of the full
broadcast shape.
"""
if len(shapes) == 0:
return ()
elif len(shapes) == 1:
return shapes[0]
reversed_shapes = (reversed(shape) for shape in shapes)
full_shape = []
for dims in zip_longest(*reversed_shapes, fillvalue=1):
max_dim = 1
max_dim_idx = None
for idx, dim in enumerate(dims):
if dim == 1:
continue
if max_dim == 1:
# The first dimension of size greater than 1
max_dim = dim
max_dim_idx = idx
elif dim != max_dim:
raise IncompatibleShapeError(
shapes[max_dim_idx], max_dim_idx, shapes[idx], idx
)
full_shape.append(max_dim)
return tuple(full_shape[::-1])
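

# Editor's example: check_broadcast follows numpy's broadcasting rules but
# operates purely on shape tuples, without allocating any arrays.
def _example_check_broadcast():
    assert check_broadcast((2, 3), (3,)) == (2, 3)
    assert check_broadcast((4, 1), (1, 5)) == (4, 5)
    try:
        check_broadcast((2,), (3,))
    except IncompatibleShapeError:
        pass  # lengths 2 and 3 are incompatible in the last axis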
def unbroadcast(array):
"""
Given an array, return a new array that is the smallest subset of the
original array that can be re-broadcasted back to the original array.
See https://stackoverflow.com/questions/40845769/un-broadcasting-numpy-arrays
for more details.
"""
if array.ndim == 0:
return array
array = array[
tuple((slice(0, 1) if stride == 0 else slice(None)) for stride in array.strides)
]
# Remove leading ones, which are not needed in numpy broadcasting.
first_not_unity = next(
(i for (i, s) in enumerate(array.shape) if s > 1), array.ndim
)
return array.reshape(array.shape[first_not_unity:])
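

# Editor's example: a broadcast array advertises stride-0 dimensions;
# unbroadcast strips them back down to the minimal underlying data.
def _example_unbroadcast():
    base = np.arange(3)
    big = np.broadcast_to(base, (4, 3))  # shape (4, 3), but only 3 values
    small = unbroadcast(big)
    assert small.shape == (3,)
    assert np.array_equal(np.broadcast_to(small, big.shape), big)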
|
5a247ebaf8d7330bc3ec083405bfb0eae618faaf8c2ade346d4881467e66655b | """Utilities and extensions for use with `argparse`."""
import argparse
import os
def directory(arg):
"""
An argument type (for use with the ``type=`` argument to
`argparse.ArgumentParser.add_argument` which determines if the argument is
an existing directory (and returns the absolute path).
"""
    if not (isinstance(arg, str) and os.path.isdir(arg)):
raise argparse.ArgumentTypeError(
f"{arg} is not a directory or does not exist (the directory must "
"be created first)"
)
return os.path.abspath(arg)
def readable_directory(arg):
"""
An argument type (for use with the ``type=`` argument to
    `argparse.ArgumentParser.add_argument`) which determines if the argument is
a directory that exists and is readable (and returns the absolute path).
"""
arg = directory(arg)
if not os.access(arg, os.R_OK):
raise argparse.ArgumentTypeError(
f"{arg} exists but is not readable with its current permissions"
)
return arg
def writeable_directory(arg):
"""
An argument type (for use with the ``type=`` argument to
    `argparse.ArgumentParser.add_argument`) which determines if the argument is
a directory that exists and is writeable (and returns the absolute path).
"""
arg = directory(arg)
if not os.access(arg, os.W_OK):
raise argparse.ArgumentTypeError(
f"{arg} exists but is not writeable with its current permissions"
)
return arg
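

# Editor's usage sketch: wiring the validators above into a parser (the
# option names below are illustrative).
def _example_parser():
    parser = argparse.ArgumentParser()
    parser.add_argument("--indir", type=readable_directory,
                        help="existing, readable input directory")
    parser.add_argument("--outdir", type=writeable_directory,
                        help="existing, writeable output directory")
    # parse_args() then yields absolute paths, or exits with the
    # ArgumentTypeError message if validation fails.
    return parser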
|
1ec8387ea2e7af8d72a5d6640dce80b33e229834cf8d468c02e2b476813984db | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Functions for accessing, downloading, and caching data files."""
import atexit
import contextlib
import errno
import fnmatch
import ftplib
import functools
import hashlib
import io
import os
import re
import shutil
# import ssl moved inside functions using ssl to avoid import failure
# when running in pyodide/Emscripten
import sys
import urllib.error
import urllib.parse
import urllib.request
import zipfile
from tempfile import NamedTemporaryFile, TemporaryDirectory, gettempdir, mkdtemp
from warnings import warn
try:
import certifi
except ImportError:
# certifi support is optional; when available it will be used for TLS/SSL
# downloads
certifi = None
import astropy.config.paths
from astropy import config as _config
from astropy.utils.compat.optional_deps import HAS_FSSPEC
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.introspection import find_current_module, resolve_name
# Order here determines order in the autosummary
__all__ = [
"Conf",
"conf",
"download_file",
"download_files_in_parallel",
"get_readable_fileobj",
"get_pkg_data_fileobj",
"get_pkg_data_filename",
"get_pkg_data_contents",
"get_pkg_data_fileobjs",
"get_pkg_data_filenames",
"get_pkg_data_path",
"is_url",
"is_url_in_cache",
"get_cached_urls",
"cache_total_size",
"cache_contents",
"export_download_cache",
"import_download_cache",
"import_file_to_cache",
"check_download_cache",
"clear_download_cache",
"compute_hash",
"get_free_space_in_dir",
"check_free_space_in_dir",
"get_file_contents",
"CacheMissingWarning",
"CacheDamaged",
]
_dataurls_to_alias = {}
class _NonClosingBufferedReader(io.BufferedReader):
def __del__(self):
try:
# NOTE: self.raw will not be closed, but left in the state
            # it was in at detachment
self.detach()
except Exception:
pass
class _NonClosingTextIOWrapper(io.TextIOWrapper):
def __del__(self):
try:
# NOTE: self.stream will not be closed, but left in the state
            # it was in at detachment
self.detach()
except Exception:
pass
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.utils.data`.
"""
dataurl = _config.ConfigItem(
"http://data.astropy.org/", "Primary URL for astropy remote data site."
)
dataurl_mirror = _config.ConfigItem(
"http://www.astropy.org/astropy-data/",
"Mirror URL for astropy remote data site.",
)
default_http_user_agent = _config.ConfigItem(
"astropy",
"Default User-Agent for HTTP request headers. This can be overwritten "
"for a particular call via http_headers option, where available. "
"This only provides the default value when not set by https_headers.",
)
remote_timeout = _config.ConfigItem(
10.0,
"Time to wait for remote data queries (in seconds).",
aliases=["astropy.coordinates.name_resolve.name_resolve_timeout"],
)
allow_internet = _config.ConfigItem(
True, "If False, prevents any attempt to download from Internet."
)
compute_hash_block_size = _config.ConfigItem(
2**16, "Block size for computing file hashes." # 64K
)
download_block_size = _config.ConfigItem(
2**16, "Number of bytes of remote data to download per step." # 64K
)
delete_temporary_downloads_at_exit = _config.ConfigItem(
True,
"If True, temporary download files created when the cache is "
"inaccessible will be deleted at the end of the python session.",
)
conf = Conf()
class CacheMissingWarning(AstropyWarning):
"""
This warning indicates the standard cache directory is not accessible, with
the first argument providing the warning message. If args[1] is present, it
is a filename indicating the path to a temporary file that was created to
store a remote data download in the absence of the cache.
"""
def is_url(string):
"""
Test whether a string is a valid URL for :func:`download_file`.
Parameters
----------
string : str
The string to test.
Returns
-------
status : bool
String is URL or not.
"""
url = urllib.parse.urlparse(string)
# we can't just check that url.scheme is not an empty string, because
# file paths in windows would return a non-empty scheme (e.g. e:\\
# returns 'e').
return url.scheme.lower() in ["http", "https", "ftp", "sftp", "ssh", "file"]
# Backward compatibility because some downstream packages allegedly use it.
_is_url = is_url
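

# Editor's example: only schemes meaningful to download_file count as URLs;
# bare paths and Windows drive letters do not.
def _example_is_url():
    assert is_url("https://data.astropy.org/index.html")
    assert is_url("ftp://example.org/file.dat")
    assert not is_url("/local/path/file.dat")  # no scheme at all
    assert not is_url(r"e:\data\file.dat")     # drive letter parses as scheme 'e'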
def _requires_fsspec(url):
"""Does the `url` require the optional ``fsspec`` dependency to open?"""
return isinstance(url, str) and url.startswith(("s3://", "gs://"))
def _is_inside(path, parent_path):
# We have to try realpath too to avoid issues with symlinks, but we leave
# abspath because some systems like debian have the absolute path (with no
# symlinks followed) match, but the real directories in different
# locations, so need to try both cases.
return os.path.abspath(path).startswith(
os.path.abspath(parent_path)
) or os.path.realpath(path).startswith(os.path.realpath(parent_path))
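

# Editor's example: _is_inside is a plain string-prefix test on the resolved
# paths, so sibling directories sharing a prefix (e.g. /pkg vs /pkg2) would
# also match; callers in this module only compare against package roots.
def _example_is_inside():
    assert _is_inside("/pkg/data/file.dat", "/pkg")
    assert not _is_inside("/elsewhere/file.dat", "/pkg")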
@contextlib.contextmanager
def get_readable_fileobj(
name_or_obj,
encoding=None,
cache=False,
show_progress=True,
remote_timeout=None,
sources=None,
http_headers=None,
*,
use_fsspec=None,
fsspec_kwargs=None,
close_files=True,
):
"""Yield a readable, seekable file-like object from a file or URL.
This supports passing filenames, URLs, and readable file-like objects,
any of which can be compressed in gzip, bzip2 or lzma (xz) if the
appropriate compression libraries are provided by the Python installation.
Notes
-----
This function is a context manager, and should be used for example
as::
with get_readable_fileobj('file.dat') as f:
contents = f.read()
If a URL is provided and the cache is in use, the provided URL will be the
name used in the cache. The contents may already be stored in the cache
    under this URL, they may be downloaded from this URL, or they may
be downloaded from one of the locations listed in ``sources``. See
`~download_file` for details.
Parameters
----------
name_or_obj : str or file-like
The filename of the file to access (if given as a string), or
the file-like object to access.
If a file-like object, it must be opened in binary mode.
encoding : str, optional
When `None` (default), returns a file-like object with a
``read`` method that returns `str` (``unicode``) objects, using
`locale.getpreferredencoding` as an encoding. This matches
the default behavior of the built-in `open` when no ``mode``
argument is provided.
When ``'binary'``, returns a file-like object where its ``read``
method returns `bytes` objects.
When another string, it is the name of an encoding, and the
file-like object's ``read`` method will return `str` (``unicode``)
objects, decoded from binary using the given encoding.
cache : bool or "update", optional
Whether to cache the contents of remote URLs. If "update",
check the remote URL for a new version but store the result
in the cache.
show_progress : bool, optional
Whether to display a progress bar if the file is downloaded
from a remote server. Default is `True`.
remote_timeout : float
Timeout for remote requests in seconds (default is the configurable
`astropy.utils.data.Conf.remote_timeout`).
sources : list of str, optional
If provided, a list of URLs to try to obtain the file from. The
result will be stored under the original URL. The original URL
will *not* be tried unless it is in this list; this is to prevent
long waits for a primary server that is known to be inaccessible
at the moment.
http_headers : dict or None
HTTP request headers to pass into ``urlopen`` if needed. (These headers
are ignored if the protocol for the ``name_or_obj``/``sources`` entry
is not a remote HTTP URL.) In the default case (None), the headers are
``User-Agent: some_value`` and ``Accept: */*``, where ``some_value``
is set by ``astropy.utils.data.conf.default_http_user_agent``.
use_fsspec : bool, optional
Use `fsspec.open` to open the file? Defaults to `False` unless
``name_or_obj`` starts with the Amazon S3 storage prefix ``s3://``
or the Google Cloud Storage prefix ``gs://``. Can also be used for paths
with other prefixes (e.g. ``http://``) but in this case you must
explicitly pass ``use_fsspec=True``.
Use of this feature requires the optional ``fsspec`` package.
A ``ModuleNotFoundError`` will be raised if the dependency is missing.
.. versionadded:: 5.2
fsspec_kwargs : dict, optional
Keyword arguments passed on to `fsspec.open`. This can be used to
configure cloud storage credentials and caching behavior.
For example, pass ``fsspec_kwargs={"anon": True}`` to enable
anonymous access to Amazon S3 open data buckets.
See ``fsspec``'s documentation for available parameters.
.. versionadded:: 5.2
close_files : bool, optional
Close the file object when exiting the context manager.
Default is `True`.
.. versionadded:: 5.2
Returns
-------
file : readable file-like
"""
# close_fds is a list of file handles created by this function
# that need to be closed. We don't want to always just close the
# returned file handle, because it may simply be the file handle
# passed in. In that case it is not the responsibility of this
# function to close it: doing so could result in a "double close"
# and an "invalid file descriptor" exception.
close_fds = []
delete_fds = []
if remote_timeout is None:
# use configfile default
remote_timeout = conf.remote_timeout
# Have `use_fsspec` default to ``True`` if the user passed an Amazon S3
# or Google Cloud Storage URI.
if use_fsspec is None and _requires_fsspec(name_or_obj):
use_fsspec = True
if use_fsspec:
if not isinstance(name_or_obj, str):
raise TypeError("`name_or_obj` must be a string when `use_fsspec=True`")
if fsspec_kwargs is None:
fsspec_kwargs = {}
# name_or_obj could be an os.PathLike object
if isinstance(name_or_obj, os.PathLike):
name_or_obj = os.fspath(name_or_obj)
# Get a file object to the content
if isinstance(name_or_obj, str):
# Use fsspec to open certain cloud-hosted files (e.g., AWS S3, Google Cloud Storage)
if use_fsspec:
if not HAS_FSSPEC:
raise ModuleNotFoundError("please install `fsspec` to open this file")
import fsspec # local import because it is a niche dependency
openfileobj = fsspec.open(name_or_obj, **fsspec_kwargs)
close_fds.append(openfileobj)
fileobj = openfileobj.open()
close_fds.append(fileobj)
else:
is_url = _is_url(name_or_obj)
if is_url:
name_or_obj = download_file(
name_or_obj,
cache=cache,
show_progress=show_progress,
timeout=remote_timeout,
sources=sources,
http_headers=http_headers,
)
fileobj = io.FileIO(name_or_obj, "r")
if is_url and not cache:
delete_fds.append(fileobj)
close_fds.append(fileobj)
else:
fileobj = name_or_obj
# Check if the file object supports random access, and if not,
# then wrap it in a BytesIO buffer. It would be nicer to use a
    # BufferedReader to avoid loading the whole file first,
# but that might not be compatible with all possible I/O classes.
if not hasattr(fileobj, "seek"):
try:
# py.path.LocalPath objects have .read() method but it uses
# text mode, which won't work. .read_binary() does, and
# surely other ducks would return binary contents when
# called like this.
# py.path.LocalPath is what comes from the legacy tmpdir fixture
# in pytest.
fileobj = io.BytesIO(fileobj.read_binary())
except AttributeError:
fileobj = io.BytesIO(fileobj.read())
# Now read enough bytes to look at signature
signature = fileobj.read(4)
fileobj.seek(0)
if signature[:3] == b"\x1f\x8b\x08": # gzip
import struct
try:
import gzip
fileobj_new = gzip.GzipFile(fileobj=fileobj, mode="rb")
fileobj_new.read(1) # need to check that the file is really gzip
except (OSError, EOFError, struct.error): # invalid gzip file
fileobj.seek(0)
fileobj_new.close()
else:
fileobj_new.seek(0)
fileobj = fileobj_new
elif signature[:3] == b"BZh": # bzip2
try:
import bz2
except ImportError:
for fd in close_fds:
fd.close()
raise ModuleNotFoundError(
"This Python installation does not provide the bz2 module."
)
try:
# bz2.BZ2File does not support file objects, only filenames, so we
# need to write the data to a temporary file
with NamedTemporaryFile("wb", delete=False) as tmp:
tmp.write(fileobj.read())
tmp.close()
fileobj_new = bz2.BZ2File(tmp.name, mode="rb")
fileobj_new.read(1) # need to check that the file is really bzip2
except OSError: # invalid bzip2 file
fileobj.seek(0)
fileobj_new.close()
# raise
else:
fileobj_new.seek(0)
close_fds.append(fileobj_new)
fileobj = fileobj_new
elif signature[:3] == b"\xfd7z": # xz
try:
import lzma
fileobj_new = lzma.LZMAFile(fileobj, mode="rb")
fileobj_new.read(1) # need to check that the file is really xz
except ImportError:
for fd in close_fds:
fd.close()
raise ModuleNotFoundError(
"This Python installation does not provide the lzma module."
)
except (OSError, EOFError): # invalid xz file
fileobj.seek(0)
fileobj_new.close()
# should we propagate this to the caller to signal bad content?
# raise ValueError(e)
else:
fileobj_new.seek(0)
fileobj = fileobj_new
# By this point, we have a file, io.FileIO, gzip.GzipFile, bz2.BZ2File
# or lzma.LZMAFile instance opened in binary mode (that is, read
# returns bytes). Now we need to, if requested, wrap it in a
# io.TextIOWrapper so read will return unicode based on the
# encoding parameter.
needs_textio_wrapper = encoding != "binary"
if needs_textio_wrapper:
# A bz2.BZ2File can not be wrapped by a TextIOWrapper,
# so we decompress it to a temporary file and then
# return a handle to that.
try:
import bz2
except ImportError:
pass
else:
if isinstance(fileobj, bz2.BZ2File):
tmp = NamedTemporaryFile("wb", delete=False)
data = fileobj.read()
tmp.write(data)
tmp.close()
delete_fds.append(tmp)
fileobj = io.FileIO(tmp.name, "r")
close_fds.append(fileobj)
fileobj = _NonClosingBufferedReader(fileobj)
fileobj = _NonClosingTextIOWrapper(fileobj, encoding=encoding)
# Ensure that file is at the start - io.FileIO will for
# example not always be at the start:
# >>> import io
# >>> f = open('test.fits', 'rb')
# >>> f.read(4)
# 'SIMP'
# >>> f.seek(0)
# >>> fileobj = io.FileIO(f.fileno())
# >>> fileobj.tell()
# 4096L
fileobj.seek(0)
try:
yield fileobj
finally:
if close_files:
for fd in close_fds:
fd.close()
for fd in delete_fds:
os.remove(fd.name)
def get_file_contents(*args, **kwargs):
"""
Retrieves the contents of a filename or file-like object.
See the `get_readable_fileobj` docstring for details on parameters.
Returns
-------
object
The content of the file (as requested by ``encoding``).
"""
with get_readable_fileobj(*args, **kwargs) as f:
return f.read()
@contextlib.contextmanager
def get_pkg_data_fileobj(data_name, package=None, encoding=None, cache=True):
"""
Retrieves a data file from the standard locations for the package and
provides the file as a file-like object that reads bytes.
Parameters
----------
data_name : str
Name/location of the desired data file. One of the following:
* The name of a data file included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
              ``astropy.pkgname``, use ``'data/file.dat'`` to get the
file in ``astropy/pkgname/data/file.dat``. Double-dots
can be used to go up a level. In the same example, use
``'../data/file.dat'`` to get ``astropy/data/file.dat``.
* If a matching local file does not exist, the Astropy
data server will be queried for the file.
* A hash like that produced by `compute_hash` can be
requested, prefixed by 'hash/'
e.g. 'hash/34c33b3eb0d56eb9462003af249eff28'. The hash
will first be searched for locally, and if not found,
the Astropy data server will be queried.
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
encoding : str, optional
When `None` (default), returns a file-like object with a
        ``read`` method that returns `str` (``unicode``) objects, using
`locale.getpreferredencoding` as an encoding. This matches
the default behavior of the built-in `open` when no ``mode``
argument is provided.
When ``'binary'``, returns a file-like object where its ``read``
method returns `bytes` objects.
When another string, it is the name of an encoding, and the
file-like object's ``read`` method will return `str` (``unicode``)
objects, decoded from binary using the given encoding.
cache : bool
If True, the file will be downloaded and saved locally or the
already-cached local copy will be accessed. If False, the
file-like object will directly access the resource (e.g. if a
remote URL is accessed, an object like that from
`urllib.request.urlopen` is returned).
Returns
-------
fileobj : file-like
An object with the contents of the data file available via
``read`` function. Can be used as part of a ``with`` statement,
automatically closing itself after the ``with`` block.
Raises
------
urllib.error.URLError
If a remote file cannot be found.
OSError
If problems occur writing or reading a local file.
Examples
--------
This will retrieve a data file and its contents for the `astropy.wcs`
tests::
>>> from astropy.utils.data import get_pkg_data_fileobj
>>> with get_pkg_data_fileobj('data/3d_cd.hdr',
... package='astropy.wcs.tests') as fobj:
... fcontents = fobj.read()
...
This next example would download a data file from the astropy data server
because the ``allsky/allsky_rosat.fits`` file is not present in the
source distribution. It will also save the file locally so the
next time it is accessed it won't need to be downloaded.::
>>> from astropy.utils.data import get_pkg_data_fileobj
>>> with get_pkg_data_fileobj('allsky/allsky_rosat.fits',
... encoding='binary') as fobj: # doctest: +REMOTE_DATA +IGNORE_OUTPUT
... fcontents = fobj.read()
...
Downloading http://data.astropy.org/allsky/allsky_rosat.fits [Done]
This does the same thing but does *not* cache it locally::
>>> with get_pkg_data_fileobj('allsky/allsky_rosat.fits',
... encoding='binary', cache=False) as fobj: # doctest: +REMOTE_DATA +IGNORE_OUTPUT
... fcontents = fobj.read()
...
Downloading http://data.astropy.org/allsky/allsky_rosat.fits [Done]
See Also
--------
get_pkg_data_contents : returns the contents of a file or url as a bytes object
get_pkg_data_filename : returns a local name for a file containing the data
"""
datafn = get_pkg_data_path(data_name, package=package)
if os.path.isdir(datafn):
raise OSError(
"Tried to access a data file that's actually a package data directory"
)
elif os.path.isfile(datafn): # local file
with get_readable_fileobj(datafn, encoding=encoding) as fileobj:
yield fileobj
else: # remote file
with get_readable_fileobj(
conf.dataurl + data_name,
encoding=encoding,
cache=cache,
sources=[conf.dataurl + data_name, conf.dataurl_mirror + data_name],
) as fileobj:
# We read a byte to trigger any URLErrors
fileobj.read(1)
fileobj.seek(0)
yield fileobj
def get_pkg_data_filename(
data_name, package=None, show_progress=True, remote_timeout=None
):
"""
Retrieves a data file from the standard locations for the package and
provides a local filename for the data.
This function is similar to `get_pkg_data_fileobj` but returns the
file *name* instead of a readable file-like object. This means
that this function must always cache remote files locally, unlike
`get_pkg_data_fileobj`.
Parameters
----------
data_name : str
Name/location of the desired data file. One of the following:
* The name of a data file included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
              ``astropy.pkgname``, use ``'data/file.dat'`` to get the
file in ``astropy/pkgname/data/file.dat``. Double-dots
can be used to go up a level. In the same example, use
``'../data/file.dat'`` to get ``astropy/data/file.dat``.
* If a matching local file does not exist, the Astropy
data server will be queried for the file.
* A hash like that produced by `compute_hash` can be
requested, prefixed by 'hash/'
e.g. 'hash/34c33b3eb0d56eb9462003af249eff28'. The hash
will first be searched for locally, and if not found,
the Astropy data server will be queried.
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
show_progress : bool, optional
Whether to display a progress bar if the file is downloaded
from a remote server. Default is `True`.
remote_timeout : float
Timeout for the requests in seconds (default is the
configurable `astropy.utils.data.Conf.remote_timeout`).
Raises
------
urllib.error.URLError
If a remote file cannot be found.
OSError
If problems occur writing or reading a local file.
Returns
-------
filename : str
A file path on the local file system corresponding to the data
requested in ``data_name``.
Examples
--------
This will retrieve the contents of the data file for the `astropy.wcs`
tests::
>>> from astropy.utils.data import get_pkg_data_filename
>>> fn = get_pkg_data_filename('data/3d_cd.hdr',
... package='astropy.wcs.tests')
>>> with open(fn) as f:
... fcontents = f.read()
...
This retrieves a data file by hash either locally or from the astropy data
server::
>>> from astropy.utils.data import get_pkg_data_filename
>>> fn = get_pkg_data_filename('hash/34c33b3eb0d56eb9462003af249eff28') # doctest: +SKIP
>>> with open(fn) as f:
... fcontents = f.read()
...
See Also
--------
get_pkg_data_contents : returns the contents of a file or url as a bytes object
get_pkg_data_fileobj : returns a file-like object with the data
"""
if remote_timeout is None:
# use configfile default
remote_timeout = conf.remote_timeout
if data_name.startswith("hash/"):
# first try looking for a local version if a hash is specified
hashfn = _find_hash_fn(data_name[5:])
if hashfn is None:
return download_file(
conf.dataurl + data_name,
cache=True,
show_progress=show_progress,
timeout=remote_timeout,
sources=[conf.dataurl + data_name, conf.dataurl_mirror + data_name],
)
else:
return hashfn
else:
fs_path = os.path.normpath(data_name)
datafn = get_pkg_data_path(fs_path, package=package)
if os.path.isdir(datafn):
raise OSError(
"Tried to access a data file that's actually a package data directory"
)
elif os.path.isfile(datafn): # local file
return datafn
else: # remote file
return download_file(
conf.dataurl + data_name,
cache=True,
show_progress=show_progress,
timeout=remote_timeout,
sources=[conf.dataurl + data_name, conf.dataurl_mirror + data_name],
)
def get_pkg_data_contents(data_name, package=None, encoding=None, cache=True):
"""
Retrieves a data file from the standard locations and returns its
contents as a bytes object.
Parameters
----------
data_name : str
Name/location of the desired data file. One of the following:
* The name of a data file included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
              ``astropy.pkgname``, use ``'data/file.dat'`` to get the
file in ``astropy/pkgname/data/file.dat``. Double-dots
can be used to go up a level. In the same example, use
``'../data/file.dat'`` to get ``astropy/data/file.dat``.
* If a matching local file does not exist, the Astropy
data server will be queried for the file.
* A hash like that produced by `compute_hash` can be
requested, prefixed by 'hash/'
e.g. 'hash/34c33b3eb0d56eb9462003af249eff28'. The hash
will first be searched for locally, and if not found,
the Astropy data server will be queried.
* A URL to some other file.
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
encoding : str, optional
When `None` (default), returns a file-like object with a
``read`` method that returns `str` (``unicode``) objects, using
`locale.getpreferredencoding` as an encoding. This matches
the default behavior of the built-in `open` when no ``mode``
argument is provided.
When ``'binary'``, returns a file-like object where its ``read``
method returns `bytes` objects.
When another string, it is the name of an encoding, and the
file-like object's ``read`` method will return `str` (``unicode``)
objects, decoded from binary using the given encoding.
cache : bool
If True, the file will be downloaded and saved locally or the
already-cached local copy will be accessed. If False, the
file-like object will directly access the resource (e.g. if a
remote URL is accessed, an object like that from
`urllib.request.urlopen` is returned).
Returns
-------
contents : bytes
The complete contents of the file as a bytes object.
Raises
------
urllib.error.URLError
If a remote file cannot be found.
OSError
If problems occur writing or reading a local file.
See Also
--------
get_pkg_data_fileobj : returns a file-like object with the data
get_pkg_data_filename : returns a local name for a file containing the data
"""
with get_pkg_data_fileobj(
data_name, package=package, encoding=encoding, cache=cache
) as fd:
contents = fd.read()
return contents
def get_pkg_data_filenames(datadir, package=None, pattern="*"):
"""
Returns the path of all of the data files in a given directory
that match a given glob pattern.
Parameters
----------
datadir : str
Name/location of the desired data files. One of the following:
* The name of a directory included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
              ``astropy.pkgname``, use ``'data'`` to get the
files in ``astropy/pkgname/data``.
* Remote URLs are not currently supported.
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
pattern : str, optional
A UNIX-style filename glob pattern to match files. See the
`glob` module in the standard library for more information.
By default, matches all files.
Returns
-------
filenames : iterator of str
Paths on the local filesystem in *datadir* matching *pattern*.
Examples
--------
This will retrieve the contents of the data file for the `astropy.wcs`
tests::
>>> from astropy.utils.data import get_pkg_data_filenames
>>> for fn in get_pkg_data_filenames('data/maps', 'astropy.wcs.tests',
... '*.hdr'):
... with open(fn) as f:
... fcontents = f.read()
...
"""
path = get_pkg_data_path(datadir, package=package)
if os.path.isfile(path):
raise OSError(
"Tried to access a data directory that's actually a package data file"
)
elif os.path.isdir(path):
for filename in os.listdir(path):
if fnmatch.fnmatch(filename, pattern):
yield os.path.join(path, filename)
else:
raise OSError("Path not found")
def get_pkg_data_fileobjs(datadir, package=None, pattern="*", encoding=None):
"""
Returns readable file objects for all of the data files in a given
directory that match a given glob pattern.
Parameters
----------
datadir : str
Name/location of the desired data files. One of the following:
* The name of a directory included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
              ``astropy.pkgname``, use ``'data'`` to get the
files in ``astropy/pkgname/data``
* Remote URLs are not currently supported
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
pattern : str, optional
A UNIX-style filename glob pattern to match files. See the
`glob` module in the standard library for more information.
By default, matches all files.
encoding : str, optional
When `None` (default), returns a file-like object with a
``read`` method that returns `str` (``unicode``) objects, using
`locale.getpreferredencoding` as an encoding. This matches
the default behavior of the built-in `open` when no ``mode``
argument is provided.
When ``'binary'``, returns a file-like object where its ``read``
method returns `bytes` objects.
When another string, it is the name of an encoding, and the
file-like object's ``read`` method will return `str` (``unicode``)
objects, decoded from binary using the given encoding.
Returns
-------
fileobjs : iterator of file object
File objects for each of the files on the local filesystem in
*datadir* matching *pattern*.
Examples
--------
This will retrieve the contents of the data file for the `astropy.wcs`
tests::
    >>> from astropy.utils.data import get_pkg_data_fileobjs
>>> for fd in get_pkg_data_fileobjs('data/maps', 'astropy.wcs.tests',
... '*.hdr'):
... fcontents = fd.read()
...
"""
for fn in get_pkg_data_filenames(datadir, package=package, pattern=pattern):
with get_readable_fileobj(fn, encoding=encoding) as fd:
yield fd
def compute_hash(localfn):
"""Computes the MD5 hash for a file.
The hash for a data file is used for looking up data files in a unique
fashion. This is of particular use for tests; a test may require a
particular version of a particular file, in which case it can be accessed
via hash to get the appropriate version.
Typically, if you wish to write a test that requires a particular data
file, you will want to submit that file to the astropy data servers, and
use
e.g. ``get_pkg_data_filename('hash/34c33b3eb0d56eb9462003af249eff28')``,
but with the hash for your file in place of the hash in the example.
Parameters
----------
localfn : str
The path to the file for which the hash should be generated.
Returns
-------
hash : str
The hex digest of the cryptographic hash for the contents of the
``localfn`` file.
"""
with open(localfn, "rb") as f:
h = hashlib.md5()
block = f.read(conf.compute_hash_block_size)
while block:
h.update(block)
block = f.read(conf.compute_hash_block_size)
return h.hexdigest()
def get_pkg_data_path(*path, package=None):
"""Get path from source-included data directories.
Parameters
----------
*path : str
Name/location of the desired data file/directory.
May be a tuple of strings for ``os.path`` joining.
package : str or None, optional, keyword-only
If specified, look for a file relative to the given package, rather
than the calling module's package.
Returns
-------
path : str
Name/location of the desired data file/directory.
Raises
------
ImportError
Given package or module is not importable.
RuntimeError
If the local data file is outside of the package's tree.
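    Examples
    --------
    A hedged sketch; the package and path here are illustrative::
        >>> from astropy.utils.data import get_pkg_data_path
        >>> get_pkg_data_path('data', 'maps',
        ...                   package='astropy.wcs.tests')  # doctest: +SKIP
        '.../astropy/wcs/tests/data/maps'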
"""
if package is None:
module = find_current_module(1, finddiff=["astropy.utils.data", "contextlib"])
if module is None:
# not called from inside an astropy package. So just pass name
# through
return os.path.join(*path)
if not hasattr(module, "__package__") or not module.__package__:
# The __package__ attribute may be missing or set to None; see
# PEP-366, also astropy issue #1256
if "." in module.__name__:
package = module.__name__.rpartition(".")[0]
else:
package = module.__name__
else:
package = module.__package__
else:
# package errors if it isn't a str
# so there is no need for checks in the containing if/else
module = resolve_name(package)
# module path within package
module_path = os.path.dirname(module.__file__)
full_path = os.path.join(module_path, *path)
# Check that file is inside tree.
rootpkgname = package.partition(".")[0]
rootpkg = resolve_name(rootpkgname)
root_dir = os.path.dirname(rootpkg.__file__)
if not _is_inside(full_path, root_dir):
raise RuntimeError(
f"attempted to get a local data file outside of the {rootpkgname} tree."
)
return full_path
def _find_hash_fn(hexdigest, pkgname="astropy"):
"""
Looks for a local file by hash - returns file name if found and a valid
file, otherwise returns None.
"""
for v in cache_contents(pkgname=pkgname).values():
if compute_hash(v) == hexdigest:
return v
return None
def get_free_space_in_dir(path, unit=False):
"""
Given a path to a directory, returns the amount of free space
on that filesystem.
Parameters
----------
path : str
The path to a directory.
unit : bool or `~astropy.units.Unit`
Return the amount of free space as Quantity in the given unit,
if provided. Default is `False` for backward-compatibility.
Returns
-------
free_space : int or `~astropy.units.Quantity`
The amount of free space on the partition that the directory is on.
If ``unit=False``, it is returned as plain integer (in bytes).
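    Examples
    --------
    For example (the numbers returned depend entirely on your system)::
        >>> from astropy.utils.data import get_free_space_in_dir
        >>> get_free_space_in_dir(".")  # doctest: +SKIP
        34985574400
        >>> get_free_space_in_dir(".", unit=True)  # doctest: +SKIP
        <Quantity 3.49855744e+10 byte>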
"""
if not os.path.isdir(path):
raise OSError(
"Can only determine free space associated with directories, not files."
)
# Actually you can on Linux but I want to avoid code that fails
# on Windows only.
free_space = shutil.disk_usage(path).free
if unit:
from astropy import units as u
# TODO: Automatically determine best prefix to use.
if unit is True:
unit = u.byte
free_space = u.Quantity(free_space, u.byte).to(unit)
return free_space
def check_free_space_in_dir(path, size):
"""
Determines if a given directory has enough space to hold a file of
a given size.
Parameters
----------
path : str
The path to a directory.
size : int or `~astropy.units.Quantity`
A proposed filesize. If not a Quantity, assume it is in bytes.
Raises
------
OSError
There is not enough room on the filesystem.
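    Examples
    --------
    A quick sketch; this passes silently unless the filesystem is nearly
    full::
        >>> from astropy.utils.data import check_free_space_in_dir
        >>> check_free_space_in_dir(".", 1024)  # doctest: +SKIP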
"""
space = get_free_space_in_dir(path, unit=getattr(size, "unit", False))
if space < size:
from astropy.utils.console import human_file_size
raise OSError(
f"Not enough free space in {path} "
f"to download a {human_file_size(size)} file, "
f"only {human_file_size(space)} left"
)
class _ftptlswrapper(urllib.request.ftpwrapper):
def init(self):
self.busy = 0
self.ftp = ftplib.FTP_TLS()
self.ftp.connect(self.host, self.port, self.timeout)
self.ftp.login(self.user, self.passwd)
self.ftp.prot_p()
_target = "/".join(self.dirs)
self.ftp.cwd(_target)
class _FTPTLSHandler(urllib.request.FTPHandler):
def connect_ftp(self, user, passwd, host, port, dirs, timeout):
return _ftptlswrapper(user, passwd, host, port, dirs, timeout, persistent=False)
@functools.lru_cache
def _build_urlopener(ftp_tls=False, ssl_context=None, allow_insecure=False):
"""
Helper for building a `urllib.request.build_opener` which handles TLS/SSL.
"""
# Import ssl here to avoid import failure when running in pyodide/Emscripten
import ssl
ssl_context = dict(it for it in ssl_context) if ssl_context else {}
cert_chain = {}
if "certfile" in ssl_context:
cert_chain.update(
{
"certfile": ssl_context.pop("certfile"),
"keyfile": ssl_context.pop("keyfile", None),
"password": ssl_context.pop("password", None),
}
)
elif "password" in ssl_context or "keyfile" in ssl_context:
raise ValueError(
"passing 'keyfile' or 'password' in the ssl_context argument "
"requires passing 'certfile' as well"
)
if "cafile" not in ssl_context and certifi is not None:
ssl_context["cafile"] = certifi.where()
ssl_context = ssl.create_default_context(**ssl_context)
if allow_insecure:
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
if cert_chain:
ssl_context.load_cert_chain(**cert_chain)
https_handler = urllib.request.HTTPSHandler(context=ssl_context)
if ftp_tls:
urlopener = urllib.request.build_opener(_FTPTLSHandler(), https_handler)
else:
urlopener = urllib.request.build_opener(https_handler)
return urlopener
def _try_url_open(
source_url,
timeout=None,
http_headers=None,
ftp_tls=False,
ssl_context=None,
allow_insecure=False,
):
"""Helper for opening a URL while handling TLS/SSL verification issues."""
# Import ssl here to avoid import failure when running in pyodide/Emscripten
import ssl
# Always try first with a secure connection
# _build_urlopener uses lru_cache, so the ssl_context argument must be
    # converted to a hashable type (a frozenset of 2-tuples)
ssl_context = frozenset(ssl_context.items() if ssl_context else [])
urlopener = _build_urlopener(
ftp_tls=ftp_tls, ssl_context=ssl_context, allow_insecure=False
)
req = urllib.request.Request(source_url, headers=http_headers)
try:
return urlopener.open(req, timeout=timeout)
except urllib.error.URLError as exc:
reason = exc.reason
if (
isinstance(reason, ssl.SSLError)
and reason.reason == "CERTIFICATE_VERIFY_FAILED"
):
msg = (
f"Verification of TLS/SSL certificate at {source_url} "
"failed: this can mean either the server is "
"misconfigured or your local root CA certificates are "
"out-of-date; in the latter case this can usually be "
'addressed by installing the Python package "certifi" '
"(see the documentation for astropy.utils.data.download_url)"
)
if not allow_insecure:
msg += (
" or in both cases you can work around this by "
"passing allow_insecure=True, but only if you "
"understand the implications; the original error "
f"was: {reason}"
)
raise urllib.error.URLError(msg)
else:
msg += ". Re-trying with allow_insecure=True."
warn(msg, AstropyWarning)
# Try again with a new urlopener allowing insecure connections
urlopener = _build_urlopener(
ftp_tls=ftp_tls, ssl_context=ssl_context, allow_insecure=True
)
return urlopener.open(req, timeout=timeout)
raise
def _download_file_from_source(
source_url,
show_progress=True,
timeout=None,
remote_url=None,
cache=False,
pkgname="astropy",
http_headers=None,
ftp_tls=None,
ssl_context=None,
allow_insecure=False,
):
from astropy.utils.console import ProgressBarOrSpinner
if not conf.allow_internet:
raise urllib.error.URLError(
f"URL {remote_url} was supposed to be downloaded but "
f"allow_internet is {conf.allow_internet}; "
"if this is unexpected check the astropy.cfg file for the option "
"allow_internet"
)
if remote_url is None:
remote_url = source_url
if http_headers is None:
http_headers = {}
if ftp_tls is None and urllib.parse.urlparse(remote_url).scheme == "ftp":
try:
return _download_file_from_source(
source_url,
show_progress=show_progress,
timeout=timeout,
remote_url=remote_url,
cache=cache,
pkgname=pkgname,
http_headers=http_headers,
ftp_tls=False,
)
except urllib.error.URLError as e:
# e.reason might not be a string, e.g. socket.gaierror
# URLError changed to report original exception in Python 3.10, 3.11 (bpo-43564)
            # Use removeprefix (not lstrip, which strips a character *set*)
            # so that only the literal "ftp error: " prefix is removed
            if str(e.reason).removeprefix("ftp error: ").startswith(("error_perm", "5")):
ftp_tls = True
else:
raise
with _try_url_open(
source_url,
timeout=timeout,
http_headers=http_headers,
ftp_tls=ftp_tls,
ssl_context=ssl_context,
allow_insecure=allow_insecure,
) as remote:
info = remote.info()
try:
size = int(info["Content-Length"])
except (KeyError, ValueError, TypeError):
size = None
if size is not None:
check_free_space_in_dir(gettempdir(), size)
if cache:
dldir = _get_download_cache_loc(pkgname)
check_free_space_in_dir(dldir, size)
# If a user has overridden sys.stdout it might not have the
# isatty method, in that case assume it's not a tty
is_tty = hasattr(sys.stdout, "isatty") and sys.stdout.isatty()
if show_progress and is_tty:
progress_stream = sys.stdout
else:
progress_stream = io.StringIO()
if source_url == remote_url:
dlmsg = f"Downloading {remote_url}"
else:
dlmsg = f"Downloading {remote_url} from {source_url}"
with ProgressBarOrSpinner(size, dlmsg, file=progress_stream) as p:
with NamedTemporaryFile(
prefix=f"astropy-download-{os.getpid()}-", delete=False
) as f:
try:
bytes_read = 0
block = remote.read(conf.download_block_size)
while block:
f.write(block)
bytes_read += len(block)
p.update(bytes_read)
block = remote.read(conf.download_block_size)
if size is not None and bytes_read > size:
raise urllib.error.URLError(
f"File was supposed to be {size} bytes but "
f"server provides more, at least {bytes_read} "
"bytes. Download failed."
)
if size is not None and bytes_read < size:
raise urllib.error.ContentTooShortError(
f"File was supposed to be {size} bytes but we "
f"only got {bytes_read} bytes. Download failed.",
content=None,
)
except BaseException:
if os.path.exists(f.name):
try:
os.remove(f.name)
except OSError:
pass
raise
return f.name
def download_file(
remote_url,
cache=False,
show_progress=True,
timeout=None,
sources=None,
pkgname="astropy",
http_headers=None,
ssl_context=None,
allow_insecure=False,
):
"""Downloads a URL and optionally caches the result.
It returns the filename of a file containing the URL's contents.
If ``cache=True`` and the file is present in the cache, just
returns the filename; if the file had to be downloaded, add it
to the cache. If ``cache="update"`` always download and add it
to the cache.
The cache is effectively a dictionary mapping URLs to files; by default the
file contains the contents of the URL that is its key, but in practice
these can be obtained from a mirror (using ``sources``) or imported from
the local filesystem (using `~import_file_to_cache` or
`~import_download_cache`). Regardless, each file is regarded as
representing the contents of a particular URL, and this URL should be used
to look them up or otherwise manipulate them.
The files in the cache directory are named according to a cryptographic
hash of their URLs (currently MD5, so hackers can cause collisions).
The modification times on these files normally indicate when they were
last downloaded from the Internet.
Parameters
----------
remote_url : str
The URL of the file to download
cache : bool or "update", optional
Whether to cache the contents of remote URLs. If "update",
always download the remote URL in case there is a new version
and store the result in the cache.
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `True`). Regardless of this setting, the progress bar is only
displayed when outputting to a terminal.
timeout : float, optional
Timeout for remote requests in seconds (default is the configurable
`astropy.utils.data.Conf.remote_timeout`).
sources : list of str, optional
If provided, a list of URLs to try to obtain the file from. The
result will be stored under the original URL. The original URL
will *not* be tried unless it is in this list; this is to prevent
long waits for a primary server that is known to be inaccessible
at the moment. If an empty list is passed, then ``download_file``
will not attempt to connect to the Internet, that is, if the file
is not in the cache a KeyError will be raised.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
http_headers : dict or None
HTTP request headers to pass into ``urlopen`` if needed. (These headers
are ignored if the protocol for the ``name_or_obj``/``sources`` entry
is not a remote HTTP URL.) In the default case (None), the headers are
``User-Agent: some_value`` and ``Accept: */*``, where ``some_value``
is set by ``astropy.utils.data.conf.default_http_user_agent``.
ssl_context : dict, optional
Keyword arguments to pass to `ssl.create_default_context` when
        downloading from HTTPS or TLS+FTP sources. This can be used to provide
alternative paths to root CA certificates. Additionally, if the key
``'certfile'`` and optionally ``'keyfile'`` and ``'password'`` are
included, they are passed to `ssl.SSLContext.load_cert_chain`. This
can be used for performing SSL/TLS client certificate authentication
for servers that require it.
allow_insecure : bool, optional
Allow downloading files over a TLS/SSL connection even when the server
certificate verification failed. When set to `True` the potentially
insecure download is allowed to proceed, but an
`~astropy.utils.exceptions.AstropyWarning` is issued. If you are
frequently getting certificate verification warnings, consider
installing or upgrading `certifi`_ package, which provides frequently
updated certificates for common root CAs (i.e., a set similar to those
used by web browsers). If installed, Astropy will use it
automatically.
.. _certifi: https://pypi.org/project/certifi/
Returns
-------
local_path : str
        Returns the local path that the file was downloaded to.
Raises
------
urllib.error.URLError
Whenever there's a problem getting the remote file.
KeyError
When a file was requested from the cache but is missing and no
sources were provided to obtain it from the Internet.
Notes
-----
Because this function returns a filename, another process could run
`clear_download_cache` before you actually open the file, leaving
you with a filename that no longer points to a usable file.
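    Examples
    --------
    A minimal sketch of typical usage (requires network access, so it is
    skipped here; the URL is purely illustrative)::
        >>> from astropy.utils.data import download_file
        >>> local_path = download_file(
        ...     'https://example.com/data/file.fits', cache=True)  # doctest: +SKIP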
"""
if timeout is None:
timeout = conf.remote_timeout
if sources is None:
sources = [remote_url]
if http_headers is None:
http_headers = {"User-Agent": conf.default_http_user_agent, "Accept": "*/*"}
missing_cache = ""
url_key = remote_url
if cache:
try:
dldir = _get_download_cache_loc(pkgname)
except OSError as e:
cache = False
missing_cache = (
f"Cache directory cannot be read or created ({e}), "
"providing data in temporary file instead."
)
else:
if cache == "update":
pass
elif isinstance(cache, str):
raise ValueError(
f"Cache value '{cache}' was requested but "
"'update' is the only recognized string; "
"otherwise use a boolean"
)
else:
filename = os.path.join(dldir, _url_to_dirname(url_key), "contents")
if os.path.exists(filename):
return os.path.abspath(filename)
errors = {}
for source_url in sources:
try:
f_name = _download_file_from_source(
source_url,
timeout=timeout,
show_progress=show_progress,
cache=cache,
remote_url=remote_url,
pkgname=pkgname,
http_headers=http_headers,
ssl_context=ssl_context,
allow_insecure=allow_insecure,
)
# Success!
break
except urllib.error.URLError as e:
# errno 8 is from SSL "EOF occurred in violation of protocol"
if (
hasattr(e, "reason")
and hasattr(e.reason, "errno")
and e.reason.errno == 8
):
                e.reason.strerror = f"{e.reason.strerror}. Requested URL: {remote_url}"
e.reason.args = (e.reason.errno, e.reason.strerror)
errors[source_url] = e
else: # No success
if not sources:
raise KeyError(
f"No sources listed and file {remote_url} not in cache! "
"Please include primary URL in sources if you want it to be "
"included as a valid source."
)
elif len(sources) == 1:
raise errors[sources[0]]
else:
raise urllib.error.URLError(
f"Unable to open any source! Exceptions were {errors}"
) from errors[sources[0]]
if cache:
try:
return import_file_to_cache(
url_key,
f_name,
remove_original=True,
replace=(cache == "update"),
pkgname=pkgname,
)
except PermissionError as e:
# Cache is readonly, we can't update it
missing_cache = (
f"Cache directory appears to be read-only ({e}), unable to import "
f"downloaded file, providing data in temporary file {f_name} "
"instead."
)
# FIXME: other kinds of cache problem can occur?
if missing_cache:
warn(CacheMissingWarning(missing_cache, f_name))
if conf.delete_temporary_downloads_at_exit:
global _tempfilestodel
_tempfilestodel.append(f_name)
return os.path.abspath(f_name)
def is_url_in_cache(url_key, pkgname="astropy"):
"""Check if a download for ``url_key`` is in the cache.
The provided ``url_key`` will be the name used in the cache. The contents
may have been downloaded from this URL or from a mirror or they may have
been provided by the user. See `~download_file` for details.
Parameters
----------
url_key : str
The URL retrieved
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
Returns
-------
in_cache : bool
`True` if a download for ``url_key`` is in the cache, `False` if not
or if the cache does not exist at all.
See Also
--------
cache_contents : obtain a dictionary listing everything in the cache
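    Examples
    --------
    For example (the result depends on what you have downloaded before; the
    URL is illustrative)::
        >>> from astropy.utils.data import is_url_in_cache
        >>> is_url_in_cache('https://example.com/data/file.fits')  # doctest: +SKIP
        False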
"""
try:
dldir = _get_download_cache_loc(pkgname)
except OSError:
return False
filename = os.path.join(dldir, _url_to_dirname(url_key), "contents")
return os.path.exists(filename)
def cache_total_size(pkgname="astropy"):
"""Return the total size in bytes of all files in the cache."""
size = 0
dldir = _get_download_cache_loc(pkgname=pkgname)
for root, dirs, files in os.walk(dldir):
size += sum(os.path.getsize(os.path.join(root, name)) for name in files)
return size
def _do_download_files_in_parallel(kwargs):
with astropy.config.paths.set_temp_config(kwargs.pop("temp_config")):
with astropy.config.paths.set_temp_cache(kwargs.pop("temp_cache")):
return download_file(**kwargs)
def download_files_in_parallel(
urls,
cache="update",
show_progress=True,
timeout=None,
sources=None,
multiprocessing_start_method=None,
pkgname="astropy",
):
"""Download multiple files in parallel from the given URLs.
Blocks until all files have downloaded. The result is a list of
local file paths corresponding to the given urls.
The results will be stored in the cache under the values in ``urls`` even
if they are obtained from some other location via ``sources``. See
`~download_file` for details.
Parameters
----------
urls : list of str
The URLs to retrieve.
cache : bool or "update", optional
        Whether to use the cache (default is ``"update"``). If "update",
always download the remote URLs to see if new data is available
and store the result in cache.
.. versionchanged:: 4.0
The default was changed to ``"update"`` and setting it to
``False`` will print a Warning and set it to ``"update"`` again,
because the function will not work properly without cache. Using
``True`` will work as expected.
.. versionchanged:: 3.0
The default was changed to ``True`` and setting it to ``False``
will print a Warning and set it to ``True`` again, because the
function will not work properly without cache.
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `True`)
timeout : float, optional
Timeout for each individual requests in seconds (default is the
configurable `astropy.utils.data.Conf.remote_timeout`).
sources : dict, optional
If provided, for each URL a list of URLs to try to obtain the
file from. The result will be stored under the original URL.
For any URL in this dictionary, the original URL will *not* be
tried unless it is in this list; this is to prevent long waits
for a primary server that is known to be inaccessible at the
moment.
multiprocessing_start_method : str, optional
Useful primarily for testing; if in doubt leave it as the default.
When using multiprocessing, certain anomalies occur when starting
processes with the "spawn" method (the only option on Windows);
other anomalies occur with the "fork" method (the default on
Linux).
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
Returns
-------
paths : list of str
The local file paths corresponding to the downloaded URLs.
Notes
-----
If a URL is unreachable, the downloading will grind to a halt and the
exception will propagate upward, but an unpredictable number of
files will have been successfully downloaded and will remain in
the cache.
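    Examples
    --------
    A sketch of fetching several files at once (the URLs are placeholders,
    so this is skipped)::
        >>> from astropy.utils.data import download_files_in_parallel
        >>> paths = download_files_in_parallel(
        ...     ['https://example.com/a.fits',
        ...      'https://example.com/b.fits'])  # doctest: +SKIP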
"""
from .console import ProgressBar
if timeout is None:
timeout = conf.remote_timeout
if sources is None:
sources = {}
if not cache:
# See issue #6662, on windows won't work because the files are removed
# again before they can be used. On *NIX systems it will behave as if
# cache was set to True because multiprocessing cannot insert the items
# in the list of to-be-removed files. This could be fixed, but really,
# just use the cache, with update_cache if appropriate.
warn(
"Disabling the cache does not work because of multiprocessing, "
'it will be set to ``"update"``. You may need to manually remove '
"the cached files with clear_download_cache() afterwards.",
AstropyWarning,
)
cache = "update"
if show_progress:
progress = sys.stdout
else:
progress = io.BytesIO()
# Combine duplicate URLs
combined_urls = list(set(urls))
combined_paths = ProgressBar.map(
_do_download_files_in_parallel,
[
dict(
remote_url=u,
cache=cache,
show_progress=False,
timeout=timeout,
sources=sources.get(u, None),
pkgname=pkgname,
temp_cache=astropy.config.paths.set_temp_cache._temp_path,
temp_config=astropy.config.paths.set_temp_config._temp_path,
)
for u in combined_urls
],
file=progress,
multiprocess=True,
multiprocessing_start_method=multiprocessing_start_method,
)
paths = []
for url in urls:
paths.append(combined_paths[combined_urls.index(url)])
return paths
# This is used by download_file and _deltemps to determine the files to delete
# when the interpreter exits
_tempfilestodel = []
@atexit.register
def _deltemps():
global _tempfilestodel
if _tempfilestodel is not None:
while len(_tempfilestodel) > 0:
fn = _tempfilestodel.pop()
if os.path.isfile(fn):
try:
os.remove(fn)
except OSError:
# oh well we tried
# could be held open by some process, on Windows
pass
elif os.path.isdir(fn):
try:
shutil.rmtree(fn)
except OSError:
# couldn't get rid of it, sorry
# could be held open by some process, on Windows
pass
def clear_download_cache(hashorurl=None, pkgname="astropy"):
"""Clears the data file cache by deleting the local file(s).
If a URL is provided, it will be the name used in the cache. The contents
may have been downloaded from this URL or from a mirror or they may have
been provided by the user. See `~download_file` for details.
For the purposes of this function, a file can also be identified by a hash
of its contents or by the filename under which the data is stored (as
returned by `~download_file`, for example).
Parameters
----------
hashorurl : str or None
If None, the whole cache is cleared. Otherwise, specify
a hash for the cached file that is supposed to be deleted,
the full path to a file in the cache that should be deleted,
or a URL that should be removed from the cache if present.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
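    Examples
    --------
    A sketch of removing a single URL from the cache, or wiping the cache
    entirely (skipped, since it would modify your local cache; the URL is
    illustrative)::
        >>> from astropy.utils.data import clear_download_cache
        >>> clear_download_cache('https://example.com/data/file.fits')  # doctest: +SKIP
        >>> clear_download_cache()  # doctest: +SKIP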
"""
try:
dldir = _get_download_cache_loc(pkgname)
except OSError as e:
# Problem arose when trying to open the cache
# Just a warning, though
msg = "Not clearing data cache - cache inaccessible due to "
estr = "" if len(e.args) < 1 else (": " + str(e))
warn(CacheMissingWarning(msg + e.__class__.__name__ + estr))
return
try:
if hashorurl is None:
# Optional: delete old incompatible caches too
_rmtree(dldir)
elif _is_url(hashorurl):
filepath = os.path.join(dldir, _url_to_dirname(hashorurl))
_rmtree(filepath)
else:
# Not a URL, it should be either a filename or a hash
filepath = os.path.join(dldir, hashorurl)
rp = os.path.relpath(filepath, dldir)
if rp.startswith(".."):
raise RuntimeError(
"attempted to use clear_download_cache on the path "
f"{filepath} outside the data cache directory {dldir}"
)
d, f = os.path.split(rp)
if d and f in ["contents", "url"]:
# It's a filename not the hash of a URL
# so we want to zap the directory containing the
# files "url" and "contents"
filepath = os.path.join(dldir, d)
if os.path.exists(filepath):
_rmtree(filepath)
elif len(hashorurl) == 2 * hashlib.md5().digest_size and re.match(
r"[0-9a-f]+", hashorurl
):
# It's the hash of some file contents, we have to find the right file
filename = _find_hash_fn(hashorurl)
if filename is not None:
clear_download_cache(filename)
except OSError as e:
msg = "Not clearing data from cache - problem arose "
estr = "" if len(e.args) < 1 else (": " + str(e))
warn(CacheMissingWarning(msg + e.__class__.__name__ + estr))
def _get_download_cache_loc(pkgname="astropy"):
"""Finds the path to the cache directory and makes them if they don't exist.
Parameters
----------
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
Returns
-------
datadir : str
The path to the data cache directory.
"""
try:
datadir = os.path.join(
astropy.config.paths.get_cache_dir(pkgname), "download", "url"
)
if not os.path.exists(datadir):
try:
os.makedirs(datadir)
except OSError:
if not os.path.exists(datadir):
raise
elif not os.path.isdir(datadir):
raise OSError(f"Data cache directory {datadir} is not a directory")
return datadir
except OSError as e:
msg = "Remote data cache could not be accessed due to "
estr = "" if len(e.args) < 1 else (": " + str(e))
warn(CacheMissingWarning(msg + e.__class__.__name__ + estr))
raise
def _url_to_dirname(url):
if not _is_url(url):
raise ValueError(f"Malformed URL: '{url}'")
# Make domain names case-insensitive
# Also makes the http:// case-insensitive
urlobj = list(urllib.parse.urlsplit(url))
urlobj[1] = urlobj[1].lower()
if urlobj[0].lower() in ["http", "https"] and urlobj[1] and urlobj[2] == "":
urlobj[2] = "/"
url_c = urllib.parse.urlunsplit(urlobj)
return hashlib.md5(url_c.encode("utf-8")).hexdigest()
class ReadOnlyDict(dict):
def __setitem__(self, key, value):
raise TypeError("This object is read-only.")
_NOTHING = ReadOnlyDict({})
class CacheDamaged(ValueError):
"""Record the URL or file that was a problem.
    Using clear_download_cache on each entry in the ``.bad_files`` or
    ``.bad_urls`` attribute, whichever is non-empty, should resolve this
    particular problem.
"""
def __init__(self, *args, bad_urls=None, bad_files=None, **kwargs):
super().__init__(*args, **kwargs)
self.bad_urls = bad_urls if bad_urls is not None else []
self.bad_files = bad_files if bad_files is not None else []
def check_download_cache(pkgname="astropy"):
"""Do a consistency check on the cache.
.. note::
Since v5.0, this function no longer returns anything.
Because the cache is shared by all versions of ``astropy`` in all virtualenvs
run by your user, possibly concurrently, it could accumulate problems.
This could lead to hard-to-debug problems or wasted space. This function
detects a number of incorrect conditions, including nonexistent files that
are indexed, files that are indexed but in the wrong place, and, if you
request it, files whose content does not match the hash that is indexed.
    This function also detects non-indexed files. A few will be
associated with the shelve object; their exact names depend on the backend
used but will probably be based on ``urlmap``. The presence of other files
probably indicates that something has gone wrong and inaccessible files
have accumulated in the cache. These can be removed with
:func:`clear_download_cache`, either passing the filename returned here, or
with no arguments to empty the entire cache and return it to a
reasonable, if empty, state.
Parameters
----------
pkgname : str, optional
The package name to use to locate the download cache, i.e., for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
Raises
------
`~astropy.utils.data.CacheDamaged`
To indicate a problem with the cache contents; the exception contains
a ``.bad_files`` attribute containing a set of filenames to allow the
user to use :func:`clear_download_cache` to remove the offending items.
OSError, RuntimeError
To indicate some problem with the cache structure. This may need a full
:func:`clear_download_cache` to resolve, or may indicate some kind of
misconfiguration.
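    Examples
    --------
    A quick consistency check of the default cache; this is silent unless a
    problem is found::
        >>> from astropy.utils.data import check_download_cache
        >>> check_download_cache()  # doctest: +SKIP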
"""
bad_files = set()
messages = set()
dldir = _get_download_cache_loc(pkgname=pkgname)
with os.scandir(dldir) as it:
for entry in it:
f = os.path.abspath(os.path.join(dldir, entry.name))
if entry.name.startswith("rmtree-"):
if f not in _tempfilestodel:
bad_files.add(f)
messages.add(f"Cache entry {entry.name} not scheduled for deletion")
elif entry.is_dir():
for sf in os.listdir(f):
if sf in ["url", "contents"]:
continue
sf = os.path.join(f, sf)
bad_files.add(sf)
messages.add(f"Unexpected file f{sf}")
urlf = os.path.join(f, "url")
url = None
if not os.path.isfile(urlf):
bad_files.add(urlf)
messages.add(f"Problem with URL file f{urlf}")
else:
url = get_file_contents(urlf, encoding="utf-8")
if not _is_url(url):
bad_files.add(f)
messages.add(f"Malformed URL: {url}")
else:
hashname = _url_to_dirname(url)
if entry.name != hashname:
bad_files.add(f)
messages.add(
f"URL hashes to {hashname} but is stored in"
f" {entry.name}"
)
if not os.path.isfile(os.path.join(f, "contents")):
bad_files.add(f)
if url is None:
messages.add(f"Hash {entry.name} is missing contents")
else:
messages.add(
f"URL {url} with hash {entry.name} is missing contents"
)
else:
bad_files.add(f)
messages.add(f"Left-over non-directory {f} in cache")
if bad_files:
raise CacheDamaged("\n".join(messages), bad_files=bad_files)
@contextlib.contextmanager
def _SafeTemporaryDirectory(suffix=None, prefix=None, dir=None):
"""Temporary directory context manager.
This will not raise an exception if the temporary directory goes away
before it's supposed to be deleted. Specifically, what is deleted will
be the directory *name* produced; if no such directory exists, no
exception will be raised.
It would be safer to delete it only if it's really the same directory
- checked by file descriptor - and if it's still called the same thing.
But that opens a platform-specific can of worms.
It would also be more robust to use ExitStack and TemporaryDirectory,
which is more aggressive about removing readonly things.
"""
d = mkdtemp(suffix=suffix, prefix=prefix, dir=dir)
try:
yield d
finally:
try:
shutil.rmtree(d)
except OSError:
pass
def _rmtree(path, replace=None):
"""More-atomic rmtree. Ignores missing directory."""
with TemporaryDirectory(
prefix="rmtree-", dir=os.path.dirname(os.path.abspath(path))
) as d:
try:
os.rename(path, os.path.join(d, "to-zap"))
except FileNotFoundError:
pass
except PermissionError:
warn(
CacheMissingWarning(
f"Unable to remove directory {path} because a file in it "
"is in use and you are on Windows",
path,
)
)
raise
if replace is not None:
try:
os.rename(replace, path)
except FileExistsError:
# already there, fine
pass
except OSError as e:
if e.errno == errno.ENOTEMPTY:
# already there, fine
pass
else:
raise
def import_file_to_cache(
url_key, filename, remove_original=False, pkgname="astropy", *, replace=True
):
"""Import the on-disk file specified by filename to the cache.
The provided ``url_key`` will be the name used in the cache. The file
should contain the contents of this URL, at least notionally (the URL may
    be temporarily or permanently unavailable). Users will request these
    contents from the cache using ``url_key``. See :func:`download_file` for
details.
If ``url_key`` already exists in the cache, it will be updated to point to
these imported contents, and its old contents will be deleted from the
cache.
Parameters
----------
url_key : str
The key to index the file under. This should probably be
the URL where the file was located, though if you obtained
it from a mirror you should use the URL of the primary
location.
filename : str
The file whose contents you want to import.
remove_original : bool
Whether to remove the original file (``filename``) once import is
complete.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
replace : boolean, optional
Whether or not to replace an existing object in the cache, if one exists.
If replacement is not requested but the object exists, silently pass.
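    Examples
    --------
    A sketch of importing a local file under a URL key (both names are
    hypothetical)::
        >>> from astropy.utils.data import import_file_to_cache
        >>> import_file_to_cache('https://example.com/data/file.fits',
        ...                      '/tmp/file.fits')  # doctest: +SKIP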
"""
cache_dir = _get_download_cache_loc(pkgname=pkgname)
cache_dirname = _url_to_dirname(url_key)
local_dirname = os.path.join(cache_dir, cache_dirname)
local_filename = os.path.join(local_dirname, "contents")
with _SafeTemporaryDirectory(prefix="temp_dir", dir=cache_dir) as temp_dir:
temp_filename = os.path.join(temp_dir, "contents")
# Make sure we're on the same filesystem
# This will raise an exception if the url_key doesn't turn into a valid filename
shutil.copy(filename, temp_filename)
with open(os.path.join(temp_dir, "url"), "w", encoding="utf-8") as f:
f.write(url_key)
if replace:
_rmtree(local_dirname, replace=temp_dir)
else:
try:
os.rename(temp_dir, local_dirname)
except FileExistsError:
# already there, fine
pass
except OSError as e:
if e.errno == errno.ENOTEMPTY:
# already there, fine
pass
else:
raise
if remove_original:
os.remove(filename)
return os.path.abspath(local_filename)
def get_cached_urls(pkgname="astropy"):
"""
Get the list of URLs in the cache. Especially useful for looking up what
files are stored in your cache when you don't have internet access.
The listed URLs are the keys programs should use to access the file
contents, but those contents may have actually been obtained from a mirror.
See `~download_file` for details.
Parameters
----------
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
Returns
-------
cached_urls : list
List of cached URLs.
See Also
--------
cache_contents : obtain a dictionary listing everything in the cache
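    Examples
    --------
    For example (the output depends entirely on your cache contents)::
        >>> from astropy.utils.data import get_cached_urls
        >>> get_cached_urls()  # doctest: +SKIP
        ['https://example.com/a.fits', 'https://example.com/b.fits']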
"""
return sorted(cache_contents(pkgname=pkgname).keys())
def cache_contents(pkgname="astropy"):
"""Obtain a dict mapping cached URLs to filenames.
This dictionary is a read-only snapshot of the state of the cache when this
function was called. If other processes are actively working with the
cache, it is possible for them to delete files that are listed in this
dictionary. Use with some caution if you are working on a system that is
busy with many running astropy processes, although the same issues apply to
most functions in this module.
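    Examples
    --------
    For example (both the keys and the paths depend on your cache)::
        >>> from astropy.utils.data import cache_contents
        >>> cache_contents()  # doctest: +SKIP
        {'https://example.com/a.fits': '/home/user/.astropy/cache/...'}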
"""
r = {}
try:
dldir = _get_download_cache_loc(pkgname=pkgname)
except OSError:
return _NOTHING
with os.scandir(dldir) as it:
for entry in it:
            # is_dir is a method; it must be called, not truth-tested
            if entry.is_dir():
url = get_file_contents(
os.path.join(dldir, entry.name, "url"), encoding="utf-8"
)
r[url] = os.path.abspath(os.path.join(dldir, entry.name, "contents"))
return ReadOnlyDict(r)
def export_download_cache(
filename_or_obj, urls=None, overwrite=False, pkgname="astropy"
):
"""Exports the cache contents as a ZIP file.
Parameters
----------
filename_or_obj : str or file-like
Where to put the created ZIP file. Must be something the zipfile
module can write to.
urls : iterable of str or None
The URLs to include in the exported cache. The default is all
URLs currently in the cache. If a URL is included in this list
but is not currently in the cache, a KeyError will be raised.
To ensure that all are in the cache use `~download_file`
or `~download_files_in_parallel`.
overwrite : bool, optional
If filename_or_obj is a filename that exists, it will only be
overwritten if this is True.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
See Also
--------
import_download_cache : import the contents of such a ZIP file
import_file_to_cache : import a single file directly
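    Examples
    --------
    A sketch of exporting the whole cache to a ZIP file (the filename is
    hypothetical)::
        >>> from astropy.utils.data import export_download_cache
        >>> export_download_cache('cache_backup.zip', overwrite=True)  # doctest: +SKIP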
"""
if urls is None:
urls = get_cached_urls(pkgname)
with zipfile.ZipFile(filename_or_obj, "w" if overwrite else "x") as z:
for u in urls:
fn = download_file(u, cache=True, sources=[], pkgname=pkgname)
# Do not use os.path.join because ZIP files want
# "/" on all platforms
z_fn = urllib.parse.quote(u, safe="")
z.write(fn, z_fn)
def import_download_cache(
filename_or_obj, urls=None, update_cache=False, pkgname="astropy"
):
"""Imports the contents of a ZIP file into the cache.
Each member of the ZIP file should be named by a quoted version of the
URL whose contents it stores. These names are decoded with
:func:`~urllib.parse.unquote`.
Parameters
----------
filename_or_obj : str or file-like
Where the stored ZIP file is. Must be something the :mod:`~zipfile`
module can read from.
urls : set of str or list of str or None
The URLs to import from the ZIP file. The default is all
URLs in the file.
update_cache : bool, optional
If True, any entry in the ZIP file will overwrite the value in the
cache; if False, leave untouched any entry already in the cache.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
See Also
--------
    export_download_cache : export the contents of the cache to such a ZIP file
import_file_to_cache : import a single file directly
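    Examples
    --------
    A sketch of restoring a previously exported cache (the filename is
    hypothetical)::
        >>> from astropy.utils.data import import_download_cache
        >>> import_download_cache('cache_backup.zip', update_cache=True)  # doctest: +SKIP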
"""
with zipfile.ZipFile(filename_or_obj, "r") as z, TemporaryDirectory() as d:
for i, zf in enumerate(z.infolist()):
url = urllib.parse.unquote(zf.filename)
# FIXME(aarchiba): do we want some kind of validation on this URL?
# urllib.parse might do something sensible...but what URLs might
# they have?
# is_url in this file is probably a good check, not just here
# but throughout this file.
if urls is not None and url not in urls:
continue
if not update_cache and is_url_in_cache(url, pkgname=pkgname):
continue
f_temp_name = os.path.join(d, str(i))
with z.open(zf) as f_zip, open(f_temp_name, "wb") as f_temp:
block = f_zip.read(conf.download_block_size)
while block:
f_temp.write(block)
block = f_zip.read(conf.download_block_size)
import_file_to_cache(
url, f_temp_name, remove_original=True, pkgname=pkgname
)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
A "grab bag" of relatively small general-purpose utilities that don't have
a clear module/package to live in.
"""
import abc
import contextlib
import difflib
import inspect
import json
import locale
import os
import re
import signal
import sys
import threading
import traceback
import unicodedata
from collections import OrderedDict, defaultdict
from contextlib import contextmanager
from astropy.utils.decorators import deprecated
__all__ = [
"isiterable",
"silence",
"format_exception",
"NumpyRNGContext",
"find_api_page",
"is_path_hidden",
"walk_skip_hidden",
"JsonCustomEncoder",
"indent",
"dtype_bytes_or_chars",
"OrderedDescriptor",
"OrderedDescriptorContainer",
]
# Because they are deprecated.
__doctest_skip__ = ["OrderedDescriptor", "OrderedDescriptorContainer"]
NOT_OVERWRITING_MSG = (
"File {} already exists. If you mean to replace it "
'then use the argument "overwrite=True".'
)
# A useful regex for tests.
_NOT_OVERWRITING_MSG_MATCH = (
r"File .* already exists\. If you mean to "
r"replace it then use the argument "
r'"overwrite=True"\.'
)
def isiterable(obj):
"""Returns `True` if the given object is iterable."""
try:
iter(obj)
return True
except TypeError:
return False
def indent(s, shift=1, width=4):
"""Indent a block of text. The indentation is applied to each line."""
indented = "\n".join(" " * (width * shift) + l if l else "" for l in s.splitlines())
    # endswith avoids an IndexError when s is the empty string
    if s.endswith("\n"):
indented += "\n"
return indented
class _DummyFile:
"""A noop writeable object."""
def write(self, s):
pass
@contextlib.contextmanager
def silence():
"""A context manager that silences sys.stdout and sys.stderr."""
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = _DummyFile()
sys.stderr = _DummyFile()
    try:
        yield
    finally:
        # Restore the original streams even if the body raises
        sys.stdout = old_stdout
        sys.stderr = old_stderr
def format_exception(msg, *args, **kwargs):
"""Fill in information about the exception that occurred.
Given an exception message string, uses new-style formatting arguments
``{filename}``, ``{lineno}``, ``{func}`` and/or ``{text}`` to fill in
information about the exception that occurred. For example:
try:
1/0
except:
raise ZeroDivisionError(
            format_exception('A divide by zero occurred in {filename} '
                             'at line {lineno} of function {func}.'))
Any additional positional or keyword arguments passed to this function are
also used to format the message.
.. note::
This uses `sys.exc_info` to gather up the information needed to fill
in the formatting arguments. Since `sys.exc_info` is not carried
outside a handled exception, it's not wise to use this
outside of an ``except`` clause - if it is, this will substitute
'<unknown>' for the 4 formatting arguments.
"""
tb = traceback.extract_tb(sys.exc_info()[2], limit=1)
if len(tb) > 0:
filename, lineno, func, text = tb[0]
else:
filename = lineno = func = text = "<unknown>"
return msg.format(
*args, filename=filename, lineno=lineno, func=func, text=text, **kwargs
)
class NumpyRNGContext:
"""
A context manager (for use with the ``with`` statement) that will seed the
numpy random number generator (RNG) to a specific value, and then restore
the RNG state back to whatever it was before.
    This is primarily intended for use in the astropy testing suite, but it
may be useful in ensuring reproducibility of Monte Carlo simulations in a
science context.
Parameters
----------
seed : int
The value to use to seed the numpy RNG
Examples
--------
A typical use case might be::
with NumpyRNGContext(<some seed value you pick>):
from numpy import random
randarr = random.randn(100)
... run your test using `randarr` ...
#Any code using numpy.random at this indent level will act just as it
#would have if it had been before the with statement - e.g. whatever
#the default seed is.
"""
def __init__(self, seed):
self.seed = seed
def __enter__(self):
from numpy import random
self.startstate = random.get_state()
random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
from numpy import random
random.set_state(self.startstate)
def find_api_page(obj, version=None, openinbrowser=True, timeout=None):
"""
Determines the URL of the API page for the specified object, and
optionally open that page in a web browser.
.. note::
You must be connected to the internet for this to function even if
``openinbrowser`` is `False`, unless you provide a local version of
the documentation to ``version`` (e.g., ``file:///path/to/docs``).
Parameters
----------
obj
The object to open the docs for or its fully-qualified name
(as a str).
version : str
The doc version - either a version number like '0.1', 'dev' for
the development/latest docs, or a URL to point to a specific
        location that should be the *base* of the documentation. Defaults to
        the development version if you are not on a release; otherwise,
        defaults to the version you are on.
openinbrowser : bool
If `True`, the `webbrowser` package will be used to open the doc
page in a new web browser window.
timeout : number, optional
The number of seconds to wait before timing-out the query to
the astropy documentation. If not given, the default python
stdlib timeout will be used.
Returns
-------
url : str
The loaded URL
Raises
------
ValueError
If the documentation can't be found
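    Examples
    --------
    A sketch (requires internet access, so it is skipped here)::
        >>> from astropy.utils.misc import find_api_page
        >>> url = find_api_page('astropy.table.Table',
        ...                     openinbrowser=False)  # doctest: +SKIP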
"""
import webbrowser
from zlib import decompress
from astropy.utils.data import get_readable_fileobj
if (
not isinstance(obj, str)
and hasattr(obj, "__module__")
and hasattr(obj, "__name__")
):
obj = obj.__module__ + "." + obj.__name__
elif inspect.ismodule(obj):
obj = obj.__name__
if version is None:
from astropy import version
if version.release:
version = "v" + version.version
else:
version = "dev"
if "://" in version:
if version.endswith("index.html"):
baseurl = version[:-10]
elif version.endswith("/"):
baseurl = version
else:
baseurl = version + "/"
elif version == "dev" or version == "latest":
baseurl = "http://devdocs.astropy.org/"
else:
baseurl = f"https://docs.astropy.org/en/{version}/"
# Custom request headers; see
# https://github.com/astropy/astropy/issues/8990
url = baseurl + "objects.inv"
headers = {"User-Agent": f"Astropy/{version}"}
with get_readable_fileobj(
url, encoding="binary", remote_timeout=timeout, http_headers=headers
) as uf:
oiread = uf.read()
# need to first read/remove the first four lines, which have info before
# the compressed section with the actual object inventory
idx = -1
headerlines = []
for _ in range(4):
oldidx = idx
idx = oiread.index(b"\n", oldidx + 1)
headerlines.append(oiread[(oldidx + 1) : idx].decode("utf-8"))
# intersphinx version line, project name, and project version
ivers, proj, vers, compr = headerlines
if "The remainder of this file is compressed using zlib" not in compr:
raise ValueError(
f"The file downloaded from {baseurl}objects.inv does not seem to be"
"the usual Sphinx objects.inv format. Maybe it "
"has changed?"
)
compressed = oiread[(idx + 1) :]
decompressed = decompress(compressed).decode("utf-8")
resurl = None
for l in decompressed.strip().splitlines():
ls = l.split()
name = ls[0]
loc = ls[3]
if loc.endswith("$"):
loc = loc[:-1] + name
if name == obj:
resurl = baseurl + loc
break
if resurl is None:
raise ValueError(f"Could not find the docs for the object {obj}")
elif openinbrowser:
webbrowser.open(resurl)
return resurl
def signal_number_to_name(signum):
"""
Given an OS signal number, returns a signal name. If the signal
number is unknown, returns ``'UNKNOWN'``.
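    Examples
    --------
    For example::
        >>> import signal
        >>> from astropy.utils.misc import signal_number_to_name
        >>> signal_number_to_name(signal.SIGINT)
        'SIGINT'
        >>> signal_number_to_name(-1)
        'UNKNOWN'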
"""
# Since these numbers and names are platform specific, we use the
# builtin signal module and build a reverse mapping.
signal_to_name_map = {
k: v for v, k in signal.__dict__.items() if v.startswith("SIG")
}
return signal_to_name_map.get(signum, "UNKNOWN")
if sys.platform == "win32":
import ctypes
def _has_hidden_attribute(filepath):
"""
Returns True if the given filepath has the hidden attribute on
MS-Windows. Based on a post here:
https://stackoverflow.com/questions/284115/cross-platform-hidden-file-detection.
"""
if isinstance(filepath, bytes):
filepath = filepath.decode(sys.getfilesystemencoding())
try:
attrs = ctypes.windll.kernel32.GetFileAttributesW(filepath)
result = bool(attrs & 2) and attrs != -1
except AttributeError:
result = False
return result
else:
def _has_hidden_attribute(filepath):
return False
def is_path_hidden(filepath):
"""
Determines if a given file or directory is hidden.
Parameters
----------
filepath : str
The path to a file or directory
Returns
-------
hidden : bool
Returns `True` if the file is hidden
"""
name = os.path.basename(os.path.abspath(filepath))
if isinstance(name, bytes):
is_dotted = name.startswith(b".")
else:
is_dotted = name.startswith(".")
return is_dotted or _has_hidden_attribute(filepath)
def walk_skip_hidden(top, onerror=None, followlinks=False):
"""
A wrapper for `os.walk` that skips hidden files and directories.
This function does not have the parameter ``topdown`` from
`os.walk`: the directories must always be recursed top-down when
using this function.
See also
--------
os.walk : For a description of the parameters
"""
for root, dirs, files in os.walk(
top, topdown=True, onerror=onerror, followlinks=followlinks
):
# These lists must be updated in-place so os.walk will skip
# hidden directories
dirs[:] = [d for d in dirs if not is_path_hidden(d)]
files[:] = [f for f in files if not is_path_hidden(f)]
yield root, dirs, files
class JsonCustomEncoder(json.JSONEncoder):
"""Support for data types that JSON default encoder
does not do.
This includes:
* Numpy array or number
* Complex number
* Set
* Bytes
* astropy.UnitBase
* astropy.Quantity
Examples
--------
>>> import json
>>> import numpy as np
>>> from astropy.utils.misc import JsonCustomEncoder
>>> json.dumps(np.arange(3), cls=JsonCustomEncoder)
'[0, 1, 2]'
"""
def default(self, obj):
import numpy as np
from astropy import units as u
if isinstance(obj, u.Quantity):
return dict(value=obj.value, unit=obj.unit.to_string())
if isinstance(obj, (np.number, np.ndarray)):
return obj.tolist()
elif isinstance(obj, complex):
return [obj.real, obj.imag]
elif isinstance(obj, set):
return list(obj)
elif isinstance(obj, bytes): # pragma: py3
return obj.decode()
        elif isinstance(obj, (u.UnitBase, u.FunctionUnitBase)):
            if obj == u.dimensionless_unscaled:
                # Return the string directly; falling through to the base
                # class's default() would unconditionally raise TypeError
                return "dimensionless_unit"
            else:
                return obj.to_string()
return json.JSONEncoder.default(self, obj)
def strip_accents(s):
"""
Remove accents from a Unicode string.
This helps with matching "ångström" to "angstrom", for example.
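    Examples
    --------
    For example::
        >>> from astropy.utils.misc import strip_accents
        >>> strip_accents('ångström')
        'angstrom'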
"""
return "".join(
c for c in unicodedata.normalize("NFD", s) if unicodedata.category(c) != "Mn"
)
def did_you_mean(s, candidates, n=3, cutoff=0.8, fix=None):
"""
When a string isn't found in a set of candidates, we can be nice
to provide a list of alternatives in the exception. This
convenience function helps to format that part of the exception.
Parameters
----------
s : str
candidates : sequence of str or dict of str keys
n : int
The maximum number of results to include. See
`difflib.get_close_matches`.
cutoff : float
In the range [0, 1]. Possibilities that don't score at least
that similar to word are ignored. See
`difflib.get_close_matches`.
fix : callable
A callable to modify the results after matching. It should
take a single string and return a sequence of strings
containing the fixed matches.
Returns
-------
message : str
Returns the string "Did you mean X, Y, or Z?", or the empty
string if no alternatives were found.
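    Examples
    --------
    For example::
        >>> from astropy.utils.misc import did_you_mean
        >>> did_you_mean('tabel', ['table', 'column', 'row'])
        'Did you mean table?'
        >>> did_you_mean('zzz', ['table', 'column', 'row'])
        ''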
"""
if isinstance(s, str):
s = strip_accents(s)
s_lower = s.lower()
# Create a mapping from the lower case name to all capitalization
# variants of that name.
candidates_lower = {}
for candidate in candidates:
candidate_lower = candidate.lower()
candidates_lower.setdefault(candidate_lower, [])
candidates_lower[candidate_lower].append(candidate)
# The heuristic here is to first try "singularizing" the word. If
# that doesn't match anything use difflib to find close matches in
# original, lower and upper case.
if s_lower.endswith("s") and s_lower[:-1] in candidates_lower:
matches = [s_lower[:-1]]
else:
matches = difflib.get_close_matches(
s_lower, candidates_lower, n=n, cutoff=cutoff
)
if len(matches):
capitalized_matches = set()
for match in matches:
capitalized_matches.update(candidates_lower[match])
matches = capitalized_matches
if fix is not None:
mapped_matches = []
for match in matches:
mapped_matches.extend(fix(match))
matches = mapped_matches
matches = list(set(matches))
matches = sorted(matches)
if len(matches) == 1:
matches = matches[0]
else:
matches = ", ".join(matches[:-1]) + " or " + matches[-1]
return f"Did you mean {matches}?"
return ""
_ordered_descriptor_deprecation_message = """\
The {func} {obj_type} is deprecated and may be removed in a future version.
You can replace its functionality with a combination of the
__init_subclass__ and __set_name__ magic methods introduced in Python 3.6.
See https://github.com/astropy/astropy/issues/11094 for recipes on how to
replicate their functionality.
"""
@deprecated("4.3", _ordered_descriptor_deprecation_message)
class OrderedDescriptor(metaclass=abc.ABCMeta):
"""
Base class for descriptors whose order in the class body should be
preserved. Intended for use in concert with the
`OrderedDescriptorContainer` metaclass.
Subclasses of `OrderedDescriptor` must define a value for a class attribute
called ``_class_attribute_``. This is the name of a class attribute on the
*container* class for these descriptors, which will be set to an
`~collections.OrderedDict` at class creation time. This
`~collections.OrderedDict` will contain a mapping of all class attributes
that were assigned instances of the `OrderedDescriptor` subclass, to the
instances themselves. See the documentation for
`OrderedDescriptorContainer` for a concrete example.
Optionally, subclasses of `OrderedDescriptor` may define a value for a
class attribute called ``_name_attribute_``. This should be the name of
an attribute on instances of the subclass. When specified, during
creation of a class containing these descriptors, the name attribute on
each instance will be set to the name of the class attribute it was
assigned to on the class.
.. note::
Although this class is intended for use with *descriptors* (i.e.
classes that define any of the ``__get__``, ``__set__``, or
``__delete__`` magic methods), this base class is not itself a
descriptor, and technically this could be used for classes that are
not descriptors too. However, use with descriptors is the original
intended purpose.
"""
# This id increments for each OrderedDescriptor instance created, so they
# are always ordered in the order they were created. Class bodies are
# guaranteed to be executed from top to bottom. Not sure if this is
# thread-safe though.
_nextid = 1
@property
@abc.abstractmethod
def _class_attribute_(self):
"""
Subclasses should define this attribute to the name of an attribute on
classes containing this subclass. That attribute will contain the mapping
of all instances of that `OrderedDescriptor` subclass defined in the class
body. If the same descriptor needs to be used with different classes,
each with different names of this attribute, multiple subclasses will be
needed.
"""
_name_attribute_ = None
"""
Subclasses may optionally define this attribute to specify the name of an
attribute on instances of the class that should be filled with the
instance's attribute name at class creation time.
"""
def __init__(self, *args, **kwargs):
# The _nextid attribute is shared across all subclasses so that
# different subclasses of OrderedDescriptors can be sorted correctly
# between themselves
self.__order = OrderedDescriptor._nextid
OrderedDescriptor._nextid += 1
super().__init__()
def __lt__(self, other):
"""
Defined for convenient sorting of `OrderedDescriptor` instances, which
are defined to sort in their creation order.
"""
if isinstance(self, OrderedDescriptor) and isinstance(other, OrderedDescriptor):
try:
return self.__order < other.__order
except AttributeError:
raise RuntimeError(
f"Could not determine ordering for {self} and {other}; at least "
"one of them is not calling super().__init__ in its "
"__init__."
)
else:
return NotImplemented
@deprecated("4.3", _ordered_descriptor_deprecation_message)
class OrderedDescriptorContainer(type):
"""
Classes should use this metaclass if they wish to use `OrderedDescriptor`
attributes, which are class attributes that "remember" the order in which
they were defined in the class body.
Every subclass of `OrderedDescriptor` has an attribute called
``_class_attribute_``. For example, if we have
.. code:: python
class ExampleDecorator(OrderedDescriptor):
_class_attribute_ = '_examples_'
Then when a class with the `OrderedDescriptorContainer` metaclass is
created, it will automatically be assigned a class attribute ``_examples_``
referencing an `~collections.OrderedDict` containing all instances of
``ExampleDecorator`` defined in the class body, mapped to by the names of
the attributes they were assigned to.
When subclassing a class with this metaclass, the descriptor dict (i.e.
``_examples_`` in the above example) will *not* contain descriptors
inherited from the base class. That is, this only works by default with
    descriptors explicitly defined in the class body. However, the subclass
    *may* define an attribute ``_inherit_descriptors_`` which lists
`OrderedDescriptor` classes that *should* be added from base classes.
See the examples section below for an example of this.
Examples
--------
>>> from astropy.utils import OrderedDescriptor, OrderedDescriptorContainer
>>> class TypedAttribute(OrderedDescriptor):
... \"\"\"
... Attributes that may only be assigned objects of a specific type,
... or subclasses thereof. For some reason we care about their order.
... \"\"\"
...
... _class_attribute_ = 'typed_attributes'
... _name_attribute_ = 'name'
... # A default name so that instances not attached to a class can
... # still be repr'd; useful for debugging
... name = '<unbound>'
...
... def __init__(self, type):
... # Make sure not to forget to call the super __init__
... super().__init__()
... self.type = type
...
... def __get__(self, obj, objtype=None):
... if obj is None:
... return self
... if self.name in obj.__dict__:
... return obj.__dict__[self.name]
... else:
... raise AttributeError(self.name)
...
... def __set__(self, obj, value):
... if not isinstance(value, self.type):
... raise ValueError('{0}.{1} must be of type {2!r}'.format(
... obj.__class__.__name__, self.name, self.type))
... obj.__dict__[self.name] = value
...
... def __delete__(self, obj):
... if self.name in obj.__dict__:
... del obj.__dict__[self.name]
... else:
... raise AttributeError(self.name)
...
... def __repr__(self):
... if isinstance(self.type, tuple) and len(self.type) > 1:
... typestr = '({0})'.format(
... ', '.join(t.__name__ for t in self.type))
... else:
... typestr = self.type.__name__
... return '<{0}(name={1}, type={2})>'.format(
... self.__class__.__name__, self.name, typestr)
...
Now let's create an example class that uses this ``TypedAttribute``::
>>> class Point2D(metaclass=OrderedDescriptorContainer):
... x = TypedAttribute((float, int))
... y = TypedAttribute((float, int))
...
... def __init__(self, x, y):
... self.x, self.y = x, y
...
>>> p1 = Point2D(1.0, 2.0)
>>> p1.x
1.0
>>> p1.y
2.0
>>> p2 = Point2D('a', 'b') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
    ValueError: Point2D.x must be of type (float, int)
We see that ``TypedAttribute`` works more or less as advertised, but
there's nothing special about that. Let's see what
`OrderedDescriptorContainer` did for us::
>>> Point2D.typed_attributes
OrderedDict([('x', <TypedAttribute(name=x, type=(float, int))>),
('y', <TypedAttribute(name=y, type=(float, int))>)])
If we create a subclass, it does *not* by default add inherited descriptors
to ``typed_attributes``::
>>> class Point3D(Point2D):
... z = TypedAttribute((float, int))
...
>>> Point3D.typed_attributes
OrderedDict([('z', <TypedAttribute(name=z, type=(float, int))>)])
However, if we specify ``_inherit_descriptors_`` from ``Point2D`` then
it will do so::
>>> class Point3D(Point2D):
... _inherit_descriptors_ = (TypedAttribute,)
... z = TypedAttribute((float, int))
...
>>> Point3D.typed_attributes
OrderedDict([('x', <TypedAttribute(name=x, type=(float, int))>),
('y', <TypedAttribute(name=y, type=(float, int))>),
('z', <TypedAttribute(name=z, type=(float, int))>)])
.. note::
Hopefully it is clear from these examples that this construction
also allows a class of type `OrderedDescriptorContainer` to use
multiple different `OrderedDescriptor` classes simultaneously.
"""
_inherit_descriptors_ = ()
def __init__(cls, cls_name, bases, members):
descriptors = defaultdict(list)
seen = set()
inherit_descriptors = ()
descr_bases = {}
for mro_cls in cls.__mro__:
for name, obj in mro_cls.__dict__.items():
if name in seen:
# Checks if we've already seen an attribute of the given
# name (if so it will override anything of the same name in
# any base class)
continue
seen.add(name)
if not isinstance(obj, OrderedDescriptor) or (
inherit_descriptors and not isinstance(obj, inherit_descriptors)
):
# The second condition applies when checking any
# subclasses, to see if we can inherit any descriptors of
# the given type from subclasses (by default inheritance is
# disabled unless the class has _inherit_descriptors_
# defined)
continue
if obj._name_attribute_ is not None:
setattr(obj, obj._name_attribute_, name)
# Don't just use the descriptor's class directly; instead go
# through its MRO and find the class on which _class_attribute_
# is defined directly. This way subclasses of some
# OrderedDescriptor *may* override _class_attribute_ and have
# its own _class_attribute_, but by default all subclasses of
# some OrderedDescriptor are still grouped together
# TODO: It might be worth clarifying this in the docs
if obj.__class__ not in descr_bases:
for obj_cls_base in obj.__class__.__mro__:
if "_class_attribute_" in obj_cls_base.__dict__:
descr_bases[obj.__class__] = obj_cls_base
descriptors[obj_cls_base].append((obj, name))
break
else:
# Make sure to put obj first for sorting purposes
obj_cls_base = descr_bases[obj.__class__]
descriptors[obj_cls_base].append((obj, name))
if not getattr(mro_cls, "_inherit_descriptors_", False):
# If _inherit_descriptors_ is undefined then we don't inherit
# any OrderedDescriptors from any of the base classes, and
# there's no reason to continue through the MRO
break
else:
inherit_descriptors = mro_cls._inherit_descriptors_
for descriptor_cls, instances in descriptors.items():
instances.sort()
instances = OrderedDict((key, value) for value, key in instances)
setattr(cls, descriptor_cls._class_attribute_, instances)
super().__init__(cls_name, bases, members)
LOCALE_LOCK = threading.Lock()
@contextmanager
def _set_locale(name):
"""
Context manager to temporarily set the locale to ``name``.
An example is setting locale to "C" so that the C strtod()
function will use "." as the decimal point to enable consistent
numerical string parsing.
    Note that one cannot nest multiple _set_locale() context manager
    statements, as the non-reentrant threading lock would deadlock.
    This code was taken from https://stackoverflow.com/questions/18593661/how-do-i-strftime-a-date-object-in-a-different-locale.
    Parameters
    ----------
name : str
Locale name, e.g. "C" or "fr_FR".
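    Examples
    --------
    A minimal sketch; the "C" locale guarantees "." as the decimal point::
        import locale
        with _set_locale("C"):
            value = locale.str(0.5)  # "0.5" regardless of the ambient locale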
"""
name = str(name)
with LOCALE_LOCK:
saved = locale.setlocale(locale.LC_ALL)
if saved == name:
# Don't do anything if locale is already the requested locale
yield
else:
try:
locale.setlocale(locale.LC_ALL, name)
yield
finally:
locale.setlocale(locale.LC_ALL, saved)
set_locale = deprecated("4.0")(_set_locale)
set_locale.__doc__ = """Deprecated version of :func:`_set_locale` above.
See https://github.com/astropy/astropy/issues/9196
"""
def dtype_bytes_or_chars(dtype):
"""
Parse the number out of a dtype.str value like '<U5' or '<f8'.
See #5819 for discussion on the need for this function for getting
the number of characters corresponding to a string dtype.
Parameters
----------
dtype : numpy dtype object
Input dtype
Returns
-------
bytes_or_chars : int or None
        Bytes (for numeric types) or characters (for string types)
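    Examples
    --------
    For example, a little-endian float64 occupies 8 bytes, while a
    5-character unicode dtype holds 5 characters:
    >>> import numpy as np
    >>> dtype_bytes_or_chars(np.dtype('<f8'))
    8
    >>> dtype_bytes_or_chars(np.dtype('<U5'))
    5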
"""
match = re.search(r"(\d+)$", dtype.str)
out = int(match.group(1)) if match else None
return out
def _hungry_for(option): # pragma: no cover
"""
Open browser loaded with ``option`` options near you.
*Disclaimers: Payments not included. Astropy is not
responsible for any liability from using this function.*
.. note:: Accuracy depends on your browser settings.
"""
import webbrowser
webbrowser.open(f"https://www.google.com/search?q={option}+near+me")
def pizza(): # pragma: no cover
"""``/pizza``."""
_hungry_for("pizza")
def coffee(is_adam=False, is_brigitta=False): # pragma: no cover
"""``/coffee``."""
if is_adam and is_brigitta:
raise ValueError("There can be only one!")
if is_adam:
option = "fresh+third+wave+coffee"
elif is_brigitta:
option = "decent+espresso"
else:
option = "coffee"
_hungry_for(option)
|
404b70fa52a57082befafcba5ee29f507e5f8e987f4d14de53282f4f3be6fea1 | """
Normalization class for Matplotlib that can be used to produce
colorbars.
"""
import inspect
import numpy as np
from numpy import ma
from .interval import (
AsymmetricPercentileInterval,
BaseInterval,
ManualInterval,
MinMaxInterval,
PercentileInterval,
)
from .stretch import (
AsinhStretch,
BaseStretch,
LinearStretch,
LogStretch,
PowerStretch,
SinhStretch,
SqrtStretch,
)
try:
import matplotlib # noqa: F401
from matplotlib import pyplot as plt
from matplotlib.colors import Normalize
except ImportError:
class Normalize:
def __init__(self, *args, **kwargs):
raise ImportError("matplotlib is required in order to use this class.")
__all__ = ["ImageNormalize", "simple_norm", "imshow_norm"]
__doctest_requires__ = {"*": ["matplotlib"]}
class ImageNormalize(Normalize):
"""
Normalization class to be used with Matplotlib.
Parameters
----------
data : ndarray, optional
The image array. This input is used only if ``interval`` is
also input. ``data`` and ``interval`` are used to compute the
vmin and/or vmax values only if ``vmin`` or ``vmax`` are not
input.
interval : `~astropy.visualization.BaseInterval` subclass instance, optional
The interval object to apply to the input ``data`` to determine
the ``vmin`` and ``vmax`` values. This input is used only if
``data`` is also input. ``data`` and ``interval`` are used to
compute the vmin and/or vmax values only if ``vmin`` or ``vmax``
are not input.
vmin, vmax : float, optional
The minimum and maximum levels to show for the data. The
``vmin`` and ``vmax`` inputs override any calculated values from
the ``interval`` and ``data`` inputs.
    stretch : `~astropy.visualization.BaseStretch` subclass instance, optional
The stretch object to apply to the data. The default is
`~astropy.visualization.LinearStretch`.
clip : bool, optional
If `True`, data values outside the [0:1] range are clipped to
the [0:1] range.
invalid : None or float, optional
Value to assign NaN values generated by this class. NaNs in the
input ``data`` array are not changed. For matplotlib
normalization, the ``invalid`` value should map to the
matplotlib colormap "under" value (i.e., any finite value < 0).
If `None`, then NaN values are not replaced. This keyword has
no effect if ``clip=True``.
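    Examples
    --------
    A minimal usage sketch (synthetic data, chosen only for illustration)::
        import numpy as np
        import matplotlib.pyplot as plt
        from astropy.visualization import (ImageNormalize, MinMaxInterval,
                                           SqrtStretch)
        # vmin/vmax are derived from ``data`` through the interval
        data = np.arange(65536.0).reshape((256, 256))
        norm = ImageNormalize(data, interval=MinMaxInterval(),
                              stretch=SqrtStretch())
        plt.imshow(data, norm=norm, origin='lower')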
"""
def __init__(
self,
data=None,
interval=None,
vmin=None,
vmax=None,
stretch=LinearStretch(),
clip=False,
invalid=-1.0,
):
# this super call checks for matplotlib
super().__init__(vmin=vmin, vmax=vmax, clip=clip)
self.vmin = vmin
self.vmax = vmax
if stretch is None:
raise ValueError("stretch must be input")
if not isinstance(stretch, BaseStretch):
raise TypeError("stretch must be an instance of a BaseStretch subclass")
self.stretch = stretch
if interval is not None and not isinstance(interval, BaseInterval):
raise TypeError("interval must be an instance of a BaseInterval subclass")
self.interval = interval
self.inverse_stretch = stretch.inverse
self.clip = clip
self.invalid = invalid
# Define vmin and vmax if not None and data was input
if data is not None:
self._set_limits(data)
def _set_limits(self, data):
if self.vmin is not None and self.vmax is not None:
return
# Define vmin and vmax from the interval class if not None
if self.interval is None:
if self.vmin is None:
self.vmin = np.min(data[np.isfinite(data)])
if self.vmax is None:
self.vmax = np.max(data[np.isfinite(data)])
else:
_vmin, _vmax = self.interval.get_limits(data)
if self.vmin is None:
self.vmin = _vmin
if self.vmax is None:
self.vmax = _vmax
def __call__(self, values, clip=None, invalid=None):
"""
Transform values using this normalization.
Parameters
----------
values : array-like
The input values.
clip : bool, optional
If `True`, values outside the [0:1] range are clipped to the
[0:1] range. If `None` then the ``clip`` value from the
`ImageNormalize` instance is used (the default of which is
`False`).
invalid : None or float, optional
Value to assign NaN values generated by this class. NaNs in
the input ``data`` array are not changed. For matplotlib
normalization, the ``invalid`` value should map to the
matplotlib colormap "under" value (i.e., any finite value <
0). If `None`, then the `ImageNormalize` instance value is
used. This keyword has no effect if ``clip=True``.
"""
if clip is None:
clip = self.clip
if invalid is None:
invalid = self.invalid
if isinstance(values, ma.MaskedArray):
if clip:
mask = False
else:
mask = values.mask
values = values.filled(self.vmax)
else:
mask = False
# Make sure scalars get broadcast to 1-d
if np.isscalar(values):
values = np.array([values], dtype=float)
else:
# copy because of in-place operations after
values = np.array(values, copy=True, dtype=float)
# Define vmin and vmax if not None
self._set_limits(values)
# Normalize based on vmin and vmax
np.subtract(values, self.vmin, out=values)
np.true_divide(values, self.vmax - self.vmin, out=values)
# Clip to the 0 to 1 range
if clip:
values = np.clip(values, 0.0, 1.0, out=values)
# Stretch values
if self.stretch._supports_invalid_kw:
values = self.stretch(values, out=values, clip=False, invalid=invalid)
else:
values = self.stretch(values, out=values, clip=False)
# Convert to masked array for matplotlib
return ma.array(values, mask=mask)
def inverse(self, values, invalid=None):
# Find unstretched values in range 0 to 1
if self.inverse_stretch._supports_invalid_kw:
values_norm = self.inverse_stretch(values, clip=False, invalid=invalid)
else:
values_norm = self.inverse_stretch(values, clip=False)
# Scale to original range
return values_norm * (self.vmax - self.vmin) + self.vmin
def simple_norm(
data,
stretch="linear",
power=1.0,
asinh_a=0.1,
min_cut=None,
max_cut=None,
min_percent=None,
max_percent=None,
percent=None,
clip=False,
log_a=1000,
invalid=-1.0,
sinh_a=0.3,
):
"""
Return a Normalization class that can be used for displaying images
with Matplotlib.
This function enables only a subset of image stretching functions
available in `~astropy.visualization.mpl_normalize.ImageNormalize`.
This function is used by the
``astropy.visualization.scripts.fits2bitmap`` script.
Parameters
----------
data : ndarray
The image array.
    stretch : {'linear', 'sqrt', 'power', 'log', 'asinh', 'sinh'}, optional
The stretch function to apply to the image. The default is
'linear'.
power : float, optional
The power index for ``stretch='power'``. The default is 1.0.
asinh_a : float, optional
For ``stretch='asinh'``, the value where the asinh curve
transitions from linear to logarithmic behavior, expressed as a
fraction of the normalized image. Must be in the range between
0 and 1. The default is 0.1.
min_cut : float, optional
        The pixel value of the minimum cut level. Data values less than
        ``min_cut`` will be set to ``min_cut`` before stretching the image.
        The default is the image minimum. ``min_cut`` overrides
        ``min_percent``.
    max_cut : float, optional
        The pixel value of the maximum cut level. Data values greater
        than ``max_cut`` will be set to ``max_cut`` before stretching the
image. The default is the image maximum. ``max_cut`` overrides
``max_percent``.
min_percent : float, optional
The percentile value used to determine the pixel value of
minimum cut level. The default is 0.0. ``min_percent``
overrides ``percent``.
max_percent : float, optional
The percentile value used to determine the pixel value of
maximum cut level. The default is 100.0. ``max_percent``
overrides ``percent``.
percent : float, optional
The percentage of the image values used to determine the pixel
values of the minimum and maximum cut levels. The lower cut
        level will be set at the ``(100 - percent) / 2`` percentile, while
the upper cut level will be set at the ``(100 + percent) / 2``
percentile. The default is 100.0. ``percent`` is ignored if
either ``min_percent`` or ``max_percent`` is input.
clip : bool, optional
If `True`, data values outside the [0:1] range are clipped to
the [0:1] range.
log_a : float, optional
The log index for ``stretch='log'``. The default is 1000.
invalid : None or float, optional
Value to assign NaN values generated by the normalization. NaNs
in the input ``data`` array are not changed. For matplotlib
normalization, the ``invalid`` value should map to the
matplotlib colormap "under" value (i.e., any finite value < 0).
If `None`, then NaN values are not replaced. This keyword has
no effect if ``clip=True``.
sinh_a : float, optional
The scaling parameter for ``stretch='sinh'``. The default is
0.3.
Returns
-------
result : `ImageNormalize` instance
An `ImageNormalize` instance that can be used for displaying
images with Matplotlib.
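    Examples
    --------
    A minimal usage sketch (synthetic data, chosen only for illustration)::
        import numpy as np
        from astropy.visualization import simple_norm
        data = np.arange(100.0).reshape((10, 10))
        # keep the central 99% of the pixel values and apply a log stretch
        norm = simple_norm(data, stretch='log', percent=99.0)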
"""
if percent is not None:
interval = PercentileInterval(percent)
elif min_percent is not None or max_percent is not None:
interval = AsymmetricPercentileInterval(
min_percent or 0.0, max_percent or 100.0
)
elif min_cut is not None or max_cut is not None:
interval = ManualInterval(min_cut, max_cut)
else:
interval = MinMaxInterval()
if stretch == "linear":
stretch = LinearStretch()
elif stretch == "sqrt":
stretch = SqrtStretch()
elif stretch == "power":
stretch = PowerStretch(power)
elif stretch == "log":
stretch = LogStretch(log_a)
elif stretch == "asinh":
stretch = AsinhStretch(asinh_a)
elif stretch == "sinh":
stretch = SinhStretch(sinh_a)
else:
raise ValueError(f"Unknown stretch: {stretch}.")
vmin, vmax = interval.get_limits(data)
return ImageNormalize(
vmin=vmin, vmax=vmax, stretch=stretch, clip=clip, invalid=invalid
)
# used in imshow_norm
_norm_sig = inspect.signature(ImageNormalize)
def imshow_norm(data, ax=None, **kwargs):
"""A convenience function to call matplotlib's `matplotlib.pyplot.imshow`
function, using an `ImageNormalize` object as the normalization.
Parameters
----------
data : 2D or 3D array-like
The data to show. Can be whatever `~matplotlib.pyplot.imshow` and
`ImageNormalize` both accept. See `~matplotlib.pyplot.imshow`.
ax : None or `~matplotlib.axes.Axes`, optional
If None, use pyplot's imshow. Otherwise, calls ``imshow`` method of
the supplied axes.
**kwargs : dict, optional
        All other keyword arguments are matched against the
        `ImageNormalize` initializer first; any that remain are passed
        on to `~matplotlib.pyplot.imshow`.
Returns
-------
result : tuple
A tuple containing the `~matplotlib.image.AxesImage` generated
by `~matplotlib.pyplot.imshow` as well as the `ImageNormalize`
instance.
Notes
-----
The ``norm`` matplotlib keyword is not supported.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import (imshow_norm, MinMaxInterval,
SqrtStretch)
# Generate and display a test image
image = np.arange(65536).reshape((256, 256))
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
im, norm = imshow_norm(image, ax, origin='lower',
interval=MinMaxInterval(),
stretch=SqrtStretch())
fig.colorbar(im)
"""
if "X" in kwargs:
raise ValueError("Cannot give both ``X`` and ``data``")
if "norm" in kwargs:
raise ValueError(
"There is no point in using imshow_norm if you give "
"the ``norm`` keyword - use imshow directly if you "
"want that."
)
imshow_kwargs = dict(kwargs)
norm_kwargs = {"data": data}
for pname in _norm_sig.parameters:
if pname in kwargs:
norm_kwargs[pname] = imshow_kwargs.pop(pname)
imshow_kwargs["norm"] = ImageNormalize(**norm_kwargs)
if ax is None:
imshow_result = plt.imshow(data, **imshow_kwargs)
else:
imshow_result = ax.imshow(data, **imshow_kwargs)
return imshow_result, imshow_kwargs["norm"]
|
9ef96f1265399e64a56fd65d870d4c70e2f27e06330272fc746829ad6a6ceab8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Combine 3 images to produce a properly-scaled RGB image following Lupton et al. (2004).
The three images must be aligned and have the same pixel scale and size.
For details, see: https://ui.adsabs.harvard.edu/abs/2004PASP..116..133L
"""
import numpy as np
from . import ZScaleInterval
__all__ = ["make_lupton_rgb"]
def compute_intensity(image_r, image_g=None, image_b=None):
"""
Return a naive total intensity from the red, blue, and green intensities.
Parameters
----------
image_r : ndarray
Intensity of image to be mapped to red; or total intensity if ``image_g``
and ``image_b`` are None.
image_g : ndarray, optional
Intensity of image to be mapped to green.
image_b : ndarray, optional
Intensity of image to be mapped to blue.
Returns
-------
intensity : ndarray
Total intensity from the red, blue and green intensities, or ``image_r``
if green and blue images are not provided.
"""
if image_g is None or image_b is None:
if not (image_g is None and image_b is None):
raise ValueError(
"please specify either a single image or red, green, and blue images."
)
return image_r
intensity = (image_r + image_g + image_b) / 3.0
# Repack into whatever type was passed to us
return np.asarray(intensity, dtype=image_r.dtype)
class Mapping:
"""
    Base class to map red, blue, green intensities into uint8 values.
Parameters
----------
minimum : float or sequence(3)
Intensity that should be mapped to black (a scalar or array for R, G, B).
image : ndarray, optional
An image used to calculate some parameters of some mappings.
"""
def __init__(self, minimum=None, image=None):
self._uint8Max = float(np.iinfo(np.uint8).max)
try:
len(minimum)
except TypeError:
minimum = 3 * [minimum]
if len(minimum) != 3:
raise ValueError("please provide 1 or 3 values for minimum.")
self.minimum = minimum
self._image = np.asarray(image)
def make_rgb_image(self, image_r, image_g, image_b):
"""
Convert 3 arrays, image_r, image_g, and image_b into an 8-bit RGB image.
Parameters
----------
image_r : ndarray
Image to map to red.
image_g : ndarray
Image to map to green.
image_b : ndarray
Image to map to blue.
Returns
-------
RGBimage : ndarray
RGB (integer, 8-bits per channel) color image as an NxNx3 numpy array.
"""
image_r = np.asarray(image_r)
image_g = np.asarray(image_g)
image_b = np.asarray(image_b)
if (image_r.shape != image_g.shape) or (image_g.shape != image_b.shape):
msg = "The image shapes must match. r: {}, g: {} b: {}"
raise ValueError(msg.format(image_r.shape, image_g.shape, image_b.shape))
return np.dstack(
self._convert_images_to_uint8(image_r, image_g, image_b)
).astype(np.uint8)
def intensity(self, image_r, image_g, image_b):
"""
Return the total intensity from the red, blue, and green intensities.
This is a naive computation, and may be overridden by subclasses.
Parameters
----------
image_r : ndarray
Intensity of image to be mapped to red; or total intensity if
``image_g`` and ``image_b`` are None.
image_g : ndarray, optional
Intensity of image to be mapped to green.
image_b : ndarray, optional
Intensity of image to be mapped to blue.
Returns
-------
intensity : ndarray
Total intensity from the red, blue and green intensities, or
``image_r`` if green and blue images are not provided.
"""
return compute_intensity(image_r, image_g, image_b)
def map_intensity_to_uint8(self, I):
"""
Return an array which, when multiplied by an image, returns that image
mapped to the range of a uint8, [0, 255] (but not converted to uint8).
The intensity is assumed to have had minimum subtracted (as that can be
done per-band).
Parameters
----------
I : ndarray
Intensity to be mapped.
Returns
-------
mapped_I : ndarray
``I`` mapped to uint8
"""
with np.errstate(invalid="ignore", divide="ignore"):
return np.clip(I, 0, self._uint8Max)
def _convert_images_to_uint8(self, image_r, image_g, image_b):
"""
Use the mapping to convert images image_r, image_g, and image_b to a triplet of uint8 images.
"""
image_r = image_r - self.minimum[0] # n.b. makes copy
image_g = image_g - self.minimum[1]
image_b = image_b - self.minimum[2]
fac = self.map_intensity_to_uint8(self.intensity(image_r, image_g, image_b))
image_rgb = [image_r, image_g, image_b]
for c in image_rgb:
c *= fac
with np.errstate(invalid="ignore"):
c[c < 0] = 0 # individual bands can still be < 0, even if fac isn't
pixmax = self._uint8Max
# copies -- could work row by row to minimise memory usage
r0, g0, b0 = image_rgb
# n.b. np.where can't and doesn't short-circuit
with np.errstate(invalid="ignore", divide="ignore"):
for i, c in enumerate(image_rgb):
c = np.where(
r0 > g0,
np.where(
r0 > b0,
np.where(r0 >= pixmax, c * pixmax / r0, c),
np.where(b0 >= pixmax, c * pixmax / b0, c),
),
np.where(
g0 > b0,
np.where(g0 >= pixmax, c * pixmax / g0, c),
np.where(b0 >= pixmax, c * pixmax / b0, c),
),
).astype(np.uint8)
c[c > pixmax] = pixmax
image_rgb[i] = c
return image_rgb
class LinearMapping(Mapping):
"""
    A linear map of red, blue, green intensities into uint8 values.
    A linear stretch from [minimum, maximum].
    If one or both are omitted, the image min and/or max are used to set them.
Parameters
----------
minimum : float
Intensity that should be mapped to black (a scalar or array for R, G, B).
maximum : float
Intensity that should be mapped to white (a scalar).
"""
def __init__(self, minimum=None, maximum=None, image=None):
if minimum is None or maximum is None:
if image is None:
raise ValueError(
"you must provide an image if you don't "
"set both minimum and maximum"
)
if minimum is None:
minimum = image.min()
if maximum is None:
maximum = image.max()
Mapping.__init__(self, minimum=minimum, image=image)
self.maximum = maximum
if maximum is None:
self._range = None
else:
if maximum == minimum:
raise ValueError("minimum and maximum values must not be equal")
self._range = float(maximum - minimum)
def map_intensity_to_uint8(self, I):
# n.b. np.where can't and doesn't short-circuit
with np.errstate(invalid="ignore", divide="ignore"):
return np.where(
I <= 0,
0,
np.where(
I >= self._range, self._uint8Max / I, self._uint8Max / self._range
),
)
class AsinhMapping(Mapping):
"""
A mapping for an asinh stretch (preserving colours independent of brightness).
x = asinh(Q (I - minimum)/stretch)/Q
This reduces to a linear stretch if Q == 0
See https://ui.adsabs.harvard.edu/abs/2004PASP..116..133L
Parameters
----------
minimum : float
Intensity that should be mapped to black (a scalar or array for R, G, B).
stretch : float
The linear stretch of the image.
Q : float
The asinh softening parameter.
"""
def __init__(self, minimum, stretch, Q=8):
Mapping.__init__(self, minimum)
# 32bit floating point machine epsilon; sys.float_info.epsilon is 64bit
epsilon = 1.0 / 2**23
if abs(Q) < epsilon:
Q = 0.1
else:
Qmax = 1e10
if Q > Qmax:
Q = Qmax
        frac = 0.1  # the gradient at frac * stretch defines self._slope
self._slope = frac * self._uint8Max / np.arcsinh(frac * Q)
self._soften = Q / float(stretch)
def map_intensity_to_uint8(self, I):
# n.b. np.where can't and doesn't short-circuit
with np.errstate(invalid="ignore", divide="ignore"):
return np.where(I <= 0, 0, np.arcsinh(I * self._soften) * self._slope / I)
class AsinhZScaleMapping(AsinhMapping):
"""
A mapping for an asinh stretch, estimating the linear stretch by zscale.
x = asinh(Q (I - z1)/(z2 - z1))/Q
Parameters
----------
image1 : ndarray or a list of arrays
The image to analyse, or a list of 3 images to be converted to
an intensity image.
image2 : ndarray, optional
the second image to analyse (must be specified with image3).
image3 : ndarray, optional
the third image to analyse (must be specified with image2).
Q : float, optional
The asinh softening parameter. Default is 8.
pedestal : float or sequence(3), optional
The value, or array of 3 values, to subtract from the images; or None.
Notes
-----
pedestal, if not None, is removed from the images when calculating the
zscale stretch, and added back into Mapping.minimum[]
"""
def __init__(self, image1, image2=None, image3=None, Q=8, pedestal=None):
if image2 is None or image3 is None:
if not (image2 is None and image3 is None):
raise ValueError(
"please specify either a single image or three images."
)
image = [image1]
else:
image = [image1, image2, image3]
if pedestal is not None:
try:
len(pedestal)
except TypeError:
pedestal = 3 * [pedestal]
if len(pedestal) != 3:
raise ValueError("please provide 1 or 3 pedestals.")
image = list(image) # needs to be mutable
for i, im in enumerate(image):
if pedestal[i] != 0.0:
image[i] = im - pedestal[i] # n.b. a copy
else:
pedestal = len(image) * [0.0]
image = compute_intensity(*image)
zscale_limits = ZScaleInterval().get_limits(image)
zscale = LinearMapping(*zscale_limits, image=image)
# zscale.minimum is always a triple
stretch = zscale.maximum - zscale.minimum[0]
minimum = zscale.minimum
for i, level in enumerate(pedestal):
minimum[i] += level
AsinhMapping.__init__(self, minimum, stretch, Q)
self._image = image
def make_lupton_rgb(
image_r, image_g, image_b, minimum=0, stretch=5, Q=8, filename=None
):
"""
Return a Red/Green/Blue color image from up to 3 images using an asinh stretch.
The input images can be int or float, and in any range or bit-depth.
For a more detailed look at the use of this method, see the document
:ref:`astropy:astropy-visualization-rgb`.
Parameters
----------
image_r : ndarray
Image to map to red.
image_g : ndarray
Image to map to green.
image_b : ndarray
Image to map to blue.
minimum : float
Intensity that should be mapped to black (a scalar or array for R, G, B).
stretch : float
The linear stretch of the image.
Q : float
The asinh softening parameter.
filename : str
Write the resulting RGB image to a file (file type determined
from extension).
Returns
-------
rgb : ndarray
RGB (integer, 8-bits per channel) color image as an NxNx3 numpy array.
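    Examples
    --------
    A minimal usage sketch (random images, for illustration only)::
        import numpy as np
        from astropy.visualization import make_lupton_rgb
        rng = np.random.default_rng(12345)
        image_r = rng.random((100, 100))
        image_g = rng.random((100, 100))
        image_b = rng.random((100, 100))
        rgb = make_lupton_rgb(image_r, image_g, image_b, stretch=0.5, Q=8)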
"""
asinhMap = AsinhMapping(minimum, stretch, Q)
rgb = asinhMap.make_rgb_image(image_r, image_g, image_b)
if filename:
import matplotlib.image
matplotlib.image.imsave(filename, rgb, origin="lower")
return rgb
|
66df11d64091ee5c31814a59dedac0ec1d2214c718735e9c280d1d2c21974f01 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Classes that deal with computing intervals from arrays of values based on
various criteria.
"""
import abc
import numpy as np
from astropy.utils.decorators import deprecated_attribute, deprecated_renamed_argument
from .transform import BaseTransform
__all__ = [
"BaseInterval",
"ManualInterval",
"MinMaxInterval",
"AsymmetricPercentileInterval",
"PercentileInterval",
"ZScaleInterval",
]
class BaseInterval(BaseTransform):
"""
Base class for the interval classes, which, when called with an
array of values, return an interval computed following different
algorithms.
"""
@abc.abstractmethod
def get_limits(self, values):
"""
Return the minimum and maximum value in the interval based on
the values provided.
Parameters
----------
values : ndarray
The image values.
Returns
-------
vmin, vmax : float
            The minimum and maximum image value in the interval.
"""
raise NotImplementedError("Needs to be implemented in a subclass.")
def __call__(self, values, clip=True, out=None):
"""
Transform values using this interval.
Parameters
----------
values : array-like
The input values.
clip : bool, optional
If `True` (default), values outside the [0:1] range are
clipped to the [0:1] range.
out : ndarray, optional
If specified, the output values will be placed in this array
(typically used for in-place calculations).
Returns
-------
result : ndarray
The transformed values.
"""
vmin, vmax = self.get_limits(values)
if out is None:
values = np.subtract(values, float(vmin))
else:
if out.dtype.kind != "f":
raise TypeError(
"Can only do in-place scaling for floating-point arrays"
)
values = np.subtract(values, float(vmin), out=out)
if (vmax - vmin) != 0:
np.true_divide(values, vmax - vmin, out=values)
if clip:
np.clip(values, 0.0, 1.0, out=values)
return values
class ManualInterval(BaseInterval):
"""
Interval based on user-specified values.
Parameters
----------
vmin : float, optional
The minimum value in the scaling. Defaults to the image
minimum (ignoring NaNs)
vmax : float, optional
The maximum value in the scaling. Defaults to the image
maximum (ignoring NaNs)
"""
def __init__(self, vmin=None, vmax=None):
self.vmin = vmin
self.vmax = vmax
def get_limits(self, values):
# Avoid overhead of preparing array if both limits have been specified
# manually, for performance.
if self.vmin is not None and self.vmax is not None:
return self.vmin, self.vmax
# Make sure values is a Numpy array
values = np.asarray(values).ravel()
# Filter out invalid values (inf, nan)
values = values[np.isfinite(values)]
vmin = np.min(values) if self.vmin is None else self.vmin
vmax = np.max(values) if self.vmax is None else self.vmax
return vmin, vmax
class MinMaxInterval(BaseInterval):
"""
Interval based on the minimum and maximum values in the data.
"""
def get_limits(self, values):
# Make sure values is a Numpy array
values = np.asarray(values).ravel()
# Filter out invalid values (inf, nan)
values = values[np.isfinite(values)]
return np.min(values), np.max(values)
class AsymmetricPercentileInterval(BaseInterval):
"""
    Interval based on keeping a specified fraction of pixels (can be
asymmetric).
Parameters
----------
lower_percentile : float
The lower percentile below which to ignore pixels.
upper_percentile : float
The upper percentile above which to ignore pixels.
n_samples : int, optional
Maximum number of values to use. If this is specified, and there
        are more values in the dataset than this, then values are randomly
sampled from the array (with replacement).
"""
def __init__(self, lower_percentile, upper_percentile, n_samples=None):
self.lower_percentile = lower_percentile
self.upper_percentile = upper_percentile
self.n_samples = n_samples
def get_limits(self, values):
# Make sure values is a Numpy array
values = np.asarray(values).ravel()
# If needed, limit the number of samples. We sample with replacement
# since this is much faster.
if self.n_samples is not None and values.size > self.n_samples:
values = np.random.choice(values, self.n_samples)
# Filter out invalid values (inf, nan)
values = values[np.isfinite(values)]
# Determine values at percentiles
vmin, vmax = np.percentile(
values, (self.lower_percentile, self.upper_percentile)
)
return vmin, vmax
class PercentileInterval(AsymmetricPercentileInterval):
"""
    Interval based on keeping a specified fraction of pixels.
Parameters
----------
percentile : float
The fraction of pixels to keep. The same fraction of pixels is
eliminated from both ends.
n_samples : int, optional
Maximum number of values to use. If this is specified, and there
        are more values in the dataset than this, then values are randomly
sampled from the array (with replacement).
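    Examples
    --------
    A minimal sketch keeping the central 95% of the pixel values
    (synthetic data, for illustration only)::
        import numpy as np
        from astropy.visualization import PercentileInterval
        data = np.arange(100.0)
        vmin, vmax = PercentileInterval(95.0).get_limits(data)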
"""
def __init__(self, percentile, n_samples=None):
lower_percentile = (100 - percentile) * 0.5
upper_percentile = 100 - lower_percentile
super().__init__(lower_percentile, upper_percentile, n_samples=n_samples)
class ZScaleInterval(BaseInterval):
"""
Interval based on IRAF's zscale.
https://iraf.net/forum/viewtopic.php?showtopic=134139
Original implementation:
https://github.com/spacetelescope/stsci.numdisplay/blob/master/lib/stsci/numdisplay/zscale.py
Licensed under a 3-clause BSD style license (see AURA_LICENSE.rst).
Parameters
----------
n_samples : int, optional
The number of points in the array to sample for determining
scaling factors. Defaults to 1000.
.. versionchanged:: 5.2
``n_samples`` replaces the deprecated ``nsamples`` argument,
which will be removed in the future.
contrast : float, optional
The scaling factor (between 0 and 1) for determining the minimum
and maximum value. Larger values increase the difference
between the minimum and maximum values used for display.
Defaults to 0.25.
max_reject : float, optional
If more than ``max_reject * npixels`` pixels are rejected, then
the returned values are the minimum and maximum of the data.
Defaults to 0.5.
min_npixels : int, optional
        If there are fewer than ``min_npixels`` pixels remaining after
the pixel rejection, then the returned values are the minimum
and maximum of the data. Defaults to 5.
krej : float, optional
The number of sigma used for the rejection. Defaults to 2.5.
max_iterations : int, optional
The maximum number of iterations for the rejection. Defaults to
5.
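    Examples
    --------
    A minimal sketch (synthetic Gaussian noise, for illustration only)::
        import numpy as np
        from astropy.visualization import ZScaleInterval
        data = np.random.default_rng(1).normal(size=(100, 100))
        vmin, vmax = ZScaleInterval(contrast=0.25).get_limits(data)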
"""
@deprecated_renamed_argument("nsamples", "n_samples", "5.2")
def __init__(
self,
n_samples=1000,
contrast=0.25,
max_reject=0.5,
min_npixels=5,
krej=2.5,
max_iterations=5,
):
self.n_samples = n_samples
self.contrast = contrast
self.max_reject = max_reject
self.min_npixels = min_npixels
self.krej = krej
self.max_iterations = max_iterations
# Mark `nsamples` as deprecated
nsamples = deprecated_attribute("nsamples", "5.2", alternative="n_samples")
def get_limits(self, values):
# Sample the image
values = np.asarray(values)
values = values[np.isfinite(values)]
stride = int(max(1.0, values.size / self.n_samples))
samples = values[::stride][: self.n_samples]
samples.sort()
npix = len(samples)
vmin = samples[0]
vmax = samples[-1]
# Fit a line to the sorted array of samples
minpix = max(self.min_npixels, int(npix * self.max_reject))
x = np.arange(npix)
ngoodpix = npix
last_ngoodpix = npix + 1
# Bad pixels mask used in k-sigma clipping
badpix = np.zeros(npix, dtype=bool)
# Kernel used to dilate the bad pixels mask
ngrow = max(1, int(npix * 0.01))
kernel = np.ones(ngrow, dtype=bool)
for _ in range(self.max_iterations):
if ngoodpix >= last_ngoodpix or ngoodpix < minpix:
break
fit = np.polyfit(x, samples, deg=1, w=(~badpix).astype(int))
fitted = np.poly1d(fit)(x)
# Subtract fitted line from the data array
flat = samples - fitted
# Compute the k-sigma rejection threshold
threshold = self.krej * flat[~badpix].std()
# Detect and reject pixels further than k*sigma from the
# fitted line
badpix[(flat < -threshold) | (flat > threshold)] = True
# Convolve with a kernel of length ngrow
badpix = np.convolve(badpix, kernel, mode="same")
last_ngoodpix = ngoodpix
ngoodpix = np.sum(~badpix)
if ngoodpix >= minpix:
slope, _ = fit
if self.contrast > 0:
slope = slope / self.contrast
center_pixel = (npix - 1) // 2
median = np.median(samples)
vmin = max(vmin, median - (center_pixel - 1) * slope)
vmax = min(vmax, median + (npix - center_pixel) * slope)
return vmin, vmax
|
b235b81a0f598de64bb491da82cff557f70f6bfa25466d729c099d5854c5fd40 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from datetime import datetime
import numpy as np
from astropy import units as u
from astropy.time import Time
__all__ = ["time_support"]
__doctest_requires__ = {"time_support": ["matplotlib"]}
UNSUPPORTED_FORMATS = ("datetime", "datetime64")
YMDHMS_FORMATS = ("fits", "iso", "isot", "yday")
STR_FORMATS = YMDHMS_FORMATS + ("byear_str", "jyear_str")
def time_support(*, scale=None, format=None, simplify=True):
"""
Enable support for plotting `astropy.time.Time` instances in
matplotlib.
May be (optionally) used with a ``with`` statement.
    >>> import matplotlib.pyplot as plt
    >>> from astropy import units as u
    >>> from astropy import visualization
    >>> from astropy.time import Time
>>> with visualization.time_support(): # doctest: +IGNORE_OUTPUT
... plt.figure()
... plt.plot(Time(['2016-03-22T12:30:31', '2016-03-22T12:30:38', '2016-03-22T12:34:40']))
... plt.draw()
Parameters
----------
scale : str, optional
The time scale to use for the times on the axis. If not specified,
the scale of the first Time object passed to Matplotlib is used.
format : str, optional
The time format to use for the times on the axis. If not specified,
the format of the first Time object passed to Matplotlib is used.
simplify : bool, optional
If possible, simplify labels, e.g. by removing 00:00:00.000 times from
ISO strings if all labels fall on that time.
"""
import matplotlib.units as units
from matplotlib.ticker import MaxNLocator, ScalarFormatter
from astropy.visualization.wcsaxes.utils import select_step_hour, select_step_scalar
class AstropyTimeLocator(MaxNLocator):
# Note: we default to AutoLocator since many time formats
# can just use this.
def __init__(self, converter, *args, **kwargs):
kwargs["nbins"] = 4
super().__init__(*args, **kwargs)
self._converter = converter
def tick_values(self, vmin, vmax):
# Where we put the ticks depends on the format we are using
if self._converter.format in YMDHMS_FORMATS:
# If we are here, we need to check what the range of values
# is and decide how to find tick locations accordingly
vrange = vmax - vmin
if (
self._converter.format != "yday" and vrange > 31
) or vrange > 366: # greater than a month
# We need to be careful here since not all years and months have
# the same length
# Start off by converting the values from the range to
# datetime objects, so that we can easily extract the year and
# month.
tmin = Time(
vmin, scale=self._converter.scale, format="mjd"
).datetime
tmax = Time(
vmax, scale=self._converter.scale, format="mjd"
).datetime
# Find the range of years
ymin = tmin.year
ymax = tmax.year
if ymax > ymin + 1: # greater than a year
# Find the step we want to use
ystep = int(select_step_scalar(max(1, (ymax - ymin) / 3)))
ymin = ystep * (ymin // ystep)
# Generate the years for these steps
times = []
for year in range(ymin, ymax + 1, ystep):
times.append(datetime(year=year, month=1, day=1))
else: # greater than a month but less than a year
mmin = tmin.month
mmax = tmax.month + 12 * (ymax - ymin)
mstep = int(select_step_scalar(max(1, (mmax - mmin) / 3)))
mmin = mstep * max(1, mmin // mstep)
# Generate the months for these steps
times = []
for month in range(mmin, mmax + 1, mstep):
times.append(
datetime(
year=ymin + (month - 1) // 12,
month=(month - 1) % 12 + 1,
day=1,
)
)
# Convert back to MJD
values = Time(times, scale=self._converter.scale).mjd
elif vrange > 1: # greater than a day
self.set_params(steps=[1, 2, 5, 10])
values = super().tick_values(vmin, vmax)
else:
# Determine ideal step
dv = (vmax - vmin) / 3 * 24 << u.hourangle
# And round to nearest sensible value
dv = select_step_hour(dv).to_value(u.hourangle) / 24
# Determine tick locations
imin = np.ceil(vmin / dv)
imax = np.floor(vmax / dv)
values = np.arange(imin, imax + 1, dtype=np.int64) * dv
else:
values = super().tick_values(vmin, vmax)
# Get rid of values outside of the input interval
values = values[(values >= vmin) & (values <= vmax)]
return values
def __call__(self):
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
class AstropyTimeFormatter(ScalarFormatter):
def __init__(self, converter, *args, **kwargs):
super().__init__(*args, **kwargs)
self._converter = converter
self.set_useOffset(False)
self.set_scientific(False)
def __call__(self, value, pos=None):
# Needed for Matplotlib <3.1
if self._converter.format in STR_FORMATS:
return self.format_ticks([value])[0]
else:
return super().__call__(value, pos=pos)
def format_ticks(self, values):
if len(values) == 0:
return []
if self._converter.format in YMDHMS_FORMATS:
times = Time(values, format="mjd", scale=self._converter.scale)
formatted = getattr(times, self._converter.format)
if self._converter.simplify:
if self._converter.format in ("fits", "iso", "isot"):
if all([x.endswith("00:00:00.000") for x in formatted]):
split = " " if self._converter.format == "iso" else "T"
formatted = [x.split(split)[0] for x in formatted]
elif self._converter.format == "yday":
if all([x.endswith(":001:00:00:00.000") for x in formatted]):
formatted = [x.split(":", 1)[0] for x in formatted]
return formatted
elif self._converter.format == "byear_str":
return Time(
values, format="byear", scale=self._converter.scale
).byear_str
elif self._converter.format == "jyear_str":
return Time(
values, format="jyear", scale=self._converter.scale
).jyear_str
else:
return super().format_ticks(values)
class MplTimeConverter(units.ConversionInterface):
def __init__(self, scale=None, format=None, simplify=None):
super().__init__()
self.format = format
self.scale = scale
self.simplify = simplify
# Keep track of original converter in case the context manager is
# used in a nested way.
self._original_converter = units.registry.get(Time)
units.registry[Time] = self
@property
def format(self):
return self._format
@format.setter
def format(self, value):
if value in UNSUPPORTED_FORMATS:
raise ValueError(f"time_support does not support format={value}")
self._format = value
def __enter__(self):
return self
def __exit__(self, type, value, tb):
if self._original_converter is None:
del units.registry[Time]
else:
units.registry[Time] = self._original_converter
def default_units(self, x, axis):
if isinstance(x, tuple):
x = x[0]
if self.format is None:
self.format = x.format
if self.scale is None:
self.scale = x.scale
return "astropy_time"
def convert(self, value, unit, axis):
"""
Convert a Time value to a scalar or array.
"""
scaled = getattr(value, self.scale)
if self.format in YMDHMS_FORMATS:
return scaled.mjd
elif self.format == "byear_str":
return scaled.byear
elif self.format == "jyear_str":
return scaled.jyear
else:
return getattr(scaled, self.format)
def axisinfo(self, unit, axis):
"""
Return major and minor tick locators and formatters.
"""
majloc = AstropyTimeLocator(self)
majfmt = AstropyTimeFormatter(self)
return units.AxisInfo(
majfmt=majfmt, majloc=majloc, label=f"Time ({self.scale})"
)
return MplTimeConverter(scale=scale, format=format, simplify=simplify)
|
cfb3412bbc681023cd3960664d922639e939bd40d03aff04f1b4f731dfcf17b8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.stats.histogram import calculate_bin_edges
__all__ = ["hist"]
def hist(x, bins=10, ax=None, max_bins=1e5, **kwargs):
"""Enhanced histogram function.
This is a histogram function that enables the use of more sophisticated
    algorithms for determining bins. Aside from the ``bins`` argument, which
    accepts a string specifying how the bins are computed, the parameters are
    the same as for ``plt.hist()``.
This function was ported from astroML: https://www.astroml.org/
Parameters
----------
x : array-like
array of data to be histogrammed
bins : int, list, or str, optional
If bins is a string, then it must be one of:
- 'blocks' : use bayesian blocks for dynamic bin widths
- 'knuth' : use Knuth's rule to determine bins
- 'scott' : use Scott's rule to determine bins
- 'freedman' : use the Freedman-Diaconis rule to determine bins
ax : `~matplotlib.axes.Axes` instance, optional
Specify the Axes on which to draw the histogram. If not specified,
then the current active axes will be used.
max_bins : int, optional
Maximum number of bins allowed. With more than a few thousand bins
the performance of matplotlib will not be great. If the number of
bins is large *and* the number of input data points is large then
        it will take a very long time to compute the histogram.
**kwargs :
other keyword arguments are described in ``plt.hist()``.
Notes
-----
Return values are the same as for ``plt.hist()``
See Also
--------
astropy.stats.histogram
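    Examples
    --------
    A minimal usage sketch (requires matplotlib; synthetic data chosen
    only for illustration)::
        import numpy as np
        from astropy.visualization import hist
        rng = np.random.default_rng(0)
        x = rng.normal(size=1000)
        counts, edges, patches = hist(x, bins='freedman', histtype='stepfilled')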
"""
# Note that we only calculate the bin edges...matplotlib will calculate
# the actual histogram.
range = kwargs.get("range", None)
weights = kwargs.get("weights", None)
bins = calculate_bin_edges(x, bins, range=range, weights=weights)
if len(bins) > max_bins:
raise ValueError(
"Histogram has too many bins: "
"{nbin}. Use max_bins to increase the number "
"of allowed bins or range to restrict "
"the histogram range.".format(nbin=len(bins))
)
if ax is None:
# optional dependency; only import if strictly needed.
import matplotlib.pyplot as plt
ax = plt.gca()
return ax.hist(x, bins, **kwargs)
|
7133cca6cc7d224672373e1e60008577b1798176911bb951d37807810794aefd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Classes that deal with stretching, i.e. mapping a range of [0:1] values onto
another set of [0:1] values with a transformation.
"""
import numpy as np
from .transform import BaseTransform, CompositeTransform
__all__ = [
"BaseStretch",
"LinearStretch",
"SqrtStretch",
"PowerStretch",
"PowerDistStretch",
"SquaredStretch",
"LogStretch",
"AsinhStretch",
"SinhStretch",
"HistEqStretch",
"ContrastBiasStretch",
"CompositeStretch",
]
def _logn(n, x, out=None):
"""Calculate the log base n of x."""
# We define this because numpy.lib.scimath.logn doesn't support out=
if out is None:
return np.log(x) / np.log(n)
else:
np.log(x, out=out)
np.true_divide(out, np.log(n), out=out)
return out
def _prepare(values, clip=True, out=None):
"""
Prepare the data by optionally clipping and copying, and return the
array that should be subsequently used for in-place calculations.
"""
if clip:
return np.clip(values, 0.0, 1.0, out=out)
else:
if out is None:
return np.array(values, copy=True)
else:
out[:] = np.asarray(values)
return out
class BaseStretch(BaseTransform):
"""
Base class for the stretch classes, which, when called with an array
    of values in the range [0:1], return a transformed array of values,
also in the range [0:1].
"""
@property
def _supports_invalid_kw(self):
return False
def __add__(self, other):
return CompositeStretch(other, self)
def __call__(self, values, clip=True, out=None):
"""
Transform values using this stretch.
Parameters
----------
values : array-like
The input values, which should already be normalized to the
[0:1] range.
clip : bool, optional
If `True` (default), values outside the [0:1] range are
clipped to the [0:1] range.
out : ndarray, optional
If specified, the output values will be placed in this array
(typically used for in-place calculations).
Returns
-------
result : ndarray
The transformed values.
"""
@property
def inverse(self):
"""A stretch object that performs the inverse operation."""
class LinearStretch(BaseStretch):
"""
A linear stretch with a slope and offset.
The stretch is given by:
.. math::
        y = {\rm slope} \, x + {\rm intercept}
Parameters
----------
slope : float, optional
The ``slope`` parameter used in the above formula. Default is 1.
intercept : float, optional
The ``intercept`` parameter used in the above formula. Default is 0.
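    Examples
    --------
    A minimal sketch (parameter values chosen only for illustration)::
        import numpy as np
        from astropy.visualization import LinearStretch
        stretch = LinearStretch(slope=0.5, intercept=0.25)
        y = stretch(np.linspace(0.0, 1.0, 5))  # y = 0.5 * x + 0.25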
"""
def __init__(self, slope=1, intercept=0):
super().__init__()
self.slope = slope
self.intercept = intercept
def __call__(self, values, clip=True, out=None):
values = _prepare(values, clip=clip, out=out)
if self.slope != 1:
np.multiply(values, self.slope, out=values)
if self.intercept != 0:
np.add(values, self.intercept, out=values)
return values
@property
def inverse(self):
"""A stretch object that performs the inverse operation."""
return LinearStretch(1.0 / self.slope, -self.intercept / self.slope)
class SqrtStretch(BaseStretch):
r"""
A square root stretch.
The stretch is given by:
.. math::
y = \sqrt{x}
"""
@property
def _supports_invalid_kw(self):
return True
def __call__(self, values, clip=True, out=None, invalid=None):
"""
Transform values using this stretch.
Parameters
----------
values : array-like
The input values, which should already be normalized to the
[0:1] range.
clip : bool, optional
If `True` (default), values outside the [0:1] range are
clipped to the [0:1] range.
out : ndarray, optional
If specified, the output values will be placed in this array
(typically used for in-place calculations).
invalid : None or float, optional
Value to assign NaN values generated by this class. NaNs in
the input ``values`` array are not changed. This option is
generally used with matplotlib normalization classes, where
the ``invalid`` value should map to the matplotlib colormap
"under" value (i.e., any finite value < 0). If `None`, then
NaN values are not replaced. This keyword has no effect if
``clip=True``.
Returns
-------
result : ndarray
The transformed values.
"""
values = _prepare(values, clip=clip, out=out)
replace_invalid = not clip and invalid is not None
with np.errstate(invalid="ignore"):
if replace_invalid:
idx = values < 0
np.sqrt(values, out=values)
if replace_invalid:
# Assign new NaN (i.e., NaN not in the original input
# values, but generated by this class) to the invalid value.
values[idx] = invalid
return values
@property
def inverse(self):
"""A stretch object that performs the inverse operation."""
return PowerStretch(2)
class PowerStretch(BaseStretch):
r"""
A power stretch.
The stretch is given by:
.. math::
y = x^a
Parameters
----------
a : float
The power index (see the above formula). ``a`` must be greater
than 0.
"""
@property
def _supports_invalid_kw(self):
return True
def __init__(self, a):
super().__init__()
if a <= 0:
raise ValueError("a must be > 0")
self.power = a
def __call__(self, values, clip=True, out=None, invalid=None):
"""
Transform values using this stretch.
Parameters
----------
values : array-like
The input values, which should already be normalized to the
[0:1] range.
clip : bool, optional
If `True` (default), values outside the [0:1] range are
clipped to the [0:1] range.
out : ndarray, optional
If specified, the output values will be placed in this array
(typically used for in-place calculations).
invalid : None or float, optional
Value to assign NaN values generated by this class. NaNs in
the input ``values`` array are not changed. This option is
generally used with matplotlib normalization classes, where
the ``invalid`` value should map to the matplotlib colormap
"under" value (i.e., any finite value < 0). If `None`, then
NaN values are not replaced. This keyword has no effect if
``clip=True``.
Returns
-------
result : ndarray
The transformed values.
"""
values = _prepare(values, clip=clip, out=out)
replace_invalid = (
not clip
and invalid is not None
and ((-1 < self.power < 0) or (0 < self.power < 1))
)
with np.errstate(invalid="ignore"):
if replace_invalid:
idx = values < 0
np.power(values, self.power, out=values)
if replace_invalid:
# Assign new NaN (i.e., NaN not in the original input
# values, but generated by this class) to the invalid value.
values[idx] = invalid
return values
@property
def inverse(self):
"""A stretch object that performs the inverse operation."""
return PowerStretch(1.0 / self.power)
class PowerDistStretch(BaseStretch):
r"""
An alternative power stretch.
The stretch is given by:
.. math::
y = \frac{a^x - 1}{a - 1}
Parameters
----------
a : float, optional
The ``a`` parameter used in the above formula. ``a`` must be
greater than or equal to 0, but cannot be set to 1. Default is
1000.
"""
def __init__(self, a=1000.0):
if a < 0 or a == 1: # singularity
raise ValueError("a must be >= 0, but cannot be set to 1")
super().__init__()
self.exp = a
def __call__(self, values, clip=True, out=None):
values = _prepare(values, clip=clip, out=out)
np.power(self.exp, values, out=values)
np.subtract(values, 1, out=values)
np.true_divide(values, self.exp - 1.0, out=values)
return values
@property
def inverse(self):
"""A stretch object that performs the inverse operation."""
return InvertedPowerDistStretch(a=self.exp)
class InvertedPowerDistStretch(BaseStretch):
r"""
Inverse transformation for
    `~astropy.visualization.PowerDistStretch`.
The stretch is given by:
.. math::
        y = \frac{\log{(x (a - 1) + 1)}}{\log{a}}
Parameters
----------
a : float, optional
The ``a`` parameter used in the above formula. ``a`` must be
greater than or equal to 0, but cannot be set to 1. Default is
1000.
"""
def __init__(self, a=1000.0):
if a < 0 or a == 1: # singularity
raise ValueError("a must be >= 0, but cannot be set to 1")
super().__init__()
self.exp = a
def __call__(self, values, clip=True, out=None):
values = _prepare(values, clip=clip, out=out)
np.multiply(values, self.exp - 1.0, out=values)
np.add(values, 1, out=values)
_logn(self.exp, values, out=values)
return values
@property
def inverse(self):
"""A stretch object that performs the inverse operation."""
return PowerDistStretch(a=self.exp)
class SquaredStretch(PowerStretch):
r"""
A convenience class for a power stretch of 2.
The stretch is given by:
.. math::
y = x^2
"""
def __init__(self):
super().__init__(2)
@property
def inverse(self):
"""A stretch object that performs the inverse operation."""
return SqrtStretch()
class LogStretch(BaseStretch):
r"""
A log stretch.
The stretch is given by:
.. math::
y = \frac{\log{(a x + 1)}}{\log{(a + 1)}}
Parameters
----------
a : float
The ``a`` parameter used in the above formula. ``a`` must be
greater than 0. Default is 1000.
"""
@property
def _supports_invalid_kw(self):
return True
def __init__(self, a=1000.0):
super().__init__()
if a <= 0: # singularity
raise ValueError("a must be > 0")
self.exp = a
def __call__(self, values, clip=True, out=None, invalid=None):
"""
Transform values using this stretch.
Parameters
----------
values : array-like
The input values, which should already be normalized to the
[0:1] range.
clip : bool, optional
If `True` (default), values outside the [0:1] range are
clipped to the [0:1] range.
out : ndarray, optional
If specified, the output values will be placed in this array
(typically used for in-place calculations).
invalid : None or float, optional
Value to assign NaN values generated by this class. NaNs in
the input ``values`` array are not changed. This option is
generally used with matplotlib normalization classes, where
the ``invalid`` value should map to the matplotlib colormap
"under" value (i.e., any finite value < 0). If `None`, then
NaN values are not replaced. This keyword has no effect if
``clip=True``.
Returns
-------
result : ndarray
The transformed values.
"""
values = _prepare(values, clip=clip, out=out)
replace_invalid = not clip and invalid is not None
with np.errstate(invalid="ignore"):
if replace_invalid:
idx = values < 0
np.multiply(values, self.exp, out=values)
np.add(values, 1.0, out=values)
np.log(values, out=values)
np.true_divide(values, np.log(self.exp + 1.0), out=values)
if replace_invalid:
# Assign new NaN (i.e., NaN not in the original input
# values, but generated by this class) to the invalid value.
values[idx] = invalid
return values
@property
def inverse(self):
"""A stretch object that performs the inverse operation."""
return InvertedLogStretch(self.exp)
class InvertedLogStretch(BaseStretch):
r"""
    Inverse transformation for `~astropy.visualization.LogStretch`.
The stretch is given by:
.. math::
        y = \frac{e^{x \log{(a + 1)}} - 1}{a} \\
          = \frac{(a + 1)^{x} - 1}{a}
Parameters
----------
    a : float
        The ``a`` parameter used in the above formula. ``a`` must be
        greater than 0.
"""
def __init__(self, a):
super().__init__()
if a <= 0: # singularity
raise ValueError("a must be > 0")
self.exp = a
def __call__(self, values, clip=True, out=None):
values = _prepare(values, clip=clip, out=out)
np.multiply(values, np.log(self.exp + 1.0), out=values)
np.exp(values, out=values)
np.subtract(values, 1.0, out=values)
np.true_divide(values, self.exp, out=values)
return values
@property
def inverse(self):
"""A stretch object that performs the inverse operation."""
return LogStretch(self.exp)
class AsinhStretch(BaseStretch):
r"""
An asinh stretch.
The stretch is given by:
.. math::
y = \frac{{\rm asinh}(x / a)}{{\rm asinh}(1 / a)}.
Parameters
----------
a : float, optional
The ``a`` parameter used in the above formula. The value of
this parameter is where the asinh curve transitions from linear
to logarithmic behavior, expressed as a fraction of the
normalized image. ``a`` must be greater than 0 and less than or
equal to 1 (0 < a <= 1). Default is 0.1.
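    Examples
    --------
    An illustrative round trip through the stretch and its inverse (the
    import path assumes this class is exposed through
    ``astropy.visualization``)::
        >>> import numpy as np
        >>> from astropy.visualization import AsinhStretch
        >>> stretch = AsinhStretch(a=0.1)
        >>> stretch.inverse(stretch(np.array([0.0, 0.5, 1.0])))  # doctest: +FLOAT_CMP
        array([0. , 0.5, 1. ])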
"""
def __init__(self, a=0.1):
super().__init__()
if a <= 0 or a > 1:
raise ValueError("a must be > 0 and <= 1")
self.a = a
def __call__(self, values, clip=True, out=None):
values = _prepare(values, clip=clip, out=out)
np.true_divide(values, self.a, out=values)
np.arcsinh(values, out=values)
np.true_divide(values, np.arcsinh(1.0 / self.a), out=values)
return values
@property
def inverse(self):
"""A stretch object that performs the inverse operation."""
return SinhStretch(a=1.0 / np.arcsinh(1.0 / self.a))
class SinhStretch(BaseStretch):
r"""
A sinh stretch.
The stretch is given by:
.. math::
y = \frac{{\rm sinh}(x / a)}{{\rm sinh}(1 / a)}
Parameters
----------
a : float, optional
The ``a`` parameter used in the above formula. ``a`` must be
greater than 0 and less than or equal to 1 (0 < a <= 1).
Default is 1/3.
"""
def __init__(self, a=1.0 / 3.0):
super().__init__()
if a <= 0 or a > 1:
raise ValueError("a must be > 0 and <= 1")
self.a = a
def __call__(self, values, clip=True, out=None):
values = _prepare(values, clip=clip, out=out)
np.true_divide(values, self.a, out=values)
np.sinh(values, out=values)
np.true_divide(values, np.sinh(1.0 / self.a), out=values)
return values
@property
def inverse(self):
"""A stretch object that performs the inverse operation."""
return AsinhStretch(a=1.0 / np.sinh(1.0 / self.a))
class HistEqStretch(BaseStretch):
"""
A histogram equalization stretch.
Parameters
----------
data : array-like
The data defining the equalization.
values : array-like, optional
The input image values, which should already be normalized to
the [0:1] range.
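    Examples
    --------
    An illustrative sketch: for uniformly spaced ``data`` the equalization
    is the identity on [0:1] (the import path assumes this class is exposed
    through ``astropy.visualization``)::
        >>> import numpy as np
        >>> from astropy.visualization import HistEqStretch
        >>> stretch = HistEqStretch(np.linspace(0.0, 1.0, 5))
        >>> stretch(np.array([0.0, 0.5, 1.0]))  # doctest: +FLOAT_CMP
        array([0. , 0.5, 1. ])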
"""
def __init__(self, data, values=None):
# Assume data is not necessarily normalized at this point
self.data = np.sort(data.ravel())
self.data = self.data[np.isfinite(self.data)]
vmin = self.data.min()
vmax = self.data.max()
self.data = (self.data - vmin) / (vmax - vmin)
# Compute relative position of each pixel
if values is None:
self.values = np.linspace(0.0, 1.0, len(self.data))
else:
self.values = values
def __call__(self, values, clip=True, out=None):
values = _prepare(values, clip=clip, out=out)
values[:] = np.interp(values, self.data, self.values)
return values
@property
def inverse(self):
"""A stretch object that performs the inverse operation."""
return InvertedHistEqStretch(self.data, values=self.values)
class InvertedHistEqStretch(BaseStretch):
"""
    Inverse transformation for `~astropy.visualization.HistEqStretch`.
Parameters
----------
data : array-like
The data defining the equalization.
values : array-like, optional
The input image values, which should already be normalized to
the [0:1] range.
"""
def __init__(self, data, values=None):
self.data = data[np.isfinite(data)]
if values is None:
self.values = np.linspace(0.0, 1.0, len(self.data))
else:
self.values = values
def __call__(self, values, clip=True, out=None):
values = _prepare(values, clip=clip, out=out)
values[:] = np.interp(values, self.values, self.data)
return values
@property
def inverse(self):
"""A stretch object that performs the inverse operation."""
return HistEqStretch(self.data, values=self.values)
class ContrastBiasStretch(BaseStretch):
r"""
A stretch that takes into account contrast and bias.
The stretch is given by:
.. math::
y = (x - {\rm bias}) * {\rm contrast} + 0.5
and the output values are clipped to the [0:1] range.
Parameters
----------
contrast : float
The contrast parameter (see the above formula).
bias : float
The bias parameter (see the above formula).
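    Examples
    --------
    A minimal illustrative doctest (values follow from the formula above;
    the import path assumes this class is exposed through
    ``astropy.visualization``)::
        >>> import numpy as np
        >>> from astropy.visualization import ContrastBiasStretch
        >>> stretch = ContrastBiasStretch(contrast=2.0, bias=0.25)
        >>> stretch(np.array([0.0, 0.25, 0.5]))  # doctest: +FLOAT_CMP
        array([0. , 0.5, 1. ])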
"""
def __init__(self, contrast, bias):
super().__init__()
self.contrast = contrast
self.bias = bias
def __call__(self, values, clip=True, out=None):
# As a special case here, we only clip *after* the
# transformation since it does not map [0:1] to [0:1]
values = _prepare(values, clip=False, out=out)
np.subtract(values, self.bias, out=values)
np.multiply(values, self.contrast, out=values)
np.add(values, 0.5, out=values)
if clip:
np.clip(values, 0, 1, out=values)
return values
@property
def inverse(self):
"""A stretch object that performs the inverse operation."""
return InvertedContrastBiasStretch(self.contrast, self.bias)
class InvertedContrastBiasStretch(BaseStretch):
"""
Inverse transformation for ContrastBiasStretch.
Parameters
----------
contrast : float
        The contrast parameter (see
        `~astropy.visualization.ContrastBiasStretch`).
    bias : float
        The bias parameter (see
        `~astropy.visualization.ContrastBiasStretch`).
"""
def __init__(self, contrast, bias):
super().__init__()
self.contrast = contrast
self.bias = bias
def __call__(self, values, clip=True, out=None):
# As a special case here, we only clip *after* the
# transformation since it does not map [0:1] to [0:1]
values = _prepare(values, clip=False, out=out)
np.subtract(values, 0.5, out=values)
np.true_divide(values, self.contrast, out=values)
np.add(values, self.bias, out=values)
if clip:
np.clip(values, 0, 1, out=values)
return values
@property
def inverse(self):
"""A stretch object that performs the inverse operation."""
return ContrastBiasStretch(self.contrast, self.bias)
class CompositeStretch(CompositeTransform, BaseStretch):
"""
A combination of two stretches.
Parameters
----------
stretch_1 : :class:`astropy.visualization.BaseStretch`
The first stretch to apply.
stretch_2 : :class:`astropy.visualization.BaseStretch`
The second stretch to apply.
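    Examples
    --------
    A minimal illustrative sketch chaining two stretches defined in this
    module (the import path assumes these classes are exposed through
    ``astropy.visualization``)::
        >>> import numpy as np
        >>> from astropy.visualization import CompositeStretch, SinhStretch, SqrtStretch
        >>> stretch = CompositeStretch(SinhStretch(), SqrtStretch())
        >>> stretch(np.array([0.0, 1.0]))  # doctest: +FLOAT_CMP
        array([0., 1.])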
"""
def __call__(self, values, clip=True, out=None):
return self.transform_2(
self.transform_1(values, clip=clip, out=out), clip=clip, out=out
)
|
0a34b14dd84b056db97a3059547119bd78422940d39153e9adbd3450e07ce5b2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import weakref
from abc import ABCMeta, abstractmethod
from copy import deepcopy
import numpy as np
# from astropy.utils.compat import ignored
from astropy import log
from astropy.units import Quantity, Unit, UnitConversionError
__all__ = [
"MissingDataAssociationException",
"IncompatibleUncertaintiesException",
"NDUncertainty",
"StdDevUncertainty",
"UnknownUncertainty",
"VarianceUncertainty",
"InverseVariance",
]
class IncompatibleUncertaintiesException(Exception):
"""This exception should be used to indicate cases in which uncertainties
with two different classes can not be propagated.
"""
class MissingDataAssociationException(Exception):
"""This exception should be used to indicate that an uncertainty instance
has not been associated with a parent `~astropy.nddata.NDData` object.
"""
class NDUncertainty(metaclass=ABCMeta):
"""This is the metaclass for uncertainty classes used with `NDData`.
Parameters
----------
array : any type, optional
The array or value (the parameter name is due to historical reasons) of
the uncertainty. `numpy.ndarray`, `~astropy.units.Quantity` or
`NDUncertainty` subclasses are recommended.
If the `array` is `list`-like or `numpy.ndarray`-like it will be cast
to a plain `numpy.ndarray`.
Default is ``None``.
unit : unit-like, optional
Unit for the uncertainty ``array``. Strings that can be converted to a
`~astropy.units.Unit` are allowed.
Default is ``None``.
copy : `bool`, optional
Indicates whether to save the `array` as a copy. ``True`` copies it
before saving, while ``False`` tries to save every parameter as
reference. Note however that it is not always possible to save the
input as reference.
Default is ``True``.
Raises
------
IncompatibleUncertaintiesException
        If given another `NDUncertainty`-like class as ``array`` whose
        ``uncertainty_type`` is different.
"""
def __init__(self, array=None, copy=True, unit=None):
if isinstance(array, NDUncertainty):
# Given an NDUncertainty class or subclass check that the type
# is the same.
if array.uncertainty_type != self.uncertainty_type:
raise IncompatibleUncertaintiesException
# Check if two units are given and take the explicit one then.
if unit is not None and unit != array._unit:
# TODO : Clarify it (see NDData.init for same problem)?
log.info("overwriting Uncertainty's current unit with specified unit.")
elif array._unit is not None:
unit = array.unit
array = array.array
elif isinstance(array, Quantity):
# Check if two units are given and take the explicit one then.
if unit is not None and array.unit is not None and unit != array.unit:
log.info("overwriting Quantity's current unit with specified unit.")
elif array.unit is not None:
unit = array.unit
array = array.value
if unit is None:
self._unit = None
else:
self._unit = Unit(unit)
if copy:
array = deepcopy(array)
unit = deepcopy(unit)
self.array = array
self.parent_nddata = None # no associated NDData - until it is set!
@property
@abstractmethod
def uncertainty_type(self):
"""`str` : Short description of the type of uncertainty.
Defined as abstract property so subclasses *have* to override this.
"""
return None
@property
def supports_correlated(self):
"""`bool` : Supports uncertainty propagation with correlated uncertainties?
.. versionadded:: 1.2
"""
return False
@property
def array(self):
"""`numpy.ndarray` : the uncertainty's value."""
return self._array
@array.setter
def array(self, value):
if isinstance(value, (list, np.ndarray)):
value = np.array(value, subok=False, copy=False)
self._array = value
@property
def unit(self):
"""`~astropy.units.Unit` : The unit of the uncertainty, if any."""
return self._unit
@unit.setter
def unit(self, value):
"""
The unit should be set to a value consistent with the parent NDData
unit and the uncertainty type.
"""
if value is not None:
# Check the hidden attribute below, not the property. The property
# raises an exception if there is no parent_nddata.
if self._parent_nddata is not None:
parent_unit = self.parent_nddata.unit
try:
# Check for consistency with the unit of the parent_nddata
self._data_unit_to_uncertainty_unit(parent_unit).to(value)
except UnitConversionError:
raise UnitConversionError(
"Unit {} is incompatible with unit {} of parent nddata".format(
value, parent_unit
)
)
self._unit = Unit(value)
else:
self._unit = value
@property
def quantity(self):
"""
This uncertainty as an `~astropy.units.Quantity` object.
"""
return Quantity(self.array, self.unit, copy=False, dtype=self.array.dtype)
@property
def parent_nddata(self):
"""`NDData` : reference to `NDData` instance with this uncertainty.
        If the reference is not set, uncertainty propagation will not be
        possible, since propagation may need the parent data in addition to
        the uncertainty.
"""
no_parent_message = "uncertainty is not associated with an NDData object"
parent_lost_message = (
"the associated NDData object was deleted and cannot be accessed "
"anymore. You can prevent the NDData object from being deleted by "
"assigning it to a variable. If this happened after unpickling "
"make sure you pickle the parent not the uncertainty directly."
)
try:
parent = self._parent_nddata
except AttributeError:
raise MissingDataAssociationException(no_parent_message)
else:
if parent is None:
raise MissingDataAssociationException(no_parent_message)
else:
# The NDData is saved as weak reference so we must call it
# to get the object the reference points to. However because
# we have a weak reference here it's possible that the parent
# was deleted because its reference count dropped to zero.
if isinstance(self._parent_nddata, weakref.ref):
resolved_parent = self._parent_nddata()
if resolved_parent is None:
log.info(parent_lost_message)
return resolved_parent
else:
log.info("parent_nddata should be a weakref to an NDData object.")
return self._parent_nddata
@parent_nddata.setter
def parent_nddata(self, value):
if value is not None and not isinstance(value, weakref.ref):
# Save a weak reference on the uncertainty that points to this
# instance of NDData. Direct references should NOT be used:
# https://github.com/astropy/astropy/pull/4799#discussion_r61236832
value = weakref.ref(value)
# Set _parent_nddata here and access below with the property because value
# is a weakref
self._parent_nddata = value
        # Set the uncertainty's unit to that of the parent if it was not
        # already set, unless initializing with an empty parent (value=None).
if value is not None:
parent_unit = self.parent_nddata.unit
if self.unit is None:
if parent_unit is None:
self.unit = None
else:
# Set the uncertainty's unit to the appropriate value
self.unit = self._data_unit_to_uncertainty_unit(parent_unit)
else:
# Check that units of uncertainty are compatible with those of
# the parent. If they are, no need to change units of the
# uncertainty or the data. If they are not, let the user know.
unit_from_data = self._data_unit_to_uncertainty_unit(parent_unit)
try:
unit_from_data.to(self.unit)
except UnitConversionError:
raise UnitConversionError(
"Unit {} of uncertainty "
"incompatible with unit {} of "
"data".format(self.unit, parent_unit)
)
@abstractmethod
def _data_unit_to_uncertainty_unit(self, value):
"""
Subclasses must override this property. It should take in a data unit
and return the correct unit for the uncertainty given the uncertainty
type.
"""
return None
def __repr__(self):
prefix = self.__class__.__name__ + "("
try:
body = np.array2string(self.array, separator=", ", prefix=prefix)
except AttributeError:
# In case it wasn't possible to use array2string
body = str(self.array)
return "".join([prefix, body, ")"])
def __getstate__(self):
# Because of the weak reference the class wouldn't be picklable.
try:
return self._array, self._unit, self.parent_nddata
except MissingDataAssociationException:
# In case there's no parent
return self._array, self._unit, None
def __setstate__(self, state):
if len(state) != 3:
raise TypeError("The state should contain 3 items.")
self._array = state[0]
self._unit = state[1]
parent = state[2]
if parent is not None:
parent = weakref.ref(parent)
self._parent_nddata = parent
def __getitem__(self, item):
"""Normal slicing on the array, keep the unit and return a reference."""
return self.__class__(self.array[item], unit=self.unit, copy=False)
def propagate(self, operation, other_nddata, result_data, correlation):
"""Calculate the resulting uncertainty given an operation on the data.
.. versionadded:: 1.2
Parameters
----------
operation : callable
The operation that is performed on the `NDData`. Supported are
`numpy.add`, `numpy.subtract`, `numpy.multiply` and
`numpy.true_divide` (or `numpy.divide`).
other_nddata : `NDData` instance
The second operand in the arithmetic operation.
result_data : `~astropy.units.Quantity` or ndarray
The result of the arithmetic operations on the data.
correlation : `numpy.ndarray` or number
The correlation (rho) is defined between the uncertainties in
sigma_AB = sigma_A * sigma_B * rho. A value of ``0`` means
uncorrelated operands.
Returns
-------
resulting_uncertainty : `NDUncertainty` instance
Another instance of the same `NDUncertainty` subclass containing
the uncertainty of the result.
Raises
------
ValueError
If the ``operation`` is not supported or if correlation is not zero
but the subclass does not support correlated uncertainties.
Notes
-----
First this method checks if a correlation is given and the subclass
implements propagation with correlated uncertainties.
Then the second uncertainty is converted (or an Exception is raised)
to the same class in order to do the propagation.
Then the appropriate propagation method is invoked and the result is
returned.
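        Examples
        --------
        A minimal illustrative sketch for two uncorrelated standard-deviation
        uncertainties (the import path assumes these classes are exposed
        through ``astropy.nddata``)::
            >>> import numpy as np
            >>> from astropy.nddata import NDData, StdDevUncertainty
            >>> ndd1 = NDData([1.0, 2.0], uncertainty=StdDevUncertainty([0.3, 0.4]))
            >>> ndd2 = NDData([5.0, 6.0], uncertainty=StdDevUncertainty([0.4, 0.3]))
            >>> ndd1.uncertainty.propagate(np.add, ndd2, ndd1.data + ndd2.data, 0)  # doctest: +FLOAT_CMP
            StdDevUncertainty([0.5, 0.5])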
"""
# Check if the subclass supports correlation
if not self.supports_correlated:
if isinstance(correlation, np.ndarray) or correlation != 0:
raise ValueError(
"{} does not support uncertainty propagation"
" with correlation."
"".format(self.__class__.__name__)
)
# Get the other uncertainty (and convert it to a matching one)
other_uncert = self._convert_uncertainty(other_nddata.uncertainty)
if operation.__name__ == "add":
result = self._propagate_add(other_uncert, result_data, correlation)
elif operation.__name__ == "subtract":
result = self._propagate_subtract(other_uncert, result_data, correlation)
elif operation.__name__ == "multiply":
result = self._propagate_multiply(other_uncert, result_data, correlation)
elif operation.__name__ in ["true_divide", "divide"]:
result = self._propagate_divide(other_uncert, result_data, correlation)
else:
raise ValueError("unsupported operation")
return self.__class__(result, copy=False)
def _convert_uncertainty(self, other_uncert):
"""Checks if the uncertainties are compatible for propagation.
Checks if the other uncertainty is `NDUncertainty`-like and if so
verify that the uncertainty_type is equal. If the latter is not the
case try returning ``self.__class__(other_uncert)``.
Parameters
----------
other_uncert : `NDUncertainty` subclass
The other uncertainty.
Returns
-------
other_uncert : `NDUncertainty` subclass
but converted to a compatible `NDUncertainty` subclass if
possible and necessary.
Raises
------
IncompatibleUncertaintiesException:
If the other uncertainty cannot be converted to a compatible
`NDUncertainty` subclass.
"""
if isinstance(other_uncert, NDUncertainty):
if self.uncertainty_type == other_uncert.uncertainty_type:
return other_uncert
else:
return self.__class__(other_uncert)
else:
raise IncompatibleUncertaintiesException
@abstractmethod
def _propagate_add(self, other_uncert, result_data, correlation):
return None
@abstractmethod
def _propagate_subtract(self, other_uncert, result_data, correlation):
return None
@abstractmethod
def _propagate_multiply(self, other_uncert, result_data, correlation):
return None
@abstractmethod
def _propagate_divide(self, other_uncert, result_data, correlation):
return None
def represent_as(self, other_uncert):
"""Convert this uncertainty to a different uncertainty type.
Parameters
----------
other_uncert : `NDUncertainty` subclass
The `NDUncertainty` subclass to convert to.
Returns
-------
resulting_uncertainty : `NDUncertainty` instance
An instance of ``other_uncert`` subclass containing the uncertainty
converted to the new uncertainty type.
Raises
------
TypeError
If either the initial or final subclasses do not support
conversion, a `TypeError` is raised.
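        Examples
        --------
        A minimal illustrative conversion (the import path assumes these
        classes are exposed through ``astropy.nddata``)::
            >>> from astropy.nddata import StdDevUncertainty, VarianceUncertainty
            >>> std = StdDevUncertainty([0.1, 0.2])
            >>> std.represent_as(VarianceUncertainty)  # doctest: +FLOAT_CMP
            VarianceUncertainty([0.01, 0.04])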
"""
as_variance = getattr(self, "_convert_to_variance", None)
if as_variance is None:
raise TypeError(
f"{type(self)} does not support conversion to another uncertainty type."
)
from_variance = getattr(other_uncert, "_convert_from_variance", None)
if from_variance is None:
raise TypeError(
f"{other_uncert.__name__} does not support conversion from "
"another uncertainty type."
)
return from_variance(as_variance())
class UnknownUncertainty(NDUncertainty):
"""This class implements any unknown uncertainty type.
The main purpose of having an unknown uncertainty class is to prevent
uncertainty propagation.
Parameters
----------
args, kwargs :
see `NDUncertainty`
"""
@property
def supports_correlated(self):
"""`False` : Uncertainty propagation is *not* possible for this class."""
return False
@property
def uncertainty_type(self):
"""``"unknown"`` : `UnknownUncertainty` implements any unknown \
uncertainty type.
"""
return "unknown"
def _data_unit_to_uncertainty_unit(self, value):
"""
No way to convert if uncertainty is unknown.
"""
return None
def _convert_uncertainty(self, other_uncert):
"""Raise an Exception because unknown uncertainty types cannot
implement propagation.
"""
msg = "Uncertainties of unknown type cannot be propagated."
raise IncompatibleUncertaintiesException(msg)
def _propagate_add(self, other_uncert, result_data, correlation):
"""Not possible for unknown uncertainty types."""
return None
def _propagate_subtract(self, other_uncert, result_data, correlation):
return None
def _propagate_multiply(self, other_uncert, result_data, correlation):
return None
def _propagate_divide(self, other_uncert, result_data, correlation):
return None
class _VariancePropagationMixin:
"""
Propagation of uncertainties for variances, also used to perform error
propagation for variance-like uncertainties (standard deviation and inverse
variance).
"""
def _propagate_add_sub(
self,
other_uncert,
result_data,
correlation,
subtract=False,
to_variance=lambda x: x,
from_variance=lambda x: x,
):
"""
Error propagation for addition or subtraction of variance or
variance-like uncertainties. Uncertainties are calculated using the
        formulae for variance but can be used for any uncertainty that is
        convertible to a variance.
Parameters
----------
other_uncert : `~astropy.nddata.NDUncertainty` instance
The uncertainty, if any, of the other operand.
result_data : `~astropy.nddata.NDData` instance
The results of the operation on the data.
correlation : float or array-like
Correlation of the uncertainties.
subtract : bool, optional
If ``True``, propagate for subtraction, otherwise propagate for
addition.
to_variance : function, optional
Function that will transform the input uncertainties to variance.
The default assumes the uncertainty is the variance.
from_variance : function, optional
Function that will convert from variance to the input uncertainty.
The default assumes the uncertainty is the variance.
"""
if subtract:
correlation_sign = -1
else:
correlation_sign = 1
try:
result_unit_sq = result_data.unit**2
except AttributeError:
result_unit_sq = None
if other_uncert.array is not None:
# Formula: sigma**2 = dB
if other_uncert.unit is not None and result_unit_sq != to_variance(
other_uncert.unit
):
# If the other uncertainty has a unit and this unit differs
# from the unit of the result convert it to the results unit
other = (
to_variance(other_uncert.array << other_uncert.unit)
.to(result_unit_sq)
.value
)
else:
other = to_variance(other_uncert.array)
else:
other = 0
if self.array is not None:
# Formula: sigma**2 = dA
if (
self.unit is not None
and to_variance(self.unit) != self.parent_nddata.unit**2
):
# If the uncertainty has a different unit than the result we
# need to convert it to the results unit.
this = to_variance(self.array << self.unit).to(result_unit_sq).value
else:
this = to_variance(self.array)
else:
this = 0
        # Formula: sigma**2 = dA + dB +/- 2*cor*sqrt(dA*dB)
        #          i.e. sigma**2 = this + other +/- 2*cor*sqrt(this*other)
        # (sign depends on whether addition or subtraction)
# Determine the result depending on the correlation
if isinstance(correlation, np.ndarray) or correlation != 0:
corr = 2 * correlation * np.sqrt(this * other)
result = this + other + correlation_sign * corr
else:
result = this + other
return from_variance(result)
def _propagate_multiply_divide(
self,
other_uncert,
result_data,
correlation,
divide=False,
to_variance=lambda x: x,
from_variance=lambda x: x,
):
"""
Error propagation for multiplication or division of variance or
variance-like uncertainties. Uncertainties are calculated using the
        formulae for variance but can be used for any uncertainty that is
        convertible to a variance.
Parameters
----------
other_uncert : `~astropy.nddata.NDUncertainty` instance
The uncertainty, if any, of the other operand.
result_data : `~astropy.nddata.NDData` instance
The results of the operation on the data.
correlation : float or array-like
Correlation of the uncertainties.
divide : bool, optional
If ``True``, propagate for division, otherwise propagate for
multiplication.
to_variance : function, optional
Function that will transform the input uncertainties to variance.
The default assumes the uncertainty is the variance.
from_variance : function, optional
Function that will convert from variance to the input uncertainty.
The default assumes the uncertainty is the variance.
"""
# For multiplication we don't need the result as quantity
if isinstance(result_data, Quantity):
result_data = result_data.value
if divide:
correlation_sign = -1
else:
correlation_sign = 1
if other_uncert.array is not None:
# We want the result to have a unit consistent with the parent, so
# we only need to convert the unit of the other uncertainty if it
# is different from its data's unit.
if (
other_uncert.unit
and to_variance(1 * other_uncert.unit)
!= ((1 * other_uncert.parent_nddata.unit) ** 2).unit
):
d_b = (
to_variance(other_uncert.array << other_uncert.unit)
.to((1 * other_uncert.parent_nddata.unit) ** 2)
.value
)
else:
d_b = to_variance(other_uncert.array)
# Formula: sigma**2 = |A|**2 * d_b
right = np.abs(self.parent_nddata.data**2 * d_b)
else:
right = 0
if self.array is not None:
# Just the reversed case
if (
self.unit
and to_variance(1 * self.unit)
!= ((1 * self.parent_nddata.unit) ** 2).unit
):
d_a = (
to_variance(self.array << self.unit)
.to((1 * self.parent_nddata.unit) ** 2)
.value
)
else:
d_a = to_variance(self.array)
# Formula: sigma**2 = |B|**2 * d_a
left = np.abs(other_uncert.parent_nddata.data**2 * d_a)
else:
left = 0
# Multiplication
#
# The fundamental formula is:
# sigma**2 = |AB|**2*(d_a/A**2+d_b/B**2+2*sqrt(d_a)/A*sqrt(d_b)/B*cor)
#
# This formula is not very handy since it generates NaNs for every
# zero in A and B. So we rewrite it:
#
# Multiplication Formula:
        #   sigma**2 = d_a*B**2 + d_b*A**2 + 2 * cor * A * B * sqrt(d_a*d_b)
        #   sigma**2 = left + right + 2 * cor * A * B * sqrt(d_a*d_b)
#
# Division
#
# The fundamental formula for division is:
# sigma**2 = |A/B|**2*(d_a/A**2+d_b/B**2-2*sqrt(d_a)/A*sqrt(d_b)/B*cor)
#
# As with multiplication, it is convenient to rewrite this to avoid
# nans where A is zero.
#
# Division formula (rewritten):
        #   sigma**2 = d_a/B**2 + (A/B)**2 * d_b/B**2
        #              - 2 * cor * A * sqrt(d_a*d_b) / B**3
        #   sigma**2 = d_a/B**2 + (A/B)**2 * d_b/B**2
        #              - 2 * cor * sqrt(d_a)/B**2 * sqrt(d_b) * A / B
        #   sigma**2 = multiplication formula / B**4 (and sign change in
        #              the correlation)
if isinstance(correlation, np.ndarray) or correlation != 0:
corr = (
2
* correlation
* np.sqrt(d_a * d_b)
* self.parent_nddata.data
* other_uncert.parent_nddata.data
)
else:
corr = 0
if divide:
return from_variance(
(left + right + correlation_sign * corr)
/ other_uncert.parent_nddata.data**4
)
else:
return from_variance(left + right + correlation_sign * corr)
class StdDevUncertainty(_VariancePropagationMixin, NDUncertainty):
"""Standard deviation uncertainty assuming first order gaussian error
propagation.
This class implements uncertainty propagation for ``addition``,
``subtraction``, ``multiplication`` and ``division`` with other instances
    of `StdDevUncertainty`. The class can handle an uncertainty whose unit
    differs from (but is convertible to) the unit of the parent `NDData`.
    The resulting uncertainty will have the same unit as the resulting
    data. Correlated uncertainties are also supported, but the correlation
    must be supplied as input; this class cannot determine it itself.
Parameters
----------
args, kwargs :
see `NDUncertainty`
Examples
--------
`StdDevUncertainty` should always be associated with an `NDData`-like
instance, either by creating it during initialization::
>>> from astropy.nddata import NDData, StdDevUncertainty
>>> ndd = NDData([1,2,3], unit='m',
... uncertainty=StdDevUncertainty([0.1, 0.1, 0.1]))
>>> ndd.uncertainty # doctest: +FLOAT_CMP
StdDevUncertainty([0.1, 0.1, 0.1])
or by setting it manually on the `NDData` instance::
>>> ndd.uncertainty = StdDevUncertainty([0.2], unit='m', copy=True)
>>> ndd.uncertainty # doctest: +FLOAT_CMP
StdDevUncertainty([0.2])
the uncertainty ``array`` can also be set directly::
>>> ndd.uncertainty.array = 2
>>> ndd.uncertainty
StdDevUncertainty(2)
.. note::
The unit will not be displayed.
"""
@property
def supports_correlated(self):
"""`True` : `StdDevUncertainty` allows to propagate correlated \
uncertainties.
``correlation`` must be given, this class does not implement computing
it by itself.
"""
return True
@property
def uncertainty_type(self):
"""``"std"`` : `StdDevUncertainty` implements standard deviation."""
return "std"
def _convert_uncertainty(self, other_uncert):
if isinstance(other_uncert, StdDevUncertainty):
return other_uncert
else:
raise IncompatibleUncertaintiesException
def _propagate_add(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(
other_uncert,
result_data,
correlation,
subtract=False,
to_variance=np.square,
from_variance=np.sqrt,
)
def _propagate_subtract(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(
other_uncert,
result_data,
correlation,
subtract=True,
to_variance=np.square,
from_variance=np.sqrt,
)
def _propagate_multiply(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(
other_uncert,
result_data,
correlation,
divide=False,
to_variance=np.square,
from_variance=np.sqrt,
)
def _propagate_divide(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(
other_uncert,
result_data,
correlation,
divide=True,
to_variance=np.square,
from_variance=np.sqrt,
)
def _data_unit_to_uncertainty_unit(self, value):
return value
def _convert_to_variance(self):
new_array = None if self.array is None else self.array**2
new_unit = None if self.unit is None else self.unit**2
return VarianceUncertainty(new_array, unit=new_unit)
@classmethod
def _convert_from_variance(cls, var_uncert):
new_array = None if var_uncert.array is None else var_uncert.array ** (1 / 2)
new_unit = None if var_uncert.unit is None else var_uncert.unit ** (1 / 2)
return cls(new_array, unit=new_unit)
class VarianceUncertainty(_VariancePropagationMixin, NDUncertainty):
"""
Variance uncertainty assuming first order Gaussian error
propagation.
This class implements uncertainty propagation for ``addition``,
``subtraction``, ``multiplication`` and ``division`` with other instances
    of `VarianceUncertainty`. The class can handle an uncertainty whose unit
    differs from (but is convertible to) the unit of the parent `NDData`.
    The unit of the resulting uncertainty will be the square of the unit of
    the resulting data. Correlated uncertainties are also supported, but the
    correlation must be supplied as input; this class cannot determine it
    itself.
Parameters
----------
args, kwargs :
see `NDUncertainty`
Examples
--------
Compare this example to that in `StdDevUncertainty`; the uncertainties
in the examples below are equivalent to the uncertainties in
`StdDevUncertainty`.
`VarianceUncertainty` should always be associated with an `NDData`-like
instance, either by creating it during initialization::
>>> from astropy.nddata import NDData, VarianceUncertainty
>>> ndd = NDData([1,2,3], unit='m',
... uncertainty=VarianceUncertainty([0.01, 0.01, 0.01]))
>>> ndd.uncertainty # doctest: +FLOAT_CMP
VarianceUncertainty([0.01, 0.01, 0.01])
or by setting it manually on the `NDData` instance::
>>> ndd.uncertainty = VarianceUncertainty([0.04], unit='m^2', copy=True)
>>> ndd.uncertainty # doctest: +FLOAT_CMP
VarianceUncertainty([0.04])
the uncertainty ``array`` can also be set directly::
>>> ndd.uncertainty.array = 4
>>> ndd.uncertainty
VarianceUncertainty(4)
.. note::
The unit will not be displayed.
"""
@property
def uncertainty_type(self):
"""``"var"`` : `VarianceUncertainty` implements variance."""
return "var"
@property
def supports_correlated(self):
"""`True` : `VarianceUncertainty` allows to propagate correlated \
uncertainties.
``correlation`` must be given, this class does not implement computing
it by itself.
"""
return True
def _propagate_add(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(
other_uncert, result_data, correlation, subtract=False
)
def _propagate_subtract(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(
other_uncert, result_data, correlation, subtract=True
)
def _propagate_multiply(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(
other_uncert, result_data, correlation, divide=False
)
def _propagate_divide(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(
other_uncert, result_data, correlation, divide=True
)
def _data_unit_to_uncertainty_unit(self, value):
return value**2
def _convert_to_variance(self):
return self
@classmethod
def _convert_from_variance(cls, var_uncert):
return var_uncert
def _inverse(x):
"""Just a simple inverse for use in the InverseVariance."""
return 1 / x
class InverseVariance(_VariancePropagationMixin, NDUncertainty):
"""
Inverse variance uncertainty assuming first order Gaussian error
propagation.
This class implements uncertainty propagation for ``addition``,
``subtraction``, ``multiplication`` and ``division`` with other instances
    of `InverseVariance`. The class can handle an uncertainty whose unit
    differs from (but is convertible to) the unit of the parent `NDData`. The
    unit of the resulting uncertainty will be the inverse square of the unit
    of the resulting data. Correlated uncertainties are also supported, but
    the correlation must be supplied as input; this class cannot determine
    it itself.
Parameters
----------
args, kwargs :
see `NDUncertainty`
Examples
--------
Compare this example to that in `StdDevUncertainty`; the uncertainties
in the examples below are equivalent to the uncertainties in
`StdDevUncertainty`.
`InverseVariance` should always be associated with an `NDData`-like
instance, either by creating it during initialization::
>>> from astropy.nddata import NDData, InverseVariance
>>> ndd = NDData([1,2,3], unit='m',
... uncertainty=InverseVariance([100, 100, 100]))
>>> ndd.uncertainty # doctest: +FLOAT_CMP
InverseVariance([100, 100, 100])
or by setting it manually on the `NDData` instance::
>>> ndd.uncertainty = InverseVariance([25], unit='1/m^2', copy=True)
>>> ndd.uncertainty # doctest: +FLOAT_CMP
InverseVariance([25])
the uncertainty ``array`` can also be set directly::
>>> ndd.uncertainty.array = 0.25
>>> ndd.uncertainty
InverseVariance(0.25)
.. note::
The unit will not be displayed.
"""
@property
def uncertainty_type(self):
"""``"ivar"`` : `InverseVariance` implements inverse variance."""
return "ivar"
@property
def supports_correlated(self):
"""`True` : `InverseVariance` allows to propagate correlated \
uncertainties.
``correlation`` must be given, this class does not implement computing
it by itself.
"""
return True
def _propagate_add(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(
other_uncert,
result_data,
correlation,
subtract=False,
to_variance=_inverse,
from_variance=_inverse,
)
def _propagate_subtract(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(
other_uncert,
result_data,
correlation,
subtract=True,
to_variance=_inverse,
from_variance=_inverse,
)
def _propagate_multiply(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(
other_uncert,
result_data,
correlation,
divide=False,
to_variance=_inverse,
from_variance=_inverse,
)
def _propagate_divide(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(
other_uncert,
result_data,
correlation,
divide=True,
to_variance=_inverse,
from_variance=_inverse,
)
def _data_unit_to_uncertainty_unit(self, value):
return 1 / value**2
def _convert_to_variance(self):
new_array = None if self.array is None else 1 / self.array
new_unit = None if self.unit is None else 1 / self.unit
return VarianceUncertainty(new_array, unit=new_unit)
@classmethod
def _convert_from_variance(cls, var_uncert):
new_array = None if var_uncert.array is None else 1 / var_uncert.array
new_unit = None if var_uncert.unit is None else 1 / var_uncert.unit
return cls(new_array, unit=new_unit)
|
d7ed85ff2a75622088290f756b02b087ec9146b9d6124bf032456762bec353c7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module implements the base NDData class.
from copy import deepcopy
import numpy as np
from astropy import log
from astropy.units import Quantity, Unit
from astropy.utils.metadata import MetaData
from astropy.wcs.wcsapi import SlicedLowLevelWCS # noqa: F401
from astropy.wcs.wcsapi import BaseHighLevelWCS, BaseLowLevelWCS, HighLevelWCSWrapper
from .nddata_base import NDDataBase
from .nduncertainty import NDUncertainty, UnknownUncertainty
__all__ = ["NDData"]
_meta_doc = """`dict`-like : Additional meta information about the dataset."""
class NDData(NDDataBase):
"""
A container for `numpy.ndarray`-based datasets, using the
`~astropy.nddata.NDDataBase` interface.
The key distinction from raw `numpy.ndarray` is the presence of
additional metadata such as uncertainty, mask, unit, a coordinate system
and/or a dictionary containing further meta information. This class *only*
provides a container for *storing* such datasets. For further functionality
take a look at the ``See also`` section.
See also: https://docs.astropy.org/en/stable/nddata/
Parameters
----------
data : `numpy.ndarray`-like or `NDData`-like
The dataset.
uncertainty : any type, optional
Uncertainty in the dataset.
Should have an attribute ``uncertainty_type`` that defines what kind of
uncertainty is stored, for example ``"std"`` for standard deviation or
``"var"`` for variance. A metaclass defining such an interface is
`NDUncertainty` - but isn't mandatory. If the uncertainty has no such
attribute the uncertainty is stored as `UnknownUncertainty`.
Defaults to ``None``.
mask : any type, optional
Mask for the dataset. Masks should follow the ``numpy`` convention that
**valid** data points are marked by ``False`` and **invalid** ones with
``True``.
Defaults to ``None``.
wcs : any type, optional
World coordinate system (WCS) for the dataset.
Default is ``None``.
meta : `dict`-like object, optional
Additional meta information about the dataset. If no meta is provided
an empty `collections.OrderedDict` is created.
Default is ``None``.
unit : unit-like, optional
Unit for the dataset. Strings that can be converted to a
`~astropy.units.Unit` are allowed.
Default is ``None``.
copy : `bool`, optional
Indicates whether to save the arguments as copy. ``True`` copies
every attribute before saving it while ``False`` tries to save every
parameter as reference.
Note however that it is not always possible to save the input as
reference.
Default is ``False``.
.. versionadded:: 1.2
psf : `numpy.ndarray` or None, optional
Image representation of the PSF. In order for convolution to be flux-
preserving, this should generally be normalized to sum to unity.
Raises
------
TypeError
In case ``data`` or ``meta`` don't meet the restrictions.
Notes
-----
Each attribute can be accessed through the homonymous instance attribute:
``data`` in a `NDData` object can be accessed through the `data`
attribute::
>>> from astropy.nddata import NDData
>>> nd = NDData([1,2,3])
>>> nd.data
array([1, 2, 3])
Given a conflicting implicit and an explicit parameter during
initialization, for example the ``data`` is a `~astropy.units.Quantity` and
the unit parameter is not ``None``, then the implicit parameter is replaced
    (without conversion) by the explicit one and an info message is issued::
>>> import numpy as np
>>> import astropy.units as u
>>> q = np.array([1,2,3,4]) * u.m
>>> nd2 = NDData(q, unit=u.cm)
INFO: overwriting Quantity's current unit with specified unit. [astropy.nddata.nddata]
>>> nd2.data # doctest: +FLOAT_CMP
array([1., 2., 3., 4.])
>>> nd2.unit
Unit("cm")
See also
--------
NDDataRef
NDDataArray
"""
# Instead of a custom property use the MetaData descriptor also used for
# Tables. It will check if the meta is dict-like or raise an exception.
meta = MetaData(doc=_meta_doc, copy=False)
def __init__(
self,
data,
uncertainty=None,
mask=None,
wcs=None,
meta=None,
unit=None,
copy=False,
psf=None,
):
        # Rather pointless since NDDataBase does not implement any setting,
        # but previously NDDataBase did call the uncertainty setter. If
        # anyone wants to alter this behavior again, the call to the
        # superclass NDDataBase should be in here.
super().__init__()
# Check if data is any type from which to collect some implicitly
# passed parameters.
if isinstance(data, NDData): # don't use self.__class__ (issue #4137)
# Of course we need to check the data because subclasses with other
# init-logic might be passed in here. We could skip these
# tests if we compared for self.__class__ but that has other
# drawbacks.
# Comparing if there is an explicit and an implicit unit parameter.
# If that is the case use the explicit one and issue a warning
# that there might be a conflict. In case there is no explicit
# unit just overwrite the unit parameter with the NDData.unit
# and proceed as if that one was given as parameter. Same for the
# other parameters.
if unit is not None and data.unit is not None and unit != data.unit:
log.info("overwriting NDData's current unit with specified unit.")
elif data.unit is not None:
unit = data.unit
if uncertainty is not None and data.uncertainty is not None:
log.info(
"overwriting NDData's current "
"uncertainty with specified uncertainty."
)
elif data.uncertainty is not None:
uncertainty = data.uncertainty
if mask is not None and data.mask is not None:
log.info("overwriting NDData's current mask with specified mask.")
elif data.mask is not None:
mask = data.mask
if wcs is not None and data.wcs is not None:
log.info("overwriting NDData's current wcs with specified wcs.")
elif data.wcs is not None:
wcs = data.wcs
if psf is not None and data.psf is not None:
log.info("Overwriting NDData's current psf with specified psf.")
elif data.psf is not None:
psf = data.psf
if meta is not None and data.meta is not None:
log.info("overwriting NDData's current meta with specified meta.")
elif data.meta is not None:
meta = data.meta
data = data.data
else:
if hasattr(data, "mask") and hasattr(data, "data"):
# Separating data and mask
if mask is not None:
log.info(
"overwriting Masked Objects's current mask with specified mask."
)
else:
mask = data.mask
# Just save the data for further processing, we could be given
# a masked Quantity or something else entirely. Better to check
# it first.
data = data.data
if isinstance(data, Quantity):
if unit is not None and unit != data.unit:
log.info("overwriting Quantity's current unit with specified unit.")
else:
unit = data.unit
data = data.value
# Quick check on the parameters if they match the requirements.
if (
not hasattr(data, "shape")
or not hasattr(data, "__getitem__")
or not hasattr(data, "__array__")
):
# Data doesn't look like a numpy array, try converting it to
# one.
data = np.array(data, subok=True, copy=False)
# Another quick check to see if what we got looks like an array
# rather than an object (since numpy will convert a
# non-numerical/non-string inputs to an array of objects).
if data.dtype == "O":
raise TypeError("could not convert data to numpy array.")
if unit is not None:
unit = Unit(unit)
if copy:
# Data might have been copied before but no way of validating
# without another variable.
data = deepcopy(data)
mask = deepcopy(mask)
wcs = deepcopy(wcs)
psf = deepcopy(psf)
meta = deepcopy(meta)
uncertainty = deepcopy(uncertainty)
# Actually - copying the unit is unnecessary but better safe
# than sorry :-)
unit = deepcopy(unit)
# Store the attributes
self._data = data
self.mask = mask
self._wcs = None
if wcs is not None:
# Validate the wcs
self.wcs = wcs
self.meta = meta # TODO: Make this call the setter sometime
self._unit = unit
# Call the setter for uncertainty to further check the uncertainty
self.uncertainty = uncertainty
self.psf = psf
def __str__(self):
data = str(self.data)
unit = f" {self.unit}" if self.unit is not None else ""
return data + unit
def __repr__(self):
prefix = self.__class__.__name__ + "("
data = np.array2string(self.data, separator=", ", prefix=prefix)
unit = f", unit='{self.unit}'" if self.unit is not None else ""
return "".join((prefix, data, unit, ")"))
@property
def data(self):
"""
`~numpy.ndarray`-like : The stored dataset.
"""
return self._data
@property
def mask(self):
"""
any type : Mask for the dataset, if any.
Masks should follow the ``numpy`` convention that valid data points are
marked by ``False`` and invalid ones with ``True``.
"""
return self._mask
@mask.setter
def mask(self, value):
self._mask = value
@property
def unit(self):
"""
`~astropy.units.Unit` : Unit for the dataset, if any.
"""
return self._unit
@property
def wcs(self):
"""
any type : A world coordinate system (WCS) for the dataset, if any.
"""
return self._wcs
@wcs.setter
def wcs(self, wcs):
if self._wcs is not None and wcs is not None:
raise ValueError(
"You can only set the wcs attribute with a WCS if no WCS is present."
)
if wcs is None or isinstance(wcs, BaseHighLevelWCS):
self._wcs = wcs
elif isinstance(wcs, BaseLowLevelWCS):
self._wcs = HighLevelWCSWrapper(wcs)
else:
raise TypeError(
"The wcs argument must implement either the high or low level WCS API."
)
@property
def psf(self):
return self._psf
@psf.setter
def psf(self, value):
self._psf = value
@property
def uncertainty(self):
"""
any type : Uncertainty in the dataset, if any.
Should have an attribute ``uncertainty_type`` that defines what kind of
uncertainty is stored, such as ``'std'`` for standard deviation or
``'var'`` for variance. A metaclass defining such an interface is
`~astropy.nddata.NDUncertainty` but isn't mandatory.
"""
return self._uncertainty
@uncertainty.setter
def uncertainty(self, value):
if value is not None:
            # There is one requirement on the uncertainty: that
            # it has an attribute 'uncertainty_type'.
# If it does not match this requirement convert it to an unknown
# uncertainty.
if not hasattr(value, "uncertainty_type"):
log.info("uncertainty should have attribute uncertainty_type.")
value = UnknownUncertainty(value, copy=False)
# If it is a subclass of NDUncertainty we must set the
# parent_nddata attribute. (#4152)
if isinstance(value, NDUncertainty):
# In case the uncertainty already has a parent create a new
# instance because we need to assume that we don't want to
# steal the uncertainty from another NDData object
if value._parent_nddata is not None:
value = value.__class__(value, copy=False)
# Then link it to this NDData instance (internally this needs
# to be saved as weakref but that's done by NDUncertainty
# setter).
value.parent_nddata = self
self._uncertainty = value
|
c438ba234bb52b6fb7244195e9449f93ee756cbe73b8e77b008cf4adf5d01cda | """
A module that provides functions for manipulating bit masks and data quality
(DQ) arrays.
"""
import numbers
import warnings
from collections import OrderedDict
import numpy as np
__all__ = [
"bitfield_to_boolean_mask",
"interpret_bit_flags",
"BitFlagNameMap",
"extend_bit_flag_map",
"InvalidBitFlag",
]
_ENABLE_BITFLAG_CACHING = True
_MAX_UINT_TYPE = np.maximum_sctype(np.uint)
_SUPPORTED_FLAGS = int(np.bitwise_not(0, dtype=_MAX_UINT_TYPE, casting="unsafe"))
def _is_bit_flag(n):
"""
Verifies if the input number is a bit flag (i.e., an integer number that is
an integer power of 2).
Parameters
----------
n : int
A positive integer number. Non-positive integers are considered not to
be "flags".
Returns
-------
bool
``True`` if input ``n`` is a bit flag and ``False`` if it is not.
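    Examples
    --------
    Illustrative doctests (``16`` is ``2**4``, a single set bit; ``24`` has
    two bits set)::
        >>> _is_bit_flag(16)
        True
        >>> _is_bit_flag(24)
        False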
"""
if n < 1:
return False
return bin(n).count("1") == 1
def _is_int(n):
return (isinstance(n, numbers.Integral) and not isinstance(n, bool)) or (
isinstance(n, np.generic) and np.issubdtype(n, np.integer)
)
class InvalidBitFlag(ValueError):
"""Indicates that a value is not an integer that is a power of 2."""
pass
class BitFlag(int):
"""Bit flags: integer values that are powers of 2."""
def __new__(cls, val, doc=None):
if isinstance(val, tuple):
if doc is not None:
raise ValueError("Flag's doc string cannot be provided twice.")
val, doc = val
if not (_is_int(val) and _is_bit_flag(val)):
raise InvalidBitFlag(
"Value '{}' is not a valid bit flag: bit flag value must be "
"an integral power of two.".format(val)
)
s = int.__new__(cls, val)
if doc is not None:
s.__doc__ = doc
return s
class BitFlagNameMeta(type):
def __new__(mcls, name, bases, members):
for k, v in members.items():
if not k.startswith("_"):
v = BitFlag(v)
attr = [k for k in members.keys() if not k.startswith("_")]
attrl = list(map(str.lower, attr))
if _ENABLE_BITFLAG_CACHING:
cache = OrderedDict()
for b in bases:
for k, v in b.__dict__.items():
if k.startswith("_"):
continue
kl = k.lower()
if kl in attrl:
idx = attrl.index(kl)
raise AttributeError(
f"Bit flag '{attr[idx]:s}' was already defined."
)
if _ENABLE_BITFLAG_CACHING:
cache[kl] = v
members = {
k: v if k.startswith("_") else BitFlag(v) for k, v in members.items()
}
if _ENABLE_BITFLAG_CACHING:
cache.update(
{k.lower(): v for k, v in members.items() if not k.startswith("_")}
)
members = {"_locked": True, "__version__": "", **members, "_cache": cache}
else:
members = {"_locked": True, "__version__": "", **members}
return super().__new__(mcls, name, bases, members)
def __setattr__(cls, name, val):
if name == "_locked":
return super().__setattr__(name, True)
else:
if name == "__version__":
if cls._locked:
raise AttributeError("Version cannot be modified.")
return super().__setattr__(name, val)
err_msg = f"Bit flags are read-only. Unable to reassign attribute {name}"
if cls._locked:
raise AttributeError(err_msg)
namel = name.lower()
if _ENABLE_BITFLAG_CACHING:
if not namel.startswith("_") and namel in cls._cache:
raise AttributeError(err_msg)
else:
for b in cls.__bases__:
if not namel.startswith("_") and namel in list(
map(str.lower, b.__dict__)
):
raise AttributeError(err_msg)
if namel in list(map(str.lower, cls.__dict__)):
raise AttributeError(err_msg)
val = BitFlag(val)
if _ENABLE_BITFLAG_CACHING and not namel.startswith("_"):
cls._cache[namel] = val
return super().__setattr__(name, val)
def __getattr__(cls, name):
if _ENABLE_BITFLAG_CACHING:
flagnames = cls._cache
else:
flagnames = {k.lower(): v for k, v in cls.__dict__.items()}
flagnames.update(
{k.lower(): v for b in cls.__bases__ for k, v in b.__dict__.items()}
)
try:
return flagnames[name.lower()]
except KeyError:
raise AttributeError(f"Flag '{name}' not defined")
def __getitem__(cls, key):
return cls.__getattr__(key)
def __add__(cls, items):
if not isinstance(items, dict):
if not isinstance(items[0], (tuple, list)):
items = [items]
items = dict(items)
return extend_bit_flag_map(
cls.__name__ + "_" + "_".join([k for k in items]), cls, **items
)
def __iadd__(cls, other):
raise NotImplementedError(
"Unary '+' is not supported. Use binary operator instead."
)
def __delattr__(cls, name):
raise AttributeError(
f"{cls.__name__}: cannot delete {cls.mro()[-2].__name__} member."
)
def __delitem__(cls, name):
raise AttributeError(
f"{cls.__name__}: cannot delete {cls.mro()[-2].__name__} member."
)
def __repr__(cls):
return f"<{cls.mro()[-2].__name__} '{cls.__name__}'>"
class BitFlagNameMap(metaclass=BitFlagNameMeta):
"""
A base class for bit flag name maps used to describe data quality (DQ)
    flags of images by providing a mapping from a mnemonic flag name to a flag
value.
Mapping for a specific instrument should subclass this class.
Subclasses should define flags as class attributes with integer values
that are powers of 2. Each bit flag may also contain a string
comment following the flag value.
Examples
--------
>>> from astropy.nddata.bitmask import BitFlagNameMap
>>> class ST_DQ(BitFlagNameMap):
... __version__ = '1.0.0' # optional
... CR = 1, 'Cosmic Ray'
... CLOUDY = 4 # no docstring comment
... RAINY = 8, 'Dome closed'
...
>>> class ST_CAM1_DQ(ST_DQ):
... HOT = 16
... DEAD = 32
"""
pass
def extend_bit_flag_map(cls_name, base_cls=BitFlagNameMap, **kwargs):
"""
A convenience function for creating bit flags maps by subclassing an
existing map and adding additional flags supplied as keyword arguments.
Parameters
----------
cls_name : str
Class name of the bit flag map to be created.
base_cls : BitFlagNameMap, optional
Base class for the new bit flag map.
**kwargs : int
Each supplied keyword argument will be used to define bit flag
names in the new map. In addition to bit flag names, ``__version__`` is
allowed to indicate the version of the newly created map.
Examples
--------
>>> from astropy.nddata.bitmask import extend_bit_flag_map
>>> ST_DQ = extend_bit_flag_map('ST_DQ', __version__='1.0.0', CR=1, CLOUDY=4, RAINY=8)
>>> ST_CAM1_DQ = extend_bit_flag_map('ST_CAM1_DQ', ST_DQ, HOT=16, DEAD=32)
>>> ST_CAM1_DQ['HOT'] # <-- Access flags as dictionary keys
16
>>> ST_CAM1_DQ.HOT # <-- Access flags as class attributes
16
"""
new_cls = BitFlagNameMeta.__new__(
BitFlagNameMeta, cls_name, (base_cls,), {"_locked": False}
)
for k, v in kwargs.items():
try:
setattr(new_cls, k, v)
except AttributeError as e:
if new_cls[k] != int(v):
raise e
new_cls._locked = True
return new_cls
def interpret_bit_flags(bit_flags, flip_bits=None, flag_name_map=None):
"""
Converts input bit flags to a single integer value (bit mask) or `None`.
When input is a list of flags (either a Python list of integer flags or a
string of comma-, ``'|'``-, or ``'+'``-separated list of flags),
the returned bit mask is obtained by summing input flags.
.. note::
In order to flip the bits of the returned bit mask,
for input of `str` type, prepend '~' to the input string. '~' must
be prepended to the *entire string* and not to each bit flag! For
input that is already a bit mask or a Python list of bit flags, set
        ``flip_bits`` to `True` in order to flip the bits of the returned
bit mask.
Parameters
----------
bit_flags : int, str, list, None
An integer bit mask or flag, `None`, a string of comma-, ``'|'``- or
``'+'``-separated list of integer bit flags or mnemonic flag names,
or a Python list of integer bit flags. If ``bit_flags`` is a `str`
and if it is prepended with '~', then the output bit mask will have
its bits flipped (compared to simple sum of input flags).
For input ``bit_flags`` that is already a bit mask or a Python list
of bit flags, bit-flipping can be controlled through ``flip_bits``
parameter.
.. note::
When ``bit_flags`` is a list of flag names, the ``flag_name_map``
parameter must be provided.
.. note::
Only one flag separator is supported at a time. ``bit_flags``
string should not mix ``','``, ``'+'``, and ``'|'`` separators.
flip_bits : bool, None
Indicates whether or not to flip the bits of the returned bit mask
obtained from input bit flags. This parameter must be set to `None`
when input ``bit_flags`` is either `None` or a Python list of flags.
flag_name_map : BitFlagNameMap
A `BitFlagNameMap` object that provides mapping from mnemonic
bit flag names to integer bit values in order to translate mnemonic
        flags to numeric values when ``bit_flags`` is a comma-, ``'|'``-, or
        ``'+'``-separated list of mnemonic bit flag names.
Returns
-------
bitmask : int or None
Returns an integer bit mask formed from the input bit value or `None`
if input ``bit_flags`` parameter is `None` or an empty string.
If input string value was prepended with '~' (or ``flip_bits`` was set
to `True`), then returned value will have its bits flipped
(inverse mask).
Examples
--------
>>> from astropy.nddata.bitmask import interpret_bit_flags, extend_bit_flag_map
>>> ST_DQ = extend_bit_flag_map('ST_DQ', CR=1, CLOUDY=4, RAINY=8, HOT=16, DEAD=32)
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags(28))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('4,8,16'))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('CLOUDY,RAINY,HOT', flag_name_map=ST_DQ))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~4,8,16'))
'1111111111100011'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~(4+8+16)'))
'1111111111100011'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~(CLOUDY+RAINY+HOT)',
... flag_name_map=ST_DQ))
'1111111111100011'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags([4, 8, 16]))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags([4, 8, 16], flip_bits=True))
'1111111111100011'
"""
has_flip_bits = flip_bits is not None
flip_bits = bool(flip_bits)
allow_non_flags = False
if _is_int(bit_flags):
return ~int(bit_flags) if flip_bits else int(bit_flags)
elif bit_flags is None:
if has_flip_bits:
raise TypeError(
"Keyword argument 'flip_bits' must be set to 'None' when "
"input 'bit_flags' is None."
)
return None
elif isinstance(bit_flags, str):
if has_flip_bits:
raise TypeError(
"Keyword argument 'flip_bits' is not permitted for "
"comma-separated string lists of bit flags. Prepend '~' to "
"the string to indicate bit-flipping."
)
bit_flags = str(bit_flags).strip()
if bit_flags.upper() in ["", "NONE", "INDEF"]:
return None
# check whether bitwise-NOT is present and if it is, check that it is
# in the first position:
bitflip_pos = bit_flags.find("~")
if bitflip_pos == 0:
flip_bits = True
bit_flags = bit_flags[1:].lstrip()
else:
if bitflip_pos > 0:
raise ValueError("Bitwise-NOT must precede bit flag list.")
flip_bits = False
# basic check for correct use of parenthesis:
while True:
nlpar = bit_flags.count("(")
nrpar = bit_flags.count(")")
if nlpar == 0 and nrpar == 0:
break
if nlpar != nrpar:
raise ValueError("Unbalanced parentheses in bit flag list.")
lpar_pos = bit_flags.find("(")
rpar_pos = bit_flags.rfind(")")
if lpar_pos > 0 or rpar_pos < (len(bit_flags) - 1):
raise ValueError(
"Incorrect syntax (incorrect use of parenthesis) in bit flag list."
)
bit_flags = bit_flags[1:-1].strip()
if sum(k in bit_flags for k in "+,|") > 1:
raise ValueError(
"Only one type of bit flag separator may be used in one "
"expression. Allowed separators are: '+', '|', or ','."
)
if "," in bit_flags:
bit_flags = bit_flags.split(",")
elif "+" in bit_flags:
bit_flags = bit_flags.split("+")
elif "|" in bit_flags:
bit_flags = bit_flags.split("|")
else:
if bit_flags == "":
raise ValueError(
"Empty bit flag lists not allowed when either bitwise-NOT "
"or parenthesis are present."
)
bit_flags = [bit_flags]
if flag_name_map is not None:
try:
int(bit_flags[0])
except ValueError:
bit_flags = [flag_name_map[f] for f in bit_flags]
allow_non_flags = len(bit_flags) == 1
elif hasattr(bit_flags, "__iter__"):
if not all([_is_int(flag) for flag in bit_flags]):
if flag_name_map is not None and all(
[isinstance(flag, str) for flag in bit_flags]
):
bit_flags = [flag_name_map[f] for f in bit_flags]
else:
raise TypeError(
"Every bit flag in a list must be either an "
"integer flag value or a 'str' flag name."
)
else:
raise TypeError("Unsupported type for argument 'bit_flags'.")
bitset = set(map(int, bit_flags))
if len(bitset) != len(bit_flags):
warnings.warn("Duplicate bit flags will be ignored")
bitmask = 0
for v in bitset:
if not _is_bit_flag(v) and not allow_non_flags:
raise ValueError(
f"Input list contains invalid (not powers of two) bit flag: {v}"
)
bitmask += v
if flip_bits:
bitmask = ~bitmask
return bitmask
def bitfield_to_boolean_mask(
bitfield,
ignore_flags=0,
flip_bits=None,
good_mask_value=False,
dtype=np.bool_,
flag_name_map=None,
):
"""
    bitfield_to_boolean_mask(bitfield, ignore_flags=0, flip_bits=None, \
good_mask_value=False, dtype=numpy.bool_, flag_name_map=None)
Converts an array of bit fields to a boolean (or integer) mask array
according to a bit mask constructed from the supplied bit flags (see
``ignore_flags`` parameter).
This function is particularly useful to convert data quality arrays to
boolean masks with selective filtering of DQ flags.
Parameters
----------
bitfield : ndarray
An array of bit flags. By default, values different from zero are
interpreted as "bad" values and values equal to zero are considered
as "good" values. However, see ``ignore_flags`` parameter on how to
selectively ignore some bits in the ``bitfield`` array data.
    ignore_flags : int, str, list, None (default = 0)
        An integer bit mask, a Python list of bit flags, a comma-, ``'+'``-,
        or ``'|'``-separated string list of integer bit flags or mnemonic
        flag names that indicate which bits in the input ``bitfield`` should
        be *ignored* (i.e., zeroed), or `None`.
.. note::
            When ``ignore_flags`` is a list of flag names, the
            ``flag_name_map`` parameter must be provided.
        | Setting ``ignore_flags`` to `None` will effectively make
`bitfield_to_boolean_mask` interpret all ``bitfield`` elements
as "good" regardless of their value.
| When ``ignore_flags`` argument is an integer bit mask, it will be
combined using bitwise-NOT and bitwise-AND with each element of the
input ``bitfield`` array (``~ignore_flags & bitfield``). If the
resultant bitfield element is non-zero, that element will be
          interpreted as "bad" in the output boolean mask and it will be
interpreted as "good" otherwise. ``flip_bits`` parameter may be used
to flip the bits (``bitwise-NOT``) of the bit mask thus effectively
changing the meaning of the ``ignore_flags`` parameter from "ignore"
to "use only" these flags.
.. note::
            Setting ``ignore_flags`` to 0 will effectively assume that all
non-zero elements in the input ``bitfield`` array are to be
interpreted as "bad".
| When ``ignore_flags`` argument is a Python list of integer bit
flags, these flags are added together to create an integer bit mask.
Each item in the list must be a flag, i.e., an integer that is an
integer power of 2. In order to flip the bits of the resultant
bit mask, use ``flip_bits`` parameter.
        | Alternatively, ``ignore_flags`` may be a comma-, ``'+'``-, or
          ``'|'``-separated string list of integer bit flags that should be
          added (bitwise OR) together to create an integer bit mask.
          For example, ``'4,8'``, ``'4|8'``, and ``'4+8'`` are all equivalent
          and indicate that bit flags 4 and 8 in the input ``bitfield``
          array should be ignored when generating the boolean mask.
.. note::
``'None'``, ``'INDEF'``, and empty (or all white space) strings
are special values of string ``ignore_flags`` that are
interpreted as `None`.
.. note::
Each item in the list must be a flag, i.e., an integer that is an
integer power of 2. In addition, for convenience, an arbitrary
**single** integer is allowed and it will be interpreted as an
integer bit mask. For example, instead of ``'4,8'`` one could
simply provide string ``'12'``.
.. note::
Only one flag separator is supported at a time. ``ignore_flags``
string should not mix ``','``, ``'+'``, and ``'|'`` separators.
.. note::
When ``ignore_flags`` is a `str` and when it is prepended with
            '~', then the meaning of the ``ignore_flags`` parameter will be
reversed: now it will be interpreted as a list of bit flags to be
*used* (or *not ignored*) when deciding which elements of the
input ``bitfield`` array are "bad". Following this convention,
an ``ignore_flags`` string value of ``'~0'`` would be equivalent
to setting ``ignore_flags=None``.
.. warning::
Because prepending '~' to a string ``ignore_flags`` is equivalent
to setting ``flip_bits`` to `True`, ``flip_bits`` cannot be used
with string ``ignore_flags`` and it must be set to `None`.
flip_bits : bool, None (default = None)
Specifies whether or not to invert the bits of the bit mask either
supplied directly through ``ignore_flags`` parameter or built from the
bit flags passed through ``ignore_flags`` (only when bit flags are
passed as Python lists of integer bit flags). Occasionally, it may be
useful to *consider only specific bit flags* in the ``bitfield``
array when creating a boolean mask as opposed to *ignoring* specific
bit flags as ``ignore_flags`` behaves by default. This can be achieved
by inverting/flipping the bits of the bit mask created from
``ignore_flags`` flags which effectively changes the meaning of the
``ignore_flags`` parameter from "ignore" to "use only" these flags.
Setting ``flip_bits`` to `None` means that no bit flipping will be
performed. Bit flipping for string lists of bit flags must be
specified by prepending '~' to string bit flag lists
(see documentation for ``ignore_flags`` for more details).
.. warning::
This parameter can be set to either `True` or `False` **ONLY** when
``ignore_flags`` is either an integer bit mask or a Python
list of integer bit flags. When ``ignore_flags`` is either
`None` or a string list of flags, ``flip_bits`` **MUST** be set
to `None`.
good_mask_value : int, bool (default = False)
This parameter is used to derive the values that will be assigned to
the elements in the output boolean mask array that correspond to the
"good" bit fields (that are 0 after zeroing bits specified by
``ignore_flags``) in the input ``bitfield`` array. When
``good_mask_value`` is non-zero or ``numpy.True_`` then values in the
output boolean mask array corresponding to "good" bit fields in
``bitfield`` will be ``numpy.True_`` (if ``dtype`` is ``numpy.bool_``)
        or 1 (if ``dtype`` is of numerical type) and values corresponding
        to "bad" flags will be ``numpy.False_`` (or 0). When
``good_mask_value`` is zero or ``numpy.False_`` then the values
in the output boolean mask array corresponding to "good" bit fields
in ``bitfield`` will be ``numpy.False_`` (if ``dtype`` is
        ``numpy.bool_``) or 0 (if ``dtype`` is of numerical type) and values
        corresponding to "bad" flags will be ``numpy.True_`` (or 1).
dtype : data-type (default = ``numpy.bool_``)
The desired data-type for the output binary mask array.
flag_name_map : BitFlagNameMap
A `BitFlagNameMap` object that provides mapping from mnemonic
bit flag names to integer bit values in order to translate mnemonic
        flags to numeric values when ``ignore_flags`` is a comma- or
        '+'-separated list of mnemonic bit flag names.
Returns
-------
mask : ndarray
Returns an array of the same dimensionality as the input ``bitfield``
array whose elements can have two possible values,
e.g., ``numpy.True_`` or ``numpy.False_`` (or 1 or 0 for integer
        ``dtype``) according to the values of the input ``bitfield`` elements,
``ignore_flags`` parameter, and the ``good_mask_value`` parameter.
Examples
--------
>>> from astropy.nddata import bitmask
>>> import numpy as np
>>> dqarr = np.asarray([[0, 0, 1, 2, 0, 8, 12, 0],
... [10, 4, 0, 0, 0, 16, 6, 0]])
>>> flag_map = bitmask.extend_bit_flag_map(
... 'ST_DQ', CR=2, CLOUDY=4, RAINY=8, HOT=16, DEAD=32
... )
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=0,
... dtype=int)
array([[0, 0, 1, 1, 0, 1, 1, 0],
[1, 1, 0, 0, 0, 1, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=0,
... dtype=bool)
array([[False, False, True, True, False, True, True, False],
[ True, True, False, False, False, True, True, False]]...)
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=6,
... good_mask_value=0, dtype=int)
array([[0, 0, 1, 0, 0, 1, 1, 0],
[1, 0, 0, 0, 0, 1, 0, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=~6,
... good_mask_value=0, dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=6, dtype=int,
... flip_bits=True, good_mask_value=0)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags='~(2+4)',
... good_mask_value=0, dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=[2, 4],
... flip_bits=True, good_mask_value=0,
... dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags='~(CR,CLOUDY)',
... good_mask_value=0, dtype=int,
... flag_name_map=flag_map)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags='~(CR+CLOUDY)',
... good_mask_value=0, dtype=int,
... flag_name_map=flag_map)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
"""
bitfield = np.asarray(bitfield)
if not np.issubdtype(bitfield.dtype, np.integer):
raise TypeError("Input bitfield array must be of integer type.")
ignore_mask = interpret_bit_flags(
ignore_flags, flip_bits=flip_bits, flag_name_map=flag_name_map
)
if ignore_mask is None:
if good_mask_value:
mask = np.ones_like(bitfield, dtype=dtype)
else:
mask = np.zeros_like(bitfield, dtype=dtype)
return mask
# filter out bits beyond the maximum supported by the data type:
ignore_mask = ignore_mask & _SUPPORTED_FLAGS
# invert the "ignore" mask:
ignore_mask = np.bitwise_not(
ignore_mask, dtype=bitfield.dtype.type, casting="unsafe"
)
mask = np.empty_like(bitfield, dtype=np.bool_)
np.bitwise_and(bitfield, ignore_mask, out=mask, casting="unsafe")
if good_mask_value:
np.logical_not(mask, out=mask)
return mask.astype(dtype=dtype, subok=False, copy=False)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module implements the base CCDData class."""
import itertools
import numpy as np
from astropy import log
from astropy import units as u
from astropy.io import fits, registry
from astropy.utils.decorators import sharedmethod
from astropy.wcs import WCS
from .compat import NDDataArray
from .nduncertainty import (
InverseVariance,
NDUncertainty,
StdDevUncertainty,
VarianceUncertainty,
)
__all__ = ["CCDData", "fits_ccddata_reader", "fits_ccddata_writer"]
_known_uncertainties = (StdDevUncertainty, VarianceUncertainty, InverseVariance)
_unc_name_to_cls = {cls.__name__: cls for cls in _known_uncertainties}
_unc_cls_to_name = {cls: cls.__name__ for cls in _known_uncertainties}
# Global value which can turn on/off the unit requirements when creating a
# CCDData. Should be used with care because several functions actually break
# if the unit is None!
_config_ccd_requires_unit = True
def _arithmetic(op):
"""Decorator factory which temporarily disables the need for a unit when
creating a new CCDData instance. The final result must have a unit.
Parameters
----------
op : function
The function to apply. Supported are:
- ``np.add``
- ``np.subtract``
- ``np.multiply``
- ``np.true_divide``
Notes
-----
Should only be used on CCDData ``add``, ``subtract``, ``divide`` or
``multiply`` because only these methods from NDArithmeticMixin are
overwritten.
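    Examples
    --------
    This is how the ``CCDData`` arithmetic methods defined later in this
    module are wrapped::

        add = _arithmetic(np.add)(NDDataArray.add)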
"""
def decorator(func):
def inner(self, operand, operand2=None, **kwargs):
global _config_ccd_requires_unit
_config_ccd_requires_unit = False
result = self._prepare_then_do_arithmetic(op, operand, operand2, **kwargs)
# Wrap it again as CCDData so it checks the final unit.
_config_ccd_requires_unit = True
return result.__class__(result)
inner.__doc__ = f"See `astropy.nddata.NDArithmeticMixin.{func.__name__}`."
return sharedmethod(inner)
return decorator
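# Map each known uncertainty class to the unit relation it must satisfy
# with respect to the parent data unit (e.g., a VarianceUncertainty must
# carry the square of the parent unit).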
def _uncertainty_unit_equivalent_to_parent(uncertainty_type, unit, parent_unit):
if uncertainty_type is StdDevUncertainty:
return unit == parent_unit
elif uncertainty_type is VarianceUncertainty:
return unit == (parent_unit**2)
elif uncertainty_type is InverseVariance:
return unit == (1 / (parent_unit**2))
raise ValueError(f"unsupported uncertainty type: {uncertainty_type}")
class CCDData(NDDataArray):
"""A class describing basic CCD data.
The CCDData class is based on the NDData object and includes a data array,
uncertainty frame, mask frame, flag frame, meta data, units, and WCS
information for a single CCD image.
Parameters
----------
data : `~astropy.nddata.CCDData`-like or array-like
The actual data contained in this `~astropy.nddata.CCDData` object.
Note that the data will always be saved by *reference*, so you should
make a copy of the ``data`` before passing it in if that's the desired
behavior.
uncertainty : `~astropy.nddata.StdDevUncertainty`, \
`~astropy.nddata.VarianceUncertainty`, \
`~astropy.nddata.InverseVariance`, `numpy.ndarray` or \
None, optional
Uncertainties on the data. If the uncertainty is a `numpy.ndarray`, it
        is assumed to be, and stored as, a `~astropy.nddata.StdDevUncertainty`.
Default is ``None``.
mask : `numpy.ndarray` or None, optional
Mask for the data, given as a boolean Numpy array with a shape
matching that of the data. The values must be `False` where
the data is *valid* and `True` when it is not (like Numpy
masked arrays). If ``data`` is a numpy masked array, providing
        ``mask`` here will cause the mask from the masked array to be
ignored.
Default is ``None``.
flags : `numpy.ndarray` or `~astropy.nddata.FlagCollection` or None, \
optional
Flags giving information about each pixel. These can be specified
either as a Numpy array of any type with a shape matching that of the
data, or as a `~astropy.nddata.FlagCollection` instance which has a
shape matching that of the data.
Default is ``None``.
wcs : `~astropy.wcs.WCS` or None, optional
WCS-object containing the world coordinate system for the data.
Default is ``None``.
meta : dict-like object or None, optional
Metadata for this object. "Metadata" here means all information that
is included with this object but not part of any other attribute
of this particular object, e.g. creation date, unique identifier,
simulation parameters, exposure time, telescope name, etc.
unit : `~astropy.units.Unit` or str, optional
The units of the data.
Default is ``None``.
.. warning::
            If the unit is ``None`` or not otherwise specified, a
            ``ValueError`` will be raised.
psf : `numpy.ndarray` or None, optional
Image representation of the PSF at the center of this image. In order
for convolution to be flux-preserving, this should generally be
normalized to sum to unity.
Raises
------
ValueError
If the ``uncertainty`` or ``mask`` inputs cannot be broadcast (e.g.,
match shape) onto ``data``.
Methods
-------
read(\\*args, \\**kwargs)
        ``Classmethod`` to create a CCDData instance based on a ``FITS`` file.
This method uses :func:`fits_ccddata_reader` with the provided
parameters.
write(\\*args, \\**kwargs)
Writes the contents of the CCDData instance into a new ``FITS`` file.
This method uses :func:`fits_ccddata_writer` with the provided
parameters.
Attributes
----------
known_invalid_fits_unit_strings
A dictionary that maps commonly-used fits unit name strings that are
technically invalid to the correct valid unit type (or unit string).
This is primarily for variant names like "ELECTRONS/S" which are not
formally valid, but are unambiguous and frequently enough encountered
that it is convenient to map them to the correct unit.
Notes
-----
`~astropy.nddata.CCDData` objects can be easily converted to a regular
Numpy array using `numpy.asarray`.
For example::
>>> from astropy.nddata import CCDData
>>> import numpy as np
>>> x = CCDData([1,2,3], unit='adu')
>>> np.asarray(x)
array([1, 2, 3])
This is useful, for example, when plotting a 2D image using
matplotlib.
>>> from astropy.nddata import CCDData
>>> from matplotlib import pyplot as plt # doctest: +SKIP
>>> x = CCDData([[1,2,3], [4,5,6]], unit='adu')
>>> plt.imshow(x) # doctest: +SKIP
"""
def __init__(self, *args, **kwd):
if "meta" not in kwd:
kwd["meta"] = kwd.pop("header", None)
if "header" in kwd:
raise ValueError("can't have both header and meta.")
super().__init__(*args, **kwd)
if self._wcs is not None:
llwcs = self._wcs.low_level_wcs
if not isinstance(llwcs, WCS):
raise TypeError("the wcs must be a WCS instance.")
self._wcs = llwcs
# Check if a unit is set. This can be temporarily disabled by the
# _CCDDataUnit contextmanager.
if _config_ccd_requires_unit and self.unit is None:
raise ValueError("a unit for CCDData must be specified.")
def _slice_wcs(self, item):
"""
Override the WCS slicing behaviour so that the wcs attribute continues
to be an `astropy.wcs.WCS`.
"""
if self.wcs is None:
return None
try:
return self.wcs[item]
except Exception as err:
self._handle_wcs_slicing_error(err, item)
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = value
@property
def wcs(self):
return self._wcs
@wcs.setter
def wcs(self, value):
if value is not None and not isinstance(value, WCS):
raise TypeError("the wcs must be a WCS instance.")
self._wcs = value
@property
def unit(self):
return self._unit
@unit.setter
def unit(self, value):
self._unit = u.Unit(value)
@property
def psf(self):
return self._psf
@psf.setter
def psf(self, value):
if value is not None and not isinstance(value, np.ndarray):
raise TypeError("The psf must be a numpy array.")
self._psf = value
@property
def header(self):
return self._meta
@header.setter
def header(self, value):
self.meta = value
@property
def uncertainty(self):
return self._uncertainty
@uncertainty.setter
def uncertainty(self, value):
if value is not None:
if isinstance(value, NDUncertainty):
if getattr(value, "_parent_nddata", None) is not None:
value = value.__class__(value, copy=False)
self._uncertainty = value
elif isinstance(value, np.ndarray):
if value.shape != self.shape:
raise ValueError("uncertainty must have same shape as data.")
self._uncertainty = StdDevUncertainty(value)
log.info(
"array provided for uncertainty; assuming it is a "
"StdDevUncertainty."
)
else:
raise TypeError(
"uncertainty must be an instance of a "
"NDUncertainty object or a numpy array."
)
self._uncertainty.parent_nddata = self
else:
self._uncertainty = value
def to_hdu(
self,
hdu_mask="MASK",
hdu_uncertainty="UNCERT",
hdu_flags=None,
wcs_relax=True,
key_uncertainty_type="UTYPE",
as_image_hdu=False,
hdu_psf="PSFIMAGE",
):
"""Creates an HDUList object from a CCDData object.
Parameters
----------
hdu_mask, hdu_uncertainty, hdu_flags, hdu_psf : str or None, optional
If it is a string append this attribute to the HDUList as
`~astropy.io.fits.ImageHDU` with the string as extension name.
Flags are not supported at this time. If ``None`` this attribute
is not appended.
Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty,
``'PSFIMAGE'`` for psf, and `None` for flags.
wcs_relax : bool
Value of the ``relax`` parameter to use in converting the WCS to a
FITS header using `~astropy.wcs.WCS.to_header`. The common
            ``CTYPE`` values ``RA---TAN-SIP`` and ``DEC--TAN-SIP`` require
``relax=True`` for the ``-SIP`` part of the ``CTYPE`` to be
preserved.
key_uncertainty_type : str, optional
The header key name for the class name of the uncertainty (if any)
that is used to store the uncertainty type in the uncertainty hdu.
Default is ``UTYPE``.
.. versionadded:: 3.1
as_image_hdu : bool
If this option is `True`, the first item of the returned
`~astropy.io.fits.HDUList` is a `~astropy.io.fits.ImageHDU`, instead
of the default `~astropy.io.fits.PrimaryHDU`.
Raises
------
ValueError
- If ``self.mask`` is set but not a `numpy.ndarray`.
- If ``self.uncertainty`` is set but not a astropy uncertainty type.
            - If ``self.uncertainty`` is set but has a unit different from
              that of ``self.data``.
NotImplementedError
Saving flags is not supported.
Returns
-------
hdulist : `~astropy.io.fits.HDUList`
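        Examples
        --------
        A minimal sketch (the data values are illustrative):

        >>> import numpy as np
        >>> from astropy.nddata import CCDData
        >>> ccd = CCDData(np.ones((2, 2)), unit='adu')
        >>> hdulist = ccd.to_hdu()
        >>> len(hdulist)  # only a PrimaryHDU; no mask or uncertainty was set
        1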
"""
if isinstance(self.header, fits.Header):
# Copy here so that we can modify the HDU header by adding WCS
# information without changing the header of the CCDData object.
header = self.header.copy()
else:
# Because _insert_in_metadata_fits_safe is written as a method
# we need to create a dummy CCDData instance to hold the FITS
# header we are constructing. This probably indicates that
# _insert_in_metadata_fits_safe should be rewritten in a more
# sensible way...
dummy_ccd = CCDData([1], meta=fits.Header(), unit="adu")
for k, v in self.header.items():
dummy_ccd._insert_in_metadata_fits_safe(k, v)
header = dummy_ccd.header
if self.unit is not u.dimensionless_unscaled:
header["bunit"] = self.unit.to_string()
if self.wcs:
# Simply extending the FITS header with the WCS can lead to
# duplicates of the WCS keywords; iterating over the WCS
# header should be safer.
#
# Turns out if I had read the io.fits.Header.extend docs more
# carefully, I would have realized that the keywords exist to
# avoid duplicates and preserve, as much as possible, the
# structure of the commentary cards.
#
# Note that until astropy/astropy#3967 is closed, the extend
# will fail if there are comment cards in the WCS header but
# not header.
wcs_header = self.wcs.to_header(relax=wcs_relax)
header.extend(wcs_header, useblanks=False, update=True)
if as_image_hdu:
hdus = [fits.ImageHDU(self.data, header)]
else:
hdus = [fits.PrimaryHDU(self.data, header)]
if hdu_mask and self.mask is not None:
# Always assuming that the mask is a np.ndarray (check that it has
# a 'shape').
if not hasattr(self.mask, "shape"):
raise ValueError("only a numpy.ndarray mask can be saved.")
# Convert boolean mask to uint since io.fits cannot handle bool.
hduMask = fits.ImageHDU(self.mask.astype(np.uint8), name=hdu_mask)
hdus.append(hduMask)
if hdu_uncertainty and self.uncertainty is not None:
# We need to save some kind of information which uncertainty was
# used so that loading the HDUList can infer the uncertainty type.
# No idea how this can be done so only allow StdDevUncertainty.
uncertainty_cls = self.uncertainty.__class__
if uncertainty_cls not in _known_uncertainties:
raise ValueError(
f"only uncertainties of type {_known_uncertainties} can be saved."
)
uncertainty_name = _unc_cls_to_name[uncertainty_cls]
hdr_uncertainty = fits.Header()
hdr_uncertainty[key_uncertainty_type] = uncertainty_name
            # Assuming the uncertainty is a StdDevUncertainty, save just the
            # array. This might be problematic if the uncertainty has a unit
            # differing from the data, so abort for different units. This is
            # important for astropy > 1.2.
if hasattr(self.uncertainty, "unit") and self.uncertainty.unit is not None:
if not _uncertainty_unit_equivalent_to_parent(
uncertainty_cls, self.uncertainty.unit, self.unit
):
raise ValueError(
"saving uncertainties with a unit that is not "
"equivalent to the unit from the data unit is not "
"supported."
)
hduUncert = fits.ImageHDU(
self.uncertainty.array, hdr_uncertainty, name=hdu_uncertainty
)
hdus.append(hduUncert)
if hdu_flags and self.flags:
raise NotImplementedError(
"adding the flags to a HDU is not supported at this time."
)
if hdu_psf and self.psf is not None:
# The PSF is an image, so write it as a separate ImageHDU.
hdu_psf = fits.ImageHDU(self.psf, name=hdu_psf)
hdus.append(hdu_psf)
hdulist = fits.HDUList(hdus)
return hdulist
def copy(self):
"""
Return a copy of the CCDData object.
"""
return self.__class__(self, copy=True)
add = _arithmetic(np.add)(NDDataArray.add)
subtract = _arithmetic(np.subtract)(NDDataArray.subtract)
multiply = _arithmetic(np.multiply)(NDDataArray.multiply)
divide = _arithmetic(np.true_divide)(NDDataArray.divide)
def _insert_in_metadata_fits_safe(self, key, value):
"""
Insert key/value pair into metadata in a way that FITS can serialize.
Parameters
----------
key : str
Key to be inserted in dictionary.
value : str or None
Value to be inserted.
Notes
-----
This addresses a shortcoming of the FITS standard. There are length
restrictions on both the ``key`` (8 characters) and ``value`` (72
characters) in the FITS standard. There is a convention for handling
long keywords and a convention for handling long values, but the
two conventions cannot be used at the same time.
This addresses that case by checking the length of the ``key`` and
``value`` and, if necessary, shortening the key.
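        For example, a 20-character key with an 80-character value is stored
        under the first 8 characters of the key, while a ``HIERARCH`` card
        maps the full key name to that shortened name.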
"""
if len(key) > 8 and len(value) > 72:
short_name = key[:8]
self.meta[f"HIERARCH {key.upper()}"] = (
short_name,
f"Shortened name for {key}",
)
self.meta[short_name] = value
else:
self.meta[key] = value
    # A dictionary mapping "known" invalid FITS unit strings to valid units.
known_invalid_fits_unit_strings = {
"ELECTRONS/S": u.electron / u.s,
"ELECTRONS": u.electron,
"electrons": u.electron,
}
# These need to be importable by the tests...
_KEEP_THESE_KEYWORDS_IN_HEADER = ["JD-OBS", "MJD-OBS", "DATE-OBS"]
_PCs = {"PC1_1", "PC1_2", "PC2_1", "PC2_2"}
_CDs = {"CD1_1", "CD1_2", "CD2_1", "CD2_2"}
def _generate_wcs_and_update_header(hdr):
"""
Generate a WCS object from a header and remove the WCS-specific
keywords from the header.
Parameters
----------
hdr : astropy.io.fits.header or other dict-like
Returns
-------
new_header, wcs
"""
# Try constructing a WCS object.
try:
wcs = WCS(hdr)
except Exception as exc:
# Normally WCS only raises Warnings and doesn't fail but in rare
# cases (malformed header) it could fail...
log.info(
"An exception happened while extracting WCS information from "
"the Header.\n{}: {}".format(type(exc).__name__, str(exc))
)
return hdr, None
# Test for success by checking to see if the wcs ctype has a non-empty
# value, return None for wcs if ctype is empty.
if not wcs.wcs.ctype[0]:
return (hdr, None)
new_hdr = hdr.copy()
# If the keywords below are in the header they are also added to WCS.
# It seems like they should *not* be removed from the header, though.
wcs_header = wcs.to_header(relax=True)
for k in wcs_header:
if k not in _KEEP_THESE_KEYWORDS_IN_HEADER:
new_hdr.remove(k, ignore_missing=True)
# Check that this does not result in an inconsistent header WCS if the WCS
# is converted back to a header.
if (_PCs & set(wcs_header)) and (_CDs & set(new_hdr)):
# The PCi_j representation is used by the astropy.wcs object,
# so CDi_j keywords were not removed from new_hdr. Remove them now.
for cd in _CDs:
new_hdr.remove(cd, ignore_missing=True)
# The other case -- CD in the header produced by astropy.wcs -- should
# never happen based on [1], which computes the matrix in PC form.
# [1]: https://github.com/astropy/astropy/blob/1cf277926d3598dd672dd528504767c37531e8c9/cextern/wcslib/C/wcshdr.c#L596
#
# The test test_ccddata.test_wcs_keyword_removal_for_wcs_test_files() does
# check for the possibility that both PC and CD are present in the result
# so if the implementation of to_header changes in wcslib in the future
# then the tests should catch it, and then this code will need to be
# updated.
# We need to check for any SIP coefficients that got left behind if the
# header has SIP.
if wcs.sip is not None:
keyword = "{}_{}_{}"
polynomials = ["A", "B", "AP", "BP"]
for poly in polynomials:
order = wcs.sip.__getattribute__(f"{poly.lower()}_order")
for i, j in itertools.product(range(order), repeat=2):
new_hdr.remove(keyword.format(poly, i, j), ignore_missing=True)
return (new_hdr, wcs)
def fits_ccddata_reader(
filename,
hdu=0,
unit=None,
hdu_uncertainty="UNCERT",
hdu_mask="MASK",
hdu_flags=None,
key_uncertainty_type="UTYPE",
hdu_psf="PSFIMAGE",
**kwd,
):
"""
Generate a CCDData object from a FITS file.
Parameters
----------
filename : str
Name of fits file.
hdu : int, str, tuple of (str, int), optional
Index or other identifier of the Header Data Unit of the FITS
        file from which CCDData should be initialized. If zero and there is
        no data in the primary HDU, it will search for the first
extension HDU with data. The header will be added to the primary HDU.
Default is ``0``.
unit : `~astropy.units.Unit`, optional
        Units of the image data. If this argument is provided and there is
        also a unit for the image in the FITS header (the ``BUNIT`` keyword),
        this argument takes precedence and a log message is emitted.
Default is ``None``.
hdu_uncertainty : str or None, optional
FITS extension from which the uncertainty should be initialized. If the
extension does not exist the uncertainty of the CCDData is ``None``.
Default is ``'UNCERT'``.
hdu_mask : str or None, optional
FITS extension from which the mask should be initialized. If the
extension does not exist the mask of the CCDData is ``None``.
Default is ``'MASK'``.
hdu_flags : str or None, optional
Currently not implemented.
Default is ``None``.
key_uncertainty_type : str, optional
The header key name where the class name of the uncertainty is stored
in the hdu of the uncertainty (if any).
Default is ``UTYPE``.
.. versionadded:: 3.1
hdu_psf : str or None, optional
FITS extension from which the psf image should be initialized. If the
extension does not exist the psf of the CCDData is `None`.
kwd :
Any additional keyword parameters are passed through to the FITS reader
in :mod:`astropy.io.fits`; see Notes for additional discussion.
Notes
-----
    FITS files that contain scaled data (e.g., unsigned integer images) will
be scaled and the keywords used to manage scaled data in
:mod:`astropy.io.fits` are disabled.
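    Examples
    --------
    A minimal sketch (the file name is illustrative):

    >>> from astropy.nddata import CCDData
    >>> ccd = CCDData.read('image.fits', unit='adu')  # doctest: +SKIP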
"""
unsupport_open_keywords = {
"do_not_scale_image_data": "Image data must be scaled.",
"scale_back": "Scale information is not preserved.",
}
for key, msg in unsupport_open_keywords.items():
if key in kwd:
prefix = f"unsupported keyword: {key}."
raise TypeError(" ".join([prefix, msg]))
with fits.open(filename, **kwd) as hdus:
hdr = hdus[hdu].header
if hdu_uncertainty is not None and hdu_uncertainty in hdus:
unc_hdu = hdus[hdu_uncertainty]
stored_unc_name = unc_hdu.header.get(key_uncertainty_type, "None")
# For compatibility reasons the default is standard deviation
# uncertainty because files could have been created before the
# uncertainty type was stored in the header.
unc_type = _unc_name_to_cls.get(stored_unc_name, StdDevUncertainty)
uncertainty = unc_type(unc_hdu.data)
else:
uncertainty = None
if hdu_mask is not None and hdu_mask in hdus:
# Mask is saved as uint but we want it to be boolean.
mask = hdus[hdu_mask].data.astype(np.bool_)
else:
mask = None
if hdu_flags is not None and hdu_flags in hdus:
raise NotImplementedError("loading flags is currently not supported.")
if hdu_psf is not None and hdu_psf in hdus:
psf = hdus[hdu_psf].data
else:
psf = None
# search for the first instance with data if
# the primary header is empty.
if hdu == 0 and hdus[hdu].data is None:
for i in range(len(hdus)):
if (
hdus.info(hdu)[i][3] == "ImageHDU"
and hdus.fileinfo(i)["datSpan"] > 0
):
hdu = i
comb_hdr = hdus[hdu].header.copy()
# Add header values from the primary header that aren't
# present in the extension header.
comb_hdr.extend(hdr, unique=True)
hdr = comb_hdr
log.info(f"first HDU with data is extension {hdu}.")
break
if "bunit" in hdr:
fits_unit_string = hdr["bunit"]
# patch to handle FITS files using ADU for the unit instead of the
# standard version of 'adu'
if fits_unit_string.strip().lower() == "adu":
fits_unit_string = fits_unit_string.lower()
else:
fits_unit_string = None
if fits_unit_string:
if unit is None:
# Convert the BUNIT header keyword to a unit and if that's not
# possible raise a meaningful error message.
try:
kifus = CCDData.known_invalid_fits_unit_strings
if fits_unit_string in kifus:
fits_unit_string = kifus[fits_unit_string]
fits_unit_string = u.Unit(fits_unit_string)
except ValueError:
raise ValueError(
"The Header value for the key BUNIT ({}) cannot be "
"interpreted as valid unit. To successfully read the "
"file as CCDData you can pass in a valid `unit` "
"argument explicitly or change the header of the FITS "
"file before reading it.".format(fits_unit_string)
)
else:
log.info(
"using the unit {} passed to the FITS reader instead "
"of the unit {} in the FITS file.".format(unit, fits_unit_string)
)
use_unit = unit or fits_unit_string
hdr, wcs = _generate_wcs_and_update_header(hdr)
ccd_data = CCDData(
hdus[hdu].data,
meta=hdr,
unit=use_unit,
mask=mask,
uncertainty=uncertainty,
wcs=wcs,
psf=psf,
)
return ccd_data
def fits_ccddata_writer(
ccd_data,
filename,
hdu_mask="MASK",
hdu_uncertainty="UNCERT",
hdu_flags=None,
key_uncertainty_type="UTYPE",
as_image_hdu=False,
hdu_psf="PSFIMAGE",
**kwd,
):
"""
Write CCDData object to FITS file.
Parameters
----------
ccd_data : CCDData
Object to write.
filename : str
Name of file.
hdu_mask, hdu_uncertainty, hdu_flags, hdu_psf : str or None, optional
If it is a string append this attribute to the HDUList as
`~astropy.io.fits.ImageHDU` with the string as extension name.
Flags are not supported at this time. If ``None`` this attribute
is not appended.
Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty,
``'PSFIMAGE'`` for psf, and `None` for flags.
key_uncertainty_type : str, optional
The header key name for the class name of the uncertainty (if any)
that is used to store the uncertainty type in the uncertainty hdu.
Default is ``UTYPE``.
.. versionadded:: 3.1
as_image_hdu : bool
If this option is `True`, the first item of the returned
`~astropy.io.fits.HDUList` is a `~astropy.io.fits.ImageHDU`, instead of
the default `~astropy.io.fits.PrimaryHDU`.
kwd :
All additional keywords are passed to :py:mod:`astropy.io.fits`
Raises
------
ValueError
- If ``self.mask`` is set but not a `numpy.ndarray`.
- If ``self.uncertainty`` is set but not a
`~astropy.nddata.StdDevUncertainty`.
        - If ``self.uncertainty`` is set but has a unit different from
          that of ``self.data``.
NotImplementedError
Saving flags is not supported.
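    Examples
    --------
    A minimal sketch (the file name is illustrative; ``ccd_data`` is an
    existing `CCDData` instance):

    >>> ccd_data.write('image.fits', overwrite=True)  # doctest: +SKIP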
"""
hdu = ccd_data.to_hdu(
hdu_mask=hdu_mask,
hdu_uncertainty=hdu_uncertainty,
key_uncertainty_type=key_uncertainty_type,
hdu_flags=hdu_flags,
as_image_hdu=as_image_hdu,
hdu_psf=hdu_psf,
)
if as_image_hdu:
hdu.insert(0, fits.PrimaryHDU())
hdu.writeto(filename, **kwd)
with registry.delay_doc_updates(CCDData):
registry.register_reader("fits", CCDData, fits_ccddata_reader)
registry.register_writer("fits", CCDData, fits_ccddata_writer)
registry.register_identifier("fits", CCDData, fits.connect.is_fits)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module includes helper functions for array operations.
"""
from copy import deepcopy
import numpy as np
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io.fits.hdu.image import Section
from astropy.utils import lazyproperty
from astropy.wcs import Sip
from astropy.wcs.utils import proj_plane_pixel_scales, skycoord_to_pixel
__all__ = [
"extract_array",
"add_array",
"subpixel_indices",
"overlap_slices",
"NoOverlapError",
"PartialOverlapError",
"Cutout2D",
]
class NoOverlapError(ValueError):
"""Raised when determining the overlap of non-overlapping arrays."""
pass
class PartialOverlapError(ValueError):
"""Raised when arrays only partially overlap."""
pass
def overlap_slices(large_array_shape, small_array_shape, position, mode="partial"):
"""
Get slices for the overlapping part of a small and a large array.
Given a certain position of the center of the small array, with
respect to the large array, tuples of slices are returned which can be
used to extract, add or subtract the small array at the given
position. This function takes care of the correct behavior at the
    boundaries, where the small array is cut off appropriately.
Integer positions are at the pixel centers.
Parameters
----------
large_array_shape : tuple of int or int
The shape of the large array (for 1D arrays, this can be an
`int`).
small_array_shape : int or tuple thereof
The shape of the small array (for 1D arrays, this can be an
`int`). See the ``mode`` keyword for additional details.
position : number or tuple thereof
The position of the small array's center with respect to the
large array. The pixel coordinates should be in the same order
as the array shape. Integer positions are at the pixel centers.
For any axis where ``small_array_shape`` is even, the position
is rounded up, e.g. extracting two elements with a center of
``1`` will define the extracted region as ``[0, 1]``.
mode : {'partial', 'trim', 'strict'}, optional
In ``'partial'`` mode, a partial overlap of the small and the
large array is sufficient. The ``'trim'`` mode is similar to
the ``'partial'`` mode, but ``slices_small`` will be adjusted to
return only the overlapping elements. In the ``'strict'`` mode,
the small array has to be fully contained in the large array,
otherwise an `~astropy.nddata.utils.PartialOverlapError` is
raised. In all modes, non-overlapping arrays will raise a
`~astropy.nddata.utils.NoOverlapError`.
Returns
-------
slices_large : tuple of slice
A tuple of slice objects for each axis of the large array, such
that ``large_array[slices_large]`` extracts the region of the
large array that overlaps with the small array.
slices_small : tuple of slice
A tuple of slice objects for each axis of the small array, such
that ``small_array[slices_small]`` extracts the region that is
inside the large array.
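    Examples
    --------
    Two 1D sketches, one small array fully inside the large array and one
    overlapping its lower edge:

    >>> from astropy.nddata.utils import overlap_slices
    >>> overlap_slices((5,), (3,), (2,))
    ((slice(1, 4, None),), (slice(0, 3, None),))
    >>> overlap_slices((5,), (3,), (0,))
    ((slice(0, 2, None),), (slice(1, 3, None),))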
"""
if mode not in ["partial", "trim", "strict"]:
raise ValueError('Mode can be only "partial", "trim", or "strict".')
if np.isscalar(small_array_shape):
small_array_shape = (small_array_shape,)
if np.isscalar(large_array_shape):
large_array_shape = (large_array_shape,)
if np.isscalar(position):
position = (position,)
if any(~np.isfinite(position)):
raise ValueError("Input position contains invalid values (NaNs or infs).")
if len(small_array_shape) != len(large_array_shape):
raise ValueError(
'"large_array_shape" and "small_array_shape" must '
"have the same number of dimensions."
)
if len(small_array_shape) != len(position):
raise ValueError(
'"position" must have the same number of dimensions as "small_array_shape".'
)
# define the min/max pixel indices
indices_min = [
int(np.ceil(pos - (small_shape / 2.0)))
for (pos, small_shape) in zip(position, small_array_shape)
]
indices_max = [
int(np.ceil(pos + (small_shape / 2.0)))
for (pos, small_shape) in zip(position, small_array_shape)
]
for e_max in indices_max:
if e_max < 0:
raise NoOverlapError("Arrays do not overlap.")
for e_min, large_shape in zip(indices_min, large_array_shape):
if e_min >= large_shape:
raise NoOverlapError("Arrays do not overlap.")
if mode == "strict":
for e_min in indices_min:
if e_min < 0:
raise PartialOverlapError("Arrays overlap only partially.")
for e_max, large_shape in zip(indices_max, large_array_shape):
if e_max > large_shape:
raise PartialOverlapError("Arrays overlap only partially.")
# Set up slices
slices_large = tuple(
slice(max(0, indices_min), min(large_shape, indices_max))
for (indices_min, indices_max, large_shape) in zip(
indices_min, indices_max, large_array_shape
)
)
if mode == "trim":
slices_small = tuple(slice(0, slc.stop - slc.start) for slc in slices_large)
else:
slices_small = tuple(
slice(
max(0, -indices_min),
min(large_shape - indices_min, indices_max - indices_min),
)
for (indices_min, indices_max, large_shape) in zip(
indices_min, indices_max, large_array_shape
)
)
return slices_large, slices_small
def extract_array(
array_large,
shape,
position,
mode="partial",
fill_value=np.nan,
return_position=False,
):
"""
Extract a smaller array of the given shape and position from a
larger array.
Parameters
----------
array_large : ndarray
The array from which to extract the small array.
shape : int or tuple thereof
The shape of the extracted array (for 1D arrays, this can be an
`int`). See the ``mode`` keyword for additional details.
position : number or tuple thereof
The position of the small array's center with respect to the
large array. The pixel coordinates should be in the same order
as the array shape. Integer positions are at the pixel centers
(for 1D arrays, this can be a number).
mode : {'partial', 'trim', 'strict'}, optional
The mode used for extracting the small array. For the
``'partial'`` and ``'trim'`` modes, a partial overlap of the
small array and the large array is sufficient. For the
``'strict'`` mode, the small array has to be fully contained
within the large array, otherwise an
`~astropy.nddata.utils.PartialOverlapError` is raised. In all
modes, non-overlapping arrays will raise a
`~astropy.nddata.utils.NoOverlapError`. In ``'partial'`` mode,
positions in the small array that do not overlap with the large
array will be filled with ``fill_value``. In ``'trim'`` mode
only the overlapping elements are returned, thus the resulting
small array may be smaller than the requested ``shape``.
fill_value : number, optional
If ``mode='partial'``, the value to fill pixels in the extracted
small array that do not overlap with the input ``array_large``.
``fill_value`` will be changed to have the same ``dtype`` as the
``array_large`` array, with one exception. If ``array_large``
has integer type and ``fill_value`` is ``np.nan``, then a
`ValueError` will be raised.
return_position : bool, optional
If `True`, return the coordinates of ``position`` in the
coordinate system of the returned array.
Returns
-------
array_small : ndarray
The extracted array.
new_position : tuple
If ``return_position`` is true, this tuple will contain the
coordinates of the input ``position`` in the coordinate system
of ``array_small``. Note that for partially overlapping arrays,
``new_position`` might actually be outside of the
``array_small``; ``array_small[new_position]`` might give wrong
results if any element in ``new_position`` is negative.
Examples
--------
We consider a large array with the shape 11x10, from which we extract
a small array of shape 3x5:
>>> import numpy as np
>>> from astropy.nddata.utils import extract_array
>>> large_array = np.arange(110).reshape((11, 10))
>>> extract_array(large_array, (3, 5), (7, 7))
array([[65, 66, 67, 68, 69],
[75, 76, 77, 78, 79],
[85, 86, 87, 88, 89]])
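    If the requested region extends beyond ``array_large``, ``'partial'``
    mode fills the non-overlapping elements with ``fill_value``:

    >>> extract_array(large_array, (3, 5), (0, 0), mode='partial', fill_value=-99)
    array([[-99, -99, -99, -99, -99],
           [-99, -99,   0,   1,   2],
           [-99, -99,  10,  11,  12]])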
"""
if np.isscalar(shape):
shape = (shape,)
if np.isscalar(position):
position = (position,)
if mode not in ["partial", "trim", "strict"]:
raise ValueError("Valid modes are 'partial', 'trim', and 'strict'.")
large_slices, small_slices = overlap_slices(
array_large.shape, shape, position, mode=mode
)
extracted_array = array_large[large_slices]
if return_position:
new_position = [i - s.start for i, s in zip(position, large_slices)]
# Extracting on the edges is presumably a rare case, so treat special here
if (extracted_array.shape != shape) and (mode == "partial"):
extracted_array = np.zeros(shape, dtype=array_large.dtype)
try:
extracted_array[:] = fill_value
except ValueError as exc:
exc.args += (
"fill_value is inconsistent with the data type of "
"the input array (e.g., fill_value cannot be set to "
"np.nan if the input array has integer type). Please "
"change either the input array dtype or the "
"fill_value.",
)
raise exc
extracted_array[small_slices] = array_large[large_slices]
if return_position:
new_position = [i + s.start for i, s in zip(new_position, small_slices)]
if return_position:
return extracted_array, tuple(new_position)
else:
return extracted_array
def add_array(array_large, array_small, position):
"""
Add a smaller array at a given position in a larger array.
Parameters
----------
array_large : ndarray
Large array.
array_small : ndarray
Small array to add. Can be equal to ``array_large`` in size in a given
dimension, but not larger.
position : tuple
Position of the small array's center, with respect to the large array.
Coordinates should be in the same order as the array shape.
Returns
-------
new_array : ndarray
The new array formed from the sum of ``array_large`` and
``array_small``.
Notes
-----
The addition is done in-place.
Examples
--------
We consider a large array of zeros with the shape 5x5 and a small
array of ones with a shape of 3x3:
>>> import numpy as np
>>> from astropy.nddata.utils import add_array
>>> large_array = np.zeros((5, 5))
>>> small_array = np.ones((3, 3))
>>> add_array(large_array, small_array, (1, 2)) # doctest: +FLOAT_CMP
array([[0., 1., 1., 1., 0.],
[0., 1., 1., 1., 0.],
[0., 1., 1., 1., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]])
"""
# Check if large array is not smaller
if all(
large_shape >= small_shape
for (large_shape, small_shape) in zip(array_large.shape, array_small.shape)
):
large_slices, small_slices = overlap_slices(
array_large.shape, array_small.shape, position
)
array_large[large_slices] += array_small[small_slices]
return array_large
else:
raise ValueError("Can't add array. Small array too large.")
def subpixel_indices(position, subsampling):
"""
Convert decimal points to indices, given a subsampling factor.
This discards the integer part of the position and uses only the decimal
place, and converts this to a subpixel position depending on the
subsampling specified. The center of a pixel corresponds to an integer
position.
Parameters
----------
position : ndarray or array-like
Positions in pixels.
subsampling : int
Subsampling factor per pixel.
Returns
-------
indices : ndarray
The integer subpixel indices corresponding to the input positions.
Examples
--------
If no subsampling is used, then the subpixel indices returned are always 0:
>>> from astropy.nddata.utils import subpixel_indices
>>> subpixel_indices([1.2, 3.4, 5.6], 1) # doctest: +FLOAT_CMP
array([0., 0., 0.])
    If instead we use a subsampling of 2, we see that for the first two values
    (1.2 and 3.4) the subpixel index is 1, while for 5.5 it is 0. This is
    because 1.2 and 3.4 lie in the right half of the pixels centered at 1 and
    3, while 5.5 lies at the left edge of the pixel centered at 6.
>>> subpixel_indices([1.2, 3.4, 5.5], 2) # doctest: +FLOAT_CMP
array([1., 1., 0.])
"""
# Get decimal points
fractions = np.modf(np.asanyarray(position) + 0.5)[0]
return np.floor(fractions * subsampling)
class Cutout2D:
"""
Create a cutout object from a 2D array.
The returned object will contain a 2D cutout array. If
``copy=False`` (default), the cutout array is a view into the
original ``data`` array, otherwise the cutout array will contain a
copy of the original data.
If a `~astropy.wcs.WCS` object is input, then the returned object
will also contain a copy of the original WCS, but updated for the
cutout array.
For example usage, see :ref:`astropy:cutout_images`.
.. warning::
The cutout WCS object does not currently handle cases where the
input WCS object contains distortion lookup tables described in
the `FITS WCS distortion paper
<https://www.atnf.csiro.au/people/mcalabre/WCS/dcs_20040422.pdf>`__.
Parameters
----------
data : ndarray
The 2D data array from which to extract the cutout array.
position : tuple or `~astropy.coordinates.SkyCoord`
The position of the cutout array's center with respect to
the ``data`` array. The position can be specified either as
a ``(x, y)`` tuple of pixel coordinates or a
`~astropy.coordinates.SkyCoord`, in which case ``wcs`` is a
required input.
size : int, array-like, or `~astropy.units.Quantity`
The size of the cutout array along each axis. If ``size``
is a scalar number or a scalar `~astropy.units.Quantity`,
then a square cutout of ``size`` will be created. If
``size`` has two elements, they should be in ``(ny, nx)``
order. Scalar numbers in ``size`` are assumed to be in
units of pixels. ``size`` can also be a
`~astropy.units.Quantity` object or contain
`~astropy.units.Quantity` objects. Such
`~astropy.units.Quantity` objects must be in pixel or
angular units. For all cases, ``size`` will be converted to
        an integer number of pixels, rounding to the nearest
integer. See the ``mode`` keyword for additional details on
the final cutout size.
.. note::
If ``size`` is in angular units, the cutout size is
converted to pixels using the pixel scales along each
axis of the image at the ``CRPIX`` location. Projection
and other non-linear distortions are not taken into
account.
wcs : `~astropy.wcs.WCS`, optional
A WCS object associated with the input ``data`` array. If
``wcs`` is not `None`, then the returned cutout object will
contain a copy of the updated WCS for the cutout data array.
mode : {'trim', 'partial', 'strict'}, optional
The mode used for creating the cutout data array. For the
``'partial'`` and ``'trim'`` modes, a partial overlap of the
cutout array and the input ``data`` array is sufficient.
For the ``'strict'`` mode, the cutout array has to be fully
contained within the ``data`` array, otherwise an
`~astropy.nddata.utils.PartialOverlapError` is raised. In
all modes, non-overlapping arrays will raise a
`~astropy.nddata.utils.NoOverlapError`. In ``'partial'``
mode, positions in the cutout array that do not overlap with
the ``data`` array will be filled with ``fill_value``. In
``'trim'`` mode only the overlapping elements are returned,
thus the resulting cutout array may be smaller than the
requested ``shape``.
fill_value : float or int, optional
If ``mode='partial'``, the value to fill pixels in the
cutout array that do not overlap with the input ``data``.
``fill_value`` must have the same ``dtype`` as the input
``data`` array.
copy : bool, optional
If `False` (default), then the cutout data will be a view
into the original ``data`` array. If `True`, then the
cutout data will hold a copy of the original ``data`` array.
Attributes
----------
data : 2D `~numpy.ndarray`
The 2D cutout array.
shape : (2,) tuple
The ``(ny, nx)`` shape of the cutout array.
shape_input : (2,) tuple
The ``(ny, nx)`` shape of the input (original) array.
input_position_cutout : (2,) tuple
The (unrounded) ``(x, y)`` position with respect to the cutout
array.
input_position_original : (2,) tuple
The original (unrounded) ``(x, y)`` input position (with respect
to the original array).
slices_original : (2,) tuple of slice object
A tuple of slice objects for the minimal bounding box of the
cutout with respect to the original array. For
``mode='partial'``, the slices are for the valid (non-filled)
cutout values.
slices_cutout : (2,) tuple of slice object
A tuple of slice objects for the minimal bounding box of the
cutout with respect to the cutout array. For
``mode='partial'``, the slices are for the valid (non-filled)
cutout values.
xmin_original, ymin_original, xmax_original, ymax_original : float
The minimum and maximum ``x`` and ``y`` indices of the minimal
rectangular region of the cutout array with respect to the
original array. For ``mode='partial'``, the bounding box
indices are for the valid (non-filled) cutout values. These
values are the same as those in `bbox_original`.
xmin_cutout, ymin_cutout, xmax_cutout, ymax_cutout : float
The minimum and maximum ``x`` and ``y`` indices of the minimal
rectangular region of the cutout array with respect to the
cutout array. For ``mode='partial'``, the bounding box indices
are for the valid (non-filled) cutout values. These values are
the same as those in `bbox_cutout`.
wcs : `~astropy.wcs.WCS` or None
A WCS object associated with the cutout array if a ``wcs``
was input.
Examples
--------
>>> import numpy as np
>>> from astropy.nddata.utils import Cutout2D
>>> from astropy import units as u
>>> data = np.arange(20.).reshape(5, 4)
>>> cutout1 = Cutout2D(data, (2, 2), (3, 3))
>>> print(cutout1.data) # doctest: +FLOAT_CMP
[[ 5. 6. 7.]
[ 9. 10. 11.]
[13. 14. 15.]]
>>> print(cutout1.center_original)
(2.0, 2.0)
>>> print(cutout1.center_cutout)
(1.0, 1.0)
>>> print(cutout1.origin_original)
(1, 1)
>>> cutout2 = Cutout2D(data, (2, 2), 3)
>>> print(cutout2.data) # doctest: +FLOAT_CMP
[[ 5. 6. 7.]
[ 9. 10. 11.]
[13. 14. 15.]]
>>> size = u.Quantity([3, 3], u.pixel)
>>> cutout3 = Cutout2D(data, (0, 0), size)
>>> print(cutout3.data) # doctest: +FLOAT_CMP
[[0. 1.]
[4. 5.]]
>>> cutout4 = Cutout2D(data, (0, 0), (3 * u.pixel, 3))
>>> print(cutout4.data) # doctest: +FLOAT_CMP
[[0. 1.]
[4. 5.]]
>>> cutout5 = Cutout2D(data, (0, 0), (3, 3), mode='partial')
>>> print(cutout5.data) # doctest: +FLOAT_CMP
[[nan nan nan]
[nan 0. 1.]
[nan 4. 5.]]
"""
def __init__(
self, data, position, size, wcs=None, mode="trim", fill_value=np.nan, copy=False
):
if wcs is None:
wcs = getattr(data, "wcs", None)
if isinstance(position, SkyCoord):
if wcs is None:
raise ValueError("wcs must be input if position is a SkyCoord")
position = skycoord_to_pixel(position, wcs, mode="all") # (x, y)
if np.isscalar(size):
size = np.repeat(size, 2)
# special handling for a scalar Quantity
if isinstance(size, u.Quantity):
size = np.atleast_1d(size)
if len(size) == 1:
size = np.repeat(size, 2)
if len(size) > 2:
raise ValueError("size must have at most two elements")
shape = np.zeros(2).astype(int)
pixel_scales = None
# ``size`` can have a mixture of int and Quantity (and even units),
# so evaluate each axis separately
for axis, side in enumerate(size):
if not isinstance(side, u.Quantity):
shape[axis] = int(np.round(size[axis])) # pixels
else:
if side.unit == u.pixel:
shape[axis] = int(np.round(side.value))
elif side.unit.physical_type == "angle":
if wcs is None:
raise ValueError(
"wcs must be input if any element of size has angular units"
)
if pixel_scales is None:
pixel_scales = u.Quantity(
proj_plane_pixel_scales(wcs), wcs.wcs.cunit[axis]
)
shape[axis] = int(np.round((side / pixel_scales[axis]).decompose()))
else:
raise ValueError(
"shape can contain Quantities with only pixel or angular units"
)
if not isinstance(data, Section): # Accept lazy-loaded image sections
data = np.asanyarray(data)
# reverse position because extract_array and overlap_slices
# use (y, x), but keep the input position
pos_yx = position[::-1]
cutout_data, input_position_cutout = extract_array(
data,
tuple(shape),
pos_yx,
mode=mode,
fill_value=fill_value,
return_position=True,
)
if copy:
cutout_data = np.copy(cutout_data)
self.data = cutout_data
self.input_position_cutout = input_position_cutout[::-1] # (x, y)
slices_original, slices_cutout = overlap_slices(
data.shape, shape, pos_yx, mode=mode
)
self.slices_original = slices_original
self.slices_cutout = slices_cutout
self.shape = self.data.shape
self.input_position_original = position
self.shape_input = shape
(
(self.ymin_original, self.ymax_original),
(self.xmin_original, self.xmax_original),
) = self.bbox_original
(
(self.ymin_cutout, self.ymax_cutout),
(self.xmin_cutout, self.xmax_cutout),
) = self.bbox_cutout
# the true origin pixel of the cutout array, including any
# filled cutout values
self._origin_original_true = (
self.origin_original[0] - self.slices_cutout[1].start,
self.origin_original[1] - self.slices_cutout[0].start,
)
if wcs is not None:
self.wcs = deepcopy(wcs)
self.wcs.wcs.crpix -= self._origin_original_true
self.wcs.array_shape = self.data.shape
if wcs.sip is not None:
self.wcs.sip = Sip(
wcs.sip.a,
wcs.sip.b,
wcs.sip.ap,
wcs.sip.bp,
wcs.sip.crpix - self._origin_original_true,
)
else:
self.wcs = None
def to_original_position(self, cutout_position):
"""
Convert an ``(x, y)`` position in the cutout array to the original
``(x, y)`` position in the original large array.
Parameters
----------
cutout_position : tuple
The ``(x, y)`` pixel position in the cutout array.
Returns
-------
original_position : tuple
The corresponding ``(x, y)`` pixel position in the original
large array.
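        Examples
        --------
        A minimal sketch, reusing ``cutout1`` from the class docstring:

        >>> cutout1.to_original_position((0, 0))  # doctest: +SKIP
        (1, 1)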
"""
return tuple(cutout_position[i] + self.origin_original[i] for i in [0, 1])
def to_cutout_position(self, original_position):
"""
Convert an ``(x, y)`` position in the original large array to
the ``(x, y)`` position in the cutout array.
Parameters
----------
original_position : tuple
The ``(x, y)`` pixel position in the original large array.
Returns
-------
cutout_position : tuple
The corresponding ``(x, y)`` pixel position in the cutout
array.
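        Examples
        --------
        A minimal sketch, reusing ``cutout1`` from the class docstring:

        >>> cutout1.to_cutout_position((1, 1))  # doctest: +SKIP
        (0, 0)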
"""
return tuple(original_position[i] - self.origin_original[i] for i in [0, 1])
def plot_on_original(self, ax=None, fill=False, **kwargs):
"""
Plot the cutout region on a matplotlib Axes instance.
Parameters
----------
ax : `matplotlib.axes.Axes` instance, optional
If `None`, then the current `matplotlib.axes.Axes` instance
is used.
fill : bool, optional
Set whether to fill the cutout patch. The default is
`False`.
kwargs : optional
Any keyword arguments accepted by `matplotlib.patches.Patch`.
Returns
-------
ax : `matplotlib.axes.Axes` instance
The matplotlib Axes instance constructed in the method if
``ax=None``. Otherwise the output ``ax`` is the same as the
input ``ax``.
"""
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
kwargs["fill"] = fill
if ax is None:
ax = plt.gca()
height, width = self.shape
hw, hh = width / 2.0, height / 2.0
pos_xy = self.position_original - np.array([hw, hh])
patch = mpatches.Rectangle(pos_xy, width, height, angle=0.0, **kwargs)
ax.add_patch(patch)
return ax
@staticmethod
def _calc_center(slices):
"""
Calculate the center position. The center position will be
fractional for even-sized arrays. For ``mode='partial'``, the
central position is calculated for the valid (non-filled) cutout
values.
"""
return tuple(0.5 * (slices[i].start + slices[i].stop - 1) for i in [1, 0])
@staticmethod
def _calc_bbox(slices):
"""
Calculate a minimal bounding box in the form ``((ymin, ymax),
(xmin, xmax))``. Note these are pixel locations, not slice
indices. For ``mode='partial'``, the bounding box indices are
for the valid (non-filled) cutout values.
"""
# (stop - 1) to return the max pixel location, not the slice index
return (
(slices[0].start, slices[0].stop - 1),
(slices[1].start, slices[1].stop - 1),
)
@lazyproperty
def origin_original(self):
"""
The ``(x, y)`` index of the origin pixel of the cutout with
respect to the original array. For ``mode='partial'``, the
origin pixel is calculated for the valid (non-filled) cutout
values.
"""
return (self.slices_original[1].start, self.slices_original[0].start)
@lazyproperty
def origin_cutout(self):
"""
The ``(x, y)`` index of the origin pixel of the cutout with
respect to the cutout array. For ``mode='partial'``, the origin
pixel is calculated for the valid (non-filled) cutout values.
"""
return (self.slices_cutout[1].start, self.slices_cutout[0].start)
@staticmethod
def _round(a):
"""
Round the input to the nearest integer.
If two integers are equally close, the value is rounded up.
Note that this is different from `np.round`, which rounds to the
nearest even number.
"""
return int(np.floor(a + 0.5))
@lazyproperty
def position_original(self):
"""
The ``(x, y)`` position index (rounded to the nearest pixel) in
the original array.
"""
return (
self._round(self.input_position_original[0]),
self._round(self.input_position_original[1]),
)
@lazyproperty
def position_cutout(self):
"""
The ``(x, y)`` position index (rounded to the nearest pixel) in
the cutout array.
"""
return (
self._round(self.input_position_cutout[0]),
self._round(self.input_position_cutout[1]),
)
@lazyproperty
def center_original(self):
"""
The central ``(x, y)`` position of the cutout array with respect
to the original array. For ``mode='partial'``, the central
position is calculated for the valid (non-filled) cutout values.
"""
return self._calc_center(self.slices_original)
@lazyproperty
def center_cutout(self):
"""
The central ``(x, y)`` position of the cutout array with respect
to the cutout array. For ``mode='partial'``, the central
position is calculated for the valid (non-filled) cutout values.
"""
return self._calc_center(self.slices_cutout)
@lazyproperty
def bbox_original(self):
"""
The bounding box ``((ymin, ymax), (xmin, xmax))`` of the minimal
rectangular region of the cutout array with respect to the
original array. For ``mode='partial'``, the bounding box
indices are for the valid (non-filled) cutout values.
"""
return self._calc_bbox(self.slices_original)
@lazyproperty
def bbox_cutout(self):
"""
The bounding box ``((ymin, ymax), (xmin, xmax))`` of the minimal
rectangular region of the cutout array with respect to the
cutout array. For ``mode='partial'``, the bounding box indices
are for the valid (non-filled) cutout values.
"""
return self._calc_bbox(self.slices_cutout)
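# Hedged usage sketch (not part of the original module): round-tripping a
# pixel position between the cutout frame and the original frame. The array
# shape, position, and size below are illustrative assumptions.
def _example_cutout_positions():  # pragma: no cover - illustrative only
    data = np.arange(100.0).reshape(10, 10)
    cutout = Cutout2D(data, position=(5, 5), size=(4, 4))
    # The cutout origin (0, 0) maps to the lower-left corner in the original.
    orig = cutout.to_original_position((0, 0))
    assert cutout.to_cutout_position(orig) == (0, 0)
    return orig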
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module contains a class equivalent to pre-1.0 NDData.
import numpy as np
from astropy import log
from astropy.units import Unit, UnitConversionError, UnitsError # noqa: F401
from .flag_collection import FlagCollection
from .mixins.ndarithmetic import NDArithmeticMixin
from .mixins.ndio import NDIOMixin
from .mixins.ndslicing import NDSlicingMixin
from .nddata import NDData
from .nduncertainty import NDUncertainty
__all__ = ["NDDataArray"]
class NDDataArray(NDArithmeticMixin, NDSlicingMixin, NDIOMixin, NDData):
"""
An ``NDData`` object with arithmetic. This class is functionally equivalent
to ``NDData`` in astropy versions prior to 1.0.
The key distinction from raw numpy arrays is the presence of
additional metadata such as uncertainties, a mask, units, flags,
and/or a coordinate system.
See also: https://docs.astropy.org/en/stable/nddata/
Parameters
----------
data : ndarray or `NDData`
The actual data contained in this `NDData` object. Note that this
will always be copied by *reference*, so you should make a copy of
the ``data`` before passing it in if that's the desired behavior.
uncertainty : `~astropy.nddata.NDUncertainty`, optional
Uncertainties on the data.
mask : array-like, optional
Mask for the data, given as a boolean Numpy array or any object that
can be converted to a boolean Numpy array with a shape
matching that of the data. The values must be ``False`` where
the data is *valid* and ``True`` when it is not (like Numpy
masked arrays). If ``data`` is a numpy masked array, providing
``mask`` here will cause the mask from the masked array to be
ignored.
flags : array-like or `~astropy.nddata.FlagCollection`, optional
Flags giving information about each pixel. These can be specified
either as a Numpy array of any type (or an object which can be converted
to a Numpy array) with a shape matching that of the
data, or as a `~astropy.nddata.FlagCollection` instance which has a
shape matching that of the data.
wcs : None, optional
WCS-object containing the world coordinate system for the data.
.. warning::
This is not yet defined because the discussion of how best to
represent this class's WCS system generically is still under
consideration. For now, just leave it as `None`.
meta : `dict`-like object, optional
Metadata for this object. "Metadata" here means all information that
is included with this object but not part of any other attribute
of this particular object. e.g., creation date, unique identifier,
simulation parameters, exposure time, telescope name, etc.
unit : `~astropy.units.UnitBase` instance or str, optional
The units of the data.
Raises
------
ValueError :
If the `uncertainty` or `mask` inputs cannot be broadcast (e.g., match
shape) onto ``data``.
"""
def __init__(self, data, *args, flags=None, **kwargs):
# Initialize with the parent...
super().__init__(data, *args, **kwargs)
# ...then reset uncertainty to force it to go through the
# setter logic below. In base NDData all that is done is to
# set self._uncertainty to whatever uncertainty is passed in.
self.uncertainty = self._uncertainty
# Same thing for mask.
self.mask = self._mask
# Initialize flags because they are no longer handled in NDData
# or NDDataBase.
if isinstance(data, NDDataArray):
if flags is None:
flags = data.flags
else:
log.info(
"Overwriting NDDataArrays's current flags with specified flags"
)
self.flags = flags
# Implement uncertainty as NDUncertainty to support propagation of
# uncertainties in arithmetic operations
@property
def uncertainty(self):
return self._uncertainty
@uncertainty.setter
def uncertainty(self, value):
if value is not None:
if isinstance(value, NDUncertainty):
class_name = self.__class__.__name__
if not self.unit and value._unit:
# Raise an error if uncertainty has unit and data does not
raise ValueError(
"Cannot assign an uncertainty with unit "
"to {} without "
"a unit".format(class_name)
)
self._uncertainty = value
self._uncertainty.parent_nddata = self
else:
raise TypeError(
"Uncertainty must be an instance of a NDUncertainty object"
)
else:
self._uncertainty = value
# Override unit so that we can add a setter.
@property
def unit(self):
return self._unit
@unit.setter
def unit(self, value):
from . import conf
try:
if self._unit is not None and conf.warn_setting_unit_directly:
log.info(
"Setting the unit directly changes the unit without "
"updating the data or uncertainty. Use the "
".convert_unit_to() method to change the unit and "
"scale values appropriately."
)
except AttributeError:
# raised if self._unit has not been set yet, in which case the
# warning is irrelevant
pass
if value is None:
self._unit = None
else:
self._unit = Unit(value)
# Implement mask in a way that converts nicely to a numpy masked array
@property
def mask(self):
if self._mask is np.ma.nomask:
return None
else:
return self._mask
@mask.setter
def mask(self, value):
# Check that value is not either type of null mask.
if (value is not None) and (value is not np.ma.nomask):
mask = np.asarray(value, dtype=bool)
if mask.shape != self.data.shape:
raise ValueError("dimensions of mask do not match data")
else:
self._mask = mask
else:
# internal representation should be one numpy understands
self._mask = np.ma.nomask
@property
def shape(self):
"""
shape tuple of this object's data.
"""
return self.data.shape
@property
def size(self):
"""
integer size of this object's data.
"""
return self.data.size
@property
def dtype(self):
"""
`numpy.dtype` of this object's data.
"""
return self.data.dtype
@property
def ndim(self):
"""
integer dimensions of this object's data.
"""
return self.data.ndim
@property
def flags(self):
return self._flags
@flags.setter
def flags(self, value):
if value is not None:
if isinstance(value, FlagCollection):
if value.shape != self.shape:
raise ValueError("dimensions of FlagCollection does not match data")
else:
self._flags = value
else:
flags = np.asarray(value)
if flags.shape != self.shape:
raise ValueError("dimensions of flags do not match data")
else:
self._flags = flags
else:
self._flags = value
def __array__(self):
"""
This allows code that requests a Numpy array to use an NDData
object as a Numpy array.
"""
if self.mask is not None:
return np.ma.masked_array(self.data, self.mask)
else:
return np.array(self.data)
def __array_prepare__(self, array, context=None):
"""
This ensures that a masked array is returned if self is masked.
"""
if self.mask is not None:
return np.ma.masked_array(array, self.mask)
else:
return array
def convert_unit_to(self, unit, equivalencies=[]):
"""
Returns a new `NDData` object whose values have been converted
to a new unit.
Parameters
----------
unit : `astropy.units.UnitBase` instance or str
The unit to convert to.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
Returns
-------
result : `~astropy.nddata.NDData`
The resulting dataset
Raises
------
`~astropy.units.UnitsError`
If units are inconsistent.
"""
if self.unit is None:
raise ValueError("No unit specified on source data")
data = self.unit.to(unit, self.data, equivalencies=equivalencies)
if self.uncertainty is not None:
uncertainty_values = self.unit.to(
unit, self.uncertainty.array, equivalencies=equivalencies
)
# should work for any uncertainty class
uncertainty = self.uncertainty.__class__(uncertainty_values)
else:
uncertainty = None
if self.mask is not None:
new_mask = self.mask.copy()
else:
new_mask = None
# Call __class__ in case we are dealing with an inherited type
result = self.__class__(
data,
uncertainty=uncertainty,
mask=new_mask,
wcs=self.wcs,
meta=self.meta,
unit=unit,
)
return result
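# Hedged usage sketch (not part of the original module): converting an
# NDDataArray from meters to kilometers. The values are illustrative.
def _example_convert_unit():  # pragma: no cover - illustrative only
    ndd = NDDataArray(np.array([1000.0, 2500.0]), unit="m")
    converted = ndd.convert_unit_to("km")
    # converted.data is array([1.0, 2.5]) and converted.unit is Unit("km")
    return converted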
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""World Coordinate System (WCS) transformations in FITS files.
.. _wcslib: https://www.atnf.csiro.au/people/mcalabre/WCS/wcslib/index.html
.. _distortion paper: https://www.atnf.csiro.au/people/mcalabre/WCS/dcs_20040422.pdf
.. _SIP: https://irsa.ipac.caltech.edu/data/SPITZER/docs/files/spitzer/shupeADASS.pdf
.. _FITS WCS standard: https://fits.gsfc.nasa.gov/fits_wcs.html
`astropy.wcs` contains utilities for managing World Coordinate System
(WCS) transformations in FITS files. These transformations map the
pixel locations in an image to their real-world units, such as their
position on the sky sphere.
It performs three separate classes of WCS transformations:
- Core WCS, as defined in the `FITS WCS standard`_, based on Mark
Calabretta's `wcslib`_. See `~astropy.wcs.Wcsprm`.
- Simple Imaging Polynomial (`SIP`_) convention. See
`~astropy.wcs.Sip`.
- table lookup distortions as defined in WCS `distortion paper`_. See
`~astropy.wcs.DistortionLookupTable`.
Each of these transformations can be used independently or together in
a standard pipeline.
"""
from . import utils
from .wcs import *
from .wcs import InvalidTabularParametersError # just for docs
def get_include():
"""
Get the path to astropy.wcs's C header files.
"""
import os
return os.path.join(os.path.dirname(__file__), "include")
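# Hedged usage sketch: one common way to compile a C extension against the
# astropy.wcs headers. The extension name and source file are hypothetical.
def _example_extension():  # pragma: no cover - illustrative only
    from setuptools import Extension

    return Extension("mymod", ["mymod.c"], include_dirs=[get_include()])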
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import numpy as np
import astropy.units as u
from astropy.coordinates import ITRS, CartesianRepresentation, SphericalRepresentation
from astropy.utils import unbroadcast
from .wcs import WCS, WCSSUB_LATITUDE, WCSSUB_LONGITUDE
__doctest_skip__ = ["wcs_to_celestial_frame", "celestial_frame_to_wcs"]
__all__ = [
"obsgeo_to_frame",
"add_stokes_axis_to_wcs",
"celestial_frame_to_wcs",
"wcs_to_celestial_frame",
"proj_plane_pixel_scales",
"proj_plane_pixel_area",
"is_proj_plane_distorted",
"non_celestial_pixel_scales",
"skycoord_to_pixel",
"pixel_to_skycoord",
"custom_wcs_to_frame_mappings",
"custom_frame_to_wcs_mappings",
"pixel_to_pixel",
"local_partial_pixel_derivatives",
"fit_wcs_from_points",
]
def add_stokes_axis_to_wcs(wcs, add_before_ind):
"""
Add a new Stokes axis that is uncorrelated with any other axes.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS to add to
add_before_ind : int
Index of the WCS to insert the new Stokes axis in front of.
To add at the end, do add_before_ind = wcs.wcs.naxis
The beginning is at position 0.
Returns
-------
`~astropy.wcs.WCS`
A new `~astropy.wcs.WCS` instance with an additional axis
"""
inds = [i + 1 for i in range(wcs.wcs.naxis)]
inds.insert(add_before_ind, 0)
newwcs = wcs.sub(inds)
newwcs.wcs.ctype[add_before_ind] = "STOKES"
newwcs.wcs.cname[add_before_ind] = "STOKES"
return newwcs
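# Hedged usage sketch: appending a Stokes axis to a simple 2D celestial WCS.
# The CTYPE values below are illustrative assumptions.
def _example_add_stokes_axis():  # pragma: no cover - illustrative only
    w = WCS(naxis=2)
    w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    w3 = add_stokes_axis_to_wcs(w, 2)  # index == naxis appends at the end
    # w3.wcs.naxis == 3 and w3.wcs.ctype[2] == "STOKES"
    return w3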
def _wcs_to_celestial_frame_builtin(wcs):
# Import astropy.coordinates here to avoid circular imports
from astropy.coordinates import (
FK4,
FK5,
ICRS,
ITRS,
FK4NoETerms,
Galactic,
SphericalRepresentation,
)
# Import astropy.time here otherwise setup.py fails before extensions are compiled
from astropy.time import Time
if wcs.wcs.lng == -1 or wcs.wcs.lat == -1:
return None
radesys = wcs.wcs.radesys
if np.isnan(wcs.wcs.equinox):
equinox = None
else:
equinox = wcs.wcs.equinox
xcoord = wcs.wcs.ctype[wcs.wcs.lng][:4]
ycoord = wcs.wcs.ctype[wcs.wcs.lat][:4]
# Apply logic from FITS standard to determine the default radesys
if radesys == "" and xcoord == "RA--" and ycoord == "DEC-":
if equinox is None:
radesys = "ICRS"
elif equinox < 1984.0:
radesys = "FK4"
else:
radesys = "FK5"
if radesys == "FK4":
if equinox is not None:
equinox = Time(equinox, format="byear")
frame = FK4(equinox=equinox)
elif radesys == "FK4-NO-E":
if equinox is not None:
equinox = Time(equinox, format="byear")
frame = FK4NoETerms(equinox=equinox)
elif radesys == "FK5":
if equinox is not None:
equinox = Time(equinox, format="jyear")
frame = FK5(equinox=equinox)
elif radesys == "ICRS":
frame = ICRS()
else:
if xcoord == "GLON" and ycoord == "GLAT":
frame = Galactic()
elif xcoord == "TLON" and ycoord == "TLAT":
# The default representation for ITRS is cartesian, but for WCS
# purposes, we need the spherical representation.
frame = ITRS(
representation_type=SphericalRepresentation,
obstime=wcs.wcs.dateobs or None,
)
else:
frame = None
return frame
def _celestial_frame_to_wcs_builtin(frame, projection="TAN"):
# Import astropy.coordinates here to avoid circular imports
from astropy.coordinates import (
FK4,
FK5,
ICRS,
ITRS,
BaseRADecFrame,
FK4NoETerms,
Galactic,
)
# Create a 2-dimensional WCS
wcs = WCS(naxis=2)
if isinstance(frame, BaseRADecFrame):
xcoord = "RA--"
ycoord = "DEC-"
if isinstance(frame, ICRS):
wcs.wcs.radesys = "ICRS"
elif isinstance(frame, FK4NoETerms):
wcs.wcs.radesys = "FK4-NO-E"
wcs.wcs.equinox = frame.equinox.byear
elif isinstance(frame, FK4):
wcs.wcs.radesys = "FK4"
wcs.wcs.equinox = frame.equinox.byear
elif isinstance(frame, FK5):
wcs.wcs.radesys = "FK5"
wcs.wcs.equinox = frame.equinox.jyear
else:
return None
elif isinstance(frame, Galactic):
xcoord = "GLON"
ycoord = "GLAT"
elif isinstance(frame, ITRS):
xcoord = "TLON"
ycoord = "TLAT"
wcs.wcs.radesys = "ITRS"
wcs.wcs.dateobs = frame.obstime.utc.isot
else:
return None
wcs.wcs.ctype = [xcoord + "-" + projection, ycoord + "-" + projection]
return wcs
WCS_FRAME_MAPPINGS = [[_wcs_to_celestial_frame_builtin]]
FRAME_WCS_MAPPINGS = [[_celestial_frame_to_wcs_builtin]]
class custom_wcs_to_frame_mappings:
def __init__(self, mappings=[]):
if hasattr(mappings, "__call__"):
mappings = [mappings]
WCS_FRAME_MAPPINGS.append(mappings)
def __enter__(self):
pass
def __exit__(self, type, value, tb):
WCS_FRAME_MAPPINGS.pop()
# Backward-compatibility
custom_frame_mappings = custom_wcs_to_frame_mappings
class custom_frame_to_wcs_mappings:
def __init__(self, mappings=[]):
if hasattr(mappings, "__call__"):
mappings = [mappings]
FRAME_WCS_MAPPINGS.append(mappings)
def __enter__(self):
pass
def __exit__(self, type, value, tb):
FRAME_WCS_MAPPINGS.pop()
def wcs_to_celestial_frame(wcs):
"""
For a given WCS, return the coordinate frame that matches the celestial
component of the WCS.
Parameters
----------
wcs : :class:`~astropy.wcs.WCS` instance
The WCS to find the frame for
Returns
-------
frame : :class:`~astropy.coordinates.BaseCoordinateFrame` subclass instance
An instance of a :class:`~astropy.coordinates.BaseCoordinateFrame`
subclass that best matches the specified WCS.
Notes
-----
To extend this function to frames not defined in astropy.coordinates, you
can write your own function which should take a :class:`~astropy.wcs.WCS`
instance and should return either an instance of a frame, or `None` if no
matching frame was found. You can register this function temporarily with::
>>> from astropy.wcs.utils import wcs_to_celestial_frame, custom_wcs_to_frame_mappings
>>> with custom_wcs_to_frame_mappings(my_function):
... wcs_to_celestial_frame(...)
"""
for mapping_set in WCS_FRAME_MAPPINGS:
for func in mapping_set:
frame = func(wcs)
if frame is not None:
return frame
raise ValueError(
"Could not determine celestial frame corresponding to the specified WCS object"
)
def celestial_frame_to_wcs(frame, projection="TAN"):
"""
For a given coordinate frame, return the corresponding WCS object.
Note that the returned WCS object has only the elements corresponding to
coordinate frames set (e.g. ctype, equinox, radesys).
Parameters
----------
frame : :class:`~astropy.coordinates.BaseCoordinateFrame` subclass instance
An instance of a :class:`~astropy.coordinates.BaseCoordinateFrame`
subclass for which to find the WCS.
projection : str
Projection code to use in ctype, if applicable
Returns
-------
wcs : :class:`~astropy.wcs.WCS` instance
The corresponding WCS object
Examples
--------
::
>>> from astropy.wcs.utils import celestial_frame_to_wcs
>>> from astropy.coordinates import FK5
>>> frame = FK5(equinox='J2010')
>>> wcs = celestial_frame_to_wcs(frame)
>>> wcs.to_header()
WCSAXES = 2 / Number of coordinate axes
CRPIX1 = 0.0 / Pixel coordinate of reference point
CRPIX2 = 0.0 / Pixel coordinate of reference point
CDELT1 = 1.0 / [deg] Coordinate increment at reference point
CDELT2 = 1.0 / [deg] Coordinate increment at reference point
CUNIT1 = 'deg' / Units of coordinate increment and value
CUNIT2 = 'deg' / Units of coordinate increment and value
CTYPE1 = 'RA---TAN' / Right ascension, gnomonic projection
CTYPE2 = 'DEC--TAN' / Declination, gnomonic projection
CRVAL1 = 0.0 / [deg] Coordinate value at reference point
CRVAL2 = 0.0 / [deg] Coordinate value at reference point
LONPOLE = 180.0 / [deg] Native longitude of celestial pole
LATPOLE = 0.0 / [deg] Native latitude of celestial pole
RADESYS = 'FK5' / Equatorial coordinate system
EQUINOX = 2010.0 / [yr] Equinox of equatorial coordinates
Notes
-----
To extend this function to frames not defined in astropy.coordinates, you
can write your own function which should take a
:class:`~astropy.coordinates.BaseCoordinateFrame` subclass
instance and a projection (given as a string) and should return either a WCS
instance, or `None` if the WCS could not be determined. You can register
this function temporarily with::
>>> from astropy.wcs.utils import celestial_frame_to_wcs, custom_frame_to_wcs_mappings
>>> with custom_frame_to_wcs_mappings(my_function):
... celestial_frame_to_wcs(...)
"""
for mapping_set in FRAME_WCS_MAPPINGS:
for func in mapping_set:
wcs = func(frame, projection=projection)
if wcs is not None:
return wcs
raise ValueError(
"Could not determine WCS corresponding to the specified coordinate frame."
)
def proj_plane_pixel_scales(wcs):
"""
For a WCS returns pixel scales along each axis of the image pixel at
the ``CRPIX`` location once it is projected onto the
"plane of intermediate world coordinates" as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
.. note::
This function is concerned **only** about the transformation
"image plane"->"projection plane" and **not** about the
transformation "celestial sphere"->"projection plane"->"image plane".
Therefore, this function ignores distortions arising due to
non-linear nature of most projections.
.. note::
In order to compute the scales corresponding to celestial axes only,
make sure that the input `~astropy.wcs.WCS` object contains
celestial axes only, e.g., by passing in the
`~astropy.wcs.WCS.celestial` WCS object.
Parameters
----------
wcs : `~astropy.wcs.WCS`
A world coordinate system object.
Returns
-------
scale : ndarray
A vector (`~numpy.ndarray`) of projection plane increments
corresponding to each pixel side (axis). The units of the returned
results are the same as the units of `~astropy.wcs.Wcsprm.cdelt`,
`~astropy.wcs.Wcsprm.crval`, and `~astropy.wcs.Wcsprm.cd` for
the celestial WCS and can be obtained by inquiring the value
of `~astropy.wcs.Wcsprm.cunit` property of the input
`~astropy.wcs.WCS` WCS object.
See Also
--------
astropy.wcs.utils.proj_plane_pixel_area
"""
return np.sqrt((wcs.pixel_scale_matrix**2).sum(axis=0, dtype=float))
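# Hedged usage sketch: per-axis pixel scales of a simple diagonal WCS. The
# CDELT values (about 1 arcsec per pixel) are illustrative assumptions.
def _example_pixel_scales():  # pragma: no cover - illustrative only
    w = WCS(naxis=2)
    w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    w.wcs.cdelt = [-1.0 / 3600.0, 1.0 / 3600.0]
    # Returns roughly array([0.000278, 0.000278]), in the units of CDELT.
    return proj_plane_pixel_scales(w)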
def proj_plane_pixel_area(wcs):
"""
For a **celestial** WCS (see `astropy.wcs.WCS.celestial`) returns pixel
area of the image pixel at the ``CRPIX`` location once it is projected
onto the "plane of intermediate world coordinates" as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
.. note::
This function is concerned **only** about the transformation
"image plane"->"projection plane" and **not** about the
transformation "celestial sphere"->"projection plane"->"image plane".
Therefore, this function ignores distortions arising due to
non-linear nature of most projections.
.. note::
In order to compute the area of pixels corresponding to celestial
axes only, this function uses the `~astropy.wcs.WCS.celestial` WCS
object of the input ``wcs``. This is different from the
`~astropy.wcs.utils.proj_plane_pixel_scales` function
that computes the scales for the axes of the input WCS itself.
Parameters
----------
wcs : `~astropy.wcs.WCS`
A world coordinate system object.
Returns
-------
area : float
Area (in the projection plane) of the pixel at ``CRPIX`` location.
The units of the returned result are the same as the units of
the `~astropy.wcs.Wcsprm.cdelt`, `~astropy.wcs.Wcsprm.crval`,
and `~astropy.wcs.Wcsprm.cd` for the celestial WCS and can be
obtained by inquiring the value of `~astropy.wcs.Wcsprm.cunit`
property of the `~astropy.wcs.WCS.celestial` WCS object.
Raises
------
ValueError
Pixel area is defined only for 2D pixels. Most likely the
`~astropy.wcs.Wcsprm.cd` matrix of the `~astropy.wcs.WCS.celestial`
WCS is not a square matrix of second order.
Notes
-----
Depending on the application, square root of the pixel area can be used to
represent a single pixel scale of an equivalent square pixel
whose area is equal to the area of a generally non-square pixel.
See Also
--------
astropy.wcs.utils.proj_plane_pixel_scales
"""
psm = wcs.celestial.pixel_scale_matrix
if psm.shape != (2, 2):
raise ValueError("Pixel area is defined only for 2D pixels.")
return np.abs(np.linalg.det(psm))
def is_proj_plane_distorted(wcs, maxerr=1.0e-5):
r"""
For a WCS returns `False` if square image (detector) pixels stay square
when projected onto the "plane of intermediate world coordinates"
as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
It will return `True` if transformation from image (detector) coordinates
to the focal plane coordinates is non-orthogonal or if WCS contains
non-linear (e.g., SIP) distortions.
.. note::
Since this function is concerned **only** about the transformation
"image plane"->"focal plane" and **not** about the transformation
"celestial sphere"->"focal plane"->"image plane",
this function ignores distortions arising due to non-linear nature
of most projections.
Let's denote by *C* either the original or the reconstructed
(from ``PC`` and ``CDELT``) CD matrix. `is_proj_plane_distorted`
verifies that the transformation from image (detector) coordinates
to the focal plane coordinates is orthogonal using the following
check:
.. math::
\left \| \frac{C \cdot C^{\mathrm{T}}}
{| det(C)|} - I \right \|_{\mathrm{max}} < \epsilon .
Parameters
----------
wcs : `~astropy.wcs.WCS`
World coordinate system object
maxerr : float, optional
Accuracy to which the CD matrix, **normalized** such
that :math:`|det(CD)|=1`, should be close to being an
orthogonal matrix as described in the above equation
(see :math:`\epsilon`).
Returns
-------
distorted : bool
Returns `True` if focal (projection) plane is distorted and `False`
otherwise.
"""
cwcs = wcs.celestial
return not _is_cd_orthogonal(cwcs.pixel_scale_matrix, maxerr) or _has_distortion(cwcs) # fmt: skip
def _is_cd_orthogonal(cd, maxerr):
shape = cd.shape
if not (len(shape) == 2 and shape[0] == shape[1]):
raise ValueError("CD (or PC) matrix must be a 2D square matrix.")
pixarea = np.abs(np.linalg.det(cd))
if pixarea == 0.0:
raise ValueError("CD (or PC) matrix is singular.")
# NOTE: Technically, below we should use np.dot(cd, np.conjugate(cd.T))
# However, I am not aware of complex CD/PC matrices...
I = np.dot(cd, cd.T) / pixarea
cd_unitary_err = np.amax(np.abs(I - np.eye(shape[0])))
return cd_unitary_err < maxerr
def non_celestial_pixel_scales(inwcs):
"""
Calculate the pixel scale along each axis of a non-celestial WCS,
for example one with mixed spectral and spatial axes.
Parameters
----------
inwcs : `~astropy.wcs.WCS`
The world coordinate system object.
Returns
-------
scale : `numpy.ndarray`
The pixel scale along each axis.
"""
if inwcs.is_celestial:
raise ValueError("WCS is celestial, use celestial_pixel_scales instead")
pccd = inwcs.pixel_scale_matrix
if np.allclose(np.extract(1 - np.eye(*pccd.shape), pccd), 0):
return np.abs(np.diagonal(pccd)) * u.deg
else:
raise ValueError("WCS is rotated, cannot determine consistent pixel scales")
def _has_distortion(wcs):
"""
`True` if contains any SIP or image distortion components.
"""
return any(
getattr(wcs, dist_attr) is not None
for dist_attr in ["cpdis1", "cpdis2", "det2im1", "det2im2", "sip"]
)
# TODO: in future, we should think about how the following two functions can be
# integrated better into the WCS class.
def skycoord_to_pixel(coords, wcs, origin=0, mode="all"):
"""
Convert a set of SkyCoord coordinates into pixels.
Parameters
----------
coords : `~astropy.coordinates.SkyCoord`
The coordinates to convert.
wcs : `~astropy.wcs.WCS`
The WCS transformation to use.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
Whether to do the transformation including distortions (``'all'``) or
only the core WCS transformation (``'wcs'``).
Returns
-------
xp, yp : `numpy.ndarray`
The pixel coordinates
See Also
--------
astropy.coordinates.SkyCoord.from_pixel
"""
if _has_distortion(wcs) and wcs.naxis != 2:
raise ValueError("Can only handle WCS with distortions for 2-dimensional WCS")
# Keep only the celestial part of the axes, also re-orders lon/lat
wcs = wcs.sub([WCSSUB_LONGITUDE, WCSSUB_LATITUDE])
if wcs.naxis != 2:
raise ValueError("WCS should contain celestial component")
# Check which frame the WCS uses
frame = wcs_to_celestial_frame(wcs)
# Check what unit the WCS needs
xw_unit = u.Unit(wcs.wcs.cunit[0])
yw_unit = u.Unit(wcs.wcs.cunit[1])
# Convert positions to frame
coords = coords.transform_to(frame)
# Extract longitude and latitude. We first try and use lon/lat directly,
# but if the representation is not spherical or unit spherical this will
# fail. We should then force the use of the unit spherical
# representation. We don't do that directly to make sure that we preserve
# custom lon/lat representations if available.
try:
lon = coords.data.lon.to(xw_unit)
lat = coords.data.lat.to(yw_unit)
except AttributeError:
lon = coords.spherical.lon.to(xw_unit)
lat = coords.spherical.lat.to(yw_unit)
# Convert to pixel coordinates
if mode == "all":
xp, yp = wcs.all_world2pix(lon.value, lat.value, origin)
elif mode == "wcs":
xp, yp = wcs.wcs_world2pix(lon.value, lat.value, origin)
else:
raise ValueError("mode should be either 'all' or 'wcs'")
return xp, yp
def pixel_to_skycoord(xp, yp, wcs, origin=0, mode="all", cls=None):
"""
Convert a set of pixel coordinates into a `~astropy.coordinates.SkyCoord`
coordinate.
Parameters
----------
xp, yp : float or ndarray
The coordinates to convert.
wcs : `~astropy.wcs.WCS`
The WCS transformation to use.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
Whether to do the transformation including distortions (``'all'``) or
only the core WCS transformation (``'wcs'``).
cls : class or None
The class of object to create. Should be a
`~astropy.coordinates.SkyCoord` subclass. If None, defaults to
`~astropy.coordinates.SkyCoord`.
Returns
-------
coords : `~astropy.coordinates.SkyCoord` subclass
The celestial coordinates, returned as an instance of ``cls``.
See Also
--------
astropy.coordinates.SkyCoord.from_pixel
"""
# Import astropy.coordinates here to avoid circular imports
from astropy.coordinates import SkyCoord, UnitSphericalRepresentation
# we have to do this instead of actually setting the default to SkyCoord
# because importing SkyCoord at the module-level leads to circular
# dependencies.
if cls is None:
cls = SkyCoord
if _has_distortion(wcs) and wcs.naxis != 2:
raise ValueError("Can only handle WCS with distortions for 2-dimensional WCS")
# Keep only the celestial part of the axes, also re-orders lon/lat
wcs = wcs.sub([WCSSUB_LONGITUDE, WCSSUB_LATITUDE])
if wcs.naxis != 2:
raise ValueError("WCS should contain celestial component")
# Check which frame the WCS uses
frame = wcs_to_celestial_frame(wcs)
# Check what unit the WCS gives
lon_unit = u.Unit(wcs.wcs.cunit[0])
lat_unit = u.Unit(wcs.wcs.cunit[1])
# Convert pixel coordinates to celestial coordinates
if mode == "all":
lon, lat = wcs.all_pix2world(xp, yp, origin)
elif mode == "wcs":
lon, lat = wcs.wcs_pix2world(xp, yp, origin)
else:
raise ValueError("mode should be either 'all' or 'wcs'")
# Add units to longitude/latitude
lon = lon * lon_unit
lat = lat * lat_unit
# Create a SkyCoord-like object
data = UnitSphericalRepresentation(lon=lon, lat=lat)
coords = cls(frame.realize_frame(data))
return coords
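# Hedged usage sketch: a round trip between sky and pixel coordinates using
# the two helpers above. All WCS keyword values are illustrative assumptions.
def _example_sky_pixel_roundtrip():  # pragma: no cover - illustrative only
    from astropy.coordinates import SkyCoord

    w = WCS(naxis=2)
    w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    w.wcs.crval = [10.0, 20.0]
    w.wcs.crpix = [50.0, 50.0]
    w.wcs.cdelt = [-0.001, 0.001]
    w.wcs.cunit = ["deg", "deg"]
    coord = SkyCoord(ra=10.01, dec=20.01, unit="deg")
    xp, yp = skycoord_to_pixel(coord, w)
    return pixel_to_skycoord(xp, yp, w)  # approximately recovers ``coord``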
def _unique_with_order_preserved(items):
"""
Return a list of unique items in the list provided, preserving the order
in which they are found.
"""
new_items = []
for item in items:
if item not in new_items:
new_items.append(item)
return new_items
def _pixel_to_world_correlation_matrix(wcs):
"""
Return a correlation matrix between the pixel coordinates and the
high level world coordinates, along with the list of high level world
coordinate classes.
The shape of the matrix is ``(n_world, n_pix)``, where ``n_world`` is the
number of high level world coordinates.
"""
# We basically want to collapse the world dimensions together that are
# combined into the same high-level objects.
# Get the following in advance as getting these properties can be expensive
all_components = wcs.low_level_wcs.world_axis_object_components
all_classes = wcs.low_level_wcs.world_axis_object_classes
axis_correlation_matrix = wcs.low_level_wcs.axis_correlation_matrix
components = _unique_with_order_preserved([c[0] for c in all_components])
matrix = np.zeros((len(components), wcs.pixel_n_dim), dtype=bool)
for iworld in range(wcs.world_n_dim):
iworld_unique = components.index(all_components[iworld][0])
matrix[iworld_unique] |= axis_correlation_matrix[iworld]
classes = [all_classes[component][0] for component in components]
return matrix, classes
def _pixel_to_pixel_correlation_matrix(wcs_in, wcs_out):
"""
Correlation matrix between the input and output pixel coordinates for a
pixel -> world -> pixel transformation specified by two WCS instances.
The first WCS specified is the one used for the pixel -> world
transformation and the second WCS specified is the one used for the world ->
pixel transformation. The shape of the matrix is
``(n_pixel_out, n_pixel_in)``.
"""
matrix1, classes1 = _pixel_to_world_correlation_matrix(wcs_in)
matrix2, classes2 = _pixel_to_world_correlation_matrix(wcs_out)
if len(classes1) != len(classes2):
raise ValueError("The two WCS return a different number of world coordinates")
# Check if classes match uniquely
unique_match = True
mapping = []
for class1 in classes1:
matches = classes2.count(class1)
if matches == 0:
raise ValueError("The world coordinate types of the two WCS do not match")
elif matches > 1:
unique_match = False
break
else:
mapping.append(classes2.index(class1))
if unique_match:
# Classes are unique, so we need to re-order matrix2 along the world
# axis using the mapping we found above.
matrix2 = matrix2[mapping]
elif classes1 != classes2:
raise ValueError(
"World coordinate order doesn't match and automatic matching is ambiguous"
)
matrix = np.matmul(matrix2.T, matrix1)
return matrix
def _split_matrix(matrix):
"""
Given an axis correlation matrix from a WCS object, return information about
the individual WCS that can be split out.
The output is a list of tuples, where each tuple contains a list of
pixel dimensions and a list of world dimensions that can be extracted to
form a new WCS. For example, in the case of a spectral cube with the first
two world coordinates being the celestial coordinates and the third
coordinate being an uncorrelated spectral axis, the matrix would look like::
array([[ True, True, False],
[ True, True, False],
[False, False, True]])
and this function will return ``[([0, 1], [0, 1]), ([2], [2])]``.
"""
pixel_used = []
split_info = []
for ipix in range(matrix.shape[1]):
if ipix in pixel_used:
continue
pixel_include = np.zeros(matrix.shape[1], dtype=bool)
pixel_include[ipix] = True
n_pix_prev, n_pix = 0, 1
while n_pix > n_pix_prev:
world_include = matrix[:, pixel_include].any(axis=1)
pixel_include = matrix[world_include, :].any(axis=0)
n_pix_prev, n_pix = n_pix, np.sum(pixel_include)
pixel_indices = list(np.nonzero(pixel_include)[0])
world_indices = list(np.nonzero(world_include)[0])
pixel_used.extend(pixel_indices)
split_info.append((pixel_indices, world_indices))
return split_info
def pixel_to_pixel(wcs_in, wcs_out, *inputs):
"""
Transform pixel coordinates in a dataset with a WCS to pixel coordinates
in another dataset with a different WCS.
This function is designed to efficiently deal with input pixel arrays that
are broadcasted views of smaller arrays, and is compatible with any
APE14-compliant WCS.
Parameters
----------
wcs_in : `~astropy.wcs.wcsapi.BaseHighLevelWCS`
A WCS object for the original dataset which complies with the
high-level shared APE 14 WCS API.
wcs_out : `~astropy.wcs.wcsapi.BaseHighLevelWCS`
A WCS object for the target dataset which complies with the
high-level shared APE 14 WCS API.
*inputs :
Scalars or arrays giving the pixel coordinates to transform.
"""
# Shortcut for scalars
if np.isscalar(inputs[0]):
world_outputs = wcs_in.pixel_to_world(*inputs)
if not isinstance(world_outputs, (tuple, list)):
world_outputs = (world_outputs,)
return wcs_out.world_to_pixel(*world_outputs)
# Remember original shape
original_shape = inputs[0].shape
matrix = _pixel_to_pixel_correlation_matrix(wcs_in, wcs_out)
split_info = _split_matrix(matrix)
outputs = [None] * wcs_out.pixel_n_dim
for pixel_in_indices, pixel_out_indices in split_info:
pixel_inputs = []
for ipix in range(wcs_in.pixel_n_dim):
if ipix in pixel_in_indices:
pixel_inputs.append(unbroadcast(inputs[ipix]))
else:
pixel_inputs.append(inputs[ipix].flat[0])
pixel_inputs = np.broadcast_arrays(*pixel_inputs)
world_outputs = wcs_in.pixel_to_world(*pixel_inputs)
if not isinstance(world_outputs, (tuple, list)):
world_outputs = (world_outputs,)
pixel_outputs = wcs_out.world_to_pixel(*world_outputs)
if wcs_out.pixel_n_dim == 1:
pixel_outputs = (pixel_outputs,)
for ipix in range(wcs_out.pixel_n_dim):
if ipix in pixel_out_indices:
outputs[ipix] = np.broadcast_to(pixel_outputs[ipix], original_shape)
return outputs[0] if wcs_out.pixel_n_dim == 1 else outputs
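# Hedged usage sketch: mapping a pixel position between two WCSes that differ
# only in their reference pixel. All keyword values are assumptions; with
# otherwise identical transforms, the output is shifted by the CRPIX offset.
def _example_pixel_to_pixel():  # pragma: no cover - illustrative only
    w_in = WCS(naxis=2)
    w_in.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    w_in.wcs.cdelt = [-0.001, 0.001]
    w_in.wcs.cunit = ["deg", "deg"]
    w_out = copy.deepcopy(w_in)
    w_out.wcs.crpix = w_in.wcs.crpix + np.array([10.0, 20.0])
    return pixel_to_pixel(w_in, w_out, 1.0, 2.0)  # ~ (11.0, 22.0)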
def local_partial_pixel_derivatives(wcs, *pixel, normalize_by_world=False):
"""
Return a matrix of shape ``(world_n_dim, pixel_n_dim)`` where each entry
``[i, j]`` is the partial derivative d(world_i)/d(pixel_j) at the requested
pixel position.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS transformation to evaluate the derivatives for.
*pixel : float
The scalar pixel coordinates at which to evaluate the derivatives.
normalize_by_world : bool
If `True`, the matrix is normalized so that for each world entry
the derivatives add up to 1.
"""
# Find the world coordinates at the requested pixel
pixel_ref = np.array(pixel)
world_ref = np.array(wcs.pixel_to_world_values(*pixel_ref))
# Set up the derivative matrix
derivatives = np.zeros((wcs.world_n_dim, wcs.pixel_n_dim))
for i in range(wcs.pixel_n_dim):
pixel_off = pixel_ref.copy()
pixel_off[i] += 1
world_off = np.array(wcs.pixel_to_world_values(*pixel_off))
derivatives[:, i] = world_off - world_ref
if normalize_by_world:
derivatives /= derivatives.sum(axis=0)[:, np.newaxis]
return derivatives
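# Hedged usage sketch: for a purely diagonal WCS, the derivative matrix near
# the reference pixel is approximately diag(CDELT). Values are assumptions.
def _example_derivatives():  # pragma: no cover - illustrative only
    w = WCS(naxis=2)
    w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    w.wcs.cdelt = [-0.001, 0.001]
    w.wcs.cunit = ["deg", "deg"]
    # Approximately [[-0.001, 0.0], [0.0, 0.001]].
    return local_partial_pixel_derivatives(w, 0.0, 0.0)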
def _linear_wcs_fit(params, lon, lat, x, y, w_obj):
"""
Objective function for fitting linear terms.
Parameters
----------
params : array
    6-element array. The first 4 elements are the flattened CD matrix,
    the last 2 are CRPIX.
lon, lat : array
    Sky coordinates.
x, y : array
    Pixel coordinates.
w_obj : `~astropy.wcs.WCS`
    WCS object.
"""
cd = params[0:4]
crpix = params[4:6]
w_obj.wcs.cd = ((cd[0], cd[1]), (cd[2], cd[3]))
w_obj.wcs.crpix = crpix
lon2, lat2 = w_obj.wcs_pix2world(x, y, 0)
lat_resids = lat - lat2
lon_resids = lon - lon2
# In case the longitude has wrapped around
lon_resids = np.mod(lon_resids - 180.0, 360.0) - 180.0
resids = np.concatenate((lon_resids * np.cos(np.radians(lat)), lat_resids))
return resids
def _sip_fit(params, lon, lat, u, v, w_obj, order, coeff_names):
"""Objective function for fitting SIP.
Parameters
----------
params : array
    Fittable parameters. The first 2 elements are CRPIX, the next 4
    are the flattened CD matrix, and the remainder are the A and B
    SIP coefficients.
lon, lat : array
    Sky coordinates.
u, v : array
    Pixel coordinates.
w_obj : `~astropy.wcs.WCS`
    WCS object.
order : int
    SIP polynomial order.
coeff_names : list of str
    Names of the SIP coefficients being fit.
"""
from astropy.modeling.models import SIP # here to avoid circular import
# unpack params
crpix = params[0:2]
cdx = params[2:6].reshape((2, 2))
a_params = params[6 : 6 + len(coeff_names)]
b_params = params[6 + len(coeff_names) :]
# assign to wcs, used for transformations in this function
w_obj.wcs.cd = cdx
w_obj.wcs.crpix = crpix
a_coeff, b_coeff = {}, {}
for i in range(len(coeff_names)):
a_coeff["A_" + coeff_names[i]] = a_params[i]
b_coeff["B_" + coeff_names[i]] = b_params[i]
sip = SIP(
crpix=crpix, a_order=order, b_order=order, a_coeff=a_coeff, b_coeff=b_coeff
)
fuv, guv = sip(u, v)
xo, yo = np.dot(cdx, np.array([u + fuv - crpix[0], v + guv - crpix[1]]))
# use all pix2world in case `projection` contains distortion table
x, y = w_obj.all_world2pix(lon, lat, 0)
x, y = np.dot(w_obj.wcs.cd, (x - w_obj.wcs.crpix[0], y - w_obj.wcs.crpix[1]))
resids = np.concatenate((x - xo, y - yo))
return resids
def fit_wcs_from_points(
xy, world_coords, proj_point="center", projection="TAN", sip_degree=None
):
"""
Given two matching sets of coordinates on detector and sky,
compute the WCS.
Fits a WCS object to matched set of input detector and sky coordinates.
Optionally, a SIP can be fit to account for geometric
distortion. Returns an `~astropy.wcs.WCS` object with the best fit
parameters for mapping between input pixel and sky coordinates.
The projection type (default 'TAN') can be passed in as a string, one of
the valid three-letter projection codes - or as a WCS object with
projection keywords already set. Note that if an input WCS has any
non-polynomial distortion, this will be applied and reflected in the
fit terms and coefficients. Passing in a WCS object in this way essentially
allows it to be refit based on the matched input coordinates and projection
point, but take care when using this option as non-projection related
keywords in the input might cause unexpected behavior.
Notes
-----
- The fiducial point for the spherical projection can be set to 'center'
to use the mean position of input sky coordinates, or as an
`~astropy.coordinates.SkyCoord` object.
- Units in all output WCS objects will always be in degrees.
- If the coordinate frame differs between `~astropy.coordinates.SkyCoord`
objects passed in for ``world_coords`` and ``proj_point``, the frame for
``world_coords`` will override as the frame for the output WCS.
- If a WCS object is passed in to ``projection`` the CD/PC matrix will
be used as an initial guess for the fit. If this is known to be
significantly off and may throw off the fit, set to the identity matrix
(for example, by doing wcs.wcs.pc = [(1., 0.,), (0., 1.)])
Parameters
----------
xy : (`numpy.ndarray`, `numpy.ndarray`) tuple
x & y pixel coordinates.
world_coords : `~astropy.coordinates.SkyCoord`
SkyCoord object with world coordinates.
proj_point : 'center' or `~astropy.coordinates.SkyCoord`
    Defaults to 'center', in which case the geometric center of input world
    coordinates will be used as the projection point. To specify an exact
    point for the projection, a SkyCoord object with a coordinate pair can
be passed in. For consistency, the units and frame of these coordinates
will be transformed to match ``world_coords`` if they don't.
projection : str or `~astropy.wcs.WCS`
Three letter projection code, of any of standard projections defined
in the FITS WCS standard. Optionally, a WCS object with projection
keywords set may be passed in.
sip_degree : None or int
If set to a non-zero integer value, will fit SIP of degree
``sip_degree`` to model geometric distortion. Defaults to None, meaning
no distortion corrections will be fit.
Returns
-------
wcs : `~astropy.wcs.WCS`
The best-fit WCS to the points given.
"""
from scipy.optimize import least_squares
import astropy.units as u
from astropy.coordinates import SkyCoord # here to avoid circular import
from .wcs import Sip
xp, yp = xy
try:
lon, lat = world_coords.data.lon.deg, world_coords.data.lat.deg
except AttributeError:
unit_sph = world_coords.unit_spherical
lon, lat = unit_sph.lon.deg, unit_sph.lat.deg
# verify input
if (type(proj_point) != type(world_coords)) and (proj_point != "center"):
raise ValueError(
"proj_point must be set to 'center', or an"
+ "`~astropy.coordinates.SkyCoord` object with "
+ "a pair of points."
)
use_center_as_proj_point = str(proj_point) == "center"
if not use_center_as_proj_point:
assert proj_point.size == 1
proj_codes = [
"AZP",
"SZP",
"TAN",
"STG",
"SIN",
"ARC",
"ZEA",
"AIR",
"CYP",
"CEA",
"CAR",
"MER",
"SFL",
"PAR",
"MOL",
"AIT",
"COP",
"COE",
"COD",
"COO",
"BON",
"PCO",
"TSC",
"CSC",
"QSC",
"HPX",
"XPH",
]
if isinstance(projection, str):
if projection not in proj_codes:
raise ValueError(
"Must specify valid projection code from list of "
+ "supported types: ",
", ".join(proj_codes),
)
# empty wcs to fill in with fit values
wcs = celestial_frame_to_wcs(frame=world_coords.frame, projection=projection)
else: # if projection is not string, should be wcs object. use as template.
wcs = copy.deepcopy(projection)
wcs.cdelt = (1.0, 1.0) # make sure cdelt is 1
wcs.sip = None
# Change PC to CD, since cdelt will be set to 1
if wcs.wcs.has_pc():
wcs.wcs.cd = wcs.wcs.pc
wcs.wcs.__delattr__("pc")
if sip_degree is not None and not isinstance(sip_degree, int):
    raise ValueError("sip_degree must be None or an integer.")
# compute bounding box for sources in image coordinates:
xpmin, xpmax, ypmin, ypmax = xp.min(), xp.max(), yp.min(), yp.max()
# set pixel_shape to span of input points
wcs.pixel_shape = (
1 if xpmax <= 0.0 else int(np.ceil(xpmax)),
1 if ypmax <= 0.0 else int(np.ceil(ypmax)),
)
# determine CRVAL from input
close = lambda l, p: p[np.argmin(np.abs(l))]
if use_center_as_proj_point: # use center of input points
sc1 = SkyCoord(lon.min() * u.deg, lat.max() * u.deg)
sc2 = SkyCoord(lon.max() * u.deg, lat.min() * u.deg)
pa = sc1.position_angle(sc2)
sep = sc1.separation(sc2)
midpoint_sc = sc1.directional_offset_by(pa, sep / 2)
wcs.wcs.crval = (midpoint_sc.data.lon.deg, midpoint_sc.data.lat.deg)
wcs.wcs.crpix = ((xpmax + xpmin) / 2.0, (ypmax + ypmin) / 2.0)
else: # convert units, initial guess for crpix
proj_point.transform_to(world_coords)
wcs.wcs.crval = (proj_point.data.lon.deg, proj_point.data.lat.deg)
wcs.wcs.crpix = (
close(lon - wcs.wcs.crval[0], xp + 1),
close(lat - wcs.wcs.crval[1], yp + 1),
)
# fit linear terms, assign to wcs
# use (1, 0, 0, 1) as initial guess, in case input wcs was passed in
# and cd terms are way off.
# Use bounds to require that the fit center pixel is on the input image
if xpmin == xpmax:
xpmin, xpmax = xpmin - 0.5, xpmax + 0.5
if ypmin == ypmax:
ypmin, ypmax = ypmin - 0.5, ypmax + 0.5
p0 = np.concatenate([wcs.wcs.cd.flatten(), wcs.wcs.crpix.flatten()])
fit = least_squares(
_linear_wcs_fit,
p0,
args=(lon, lat, xp, yp, wcs),
bounds=[
[-np.inf, -np.inf, -np.inf, -np.inf, xpmin + 1, ypmin + 1],
[np.inf, np.inf, np.inf, np.inf, xpmax + 1, ypmax + 1],
],
)
wcs.wcs.crpix = np.array(fit.x[4:6])
wcs.wcs.cd = np.array(fit.x[0:4].reshape((2, 2)))
# fit SIP, if specified. Only fit forward coefficients
if sip_degree:
degree = sip_degree
if "-SIP" not in wcs.wcs.ctype[0]:
wcs.wcs.ctype = [x + "-SIP" for x in wcs.wcs.ctype]
coef_names = [
f"{i}_{j}"
for i in range(degree + 1)
for j in range(degree + 1)
if (i + j) < (degree + 1) and (i + j) > 1
]
p0 = np.concatenate(
(
np.array(wcs.wcs.crpix),
wcs.wcs.cd.flatten(),
np.zeros(2 * len(coef_names)),
)
)
fit = least_squares(
_sip_fit,
p0,
args=(lon, lat, xp, yp, wcs, degree, coef_names),
bounds=[
[xpmin + 1, ypmin + 1] + [-np.inf] * (4 + 2 * len(coef_names)),
[xpmax + 1, ypmax + 1] + [np.inf] * (4 + 2 * len(coef_names)),
],
)
coef_fit = (
list(fit.x[6 : 6 + len(coef_names)]),
list(fit.x[6 + len(coef_names) :]),
)
# put fit values in wcs
wcs.wcs.cd = fit.x[2:6].reshape((2, 2))
wcs.wcs.crpix = fit.x[0:2]
a_vals = np.zeros((degree + 1, degree + 1))
b_vals = np.zeros((degree + 1, degree + 1))
for coef_name in coef_names:
a_vals[int(coef_name[0])][int(coef_name[2])] = coef_fit[0].pop(0)
b_vals[int(coef_name[0])][int(coef_name[2])] = coef_fit[1].pop(0)
wcs.sip = Sip(
a_vals,
b_vals,
np.zeros((degree + 1, degree + 1)),
np.zeros((degree + 1, degree + 1)),
wcs.wcs.crpix,
)
return wcs
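# Hedged usage sketch: fitting a TAN WCS to four matched detector/sky
# positions. The coordinates are fabricated for illustration (a real fit
# would use many more points), and scipy is required for the fit itself.
def _example_fit_wcs():  # pragma: no cover - illustrative only
    from astropy.coordinates import SkyCoord

    xy = (
        np.array([0.0, 100.0, 0.0, 100.0]),
        np.array([0.0, 0.0, 100.0, 100.0]),
    )
    world = SkyCoord(
        ra=[10.0, 10.1, 10.0, 10.1], dec=[20.0, 20.0, 20.1, 20.1], unit="deg"
    )
    return fit_wcs_from_points(xy, world)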
def obsgeo_to_frame(obsgeo, obstime):
"""
Convert a WCS obsgeo property into an ITRS coordinate frame.
Parameters
----------
obsgeo : array-like
A shape ``(6, )`` array representing ``OBSGEO-[XYZ], OBSGEO-[BLH]`` as
returned by ``WCS.wcs.obsgeo``.
obstime : time-like
The time associated with the coordinate, will be passed to
`~astropy.coordinates.ITRS` as the obstime keyword.
Returns
-------
~astropy.coordinates.ITRS
An `~astropy.coordinates.ITRS` coordinate frame
representing the coordinates.
Notes
-----
The obsgeo array as accessed on a `.WCS` object is a length 6 numpy array
where the first three elements are the coordinate in a cartesian
representation and the second 3 are the coordinate in a spherical
representation.
This function prioritizes reading the cartesian coordinates, and will only
read the spherical coordinates if the cartesian coordinates are either all
zero or any of them are non-finite.
In the case where both the spherical and cartesian coordinates have some
non-finite values the spherical coordinates will be returned with the
non-finite values included.
"""
if (
obsgeo is None
or len(obsgeo) != 6
or np.all(np.array(obsgeo) == 0)
or np.all(~np.isfinite(obsgeo))
):
raise ValueError(
f"Can not parse the 'obsgeo' location ({obsgeo}). "
"obsgeo should be a length 6 non-zero, finite numpy array"
)
# If the cartesian coords are zero or have NaNs in them use the spherical ones
if np.all(obsgeo[:3] == 0) or np.any(~np.isfinite(obsgeo[:3])):
data = SphericalRepresentation(*(obsgeo[3:] * (u.deg, u.deg, u.m)))
# Otherwise we assume the cartesian ones are valid
else:
data = CartesianRepresentation(*obsgeo[:3] * u.m)
return ITRS(data, obstime=obstime)
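# Hedged usage sketch: building an ITRS frame from a cartesian OBSGEO vector.
# The location (on the equator at the prime meridian) and the observation
# time are illustrative assumptions.
def _example_obsgeo():  # pragma: no cover - illustrative only
    obsgeo = np.array([6378137.0, 0.0, 0.0, 0.0, 0.0, 0.0])
    return obsgeo_to_frame(obsgeo, obstime="2020-01-01")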
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Under the hood, there are 3 separate classes that perform different
# parts of the transformation:
#
# - `~astropy.wcs.Wcsprm`: Is a direct wrapper of the core WCS
# functionality in `wcslib`_. (This includes TPV and TPD
# polynomial distortion, but not SIP distortion).
#
# - `~astropy.wcs.Sip`: Handles polynomial distortion as defined in the
# `SIP`_ convention.
#
# - `~astropy.wcs.DistortionLookupTable`: Handles `distortion paper`_
# lookup tables.
#
# Additionally, the class `WCS` aggregates all of these transformations
# together in a pipeline:
#
# - Detector to image plane correction (by a pair of
# `~astropy.wcs.DistortionLookupTable` objects).
#
# - `SIP`_ distortion correction (by an underlying `~astropy.wcs.Sip`
# object)
#
# - `distortion paper`_ table-lookup correction (by a pair of
# `~astropy.wcs.DistortionLookupTable` objects).
#
# - `wcslib`_ WCS transformation (by a `~astropy.wcs.Wcsprm` object)
# STDLIB
import builtins
import copy
import io
import itertools
import os
import re
import textwrap
import uuid
import warnings
# THIRD-PARTY
import numpy as np
from packaging.version import Version
# LOCAL
from astropy import log
from astropy import units as u
from astropy.io import fits
from astropy.utils.decorators import deprecated_renamed_argument
from astropy.utils.exceptions import (
AstropyDeprecationWarning,
AstropyUserWarning,
AstropyWarning,
)
from . import _wcs, docstrings
# Mix-in class that provides the APE 14 API
from .wcsapi.fitswcs import FITSWCSAPIMixin, SlicedFITSWCS
__all__ = [
"FITSFixedWarning",
"WCS",
"find_all_wcs",
"DistortionLookupTable",
"Sip",
"Tabprm",
"Wcsprm",
"Auxprm",
"Celprm",
"Prjprm",
"Wtbarr",
"WCSBase",
"validate",
"WcsError",
"SingularMatrixError",
"InconsistentAxisTypesError",
"InvalidTransformError",
"InvalidCoordinateError",
"InvalidPrjParametersError",
"NoSolutionError",
"InvalidSubimageSpecificationError",
"NoConvergence",
"NonseparableSubimageCoordinateSystemError",
"NoWcsKeywordsFoundError",
"InvalidTabularParametersError",
]
__doctest_skip__ = ["WCS.all_world2pix"]
if _wcs is not None:
if Version(_wcs.__version__) < Version("5.8"):
raise ImportError(
"astropy.wcs is built with wcslib {0}, but only versions 5.8 and "
"later on the 5.x series are known to work. The version of wcslib "
"that ships with astropy may be used."
)
if not _wcs._sanity_check():
raise RuntimeError(
"astropy.wcs did not pass its sanity check for your build on your platform."
)
_WCSSUB_TIME_SUPPORT = Version(_wcs.__version__) >= Version("7.8")
_WCS_TPD_WARN_LT71 = Version(_wcs.__version__) < Version("7.1")
_WCS_TPD_WARN_LT74 = Version(_wcs.__version__) < Version("7.4")
WCSBase = _wcs._Wcs
DistortionLookupTable = _wcs.DistortionLookupTable
Sip = _wcs.Sip
Wcsprm = _wcs.Wcsprm
Auxprm = _wcs.Auxprm
Celprm = _wcs.Celprm
Prjprm = _wcs.Prjprm
Tabprm = _wcs.Tabprm
Wtbarr = _wcs.Wtbarr
WcsError = _wcs.WcsError
SingularMatrixError = _wcs.SingularMatrixError
InconsistentAxisTypesError = _wcs.InconsistentAxisTypesError
InvalidTransformError = _wcs.InvalidTransformError
InvalidCoordinateError = _wcs.InvalidCoordinateError
NoSolutionError = _wcs.NoSolutionError
InvalidSubimageSpecificationError = _wcs.InvalidSubimageSpecificationError
NonseparableSubimageCoordinateSystemError = (
_wcs.NonseparableSubimageCoordinateSystemError
)
NoWcsKeywordsFoundError = _wcs.NoWcsKeywordsFoundError
InvalidTabularParametersError = _wcs.InvalidTabularParametersError
InvalidPrjParametersError = _wcs.InvalidPrjParametersError
# Copy all the constants from the C extension into this module's namespace
for key, val in _wcs.__dict__.items():
if key.startswith(("WCSSUB_", "WCSHDR_", "WCSHDO_", "WCSCOMPARE_", "PRJ_")):
locals()[key] = val
__all__.append(key)
# Set coordinate extraction callback for WCS -TAB:
def _load_tab_bintable(hdulist, extnam, extver, extlev, kind, ttype, row, ndim):
arr = hdulist[(extnam, extver)].data[ttype][row - 1]
if arr.ndim != ndim:
if kind == "c" and ndim == 2:
arr = arr.reshape((arr.size, 1))
else:
raise ValueError("Bad TDIM")
return np.ascontiguousarray(arr, dtype=np.double)
_wcs.set_wtbarr_fitsio_callback(_load_tab_bintable)
else:
WCSBase = object
Wcsprm = object
DistortionLookupTable = object
Sip = object
Tabprm = object
Wtbarr = object
WcsError = None
SingularMatrixError = None
InconsistentAxisTypesError = None
InvalidTransformError = None
InvalidCoordinateError = None
NoSolutionError = None
InvalidSubimageSpecificationError = None
NonseparableSubimageCoordinateSystemError = None
NoWcsKeywordsFoundError = None
InvalidTabularParametersError = None
_WCSSUB_TIME_SUPPORT = False
_WCS_TPD_WARN_LT71 = False
_WCS_TPD_WARN_LT74 = False
# Additional relax bit flags
WCSHDO_SIP = 0x80000
# Regular expression defining a SIP keyword. It matches keywords that start
# with A or B, optionally followed by P, followed by an underscore, then a
# number in the range 0-19, followed by an underscore and another number in
# the range 0-19. The keyword optionally ends with a capital letter.
SIP_KW = re.compile("""^[AB]P?_1?[0-9]_1?[0-9][A-Z]?$""")
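# For example (illustrative): SIP_KW.match("A_0_2") and SIP_KW.match("BP_1_1")
# both succeed, while SIP_KW.match("A_20_0") does not (20 is out of range).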
def _parse_keysel(keysel):
keysel_flags = 0
if keysel is not None:
for element in keysel:
if element.lower() == "image":
keysel_flags |= _wcs.WCSHDR_IMGHEAD
elif element.lower() == "binary":
keysel_flags |= _wcs.WCSHDR_BIMGARR
elif element.lower() == "pixel":
keysel_flags |= _wcs.WCSHDR_PIXLIST
else:
raise ValueError(
"keysel must be a list of 'image', 'binary' and/or 'pixel'"
)
else:
keysel_flags = -1
return keysel_flags
class NoConvergence(Exception):
"""
An error class used to report non-convergence and/or divergence
of numerical methods. It is used to report errors in the
iterative solution used by
the :py:meth:`~astropy.wcs.WCS.all_world2pix`.
Attributes
----------
best_solution : `numpy.ndarray`
Best solution achieved by the numerical method.
accuracy : `numpy.ndarray`
Accuracy of the ``best_solution``.
niter : `int`
Number of iterations performed by the numerical method
to compute ``best_solution``.
divergent : None, `numpy.ndarray`
Indices of the points in ``best_solution`` array
for which the solution appears to be divergent. If the
solution does not diverge, ``divergent`` will be set to `None`.
slow_conv : None, `numpy.ndarray`
Indices of the solutions in ``best_solution`` array
for which the solution failed to converge within the
specified maximum number of iterations. If there are no
non-converging solutions (i.e., if the required accuracy
has been achieved for all input data points)
then ``slow_conv`` will be set to `None`.
"""
def __init__(
self,
*args,
best_solution=None,
accuracy=None,
niter=None,
divergent=None,
slow_conv=None,
**kwargs,
):
super().__init__(*args)
self.best_solution = best_solution
self.accuracy = accuracy
self.niter = niter
self.divergent = divergent
self.slow_conv = slow_conv
if kwargs:
warnings.warn(
f"Function received unexpected arguments ({list(kwargs)}) these "
"are ignored but will raise an Exception in the "
"future.",
AstropyDeprecationWarning,
)
class FITSFixedWarning(AstropyWarning):
"""
The warning raised when the contents of the FITS header have been
modified to be standards compliant.
"""
pass
class WCS(FITSWCSAPIMixin, WCSBase):
"""WCS objects perform standard WCS transformations, and correct for
`SIP`_ and `distortion paper`_ table-lookup transformations, based
on the WCS keywords and supplementary data read from a FITS file.
See also: https://docs.astropy.org/en/stable/wcs/
Parameters
----------
header : `~astropy.io.fits.Header`, `~astropy.io.fits.hdu.image.PrimaryHDU`, `~astropy.io.fits.hdu.image.ImageHDU`, str, dict-like, or None, optional
If *header* is not provided or None, the object will be
initialized to default values.
fobj : `~astropy.io.fits.HDUList`, optional
It is needed when header keywords point to a `distortion
paper`_ lookup table stored in a different extension.
key : str, optional
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the
``\"a\"`` part of the ``CTYPEia`` cards. *key* may only be
provided if *header* is also provided.
minerr : float, optional
The minimum value a distortion correction must have in order
to be applied. If the value of ``CQERRja`` is smaller than
*minerr*, the corresponding distortion is not applied.
relax : bool or int, optional
Degree of permissiveness:
- `True` (default): Admit all recognized informal extensions
of the WCS standard.
- `False`: Recognize only FITS keywords defined by the
published WCS standard.
- `int`: a bit field selecting specific extensions to accept.
See :ref:`astropy:relaxread` for details.
naxis : int or sequence, optional
Extracts specific coordinate axes using
:meth:`~astropy.wcs.Wcsprm.sub`. If a header is provided, and
*naxis* is not ``None``, *naxis* will be passed to
:meth:`~astropy.wcs.Wcsprm.sub` in order to select specific
axes from the header. See :meth:`~astropy.wcs.Wcsprm.sub` for
more details about this parameter.
keysel : sequence of str, optional
A sequence of flags used to select the keyword types
considered by wcslib. When ``None``, only the standard image
header keywords are considered (and the underlying wcspih() C
function is called). To use binary table image array or pixel
list keywords, *keysel* must be set.
Each element in the list should be one of the following
strings:
- 'image': Image header keywords
- 'binary': Binary table image array keywords
- 'pixel': Pixel list keywords
Keywords such as ``EQUIna`` or ``RFRQna`` that are common to
binary table image arrays and pixel lists (including
``WCSNna`` and ``TWCSna``) are selected by both 'binary' and
'pixel'.
colsel : sequence of int, optional
A sequence of table column numbers used to restrict the WCS
transformations considered to only those pertaining to the
specified columns. If `None`, there is no restriction.
fix : bool, optional
When `True` (default), call `~astropy.wcs.Wcsprm.fix` on
the resulting object to fix any non-standard uses in the
header. `FITSFixedWarning` Warnings will be emitted if any
changes were made.
translate_units : str, optional
Specify which potentially unsafe translations of non-standard
unit strings to perform. By default, performs none. See
`WCS.fix` for more information about this parameter. Only
effective when ``fix`` is `True`.
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid key.
KeyError
Key not found in FITS header.
ValueError
Lookup table distortion present in the header but *fobj* was
not provided.
Notes
-----
1. astropy.wcs supports arbitrary *n* dimensions for the core WCS
(the transformations handled by WCSLIB). However, the
`distortion paper`_ lookup table and `SIP`_ distortions must be
two dimensional. Therefore, if you try to create a WCS object
where the core WCS has a different number of dimensions than 2
and that object also contains a `distortion paper`_ lookup
table or `SIP`_ distortion, a `ValueError`
exception will be raised. To avoid this, consider using the
*naxis* kwarg to select two dimensions from the core WCS.
2. The number of coordinate axes in the transformation is not
determined directly from the ``NAXIS`` keyword but instead from
the highest of:
- ``NAXIS`` keyword
- ``WCSAXESa`` keyword
- The highest axis number in any parameterized WCS keyword.
       The keyvalue, as well as the keyword, must be
       syntactically valid, otherwise it will not be considered.
If none of these keyword types is present, i.e. if the header
only contains auxiliary WCS keywords for a particular
coordinate representation, then no coordinate description is
constructed for it.
The number of axes, which is set as the ``naxis`` member, may
differ for different coordinate representations of the same
image.
3. When the header includes duplicate keywords, in most cases the
last encountered is used.
4. `~astropy.wcs.Wcsprm.set` is called immediately after
construction, so any invalid keywords or transformations will
be raised by the constructor, not when subsequently calling a
transformation method.
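    Examples
    --------
    A minimal, illustrative sketch; all header values below are
    hypothetical::

        from astropy.io import fits
        from astropy.wcs import WCS

        header = fits.Header()
        header["CTYPE1"], header["CTYPE2"] = "RA---TAN", "DEC--TAN"
        header["CRPIX1"], header["CRPIX2"] = 512.0, 512.0
        header["CRVAL1"], header["CRVAL2"] = 10.684, 41.269
        header["CDELT1"], header["CDELT2"] = -2.8e-4, 2.8e-4
        w = WCS(header)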
"""
def __init__(
self,
header=None,
fobj=None,
key=" ",
minerr=0.0,
relax=True,
naxis=None,
keysel=None,
colsel=None,
fix=True,
translate_units="",
_do_set=True,
):
close_fds = []
# these parameters are stored to be used when unpickling a WCS object:
self._init_kwargs = {
"keysel": copy.copy(keysel),
"colsel": copy.copy(colsel),
}
if header is None:
if naxis is None:
naxis = 2
wcsprm = _wcs.Wcsprm(header=None, key=key, relax=relax, naxis=naxis)
self.naxis = wcsprm.naxis
# Set some reasonable defaults.
det2im = (None, None)
cpdis = (None, None)
sip = None
else:
keysel_flags = _parse_keysel(keysel)
if isinstance(header, (str, bytes)):
try:
is_path = os.path.exists(header)
except (OSError, ValueError):
is_path = False
if is_path:
if fobj is not None:
raise ValueError(
"Can not provide both a FITS filename to "
"argument 1 and a FITS file object to argument 2"
)
fobj = fits.open(header)
close_fds.append(fobj)
header = fobj[0].header
elif isinstance(header, fits.hdu.image._ImageBaseHDU):
header = header.header
elif not isinstance(header, fits.Header):
try:
# Accept any dict-like object
orig_header = header
header = fits.Header()
for dict_key in orig_header.keys():
header[dict_key] = orig_header[dict_key]
except TypeError:
raise TypeError(
"header must be a string, an astropy.io.fits.Header "
"object, or a dict-like object"
)
if isinstance(header, fits.Header):
header_string = header.tostring().rstrip()
else:
header_string = header
# Importantly, header is a *copy* of the passed-in header
# because we will be modifying it
            if isinstance(header_string, str):
                header_bytes = header_string.encode("ascii")
            else:
                header_bytes = header_string
                header_string = header_string.decode("ascii")
if not (fobj is None or isinstance(fobj, fits.HDUList)):
raise AssertionError(
"'fobj' must be either None or an astropy.io.fits.HDUList object."
)
est_naxis = 2
try:
tmp_header = fits.Header.fromstring(header_string)
self._remove_sip_kw(tmp_header)
tmp_header_bytes = tmp_header.tostring().rstrip()
if isinstance(tmp_header_bytes, str):
tmp_header_bytes = tmp_header_bytes.encode("ascii")
tmp_wcsprm = _wcs.Wcsprm(
header=tmp_header_bytes,
key=key,
relax=relax,
keysel=keysel_flags,
colsel=colsel,
warnings=False,
hdulist=fobj,
)
if naxis is not None:
try:
tmp_wcsprm = tmp_wcsprm.sub(naxis)
except ValueError:
pass
est_naxis = tmp_wcsprm.naxis if tmp_wcsprm.naxis else 2
except _wcs.NoWcsKeywordsFoundError:
pass
self.naxis = est_naxis
header = fits.Header.fromstring(header_string)
det2im = self._read_det2im_kw(header, fobj, err=minerr)
cpdis = self._read_distortion_kw(header, fobj, dist="CPDIS", err=minerr)
sip = self._read_sip_kw(header, wcskey=key)
self._remove_sip_kw(header)
header_string = header.tostring()
header_string = header_string.replace("END" + " " * 77, "")
            if isinstance(header_string, str):
                header_bytes = header_string.encode("ascii")
            else:
                header_bytes = header_string
                header_string = header_string.decode("ascii")
try:
wcsprm = _wcs.Wcsprm(
header=header_bytes,
key=key,
relax=relax,
keysel=keysel_flags,
colsel=colsel,
hdulist=fobj,
)
except _wcs.NoWcsKeywordsFoundError:
# The header may have SIP or distortions, but no core
# WCS. That isn't an error -- we want a "default"
# (identity) core Wcs transformation in that case.
if colsel is None:
wcsprm = _wcs.Wcsprm(
header=None,
key=key,
relax=relax,
keysel=keysel_flags,
colsel=colsel,
hdulist=fobj,
)
else:
raise
if naxis is not None:
wcsprm = wcsprm.sub(naxis)
self.naxis = wcsprm.naxis
if wcsprm.naxis != 2 and (
det2im[0] or det2im[1] or cpdis[0] or cpdis[1] or sip
):
raise ValueError(
f"""
FITS WCS distortion paper lookup tables and SIP distortions only work
in 2 dimensions. However, WCSLIB has detected {wcsprm.naxis} dimensions in the
core WCS keywords. To use core WCS in conjunction with FITS WCS
distortion paper lookup tables or SIP distortion, you must select or
reduce these to 2 dimensions using the naxis kwarg.
"""
)
header_naxis = header.get("NAXIS", None)
if header_naxis is not None and header_naxis < wcsprm.naxis:
warnings.warn(
f"The WCS transformation has more axes ({wcsprm.naxis:d}) than the "
f"image it is associated with ({header_naxis:d})",
FITSFixedWarning,
)
self._get_naxis(header)
WCSBase.__init__(self, sip, cpdis, wcsprm, det2im)
if fix:
if header is None:
with warnings.catch_warnings():
warnings.simplefilter("ignore", FITSFixedWarning)
self.fix(translate_units=translate_units)
else:
self.fix(translate_units=translate_units)
if _do_set:
self.wcs.set()
for fd in close_fds:
fd.close()
self._pixel_bounds = None
def __copy__(self):
new_copy = self.__class__()
WCSBase.__init__(
new_copy,
self.sip,
(self.cpdis1, self.cpdis2),
self.wcs,
(self.det2im1, self.det2im2),
)
new_copy.__dict__.update(self.__dict__)
return new_copy
def __deepcopy__(self, memo):
from copy import deepcopy
new_copy = self.__class__()
new_copy.naxis = deepcopy(self.naxis, memo)
WCSBase.__init__(
new_copy,
deepcopy(self.sip, memo),
(deepcopy(self.cpdis1, memo), deepcopy(self.cpdis2, memo)),
deepcopy(self.wcs, memo),
(deepcopy(self.det2im1, memo), deepcopy(self.det2im2, memo)),
)
for key, val in self.__dict__.items():
new_copy.__dict__[key] = deepcopy(val, memo)
return new_copy
def copy(self):
"""
Return a shallow copy of the object.
Convenience method so user doesn't have to import the
:mod:`copy` stdlib module.
.. warning::
Use `deepcopy` instead of `copy` unless you know why you need a
shallow copy.
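        For example (illustrative): after ``w2 = w.copy()``, the underlying
        ``wcs`` attribute is shared between ``w`` and ``w2``, so modifying
        ``w2.wcs.crpix`` also changes ``w.wcs.crpix``.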
"""
return copy.copy(self)
def deepcopy(self):
"""
Return a deep copy of the object.
Convenience method so user doesn't have to import the
:mod:`copy` stdlib module.
"""
return copy.deepcopy(self)
def sub(self, axes=None):
copy = self.deepcopy()
# We need to know which axes have been dropped, but there is no easy
# way to do this with the .sub function, so instead we assign UUIDs to
# the CNAME parameters in copy.wcs. We can later access the original
# CNAME properties from self.wcs.
cname_uuid = [str(uuid.uuid4()) for i in range(copy.wcs.naxis)]
copy.wcs.cname = cname_uuid
# Subset the WCS
copy.wcs = copy.wcs.sub(axes)
copy.naxis = copy.wcs.naxis
# Construct a list of dimensions from the original WCS in the order
# in which they appear in the final WCS.
keep = [
cname_uuid.index(cname) if cname in cname_uuid else None
for cname in copy.wcs.cname
]
# Restore the original CNAMEs
copy.wcs.cname = ["" if i is None else self.wcs.cname[i] for i in keep]
# Subset pixel_shape and pixel_bounds
if self.pixel_shape:
copy.pixel_shape = tuple(
None if i is None else self.pixel_shape[i] for i in keep
)
if self.pixel_bounds:
copy.pixel_bounds = [
None if i is None else self.pixel_bounds[i] for i in keep
]
return copy
if _wcs is not None:
sub.__doc__ = _wcs.Wcsprm.sub.__doc__
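    # Illustrative sketch (hypothetical 3-D WCS ``w``): ``w.sub([1, 2])``
    # keeps the first two axes, and ``w.sub([WCSSUB_CELESTIAL])`` selects the
    # celestial axes; pixel_shape and pixel_bounds are subset to match.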
def _fix_scamp(self):
"""
Remove SCAMP's PVi_m distortion parameters if SIP distortion parameters
are also present. Some projects (e.g., Palomar Transient Factory)
convert SCAMP's distortion parameters (which abuse the PVi_m cards) to
SIP. However, wcslib gets confused by the presence of both SCAMP and
SIP distortion parameters.
See https://github.com/astropy/astropy/issues/299.
"""
# Nothing to be done if no WCS attached
if self.wcs is None:
return
# Nothing to be done if no PV parameters attached
pv = self.wcs.get_pv()
if not pv:
return
# Nothing to be done if axes don't use SIP distortion parameters
if self.sip is None:
return
# Nothing to be done if any radial terms are present...
# Loop over list to find any radial terms.
# Certain values of the `j' index are used for storing
# radial terms; refer to Equation (1) in
# <http://web.ipac.caltech.edu/staff/shupe/reprints/SIP_to_PV_SPIE2012.pdf>.
pv = np.asarray(pv)
# Loop over distinct values of `i' index
for i in set(pv[:, 0]):
# Get all values of `j' index for this value of `i' index
js = set(pv[:, 1][pv[:, 0] == i])
# Find max value of `j' index
max_j = max(js)
for j in (3, 11, 23, 39):
if j < max_j and j in js:
return
self.wcs.set_pv([])
warnings.warn(
"Removed redundant SCAMP distortion parameters "
+ "because SIP parameters are also present",
FITSFixedWarning,
)
def fix(self, translate_units="", naxis=None):
"""
Perform the fix operations from wcslib, and warn about any
changes it has made.
Parameters
----------
translate_units : str, optional
Specify which potentially unsafe translations of
non-standard unit strings to perform. By default,
performs none.
Although ``"S"`` is commonly used to represent seconds,
its translation to ``"s"`` is potentially unsafe since the
standard recognizes ``"S"`` formally as Siemens, however
rarely that may be used. The same applies to ``"H"`` for
hours (Henry), and ``"D"`` for days (Debye).
This string controls what to do in such cases, and is
case-insensitive.
- If the string contains ``"s"``, translate ``"S"`` to
``"s"``.
- If the string contains ``"h"``, translate ``"H"`` to
``"h"``.
- If the string contains ``"d"``, translate ``"D"`` to
``"d"``.
Thus ``''`` doesn't do any unsafe translations, whereas
``'shd'`` does all of them.
naxis : int array, optional
Image axis lengths. If this array is set to zero or
``None``, then `~astropy.wcs.Wcsprm.cylfix` will not be
invoked.
"""
if self.wcs is not None:
self._fix_scamp()
fixes = self.wcs.fix(translate_units, naxis)
for key, val in fixes.items():
if val != "No change":
if (
key == "datfix"
and "1858-11-17" in val
and not np.count_nonzero(self.wcs.mjdref)
):
continue
warnings.warn(
f"'{key}' made the change '{val}'.",
FITSFixedWarning,
)
def calc_footprint(self, header=None, undistort=True, axes=None, center=True):
"""
Calculates the footprint of the image on the sky.
A footprint is defined as the positions of the corners of the
image on the sky after all available distortions have been
applied.
Parameters
----------
header : `~astropy.io.fits.Header` object, optional
            Used to get ``NAXIS1`` and ``NAXIS2``.
            *header* and *axes* are mutually exclusive, alternative
            ways to provide the same information.
undistort : bool, optional
If `True`, take SIP and distortion lookup table into
account
axes : (int, int), optional
If provided, use the given sequence as the shape of the
image. Otherwise, use the ``NAXIS1`` and ``NAXIS2``
keywords from the header that was used to create this
`WCS` object.
center : bool, optional
If `True` use the center of the pixel, otherwise use the corner.
Returns
-------
coord : (4, 2) array of (*x*, *y*) coordinates.
The order is clockwise starting with the bottom left corner.
"""
if axes is not None:
naxis1, naxis2 = axes
else:
if header is None:
try:
# classes that inherit from WCS and define naxis1/2
# do not require a header parameter
naxis1, naxis2 = self.pixel_shape
except (AttributeError, TypeError):
warnings.warn(
"Need a valid header in order to calculate footprint\n",
AstropyUserWarning,
)
return None
else:
naxis1 = header.get("NAXIS1", None)
naxis2 = header.get("NAXIS2", None)
if naxis1 is None or naxis2 is None:
raise ValueError("Image size could not be determined.")
if center:
corners = np.array(
[[1, 1], [1, naxis2], [naxis1, naxis2], [naxis1, 1]], dtype=np.float64
)
else:
corners = np.array(
[
[0.5, 0.5],
[0.5, naxis2 + 0.5],
[naxis1 + 0.5, naxis2 + 0.5],
[naxis1 + 0.5, 0.5],
],
dtype=np.float64,
)
if undistort:
return self.all_pix2world(corners, 1)
else:
return self.wcs_pix2world(corners, 1)
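    # Illustrative sketch (assumes ``w`` is a 2-D celestial WCS):
    #
    #     footprint = w.calc_footprint(axes=(1024, 1024))
    #
    # returns a (4, 2) array of sky coordinates for the image corners.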
def _read_det2im_kw(self, header, fobj, err=0.0):
"""
Create a `distortion paper`_ type lookup table for detector to
image plane correction.
"""
if fobj is None:
return (None, None)
if not isinstance(fobj, fits.HDUList):
return (None, None)
try:
axiscorr = header["AXISCORR"]
d2imdis = self._read_d2im_old_format(header, fobj, axiscorr)
return d2imdis
except KeyError:
pass
dist = "D2IMDIS"
d_kw = "D2IM"
err_kw = "D2IMERR"
tables = {}
for i in range(1, self.naxis + 1):
d_error = header.get(err_kw + str(i), 0.0)
if d_error < err:
tables[i] = None
continue
distortion = dist + str(i)
if distortion in header:
dis = header[distortion].lower()
if dis == "lookup":
del header[distortion]
                    assert isinstance(fobj, fits.HDUList), (
                        "An astropy.io.fits.HDUList "
                        "is required for Lookup table distortion."
                    )
dp = (d_kw + str(i)).strip()
dp_extver_key = dp + ".EXTVER"
if dp_extver_key in header:
d_extver = header[dp_extver_key]
del header[dp_extver_key]
else:
d_extver = 1
dp_axis_key = dp + f".AXIS.{i:d}"
if i == header[dp_axis_key]:
d_data = fobj["D2IMARR", d_extver].data
else:
d_data = (fobj["D2IMARR", d_extver].data).transpose()
del header[dp_axis_key]
d_header = fobj["D2IMARR", d_extver].header
d_crpix = (d_header.get("CRPIX1", 0.0), d_header.get("CRPIX2", 0.0))
d_crval = (d_header.get("CRVAL1", 0.0), d_header.get("CRVAL2", 0.0))
d_cdelt = (d_header.get("CDELT1", 1.0), d_header.get("CDELT2", 1.0))
d_lookup = DistortionLookupTable(d_data, d_crpix, d_crval, d_cdelt)
tables[i] = d_lookup
else:
warnings.warn(
"Polynomial distortion is not implemented.\n",
AstropyUserWarning,
)
for key in set(header):
if key.startswith(dp + "."):
del header[key]
else:
tables[i] = None
if not tables:
return (None, None)
else:
return (tables.get(1), tables.get(2))
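    # Illustrative header sketch (hypothetical values) that this reader
    # understands; the correction data live in a 'D2IMARR' image extension:
    #
    #     D2IMDIS1 = 'LOOKUP'   (detector-to-image correction type, axis 1)
    #     D2IM1.EXTVER = 1      (EXTVER of the D2IMARR extension to use)
    #     D2IM1.AXIS.1 = 1      (axis number of the variable in the function)
    #     D2IMERR1 = 0.1        (maximum correction error; the table is
    #                            skipped if this is below *err*)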
def _read_d2im_old_format(self, header, fobj, axiscorr):
warnings.warn(
"The use of ``AXISCORR`` for D2IM correction has been"
" deprecated.`~astropy.wcs` will read in files with ``AXISCORR`` but"
" ``to_fits()`` will write out files without it.",
AstropyDeprecationWarning,
)
cpdis = [None, None]
crpix = [0.0, 0.0]
crval = [0.0, 0.0]
cdelt = [1.0, 1.0]
try:
d2im_data = fobj[("D2IMARR", 1)].data
except KeyError:
return (None, None)
except AttributeError:
return (None, None)
d2im_data = np.array([d2im_data])
d2im_hdr = fobj[("D2IMARR", 1)].header
naxis = d2im_hdr["NAXIS"]
for i in range(1, naxis + 1):
crpix[i - 1] = d2im_hdr.get("CRPIX" + str(i), 0.0)
crval[i - 1] = d2im_hdr.get("CRVAL" + str(i), 0.0)
cdelt[i - 1] = d2im_hdr.get("CDELT" + str(i), 1.0)
cpdis = DistortionLookupTable(d2im_data, crpix, crval, cdelt)
if axiscorr == 1:
return (cpdis, None)
elif axiscorr == 2:
return (None, cpdis)
else:
warnings.warn("Expected AXISCORR to be 1 or 2", AstropyUserWarning)
return (None, None)
def _write_det2im(self, hdulist):
"""
Writes a `distortion paper`_ type lookup table to the given
`~astropy.io.fits.HDUList`.
"""
if self.det2im1 is None and self.det2im2 is None:
return
dist = "D2IMDIS"
d_kw = "D2IM"
def write_d2i(num, det2im):
if det2im is None:
return
hdulist[0].header[f"{dist}{num:d}"] = (
"LOOKUP",
"Detector to image correction type",
)
hdulist[0].header[f"{d_kw}{num:d}.EXTVER"] = (
num,
"Version number of WCSDVARR extension",
)
hdulist[0].header[f"{d_kw}{num:d}.NAXES"] = (
len(det2im.data.shape),
"Number of independent variables in D2IM function",
)
for i in range(det2im.data.ndim):
jth = {1: "1st", 2: "2nd", 3: "3rd"}.get(i + 1, f"{i + 1}th")
hdulist[0].header[f"{d_kw}{num:d}.AXIS.{i + 1:d}"] = (
i + 1,
f"Axis number of the {jth} variable in a D2IM function",
)
image = fits.ImageHDU(det2im.data, name="D2IMARR")
header = image.header
header["CRPIX1"] = (det2im.crpix[0], "Coordinate system reference pixel")
header["CRPIX2"] = (det2im.crpix[1], "Coordinate system reference pixel")
header["CRVAL1"] = (
det2im.crval[0],
"Coordinate system value at reference pixel",
)
header["CRVAL2"] = (
det2im.crval[1],
"Coordinate system value at reference pixel",
)
header["CDELT1"] = (det2im.cdelt[0], "Coordinate increment along axis")
header["CDELT2"] = (det2im.cdelt[1], "Coordinate increment along axis")
image.ver = int(hdulist[0].header[f"{d_kw}{num:d}.EXTVER"])
hdulist.append(image)
write_d2i(1, self.det2im1)
write_d2i(2, self.det2im2)
def _read_distortion_kw(self, header, fobj, dist="CPDIS", err=0.0):
"""
Reads `distortion paper`_ table-lookup keywords and data, and
returns a 2-tuple of `~astropy.wcs.DistortionLookupTable`
objects.
If no `distortion paper`_ keywords are found, ``(None, None)``
is returned.
"""
if isinstance(header, (str, bytes)):
return (None, None)
if dist == "CPDIS":
d_kw = "DP"
err_kw = "CPERR"
else:
d_kw = "DQ"
err_kw = "CQERR"
tables = {}
for i in range(1, self.naxis + 1):
d_error_key = err_kw + str(i)
if d_error_key in header:
d_error = header[d_error_key]
del header[d_error_key]
else:
d_error = 0.0
if d_error < err:
tables[i] = None
continue
distortion = dist + str(i)
if distortion in header:
dis = header[distortion].lower()
del header[distortion]
if dis == "lookup":
if not isinstance(fobj, fits.HDUList):
raise ValueError(
"an astropy.io.fits.HDUList is "
"required for Lookup table distortion."
)
dp = (d_kw + str(i)).strip()
dp_extver_key = dp + ".EXTVER"
if dp_extver_key in header:
d_extver = header[dp_extver_key]
del header[dp_extver_key]
else:
d_extver = 1
dp_axis_key = dp + f".AXIS.{i:d}"
if i == header[dp_axis_key]:
d_data = fobj["WCSDVARR", d_extver].data
else:
d_data = (fobj["WCSDVARR", d_extver].data).transpose()
del header[dp_axis_key]
d_header = fobj["WCSDVARR", d_extver].header
d_crpix = (d_header.get("CRPIX1", 0.0), d_header.get("CRPIX2", 0.0))
d_crval = (d_header.get("CRVAL1", 0.0), d_header.get("CRVAL2", 0.0))
d_cdelt = (d_header.get("CDELT1", 1.0), d_header.get("CDELT2", 1.0))
d_lookup = DistortionLookupTable(d_data, d_crpix, d_crval, d_cdelt)
tables[i] = d_lookup
for key in set(header):
if key.startswith(dp + "."):
del header[key]
else:
warnings.warn(
"Polynomial distortion is not implemented.\n",
AstropyUserWarning,
)
else:
tables[i] = None
if not tables:
return (None, None)
else:
return (tables.get(1), tables.get(2))
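    # Illustrative header sketch (hypothetical values) for a CPDIS lookup
    # table on axis 1; the correction data live in a 'WCSDVARR' extension:
    #
    #     CPDIS1 = 'LOOKUP'
    #     DP1.EXTVER = 1
    #     DP1.AXIS.1 = 1
    #     CPERR1 = 0.1   (maximum correction error; skipped if below *err*)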
def _write_distortion_kw(self, hdulist, dist="CPDIS"):
"""
Write out `distortion paper`_ keywords to the given
`~astropy.io.fits.HDUList`.
"""
if self.cpdis1 is None and self.cpdis2 is None:
return
if dist == "CPDIS":
d_kw = "DP"
else:
d_kw = "DQ"
def write_dist(num, cpdis):
if cpdis is None:
return
hdulist[0].header[f"{dist}{num:d}"] = (
"LOOKUP",
"Prior distortion function type",
)
hdulist[0].header[f"{d_kw}{num:d}.EXTVER"] = (
num,
"Version number of WCSDVARR extension",
)
hdulist[0].header[f"{d_kw}{num:d}.NAXES"] = (
len(cpdis.data.shape),
f"Number of independent variables in {dist} function",
)
for i in range(cpdis.data.ndim):
jth = {1: "1st", 2: "2nd", 3: "3rd"}.get(i + 1, f"{i + 1}th")
hdulist[0].header[f"{d_kw}{num:d}.AXIS.{i + 1:d}"] = (
i + 1,
f"Axis number of the {jth} variable in a {dist} function",
)
image = fits.ImageHDU(cpdis.data, name="WCSDVARR")
header = image.header
header["CRPIX1"] = (cpdis.crpix[0], "Coordinate system reference pixel")
header["CRPIX2"] = (cpdis.crpix[1], "Coordinate system reference pixel")
header["CRVAL1"] = (
cpdis.crval[0],
"Coordinate system value at reference pixel",
)
header["CRVAL2"] = (
cpdis.crval[1],
"Coordinate system value at reference pixel",
)
header["CDELT1"] = (cpdis.cdelt[0], "Coordinate increment along axis")
header["CDELT2"] = (cpdis.cdelt[1], "Coordinate increment along axis")
image.ver = int(hdulist[0].header[f"{d_kw}{num:d}.EXTVER"])
hdulist.append(image)
write_dist(1, self.cpdis1)
write_dist(2, self.cpdis2)
def _remove_sip_kw(self, header):
"""
Remove SIP information from a header.
"""
# Never pass SIP coefficients to wcslib
# CTYPE must be passed with -SIP to wcslib
for key in {
m.group() for m in map(SIP_KW.match, list(header)) if m is not None
}:
del header[key]
def _read_sip_kw(self, header, wcskey=""):
"""
Reads `SIP`_ header keywords and returns a `~astropy.wcs.Sip`
object.
If no `SIP`_ header keywords are found, ``None`` is returned.
"""
if isinstance(header, (str, bytes)):
# TODO: Parse SIP from a string without pyfits around
return None
if "A_ORDER" in header and header["A_ORDER"] > 1:
if "B_ORDER" not in header:
raise ValueError(
"A_ORDER provided without corresponding B_ORDER "
"keyword for SIP distortion"
)
m = int(header["A_ORDER"])
a = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = f"A_{i}_{j}"
if key in header:
a[i, j] = header[key]
del header[key]
m = int(header["B_ORDER"])
if m > 1:
b = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = f"B_{i}_{j}"
if key in header:
b[i, j] = header[key]
del header[key]
else:
a = None
b = None
del header["A_ORDER"]
del header["B_ORDER"]
ctype = [header[f"CTYPE{nax}{wcskey}"] for nax in range(1, self.naxis + 1)]
if any(not ctyp.endswith("-SIP") for ctyp in ctype):
message = """
Inconsistent SIP distortion information is present in the FITS header and the WCS object:
SIP coefficients were detected, but CTYPE is missing a "-SIP" suffix.
astropy.wcs is using the SIP distortion coefficients,
therefore the coordinates calculated here might be incorrect.
If you do not want to apply the SIP distortion coefficients,
please remove the SIP coefficients from the FITS header or the
WCS object. As an example, if the image is already distortion-corrected
(e.g., drizzled) then distortion components should not apply and the SIP
coefficients should be removed.
While the SIP distortion coefficients are being applied here, if that was indeed the intent,
for consistency please append "-SIP" to the CTYPE in the FITS header or the WCS object.
"""
log.info(message)
elif "B_ORDER" in header and header["B_ORDER"] > 1:
raise ValueError(
"B_ORDER provided without corresponding A_ORDER "
+ "keyword for SIP distortion"
)
else:
a = None
b = None
if "AP_ORDER" in header and header["AP_ORDER"] > 1:
if "BP_ORDER" not in header:
raise ValueError(
"AP_ORDER provided without corresponding BP_ORDER "
"keyword for SIP distortion"
)
m = int(header["AP_ORDER"])
ap = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = f"AP_{i}_{j}"
if key in header:
ap[i, j] = header[key]
del header[key]
m = int(header["BP_ORDER"])
if m > 1:
bp = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = f"BP_{i}_{j}"
if key in header:
bp[i, j] = header[key]
del header[key]
else:
ap = None
bp = None
del header["AP_ORDER"]
del header["BP_ORDER"]
elif "BP_ORDER" in header and header["BP_ORDER"] > 1:
raise ValueError(
"BP_ORDER provided without corresponding AP_ORDER "
"keyword for SIP distortion"
)
else:
ap = None
bp = None
if a is None and b is None and ap is None and bp is None:
return None
if f"CRPIX1{wcskey}" not in header or f"CRPIX2{wcskey}" not in header:
raise ValueError("Header has SIP keywords without CRPIX keywords")
crpix1 = header.get(f"CRPIX1{wcskey}")
crpix2 = header.get(f"CRPIX2{wcskey}")
return Sip(a, b, ap, bp, (crpix1, crpix2))
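    # Illustrative minimal set of SIP keywords (hypothetical values) accepted
    # by this reader; CTYPE is expected to carry the "-SIP" suffix:
    #
    #     CTYPE1 = 'RA---TAN-SIP'    CTYPE2 = 'DEC--TAN-SIP'
    #     A_ORDER = 2                B_ORDER = 2
    #     A_0_2 = 3.0e-7             B_2_0 = -1.5e-7
    #     CRPIX1 = 512.0             CRPIX2 = 512.0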
def _write_sip_kw(self):
"""
Write out SIP keywords. Returns a dictionary of key-value
pairs.
"""
if self.sip is None:
return {}
keywords = {}
def write_array(name, a):
if a is None:
return
size = a.shape[0]
trdir = "sky to detector" if name[-1] == "P" else "detector to sky"
comment = (
f'SIP polynomial order, axis {ord(name[0]) - ord("A"):d}, {trdir:s}'
)
keywords[f"{name}_ORDER"] = size - 1, comment
comment = "SIP distortion coefficient"
for i in range(size):
for j in range(size - i):
if a[i, j] != 0.0:
keywords[f"{name}_{i:d}_{j:d}"] = a[i, j], comment
write_array("A", self.sip.a)
write_array("B", self.sip.b)
write_array("AP", self.sip.ap)
write_array("BP", self.sip.bp)
return keywords
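    # For example (illustrative): a 3x3 ``self.sip.a`` array with only
    # ``a[0, 2]`` nonzero yields ``A_ORDER = 2`` and a single ``A_0_2``
    # coefficient card; zero coefficients are never written.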
def _denormalize_sky(self, sky):
if self.wcs.lngtyp != "RA":
raise ValueError(
"WCS does not have longitude type of 'RA', therefore "
"(ra, dec) data can not be used as input"
)
if self.wcs.lattyp != "DEC":
raise ValueError(
"WCS does not have longitude type of 'DEC', therefore "
"(ra, dec) data can not be used as input"
)
if self.wcs.naxis == 2:
if self.wcs.lng == 0 and self.wcs.lat == 1:
return sky
elif self.wcs.lng == 1 and self.wcs.lat == 0:
# Reverse the order of the columns
return sky[:, ::-1]
else:
raise ValueError(
"WCS does not have longitude and latitude celestial "
"axes, therefore (ra, dec) data can not be used as input"
)
else:
if self.wcs.lng < 0 or self.wcs.lat < 0:
raise ValueError(
"WCS does not have both longitude and latitude "
"celestial axes, therefore (ra, dec) data can not be "
"used as input"
)
out = np.zeros((sky.shape[0], self.wcs.naxis))
out[:, self.wcs.lng] = sky[:, 0]
out[:, self.wcs.lat] = sky[:, 1]
return out
def _normalize_sky(self, sky):
if self.wcs.lngtyp != "RA":
raise ValueError(
"WCS does not have longitude type of 'RA', therefore "
"(ra, dec) data can not be returned"
)
if self.wcs.lattyp != "DEC":
raise ValueError(
"WCS does not have longitude type of 'DEC', therefore "
"(ra, dec) data can not be returned"
)
if self.wcs.naxis == 2:
if self.wcs.lng == 0 and self.wcs.lat == 1:
return sky
elif self.wcs.lng == 1 and self.wcs.lat == 0:
# Reverse the order of the columns
return sky[:, ::-1]
else:
raise ValueError(
"WCS does not have longitude and latitude celestial "
"axes, therefore (ra, dec) data can not be returned"
)
else:
if self.wcs.lng < 0 or self.wcs.lat < 0:
raise ValueError(
"WCS does not have both longitude and latitude celestial "
"axes, therefore (ra, dec) data can not be returned"
)
out = np.empty((sky.shape[0], 2))
out[:, 0] = sky[:, self.wcs.lng]
out[:, 1] = sky[:, self.wcs.lat]
return out
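    # Illustrative sketch: for a 2-D WCS whose axes are stored as
    # (dec, ra) -- i.e. ``self.wcs.lng == 1`` and ``self.wcs.lat == 0`` --
    # _denormalize_sky reorders user-supplied (ra, dec) columns into
    # (dec, ra) for wcslib, and _normalize_sky applies the inverse
    # reordering on output.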
def _array_converter(self, func, sky, *args, ra_dec_order=False):
"""
A helper function to support reading either a pair of arrays
or a single Nx2 array.
"""
def _return_list_of_arrays(axes, origin):
            if any(x.size == 0 for x in axes):
return axes
try:
axes = np.broadcast_arrays(*axes)
except ValueError:
raise ValueError(
"Coordinate arrays are not broadcastable to each other"
)
xy = np.hstack([x.reshape((x.size, 1)) for x in axes])
if ra_dec_order and sky == "input":
xy = self._denormalize_sky(xy)
output = func(xy, origin)
if ra_dec_order and sky == "output":
output = self._normalize_sky(output)
return (
output[:, 0].reshape(axes[0].shape),
output[:, 1].reshape(axes[0].shape),
)
return [output[:, i].reshape(axes[0].shape) for i in range(output.shape[1])]
def _return_single_array(xy, origin):
if xy.shape[-1] != self.naxis:
raise ValueError(
"When providing two arguments, the array must be "
f"of shape (N, {self.naxis})"
)
if 0 in xy.shape:
return xy
if ra_dec_order and sky == "input":
xy = self._denormalize_sky(xy)
result = func(xy, origin)
if ra_dec_order and sky == "output":
result = self._normalize_sky(result)
return result
if len(args) == 2:
try:
xy, origin = args
xy = np.asarray(xy)
origin = int(origin)
except Exception:
raise TypeError(
"When providing two arguments, they must be "
f"(coords[N][{self.naxis}], origin)"
)
if xy.shape == () or len(xy.shape) == 1:
return _return_list_of_arrays([xy], origin)
return _return_single_array(xy, origin)
elif len(args) == self.naxis + 1:
axes = args[:-1]
origin = args[-1]
try:
axes = [np.asarray(x) for x in axes]
origin = int(origin)
except Exception:
raise TypeError(
"When providing more than two arguments, they must be "
+ "a 1-D array for each axis, followed by an origin."
)
return _return_list_of_arrays(axes, origin)
raise TypeError(
f"WCS projection has {self.naxis} dimensions, so expected 2 (an Nx{self.naxis} array "
f"and the origin argument) or {self.naxis + 1} arguments (the position in each "
f"dimension, and the origin argument). Instead, {len(args)} arguments were "
"given."
)
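    # Both public calling conventions funnel through _array_converter.
    # Illustrative sketch (hypothetical 2-D WCS ``w``):
    #
    #     ra, dec = w.wcs_pix2world(x, y, 1)                 # per-axis arrays
    #     sky = w.wcs_pix2world(np.column_stack([x, y]), 1)  # single Nx2 array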
def all_pix2world(self, *args, **kwargs):
return self._array_converter(self._all_pix2world, "output", *args, **kwargs)
all_pix2world.__doc__ = f"""
Transforms pixel coordinates to world coordinates.
Performs all of the following in series:
- Detector to image plane correction (if present in the
FITS file)
- `SIP`_ distortion correction (if present in the FITS
file)
- `distortion paper`_ table-lookup correction (if present
in the FITS file)
- `wcslib`_ "core" WCS transformation
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('naxis', 8)}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{docstrings.RA_DEC_ORDER(8)}
Returns
-------
{docstrings.RETURNS('sky coordinates, in degrees', 8)}
Notes
-----
The order of the axes for the result is determined by the
``CTYPEia`` keywords in the FITS header, therefore it may not
always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
"""
def wcs_pix2world(self, *args, **kwargs):
if self.wcs is None:
raise ValueError("No basic WCS settings were created.")
return self._array_converter(
lambda xy, o: self.wcs.p2s(xy, o)["world"], "output", *args, **kwargs
)
wcs_pix2world.__doc__ = f"""
Transforms pixel coordinates to world coordinates by doing
only the basic `wcslib`_ transformation.
No `SIP`_ or `distortion paper`_ table lookup correction is
applied. To perform distortion correction, see
`~astropy.wcs.WCS.all_pix2world`,
`~astropy.wcs.WCS.sip_pix2foc`, `~astropy.wcs.WCS.p4_pix2foc`,
or `~astropy.wcs.WCS.pix2foc`.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('naxis', 8)}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{docstrings.RA_DEC_ORDER(8)}
Returns
-------
{docstrings.RETURNS('world coordinates, in degrees', 8)}
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
Notes
-----
The order of the axes for the result is determined by the
``CTYPEia`` keywords in the FITS header, therefore it may not
always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
"""
def _all_world2pix(
self, world, origin, tolerance, maxiter, adaptive, detect_divergence, quiet
):
# ############################################################
# # DESCRIPTION OF THE NUMERICAL METHOD ##
# ############################################################
# In this section I will outline the method of solving
# the inverse problem of converting world coordinates to
# pixel coordinates (*inverse* of the direct transformation
# `all_pix2world`) and I will summarize some of the aspects
# of the method proposed here and some of the issues of the
# original `all_world2pix` (in relation to this method)
# discussed in https://github.com/astropy/astropy/issues/1977
# A more detailed discussion can be found here:
# https://github.com/astropy/astropy/pull/2373
#
#
# ### Background ###
#
#
# I will refer here to the [SIP Paper]
# (http://fits.gsfc.nasa.gov/registry/sip/SIP_distortion_v1_0.pdf).
# According to this paper, the effect of distortions as
# described in *their* equation (1) is:
#
# (1) x = CD*(u+f(u)),
#
# where `x` is a *vector* of "intermediate spherical
# coordinates" (equivalent to (x,y) in the paper) and `u`
# is a *vector* of "pixel coordinates", and `f` is a vector
        # function describing geometrical distortions
        # (see equations 2 and 3 in the SIP Paper).
# However, I prefer to use `w` for "intermediate world
# coordinates", `x` for pixel coordinates, and assume that
# transformation `W` performs the **linear**
# (CD matrix + projection onto celestial sphere) part of the
# conversion from pixel coordinates to world coordinates.
# Then we can re-write (1) as:
#
# (2) w = W*(x+f(x)) = T(x)
#
# In `astropy.wcs.WCS` transformation `W` is represented by
# the `wcs_pix2world` member, while the combined ("total")
# transformation (linear part + distortions) is performed by
# `all_pix2world`. Below I summarize the notations and their
# equivalents in `astropy.wcs.WCS`:
#
# | Equation term | astropy.WCS/meaning |
# | ------------- | ---------------------------- |
# | `x` | pixel coordinates |
# | `w` | world coordinates |
# | `W` | `wcs_pix2world()` |
# | `W^{-1}` | `wcs_world2pix()` |
# | `T` | `all_pix2world()` |
# | `x+f(x)` | `pix2foc()` |
#
#
# ### Direct Solving of Equation (2) ###
#
#
# In order to find the pixel coordinates that correspond to
# given world coordinates `w`, it is necessary to invert
# equation (2): `x=T^{-1}(w)`, or solve equation `w==T(x)`
# for `x`. However, this approach has the following
# disadvantages:
# 1. It requires unnecessary transformations (see next
# section).
# 2. It is prone to "RA wrapping" issues as described in
# https://github.com/astropy/astropy/issues/1977
# (essentially because `all_pix2world` may return points with
# a different phase than user's input `w`).
#
#
# ### Description of the Method Used here ###
#
#
# By applying inverse linear WCS transformation (`W^{-1}`)
# to both sides of equation (2) and introducing notation `x'`
        # (prime) for the pixel coordinates obtained from the world
# coordinates by applying inverse *linear* WCS transformation
# ("focal plane coordinates"):
#
# (3) x' = W^{-1}(w)
#
# we obtain the following equation:
#
# (4) x' = x+f(x),
#
# or,
#
# (5) x = x'-f(x)
#
# This equation is well suited for solving using the method
# of fixed-point iterations
# (http://en.wikipedia.org/wiki/Fixed-point_iteration):
#
# (6) x_{i+1} = x'-f(x_i)
#
# As an initial value of the pixel coordinate `x_0` we take
# "focal plane coordinate" `x'=W^{-1}(w)=wcs_world2pix(w)`.
# We stop iterations when `|x_{i+1}-x_i|<tolerance`. We also
# consider the process to be diverging if
# `|x_{i+1}-x_i|>|x_i-x_{i-1}|`
# **when** `|x_{i+1}-x_i|>=tolerance` (when current
# approximation is close to the true solution,
# `|x_{i+1}-x_i|>|x_i-x_{i-1}|` may be due to rounding errors
# and we ignore such "divergences" when
# `|x_{i+1}-x_i|<tolerance`). It may appear that checking for
# `|x_{i+1}-x_i|<tolerance` in order to ignore divergence is
# unnecessary since the iterative process should stop anyway,
# however, the proposed implementation of this iterative
# process is completely vectorized and, therefore, we may
# continue iterating over *some* points even though they have
# converged to within a specified tolerance (while iterating
# over other points that have not yet converged to
# a solution).
#
# In order to efficiently implement iterative process (6)
# using available methods in `astropy.wcs.WCS`, we add and
# subtract `x_i` from the right side of equation (6):
#
# (7) x_{i+1} = x'-(x_i+f(x_i))+x_i = x'-pix2foc(x_i)+x_i,
#
# where `x'=wcs_world2pix(w)` and it is computed only *once*
# before the beginning of the iterative process (and we also
# set `x_0=x'`). By using `pix2foc` at each iteration instead
# of `all_pix2world` we get about 25% increase in performance
# (by not performing the linear `W` transformation at each
# step) and we also avoid the "RA wrapping" issue described
# above (by working in focal plane coordinates and avoiding
# pix->world transformations).
#
# As an added benefit, the process converges to the correct
# solution in just one iteration when distortions are not
# present (compare to
# https://github.com/astropy/astropy/issues/1977 and
# https://github.com/astropy/astropy/pull/2294): in this case
        # `pix2foc` is the identity transformation
# `x_i=pix2foc(x_i)` and from equation (7) we get:
#
# x' = x_0 = wcs_world2pix(w)
# x_1 = x' - pix2foc(x_0) + x_0 = x' - pix2foc(x') + x' = x'
# = wcs_world2pix(w) = x_0
# =>
# |x_1-x_0| = 0 < tolerance (with tolerance > 0)
#
# However, for performance reasons, it is still better to
# avoid iterations altogether and return the exact linear
# solution (`wcs_world2pix`) right-away when non-linear
# distortions are not present by checking that attributes
# `sip`, `cpdis1`, `cpdis2`, `det2im1`, and `det2im2` are
# *all* `None`.
#
#
# ### Outline of the Algorithm ###
#
#
# While the proposed code is relatively long (considering
# the simplicity of the algorithm), this is due to: 1)
# checking if iterative solution is necessary at all; 2)
# checking for divergence; 3) re-implementation of the
# completely vectorized algorithm as an "adaptive" vectorized
# algorithm (for cases when some points diverge for which we
# want to stop iterations). In my tests, the adaptive version
# of the algorithm is about 50% slower than non-adaptive
# version for all HST images.
#
# The essential part of the vectorized non-adaptive algorithm
# (without divergence and other checks) can be described
# as follows:
#
# pix0 = self.wcs_world2pix(world, origin)
# pix = pix0.copy() # 0-order solution
#
# for k in range(maxiter):
# # find correction to the previous solution:
# dpix = self.pix2foc(pix, origin) - pix0
#
# # compute norm (L2) of the correction:
# dn = np.linalg.norm(dpix, axis=1)
#
# # apply correction:
# pix -= dpix
#
# # check convergence:
# if np.max(dn) < tolerance:
# break
#
# return pix
#
        # Here, the input parameter `world` can be an `NxM` array
        # where `N` is the number of points to be converted
        # simultaneously to image coordinates and `M` is the number
        # of coordinate axes in the WCS.
#
#
# ### IMPORTANT NOTE: ###
#
        # If, in future releases of `~astropy.wcs`, `pix2foc` no
        # longer applies all the required distortion corrections,
        # then in the code below calls to `pix2foc` will have to be
        # replaced with
        # wcs_world2pix(all_pix2world(pix_list, origin), origin)
#
# ############################################################
# # INITIALIZE ITERATIVE PROCESS: ##
# ############################################################
# initial approximation (linear WCS based only)
pix0 = self.wcs_world2pix(world, origin)
# Check that an iterative solution is required at all
# (when any of the non-CD-matrix-based corrections are
# present). If not required return the initial
# approximation (pix0).
if not self.has_distortion:
# No non-WCS corrections detected so
# simply return initial approximation:
return pix0
pix = pix0.copy() # 0-order solution
# initial correction:
dpix = self.pix2foc(pix, origin) - pix0
# Update initial solution:
pix -= dpix
# Norm (L2) squared of the correction:
dn = np.sum(dpix * dpix, axis=1)
dnprev = dn.copy() # if adaptive else dn
tol2 = tolerance**2
# Prepare for iterative process
k = 1
ind = None
inddiv = None
# Turn off numpy runtime warnings for 'invalid' and 'over':
old_invalid = np.geterr()["invalid"]
old_over = np.geterr()["over"]
np.seterr(invalid="ignore", over="ignore")
# ############################################################
# # NON-ADAPTIVE ITERATIONS: ##
# ############################################################
if not adaptive:
# Fixed-point iterations:
while np.nanmax(dn) >= tol2 and k < maxiter:
# Find correction to the previous solution:
dpix = self.pix2foc(pix, origin) - pix0
# Compute norm (L2) squared of the correction:
dn = np.sum(dpix * dpix, axis=1)
# Check for divergence (we do this in two stages
# to optimize performance for the most common
# scenario when successive approximations converge):
if detect_divergence:
divergent = dn >= dnprev
if np.any(divergent):
# Find solutions that have not yet converged:
slowconv = dn >= tol2
(inddiv,) = np.where(divergent & slowconv)
if inddiv.shape[0] > 0:
# Update indices of elements that
# still need correction:
conv = dn < dnprev
iconv = np.where(conv)
# Apply correction:
dpixgood = dpix[iconv]
pix[iconv] -= dpixgood
dpix[iconv] = dpixgood
# For the next iteration choose
# non-divergent points that have not yet
# converged to the requested accuracy:
(ind,) = np.where(slowconv & conv)
pix0 = pix0[ind]
dnprev[ind] = dn[ind]
k += 1
# Switch to adaptive iterations:
adaptive = True
break
# Save current correction magnitudes for later:
dnprev = dn
# Apply correction:
pix -= dpix
k += 1
# ############################################################
# # ADAPTIVE ITERATIONS: ##
# ############################################################
if adaptive:
if ind is None:
(ind,) = np.where(np.isfinite(pix).all(axis=1))
pix0 = pix0[ind]
# "Adaptive" fixed-point iterations:
while ind.shape[0] > 0 and k < maxiter:
# Find correction to the previous solution:
dpixnew = self.pix2foc(pix[ind], origin) - pix0
# Compute norm (L2) of the correction:
dnnew = np.sum(np.square(dpixnew), axis=1)
# Bookkeeping of corrections:
dnprev[ind] = dn[ind].copy()
dn[ind] = dnnew
if detect_divergence:
# Find indices of pixels that are converging:
conv = dnnew < dnprev[ind]
iconv = np.where(conv)
iiconv = ind[iconv]
# Apply correction:
dpixgood = dpixnew[iconv]
pix[iiconv] -= dpixgood
dpix[iiconv] = dpixgood
# Find indices of solutions that have not yet
# converged to the requested accuracy
# AND that do not diverge:
(subind,) = np.where((dnnew >= tol2) & conv)
else:
# Apply correction:
pix[ind] -= dpixnew
dpix[ind] = dpixnew
# Find indices of solutions that have not yet
# converged to the requested accuracy:
(subind,) = np.where(dnnew >= tol2)
# Choose solutions that need more iterations:
ind = ind[subind]
pix0 = pix0[subind]
k += 1
# ############################################################
# # FINAL DETECTION OF INVALID, DIVERGING, ##
# # AND FAILED-TO-CONVERGE POINTS ##
# ############################################################
# Identify diverging and/or invalid points:
invalid = (~np.all(np.isfinite(pix), axis=1)) & (
np.all(np.isfinite(world), axis=1)
)
# When detect_divergence==False, dnprev is outdated
# (it is the norm of the very first correction).
# Still better than nothing...
(inddiv,) = np.where(((dn >= tol2) & (dn >= dnprev)) | invalid)
if inddiv.shape[0] == 0:
inddiv = None
# Identify points that did not converge within 'maxiter'
# iterations:
if k >= maxiter:
(ind,) = np.where((dn >= tol2) & (dn < dnprev) & (~invalid))
if ind.shape[0] == 0:
ind = None
else:
ind = None
# Restore previous numpy error settings:
np.seterr(invalid=old_invalid, over=old_over)
# ############################################################
# # RAISE EXCEPTION IF DIVERGING OR TOO SLOWLY CONVERGING ##
# # DATA POINTS HAVE BEEN DETECTED: ##
# ############################################################
if (ind is not None or inddiv is not None) and not quiet:
if inddiv is None:
raise NoConvergence(
"'WCS.all_world2pix' failed to "
f"converge to the requested accuracy after {k:d} "
"iterations.",
best_solution=pix,
accuracy=np.abs(dpix),
niter=k,
slow_conv=ind,
divergent=None,
)
else:
raise NoConvergence(
"'WCS.all_world2pix' failed to "
"converge to the requested accuracy.\n"
f"After {k:d} iterations, the solution is diverging "
"at least for one input point.",
best_solution=pix,
accuracy=np.abs(dpix),
niter=k,
slow_conv=ind,
divergent=inddiv,
)
return pix
@deprecated_renamed_argument("accuracy", "tolerance", "4.3")
def all_world2pix(
self,
*args,
tolerance=1e-4,
maxiter=20,
adaptive=False,
detect_divergence=True,
quiet=False,
**kwargs,
):
if self.wcs is None:
raise ValueError("No basic WCS settings were created.")
return self._array_converter(
lambda *args, **kwargs: self._all_world2pix(
*args,
tolerance=tolerance,
maxiter=maxiter,
adaptive=adaptive,
detect_divergence=detect_divergence,
quiet=quiet,
),
"input",
*args,
**kwargs,
)
all_world2pix.__doc__ = f"""
all_world2pix(*arg, tolerance=1.0e-4, maxiter=20,
adaptive=False, detect_divergence=True, quiet=False)
Transforms world coordinates to pixel coordinates, using
numerical iteration to invert the full forward transformation
`~astropy.wcs.WCS.all_pix2world` with complete
distortion model.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('naxis', 8)}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{docstrings.RA_DEC_ORDER(8)}
tolerance : float, optional (default = 1.0e-4)
Tolerance of solution. Iteration terminates when the
iterative solver estimates that the "true solution" is
            within this many pixels of the current estimate, more
specifically, when the correction to the solution found
during the previous iteration is smaller
(in the sense of the L2 norm) than ``tolerance``.
maxiter : int, optional (default = 20)
Maximum number of iterations allowed to reach a solution.
quiet : bool, optional (default = False)
Do not throw :py:class:`NoConvergence` exceptions when
the method does not converge to a solution with the
required accuracy within a specified number of maximum
iterations set by ``maxiter`` parameter. Instead,
simply return the found solution.
Other Parameters
----------------
adaptive : bool, optional (default = False)
Specifies whether to adaptively select only points that
did not converge to a solution within the required
accuracy for the next iteration. Default is recommended
for HST as well as most other instruments.
.. note::
The :py:meth:`all_world2pix` uses a vectorized
implementation of the method of consecutive
approximations (see ``Notes`` section below) in which it
iterates over *all* input points *regardless* until
the required accuracy has been reached for *all* input
points. In some cases it may be possible that
*almost all* points have reached the required accuracy
                but there are only a few input data points for
which additional iterations may be needed (this
depends mostly on the characteristics of the geometric
distortions for a given instrument). In this situation
it may be advantageous to set ``adaptive`` = `True` in
which case :py:meth:`all_world2pix` will continue
iterating *only* over the points that have not yet
converged to the required accuracy. However, for the
HST's ACS/WFC detector, which has the strongest
distortions of all HST instruments, testing has
                shown that enabling this option would lead to about a
50-100% penalty in computational time (depending on
specifics of the image, geometric distortions, and
number of input points to be converted). Therefore,
                for HST and possibly other instruments, it is recommended
to set ``adaptive`` = `False`. The only danger in
getting this setting wrong will be a performance
penalty.
.. note::
When ``detect_divergence`` is `True`,
:py:meth:`all_world2pix` will automatically switch
to the adaptive algorithm once divergence has been
detected.
detect_divergence : bool, optional (default = True)
Specifies whether to perform a more detailed analysis
of the convergence to a solution. Normally
:py:meth:`all_world2pix` may not achieve the required
accuracy if either the ``tolerance`` or ``maxiter`` arguments
are too low. However, it may happen that for some
geometric distortions the conditions of convergence for
            the method of consecutive approximations used by
:py:meth:`all_world2pix` may not be satisfied, in which
case consecutive approximations to the solution will
diverge regardless of the ``tolerance`` or ``maxiter``
settings.
When ``detect_divergence`` is `False`, these divergent
points will be detected as not having achieved the
required accuracy (without further details). In addition,
if ``adaptive`` is `False` then the algorithm will not
know that the solution (for specific points) is diverging
and will continue iterating and trying to "improve"
diverging solutions. This may result in ``NaN`` or
            ``Inf`` values in the return results (in addition to
performance penalties). Even when ``detect_divergence``
is `False`, :py:meth:`all_world2pix`, at the end of the
iterative process, will identify invalid results
(``NaN`` or ``Inf``) as "diverging" solutions and will
raise :py:class:`NoConvergence` unless the ``quiet``
parameter is set to `True`.
When ``detect_divergence`` is `True`,
:py:meth:`all_world2pix` will detect points for which
current correction to the coordinates is larger than
the correction applied during the previous iteration
**if** the requested accuracy **has not yet been
achieved**. In this case, if ``adaptive`` is `True`,
these points will be excluded from further iterations and
if ``adaptive`` is `False`, :py:meth:`all_world2pix` will
automatically switch to the adaptive algorithm. Thus, the
reported divergent solution will be the latest converging
solution computed immediately *before* divergence
has been detected.
.. note::
When accuracy has been achieved, small increases in
current corrections may be possible due to rounding
errors (when ``adaptive`` is `False`) and such
increases will be ignored.
.. note::
Based on our testing using HST ACS/WFC images, setting
``detect_divergence`` to `True` will incur about 5-20%
performance penalty with the larger penalty
corresponding to ``adaptive`` set to `True`.
Because the benefits of enabling this
feature outweigh the small performance penalty,
especially when ``adaptive`` = `False`, it is
recommended to set ``detect_divergence`` to `True`,
unless extensive testing of the distortion models for
images from specific instruments show a good stability
of the numerical method for a wide range of
coordinates (even outside the image itself).
.. note::
Indices of the diverging inverse solutions will be
reported in the ``divergent`` attribute of the
raised :py:class:`NoConvergence` exception object.
Returns
-------
{docstrings.RETURNS('pixel coordinates', 8)}
Notes
-----
The order of the axes for the input world array is determined by
the ``CTYPEia`` keywords in the FITS header, therefore it may
not always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp`, and
`~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
        Using the method of fixed-point iterations, we iterate
        starting with the initial approximation, which is
computed using the non-distortion-aware
:py:meth:`wcs_world2pix` (or equivalent).
The :py:meth:`all_world2pix` function uses a vectorized
implementation of the method of consecutive approximations and
therefore it is highly efficient (>30x) when *all* data points
that need to be converted from sky coordinates to image
coordinates are passed at *once*. Therefore, it is advisable,
whenever possible, to pass as input a long array of all points
that need to be converted to :py:meth:`all_world2pix` instead
of calling :py:meth:`all_world2pix` for each data point. Also
see the note to the ``adaptive`` parameter.
Raises
------
NoConvergence
The method did not converge to a
solution to the required accuracy within a specified
number of maximum iterations set by the ``maxiter``
parameter. To turn off this exception, set ``quiet`` to
`True`. Indices of the points for which the requested
accuracy was not achieved (if any) will be listed in the
``slow_conv`` attribute of the
raised :py:class:`NoConvergence` exception object.
See :py:class:`NoConvergence` documentation for
more details.
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
Examples
--------
>>> import astropy.io.fits as fits
>>> import astropy.wcs as wcs
>>> import numpy as np
>>> import os
>>> filename = os.path.join(wcs.__path__[0], 'tests/data/j94f05bgq_flt.fits')
>>> hdulist = fits.open(filename)
>>> w = wcs.WCS(hdulist[('sci',1)].header, hdulist)
>>> hdulist.close()
>>> ra, dec = w.all_pix2world([1,2,3], [1,1,1], 1)
>>> print(ra) # doctest: +FLOAT_CMP
[ 5.52645627 5.52649663 5.52653698]
>>> print(dec) # doctest: +FLOAT_CMP
[-72.05171757 -72.05171276 -72.05170795]
>>> radec = w.all_pix2world([[1,1], [2,1], [3,1]], 1)
>>> print(radec) # doctest: +FLOAT_CMP
[[ 5.52645627 -72.05171757]
[ 5.52649663 -72.05171276]
[ 5.52653698 -72.05170795]]
>>> x, y = w.all_world2pix(ra, dec, 1)
>>> print(x) # doctest: +FLOAT_CMP
[ 1.00000238 2.00000237 3.00000236]
>>> print(y) # doctest: +FLOAT_CMP
[ 0.99999996 0.99999997 0.99999997]
>>> xy = w.all_world2pix(radec, 1)
>>> print(xy) # doctest: +FLOAT_CMP
[[ 1.00000238 0.99999996]
[ 2.00000237 0.99999997]
[ 3.00000236 0.99999997]]
>>> xy = w.all_world2pix(radec, 1, maxiter=3,
... tolerance=1.0e-10, quiet=False)
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 3 iterations, the solution is
diverging at least for one input point.
>>> # Now try to use some diverging data:
>>> divradec = w.all_pix2world([[1.0, 1.0],
... [10000.0, 50000.0],
... [3.0, 1.0]], 1)
>>> print(divradec) # doctest: +FLOAT_CMP
[[ 5.52645627 -72.05171757]
[ 7.15976932 -70.8140779 ]
[ 5.52653698 -72.05170795]]
>>> # First, turn detect_divergence on:
>>> try: # doctest: +FLOAT_CMP
... xy = w.all_world2pix(divradec, 1, maxiter=20,
... tolerance=1.0e-4, adaptive=False,
... detect_divergence=True,
... quiet=False)
... except wcs.wcs.NoConvergence as e:
... print("Indices of diverging points: {{0}}"
... .format(e.divergent))
... print("Indices of poorly converging points: {{0}}"
... .format(e.slow_conv))
... print("Best solution:\\n{{0}}".format(e.best_solution))
... print("Achieved accuracy:\\n{{0}}".format(e.accuracy))
Indices of diverging points: [1]
Indices of poorly converging points: None
Best solution:
[[ 1.00000238e+00 9.99999965e-01]
[ -1.99441636e+06 1.44309097e+06]
[ 3.00000236e+00 9.99999966e-01]]
Achieved accuracy:
[[ 6.13968380e-05 8.59638593e-07]
[ 8.59526812e+11 6.61713548e+11]
[ 6.09398446e-05 8.38759724e-07]]
>>> raise e
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 5 iterations, the solution is
diverging at least for one input point.
>>> # This time turn detect_divergence off:
>>> try: # doctest: +FLOAT_CMP
... xy = w.all_world2pix(divradec, 1, maxiter=20,
... tolerance=1.0e-4, adaptive=False,
... detect_divergence=False,
... quiet=False)
... except wcs.wcs.NoConvergence as e:
... print("Indices of diverging points: {{0}}"
... .format(e.divergent))
... print("Indices of poorly converging points: {{0}}"
... .format(e.slow_conv))
... print("Best solution:\\n{{0}}".format(e.best_solution))
... print("Achieved accuracy:\\n{{0}}".format(e.accuracy))
Indices of diverging points: [1]
Indices of poorly converging points: None
Best solution:
[[ 1.00000009 1. ]
[ nan nan]
[ 3.00000009 1. ]]
Achieved accuracy:
[[ 2.29417358e-06 3.21222995e-08]
[ nan nan]
[ 2.27407877e-06 3.13005639e-08]]
>>> raise e
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 6 iterations, the solution is
diverging at least for one input point.
"""
def wcs_world2pix(self, *args, **kwargs):
if self.wcs is None:
raise ValueError("No basic WCS settings were created.")
return self._array_converter(
lambda xy, o: self.wcs.s2p(xy, o)["pixcrd"], "input", *args, **kwargs
)
wcs_world2pix.__doc__ = f"""
Transforms world coordinates to pixel coordinates, using only
the basic `wcslib`_ WCS transformation. No `SIP`_ or
`distortion paper`_ table lookup transformation is applied.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('naxis', 8)}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{docstrings.RA_DEC_ORDER(8)}
Returns
-------
{docstrings.RETURNS('pixel coordinates', 8)}
Notes
-----
The order of the axes for the input world array is determined by
the ``CTYPEia`` keywords in the FITS header, therefore it may
not always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
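Examples
--------
A minimal usage sketch, assuming ``w`` is a 2-dimensional celestial
`~astropy.wcs.WCS` (e.g. built as in the :py:meth:`all_world2pix`
examples) and ``ra``, ``dec`` are array-like world coordinates in
degrees:
>>> x, y = w.wcs_world2pix(ra, dec, 1)   # doctest: +SKIP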
"""
def pix2foc(self, *args):
return self._array_converter(self._pix2foc, None, *args)
pix2foc.__doc__ = f"""
Convert pixel coordinates to focal plane coordinates using the
`SIP`_ polynomial distortion convention and `distortion
paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('2', 8)}
Returns
-------
{docstrings.RETURNS('focal coordinates', 8)}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
"""
def p4_pix2foc(self, *args):
return self._array_converter(self._p4_pix2foc, None, *args)
p4_pix2foc.__doc__ = f"""
Convert pixel coordinates to focal plane coordinates using
`distortion paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('2', 8)}
Returns
-------
{docstrings.RETURNS('focal coordinates', 8)}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
"""
def det2im(self, *args):
return self._array_converter(self._det2im, None, *args)
det2im.__doc__ = f"""
Convert detector coordinates to image plane coordinates using
`distortion paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('2', 8)}
Returns
-------
{docstrings.RETURNS('pixel coordinates', 8)}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
"""
def sip_pix2foc(self, *args):
if self.sip is None:
if len(args) == 2:
return args[0]
elif len(args) == 3:
return args[:2]
else:
raise TypeError("Wrong number of arguments")
return self._array_converter(self.sip.pix2foc, None, *args)
sip_pix2foc.__doc__ = f"""
Convert pixel coordinates to focal plane coordinates using the
`SIP`_ polynomial distortion convention.
The output is in pixel coordinates, relative to ``CRPIX``.
FITS WCS `distortion paper`_ table lookup correction is not
applied, even if that information existed in the FITS file
that initialized this :class:`~astropy.wcs.WCS` object. To
correct for that, use `~astropy.wcs.WCS.pix2foc` or
`~astropy.wcs.WCS.p4_pix2foc`.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('2', 8)}
Returns
-------
{docstrings.RETURNS('focal coordinates', 8)}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
"""
def sip_foc2pix(self, *args):
if self.sip is None:
if len(args) == 2:
return args[0]
elif len(args) == 3:
return args[:2]
else:
raise TypeError("Wrong number of arguments")
return self._array_converter(self.sip.foc2pix, None, *args)
sip_foc2pix.__doc__ = f"""
Convert focal plane coordinates to pixel coordinates using the
`SIP`_ polynomial distortion convention.
FITS WCS `distortion paper`_ table lookup distortion
correction is not applied, even if that information existed in
the FITS file that initialized this `~astropy.wcs.WCS` object.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('2', 8)}
Returns
-------
{docstrings.RETURNS('pixel coordinates', 8)}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
"""
def proj_plane_pixel_scales(self):
"""
Calculate pixel scales along each axis of the image pixel at
the ``CRPIX`` location once it is projected onto the
"plane of intermediate world coordinates" as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
.. note::
This method is concerned **only** with the transformation
"image plane"->"projection plane" and **not** with the
transformation "celestial sphere"->"projection plane"->"image plane".
Therefore, this function ignores distortions arising from the
non-linear nature of most projections.
.. note::
This method only returns sensible answers if the WCS contains
celestial axes, i.e., the `~astropy.wcs.WCS.celestial` WCS object.
Returns
-------
scale : list of `~astropy.units.Quantity`
A vector of projection plane increments corresponding to each
pixel side (axis).
See Also
--------
astropy.wcs.utils.proj_plane_pixel_scales
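Examples
--------
A minimal sketch, assuming ``w`` is a 2-D celestial WCS (the exact
values depend on the header):
>>> scales = w.proj_plane_pixel_scales()   # doctest: +SKIP
>>> scales[0].unit                         # doctest: +SKIP
Unit("deg")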
"""
from astropy.wcs.utils import proj_plane_pixel_scales # Avoid circular import
values = proj_plane_pixel_scales(self)
units = [u.Unit(x) for x in self.wcs.cunit]
return [
value * unit for (value, unit) in zip(values, units)
] # Can have different units
def proj_plane_pixel_area(self):
"""
For a **celestial** WCS (see `astropy.wcs.WCS.celestial`), returns pixel
area of the image pixel at the ``CRPIX`` location once it is projected
onto the "plane of intermediate world coordinates" as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
.. note::
This function is concerned **only** with the transformation
"image plane"->"projection plane" and **not** with the
transformation "celestial sphere"->"projection plane"->"image plane".
Therefore, this function ignores distortions arising from the
non-linear nature of most projections.
.. note::
This method only returns sensible answers if the WCS contains
celestial axes, i.e., the `~astropy.wcs.WCS.celestial` WCS object.
Returns
-------
area : `~astropy.units.Quantity`
Area (in the projection plane) of the pixel at ``CRPIX`` location.
Raises
------
ValueError
Pixel area is defined only for 2D pixels. Most likely the
`~astropy.wcs.Wcsprm.cd` matrix of the `~astropy.wcs.WCS.celestial`
WCS is not a square matrix of second order.
Notes
-----
Depending on the application, the square root of the pixel area can be
used to represent a single pixel scale of an equivalent square pixel
whose area is equal to the area of a generally non-square pixel.
See Also
--------
astropy.wcs.utils.proj_plane_pixel_area
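Examples
--------
A minimal sketch, assuming ``w`` is a 2-D celestial WCS (the value
depends on the header):
>>> area = w.proj_plane_pixel_area()   # doctest: +SKIP
>>> area.unit                          # doctest: +SKIP
Unit("deg2")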
"""
from astropy.wcs.utils import proj_plane_pixel_area # Avoid circular import
value = proj_plane_pixel_area(self)
unit = u.Unit(self.wcs.cunit[0]) * u.Unit(self.wcs.cunit[1]) # 2D only
return value * unit
def to_fits(self, relax=False, key=None):
"""
Generate an `~astropy.io.fits.HDUList` object with all of the
information stored in this object. This should be logically identical
to the input FITS file, but it will be normalized in a number of ways.
See `to_header` for some warnings about the output produced.
Parameters
----------
relax : bool or int, optional
Degree of permissiveness:
- `False` (default): Write all extensions that are
considered to be safe and recommended.
- `True`: Write all recognized informal extensions of the
WCS standard.
- `int`: a bit field selecting specific extensions to
write. See :ref:`astropy:relaxwrite` for details.
key : str
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``"a"``
part of the ``CTYPEia`` cards.
Returns
-------
hdulist : `~astropy.io.fits.HDUList`
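Examples
--------
A minimal sketch, assuming ``w`` is an existing `~astropy.wcs.WCS`
instance (the output file name is arbitrary):
>>> hdulist = w.to_fits(relax=True)                   # doctest: +SKIP
>>> hdulist.writeto('wcs_only.fits', overwrite=True)  # doctest: +SKIP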
"""
header = self.to_header(relax=relax, key=key)
hdu = fits.PrimaryHDU(header=header)
hdulist = fits.HDUList(hdu)
self._write_det2im(hdulist)
self._write_distortion_kw(hdulist)
return hdulist
def to_header(self, relax=None, key=None):
"""Generate an `astropy.io.fits.Header` object with the basic WCS
and SIP information stored in this object. This should be
logically identical to the input FITS file, but it will be
normalized in a number of ways.
.. warning::
This function does not write out FITS WCS `distortion
paper`_ information, since that requires multiple FITS
header data units. To get a full representation of
everything in this object, use `to_fits`.
Parameters
----------
relax : bool or int, optional
Degree of permissiveness:
- `False` (default): Write all extensions that are
considered to be safe and recommended.
- `True`: Write all recognized informal extensions of the
WCS standard.
- `int`: a bit field selecting specific extensions to
write. See :ref:`astropy:relaxwrite` for details.
If the ``relax`` keyword argument is not given and any
keywords were omitted from the output, an
`~astropy.utils.exceptions.AstropyWarning` is displayed.
To override this, explicitly pass a value to ``relax``.
key : str
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``"a"``
part of the ``CTYPEia`` cards.
Returns
-------
header : `astropy.io.fits.Header`
Notes
-----
The output header will almost certainly differ from the input in a
number of respects:
1. The output header only contains WCS-related keywords. In
particular, it does not contain syntactically-required
keywords such as ``SIMPLE``, ``NAXIS``, ``BITPIX``, or
``END``.
2. Deprecated (e.g. ``CROTAn``) or non-standard usage will
be translated to standard (this is partially dependent on
whether ``fix`` was applied).
3. Quantities will be converted to the units used internally,
basically SI with the addition of degrees.
4. Floating-point quantities may be given to a different decimal
precision.
5. Elements of the ``PCi_j`` matrix will be written if and
only if they differ from the unit matrix. Thus, if the
matrix is unity then no elements will be written.
6. Additional keywords such as ``WCSAXES``, ``CUNITia``,
``LONPOLEa`` and ``LATPOLEa`` may appear.
7. The original keycomments will be lost, although
`to_header` tries hard to write meaningful comments.
8. Keyword order may be changed.
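Examples
--------
A minimal sketch, assuming ``w`` is an existing `~astropy.wcs.WCS`
instance with celestial axes:
>>> header = w.to_header(relax=True)   # doctest: +SKIP
>>> 'CTYPE1' in header                 # doctest: +SKIP
True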
"""
# default precision for numerical WCS keywords
precision = WCSHDO_P14 # Defined by C-ext
display_warning = False
if relax is None:
display_warning = True
relax = False
if relax not in (True, False):
do_sip = relax & WCSHDO_SIP
relax &= ~WCSHDO_SIP
else:
do_sip = relax
relax = WCSHDO_all if relax is True else WCSHDO_safe # Defined by C-ext
relax = precision | relax
if self.wcs is not None:
if key is not None:
orig_key = self.wcs.alt
self.wcs.alt = key
header_string = self.wcs.to_header(relax)
header = fits.Header.fromstring(header_string)
keys_to_remove = ["", " ", "COMMENT"]
for kw in keys_to_remove:
if kw in header:
del header[kw]
# Check if we can handle TPD distortion correctly
if _WCS_TPD_WARN_LT71:
for kw, val in header.items():
if kw[:5] in ("CPDIS", "CQDIS") and val == "TPD":
warnings.warn(
f"WCS contains a TPD distortion model in {kw}. WCSLIB"
f" {_wcs.__version__} is writing this in a format"
" incompatible with current versions - please update to"
" 7.4 or use the bundled WCSLIB.",
AstropyWarning,
)
elif _WCS_TPD_WARN_LT74:
for kw, val in header.items():
if kw[:5] in ("CPDIS", "CQDIS") and val == "TPD":
warnings.warn(
f"WCS contains a TPD distortion model in {kw}, which"
" requires WCSLIB 7.4 or later to store in a FITS header"
f" (having {_wcs.__version__}).",
AstropyWarning,
)
else:
header = fits.Header()
if do_sip and self.sip is not None:
if self.wcs is not None and any(
not ctyp.endswith("-SIP") for ctyp in self.wcs.ctype
):
self._fix_ctype(header, add_sip=True)
for kw, val in self._write_sip_kw().items():
header[kw] = val
if (
not do_sip
and self.wcs is not None
and any(self.wcs.ctype)
and self.sip is not None
):
# This is called when relax is not False or WCSHDO_SIP
# The default case of ``relax=None`` is handled further in the code.
header = self._fix_ctype(header, add_sip=False)
if display_warning:
full_header = self.to_header(relax=True, key=key)
missing_keys = []
for kw, val in full_header.items():
if kw not in header:
missing_keys.append(kw)
if len(missing_keys):
warnings.warn(
"Some non-standard WCS keywords were excluded:"
f" {', '.join(missing_keys)} Use the ``relax`` kwarg to control"
" this.",
AstropyWarning,
)
# called when ``relax=None``
# This is different from the case of ``relax=False``.
if any(self.wcs.ctype) and self.sip is not None:
header = self._fix_ctype(header, add_sip=False, log_message=False)
# Finally reset the key. This must be called after ``_fix_ctype``.
if key is not None:
self.wcs.alt = orig_key
return header
def _fix_ctype(self, header, add_sip=True, log_message=True):
"""
Parameters
----------
header : `~astropy.io.fits.Header`
FITS header.
add_sip : bool
Flag indicating whether "-SIP" should be added or removed from CTYPE keywords.
Remove "-SIP" from CTYPE when writing out a header with relax=False.
This needs to be done outside ``to_header`` because ``to_header`` runs
twice when ``relax=False`` and the second time ``relax`` is set to ``True``
to display the missing keywords.
If the user requested SIP distortion to be written out add "-SIP" to
CTYPE if it is missing.
"""
_add_sip_to_ctype = """
Inconsistent SIP distortion information is present in the current WCS:
SIP coefficients were detected, but CTYPE is missing "-SIP" suffix,
therefore the current WCS is internally inconsistent.
Because relax has been set to True, the resulting output WCS will have
"-SIP" appended to CTYPE in order to make the header internally consistent.
However, this may produce incorrect astrometry in the output WCS, if
in fact the current WCS is already distortion-corrected.
Therefore, if the current WCS is already distortion-corrected (e.g., drizzled)
then SIP distortion components should not apply. In that case, for a WCS
that is already distortion-corrected, please remove the SIP coefficients
from the header.
"""
if log_message:
if add_sip:
log.info(_add_sip_to_ctype)
for i in range(1, self.naxis + 1):
# strip() must be called here to cover the case of alt key= " "
kw = f"CTYPE{i}{self.wcs.alt}".strip()
if kw in header:
if add_sip:
# Use removesuffix() rather than strip(): strip("-SIP") removes any of
# the characters "-", "S", "I", "P" from *both* ends of the string,
# whereas only a trailing "-SIP" suffix should be affected here.
val = header[kw].removesuffix("-SIP") + "-SIP"
else:
val = header[kw].removesuffix("-SIP")
header[kw] = val
else:
continue
return header
def to_header_string(self, relax=None):
"""
Identical to `to_header`, but returns a string containing the
header cards.
"""
return str(self.to_header(relax))
def footprint_to_file(
self, filename="footprint.reg", color="green", width=2, coordsys=None
):
"""
Writes out a `ds9`_ style regions file. It can be loaded
directly by `ds9`_.
Parameters
----------
filename : str, optional
Output file name - default is ``'footprint.reg'``
color : str, optional
Color to use when plotting the line.
width : int, optional
Width of the region line.
coordsys : str, optional
Coordinate system. If not specified (default), the ``radesys``
value is used. For all possible values, see
http://ds9.si.edu/doc/ref/region.html#RegionFileFormat
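Examples
--------
A minimal sketch, assuming ``w`` is a celestial WCS with a valid
footprint (the file name, color and width here are arbitrary):
>>> w.footprint_to_file('my_footprint.reg', color='red', width=3)  # doctest: +SKIP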
"""
comments = (
"# Region file format: DS9 version 4.0 \n"
'# global color=green font="helvetica 12 bold '
"select=1 highlite=1 edit=1 move=1 delete=1 "
"include=1 fixed=0 source\n"
)
coordsys = coordsys or self.wcs.radesys
if coordsys not in (
"PHYSICAL",
"IMAGE",
"FK4",
"B1950",
"FK5",
"J2000",
"GALACTIC",
"ECLIPTIC",
"ICRS",
"LINEAR",
"AMPLIFIER",
"DETECTOR",
):
raise ValueError(
f"Coordinate system '{coordsys}' is not supported. A valid"
" one can be given with the 'coordsys' argument."
)
with open(filename, mode="w") as f:
f.write(comments)
f.write(f"{coordsys}\n")
f.write("polygon(")
ftpr = self.calc_footprint()
if ftpr is not None:
ftpr.tofile(f, sep=",")
f.write(f") # color={color}, width={width:d} \n")
def _get_naxis(self, header=None):
_naxis = []
if header is not None and not isinstance(header, (str, bytes)):
for naxis in itertools.count(1):
try:
_naxis.append(header[f"NAXIS{naxis}"])
except KeyError:
break
if len(_naxis) == 0:
_naxis = [0, 0]
elif len(_naxis) == 1:
_naxis.append(0)
self._naxis = _naxis
def printwcs(self):
print(repr(self))
def __repr__(self):
"""
Return a short description, mirroring the output of the
`printwcs()` method.
"""
description = ["WCS Keywords\n", f"Number of WCS axes: {self.naxis!r}"]
sfmt = " : " + "".join(["{" + f"{i}" + "!r} " for i in range(self.naxis)])
keywords = ["CTYPE", "CRVAL", "CRPIX"]
values = [self.wcs.ctype, self.wcs.crval, self.wcs.crpix]
for keyword, value in zip(keywords, values):
description.append(keyword + sfmt.format(*value))
if hasattr(self.wcs, "pc"):
for i in range(self.naxis):
s = ""
for j in range(self.naxis):
s += "".join(["PC", str(i + 1), "_", str(j + 1), " "])
s += sfmt
description.append(s.format(*self.wcs.pc[i]))
s = "CDELT" + sfmt
description.append(s.format(*self.wcs.cdelt))
elif hasattr(self.wcs, "cd"):
for i in range(self.naxis):
s = ""
for j in range(self.naxis):
s += "".join(["CD", str(i + 1), "_", str(j + 1), " "])
s += sfmt
description.append(s.format(*self.wcs.cd[i]))
description.append(f"NAXIS : {' '.join(map(str, self._naxis))}")
return "\n".join(description)
def get_axis_types(self):
"""
Similar to `self.wcsprm.axis_types <astropy.wcs.Wcsprm.axis_types>`
but provides the information in a more Python-friendly format.
Returns
-------
result : list of dict
Returns a list of dictionaries, one for each axis, each
containing attributes about the type of that axis.
Each dictionary has the following keys:
- 'coordinate_type':
- None: Non-specific coordinate type.
- 'stokes': Stokes coordinate.
- 'celestial': Celestial coordinate (including ``CUBEFACE``).
- 'spectral': Spectral coordinate.
- 'scale':
- 'linear': Linear axis.
- 'quantized': Quantized axis (``STOKES``, ``CUBEFACE``).
- 'non-linear celestial': Non-linear celestial axis.
- 'non-linear spectral': Non-linear spectral axis.
- 'logarithmic': Logarithmic axis.
- 'tabular': Tabular axis.
- 'group'
- Group number, e.g. lookup table number
- 'number'
- For celestial axes:
- 0: Longitude coordinate.
- 1: Latitude coordinate.
- 2: ``CUBEFACE`` number.
- For lookup tables:
- the axis number in a multidimensional table.
``CTYPEia`` in ``"4-3"`` form with unrecognized algorithm code will
generate an error.
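Examples
--------
A minimal sketch, assuming ``w`` is a 2-D celestial WCS (the exact
values depend on the header):
>>> w.get_axis_types()   # doctest: +SKIP
[{'coordinate_type': 'celestial', 'scale': 'non-linear celestial', 'group': 0, 'number': 0},
{'coordinate_type': 'celestial', 'scale': 'non-linear celestial', 'group': 0, 'number': 1}]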
"""
if self.wcs is None:
raise AttributeError("This WCS object does not have a wcsprm object.")
coordinate_type_map = {0: None, 1: "stokes", 2: "celestial", 3: "spectral"}
scale_map = {
0: "linear",
1: "quantized",
2: "non-linear celestial",
3: "non-linear spectral",
4: "logarithmic",
5: "tabular",
}
result = []
for axis_type in self.wcs.axis_types:
subresult = {}
coordinate_type = (axis_type // 1000) % 10
subresult["coordinate_type"] = coordinate_type_map[coordinate_type]
scale = (axis_type // 100) % 10
subresult["scale"] = scale_map[scale]
group = (axis_type // 10) % 10
subresult["group"] = group
number = axis_type % 10
subresult["number"] = number
result.append(subresult)
return result
def __reduce__(self):
"""
Support pickling of WCS objects. This is done by serializing
to an in-memory FITS file and dumping that as a string.
"""
hdulist = self.to_fits(relax=True)
buffer = io.BytesIO()
hdulist.writeto(buffer)
dct = self.__dict__.copy()
dct["_alt_wcskey"] = self.wcs.alt
return (
__WCS_unpickle__,
(
self.__class__,
dct,
buffer.getvalue(),
),
)
def dropaxis(self, dropax):
"""
Remove an axis from the WCS.
Parameters
----------
dropax : int
The index of the WCS axis to drop, counting from 0 (i.e., python
convention, not FITS convention)
Returns
-------
`~astropy.wcs.WCS`
A new `~astropy.wcs.WCS` instance with one axis fewer
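Examples
--------
A minimal sketch, assuming ``w`` is a 3-D WCS (e.g. RA/Dec/velocity):
>>> wcs_2d = w.dropaxis(2)   # doctest: +SKIP
>>> wcs_2d.naxis             # doctest: +SKIP
2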
"""
inds = list(range(self.wcs.naxis))
inds.pop(dropax)
# axis 0 has special meaning to sub
# if wcs.wcs.ctype == ['RA','DEC','VLSR'], you want
# wcs.sub([1,2]) to get 'RA','DEC' back
return self.sub([i + 1 for i in inds])
def swapaxes(self, ax0, ax1):
"""
Swap axes in a WCS.
Parameters
----------
ax0 : int
ax1 : int
The indices of the WCS axes to be swapped, counting from 0 (i.e.,
python convention, not FITS convention)
Returns
-------
`~astropy.wcs.WCS`
A new `~astropy.wcs.WCS` instance with the same number of axes,
but two swapped
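Examples
--------
A minimal sketch, assuming ``w`` is a 2-D WCS; swapping axes 0 and 1
exchanges, e.g., the RA and Dec axes:
>>> w_swapped = w.swapaxes(0, 1)   # doctest: +SKIP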
"""
inds = list(range(self.wcs.naxis))
inds[ax0], inds[ax1] = inds[ax1], inds[ax0]
return self.sub([i + 1 for i in inds])
def reorient_celestial_first(self):
"""
Reorient the WCS such that the celestial axes are first, followed by
the spectral axis, followed by any others.
Assumes at least celestial axes are present.
"""
return self.sub(
[WCSSUB_CELESTIAL, WCSSUB_SPECTRAL, WCSSUB_STOKES, WCSSUB_TIME]
) # Defined by C-ext
def slice(self, view, numpy_order=True):
"""
Slice a WCS instance using a Numpy slice. The order of the slice should
be reversed (as for the data) compared to the natural WCS order.
Parameters
----------
view : tuple
A tuple containing the same number of slices as the WCS system.
Negative ``step`` values (axis reversal via the third argument to
a slice) are not presently supported.
numpy_order : bool
Use numpy order, i.e. slice the WCS so that an identical slice
applied to a numpy array will slice the array and WCS in the same
way. If set to `False`, the WCS will be sliced in FITS order,
meaning the first slice will be applied to the *last* numpy index
but the *first* WCS axis.
Returns
-------
wcs_new : `~astropy.wcs.WCS`
A new WCS instance corresponding to the sliced view.
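Examples
--------
A minimal sketch, assuming ``w`` is a 2-D WCS attached to an image;
``w[0:100, 0:200]`` via `__getitem__` is an equivalent shortcut:
>>> wcs_new = w.slice((slice(0, 100), slice(0, 200)))   # doctest: +SKIP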
"""
if hasattr(view, "__len__") and len(view) > self.wcs.naxis:
raise ValueError("Must have # of slices <= # of WCS axes")
elif not hasattr(view, "__len__"): # downstream code requires an iterable, so wrap a scalar view
view = [view]
if not all(isinstance(x, slice) for x in view):
# We need to drop some dimensions, but this may not always be
# possible with .sub due to correlated axes, so instead we use the
# generalized slicing infrastructure from astropy.wcs.wcsapi.
return SlicedFITSWCS(self, view)
# NOTE: we could in principle use SlicedFITSWCS as above for all slicing,
# but in the simple case where there are no axes dropped, we can just
# create a full WCS object with updated WCS parameters which is faster
# for this specific case and also backward-compatible.
wcs_new = self.deepcopy()
if wcs_new.sip is not None:
sip_crpix = wcs_new.sip.crpix.tolist()
for i, iview in enumerate(view):
if iview.step is not None and iview.step < 0:
raise NotImplementedError("Reversing an axis is not implemented.")
if numpy_order:
wcs_index = self.wcs.naxis - 1 - i
else:
wcs_index = i
if iview.step is not None and iview.start is None:
# Slice from "None" is equivalent to slice from 0 (but one
# might want to downsample, so allow slices with
# None,None,step or None,stop,step)
iview = slice(0, iview.stop, iview.step)
if iview.start is not None:
if iview.step not in (None, 1):
crpix = self.wcs.crpix[wcs_index]
cdelt = self.wcs.cdelt[wcs_index]
# equivalently (keep this comment so you can compare eqns):
# wcs_new.wcs.crpix[wcs_index] =
# (crpix - iview.start)*iview.step + 0.5 - iview.step/2.
crp = (
(crpix - iview.start - 1.0) / iview.step
+ 0.5
+ 1.0 / iview.step / 2.0
)
wcs_new.wcs.crpix[wcs_index] = crp
if wcs_new.sip is not None:
sip_crpix[wcs_index] = crp
wcs_new.wcs.cdelt[wcs_index] = cdelt * iview.step
else:
wcs_new.wcs.crpix[wcs_index] -= iview.start
if wcs_new.sip is not None:
sip_crpix[wcs_index] -= iview.start
try:
# range requires integers but the other attributes can also
# handle arbitrary values, so this needs to be in a try/except.
nitems = len(builtins.range(self._naxis[wcs_index])[iview])
except TypeError as exc:
if "indices must be integers" not in str(exc):
raise
warnings.warn(
f"NAXIS{wcs_index} attribute is not updated because at "
f"least one index ('{iview}') is no integer.",
AstropyUserWarning,
)
else:
wcs_new._naxis[wcs_index] = nitems
if wcs_new.sip is not None:
wcs_new.sip = Sip(
self.sip.a, self.sip.b, self.sip.ap, self.sip.bp, sip_crpix
)
return wcs_new
def __getitem__(self, item):
# "getitem" is a shortcut for self.slice; it is very limited
# there is no obvious and unambiguous interpretation of wcs[1,2,3]
# We COULD allow wcs[1] to link to wcs.sub([2])
# (wcs[i] -> wcs.sub([i+1])
return self.slice(item)
def __iter__(self):
# Having __getitem__ makes Python think WCS is iterable. However,
# Python first checks whether __iter__ is present, so we can raise an
# exception here.
raise TypeError(f"'{self.__class__.__name__}' object is not iterable")
@property
def axis_type_names(self):
"""
World names for each coordinate axis.
Returns
-------
list of str
A list of names along each axis.
"""
names = list(self.wcs.cname)
types = self.wcs.ctype
for i in range(len(names)):
if len(names[i]) > 0:
continue
names[i] = types[i].split("-")[0]
return names
@property
def celestial(self):
"""
A copy of the current WCS with only the celestial axes included.
"""
return self.sub([WCSSUB_CELESTIAL]) # Defined by C-ext
@property
def is_celestial(self):
return self.has_celestial and self.naxis == 2
@property
def has_celestial(self):
try:
return self.wcs.lng >= 0 and self.wcs.lat >= 0
except InconsistentAxisTypesError:
return False
@property
def spectral(self):
"""
A copy of the current WCS with only the spectral axes included.
"""
return self.sub([WCSSUB_SPECTRAL]) # Defined by C-ext
@property
def is_spectral(self):
return self.has_spectral and self.naxis == 1
@property
def has_spectral(self):
try:
return self.wcs.spec >= 0
except InconsistentAxisTypesError:
return False
@property
def temporal(self):
"""
A copy of the current WCS with only the time axes included.
"""
if not _WCSSUB_TIME_SUPPORT:
raise NotImplementedError(
"Support for 'temporal' axis requires WCSLIB version 7.8 or "
f"greater but linked WCSLIB version is {_wcs.__version__}"
)
return self.sub([WCSSUB_TIME]) # Defined by C-ext
@property
def is_temporal(self):
return self.has_temporal and self.naxis == 1
@property
def has_temporal(self):
return any(t // 1000 == 4 for t in self.wcs.axis_types)
@property
def has_distortion(self):
"""
Returns `True` if any distortion terms are present.
"""
return (
self.sip is not None
or self.cpdis1 is not None
or self.cpdis2 is not None
or self.det2im1 is not None
or self.det2im2 is not None
)
@property
def pixel_scale_matrix(self):
try:
cdelt = np.diag(self.wcs.get_cdelt())
pc = self.wcs.get_pc()
except InconsistentAxisTypesError:
try:
# for non-celestial axes, get_cdelt doesn't work
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
"cdelt will be ignored since cd is present",
RuntimeWarning,
)
cdelt = np.dot(self.wcs.cd, np.diag(self.wcs.cdelt))
except AttributeError:
cdelt = np.diag(self.wcs.cdelt)
try:
pc = self.wcs.pc
except AttributeError:
pc = 1
pccd = np.dot(cdelt, pc)
return pccd
def footprint_contains(self, coord, **kwargs):
"""
Determines if a given SkyCoord is contained in the wcs footprint.
Parameters
----------
coord : `~astropy.coordinates.SkyCoord`
The coordinate to check if it is within the wcs coordinate.
**kwargs :
Additional arguments to pass to `~astropy.coordinates.SkyCoord.to_pixel`
Returns
-------
response : bool
True means the WCS footprint contains the coordinate, False means it does not.
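Examples
--------
A minimal sketch, assuming ``w`` is a celestial WCS (the coordinate
values here are arbitrary):
>>> from astropy.coordinates import SkyCoord       # doctest: +SKIP
>>> coord = SkyCoord(5.53, -72.05, unit='deg')     # doctest: +SKIP
>>> w.footprint_contains(coord)                    # doctest: +SKIP
True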
"""
return coord.contained_by(self, **kwargs)
def __WCS_unpickle__(cls, dct, fits_data):
"""
Unpickles a WCS object from a serialized FITS string.
"""
self = cls.__new__(cls)
buffer = io.BytesIO(fits_data)
hdulist = fits.open(buffer)
naxis = dct.pop("naxis", None)
if naxis:
hdulist[0].header["naxis"] = naxis
naxes = dct.pop("_naxis", [])
for k, na in enumerate(naxes):
hdulist[0].header[f"naxis{k + 1:d}"] = na
kwargs = dct.pop("_init_kwargs", {})
self.__dict__.update(dct)
wcskey = dct.pop("_alt_wcskey", " ")
WCS.__init__(self, hdulist[0].header, hdulist, key=wcskey, **kwargs)
self.pixel_bounds = dct.get("_pixel_bounds", None)
return self
def find_all_wcs(
header, relax=True, keysel=None, fix=True, translate_units="", _do_set=True
):
"""
Find all the WCS transformations in the given header.
Parameters
----------
header : str or `~astropy.io.fits.Header` object.
relax : bool or int, optional
Degree of permissiveness:
- `True` (default): Admit all recognized informal extensions of the
WCS standard.
- `False`: Recognize only FITS keywords defined by the
published WCS standard.
- `int`: a bit field selecting specific extensions to accept.
See :ref:`astropy:relaxread` for details.
keysel : sequence of str, optional
A list of flags used to select the keyword types considered by
wcslib. When ``None``, only the standard image header
keywords are considered (and the underlying wcspih() C
function is called). To use binary table image array or pixel
list keywords, *keysel* must be set.
Each element in the list should be one of the following strings:
- 'image': Image header keywords
- 'binary': Binary table image array keywords
- 'pixel': Pixel list keywords
Keywords such as ``EQUIna`` or ``RFRQna`` that are common to
binary table image arrays and pixel lists (including
``WCSNna`` and ``TWCSna``) are selected by both 'binary' and
'pixel'.
fix : bool, optional
When `True` (default), call `~astropy.wcs.Wcsprm.fix` on
the resulting objects to fix any non-standard uses in the
header. `FITSFixedWarning` warnings will be emitted if any
changes were made.
translate_units : str, optional
Specify which potentially unsafe translations of non-standard
unit strings to perform. By default, performs none. See
`WCS.fix` for more information about this parameter. Only
effective when ``fix`` is `True`.
Returns
-------
wcses : list of `WCS`
"""
if isinstance(header, (str, bytes)):
header_string = header
elif isinstance(header, fits.Header):
header_string = header.tostring()
else:
raise TypeError("header must be a string or astropy.io.fits.Header object")
keysel_flags = _parse_keysel(keysel)
if isinstance(header_string, str):
header_bytes = header_string.encode("ascii")
else:
header_bytes = header_string
wcsprms = _wcs.find_all_wcs(header_bytes, relax, keysel_flags)
result = []
for wcsprm in wcsprms:
subresult = WCS(fix=False, _do_set=False)
subresult.wcs = wcsprm
result.append(subresult)
if fix:
subresult.fix(translate_units)
if _do_set:
subresult.wcs.set()
return result
def validate(source):
"""
Prints a WCS validation report for the given FITS file.
Parameters
----------
source : str or file-like or `~astropy.io.fits.HDUList`
The FITS file to validate.
Returns
-------
results : list subclass instance
The result is returned as nested lists. The first level
corresponds to the HDUs in the given file. The next level has
an entry for each WCS found in that header. The special
subclass of list will pretty-print the results as a table when
printed.
"""
class _WcsValidateWcsResult(list):
def __init__(self, key):
self._key = key
def __repr__(self):
result = [f" WCS key '{self._key or ' '}':"]
if len(self):
for entry in self:
for i, line in enumerate(entry.splitlines()):
if i == 0:
initial_indent = " - "
else:
initial_indent = " "
result.extend(
textwrap.wrap(
line,
initial_indent=initial_indent,
subsequent_indent=" ",
)
)
else:
result.append(" No issues.")
return "\n".join(result)
class _WcsValidateHduResult(list):
def __init__(self, hdu_index, hdu_name):
self._hdu_index = hdu_index
self._hdu_name = hdu_name
list.__init__(self)
def __repr__(self):
if len(self):
if self._hdu_name:
hdu_name = f" ({self._hdu_name})"
else:
hdu_name = ""
result = [f"HDU {self._hdu_index}{hdu_name}:"]
for wcs in self:
result.append(repr(wcs))
return "\n".join(result)
return ""
class _WcsValidateResults(list):
def __repr__(self):
result = []
for hdu in self:
content = repr(hdu)
if len(content):
result.append(content)
return "\n\n".join(result)
global __warningregistry__
if isinstance(source, fits.HDUList):
hdulist = source
else:
hdulist = fits.open(source)
results = _WcsValidateResults()
for i, hdu in enumerate(hdulist):
hdu_results = _WcsValidateHduResult(i, hdu.name)
results.append(hdu_results)
with warnings.catch_warnings(record=True) as warning_lines:
wcses = find_all_wcs(
hdu.header, relax=_wcs.WCSHDR_reject, fix=False, _do_set=False
)
for wcs in wcses:
wcs_results = _WcsValidateWcsResult(wcs.wcs.alt)
hdu_results.append(wcs_results)
try:
del __warningregistry__
except NameError:
pass
with warnings.catch_warnings(record=True) as warning_lines:
warnings.resetwarnings()
warnings.simplefilter("always", FITSFixedWarning, append=True)
try:
WCS(
hdu.header,
hdulist,
key=wcs.wcs.alt or " ",
relax=_wcs.WCSHDR_reject,
fix=True,
_do_set=False,
)
except WcsError as e:
wcs_results.append(str(e))
wcs_results.extend([str(x.message) for x in warning_lines])
return results
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The astropy.time package provides functionality for manipulating times and
dates. Specific emphasis is placed on supporting time scales (e.g. UTC, TAI,
UT1) and time representations (e.g. JD, MJD, ISO 8601) that are used in
astronomy.
"""
import copy
import enum
import operator
import os
import threading
from datetime import date, datetime, timedelta
from time import strftime
from warnings import warn
import erfa
import numpy as np
from astropy import constants as const
from astropy import units as u
from astropy.extern import _strptime
from astropy.units import UnitConversionError
from astropy.utils import ShapedLikeNDArray
from astropy.utils.data_info import MixinInfo, data_info_factory
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
# Import TimeFromEpoch to avoid breaking code that followed the old example of
# making a custom timescale in the documentation.
from .formats import TimeFromEpoch # noqa: F401
from .formats import (
TIME_DELTA_FORMATS,
TIME_FORMATS,
TimeAstropyTime,
TimeDatetime,
TimeJD,
TimeUnique,
)
from .time_helper.function_helpers import CUSTOM_FUNCTIONS, UNSUPPORTED_FUNCTIONS
from .utils import day_frac
__all__ = [
"TimeBase",
"Time",
"TimeDelta",
"TimeInfo",
"TimeInfoBase",
"update_leap_seconds",
"TIME_SCALES",
"STANDARD_TIME_SCALES",
"TIME_DELTA_SCALES",
"ScaleValueError",
"OperandTypeError",
"TimeDeltaMissingUnitWarning",
]
STANDARD_TIME_SCALES = ("tai", "tcb", "tcg", "tdb", "tt", "ut1", "utc")
LOCAL_SCALES = ("local",)
TIME_TYPES = {
scale: scales for scales in (STANDARD_TIME_SCALES, LOCAL_SCALES) for scale in scales
}
TIME_SCALES = STANDARD_TIME_SCALES + LOCAL_SCALES
MULTI_HOPS = {
("tai", "tcb"): ("tt", "tdb"),
("tai", "tcg"): ("tt",),
("tai", "ut1"): ("utc",),
("tai", "tdb"): ("tt",),
("tcb", "tcg"): ("tdb", "tt"),
("tcb", "tt"): ("tdb",),
("tcb", "ut1"): ("tdb", "tt", "tai", "utc"),
("tcb", "utc"): ("tdb", "tt", "tai"),
("tcg", "tdb"): ("tt",),
("tcg", "ut1"): ("tt", "tai", "utc"),
("tcg", "utc"): ("tt", "tai"),
("tdb", "ut1"): ("tt", "tai", "utc"),
("tdb", "utc"): ("tt", "tai"),
("tt", "ut1"): ("tai", "utc"),
("tt", "utc"): ("tai",),
}
GEOCENTRIC_SCALES = ("tai", "tt", "tcg")
BARYCENTRIC_SCALES = ("tcb", "tdb")
ROTATIONAL_SCALES = ("ut1",)
TIME_DELTA_TYPES = {
scale: scales
for scales in (
GEOCENTRIC_SCALES,
BARYCENTRIC_SCALES,
ROTATIONAL_SCALES,
LOCAL_SCALES,
)
for scale in scales
}
TIME_DELTA_SCALES = (
GEOCENTRIC_SCALES + BARYCENTRIC_SCALES + ROTATIONAL_SCALES + LOCAL_SCALES
)
# For time scale changes, we need L_G and L_B, which are stored in erfam.h as
# /* L_G = 1 - d(TT)/d(TCG) */
# define ERFA_ELG (6.969290134e-10)
# /* L_B = 1 - d(TDB)/d(TCB), and TDB (s) at TAI 1977/1/1.0 */
# define ERFA_ELB (1.550519768e-8)
# These are exposed in erfa as erfa.ELG and erfa.ELB.
# Implied: d(TT)/d(TCG) = 1-L_G
# and d(TCG)/d(TT) = 1/(1-L_G) = 1 + (1-(1-L_G))/(1-L_G) = 1 + L_G/(1-L_G)
# scale offsets as second = first + first * scale_offset[(first,second)]
SCALE_OFFSETS = {
("tt", "tai"): None,
("tai", "tt"): None,
("tcg", "tt"): -erfa.ELG,
("tt", "tcg"): erfa.ELG / (1.0 - erfa.ELG),
("tcg", "tai"): -erfa.ELG,
("tai", "tcg"): erfa.ELG / (1.0 - erfa.ELG),
("tcb", "tdb"): -erfa.ELB,
("tdb", "tcb"): erfa.ELB / (1.0 - erfa.ELB),
}
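# Illustrative sketch (not used by the code below): with the convention
# above, an interval of 1 TT second corresponds to
#     1.0 * (1.0 + SCALE_OFFSETS[("tt", "tcg")])
# TCG seconds, i.e. TCG ticks slightly faster than TT.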
# triple-level dictionary, yay!
SIDEREAL_TIME_MODELS = {
"mean": {
"IAU2006": {"function": erfa.gmst06, "scales": ("ut1", "tt")},
"IAU2000": {"function": erfa.gmst00, "scales": ("ut1", "tt")},
"IAU1982": {"function": erfa.gmst82, "scales": ("ut1",), "include_tio": False},
},
"apparent": {
"IAU2006A": {"function": erfa.gst06a, "scales": ("ut1", "tt")},
"IAU2000A": {"function": erfa.gst00a, "scales": ("ut1", "tt")},
"IAU2000B": {"function": erfa.gst00b, "scales": ("ut1",)},
"IAU1994": {"function": erfa.gst94, "scales": ("ut1",), "include_tio": False},
},
}
class _LeapSecondsCheck(enum.Enum):
NOT_STARTED = 0 # No thread has reached the check
RUNNING = 1 # A thread is running update_leap_seconds (_LEAP_SECONDS_LOCK is held)
DONE = 2 # update_leap_seconds has completed
_LEAP_SECONDS_CHECK = _LeapSecondsCheck.NOT_STARTED
_LEAP_SECONDS_LOCK = threading.RLock()
def _compress_array_dims(arr):
"""Compress array by allowing at most 2 * edgeitems + 1 in each dimension.
Parameters
----------
arr : array-like
Array to compress.
Returns
-------
out : array-like
Compressed array.
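Examples
--------
A minimal sketch: with numpy's default print option ``edgeitems=3``,
each long dimension is reduced to 2 * 3 + 1 = 7 entries:
>>> _compress_array_dims(np.arange(100)).shape   # doctest: +SKIP
(7,)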
"""
idxs = []
edgeitems = np.get_printoptions()["edgeitems"]
# Build up a list of index arrays for each dimension, allowing no more than
# 2 * edgeitems + 1 elements in each dimension.
for dim in range(arr.ndim):
if arr.shape[dim] > 2 * edgeitems:
# The middle [edgeitems] value does not matter as it gets replaced
# by ... in the output.
idxs.append(
np.concatenate(
[np.arange(edgeitems), [edgeitems], np.arange(-edgeitems, 0)]
)
)
else:
idxs.append(np.arange(arr.shape[dim]))
# Use the magic np.ix_ function to effectively treat each index array as a
# slicing operator.
idxs_ix = np.ix_(*idxs)
out = arr[idxs_ix]
return out
class TimeInfoBase(MixinInfo):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
This base class is common between TimeInfo and TimeDeltaInfo.
"""
attr_names = MixinInfo.attr_names | {"serialize_method"}
_supports_indexing = True
# The usual tuple of attributes needed for serialization is replaced
# by a property, since Time can be serialized different ways.
_represent_as_dict_extra_attrs = (
"format",
"scale",
"precision",
"in_subfmt",
"out_subfmt",
"location",
"_delta_ut1_utc",
"_delta_tdb_tt",
)
# When serializing, write out the `value` attribute using the column name.
_represent_as_dict_primary_data = "value"
mask_val = np.ma.masked
@property
def _represent_as_dict_attrs(self):
method = self.serialize_method[self._serialize_context]
if method == "formatted_value":
out = ("value",)
elif method == "jd1_jd2":
out = ("jd1", "jd2")
else:
raise ValueError("serialize method must be 'formatted_value' or 'jd1_jd2'")
return out + self._represent_as_dict_extra_attrs
def __init__(self, bound=False):
super().__init__(bound)
# If bound to a data object instance then create the dict of attributes
# which stores the info attribute values.
if bound:
# Specify how to serialize this object depending on context.
# If ``True`` for a context, then use formatted ``value`` attribute
# (e.g. the ISO time string). If ``False`` then use float jd1 and jd2.
self.serialize_method = {
"fits": "jd1_jd2",
"ecsv": "formatted_value",
"hdf5": "jd1_jd2",
"yaml": "jd1_jd2",
"parquet": "jd1_jd2",
None: "jd1_jd2",
}
def get_sortable_arrays(self):
"""
Return a list of arrays which can be lexically sorted to represent
the order of the parent column.
Returns
-------
arrays : list of ndarray
"""
parent = self._parent
jd_approx = parent.jd
jd_remainder = (parent - parent.__class__(jd_approx, format="jd")).jd
return [jd_approx, jd_remainder]
@property
def unit(self):
return None
info_summary_stats = staticmethod(
data_info_factory(
names=MixinInfo._stats,
funcs=[getattr(np, stat) for stat in MixinInfo._stats],
)
)
# When Time has mean, std, min, max methods:
# funcs = [lambda x: getattr(x, stat)() for stat in MixinInfo._stats]
def _construct_from_dict(self, map):
if "jd1" in map and "jd2" in map:
# Initialize as JD but revert to desired format and out_subfmt (if needed)
format = map.pop("format")
out_subfmt = map.pop("out_subfmt", None)
map["format"] = "jd"
map["val"] = map.pop("jd1")
map["val2"] = map.pop("jd2")
out = self._parent_cls(**map)
out.format = format
if out_subfmt is not None:
out.out_subfmt = out_subfmt
else:
map["val"] = map.pop("value")
out = self._parent_cls(**map)
return out
def new_like(self, cols, length, metadata_conflicts="warn", name=None):
"""
Return a new Time instance which is consistent with the input Time objects
``cols`` and has ``length`` rows.
This is intended for creating an empty Time instance whose elements can
be set in-place for table operations like join or vstack. It checks
that the input locations and attributes are consistent. This is used
when a Time object is used as a mixin column in an astropy Table.
Parameters
----------
cols : list
List of input columns (Time objects)
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : Time (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(
cols, metadata_conflicts, name, ("meta", "description")
)
attrs.pop("dtype") # Not relevant for Time
col0 = cols[0]
# Check that location is consistent for all Time objects
for col in cols[1:]:
# This is the method used by __setitem__ to ensure that the right side
# has a consistent location (and coerce data if necessary, but that does
# not happen in this case since `col` is already a Time object). If this
# passes then any subsequent table operations via setitem will work.
try:
col0._make_value_equivalent(slice(None), col)
except ValueError:
raise ValueError("input columns have inconsistent locations")
# Make a new Time object with the desired shape and attributes
shape = (length,) + attrs.pop("shape")
jd2000 = 2451544.5 # JD for 2000-01-01 00:00; an arbitrary value that will work with ERFA
jd1 = np.full(shape, jd2000, dtype="f8")
jd2 = np.zeros(shape, dtype="f8")
tm_attrs = {
attr: getattr(col0, attr) for attr in ("scale", "location", "precision")
}
out = self._parent_cls(jd1, jd2, format="jd", **tm_attrs)
out.format = col0.format
out.out_subfmt = col0.out_subfmt
out.in_subfmt = col0.in_subfmt
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
class TimeInfo(TimeInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
def _represent_as_dict(self, attrs=None):
"""Get the values for the parent ``attrs`` and return as a dict.
By default, uses '_represent_as_dict_attrs'.
"""
map = super()._represent_as_dict(attrs=attrs)
# TODO: refactor these special cases into the TimeFormat classes?
# The datetime64 format requires special handling for ECSV (see #12840).
# The `value` has numpy dtype datetime64 but this is not an allowed
# datatype for ECSV. Instead convert to a string representation.
if (
self._serialize_context == "ecsv"
and map["format"] == "datetime64"
and "value" in map
):
map["value"] = map["value"].astype("U")
# The datetime format is serialized as ISO with no loss of precision.
if map["format"] == "datetime" and "value" in map:
map["value"] = np.vectorize(lambda x: x.isoformat())(map["value"])
return map
def _construct_from_dict(self, map):
# See comment above. May need to convert string back to datetime64.
# Note that _serialize_context is not set here so we just look for the
# string value directly.
if (
map["format"] == "datetime64"
and "value" in map
and map["value"].dtype.kind == "U"
):
map["value"] = map["value"].astype("datetime64")
# Convert back to datetime objects for datetime format.
if map["format"] == "datetime" and "value" in map:
from datetime import datetime
map["value"] = np.vectorize(datetime.fromisoformat)(map["value"])
delta_ut1_utc = map.pop("_delta_ut1_utc", None)
delta_tdb_tt = map.pop("_delta_tdb_tt", None)
out = super()._construct_from_dict(map)
if delta_ut1_utc is not None:
out._delta_ut1_utc = delta_ut1_utc
if delta_tdb_tt is not None:
out._delta_tdb_tt = delta_tdb_tt
return out
class TimeDeltaInfo(TimeInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
_represent_as_dict_extra_attrs = ("format", "scale")
def new_like(self, cols, length, metadata_conflicts="warn", name=None):
"""
Return a new TimeDelta instance which is consistent with the input
TimeDelta objects ``cols`` and has ``length`` rows.
This is intended for creating an empty TimeDelta instance whose elements
can be set in-place for table operations like join or vstack. It checks
that the input attributes are consistent. This is used when a TimeDelta
object is used as a mixin column in an astropy Table.
Parameters
----------
cols : list
List of input columns (Time objects)
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : TimeDelta (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(
cols, metadata_conflicts, name, ("meta", "description")
)
attrs.pop("dtype") # Not relevant for Time
col0 = cols[0]
# Make a new Time object with the desired shape and attributes
shape = (length,) + attrs.pop("shape")
jd1 = np.zeros(shape, dtype="f8")
jd2 = np.zeros(shape, dtype="f8")
out = self._parent_cls(jd1, jd2, format="jd", scale=col0.scale)
out.format = col0.format
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
class TimeBase(ShapedLikeNDArray):
"""Base time class from which Time and TimeDelta inherit."""
# Make sure that reverse arithmetic (e.g., TimeDelta.__rmul__)
# gets called over the __mul__ of Numpy arrays.
__array_priority__ = 20000
# Declare that Time can be used as a Table column by defining the
# attribute where column attributes will be stored.
_astropy_column_attrs = None
def __getnewargs__(self):
return (self._time,)
def _init_from_vals(
self,
val,
val2,
format,
scale,
copy,
precision=None,
in_subfmt=None,
out_subfmt=None,
):
"""
Set the internal _format, scale, and _time attrs from user
inputs. This handles coercion into the correct shapes and
some basic input validation.
"""
if precision is None:
precision = 3
if in_subfmt is None:
in_subfmt = "*"
if out_subfmt is None:
out_subfmt = "*"
# Coerce val into an array
val = _make_array(val, copy)
# If val2 is not None, ensure consistency
if val2 is not None:
val2 = _make_array(val2, copy)
try:
np.broadcast(val, val2)
except ValueError:
raise ValueError(
"Input val and val2 have inconsistent shape; "
"they cannot be broadcast together."
)
if scale is not None:
if not (isinstance(scale, str) and scale.lower() in self.SCALES):
raise ScaleValueError(
f"Scale {scale!r} is not in the allowed scales "
f"{sorted(self.SCALES)}"
)
# If either of the input val, val2 are masked arrays then
# find the masked elements and fill them.
mask, val, val2 = _check_for_masked_and_fill(val, val2)
# Parse / convert input values into internal jd1, jd2 based on format
self._time = self._get_time_fmt(
val, val2, format, scale, precision, in_subfmt, out_subfmt
)
self._format = self._time.name
# Hack from #9969 to allow passing the location value that has been
# collected by the TimeAstropyTime format class up to the Time level.
# TODO: find a nicer way.
if hasattr(self._time, "_location"):
self.location = self._time._location
del self._time._location
# If any inputs were masked then mask jd2 accordingly. From the above
# routine ``mask`` must be either the Python bool False or a bool ndarray
# with shape broadcastable to jd2.
if mask is not False:
mask = np.broadcast_to(mask, self._time.jd2.shape)
self._time.jd1[mask] = 2451544.5 # Set to JD for 2000-01-01
self._time.jd2[mask] = np.nan
def _get_time_fmt(self, val, val2, format, scale, precision, in_subfmt, out_subfmt):
"""
Given the supplied val, val2, format and scale try to instantiate
the corresponding TimeFormat class to convert the input values into
the internal jd1 and jd2.
If format is `None` and the input is a string-type or object array then
guess available formats and stop when one matches.
"""
if format is None and (
val.dtype.kind in ("S", "U", "O", "M") or val.dtype.names
):
# Input is a string, object, datetime, or a table-like ndarray
# (structured array, recarray). These input types can be
# uniquely identified by the format classes.
formats = [
(name, cls)
for name, cls in self.FORMATS.items()
if issubclass(cls, TimeUnique)
]
# AstropyTime is a pseudo-format that isn't in the TIME_FORMATS registry,
# but try to guess it at the end.
formats.append(("astropy_time", TimeAstropyTime))
elif not (isinstance(format, str) and format.lower() in self.FORMATS):
if format is None:
raise ValueError(
"No time format was given, and the input is not unique"
)
else:
raise ValueError(
f"Format {format!r} is not one of the allowed formats "
f"{sorted(self.FORMATS)}"
)
else:
formats = [(format, self.FORMATS[format])]
assert formats
problems = {}
for name, cls in formats:
try:
return cls(val, val2, scale, precision, in_subfmt, out_subfmt)
except UnitConversionError:
raise
except (ValueError, TypeError) as err:
# If ``format`` specified then there is only one possibility, so raise
# immediately and include the upstream exception message to make it
# easier for user to see what is wrong.
if len(formats) == 1:
raise ValueError(
f"Input values did not match the format class {format}:"
+ os.linesep
+ f"{err.__class__.__name__}: {err}"
) from err
else:
problems[name] = err
else:
raise ValueError(
"Input values did not match any of the formats where the format "
f"keyword is optional: {problems}"
) from problems[formats[0][0]]
@property
def writeable(self):
return self._time.jd1.flags.writeable and self._time.jd2.flags.writeable
@writeable.setter
def writeable(self, value):
self._time.jd1.flags.writeable = value
self._time.jd2.flags.writeable = value
@property
def format(self):
"""
Get or set time format.
The format defines the way times are represented when accessed via the
``.value`` attribute. By default it is the same as the format used for
initializing the `Time` instance, but it can be set to any other value
that could be used for initialization. These can be listed with::
>>> list(Time.FORMATS)
['jd', 'mjd', 'decimalyear', 'unix', 'unix_tai', 'cxcsec', 'gps', 'plot_date',
'stardate', 'datetime', 'ymdhms', 'iso', 'isot', 'yday', 'datetime64',
'fits', 'byear', 'jyear', 'byear_str', 'jyear_str']
"""
return self._format
@format.setter
def format(self, format):
"""Set time format."""
if format not in self.FORMATS:
raise ValueError(f"format must be one of {list(self.FORMATS)}")
format_cls = self.FORMATS[format]
# Get the new TimeFormat object to contain time in new format. Possibly
# coerce in/out_subfmt to '*' (default) if existing subfmt values are
# not valid in the new format.
self._time = format_cls(
self._time.jd1,
self._time.jd2,
self._time._scale,
self.precision,
in_subfmt=format_cls._get_allowed_subfmt(self.in_subfmt),
out_subfmt=format_cls._get_allowed_subfmt(self.out_subfmt),
from_jd=True,
)
self._format = format
def to_string(self):
"""Output a string representation of the Time or TimeDelta object.
Similar to ``str(self.value)`` (which uses numpy array formatting) but
array values are evaluated only for the items that actually are output.
For large arrays this can be a substantial performance improvement.
Returns
-------
out : str
String representation of the time values.
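Examples
--------
A minimal sketch, assuming ``t`` is a `Time` array larger than the
numpy print threshold, so only the edge items are formatted:
>>> print(t.to_string())   # doctest: +SKIP
['2000-01-01 00:00:00.000' ... '2000-01-02 00:00:00.000']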
"""
npo = np.get_printoptions()
if self.size < npo["threshold"]:
out = str(self.value)
else:
# Compress time object by allowing at most 2 * npo["edgeitems"] + 1
# in each dimension. Then force numpy to use "summary mode" of
# showing only the edge items by setting the size threshold to 0.
# TODO: use np.core.arrayprint._leading_trailing if we have support for
# np.concatenate. See #8610.
tm = _compress_array_dims(self)
with np.printoptions(threshold=0):
out = str(tm.value)
return out
def __repr__(self):
return "<{} object: scale='{}' format='{}' value={}>".format(
self.__class__.__name__, self.scale, self.format, self.to_string()
)
def __str__(self):
return self.to_string()
def __hash__(self):
try:
loc = getattr(self, "location", None)
if loc is not None:
loc = loc.x.to_value(u.m), loc.y.to_value(u.m), loc.z.to_value(u.m)
return hash((self.jd1, self.jd2, self.scale, loc))
except TypeError:
if self.ndim != 0:
reason = "(must be scalar)"
elif self.masked:
reason = "(value is masked)"
else:
raise
raise TypeError(f"unhashable type: '{self.__class__.__name__}' {reason}")
@property
def scale(self):
"""Time scale."""
return self._time.scale
def _set_scale(self, scale):
"""
This is the key routine that actually does time scale conversions.
This is not public and not connected to the read-only scale property.
"""
if scale == self.scale:
return
if scale not in self.SCALES:
raise ValueError(
f"Scale {scale!r} is not in the allowed scales {sorted(self.SCALES)}"
)
if scale == "utc" or self.scale == "utc":
# If doing a transform involving UTC then check that the leap
# seconds table is up to date.
_check_leapsec()
# Determine the chain of scale transformations to get from the current
# scale to the new scale. MULTI_HOPS contains a dict of all
# transformations (xforms) that require intermediate xforms.
# The MULTI_HOPS dict is keyed by (sys1, sys2) in alphabetical order.
xform = (self.scale, scale)
xform_sort = tuple(sorted(xform))
multi = MULTI_HOPS.get(xform_sort, ())
xforms = xform_sort[:1] + multi + xform_sort[-1:]
# If we made the reverse xform then reverse it now.
if xform_sort != xform:
xforms = tuple(reversed(xforms))
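        # For example (illustrative): a 'utc' -> 'tt' conversion sorts to
        # ('tt', 'utc'), picks up the intermediate 'tai' hop from MULTI_HOPS,
        # and after reversal becomes ('utc', 'tai', 'tt'), i.e. erfa.utctai
        # followed by erfa.taitt.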
# Transform the jd1,2 pairs through the chain of scale xforms.
jd1, jd2 = self._time.jd1, self._time.jd2_filled
for sys1, sys2 in zip(xforms[:-1], xforms[1:]):
# Some xforms require an additional delta_ argument that is
# provided through Time methods. These values may be supplied by
# the user or computed based on available approximations. The
# get_delta_ methods are available for only one combination of
# sys1, sys2 though the property applies for both xform directions.
args = [jd1, jd2]
for sys12 in ((sys1, sys2), (sys2, sys1)):
dt_method = "_get_delta_{}_{}".format(*sys12)
try:
get_dt = getattr(self, dt_method)
except AttributeError:
pass
else:
args.append(get_dt(jd1, jd2))
break
conv_func = getattr(erfa, sys1 + sys2)
jd1, jd2 = conv_func(*args)
jd1, jd2 = day_frac(jd1, jd2)
if self.masked:
jd2[self.mask] = np.nan
self._time = self.FORMATS[self.format](
jd1,
jd2,
scale,
self.precision,
self.in_subfmt,
self.out_subfmt,
from_jd=True,
)
@property
def precision(self):
"""
Decimal precision when outputting seconds as floating point (int
value between 0 and 9 inclusive).
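
        For example (illustrative)::

            >>> t = Time('2010-01-01 00:00:00', scale='utc', precision=6)
            >>> t.iso
            '2010-01-01 00:00:00.000000'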
"""
return self._time.precision
@precision.setter
def precision(self, val):
del self.cache
self._time.precision = val
@property
def in_subfmt(self):
"""
Unix wildcard pattern to select subformats for parsing string input
times.
"""
return self._time.in_subfmt
@in_subfmt.setter
def in_subfmt(self, val):
self._time.in_subfmt = val
del self.cache
@property
def out_subfmt(self):
"""
Unix wildcard pattern to select subformats for outputting times.
"""
return self._time.out_subfmt
@out_subfmt.setter
def out_subfmt(self, val):
# Setting the out_subfmt property here does validation of ``val``
self._time.out_subfmt = val
del self.cache
@property
def shape(self):
"""The shape of the time instances.
Like `~numpy.ndarray.shape`, can be set to a new shape by assigning a
tuple. Note that if different instances share some but not all
underlying data, setting the shape of one instance can make the other
instance unusable. Hence, it is strongly recommended to get new,
reshaped instances with the ``reshape`` method.
Raises
------
ValueError
If the new shape has the wrong total number of elements.
AttributeError
If the shape of the ``jd1``, ``jd2``, ``location``,
``delta_ut1_utc``, or ``delta_tdb_tt`` attributes cannot be changed
without the arrays being copied. For these cases, use the
`Time.reshape` method (which copies any arrays that cannot be
reshaped in-place).
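
        For example (a minimal sketch)::

            >>> import numpy as np
            >>> t = Time(50000 + np.arange(6.), format='mjd')
            >>> t.shape = (2, 3)
            >>> t.shape
            (2, 3)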
"""
return self._time.jd1.shape
@shape.setter
def shape(self, shape):
del self.cache
# We have to keep track of arrays that were already reshaped,
# since we may have to return those to their original shape if a later
# shape-setting fails.
reshaped = []
oldshape = self.shape
# In-place reshape of data/attributes. Need to access _time.jd1/2 not
# self.jd1/2 because the latter are not guaranteed to be the actual
# data, and in fact should not be directly changeable from the public
# API.
for obj, attr in (
(self._time, "jd1"),
(self._time, "jd2"),
(self, "_delta_ut1_utc"),
(self, "_delta_tdb_tt"),
(self, "location"),
):
val = getattr(obj, attr, None)
if val is not None and val.size > 1:
try:
val.shape = shape
except Exception:
for val2 in reshaped:
val2.shape = oldshape
raise
else:
reshaped.append(val)
def _shaped_like_input(self, value):
if self._time.jd1.shape:
if isinstance(value, np.ndarray):
return value
else:
raise TypeError(
f"JD is an array ({self._time.jd1!r}) but value is not ({value!r})"
)
else:
# zero-dimensional array, is it safe to unbox?
if (
isinstance(value, np.ndarray)
and not value.shape
and not np.ma.is_masked(value)
):
if value.dtype.kind == "M":
# existing test doesn't want datetime64 converted
return value[()]
elif value.dtype.fields:
# Unpack but keep field names; .item() doesn't
# Still don't get python types in the fields
return value[()]
else:
return value.item()
else:
return value
@property
def jd1(self):
"""
First of the two doubles that internally store time value(s) in JD.
"""
jd1 = self._time.mask_if_needed(self._time.jd1)
return self._shaped_like_input(jd1)
@property
def jd2(self):
"""
Second of the two doubles that internally store time value(s) in JD.
"""
jd2 = self._time.mask_if_needed(self._time.jd2)
return self._shaped_like_input(jd2)
def to_value(self, format, subfmt="*"):
"""Get time values expressed in specified output format.
This method allows representing the ``Time`` object in the desired
output ``format`` and optional sub-format ``subfmt``. Available
built-in formats include ``jd``, ``mjd``, ``iso``, and so forth. Each
        format can have its own sub-formats.
For built-in numerical formats like ``jd`` or ``unix``, ``subfmt`` can
be one of 'float', 'long', 'decimal', 'str', or 'bytes'. Here, 'long'
uses ``numpy.longdouble`` for somewhat enhanced precision (with
the enhancement depending on platform), and 'decimal'
:class:`decimal.Decimal` for full precision. For 'str' and 'bytes', the
number of digits is also chosen such that time values are represented
accurately.
        For built-in date-like string formats, ``subfmt`` can be one of 'date_hms', 'date_hm', or
'date' (or 'longdate_hms', etc., for 5-digit years in
`~astropy.time.TimeFITS`). For sub-formats including seconds, the
number of digits used for the fractional seconds is as set by
`~astropy.time.Time.precision`.
Parameters
----------
format : str
The format in which one wants the time values. Default: the current
format.
subfmt : str or None, optional
Value or wildcard pattern to select the sub-format in which the
values should be given. The default of '*' picks the first
available for a given format, i.e., 'float' or 'date_hms'.
If `None`, use the instance's ``out_subfmt``.
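
        For example (illustrative)::

            >>> t = Time('2010-01-01T00:00:00', scale='utc')
            >>> t.to_value('mjd')
            55197.0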
"""
# TODO: add a precision argument (but ensure it is keyword argument
# only, to make life easier for TimeDelta.to_value()).
if format not in self.FORMATS:
raise ValueError(f"format must be one of {list(self.FORMATS)}")
cache = self.cache["format"]
# Try to keep cache behaviour like it was in astropy < 4.0.
key = format if subfmt is None else (format, subfmt)
if key not in cache:
if format == self.format:
tm = self
else:
tm = self.replicate(format=format)
            # Some TimeFormat subclasses may not be able to handle being
            # passed an out_subfmt. This includes some core classes like
# TimeBesselianEpochString that do not have any allowed subfmts. But
# those do deal with `self.out_subfmt` internally, so if subfmt is
# the same, we do not pass it on.
kwargs = {}
if subfmt is not None and subfmt != tm.out_subfmt:
kwargs["out_subfmt"] = subfmt
try:
value = tm._time.to_value(parent=tm, **kwargs)
except TypeError as exc:
# Try validating subfmt, e.g. for formats like 'jyear_str' that
# do not implement out_subfmt in to_value() (because there are
# no allowed subformats). If subfmt is not valid this gives the
# same exception as would have occurred if the call to
# `to_value()` had succeeded.
tm._time._select_subfmts(subfmt)
# Subfmt was valid, so fall back to the original exception to see
# if it was lack of support for out_subfmt as a call arg.
if "unexpected keyword argument 'out_subfmt'" in str(exc):
raise ValueError(
f"to_value() method for format {format!r} does not "
"support passing a 'subfmt' argument"
) from None
else:
# Some unforeseen exception so raise.
raise
value = tm._shaped_like_input(value)
cache[key] = value
return cache[key]
@property
def value(self):
"""Time value(s) in current format."""
return self.to_value(self.format, None)
@property
def masked(self):
return self._time.masked
@property
def mask(self):
return self._time.mask
def insert(self, obj, values, axis=0):
"""
Insert values before the given indices in the column and return
a new `~astropy.time.Time` or `~astropy.time.TimeDelta` object.
The values to be inserted must conform to the rules for in-place setting
of ``Time`` objects (see ``Get and set values`` in the ``Time``
documentation).
The API signature matches the ``np.insert`` API, but is more limited.
The specification of insert index ``obj`` must be a single integer,
and the ``axis`` must be ``0`` for simple row insertion before the
index.
Parameters
----------
obj : int
Integer index before which ``values`` is inserted.
values : array-like
            Value(s) to insert. If the type of ``values`` is different
            from that of this object, ``values`` is converted to the
            matching type.
axis : int, optional
Axis along which to insert ``values``. Default is 0, which is the
only allowed value and will insert a row.
Returns
-------
out : `~astropy.time.Time` subclass
New time object with inserted value(s)
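
        Examples
        --------
        A minimal sketch::

            >>> t = Time(['2020-01-01', '2020-01-03'], scale='utc')
            >>> t2 = t.insert(1, Time('2020-01-02', scale='utc'))
            >>> print(t2[1])
            2020-01-02 00:00:00.000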
"""
# Validate inputs: obj arg is integer, axis=0, self is not a scalar, and
# input index is in bounds.
try:
idx0 = operator.index(obj)
except TypeError:
raise TypeError("obj arg must be an integer")
if axis != 0:
raise ValueError("axis must be 0")
if not self.shape:
raise TypeError(
f"cannot insert into scalar {self.__class__.__name__} object"
)
if abs(idx0) > len(self):
raise IndexError(
f"index {idx0} is out of bounds for axis 0 with size {len(self)}"
)
# Turn negative index into positive
if idx0 < 0:
idx0 = len(self) + idx0
# For non-Time object, use numpy to help figure out the length. (Note annoying
# case of a string input that has a length which is not the length we want).
if not isinstance(values, self.__class__):
values = np.asarray(values)
n_values = len(values) if values.shape else 1
# Finally make the new object with the correct length and set values for the
# three sections, before insert, the insert, and after the insert.
out = self.__class__.info.new_like(
[self], len(self) + n_values, name=self.info.name
)
out._time.jd1[:idx0] = self._time.jd1[:idx0]
out._time.jd2[:idx0] = self._time.jd2[:idx0]
# This uses the Time setting machinery to coerce and validate as necessary.
out[idx0 : idx0 + n_values] = values
out._time.jd1[idx0 + n_values :] = self._time.jd1[idx0:]
out._time.jd2[idx0 + n_values :] = self._time.jd2[idx0:]
return out
def __setitem__(self, item, value):
if not self.writeable:
if self.shape:
raise ValueError(
f"{self.__class__.__name__} object is read-only. Make a "
'copy() or set "writeable" attribute to True.'
)
else:
raise ValueError(
f"scalar {self.__class__.__name__} object is read-only."
)
# Any use of setitem results in immediate cache invalidation
del self.cache
# Setting invalidates transform deltas
for attr in ("_delta_tdb_tt", "_delta_ut1_utc"):
if hasattr(self, attr):
delattr(self, attr)
if value is np.ma.masked or value is np.nan:
self._time.jd2[item] = np.nan
return
value = self._make_value_equivalent(item, value)
# Finally directly set the jd1/2 values. Locations are known to match.
if self.scale is not None:
value = getattr(value, self.scale)
self._time.jd1[item] = value._time.jd1
self._time.jd2[item] = value._time.jd2
def isclose(self, other, atol=None):
"""Returns a boolean or boolean array where two Time objects are
element-wise equal within a time tolerance.
This evaluates the expression below::
abs(self - other) <= atol
Parameters
----------
other : `~astropy.time.Time`
Time object for comparison.
atol : `~astropy.units.Quantity` or `~astropy.time.TimeDelta`
Absolute tolerance for equality with units of time (e.g. ``u.s`` or
``u.day``). Default is two bits in the 128-bit JD time representation,
equivalent to about 40 picosecs.
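
        Examples
        --------
        An illustrative sketch::

            >>> import astropy.units as u
            >>> t1 = Time('2020-01-01 00:00:00', scale='utc')
            >>> t2 = Time('2020-01-01 00:00:00.5', scale='utc')
            >>> bool(t1.isclose(t2, atol=1 * u.s))
            True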
"""
if atol is None:
# Note: use 2 bits instead of 1 bit based on experience in precision
# tests, since taking the difference with a UTC time means one has
# to do a scale change.
atol = 2 * np.finfo(float).eps * u.day
if not isinstance(atol, (u.Quantity, TimeDelta)):
raise TypeError(
"'atol' argument must be a Quantity or TimeDelta instance, got "
f"{atol.__class__.__name__} instead"
)
try:
# Separate these out so user sees where the problem is
dt = self - other
dt = abs(dt)
out = dt <= atol
except Exception as err:
raise TypeError(
"'other' argument must support subtraction with Time "
"and return a value that supports comparison with "
f"{atol.__class__.__name__}: {err}"
)
return out
def copy(self, format=None):
"""
        Return a fully independent copy of the Time object, optionally changing
the format.
If ``format`` is supplied then the time format of the returned Time
object will be set accordingly, otherwise it will be unchanged from the
original.
In this method a full copy of the internal time arrays will be made.
The internal time arrays are normally not changeable by the user so in
most cases the ``replicate()`` method should be used.
Parameters
----------
format : str, optional
Time format of the copy.
Returns
-------
tm : Time object
Copy of this object
"""
return self._apply("copy", format=format)
def replicate(self, format=None, copy=False, cls=None):
"""
Return a replica of the Time object, optionally changing the format.
If ``format`` is supplied then the time format of the returned Time
object will be set accordingly, otherwise it will be unchanged from the
original.
If ``copy`` is set to `True` then a full copy of the internal time arrays
will be made. By default the replica will use a reference to the
original arrays when possible to save memory. The internal time arrays
are normally not changeable by the user so in most cases it should not
be necessary to set ``copy`` to `True`.
The convenience method copy() is available in which ``copy`` is `True`
by default.
Parameters
----------
format : str, optional
Time format of the replica.
copy : bool, optional
Return a true copy instead of using references where possible.
Returns
-------
tm : Time object
Replica of this object
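
        For example (illustrative)::

            >>> t = Time('2010-01-01 00:00:00', scale='utc')
            >>> t.replicate(format='mjd').value
            55197.0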
"""
return self._apply("copy" if copy else "replicate", format=format, cls=cls)
def _apply(self, method, *args, format=None, cls=None, **kwargs):
"""Create a new time object, possibly applying a method to the arrays.
Parameters
----------
method : str or callable
If string, can be 'replicate' or the name of a relevant
`~numpy.ndarray` method. In the former case, a new time instance
with unchanged internal data is created, while in the latter the
method is applied to the internal ``jd1`` and ``jd2`` arrays, as
well as to possible ``location``, ``_delta_ut1_utc``, and
``_delta_tdb_tt`` arrays.
If a callable, it is directly applied to the above arrays.
Examples: 'copy', '__getitem__', 'reshape', `~numpy.broadcast_to`.
args : tuple
Any positional arguments for ``method``.
kwargs : dict
Any keyword arguments for ``method``. If the ``format`` keyword
argument is present, this will be used as the Time format of the
replica.
Examples
--------
Some ways this is used internally::
copy : ``_apply('copy')``
replicate : ``_apply('replicate')``
reshape : ``_apply('reshape', new_shape)``
index or slice : ``_apply('__getitem__', item)``
            broadcast : ``_apply(np.broadcast_to, shape=new_shape)``
"""
new_format = self.format if format is None else format
if callable(method):
apply_method = lambda array: method(array, *args, **kwargs)
else:
if method == "replicate":
apply_method = None
else:
apply_method = operator.methodcaller(method, *args, **kwargs)
jd1, jd2 = self._time.jd1, self._time.jd2
if apply_method:
jd1 = apply_method(jd1)
jd2 = apply_method(jd2)
# Get a new instance of our class and set its attributes directly.
tm = super().__new__(cls or self.__class__)
tm._time = TimeJD(
jd1,
jd2,
self.scale,
precision=0,
in_subfmt="*",
out_subfmt="*",
from_jd=True,
)
# Optional ndarray attributes.
for attr in ("_delta_ut1_utc", "_delta_tdb_tt", "location"):
try:
val = getattr(self, attr)
except AttributeError:
continue
if apply_method:
# Apply the method to any value arrays (though skip if there is
# only an array scalar and the method would return a view,
# since in that case nothing would change).
if getattr(val, "shape", ()):
val = apply_method(val)
elif method == "copy" or method == "flatten":
# flatten should copy also for a single element array, but
# we cannot use it directly for array scalars, since it
# always returns a one-dimensional array. So, just copy.
val = copy.copy(val)
setattr(tm, attr, val)
# Copy other 'info' attr only if it has actually been defined and the
# time object is not a scalar (issue #10688).
# See PR #3898 for further explanation and justification, along
# with Quantity.__array_finalize__
if "info" in self.__dict__:
tm.info = self.info
# Make the new internal _time object corresponding to the format
# in the copy. If the format is unchanged this process is lightweight
# and does not create any new arrays.
if new_format not in tm.FORMATS:
raise ValueError(f"format must be one of {list(tm.FORMATS)}")
NewFormat = tm.FORMATS[new_format]
tm._time = NewFormat(
tm._time.jd1,
tm._time.jd2,
tm._time._scale,
precision=self.precision,
in_subfmt=NewFormat._get_allowed_subfmt(self.in_subfmt),
out_subfmt=NewFormat._get_allowed_subfmt(self.out_subfmt),
from_jd=True,
)
tm._format = new_format
tm.SCALES = self.SCALES
return tm
def __copy__(self):
"""
Overrides the default behavior of the `copy.copy` function in
the python stdlib to behave like `Time.copy`. Does *not* make a
copy of the JD arrays - only copies by reference.
"""
return self.replicate()
def __deepcopy__(self, memo):
"""
Overrides the default behavior of the `copy.deepcopy` function
in the python stdlib to behave like `Time.copy`. Does make a
copy of the JD arrays.
"""
return self.copy()
def _advanced_index(self, indices, axis=None, keepdims=False):
"""Turn argmin, argmax output into an advanced index.
Argmin, argmax output contains indices along a given axis in an array
shaped like the other dimensions. To use this to get values at the
correct location, a list is constructed in which the other axes are
        indexed sequentially. When ``keepdims`` is ``True``, the net result is
        the same as constructing an index grid with ``np.ogrid`` and then
        replacing the ``axis`` item with ``indices`` with its shape expanded
        at ``axis``. When ``keepdims`` is ``False``, the result is the same but
        with the ``axis`` dimension removed from all list entries.
        When ``axis`` is ``None``, this calls :func:`~numpy.unravel_index`.
Parameters
----------
indices : array
Output of argmin or argmax.
axis : int or None
axis along which argmin or argmax was used.
keepdims : bool
Whether to construct indices that keep or remove the axis along
which argmin or argmax was used. Default: ``False``.
Returns
-------
advanced_index : list of arrays
Suitable for use as an advanced index.
"""
if axis is None:
return np.unravel_index(indices, self.shape)
ndim = self.ndim
if axis < 0:
axis = axis + ndim
if keepdims and indices.ndim < self.ndim:
indices = np.expand_dims(indices, axis)
index = [
indices
if i == axis
else np.arange(s).reshape(
(1,) * (i if keepdims or i < axis else i - 1)
+ (s,)
+ (1,) * (ndim - i - (1 if keepdims or i > axis else 2))
)
for i, s in enumerate(self.shape)
]
return tuple(index)
def argmin(self, axis=None, out=None):
"""Return indices of the minimum values along the given axis.
This is similar to :meth:`~numpy.ndarray.argmin`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used. See :func:`~numpy.argmin` for detailed documentation.
"""
# First get the minimum at normal precision.
jd1, jd2 = self.jd1, self.jd2
approx = np.min(jd1 + jd2, axis, keepdims=True)
# Approx is very close to the true minimum, and by subtracting it at
# full precision, all numbers near 0 can be represented correctly,
# so we can be sure we get the true minimum.
# The below is effectively what would be done for
# dt = (self - self.__class__(approx, format='jd')).jd
# which translates to:
# approx_jd1, approx_jd2 = day_frac(approx, 0.)
# dt = (self.jd1 - approx_jd1) + (self.jd2 - approx_jd2)
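        # Illustrative sketch: with jd1 = 2451545.0 and jd2 = 1e-12, the sum
        # jd1 + jd2 rounds back to jd1 (double spacing near 2.45e6 is ~5e-10),
        # whereas (jd1 - approx) + jd2 keeps the 1e-12 day offset intact.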
dt = (jd1 - approx) + jd2
return dt.argmin(axis, out)
def argmax(self, axis=None, out=None):
"""Return indices of the maximum values along the given axis.
This is similar to :meth:`~numpy.ndarray.argmax`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used. See :func:`~numpy.argmax` for detailed documentation.
"""
# For procedure, see comment on argmin.
jd1, jd2 = self.jd1, self.jd2
approx = np.max(jd1 + jd2, axis, keepdims=True)
dt = (jd1 - approx) + jd2
return dt.argmax(axis, out)
def argsort(self, axis=-1):
"""Returns the indices that would sort the time array.
This is similar to :meth:`~numpy.ndarray.argsort`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied. Internally,
it uses :func:`~numpy.lexsort`, and hence no sort method can be chosen.
"""
# For procedure, see comment on argmin.
jd1, jd2 = self.jd1, self.jd2
approx = jd1 + jd2
remainder = (jd1 - approx) + jd2
if axis is None:
return np.lexsort((remainder.ravel(), approx.ravel()))
else:
return np.lexsort(keys=(remainder, approx), axis=axis)
def min(self, axis=None, out=None, keepdims=False):
"""Minimum along a given axis.
This is similar to :meth:`~numpy.ndarray.min`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied.
Note that the ``out`` argument is present only for compatibility with
``np.min``; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError(
"Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``."
)
return self[self._advanced_index(self.argmin(axis), axis, keepdims)]
def max(self, axis=None, out=None, keepdims=False):
"""Maximum along a given axis.
This is similar to :meth:`~numpy.ndarray.max`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied.
Note that the ``out`` argument is present only for compatibility with
``np.max``; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError(
"Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``."
)
return self[self._advanced_index(self.argmax(axis), axis, keepdims)]
def ptp(self, axis=None, out=None, keepdims=False):
"""Peak to peak (maximum - minimum) along a given axis.
This is similar to :meth:`~numpy.ndarray.ptp`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used.
Note that the ``out`` argument is present only for compatibility with
`~numpy.ptp`; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError(
"Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``."
)
return self.max(axis, keepdims=keepdims) - self.min(axis, keepdims=keepdims)
def sort(self, axis=-1):
"""Return a copy sorted along the specified axis.
This is similar to :meth:`~numpy.ndarray.sort`, but internally uses
indexing with :func:`~numpy.lexsort` to ensure that the full precision
given by the two doubles ``jd1`` and ``jd2`` is kept, and that
corresponding attributes are properly sorted and copied as well.
Parameters
----------
axis : int or None
Axis to be sorted. If ``None``, the flattened array is sorted.
By default, sort over the last axis.
"""
return self[self._advanced_index(self.argsort(axis), axis, keepdims=True)]
def mean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
"""Mean along a given axis.
This is similar to :meth:`~numpy.ndarray.mean`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2`` is
used, and that corresponding attributes are copied.
Note that the ``out`` argument is present only for compatibility with
``np.mean``; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
Similarly, the ``dtype`` argument is also present for compatibility
only; it has no meaning for `Time`.
Parameters
----------
axis : None or int or tuple of ints, optional
Axis or axes along which the means are computed. The default is to
compute the mean of the flattened array.
dtype : None
Only present for compatibility with :meth:`~numpy.ndarray.mean`,
must be `None`.
out : None
Only present for compatibility with :meth:`~numpy.ndarray.mean`,
must be `None`.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
where : array_like of bool, optional
Elements to include in the mean. See `~numpy.ufunc.reduce` for
details.
Returns
-------
m : Time
A new Time instance containing the mean values
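
        Examples
        --------
        A minimal sketch::

            >>> t = Time(['2020-01-01', '2020-01-03'], scale='tai')
            >>> print(t.mean())
            2020-01-02 00:00:00.000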
"""
if dtype is not None:
raise ValueError("Cannot set ``dtype`` on `Time` instances")
if out is not None:
raise ValueError(
"Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``."
)
where = where & ~self.mask
where_broadcasted = np.broadcast_to(where, self.shape)
kwargs = dict(
axis=axis,
keepdims=keepdims,
where=where,
)
divisor = np.sum(where_broadcasted, axis=axis, keepdims=keepdims)
if np.any(divisor == 0):
raise ValueError(
"Mean over zero elements is not supported as it would give an undefined"
" time;see issue https://github.com/astropy/astropy/issues/6509"
)
jd1, jd2 = day_frac(
val1=np.sum(np.ma.getdata(self.jd1), **kwargs),
val2=np.sum(np.ma.getdata(self.jd2), **kwargs),
divisor=divisor,
)
result = type(self)(
val=jd1,
val2=jd2,
format="jd",
scale=self.scale,
copy=False,
)
result.format = self.format
return result
@property
def cache(self):
"""
Return the cache associated with this instance.
"""
return self._time.cache
@cache.deleter
def cache(self):
del self._time.cache
def __getattr__(self, attr):
"""
Get dynamic attributes to output format or do timescale conversion.
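        For example (illustrative), ``t.tai`` converts a UTC time to TAI,
        while ``t.mjd`` returns the value in MJD format::

            >>> t = Time('2010-01-01 00:00:00', scale='utc')
            >>> t.mjd
            55197.0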
"""
if attr in self.SCALES and self.scale is not None:
cache = self.cache["scale"]
if attr not in cache:
if attr == self.scale:
tm = self
else:
tm = self.replicate()
tm._set_scale(attr)
if tm.shape:
# Prevent future modification of cached array-like object
tm.writeable = False
cache[attr] = tm
return cache[attr]
elif attr in self.FORMATS:
return self.to_value(attr, subfmt=None)
elif attr in TIME_SCALES: # allowed ones done above (self.SCALES)
if self.scale is None:
raise ScaleValueError(
"Cannot convert TimeDelta with "
"undefined scale to any defined scale."
)
else:
raise ScaleValueError(
f"Cannot convert {self.__class__.__name__} with scale "
f"'{self.scale}' to scale '{attr}'"
)
else:
# Should raise AttributeError
return self.__getattribute__(attr)
def __dir__(self):
return sorted(set(super().__dir__()) | set(self.SCALES) | set(self.FORMATS))
def _match_shape(self, val):
"""
Ensure that `val` is matched to length of self. If val has length 1
then broadcast, otherwise cast to double and make sure shape matches.
"""
val = _make_array(val, copy=True) # be conservative and copy
if val.size > 1 and val.shape != self.shape:
try:
# check the value can be broadcast to the shape of self.
val = np.broadcast_to(val, self.shape, subok=True)
except Exception:
raise ValueError(
"Attribute shape must match or be broadcastable to that of "
"Time object. Typically, give either a single value or "
"one for each time."
)
return val
def _time_comparison(self, other, op):
"""If other is of same class as self, compare difference in self.scale.
Otherwise, return NotImplemented.
"""
if other.__class__ is not self.__class__:
try:
other = self.__class__(other, scale=self.scale)
except Exception:
# Let other have a go.
return NotImplemented
if (
self.scale is not None
and self.scale not in other.SCALES
or other.scale is not None
and other.scale not in self.SCALES
):
# Other will also not be able to do it, so raise a TypeError
# immediately, allowing us to explain why it doesn't work.
raise TypeError(
f"Cannot compare {self.__class__.__name__} instances with "
f"scales '{self.scale}' and '{other.scale}'"
)
if self.scale is not None and other.scale is not None:
other = getattr(other, self.scale)
return op((self.jd1 - other.jd1) + (self.jd2 - other.jd2), 0.0)
def __lt__(self, other):
return self._time_comparison(other, operator.lt)
def __le__(self, other):
return self._time_comparison(other, operator.le)
def __eq__(self, other):
"""
If other is an incompatible object for comparison, return `False`.
Otherwise, return `True` if the time difference between self and
other is zero.
"""
return self._time_comparison(other, operator.eq)
def __ne__(self, other):
"""
If other is an incompatible object for comparison, return `True`.
Otherwise, return `False` if the time difference between self and
other is zero.
"""
return self._time_comparison(other, operator.ne)
def __gt__(self, other):
return self._time_comparison(other, operator.gt)
def __ge__(self, other):
return self._time_comparison(other, operator.ge)
class Time(TimeBase):
"""
Represent and manipulate times and dates for astronomy.
A `Time` object is initialized with one or more times in the ``val``
argument. The input times in ``val`` must conform to the specified
``format`` and must correspond to the specified time ``scale``. The
optional ``val2`` time input should be supplied only for numeric input
formats (e.g. JD) where very high precision (better than 64-bit precision)
is required.
The allowed values for ``format`` can be listed with::
>>> list(Time.FORMATS)
['jd', 'mjd', 'decimalyear', 'unix', 'unix_tai', 'cxcsec', 'gps', 'plot_date',
'stardate', 'datetime', 'ymdhms', 'iso', 'isot', 'yday', 'datetime64',
'fits', 'byear', 'jyear', 'byear_str', 'jyear_str']
See also: http://docs.astropy.org/en/stable/time/
Parameters
----------
val : sequence, ndarray, number, str, bytes, or `~astropy.time.Time` object
Value(s) to initialize the time or times. Bytes are decoded as ascii.
val2 : sequence, ndarray, or number; optional
Value(s) to initialize the time or times. Only used for numerical
input, to help preserve precision.
format : str, optional
Format of input value(s)
scale : str, optional
Time scale of input value(s), must be one of the following:
('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc')
precision : int, optional
Digits of precision in string representation of time
in_subfmt : str, optional
Unix glob to select subformats for parsing input times
out_subfmt : str, optional
Unix glob to select subformat for outputting times
location : `~astropy.coordinates.EarthLocation` or tuple, optional
        If given as a tuple, it should be able to initialize an
        EarthLocation instance, i.e., either contain 3 items with units of
length for geocentric coordinates, or contain a longitude, latitude,
and an optional height for geodetic coordinates.
Can be a single location, or one for each input time.
If not given, assumed to be the center of the Earth for time scale
transformations to and from the solar-system barycenter.
copy : bool, optional
Make a copy of the input values
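
    Examples
    --------
    A minimal illustration (values as produced by the standard formats)::

        >>> t = Time('2020-01-01 12:00:00', scale='utc')
        >>> t.jd
        2458850.0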
"""
SCALES = TIME_SCALES
"""List of time scales"""
FORMATS = TIME_FORMATS
"""Dict of time formats"""
def __new__(
cls,
val,
val2=None,
format=None,
scale=None,
precision=None,
in_subfmt=None,
out_subfmt=None,
location=None,
copy=False,
):
if isinstance(val, Time):
self = val.replicate(format=format, copy=copy, cls=cls)
else:
self = super().__new__(cls)
return self
def __init__(
self,
val,
val2=None,
format=None,
scale=None,
precision=None,
in_subfmt=None,
out_subfmt=None,
location=None,
copy=False,
):
if location is not None:
from astropy.coordinates import EarthLocation
if isinstance(location, EarthLocation):
self.location = location
else:
self.location = EarthLocation(*location)
if self.location.size == 1:
self.location = self.location.squeeze()
else:
if not hasattr(self, "location"):
self.location = None
if isinstance(val, Time):
# Update _time formatting parameters if explicitly specified
if precision is not None:
self._time.precision = precision
if in_subfmt is not None:
self._time.in_subfmt = in_subfmt
if out_subfmt is not None:
self._time.out_subfmt = out_subfmt
self.SCALES = TIME_TYPES[self.scale]
if scale is not None:
self._set_scale(scale)
else:
self._init_from_vals(
val, val2, format, scale, copy, precision, in_subfmt, out_subfmt
)
self.SCALES = TIME_TYPES[self.scale]
if self.location is not None and (
self.location.size > 1 and self.location.shape != self.shape
):
try:
# check the location can be broadcast to self's shape.
self.location = np.broadcast_to(self.location, self.shape, subok=True)
except Exception as err:
raise ValueError(
f"The location with shape {self.location.shape} cannot be "
f"broadcast against time with shape {self.shape}. "
"Typically, either give a single location or one for each time."
) from err
def _make_value_equivalent(self, item, value):
"""Coerce setitem value into an equivalent Time object."""
# If there is a vector location then broadcast to the Time shape
# and then select with ``item``
if self.location is not None and self.location.shape:
self_location = np.broadcast_to(self.location, self.shape, subok=True)[item]
else:
self_location = self.location
if isinstance(value, Time):
# Make sure locations are compatible. Location can be either None or
# a Location object.
if self_location is None and value.location is None:
match = True
elif (self_location is None and value.location is not None) or (
self_location is not None and value.location is None
):
match = False
else:
match = np.all(self_location == value.location)
if not match:
raise ValueError(
"cannot set to Time with different location: expected "
f"location={self_location} and got location={value.location}"
)
else:
try:
value = self.__class__(value, scale=self.scale, location=self_location)
except Exception:
try:
value = self.__class__(
value,
scale=self.scale,
format=self.format,
location=self_location,
)
except Exception as err:
raise ValueError(
f"cannot convert value to a compatible Time object: {err}"
)
return value
@classmethod
def now(cls):
"""
Creates a new object corresponding to the instant in time this
method is called.
.. note::
"Now" is determined using the `~datetime.datetime.utcnow`
function, so its accuracy and precision is determined by that
function. Generally that means it is set by the accuracy of
your system clock.
Returns
-------
nowtime : :class:`~astropy.time.Time`
A new `Time` object (or a subclass of `Time` if this is called from
such a subclass) at the current time.
"""
# call `utcnow` immediately to be sure it's ASAP
dtnow = datetime.utcnow()
return cls(val=dtnow, format="datetime", scale="utc")
info = TimeInfo()
@classmethod
def strptime(cls, time_string, format_string, **kwargs):
"""
Parse a string to a Time according to a format specification.
See `time.strptime` documentation for format specification.
>>> Time.strptime('2012-Jun-30 23:59:60', '%Y-%b-%d %H:%M:%S')
<Time object: scale='utc' format='isot' value=2012-06-30T23:59:60.000>
Parameters
----------
time_string : str, sequence, or ndarray
Objects containing time data of type string
format_string : str
String specifying format of time_string.
kwargs : dict
Any keyword arguments for ``Time``. If the ``format`` keyword
argument is present, this will be used as the Time format.
Returns
-------
time_obj : `~astropy.time.Time`
A new `~astropy.time.Time` object corresponding to the input
``time_string``.
"""
time_array = np.asarray(time_string)
if time_array.dtype.kind not in ("U", "S"):
raise TypeError(
"Expected type is string, a bytes-like object or a sequence "
f"of these. Got dtype '{time_array.dtype.kind}'"
)
to_string = (
str
if time_array.dtype.kind == "U"
else lambda x: str(x.item(), encoding="ascii")
)
iterator = np.nditer([time_array, None], op_dtypes=[time_array.dtype, "U30"])
for time, formatted in iterator:
tt, fraction = _strptime._strptime(to_string(time), format_string)
time_tuple = tt[:6] + (fraction,)
formatted[...] = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}.{:06}".format(
*time_tuple
)
format = kwargs.pop("format", None)
out = cls(*iterator.operands[1:], format="isot", **kwargs)
if format is not None:
out.format = format
return out
def strftime(self, format_spec):
"""
Convert Time to a string or a numpy.array of strings according to a
format specification.
See `time.strftime` documentation for format specification.
Parameters
----------
format_spec : str
Format definition of return string.
Returns
-------
formatted : str or numpy.array
String or numpy.array of strings formatted according to the given
format string.
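
        For example (illustrative)::

            >>> t = Time('2010-01-01 00:00:00', scale='utc')
            >>> t.strftime('%Y-%m-%d %H:%M:%S')
            '2010-01-01 00:00:00'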
"""
formatted_strings = []
for sk in self.replicate("iso")._time.str_kwargs():
date_tuple = date(sk["year"], sk["mon"], sk["day"]).timetuple()
datetime_tuple = (
sk["year"],
sk["mon"],
sk["day"],
sk["hour"],
sk["min"],
sk["sec"],
date_tuple[6],
date_tuple[7],
-1,
)
fmtd_str = format_spec
if "%f" in fmtd_str:
fmtd_str = fmtd_str.replace(
"%f",
"{frac:0{precision}}".format(
frac=sk["fracsec"], precision=self.precision
),
)
fmtd_str = strftime(fmtd_str, datetime_tuple)
formatted_strings.append(fmtd_str)
if self.isscalar:
return formatted_strings[0]
else:
return np.array(formatted_strings).reshape(self.shape)
def light_travel_time(
self, skycoord, kind="barycentric", location=None, ephemeris=None
):
"""Light travel time correction to the barycentre or heliocentre.
The frame transformations used to calculate the location of the solar
system barycentre and the heliocentre rely on the erfa routine epv00,
which is consistent with the JPL DE405 ephemeris to an accuracy of
11.2 km, corresponding to a light travel time of 4 microseconds.
The routine assumes the source(s) are at large distance, i.e., neglects
finite-distance effects.
Parameters
----------
skycoord : `~astropy.coordinates.SkyCoord`
The sky location to calculate the correction for.
kind : str, optional
``'barycentric'`` (default) or ``'heliocentric'``
location : `~astropy.coordinates.EarthLocation`, optional
The location of the observatory to calculate the correction for.
If no location is given, the ``location`` attribute of the Time
object is used
ephemeris : str, optional
Solar system ephemeris to use (e.g., 'builtin', 'jpl'). By default,
use the one set with ``astropy.coordinates.solar_system_ephemeris.set``.
For more information, see `~astropy.coordinates.solar_system_ephemeris`.
Returns
-------
time_offset : `~astropy.time.TimeDelta`
            The time offset between the barycentre or heliocentre and Earth,
            in TDB seconds. Should be added to the original time to get the
            time in the Solar system barycentre or the heliocentre.
            The time conversion to BJD will then also include the relativistic correction.
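
        Examples
        --------
        An illustrative usage sketch; ``of_site`` fetches remote data, hence
        the skip directives::

            >>> from astropy.coordinates import SkyCoord, EarthLocation
            >>> loc = EarthLocation.of_site('greenwich')  # doctest: +SKIP
            >>> t = Time('2020-01-01 00:00:00', scale='utc', location=loc)  # doctest: +SKIP
            >>> ltt = t.light_travel_time(SkyCoord('4h42m', '-38d6m'))  # doctest: +SKIP
            >>> t_barycentric = t.tdb + ltt  # doctest: +SKIP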
"""
if kind.lower() not in ("barycentric", "heliocentric"):
raise ValueError(
"'kind' parameter must be one of 'heliocentric' or 'barycentric'"
)
if location is None:
if self.location is None:
raise ValueError(
"An EarthLocation needs to be set or passed in to calculate bary- "
"or heliocentric corrections"
)
location = self.location
from astropy.coordinates import (
GCRS,
HCRS,
ICRS,
CartesianRepresentation,
UnitSphericalRepresentation,
solar_system_ephemeris,
)
# ensure sky location is ICRS compatible
if not skycoord.is_transformable_to(ICRS()):
raise ValueError("Given skycoord is not transformable to the ICRS")
# get location of observatory in ITRS coordinates at this Time
try:
itrs = location.get_itrs(obstime=self)
except Exception:
raise ValueError(
"Supplied location does not have a valid `get_itrs` method"
)
with solar_system_ephemeris.set(ephemeris):
if kind.lower() == "heliocentric":
# convert to heliocentric coordinates, aligned with ICRS
cpos = itrs.transform_to(HCRS(obstime=self)).cartesian.xyz
else:
# first we need to convert to GCRS coordinates with the correct
# obstime, since ICRS coordinates have no frame time
gcrs_coo = itrs.transform_to(GCRS(obstime=self))
# convert to barycentric (BCRS) coordinates, aligned with ICRS
cpos = gcrs_coo.transform_to(ICRS()).cartesian.xyz
# get unit ICRS vector to star
spos = (
skycoord.icrs.represent_as(UnitSphericalRepresentation)
.represent_as(CartesianRepresentation)
.xyz
)
# Move X,Y,Z to last dimension, to enable possible broadcasting below.
cpos = np.rollaxis(cpos, 0, cpos.ndim)
spos = np.rollaxis(spos, 0, spos.ndim)
# calculate light travel time correction
tcor_val = (spos * cpos).sum(axis=-1) / const.c
return TimeDelta(tcor_val, scale="tdb")
def earth_rotation_angle(self, longitude=None):
"""Calculate local Earth rotation angle.
Parameters
----------
longitude : `~astropy.units.Quantity`, `~astropy.coordinates.EarthLocation`, str, or None; optional
The longitude on the Earth at which to compute the Earth rotation
angle (taken from a location as needed). If `None` (default), taken
from the ``location`` attribute of the Time instance. If the special
string 'tio', the result will be relative to the Terrestrial
Intermediate Origin (TIO) (i.e., the output of `~erfa.era00`).
Returns
-------
`~astropy.coordinates.Longitude`
Local Earth rotation angle with units of hourangle.
See Also
--------
astropy.time.Time.sidereal_time
References
----------
IAU 2006 NFA Glossary
(currently located at: https://syrte.obspm.fr/iauWGnfa/NFA_Glossary.html)
Notes
-----
The difference between apparent sidereal time and Earth rotation angle
is the equation of the origins, which is the angle between the Celestial
Intermediate Origin (CIO) and the equinox. Applying apparent sidereal
time to the hour angle yields the true apparent Right Ascension with
respect to the equinox, while applying the Earth rotation angle yields
the intermediate (CIRS) Right Ascension with respect to the CIO.
        The result includes the TIO locator (s'), which positions the Terrestrial
        Intermediate Origin on the equator of the Celestial Intermediate Pole (CIP)
        and is rigorously corrected for polar motion
        (except when ``longitude='tio'``).
"""
if isinstance(longitude, str) and longitude == "tio":
longitude = 0
include_tio = False
else:
include_tio = True
return self._sid_time_or_earth_rot_ang(
longitude=longitude,
function=erfa.era00,
scales=("ut1",),
include_tio=include_tio,
)
def sidereal_time(self, kind, longitude=None, model=None):
"""Calculate sidereal time.
Parameters
----------
kind : str
``'mean'`` or ``'apparent'``, i.e., accounting for precession
only, or also for nutation.
longitude : `~astropy.units.Quantity`, `~astropy.coordinates.EarthLocation`, str, or None; optional
The longitude on the Earth at which to compute the Earth rotation
angle (taken from a location as needed). If `None` (default), taken
from the ``location`` attribute of the Time instance. If the special
string 'greenwich' or 'tio', the result will be relative to longitude
0 for models before 2000, and relative to the Terrestrial Intermediate
Origin (TIO) for later ones (i.e., the output of the relevant ERFA
function that calculates greenwich sidereal time).
model : str or None; optional
Precession (and nutation) model to use. The available ones are:
- {0}: {1}
- {2}: {3}
If `None` (default), the last (most recent) one from the appropriate
list above is used.
Returns
-------
`~astropy.coordinates.Longitude`
Local sidereal time, with units of hourangle.
See Also
--------
astropy.time.Time.earth_rotation_angle
References
----------
IAU 2006 NFA Glossary
(currently located at: https://syrte.obspm.fr/iauWGnfa/NFA_Glossary.html)
Notes
-----
The difference between apparent sidereal time and Earth rotation angle
is the equation of the origins, which is the angle between the Celestial
Intermediate Origin (CIO) and the equinox. Applying apparent sidereal
time to the hour angle yields the true apparent Right Ascension with
respect to the equinox, while applying the Earth rotation angle yields
the intermediate (CIRS) Right Ascension with respect to the CIO.
For the IAU precession models from 2000 onwards, the result includes the
TIO locator (s'), which positions the Terrestrial Intermediate Origin on
the equator of the Celestial Intermediate Pole (CIP) and is rigorously
corrected for polar motion (except when ``longitude='tio'`` or ``'greenwich'``).
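
        For example (illustrative; the UT1 conversion requires IERS data,
        hence the skip directive)::

            >>> t = Time('2006-01-15 21:24:37.5', scale='utc')
            >>> t.sidereal_time('apparent', 'greenwich')  # doctest: +SKIP
            <Longitude ... hourangle>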
""" # (docstring is formatted below)
if kind.lower() not in SIDEREAL_TIME_MODELS:
raise ValueError(
"The kind of sidereal time has to be "
+ " or ".join(sorted(SIDEREAL_TIME_MODELS))
)
available_models = SIDEREAL_TIME_MODELS[kind.lower()]
if model is None:
model = sorted(available_models)[-1]
elif model.upper() not in available_models:
raise ValueError(
f"Model {model} not implemented for {kind} sidereal time; "
f"available models are {sorted(available_models)}"
)
model_kwargs = available_models[model.upper()]
if isinstance(longitude, str) and longitude in ("tio", "greenwich"):
longitude = 0
model_kwargs = model_kwargs.copy()
model_kwargs["include_tio"] = False
return self._sid_time_or_earth_rot_ang(longitude=longitude, **model_kwargs)
if isinstance(sidereal_time.__doc__, str):
sidereal_time.__doc__ = sidereal_time.__doc__.format(
"apparent",
sorted(SIDEREAL_TIME_MODELS["apparent"]),
"mean",
sorted(SIDEREAL_TIME_MODELS["mean"]),
)
def _sid_time_or_earth_rot_ang(self, longitude, function, scales, include_tio=True):
"""Calculate a local sidereal time or Earth rotation angle.
Parameters
----------
longitude : `~astropy.units.Quantity`, `~astropy.coordinates.EarthLocation`, str, or None; optional
The longitude on the Earth at which to compute the Earth rotation
angle (taken from a location as needed). If `None` (default), taken
from the ``location`` attribute of the Time instance.
function : callable
The ERFA function to use.
scales : tuple of str
The time scales that the function requires on input.
include_tio : bool, optional
            Whether to include the TIO locator corrected for polar motion.
Should be `False` for pre-2000 IAU models. Default: `True`.
Returns
-------
`~astropy.coordinates.Longitude`
Local sidereal time or Earth rotation angle, with units of hourangle.
"""
from astropy.coordinates import EarthLocation, Longitude
from astropy.coordinates.builtin_frames.utils import get_polar_motion
from astropy.coordinates.matrix_utilities import rotation_matrix
if longitude is None:
if self.location is None:
raise ValueError(
"No longitude is given but the location for "
"the Time object is not set."
)
longitude = self.location.lon
elif isinstance(longitude, EarthLocation):
longitude = longitude.lon
else:
# Sanity check on input; default unit is degree.
longitude = Longitude(longitude, u.degree, copy=False)
theta = self._call_erfa(function, scales)
if include_tio:
# TODO: this duplicates part of coordinates.erfa_astrom.ErfaAstrom.apio;
            # maybe possible to factor out to one or the other.
sp = self._call_erfa(erfa.sp00, ("tt",))
xp, yp = get_polar_motion(self)
# Form the rotation matrix, CIRS to apparent [HA,Dec].
r = (
rotation_matrix(longitude, "z")
@ rotation_matrix(-yp, "x", unit=u.radian)
@ rotation_matrix(-xp, "y", unit=u.radian)
@ rotation_matrix(theta + sp, "z", unit=u.radian)
)
# Solve for angle.
angle = np.arctan2(r[..., 0, 1], r[..., 0, 0]) << u.radian
else:
angle = longitude + (theta << u.radian)
return Longitude(angle, u.hourangle)
def _call_erfa(self, function, scales):
# TODO: allow erfa functions to be used on Time with __array_ufunc__.
erfa_parameters = [
getattr(getattr(self, scale)._time, jd_part)
for scale in scales
for jd_part in ("jd1", "jd2_filled")
]
result = function(*erfa_parameters)
if self.masked:
result[self.mask] = np.nan
return result
def get_delta_ut1_utc(self, iers_table=None, return_status=False):
"""Find UT1 - UTC differences by interpolating in IERS Table.
Parameters
----------
iers_table : `~astropy.utils.iers.IERS`, optional
Table containing UT1-UTC differences from IERS Bulletins A
and/or B. Default: `~astropy.utils.iers.earth_orientation_table`
(which in turn defaults to the combined version provided by
`~astropy.utils.iers.IERS_Auto`).
return_status : bool
Whether to return status values. If `False` (default), iers
raises `IndexError` if any time is out of the range
covered by the IERS table.
Returns
-------
ut1_utc : float or float array
UT1-UTC, interpolated in IERS Table
status : int or int array
            Status values (if ``return_status`` is ``True``)::
``astropy.utils.iers.FROM_IERS_B``
``astropy.utils.iers.FROM_IERS_A``
``astropy.utils.iers.FROM_IERS_A_PREDICTION``
``astropy.utils.iers.TIME_BEFORE_IERS_RANGE``
``astropy.utils.iers.TIME_BEYOND_IERS_RANGE``
Notes
-----
In normal usage, UT1-UTC differences are calculated automatically
on the first instance ut1 is needed.
Examples
--------
To check in code whether any times are before the IERS table range::
>>> from astropy.utils.iers import TIME_BEFORE_IERS_RANGE
>>> t = Time(['1961-01-01', '2000-01-01'], scale='utc')
>>> delta, status = t.get_delta_ut1_utc(return_status=True) # doctest: +REMOTE_DATA
>>> status == TIME_BEFORE_IERS_RANGE # doctest: +REMOTE_DATA
array([ True, False]...)
"""
if iers_table is None:
from astropy.utils.iers import earth_orientation_table
iers_table = earth_orientation_table.get()
return iers_table.ut1_utc(self.utc, return_status=return_status)
# Property for ERFA DUT arg = UT1 - UTC
def _get_delta_ut1_utc(self, jd1=None, jd2=None):
"""
Get ERFA DUT arg = UT1 - UTC. This getter takes optional jd1 and
jd2 args because it gets called that way when converting time scales.
        If delta_ut1_utc is not yet set, this will interpolate them from
        the IERS table.
"""
# Sec. 4.3.1: the arg DUT is the quantity delta_UT1 = UT1 - UTC in
# seconds. It is obtained from tables published by the IERS.
if not hasattr(self, "_delta_ut1_utc"):
from astropy.utils.iers import earth_orientation_table
iers_table = earth_orientation_table.get()
# jd1, jd2 are normally set (see above), except if delta_ut1_utc
            # is accessed directly; ensure we behave as expected for that case
if jd1 is None:
self_utc = self.utc
jd1, jd2 = self_utc._time.jd1, self_utc._time.jd2_filled
scale = "utc"
else:
scale = self.scale
# interpolate UT1-UTC in IERS table
delta = iers_table.ut1_utc(jd1, jd2)
# if we interpolated using UT1 jds, we may be off by one
# second near leap seconds (and very slightly off elsewhere)
if scale == "ut1":
# calculate UTC using the offset we got; the ERFA routine
# is tolerant of leap seconds, so will do this right
jd1_utc, jd2_utc = erfa.ut1utc(jd1, jd2, delta.to_value(u.s))
# calculate a better estimate using the nearly correct UTC
delta = iers_table.ut1_utc(jd1_utc, jd2_utc)
self._set_delta_ut1_utc(delta)
return self._delta_ut1_utc
def _set_delta_ut1_utc(self, val):
del self.cache
if hasattr(val, "to"): # Matches Quantity but also TimeDelta.
val = val.to(u.second).value
val = self._match_shape(val)
self._delta_ut1_utc = val
# Note can't use @property because _get_delta_tdb_tt is explicitly
# called with the optional jd1 and jd2 args.
delta_ut1_utc = property(_get_delta_ut1_utc, _set_delta_ut1_utc)
"""UT1 - UTC time scale offset"""
# Property for ERFA DTR arg = TDB - TT
def _get_delta_tdb_tt(self, jd1=None, jd2=None):
if not hasattr(self, "_delta_tdb_tt"):
# If jd1 and jd2 are not provided (which is the case for property
# attribute access) then require that the time scale is TT or TDB.
# Otherwise the computations here are not correct.
if jd1 is None or jd2 is None:
if self.scale not in ("tt", "tdb"):
raise ValueError(
"Accessing the delta_tdb_tt attribute is only "
"possible for TT or TDB time scales"
)
else:
jd1 = self._time.jd1
jd2 = self._time.jd2_filled
# First go from the current input time (which is either
# TDB or TT) to an approximate UT1. Since TT and TDB are
# pretty close (few msec?), assume TT. Similarly, since the
# UT1 terms are very small, use UTC instead of UT1.
njd1, njd2 = erfa.tttai(jd1, jd2)
njd1, njd2 = erfa.taiutc(njd1, njd2)
# subtract 0.5, so UT is fraction of the day from midnight
ut = day_frac(njd1 - 0.5, njd2)[1]
if self.location is None:
# Assume geocentric.
self._delta_tdb_tt = erfa.dtdb(jd1, jd2, ut, 0.0, 0.0, 0.0)
else:
location = self.location
# Geodetic params needed for d_tdb_tt()
lon = location.lon
rxy = np.hypot(location.x, location.y)
z = location.z
self._delta_tdb_tt = erfa.dtdb(
jd1,
jd2,
ut,
lon.to_value(u.radian),
rxy.to_value(u.km),
z.to_value(u.km),
)
return self._delta_tdb_tt
def _set_delta_tdb_tt(self, val):
del self.cache
if hasattr(val, "to"): # Matches Quantity but also TimeDelta.
val = val.to(u.second).value
val = self._match_shape(val)
self._delta_tdb_tt = val
# Note can't use @property because _get_delta_tdb_tt is explicitly
# called with the optional jd1 and jd2 args.
delta_tdb_tt = property(_get_delta_tdb_tt, _set_delta_tdb_tt)
"""TDB - TT time scale offset"""
def __sub__(self, other):
# T - Tdelta = T
# T - T = Tdelta
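        # e.g. (illustrative): Time('2020-01-02') - Time('2020-01-01') gives a
        # 1-day TimeDelta, while Time('2020-01-02') - TimeDelta(1, format='jd')
        # gives Time('2020-01-01').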
other_is_delta = not isinstance(other, Time)
if other_is_delta: # T - Tdelta
# Check other is really a TimeDelta or something that can initialize.
if not isinstance(other, TimeDelta):
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# we need a constant scale to calculate, which is guaranteed for
# TimeDelta, but not for Time (which can be UTC)
out = self.replicate()
if self.scale in other.SCALES:
if other.scale not in (out.scale, None):
other = getattr(other, out.scale)
else:
if other.scale is None:
out._set_scale("tai")
else:
if self.scale not in TIME_TYPES[other.scale]:
raise TypeError(
"Cannot subtract Time and TimeDelta instances "
f"with scales '{self.scale}' and '{other.scale}'"
)
out._set_scale(other.scale)
# remove attributes that are invalidated by changing time
for attr in ("_delta_ut1_utc", "_delta_tdb_tt"):
if hasattr(out, attr):
delattr(out, attr)
else: # T - T
# the scales should be compatible (e.g., cannot convert TDB to LOCAL)
if other.scale not in self.SCALES:
raise TypeError(
"Cannot subtract Time instances "
f"with scales '{self.scale}' and '{other.scale}'"
)
self_time = (
self._time if self.scale in TIME_DELTA_SCALES else self.tai._time
)
# set up TimeDelta, subtraction to be done shortly
out = TimeDelta(
self_time.jd1, self_time.jd2, format="jd", scale=self_time.scale
)
if other.scale != out.scale:
other = getattr(other, out.scale)
jd1 = out._time.jd1 - other._time.jd1
jd2 = out._time.jd2 - other._time.jd2
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
if other_is_delta:
# Go back to left-side scale if needed
out._set_scale(self.scale)
return out
def __add__(self, other):
# T + Tdelta = T
# T + T = error
if isinstance(other, Time):
raise OperandTypeError(self, other, "+")
# Check other is really a TimeDelta or something that can initialize.
if not isinstance(other, TimeDelta):
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# ideally, we calculate in the scale of the Time item, since that is
# what we want the output in, but this may not be possible, since
# TimeDelta cannot be converted arbitrarily
out = self.replicate()
if self.scale in other.SCALES:
if other.scale not in (out.scale, None):
other = getattr(other, out.scale)
else:
if other.scale is None:
out._set_scale("tai")
else:
if self.scale not in TIME_TYPES[other.scale]:
raise TypeError(
"Cannot add Time and TimeDelta instances "
f"with scales '{self.scale}' and '{other.scale}'"
)
out._set_scale(other.scale)
# remove attributes that are invalidated by changing time
for attr in ("_delta_ut1_utc", "_delta_tdb_tt"):
if hasattr(out, attr):
delattr(out, attr)
jd1 = out._time.jd1 + other._time.jd1
jd2 = out._time.jd2 + other._time.jd2
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
# Go back to left-side scale if needed
out._set_scale(self.scale)
return out
# Reverse addition is possible: <something-Tdelta-ish> + T
# but there is no case of <something> - T, so no __rsub__.
def __radd__(self, other):
return self.__add__(other)
def mean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
scale = self.scale
if scale == "utc":
self = self.tai
result = super().mean(
axis=axis, dtype=dtype, out=out, keepdims=keepdims, where=where
)
if scale == "utc":
result = result.utc
result.out_subfmt = self.out_subfmt
location = self.location
if self.location is not None:
if self.location.shape:
if axis is None:
axis_normalized = tuple(range(self.ndim))
elif isinstance(axis, int):
axis_normalized = (axis,)
else:
axis_normalized = axis
sl = [slice(None)] * self.location.ndim
for a in axis_normalized:
sl[a] = slice(0, 1)
if np.any(self.location != self.location[tuple(sl)]):
raise ValueError(
"`location` must be constant over the reduction axes."
)
if not keepdims:
for a in axis_normalized:
sl[a] = 0
location = self.location[tuple(sl)]
result.location = location
return result
def __array_function__(self, function, types, args, kwargs):
"""
Wrap numpy functions.
Parameters
----------
function : callable
Numpy function to wrap
types : iterable of classes
Classes that provide an ``__array_function__`` override. Can
in principle be used to interact with other classes. Below,
mostly passed on to `~numpy.ndarray`, which can only interact
with subclasses.
args : tuple
Positional arguments provided in the function call.
kwargs : dict
Keyword arguments provided in the function call.
"""
if function in CUSTOM_FUNCTIONS:
f = CUSTOM_FUNCTIONS[function]
return f(*args, **kwargs)
elif function in UNSUPPORTED_FUNCTIONS:
return NotImplemented
else:
return super().__array_function__(function, types, args, kwargs)
def to_datetime(self, timezone=None):
# TODO: this could likely go through to_value, as long as that
# had an **kwargs part that was just passed on to _time.
tm = self.replicate(format="datetime")
return tm._shaped_like_input(tm._time.to_value(timezone))
to_datetime.__doc__ = TimeDatetime.to_value.__doc__
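# A minimal illustrative sketch (not part of the astropy API) of the
# two-double bookkeeping used by the arithmetic above: jd1 carries the big
# part of the Julian Date and jd2 the small part, and day_frac() renormalizes
# the pair after every add/subtract so sub-nanosecond differences survive.
def _demo_two_double_arithmetic():
    """Illustrative only: subtract two times kept as (jd1, jd2) pairs."""
    # J2000.0 plus one nanosecond, split across two doubles.
    a_jd1, a_jd2 = 2451545.0, 1e-9 / 86400.0
    b_jd1, b_jd2 = 2451545.0, 0.0
    # Component-wise subtraction, then renormalization as in __sub__ above.
    jd1, jd2 = day_frac(a_jd1 - b_jd1, a_jd2 - b_jd2)
    # jd2 still holds the ~1 ns difference that a single float64 Julian Date
    # (one ulp is ~40 microseconds at this magnitude) would lose entirely.
    return jd1, jd2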
class TimeDeltaMissingUnitWarning(AstropyDeprecationWarning):
"""Warning for missing unit or format in TimeDelta."""
pass
class TimeDelta(TimeBase):
"""
Represent the time difference between two times.
A TimeDelta object is initialized with one or more times in the ``val``
argument. The input times in ``val`` must conform to the specified
``format``. The optional ``val2`` time input should be supplied only for
numeric input formats (e.g. JD) where very high precision (better than
64-bit precision) is required.
The allowed values for ``format`` can be listed with::
>>> list(TimeDelta.FORMATS)
['sec', 'jd', 'datetime']
Note that for time differences, the scale can be among three groups:
geocentric ('tai', 'tt', 'tcg'), barycentric ('tcb', 'tdb'), and rotational
('ut1'). Within each of these, the scales for time differences are the
same. Conversion between geocentric and barycentric is possible, as there
is only a scale factor change, but one cannot convert to or from 'ut1', as
this requires knowledge of the actual times, not just their difference. For
a similar reason, 'utc' is not a valid scale for a time difference: a UTC
day is not always 86400 seconds.
See also:
- https://docs.astropy.org/en/stable/time/
- https://docs.astropy.org/en/stable/time/index.html#time-deltas
Parameters
----------
val : sequence, ndarray, number, `~astropy.units.Quantity` or `~astropy.time.TimeDelta` object
Value(s) to initialize the time difference(s). Any quantities will
be converted appropriately (with care taken to avoid rounding
errors for regular time units).
val2 : sequence, ndarray, number, or `~astropy.units.Quantity`; optional
Additional values, as needed to preserve precision.
format : str, optional
Format of input value(s). For numerical inputs without units,
"jd" is assumed and values are interpreted as days.
A deprecation warning is raised in this case. To avoid the warning,
either specify the format or add units to the input values.
scale : str, optional
Time scale of input value(s), must be one of the following values:
('tdb', 'tt', 'ut1', 'tcg', 'tcb', 'tai'). If not given (or
``None``), the scale is arbitrary; when added or subtracted from a
``Time`` instance, it will be used without conversion.
copy : bool, optional
Make a copy of the input values
"""
SCALES = TIME_DELTA_SCALES
"""List of time delta scales."""
FORMATS = TIME_DELTA_FORMATS
"""Dict of time delta formats."""
info = TimeDeltaInfo()
def __new__(
cls,
val,
val2=None,
format=None,
scale=None,
precision=None,
in_subfmt=None,
out_subfmt=None,
location=None,
copy=False,
):
if isinstance(val, TimeDelta):
self = val.replicate(format=format, copy=copy, cls=cls)
else:
self = super().__new__(cls)
return self
def __init__(self, val, val2=None, format=None, scale=None, copy=False):
if isinstance(val, TimeDelta):
if scale is not None:
self._set_scale(scale)
else:
format = format or self._get_format(val)
self._init_from_vals(val, val2, format, scale, copy)
if scale is not None:
self.SCALES = TIME_DELTA_TYPES[scale]
@staticmethod
def _get_format(val):
if isinstance(val, timedelta):
return "datetime"
if getattr(val, "unit", None) is None:
warn(
"Numerical value without unit or explicit format passed to"
" TimeDelta, assuming days",
TimeDeltaMissingUnitWarning,
)
return "jd"
def replicate(self, *args, **kwargs):
out = super().replicate(*args, **kwargs)
out.SCALES = self.SCALES
return out
def to_datetime(self):
"""
Convert to ``datetime.timedelta`` object.
"""
tm = self.replicate(format="datetime")
return tm._shaped_like_input(tm._time.value)
def _set_scale(self, scale):
"""
This is the key routine that actually does time scale conversions.
This is not public and not connected to the read-only scale property.
"""
if scale == self.scale:
return
if scale not in self.SCALES:
raise ValueError(
"Scale {scale!r} is not in the allowed scales {sorted(self.SCALES)}"
)
# For TimeDelta, there can only be a change in scale factor,
# which is written as time2 - time1 = scale_offset * time1
scale_offset = SCALE_OFFSETS[(self.scale, scale)]
if scale_offset is None:
self._time.scale = scale
else:
jd1, jd2 = self._time.jd1, self._time.jd2
offset1, offset2 = day_frac(jd1, jd2, factor=scale_offset)
self._time = self.FORMATS[self.format](
jd1 + offset1,
jd2 + offset2,
scale,
self.precision,
self.in_subfmt,
self.out_subfmt,
from_jd=True,
)
def _add_sub(self, other, op):
"""Perform common elements of addition / subtraction for two delta times."""
# If not a TimeDelta then see if it can be turned into a TimeDelta.
if not isinstance(other, TimeDelta):
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# the scales should be compatible (e.g., cannot convert TDB to TAI)
if (
self.scale is not None
and self.scale not in other.SCALES
or other.scale is not None
and other.scale not in self.SCALES
):
raise TypeError(
"Cannot add TimeDelta instances with scales '{}' and '{}'".format(
self.scale, other.scale
)
)
# adjust the scale of other if the scale of self is set (or no scales)
if self.scale is not None or other.scale is None:
out = self.replicate()
if other.scale is not None:
other = getattr(other, self.scale)
else:
out = other.replicate()
jd1 = op(self._time.jd1, other._time.jd1)
jd2 = op(self._time.jd2, other._time.jd2)
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
return out
def __add__(self, other):
# If other is a Time then use Time.__add__ to do the calculation.
if isinstance(other, Time):
return other.__add__(self)
return self._add_sub(other, operator.add)
def __sub__(self, other):
# TimeDelta - Time is an error
if isinstance(other, Time):
raise OperandTypeError(self, other, "-")
return self._add_sub(other, operator.sub)
def __radd__(self, other):
return self.__add__(other)
def __rsub__(self, other):
out = self.__sub__(other)
return -out
def __neg__(self):
"""Negation of a `TimeDelta` object."""
new = self.copy()
new._time.jd1 = -self._time.jd1
new._time.jd2 = -self._time.jd2
return new
def __abs__(self):
"""Absolute value of a `TimeDelta` object."""
jd1, jd2 = self._time.jd1, self._time.jd2
negative = jd1 + jd2 < 0
new = self.copy()
new._time.jd1 = np.where(negative, -jd1, jd1)
new._time.jd2 = np.where(negative, -jd2, jd2)
return new
def __mul__(self, other):
"""Multiplication of `TimeDelta` objects by numbers/arrays."""
# Check needed since otherwise the self.jd1 * other multiplication
# would enter here again (via __rmul__)
if isinstance(other, Time):
raise OperandTypeError(self, other, "*")
elif (isinstance(other, u.UnitBase) and other == u.dimensionless_unscaled) or (
isinstance(other, str) and other == ""
):
return self.copy()
# If other is something consistent with a dimensionless quantity
        # (could just be a float or an array), then we can just multiply in.
try:
other = u.Quantity(other, u.dimensionless_unscaled, copy=False)
except Exception:
# If not consistent with a dimensionless quantity, try downgrading
# self to a quantity and see if things work.
try:
return self.to(u.day) * other
except Exception:
# The various ways we could multiply all failed;
# returning NotImplemented to give other a final chance.
return NotImplemented
jd1, jd2 = day_frac(self.jd1, self.jd2, factor=other.value)
out = TimeDelta(jd1, jd2, format="jd", scale=self.scale)
if self.format != "jd":
out = out.replicate(format=self.format)
return out
def __rmul__(self, other):
"""Multiplication of numbers/arrays with `TimeDelta` objects."""
return self.__mul__(other)
def __truediv__(self, other):
"""Division of `TimeDelta` objects by numbers/arrays."""
        # Cannot do __mul__(1./other) as that loses precision
if (isinstance(other, u.UnitBase) and other == u.dimensionless_unscaled) or (
isinstance(other, str) and other == ""
):
return self.copy()
# If other is something consistent with a dimensionless quantity
# (could just be a float or an array), then we can just divide in.
try:
other = u.Quantity(other, u.dimensionless_unscaled, copy=False)
except Exception:
# If not consistent with a dimensionless quantity, try downgrading
# self to a quantity and see if things work.
try:
return self.to(u.day) / other
except Exception:
# The various ways we could divide all failed;
# returning NotImplemented to give other a final chance.
return NotImplemented
jd1, jd2 = day_frac(self.jd1, self.jd2, divisor=other.value)
out = TimeDelta(jd1, jd2, format="jd", scale=self.scale)
if self.format != "jd":
out = out.replicate(format=self.format)
return out
def __rtruediv__(self, other):
"""Division by `TimeDelta` objects of numbers/arrays."""
# Here, we do not have to worry about returning NotImplemented,
# since other has already had a chance to look at us.
return other / self.to(u.day)
def to(self, unit, equivalencies=[]):
"""
Convert to a quantity in the specified unit.
Parameters
----------
unit : unit-like
The unit to convert to.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not directly
convertible (see :ref:`astropy:unit_equivalencies`). If `None`, no
            equivalencies will be applied at all, not even any set globally
or within a context.
Returns
-------
quantity : `~astropy.units.Quantity`
The quantity in the units specified.
See also
--------
to_value : get the numerical value in a given unit.
"""
return u.Quantity(self._time.jd1 + self._time.jd2, u.day).to(
unit, equivalencies=equivalencies
)
def to_value(self, *args, **kwargs):
"""Get time delta values expressed in specified output format or unit.
This method is flexible and handles both conversion to a specified
``TimeDelta`` format / sub-format AND conversion to a specified unit.
If positional argument(s) are provided then the first one is checked
to see if it is a valid ``TimeDelta`` format, and next it is checked
to see if it is a valid unit or unit string.
To convert to a ``TimeDelta`` format and optional sub-format the options
are::
tm = TimeDelta(1.0 * u.s)
tm.to_value('jd') # equivalent of tm.jd
tm.to_value('jd', 'decimal') # convert to 'jd' as a Decimal object
tm.to_value('jd', subfmt='decimal')
tm.to_value(format='jd', subfmt='decimal')
To convert to a unit with optional equivalencies, the options are::
tm.to_value('hr') # convert to u.hr (hours)
tm.to_value('hr', []) # specify equivalencies as a positional arg
tm.to_value('hr', equivalencies=[])
tm.to_value(unit='hr', equivalencies=[])
The built-in `~astropy.time.TimeDelta` options for ``format`` are:
{'jd', 'sec', 'datetime'}.
For the two numerical formats 'jd' and 'sec', the available ``subfmt``
options are: {'float', 'long', 'decimal', 'str', 'bytes'}. Here, 'long'
uses ``numpy.longdouble`` for somewhat enhanced precision (with the
enhancement depending on platform), and 'decimal' instances of
:class:`decimal.Decimal` for full precision. For the 'str' and 'bytes'
sub-formats, the number of digits is also chosen such that time values
are represented accurately. Default: as set by ``out_subfmt`` (which by
default picks the first available for a given format, i.e., 'float').
Parameters
----------
format : str, optional
The format in which one wants the `~astropy.time.TimeDelta` values.
Default: the current format.
subfmt : str, optional
Possible sub-format in which the values should be given. Default: as
set by ``out_subfmt`` (which by default picks the first available
for a given format, i.e., 'float' or 'date_hms').
unit : `~astropy.units.UnitBase` instance or str, optional
The unit in which the value should be given.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not directly
convertible (see :ref:`astropy:unit_equivalencies`). If `None`, no
equivalencies will be applied at all, not even any set globally or
within a context.
Returns
-------
value : ndarray or scalar
The value in the format or units specified.
See also
--------
to : Convert to a `~astropy.units.Quantity` instance in a given unit.
value : The time value in the current format.
"""
if not (args or kwargs):
raise TypeError("to_value() missing required format or unit argument")
# TODO: maybe allow 'subfmt' also for units, keeping full precision
# (effectively, by doing the reverse of quantity_day_frac)?
# This way, only equivalencies could lead to possible precision loss.
if "format" in kwargs or (
args != () and (args[0] is None or args[0] in self.FORMATS)
):
# Super-class will error with duplicate arguments, etc.
return super().to_value(*args, **kwargs)
# With positional arguments, we try parsing the first one as a unit,
# so that on failure we can give a more informative exception.
if args:
try:
unit = u.Unit(args[0])
except ValueError as exc:
raise ValueError(
"first argument is not one of the known "
f"formats ({list(self.FORMATS)}) and failed to parse as a unit."
) from exc
args = (unit,) + args[1:]
return u.Quantity(self._time.jd1 + self._time.jd2, u.day).to_value(
*args, **kwargs
)
def _make_value_equivalent(self, item, value):
"""Coerce setitem value into an equivalent TimeDelta object."""
if not isinstance(value, TimeDelta):
try:
value = self.__class__(value, scale=self.scale, format=self.format)
except Exception as err:
raise ValueError(
f"cannot convert value to a compatible TimeDelta object: {err}"
)
return value
def isclose(self, other, atol=None, rtol=0.0):
"""Returns a boolean or boolean array where two TimeDelta objects are
element-wise equal within a time tolerance.
This effectively evaluates the expression below::
abs(self - other) <= atol + rtol * abs(other)
Parameters
----------
other : `~astropy.units.Quantity` or `~astropy.time.TimeDelta`
Quantity or TimeDelta object for comparison.
atol : `~astropy.units.Quantity` or `~astropy.time.TimeDelta`
Absolute tolerance for equality with units of time (e.g. ``u.s`` or
``u.day``). Default is one bit in the 128-bit JD time representation,
equivalent to about 20 picosecs.
rtol : float
Relative tolerance for equality
"""
try:
other_day = other.to_value(u.day)
except Exception as err:
raise TypeError(f"'other' argument must support conversion to days: {err}")
if atol is None:
atol = np.finfo(float).eps * u.day
if not isinstance(atol, (u.Quantity, TimeDelta)):
raise TypeError(
"'atol' argument must be a Quantity or TimeDelta instance, got "
f"{atol.__class__.__name__} instead"
)
return np.isclose(
self.to_value(u.day), other_day, rtol=rtol, atol=atol.to_value(u.day)
)
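# A hedged sketch of the pure-rate scale change implemented by _set_scale
# above: within a compatible group, time2 - time1 = scale_offset * time1,
# so converting a TimeDelta only rescales (jd1, jd2). Illustrative only.
def _demo_timedelta_scale_rate():
    dt = TimeDelta(86400.0, format="sec", scale="tai")
    # One TAI day re-expressed in TCG is longer by roughly 60 microseconds,
    # because TCG ticks faster than TAI at the constant rate erfa.ELG.
    return dt.tcg.sec - dt.sec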
class ScaleValueError(Exception):
pass
def _make_array(val, copy=False):
"""
Take ``val`` and convert/reshape to an array. If ``copy`` is `True`
then copy input values.
Returns
-------
val : ndarray
Array version of ``val``.
"""
if isinstance(val, (tuple, list)) and len(val) > 0 and isinstance(val[0], Time):
dtype = object
else:
dtype = None
val = np.array(val, copy=copy, subok=True, dtype=dtype)
# Allow only float64, string or object arrays as input
# (object is for datetime, maybe add more specific test later?)
# This also ensures the right byteorder for float64 (closes #2942).
if val.dtype.kind == "f" and val.dtype.itemsize >= np.dtype(np.float64).itemsize:
pass
elif val.dtype.kind in "OSUMaV":
pass
else:
val = np.asanyarray(val, dtype=np.float64)
return val
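# Illustrative check of the coercion above (not part of the API): numeric
# input that is not at least float64 is upcast so precision is preserved.
def _demo_make_array_upcast():
    out = _make_array(np.array([1.5, 2.5], dtype=np.float32))
    assert out.dtype == np.float64  # float32 input gets upcast
    return out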
def _check_for_masked_and_fill(val, val2):
"""
If ``val`` or ``val2`` are masked arrays then fill them and cast
to ndarray.
Returns a mask corresponding to the logical-or of masked elements
in ``val`` and ``val2``. If neither is masked then the return ``mask``
is ``None``.
If either ``val`` or ``val2`` are masked then they are replaced
with filled versions of themselves.
Parameters
----------
val : ndarray or MaskedArray
Input val
val2 : ndarray or MaskedArray
Input val2
Returns
-------
mask, val, val2: ndarray or None
Mask: (None or bool ndarray), val, val2: ndarray
"""
def get_as_filled_ndarray(mask, val):
"""
Fill the given MaskedArray ``val`` from the first non-masked
element in the array. This ensures that upstream Time initialization
will succeed.
Note that nothing happens if there are no masked elements.
"""
fill_value = None
if np.any(val.mask):
# Final mask is the logical-or of inputs
mask = mask | val.mask
# First unmasked element. If all elements are masked then
# use fill_value=None from above which will use val.fill_value.
# As long as the user has set this appropriately then all will
# be fine.
val_unmasked = val.compressed() # 1-d ndarray of unmasked values
if len(val_unmasked) > 0:
fill_value = val_unmasked[0]
# Fill the input ``val``. If fill_value is None then this just returns
# an ndarray view of val (no copy).
val = val.filled(fill_value)
return mask, val
mask = False
if isinstance(val, np.ma.MaskedArray):
mask, val = get_as_filled_ndarray(mask, val)
if isinstance(val2, np.ma.MaskedArray):
mask, val2 = get_as_filled_ndarray(mask, val2)
return mask, val, val2
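# Illustrative only: masked entries are filled from the first unmasked value
# so downstream ERFA routines see finite numbers, while the returned mask
# records which elements were originally masked.
def _demo_masked_fill():
    val = np.ma.MaskedArray([1.0, 2.0, 3.0], mask=[True, False, False])
    mask, filled, _ = _check_for_masked_and_fill(val, np.zeros(3))
    # mask -> [True, False, False]; filled -> [2.0, 2.0, 3.0]
    return mask, filled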
class OperandTypeError(TypeError):
def __init__(self, left, right, op=None):
op_string = "" if op is None else f" for {op}"
super().__init__(
"Unsupported operand type(s){}: '{}' and '{}'".format(
op_string, left.__class__.__name__, right.__class__.__name__
)
)
def _check_leapsec():
global _LEAP_SECONDS_CHECK
if _LEAP_SECONDS_CHECK != _LeapSecondsCheck.DONE:
with _LEAP_SECONDS_LOCK:
# There are three ways we can get here:
# 1. First call (NOT_STARTED).
# 2. Re-entrant call (RUNNING). We skip the initialisation
# and don't worry about leap second errors.
# 3. Another thread which raced with the first call
# (RUNNING). The first thread has relinquished the
# lock to us, so initialization is complete.
if _LEAP_SECONDS_CHECK == _LeapSecondsCheck.NOT_STARTED:
_LEAP_SECONDS_CHECK = _LeapSecondsCheck.RUNNING
update_leap_seconds()
_LEAP_SECONDS_CHECK = _LeapSecondsCheck.DONE
def update_leap_seconds(files=None):
"""If the current ERFA leap second table is out of date, try to update it.
Uses `astropy.utils.iers.LeapSeconds.auto_open` to try to find an
up-to-date table. See that routine for the definition of "out of date".
In order to make it safe to call this any time, all exceptions are turned
    into warnings.
Parameters
----------
files : list of path-like, optional
        List of files/URLs to attempt to open. By default, uses the list defined by
`astropy.utils.iers.LeapSeconds.auto_open`, which includes the table
used by ERFA itself, so if that is up to date, nothing will happen.
Returns
-------
n_update : int
Number of items updated.
"""
try:
from astropy.utils import iers
table = iers.LeapSeconds.auto_open(files)
return erfa.leap_seconds.update(table)
except Exception as exc:
warn(
f"leap-second auto-update failed due to the following exception: {exc!r}",
AstropyWarning,
)
return 0
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import datetime
import fnmatch
import re
import time
import warnings
from collections import OrderedDict, defaultdict
from decimal import Decimal
import erfa
import numpy as np
import astropy.units as u
from astropy.utils.decorators import classproperty, lazyproperty
from astropy.utils.exceptions import AstropyDeprecationWarning
from . import _parse_times, conf, utils
from .utils import day_frac, quantity_day_frac, two_product, two_sum
__all__ = [
"TimeFormat",
"TimeJD",
"TimeMJD",
"TimeFromEpoch",
"TimeUnix",
"TimeUnixTai",
"TimeCxcSec",
"TimeGPS",
"TimeDecimalYear",
"TimePlotDate",
"TimeUnique",
"TimeDatetime",
"TimeString",
"TimeISO",
"TimeISOT",
"TimeFITS",
"TimeYearDayTime",
"TimeEpochDate",
"TimeBesselianEpoch",
"TimeJulianEpoch",
"TimeDeltaFormat",
"TimeDeltaSec",
"TimeDeltaJD",
"TimeEpochDateString",
"TimeBesselianEpochString",
"TimeJulianEpochString",
"TIME_FORMATS",
"TIME_DELTA_FORMATS",
"TimezoneInfo",
"TimeDeltaDatetime",
"TimeDatetime64",
"TimeYMDHMS",
"TimeNumeric",
"TimeDeltaNumeric",
]
__doctest_skip__ = ["TimePlotDate"]
# These both get filled in at end after TimeFormat subclasses defined.
# Use an OrderedDict to fix the order in which formats are tried.
# This ensures, e.g., that 'isot' gets tried before 'fits'.
TIME_FORMATS = OrderedDict()
TIME_DELTA_FORMATS = OrderedDict()
# Translations between deprecated FITS timescales defined by
# Rots et al. 2015, A&A 574:A36, and timescales used here.
FITS_DEPRECATED_SCALES = {
"TDT": "tt",
"ET": "tt",
"GMT": "utc",
"UT": "utc",
"IAT": "tai",
}
def _regexify_subfmts(subfmts):
"""
Iterate through each of the sub-formats and try substituting simple
regular expressions for the strptime codes for year, month, day-of-month,
hour, minute, second. If no % characters remain then turn the final string
into a compiled regex. This assumes time formats do not have a % in them.
This is done both to speed up parsing of strings and to allow mixed formats
where strptime does not quite work well enough.
"""
new_subfmts = []
for subfmt_tuple in subfmts:
subfmt_in = subfmt_tuple[1]
if isinstance(subfmt_in, str):
for strptime_code, regex in (
("%Y", r"(?P<year>\d\d\d\d)"),
("%m", r"(?P<mon>\d{1,2})"),
("%d", r"(?P<mday>\d{1,2})"),
("%H", r"(?P<hour>\d{1,2})"),
("%M", r"(?P<min>\d{1,2})"),
("%S", r"(?P<sec>\d{1,2})"),
):
subfmt_in = subfmt_in.replace(strptime_code, regex)
if "%" not in subfmt_in:
subfmt_tuple = (
subfmt_tuple[0],
re.compile(subfmt_in + "$"),
subfmt_tuple[2],
)
new_subfmts.append(subfmt_tuple)
return tuple(new_subfmts)
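# A hedged illustration of the substitution above (the subformat tuple here
# is invented for the example, not one astropy defines): the input
#
#     ("date", "%Y-%m-%d", "{year:d}-{mon:02d}-{mday:02d}")
#
# has every strptime code replaced and no '%' remains, so it compiles to
#
#     ("date",
#      re.compile(r"(?P<year>\d\d\d\d)-(?P<mon>\d{1,2})-(?P<mday>\d{1,2})$"),
#      "{year:d}-{mon:02d}-{mday:02d}")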
class TimeFormat:
"""
Base class for time representations.
Parameters
----------
val1 : numpy ndarray, list, number, str, or bytes
Values to initialize the time or times. Bytes are decoded as ascii.
val2 : numpy ndarray, list, or number; optional
Value(s) to initialize the time or times. Only used for numerical
input, to help preserve precision.
scale : str
Time scale of input value(s)
precision : int
Precision for seconds as floating point
in_subfmt : str
Select subformat for inputting string times
out_subfmt : str
Select subformat for outputting string times
from_jd : bool
If true then val1, val2 are jd1, jd2
"""
_default_scale = "utc" # As of astropy 0.4
subfmts = ()
_registry = TIME_FORMATS
def __init__(
self, val1, val2, scale, precision, in_subfmt, out_subfmt, from_jd=False
):
self.scale = scale # validation of scale done later with _check_scale
self.precision = precision
self.in_subfmt = in_subfmt
self.out_subfmt = out_subfmt
self._jd1, self._jd2 = None, None
if from_jd:
self.jd1 = val1
self.jd2 = val2
else:
val1, val2 = self._check_val_type(val1, val2)
self.set_jds(val1, val2)
def __init_subclass__(cls, **kwargs):
# Register time formats that define a name, but leave out astropy_time since
# it is not a user-accessible format and is only used for initialization into
# a different format.
if "name" in cls.__dict__ and cls.name != "astropy_time":
# FIXME: check here that we're not introducing a collision with
# an existing method or attribute; problem is it could be either
# astropy.time.Time or astropy.time.TimeDelta, and at the point
# where this is run neither of those classes have necessarily been
# constructed yet.
if "value" in cls.__dict__ and not hasattr(cls.value, "fget"):
raise ValueError("If defined, 'value' must be a property")
cls._registry[cls.name] = cls
# If this class defines its own subfmts, preprocess the definitions.
if "subfmts" in cls.__dict__:
cls.subfmts = _regexify_subfmts(cls.subfmts)
return super().__init_subclass__(**kwargs)
@classmethod
def _get_allowed_subfmt(cls, subfmt):
"""Get an allowed subfmt for this class, either the input ``subfmt``
if this is valid or '*' as a default. This method gets used in situations
where the format of an existing Time object is changing and so the
out_ or in_subfmt may need to be coerced to the default '*' if that
``subfmt`` is no longer valid.
"""
try:
cls._select_subfmts(subfmt)
except ValueError:
subfmt = "*"
return subfmt
@property
def in_subfmt(self):
return self._in_subfmt
@in_subfmt.setter
def in_subfmt(self, subfmt):
# Validate subfmt value for this class, raises ValueError if not.
self._select_subfmts(subfmt)
self._in_subfmt = subfmt
@property
def out_subfmt(self):
return self._out_subfmt
@out_subfmt.setter
def out_subfmt(self, subfmt):
# Validate subfmt value for this class, raises ValueError if not.
self._select_subfmts(subfmt)
self._out_subfmt = subfmt
@property
def jd1(self):
return self._jd1
@jd1.setter
def jd1(self, jd1):
self._jd1 = _validate_jd_for_storage(jd1)
if self._jd2 is not None:
self._jd1, self._jd2 = _broadcast_writeable(self._jd1, self._jd2)
@property
def jd2(self):
return self._jd2
@jd2.setter
def jd2(self, jd2):
self._jd2 = _validate_jd_for_storage(jd2)
if self._jd1 is not None:
self._jd1, self._jd2 = _broadcast_writeable(self._jd1, self._jd2)
def __len__(self):
return len(self.jd1)
@property
def scale(self):
"""Time scale."""
self._scale = self._check_scale(self._scale)
return self._scale
@scale.setter
def scale(self, val):
self._scale = val
def mask_if_needed(self, value):
if self.masked:
value = np.ma.array(value, mask=self.mask, copy=False)
return value
@property
def mask(self):
if "mask" not in self.cache:
self.cache["mask"] = np.isnan(self.jd2)
if self.cache["mask"].shape:
self.cache["mask"].flags.writeable = False
return self.cache["mask"]
@property
def masked(self):
if "masked" not in self.cache:
self.cache["masked"] = bool(np.any(self.mask))
return self.cache["masked"]
@property
def jd2_filled(self):
return np.nan_to_num(self.jd2) if self.masked else self.jd2
@property
def precision(self):
return self._precision
@precision.setter
def precision(self, val):
# Verify precision is 0-9 (inclusive)
if not isinstance(val, int) or val < 0 or val > 9:
raise ValueError("precision attribute must be an int between 0 and 9")
self._precision = val
@lazyproperty
def cache(self):
"""
Return the cache associated with this instance.
"""
return defaultdict(dict)
def _check_val_type(self, val1, val2):
"""Input value validation, typically overridden by derived classes."""
# val1 cannot contain nan, but val2 can contain nan
isfinite1 = np.isfinite(val1)
if val1.size > 1: # Calling .all() on a scalar is surprisingly slow
isfinite1 = (
isfinite1.all()
) # Note: arr.all() about 3x faster than np.all(arr)
elif val1.size == 0:
isfinite1 = False
ok1 = (
val1.dtype.kind == "f"
and val1.dtype.itemsize >= 8
and isfinite1
or val1.size == 0
)
ok2 = (
val2 is None
or (
val2.dtype.kind == "f"
and val2.dtype.itemsize >= 8
and not np.any(np.isinf(val2))
)
or val2.size == 0
)
if not (ok1 and ok2):
raise TypeError(
f"Input values for {self.name} class must be finite doubles"
)
if getattr(val1, "unit", None) is not None:
# Convert any quantity-likes to days first, attempting to be
# careful with the conversion, so that, e.g., large numbers of
# seconds get converted without losing precision because
# 1/86400 is not exactly representable as a float.
val1 = u.Quantity(val1, copy=False)
if val2 is not None:
val2 = u.Quantity(val2, copy=False)
try:
val1, val2 = quantity_day_frac(val1, val2)
except u.UnitsError:
raise u.UnitConversionError(
"only quantities with time units can be "
"used to instantiate Time instances."
)
# We now have days, but the format may expect another unit.
# On purpose, multiply with 1./day_unit because typically it is
# 1./erfa.DAYSEC, and inverting it recovers the integer.
# (This conversion will get undone in format's set_jds, hence
# there may be room for optimizing this.)
factor = 1.0 / getattr(self, "unit", 1.0)
if factor != 1.0:
val1, carry = two_product(val1, factor)
carry += val2 * factor
val1, val2 = two_sum(val1, carry)
elif getattr(val2, "unit", None) is not None:
raise TypeError("Cannot mix float and Quantity inputs")
if val2 is None:
val2 = np.array(0, dtype=val1.dtype)
def asarray_or_scalar(val):
"""
Remove ndarray subclasses since for jd1/jd2 we want a pure ndarray
or a Python or numpy scalar.
"""
return np.asarray(val) if isinstance(val, np.ndarray) else val
return asarray_or_scalar(val1), asarray_or_scalar(val2)
def _check_scale(self, scale):
"""
Return a validated scale value.
If there is a class attribute 'scale' then that defines the default /
required time scale for this format. In this case if a scale value was
provided that needs to match the class default, otherwise return
the class default.
Otherwise just make sure that scale is in the allowed list of
scales. Provide a different error message if `None` (no value) was
supplied.
"""
if scale is None:
scale = self._default_scale
if scale not in TIME_SCALES:
raise ScaleValueError(
f"Scale value '{scale}' not in allowed values {TIME_SCALES}"
)
return scale
def set_jds(self, val1, val2):
"""
Set internal jd1 and jd2 from val1 and val2. Must be provided
by derived classes.
"""
raise NotImplementedError
def to_value(self, parent=None, out_subfmt=None):
"""
Return time representation from internal jd1 and jd2 in specified
``out_subfmt``.
This is the base method that ignores ``parent`` and uses the ``value``
property to compute the output. This is done by temporarily setting
``self.out_subfmt`` and calling ``self.value``. This is required for
        legacy Format subclasses prior to astropy 4.0. New code should instead
implement the value functionality in ``to_value()`` and then make the
``value`` property be a simple call to ``self.to_value()``.
Parameters
----------
parent : object
Parent `~astropy.time.Time` object associated with this
`~astropy.time.TimeFormat` object
out_subfmt : str or None
            Output subformat (use existing self.out_subfmt if `None`)
Returns
-------
value : numpy.array, numpy.ma.array
Array or masked array of formatted time representation values
"""
# Get value via ``value`` property, overriding out_subfmt temporarily if needed.
if out_subfmt is not None:
out_subfmt_orig = self.out_subfmt
try:
self.out_subfmt = out_subfmt
value = self.value
finally:
self.out_subfmt = out_subfmt_orig
else:
value = self.value
return self.mask_if_needed(value)
@property
def value(self):
raise NotImplementedError
@classmethod
def _select_subfmts(cls, pattern):
"""
Return a list of subformats where name matches ``pattern`` using
fnmatch.
If no subformat matches pattern then a ValueError is raised. A special
case is a format with no allowed subformats, i.e. subfmts=(), and
pattern='*'. This is OK and happens when this method is used for
validation of an out_subfmt.
"""
if not isinstance(pattern, str):
raise ValueError("subfmt attribute must be a string")
elif pattern == "*":
return cls.subfmts
subfmts = [x for x in cls.subfmts if fnmatch.fnmatchcase(x[0], pattern)]
if len(subfmts) == 0:
if len(cls.subfmts) == 0:
raise ValueError(f"subformat not allowed for format {cls.name}")
else:
subfmt_names = [x[0] for x in cls.subfmts]
raise ValueError(
f"subformat {pattern!r} must match one of "
f"{subfmt_names} for format {cls.name}"
)
return subfmts
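# A brief note on the matching above: patterns go through fnmatch.fnmatchcase,
# so a pattern like 'dec*' would select a 'decimal' subformat of a numeric
# class, while '*' keeps every subformat a class defines.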
class TimeNumeric(TimeFormat):
subfmts = (
("float", np.float64, None, np.add),
("long", np.longdouble, utils.longdouble_to_twoval, utils.twoval_to_longdouble),
("decimal", np.object_, utils.decimal_to_twoval, utils.twoval_to_decimal),
("str", np.str_, utils.decimal_to_twoval, utils.twoval_to_string),
("bytes", np.bytes_, utils.bytes_to_twoval, utils.twoval_to_bytes),
)
def _check_val_type(self, val1, val2):
"""Input value validation, typically overridden by derived classes."""
# Save original state of val2 because the super()._check_val_type below
# may change val2 from None to np.array(0). The value is saved in order
# to prevent a useless and slow call to np.result_type() below in the
# most common use-case of providing only val1.
orig_val2_is_none = val2 is None
if val1.dtype.kind == "f":
val1, val2 = super()._check_val_type(val1, val2)
elif not orig_val2_is_none or not (
val1.dtype.kind in "US"
or (
val1.dtype.kind == "O"
and all(isinstance(v, Decimal) for v in val1.flat)
)
):
raise TypeError(
f"for {self.name} class, input should be doubles, string, or Decimal, "
"and second values are only allowed for doubles."
)
val_dtype = (
val1.dtype if orig_val2_is_none else np.result_type(val1.dtype, val2.dtype)
)
subfmts = self._select_subfmts(self.in_subfmt)
for subfmt, dtype, convert, _ in subfmts:
if np.issubdtype(val_dtype, dtype):
break
else:
raise ValueError("input type not among selected sub-formats.")
if convert is not None:
try:
val1, val2 = convert(val1, val2)
except Exception:
raise TypeError(
f"for {self.name} class, input should be (long) doubles, string, "
"or Decimal, and second values are only allowed for "
"(long) doubles."
)
return val1, val2
def to_value(self, jd1=None, jd2=None, parent=None, out_subfmt=None):
"""
Return time representation from internal jd1 and jd2.
Subclasses that require ``parent`` or to adjust the jds should
override this method.
"""
# TODO: do this in __init_subclass__?
if self.__class__.value.fget is not self.__class__.to_value:
return self.value
if jd1 is None:
jd1 = self.jd1
if jd2 is None:
jd2 = self.jd2
if out_subfmt is None:
out_subfmt = self.out_subfmt
subfmt = self._select_subfmts(out_subfmt)[0]
kwargs = {}
if subfmt[0] in ("str", "bytes"):
unit = getattr(self, "unit", 1)
digits = int(np.ceil(np.log10(unit / np.finfo(float).eps)))
# TODO: allow a way to override the format.
kwargs["fmt"] = f".{digits}f"
value = subfmt[3](jd1, jd2, **kwargs)
return self.mask_if_needed(value)
value = property(to_value)
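# A hedged sketch of the two-value idea behind the converters above: a
# high-precision number is kept as a pair of float64 values whose exact sum
# reproduces it. two_sum (imported from .utils above) is the error-free
# primitive that builds such pairs. Illustrative only.
def _demo_twoval_split():
    hi, lo = two_sum(2451545.0, 1e-10)
    # hi is 2451545.0 and lo is the 1e-10 remainder that the naive float
    # sum would have rounded away; hi + lo is exactly the intended value.
    return hi, lo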
class TimeJD(TimeNumeric):
"""
Julian Date time format.
This represents the number of days since the beginning of
the Julian Period.
For example, 2451544.5 in JD is midnight on January 1, 2000.
"""
name = "jd"
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
self.jd1, self.jd2 = day_frac(val1, val2)
class TimeMJD(TimeNumeric):
"""
Modified Julian Date time format.
This represents the number of days since midnight on November 17, 1858.
For example, 51544.0 in MJD is midnight on January 1, 2000.
"""
name = "mjd"
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
jd1, jd2 = day_frac(val1, val2)
jd1 += erfa.DJM0 # erfa.DJM0=2400000.5 (from erfam.h).
self.jd1, self.jd2 = day_frac(jd1, jd2)
def to_value(self, **kwargs):
jd1 = self.jd1 - erfa.DJM0 # This cannot lose precision.
jd2 = self.jd2
return super().to_value(jd1=jd1, jd2=jd2, **kwargs)
value = property(to_value)
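# Worked example for the offset arithmetic above: MJD 51544.0 stores
# jd1 = 51544.0 + 2400000.5 = 2451544.5 (with jd2 = 0.0), and to_value()
# subtracts erfa.DJM0 back out exactly, so the MJD <-> JD round trip is
# lossless in the two-double representation.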
class TimeDecimalYear(TimeNumeric):
"""
Time as a decimal year, with integer values corresponding to midnight
of the first day of each year. For example 2000.5 corresponds to the
ISO time '2000-07-02 00:00:00'.
"""
name = "decimalyear"
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
sum12, err12 = two_sum(val1, val2)
iy_start = np.trunc(sum12).astype(int)
extra, y_frac = two_sum(sum12, -iy_start)
y_frac += extra + err12
imon = np.ones_like(iy_start)
iday = np.ones_like(iy_start)
ihr = np.zeros_like(iy_start)
imin = np.zeros_like(iy_start)
isec = np.zeros_like(y_frac)
# Possible enhancement: use np.unique to only compute start, stop
# for unique values of iy_start.
scale = self.scale.upper().encode("ascii")
jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday, ihr, imin, isec)
jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday, ihr, imin, isec)
t_start = Time(jd1_start, jd2_start, scale=self.scale, format="jd")
t_end = Time(jd1_end, jd2_end, scale=self.scale, format="jd")
t_frac = t_start + (t_end - t_start) * y_frac
self.jd1, self.jd2 = day_frac(t_frac.jd1, t_frac.jd2)
def to_value(self, **kwargs):
scale = self.scale.upper().encode("ascii")
iy_start, ims, ids, ihmsfs = erfa.d2dtf(
scale, 0, self.jd1, self.jd2_filled # precision=0
)
imon = np.ones_like(iy_start)
iday = np.ones_like(iy_start)
ihr = np.zeros_like(iy_start)
imin = np.zeros_like(iy_start)
isec = np.zeros_like(self.jd1)
# Possible enhancement: use np.unique to only compute start, stop
# for unique values of iy_start.
scale = self.scale.upper().encode("ascii")
jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday, ihr, imin, isec)
jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday, ihr, imin, isec)
# Trying to be precise, but more than float64 not useful.
dt = (self.jd1 - jd1_start) + (self.jd2 - jd2_start)
dt_end = (jd1_end - jd1_start) + (jd2_end - jd2_start)
decimalyear = iy_start + dt / dt_end
return super().to_value(jd1=decimalyear, jd2=np.float64(0.0), **kwargs)
value = property(to_value)
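# Worked example for the interpolation above, consistent with the docstring:
# for 2000.5 the year fraction is 0.5, the bracketing boundaries are
# JD 2451544.5 (2000-01-01) and JD 2451910.5 (2001-01-01), 366 days apart
# (leap year), so t = 2451544.5 + 0.5 * 366 = JD 2451727.5, i.e.
# 2000-07-02 00:00:00.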
class TimeFromEpoch(TimeNumeric):
"""
Base class for times that represent the interval from a particular
epoch as a floating point multiple of a unit time interval (e.g. seconds
or days).
"""
@classproperty(lazy=True)
def _epoch(cls):
# Ideally we would use `def epoch(cls)` here and not have the instance
# property below. However, this breaks the sphinx API docs generation
# in a way that was not resolved. See #10406 for details.
return Time(
cls.epoch_val,
cls.epoch_val2,
scale=cls.epoch_scale,
format=cls.epoch_format,
)
@property
def epoch(self):
"""Reference epoch time from which the time interval is measured."""
return self._epoch
def set_jds(self, val1, val2):
"""
Initialize the internal jd1 and jd2 attributes given val1 and val2.
        For a TimeFromEpoch subclass like TimeUnix these will be floats giving
the effective seconds since an epoch time (e.g. 1970-01-01 00:00:00).
"""
# Form new JDs based on epoch time + time from epoch (converted to JD).
# One subtlety that might not be obvious is that 1.000 Julian days in
# UTC can be 86400 or 86401 seconds. For the TimeUnix format the
# assumption is that every day is exactly 86400 seconds, so this is, in
# principle, doing the math incorrectly, *except* that it matches the
# definition of Unix time which does not include leap seconds.
# note: use divisor=1./self.unit, since this is either 1 or 1/86400,
# and 1/86400 is not exactly representable as a float64, so multiplying
# by that will cause rounding errors. (But inverting it as a float64
# recovers the exact number)
day, frac = day_frac(val1, val2, divisor=1.0 / self.unit)
jd1 = self.epoch.jd1 + day
jd2 = self.epoch.jd2 + frac
# For the usual case that scale is the same as epoch_scale, we only need
# to ensure that abs(jd2) <= 0.5. Since abs(self.epoch.jd2) <= 0.5 and
# abs(frac) <= 0.5, we can do simple (fast) checks and arithmetic here
# without another call to day_frac(). Note also that `round(jd2.item())`
# is about 10x faster than `np.round(jd2)`` for a scalar.
if self.epoch.scale == self.scale:
jd1_extra = np.round(jd2) if jd2.shape else round(jd2.item())
jd1 += jd1_extra
jd2 -= jd1_extra
self.jd1, self.jd2 = jd1, jd2
return
# Create a temporary Time object corresponding to the new (jd1, jd2) in
# the epoch scale (e.g. UTC for TimeUnix) then convert that to the
# desired time scale for this object.
#
# A known limitation is that the transform from self.epoch_scale to
# self.scale cannot involve any metadata like lat or lon.
try:
tm = getattr(
Time(jd1, jd2, scale=self.epoch_scale, format="jd"), self.scale
)
except Exception as err:
raise ScaleValueError(
f"Cannot convert from '{self.name}' epoch scale '{self.epoch_scale}' "
f"to specified scale '{self.scale}', got error:\n{err}"
) from err
self.jd1, self.jd2 = day_frac(tm._time.jd1, tm._time.jd2)
def to_value(self, parent=None, **kwargs):
# Make sure that scale is the same as epoch scale so we can just
# subtract the epoch and convert
if self.scale != self.epoch_scale:
if parent is None:
raise ValueError("cannot compute value without parent Time object")
try:
tm = getattr(parent, self.epoch_scale)
except Exception as err:
raise ScaleValueError(
f"Cannot convert from '{self.name}' epoch scale "
f"'{self.epoch_scale}' to specified scale '{self.scale}', "
f"got error:\n{err}"
) from err
jd1, jd2 = tm._time.jd1, tm._time.jd2
else:
jd1, jd2 = self.jd1, self.jd2
# This factor is guaranteed to be exactly representable, which
# means time_from_epoch1 is calculated exactly.
factor = 1.0 / self.unit
time_from_epoch1 = (jd1 - self.epoch.jd1) * factor
time_from_epoch2 = (jd2 - self.epoch.jd2) * factor
return super().to_value(jd1=time_from_epoch1, jd2=time_from_epoch2, **kwargs)
value = property(to_value)
@property
def _default_scale(self):
return self.epoch_scale
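# A hedged sketch of the divisor trick described in set_jds above: dividing
# by 1/unit (e.g. 86400.0, exactly representable) instead of multiplying by
# unit (1/86400, not exactly representable) avoids a rounding error on every
# seconds-to-days conversion. Illustrative only.
def _demo_divisor_trick(seconds=1234567890.0):
    day, frac = day_frac(seconds, 0.0, divisor=erfa.DAYSEC)
    # day + frac is seconds/86400 split exactly across two doubles.
    return day, frac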
class TimeUnix(TimeFromEpoch):
"""
Unix time (UTC): seconds from 1970-01-01 00:00:00 UTC, ignoring leap seconds.
For example, 946684800.0 in Unix time is midnight on January 1, 2000.
NOTE: this quantity is not exactly unix time and differs from the strict
POSIX definition by up to 1 second on days with a leap second. POSIX
unix time actually jumps backward by 1 second at midnight on leap second
days while this class value is monotonically increasing at 86400 seconds
per UTC day.
"""
name = "unix"
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = "1970-01-01 00:00:00"
epoch_val2 = None
epoch_scale = "utc"
epoch_format = "iso"
class TimeUnixTai(TimeUnix):
"""
Unix time (TAI): SI seconds elapsed since 1970-01-01 00:00:00 TAI (see caveats).
This will generally differ from standard (UTC) Unix time by the cumulative
integral number of leap seconds introduced into UTC since 1972-01-01 UTC
plus the initial offset of 10 seconds at that date.
This convention matches the definition of linux CLOCK_TAI
(https://www.cl.cam.ac.uk/~mgk25/posix-clocks.html),
and the Precision Time Protocol
(https://en.wikipedia.org/wiki/Precision_Time_Protocol), which
is also used by the White Rabbit protocol in High Energy Physics:
https://white-rabbit.web.cern.ch.
Caveats:
- Before 1972, fractional adjustments to UTC were made, so the difference
between ``unix`` and ``unix_tai`` time is no longer an integer.
- Because of the fractional adjustments, to be very precise, ``unix_tai``
is the number of seconds since ``1970-01-01 00:00:00 TAI`` or equivalently
``1969-12-31 23:59:51.999918 UTC``. The difference between TAI and UTC
at that epoch was 8.000082 sec.
- On the day of a positive leap second the difference between ``unix`` and
``unix_tai`` times increases linearly through the day by 1.0. See also the
documentation for the `~astropy.time.TimeUnix` class.
- Negative leap seconds are possible, though none have been needed to date.
Examples
--------
>>> # get the current offset between TAI and UTC
>>> from astropy.time import Time
>>> t = Time('2020-01-01', scale='utc')
>>> t.unix_tai - t.unix
37.0
>>> # Before 1972, the offset between TAI and UTC was not integer
>>> t = Time('1970-01-01', scale='utc')
>>> t.unix_tai - t.unix # doctest: +FLOAT_CMP
8.000082
>>> # Initial offset of 10 seconds in 1972
>>> t = Time('1972-01-01', scale='utc')
>>> t.unix_tai - t.unix
10.0
"""
name = "unix_tai"
epoch_val = "1970-01-01 00:00:00"
epoch_scale = "tai"
class TimeCxcSec(TimeFromEpoch):
"""
Chandra X-ray Center seconds from 1998-01-01 00:00:00 TT.
For example, 63072064.184 is midnight on January 1, 2000.
"""
name = "cxcsec"
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = "1998-01-01 00:00:00"
epoch_val2 = None
epoch_scale = "tt"
epoch_format = "iso"
class TimeGPS(TimeFromEpoch):
"""GPS time: seconds from 1980-01-06 00:00:00 UTC
For example, 630720013.0 is midnight on January 1, 2000.
Notes
=====
This implementation is strictly a representation of the number of seconds
(including leap seconds) since midnight UTC on 1980-01-06. GPS can also be
considered as a time scale which is ahead of TAI by a fixed offset
(to within about 100 nanoseconds).
For details, see https://www.usno.navy.mil/USNO/time/gps/usno-gps-time-transfer
"""
name = "gps"
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = "1980-01-06 00:00:19"
# above epoch is the same as Time('1980-01-06 00:00:00', scale='utc').tai
epoch_val2 = None
epoch_scale = "tai"
epoch_format = "iso"
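# Hedged check of the epoch comment above: at 1980-01-06 TAI led UTC by an
# accumulated 19 seconds, so '1980-01-06 00:00:19' TAI coincides with
# '1980-01-06 00:00:00' UTC, letting TimeGPS count SI seconds from that
# epoch without any further leap-second handling.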
class TimePlotDate(TimeFromEpoch):
"""
Matplotlib `~matplotlib.pyplot.plot_date` input:
1 + number of days from 0001-01-01 00:00:00 UTC.
This can be used directly in the matplotlib `~matplotlib.pyplot.plot_date`
function::
>>> import matplotlib.pyplot as plt
>>> jyear = np.linspace(2000, 2001, 20)
>>> t = Time(jyear, format='jyear', scale='utc')
>>> plt.plot_date(t.plot_date, jyear)
>>> plt.gcf().autofmt_xdate() # orient date labels at a slant
>>> plt.draw()
For example, 730120.0003703703 is midnight on January 1, 2000.
"""
# This corresponds to the zero reference time for matplotlib plot_date().
# Note that TAI and UTC are equivalent at the reference time.
name = "plot_date"
unit = 1.0
epoch_val = 1721424.5 # Time('0001-01-01 00:00:00', scale='tai').jd - 1
epoch_val2 = None
epoch_scale = "utc"
epoch_format = "jd"
@lazyproperty
def epoch(self):
"""Reference epoch time from which the time interval is measured."""
try:
# Matplotlib >= 3.3 has a get_epoch() function
from matplotlib.dates import get_epoch
except ImportError:
# If no get_epoch() then the epoch is '0001-01-01'
_epoch = self._epoch
else:
# Get the matplotlib date epoch as an ISOT string in UTC
epoch_utc = get_epoch()
from erfa import ErfaWarning
with warnings.catch_warnings():
# Catch possible dubious year warnings from erfa
warnings.filterwarnings("ignore", category=ErfaWarning)
_epoch = Time(epoch_utc, scale="utc", format="isot")
_epoch.format = "jd"
return _epoch
class TimeStardate(TimeFromEpoch):
"""
Stardate: date units from 2318-07-05 12:00:00 UTC.
For example, stardate 41153.7 is 00:52 on April 30, 2363.
See http://trekguide.com/Stardates.htm#TNG for calculations and reference points.
"""
name = "stardate"
unit = 0.397766856 # Stardate units per day
epoch_val = "2318-07-05 11:00:00" # Date and time of stardate 00000.00
epoch_val2 = None
epoch_scale = "tai"
epoch_format = "iso"
class TimeUnique(TimeFormat):
"""
Base class for time formats that can uniquely create a time object
without requiring an explicit format specifier. This class does
nothing but provide inheritance to identify a class as unique.
"""
class TimeAstropyTime(TimeUnique):
"""
Instantiate date from an Astropy Time object (or list thereof).
This is purely for instantiating from a Time object. The output
format is the same as the first time instance.
"""
name = "astropy_time"
def __new__(
cls, val1, val2, scale, precision, in_subfmt, out_subfmt, from_jd=False
):
"""
Use __new__ instead of __init__ to output a class instance that
is the same as the class of the first Time object in the list.
"""
val1_0 = val1.flat[0]
if not (
isinstance(val1_0, Time)
and all(type(val) is type(val1_0) for val in val1.flat)
):
raise TypeError(
f"Input values for {cls.name} class must all be the same "
"astropy Time type."
)
if scale is None:
scale = val1_0.scale
if val1.shape:
vals = [getattr(val, scale)._time for val in val1]
jd1 = np.concatenate([np.atleast_1d(val.jd1) for val in vals])
jd2 = np.concatenate([np.atleast_1d(val.jd2) for val in vals])
# Collect individual location values and merge into a single location.
if any(tm.location is not None for tm in val1):
if any(tm.location is None for tm in val1):
raise ValueError(
"cannot concatenate times unless all locations "
"are set or no locations are set"
)
locations = []
for tm in val1:
location = np.broadcast_to(
tm.location, tm._time.jd1.shape, subok=True
)
locations.append(np.atleast_1d(location))
location = np.concatenate(locations)
else:
location = None
else:
val = getattr(val1_0, scale)._time
jd1, jd2 = val.jd1, val.jd2
location = val1_0.location
OutTimeFormat = val1_0._time.__class__
self = OutTimeFormat(
jd1, jd2, scale, precision, in_subfmt, out_subfmt, from_jd=True
)
# Make a temporary hidden attribute to transfer location back to the
# parent Time object where it needs to live.
self._location = location
return self
class TimeDatetime(TimeUnique):
"""
Represent date as Python standard library `~datetime.datetime` object.
Example::
>>> from astropy.time import Time
>>> from datetime import datetime
>>> t = Time(datetime(2000, 1, 2, 12, 0, 0), scale='utc')
>>> t.iso
'2000-01-02 12:00:00.000'
>>> t.tt.datetime
datetime.datetime(2000, 1, 2, 12, 1, 4, 184000)
"""
name = "datetime"
def _check_val_type(self, val1, val2):
if not all(isinstance(val, datetime.datetime) for val in val1.flat):
raise TypeError(
f"Input values for {self.name} class must be datetime objects"
)
if val2 is not None:
raise ValueError(
f"{self.name} objects do not accept a val2 but you provided {val2}"
)
return val1, None
def set_jds(self, val1, val2):
"""Convert datetime object contained in val1 to jd1, jd2."""
# Iterate through the datetime objects, getting year, month, etc.
iterator = np.nditer(
[val1, None, None, None, None, None, None],
flags=["refs_ok", "zerosize_ok"],
op_dtypes=[None] + 5 * [np.intc] + [np.double],
)
for val, iy, im, id, ihr, imin, dsec in iterator:
dt = val.item()
if dt.tzinfo is not None:
dt = (dt - dt.utcoffset()).replace(tzinfo=None)
iy[...] = dt.year
im[...] = dt.month
id[...] = dt.day
ihr[...] = dt.hour
imin[...] = dt.minute
dsec[...] = dt.second + dt.microsecond / 1e6
jd1, jd2 = erfa.dtf2d(
self.scale.upper().encode("ascii"), *iterator.operands[1:]
)
self.jd1, self.jd2 = day_frac(jd1, jd2)
def to_value(self, timezone=None, parent=None, out_subfmt=None):
"""
Convert to (potentially timezone-aware) `~datetime.datetime` object.
If ``timezone`` is not ``None``, return a timezone-aware datetime
object.
Parameters
----------
timezone : {`~datetime.tzinfo`, None}, optional
If not `None`, return timezone-aware datetime.
Returns
-------
`~datetime.datetime`
If ``timezone`` is not ``None``, output will be timezone-aware.
"""
if out_subfmt is not None:
# Out_subfmt not allowed for this format, so raise the standard
# exception by trying to validate the value.
self._select_subfmts(out_subfmt)
if timezone is not None:
if self._scale != "utc":
raise ScaleValueError(
f"scale is {self._scale}, must be 'utc' when timezone is supplied."
)
# Rather than define a value property directly, we have a function,
# since we want to be able to pass in timezone information.
scale = self.scale.upper().encode("ascii")
iys, ims, ids, ihmsfs = erfa.d2dtf(
scale, 6, self.jd1, self.jd2_filled # 6 for microsec
)
ihrs = ihmsfs["h"]
imins = ihmsfs["m"]
isecs = ihmsfs["s"]
ifracs = ihmsfs["f"]
iterator = np.nditer(
[iys, ims, ids, ihrs, imins, isecs, ifracs, None],
flags=["refs_ok", "zerosize_ok"],
op_dtypes=7 * [None] + [object],
)
for iy, im, id, ihr, imin, isec, ifracsec, out in iterator:
if isec >= 60:
raise ValueError(
f"Time {(iy, im, id, ihr, imin, isec, ifracsec)} is within "
"a leap second but datetime does not support leap seconds"
)
if timezone is not None:
out[...] = datetime.datetime(
iy, im, id, ihr, imin, isec, ifracsec, tzinfo=TimezoneInfo()
).astimezone(timezone)
else:
out[...] = datetime.datetime(iy, im, id, ihr, imin, isec, ifracsec)
return self.mask_if_needed(iterator.operands[-1])
value = property(to_value)
class TimeYMDHMS(TimeUnique):
"""
ymdhms: A Time format to represent Time as year, month, day, hour,
minute, second (thus the name ymdhms).
Acceptable inputs must have keys or column names in the "YMDHMS" set of
    ``year``, ``month``, ``day``, ``hour``, ``minute``, ``second``:
- Dict with keys in the YMDHMS set
- NumPy structured array, record array or astropy Table, or single row
of those types, with column names in the YMDHMS set
One can supply a subset of the YMDHMS values, for instance only 'year',
'month', and 'day'. Inputs have the following defaults::
'month': 1, 'day': 1, 'hour': 0, 'minute': 0, 'second': 0
When the input is supplied as a ``dict`` then each value can be either a
scalar value or an array. The values will be broadcast to a common shape.
Example::
>>> from astropy.time import Time
>>> t = Time({'year': 2015, 'month': 2, 'day': 3,
... 'hour': 12, 'minute': 13, 'second': 14.567},
... scale='utc')
>>> t.iso
'2015-02-03 12:13:14.567'
>>> t.ymdhms.year
2015
"""
name = "ymdhms"
def _check_val_type(self, val1, val2):
"""
This checks inputs for the YMDHMS format.
        It is a bit more complex than most format checkers because of the flexible
input that is allowed. Also, it actually coerces ``val1`` into an appropriate
dict of ndarrays that can be used easily by ``set_jds()``. This is useful
because it makes it easy to get default values in that routine.
Parameters
----------
val1 : ndarray or None
val2 : ndarray or None
Returns
-------
val1_as_dict, val2 : val1 as dict or None, val2 is always None
"""
if val2 is not None:
raise ValueError("val2 must be None for ymdhms format")
ymdhms = ["year", "month", "day", "hour", "minute", "second"]
if val1.dtype.names:
# Convert to a dict of ndarray
val1_as_dict = {name: val1[name] for name in val1.dtype.names}
elif val1.shape == (0,):
# Input was empty list [], so set to None and set_jds will handle this
return None, None
elif (
val1.dtype.kind == "O"
and val1.shape == ()
and isinstance(val1.item(), dict)
):
# Code gets here for input as a dict. The dict input
# can be either scalar values or N-d arrays.
# Extract the item (which is a dict) and broadcast values to the
# same shape here.
names = val1.item().keys()
values = val1.item().values()
val1_as_dict = {
name: value for name, value in zip(names, np.broadcast_arrays(*values))
}
else:
raise ValueError("input must be dict or table-like")
# Check that the key names now are good.
names = val1_as_dict.keys()
required_names = ymdhms[: len(names)]
def comma_repr(vals):
return ", ".join(repr(val) for val in vals)
bad_names = set(names) - set(ymdhms)
if bad_names:
raise ValueError(
f"{comma_repr(bad_names)} not allowed as YMDHMS key name(s)"
)
if set(names) != set(required_names):
raise ValueError(
f"for {len(names)} input key names "
f"you must supply {comma_repr(required_names)}"
)
return val1_as_dict, val2
def set_jds(self, val1, val2):
if val1 is None:
# Input was empty list []
jd1 = np.array([], dtype=np.float64)
jd2 = np.array([], dtype=np.float64)
else:
jd1, jd2 = erfa.dtf2d(
self.scale.upper().encode("ascii"),
val1["year"],
val1.get("month", 1),
val1.get("day", 1),
val1.get("hour", 0),
val1.get("minute", 0),
val1.get("second", 0),
)
self.jd1, self.jd2 = day_frac(jd1, jd2)
@property
def value(self):
scale = self.scale.upper().encode("ascii")
iys, ims, ids, ihmsfs = erfa.d2dtf(scale, 9, self.jd1, self.jd2_filled)
out = np.empty(
self.jd1.shape,
dtype=[
("year", "i4"),
("month", "i4"),
("day", "i4"),
("hour", "i4"),
("minute", "i4"),
("second", "f8"),
],
)
out["year"] = iys
out["month"] = ims
out["day"] = ids
out["hour"] = ihmsfs["h"]
out["minute"] = ihmsfs["m"]
out["second"] = ihmsfs["s"] + ihmsfs["f"] * 10 ** (-9)
out = out.view(np.recarray)
return self.mask_if_needed(out)
class TimezoneInfo(datetime.tzinfo):
"""
Subclass of the `~datetime.tzinfo` object, used in the
to_datetime method to specify timezones.
It may be safer in most cases to use a timezone database package like
pytz rather than defining your own timezones - this class is mainly
a workaround for users without pytz.
"""
@u.quantity_input(utc_offset=u.day, dst=u.day)
def __init__(self, utc_offset=0 * u.day, dst=0 * u.day, tzname=None):
"""
Parameters
----------
utc_offset : `~astropy.units.Quantity`, optional
Offset from UTC in days. Defaults to zero.
dst : `~astropy.units.Quantity`, optional
Daylight Savings Time offset in days. Defaults to zero
(no daylight savings).
tzname : str or None, optional
Name of timezone
Examples
--------
>>> from datetime import datetime
>>> from astropy.time import TimezoneInfo # Specifies a timezone
>>> import astropy.units as u
>>> utc = TimezoneInfo() # Defaults to UTC
>>> utc_plus_one_hour = TimezoneInfo(utc_offset=1*u.hour) # UTC+1
>>> dt_aware = datetime(2000, 1, 1, 0, 0, 0, tzinfo=utc_plus_one_hour)
>>> print(dt_aware)
2000-01-01 00:00:00+01:00
>>> print(dt_aware.astimezone(utc))
1999-12-31 23:00:00+00:00
"""
if utc_offset == 0 and dst == 0 and tzname is None:
tzname = "UTC"
self._utcoffset = datetime.timedelta(utc_offset.to_value(u.day))
self._tzname = tzname
self._dst = datetime.timedelta(dst.to_value(u.day))
def utcoffset(self, dt):
return self._utcoffset
def tzname(self, dt):
return str(self._tzname)
def dst(self, dt):
return self._dst
class TimeString(TimeUnique):
"""
Base class for string-like time representations.
This class assumes that anything following the last decimal point to the
right is a fraction of a second.
**Fast C-based parser**
Time format classes can take advantage of a fast C-based parser if the times
are represented as fixed-format strings with year, month, day-of-month,
hour, minute, second, OR year, day-of-year, hour, minute, second. This can
be a factor of 20 or more faster than the pure Python parser.
Fixed format means that the components always have the same number of
characters. The Python parser will accept ``2001-9-2`` as a date, but the C
parser would require ``2001-09-02``.
A subclass in this case must define a class attribute ``fast_parser_pars``
which is a `dict` with all of the keys below. An inherited attribute is not
checked, only an attribute in the class ``__dict__``.
- ``delims`` (tuple of int): ASCII code for character at corresponding
``starts`` position (0 => no character)
- ``starts`` (tuple of int): position where component starts (including
      delimiter if present). Use -1 for the month component for formats that
      use day of year.
- ``stops`` (tuple of int): position where component ends. Use -1 to
continue to end of string, or for the month component for formats that use
day of year.
- ``break_allowed`` (tuple of int): if true (1) then the time string can
legally end just before the corresponding component (e.g. "2000-01-01"
is a valid time but "2000-01-01 12" is not).
- ``has_day_of_year`` (int): 0 if dates have year, month, day; 1 if year,
day-of-year
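    Examples
    --------
    A sketch of ``fast_parser_pars`` for the fixed layout
    "yyyy-mm-dd hh:mm:ss.fff" (these values mirror `TimeISO` below and are
    illustrative only)::
        fast_parser_pars = dict(
            delims=(0, ord("-"), ord("-"), ord(" "), ord(":"), ord(":"), ord(".")),
            starts=(0, 4, 7, 10, 13, 16, 19),
            stops=(3, 6, 9, 12, 15, 18, -1),
            break_allowed=(0, 0, 0, 1, 0, 1, 1),
            has_day_of_year=0,
        )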
"""
def __init_subclass__(cls, **kwargs):
if "fast_parser_pars" in cls.__dict__:
fpp = cls.fast_parser_pars
fpp = np.array(
list(
zip(
map(chr, fpp["delims"]),
fpp["starts"],
fpp["stops"],
fpp["break_allowed"],
)
),
_parse_times.dt_pars,
)
if cls.fast_parser_pars["has_day_of_year"]:
fpp["start"][1] = fpp["stop"][1] = -1
cls._fast_parser = _parse_times.create_parser(fpp)
super().__init_subclass__(**kwargs)
def _check_val_type(self, val1, val2):
if val1.dtype.kind not in ("S", "U") and val1.size:
raise TypeError(f"Input values for {self.name} class must be strings")
if val2 is not None:
raise ValueError(
f"{self.name} objects do not accept a val2 but you provided {val2}"
)
return val1, None
def parse_string(self, timestr, subfmts):
"""Read time from a single string, using a set of possible formats."""
# Datetime components required for conversion to JD by ERFA, along
# with the default values.
components = ("year", "mon", "mday", "hour", "min", "sec")
defaults = (None, 1, 1, 0, 0, 0)
# Assume that anything following "." on the right side is a
# floating fraction of a second.
try:
idot = timestr.rindex(".")
except Exception:
timestr_has_fractional_digits = False
else:
timestr, fracsec = timestr[:idot], timestr[idot:]
fracsec = float(fracsec)
timestr_has_fractional_digits = True
for _, strptime_fmt_or_regex, _ in subfmts:
if isinstance(strptime_fmt_or_regex, str):
subfmt_has_sec = "%S" in strptime_fmt_or_regex
try:
tm = time.strptime(timestr, strptime_fmt_or_regex)
except ValueError:
continue
else:
vals = [getattr(tm, "tm_" + component) for component in components]
else:
tm = re.match(strptime_fmt_or_regex, timestr)
if tm is None:
continue
tm = tm.groupdict()
vals = [
int(tm.get(component, default))
for component, default in zip(components, defaults)
]
subfmt_has_sec = "sec" in tm
# Add fractional seconds if they were in the original time string
# and the subformat has seconds. A time like "2022-08-01.123" will
# never pass this for a format like ISO and will raise a parsing
# exception.
if timestr_has_fractional_digits:
if subfmt_has_sec:
vals[-1] = vals[-1] + fracsec
else:
continue
return vals
else:
raise ValueError(f"Time {timestr} does not match {self.name} format")
def set_jds(self, val1, val2):
"""Parse the time strings contained in val1 and set jd1, jd2."""
# If specific input subformat is required then use the Python parser.
        # Also do this if the Time format class does not define a fast parser
        # (no `_fast_parser` in its __dict__) or if the fast parser is entirely
        # disabled. Note that `conf.use_fast_parser` is ignored for format
        # classes that don't have a fast parser.
if (
self.in_subfmt != "*"
or "_fast_parser" not in self.__class__.__dict__
or conf.use_fast_parser == "False"
):
jd1, jd2 = self.get_jds_python(val1, val2)
else:
try:
jd1, jd2 = self.get_jds_fast(val1, val2)
except Exception:
# Fall through to the Python parser unless fast is forced.
if conf.use_fast_parser == "force":
raise
else:
jd1, jd2 = self.get_jds_python(val1, val2)
self.jd1 = jd1
self.jd2 = jd2
def get_jds_python(self, val1, val2):
"""Parse the time strings contained in val1 and get jd1, jd2."""
# Select subformats based on current self.in_subfmt
subfmts = self._select_subfmts(self.in_subfmt)
# Be liberal in what we accept: convert bytes to ascii.
# Here .item() is needed for arrays with entries of unequal length,
# to strip trailing 0 bytes.
to_string = (
str if val1.dtype.kind == "U" else lambda x: str(x.item(), encoding="ascii")
)
iterator = np.nditer(
[val1, None, None, None, None, None, None],
flags=["zerosize_ok"],
op_dtypes=[None] + 5 * [np.intc] + [np.double],
)
for val, iy, im, id, ihr, imin, dsec in iterator:
val = to_string(val)
(
iy[...],
im[...],
id[...],
ihr[...],
imin[...],
dsec[...],
) = self.parse_string(val, subfmts)
jd1, jd2 = erfa.dtf2d(
self.scale.upper().encode("ascii"), *iterator.operands[1:]
)
jd1, jd2 = day_frac(jd1, jd2)
return jd1, jd2
def get_jds_fast(self, val1, val2):
"""Use fast C parser to parse time strings in val1 and get jd1, jd2."""
        # Handle bytes or str input and convert to uint8. We need to use the
        # dtype _parse_times.dt_u1 instead of uint8, since otherwise it is
# not possible to create a gufunc with structured dtype output.
# See note about ufunc type resolver in pyerfa/erfa/ufunc.c.templ.
if val1.dtype.kind == "U":
# Note: val1.astype('S') is *very* slow, so we check ourselves
# that the input is pure ASCII.
val1_uint32 = val1.view((np.uint32, val1.dtype.itemsize // 4))
if np.any(val1_uint32 > 127):
raise ValueError("input is not pure ASCII")
# It might be possible to avoid making a copy via astype with
# cleverness in parse_times.c but leave that for another day.
chars = val1_uint32.astype(_parse_times.dt_u1)
else:
chars = val1.view((_parse_times.dt_u1, val1.dtype.itemsize))
# Call the fast parsing ufunc.
time_struct = self._fast_parser(chars)
jd1, jd2 = erfa.dtf2d(
self.scale.upper().encode("ascii"),
time_struct["year"],
time_struct["month"],
time_struct["day"],
time_struct["hour"],
time_struct["minute"],
time_struct["second"],
)
return day_frac(jd1, jd2)
def str_kwargs(self):
"""
Generator that yields a dict of values corresponding to the
calendar date and time for the internal JD values.
"""
scale = (self.scale.upper().encode("ascii"),)
iys, ims, ids, ihmsfs = erfa.d2dtf(
scale, self.precision, self.jd1, self.jd2_filled
)
# Get the str_fmt element of the first allowed output subformat
_, _, str_fmt = self._select_subfmts(self.out_subfmt)[0]
yday = None
has_yday = "{yday:" in str_fmt
ihrs = ihmsfs["h"]
imins = ihmsfs["m"]
isecs = ihmsfs["s"]
ifracs = ihmsfs["f"]
for iy, im, id, ihr, imin, isec, ifracsec in np.nditer(
[iys, ims, ids, ihrs, imins, isecs, ifracs], flags=["zerosize_ok"]
):
if has_yday:
yday = datetime.datetime(iy, im, id).timetuple().tm_yday
yield {
"year": int(iy),
"mon": int(im),
"day": int(id),
"hour": int(ihr),
"min": int(imin),
"sec": int(isec),
"fracsec": int(ifracsec),
"yday": yday,
}
def format_string(self, str_fmt, **kwargs):
"""Write time to a string using a given format.
By default, just interprets str_fmt as a format string,
but subclasses can add to this.
"""
return str_fmt.format(**kwargs)
@property
def value(self):
# Select the first available subformat based on current
# self.out_subfmt
subfmts = self._select_subfmts(self.out_subfmt)
_, _, str_fmt = subfmts[0]
# TODO: fix this ugly hack
if self.precision > 0 and str_fmt.endswith("{sec:02d}"):
str_fmt += ".{fracsec:0" + str(self.precision) + "d}"
# Try to optimize this later. Can't pre-allocate because length of
# output could change, e.g. year rolls from 999 to 1000.
outs = []
for kwargs in self.str_kwargs():
outs.append(str(self.format_string(str_fmt, **kwargs)))
return np.array(outs).reshape(self.jd1.shape)
class TimeISO(TimeString):
"""
ISO 8601 compliant date-time format "YYYY-MM-DD HH:MM:SS.sss...".
For example, 2000-01-01 00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
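    Examples
    --------
    A minimal round trip with the default precision of 3:
    >>> from astropy.time import Time
    >>> Time("2000-01-01 12:00:00", format="iso").iso
    '2000-01-01 12:00:00.000'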
"""
name = "iso"
subfmts = (
(
"date_hms",
"%Y-%m-%d %H:%M:%S",
# XXX To Do - use strftime for output ??
"{year:d}-{mon:02d}-{day:02d} {hour:02d}:{min:02d}:{sec:02d}",
),
(
"date_hm",
"%Y-%m-%d %H:%M",
"{year:d}-{mon:02d}-{day:02d} {hour:02d}:{min:02d}",
),
("date", "%Y-%m-%d", "{year:d}-{mon:02d}-{day:02d}"),
)
# Define positions and starting delimiter for year, month, day, hour,
# minute, seconds components of an ISO time. This is used by the fast
# C-parser parse_ymdhms_times()
#
# "2000-01-12 13:14:15.678"
# 01234567890123456789012
# yyyy-mm-dd hh:mm:ss.fff
# Parsed as ('yyyy', '-mm', '-dd', ' hh', ':mm', ':ss', '.fff')
fast_parser_pars = dict(
delims=(0, ord("-"), ord("-"), ord(" "), ord(":"), ord(":"), ord(".")),
starts=(0, 4, 7, 10, 13, 16, 19),
stops=(3, 6, 9, 12, 15, 18, -1),
# Break allowed *before*
# y m d h m s f
break_allowed=(0, 0, 0, 1, 0, 1, 1),
has_day_of_year=0,
)
def parse_string(self, timestr, subfmts):
# Handle trailing 'Z' for UTC time
if timestr.endswith("Z"):
if self.scale != "utc":
raise ValueError("Time input terminating in 'Z' must have scale='UTC'")
timestr = timestr[:-1]
return super().parse_string(timestr, subfmts)
class TimeISOT(TimeISO):
"""
ISO 8601 compliant date-time format "YYYY-MM-DDTHH:MM:SS.sss...".
This is the same as TimeISO except for a "T" instead of space between
the date and time.
For example, 2000-01-01T00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
"""
name = "isot"
subfmts = (
(
"date_hms",
"%Y-%m-%dT%H:%M:%S",
"{year:d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}",
),
(
"date_hm",
"%Y-%m-%dT%H:%M",
"{year:d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}",
),
("date", "%Y-%m-%d", "{year:d}-{mon:02d}-{day:02d}"),
)
# See TimeISO for explanation
fast_parser_pars = dict(
delims=(0, ord("-"), ord("-"), ord("T"), ord(":"), ord(":"), ord(".")),
starts=(0, 4, 7, 10, 13, 16, 19),
stops=(3, 6, 9, 12, 15, 18, -1),
# Break allowed *before*
# y m d h m s f
break_allowed=(0, 0, 0, 1, 0, 1, 1),
has_day_of_year=0,
)
class TimeYearDayTime(TimeISO):
"""
Year, day-of-year and time as "YYYY:DOY:HH:MM:SS.sss...".
The day-of-year (DOY) goes from 001 to 365 (366 in leap years).
For example, 2000:001:00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
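    Examples
    --------
    A minimal round trip with the default precision of 3:
    >>> from astropy.time import Time
    >>> Time("2000:001:00:00:00", format="yday").yday
    '2000:001:00:00:00.000'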
"""
name = "yday"
subfmts = (
(
"date_hms",
"%Y:%j:%H:%M:%S",
"{year:d}:{yday:03d}:{hour:02d}:{min:02d}:{sec:02d}",
),
("date_hm", "%Y:%j:%H:%M", "{year:d}:{yday:03d}:{hour:02d}:{min:02d}"),
("date", "%Y:%j", "{year:d}:{yday:03d}"),
)
    # Define positions and starting delimiter for the year, day-of-year,
    # hour, minute and seconds components of a yday time. This is used by
    # the fast C-parser parse_ymdhms_times()
#
# "2000:123:13:14:15.678"
# 012345678901234567890
# yyyy:ddd:hh:mm:ss.fff
# Parsed as ('yyyy', ':ddd', ':hh', ':mm', ':ss', '.fff')
#
# delims: character at corresponding `starts` position (0 => no character)
# starts: position where component starts (including delimiter if present)
# stops: position where component ends (-1 => continue to end of string)
fast_parser_pars = dict(
delims=(0, 0, ord(":"), ord(":"), ord(":"), ord(":"), ord(".")),
starts=(0, -1, 4, 8, 11, 14, 17),
stops=(3, -1, 7, 10, 13, 16, -1),
# Break allowed before:
# y m d h m s f
break_allowed=(0, 0, 0, 1, 0, 1, 1),
has_day_of_year=1,
)
class TimeDatetime64(TimeISOT):
name = "datetime64"
def _check_val_type(self, val1, val2):
if not val1.dtype.kind == "M":
if val1.size > 0:
raise TypeError(
f"Input values for {self.name} class must be datetime64 objects"
)
else:
val1 = np.array([], "datetime64[D]")
if val2 is not None:
raise ValueError(
f"{self.name} objects do not accept a val2 but you provided {val2}"
)
return val1, None
def set_jds(self, val1, val2):
# If there are any masked values in the ``val1`` datetime64 array
# ('NaT') then stub them with a valid date so downstream parse_string
# will work. The value under the mask is arbitrary but a "modern" date
# is good.
mask = np.isnat(val1)
masked = np.any(mask)
if masked:
val1 = val1.copy()
val1[mask] = "2000"
# Make sure M(onth) and Y(ear) dates will parse and convert to bytestring
if val1.dtype.name in ["datetime64[M]", "datetime64[Y]"]:
val1 = val1.astype("datetime64[D]")
val1 = val1.astype("S")
# Standard ISO string parsing now
super().set_jds(val1, val2)
# Finally apply mask if necessary
if masked:
self.jd2[mask] = np.nan
@property
def value(self):
precision = self.precision
self.precision = 9
ret = super().value
self.precision = precision
return ret.astype("datetime64")
class TimeFITS(TimeString):
"""
FITS format: "[±Y]YYYY-MM-DD[THH:MM:SS[.sss]]".
ISOT but can give signed five-digit year (mostly for negative years);
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date': date
- 'longdate_hms': as 'date_hms', but with signed 5-digit year
- 'longdate': as 'date', but with signed 5-digit year
See Rots et al., 2015, A&A 574:A36 (arXiv:1409.7583).
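    Examples
    --------
    A minimal round trip (years outside 0-9999 switch to the signed "long"
    subformats automatically):
    >>> from astropy.time import Time
    >>> Time("2000-01-01T00:00:00", format="fits").fits
    '2000-01-01T00:00:00.000'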
"""
name = "fits"
subfmts = (
(
"date_hms",
(
r"(?P<year>\d{4})-(?P<mon>\d\d)-(?P<mday>\d\d)T"
r"(?P<hour>\d\d):(?P<min>\d\d):(?P<sec>\d\d(\.\d*)?)"
),
"{year:04d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}",
),
(
"date",
r"(?P<year>\d{4})-(?P<mon>\d\d)-(?P<mday>\d\d)",
"{year:04d}-{mon:02d}-{day:02d}",
),
(
"longdate_hms",
(
r"(?P<year>[+-]\d{5})-(?P<mon>\d\d)-(?P<mday>\d\d)T"
r"(?P<hour>\d\d):(?P<min>\d\d):(?P<sec>\d\d(\.\d*)?)"
),
"{year:+06d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}",
),
(
"longdate",
r"(?P<year>[+-]\d{5})-(?P<mon>\d\d)-(?P<mday>\d\d)",
"{year:+06d}-{mon:02d}-{day:02d}",
),
)
# Add the regex that parses the scale and possible realization.
# Support for this is deprecated. Read old style but no longer write
# in this style.
subfmts = tuple(
(
subfmt[0],
subfmt[1] + r"(\((?P<scale>\w+)(\((?P<realization>\w+)\))?\))?",
subfmt[2],
)
for subfmt in subfmts
)
def parse_string(self, timestr, subfmts):
"""Read time and deprecated scale if present."""
# Try parsing with any of the allowed sub-formats.
for _, regex, _ in subfmts:
tm = re.match(regex, timestr)
if tm:
break
else:
raise ValueError(f"Time {timestr} does not match {self.name} format")
tm = tm.groupdict()
# Scale and realization are deprecated and strings in this form
# are no longer created. We issue a warning but still use the value.
if tm["scale"] is not None:
warnings.warn(
"FITS time strings should no longer have embedded time scale.",
AstropyDeprecationWarning,
)
# If a scale was given, translate from a possible deprecated
# timescale identifier to the scale used by Time.
fits_scale = tm["scale"].upper()
scale = FITS_DEPRECATED_SCALES.get(fits_scale, fits_scale.lower())
if scale not in TIME_SCALES:
raise ValueError(
f"Scale {scale!r} is not in the allowed scales "
f"{sorted(TIME_SCALES)}"
)
# If no scale was given in the initialiser, set the scale to
# that given in the string. Realization is ignored
# and is only supported to allow old-style strings to be
# parsed.
if self._scale is None:
self._scale = scale
if scale != self.scale:
raise ValueError(
f"Input strings for {self.name} class must all "
"have consistent time scales."
)
return [
int(tm["year"]),
int(tm["mon"]),
int(tm["mday"]),
int(tm.get("hour", 0)),
int(tm.get("min", 0)),
float(tm.get("sec", 0.0)),
]
@property
def value(self):
"""Convert times to strings, using signed 5 digit if necessary."""
if "long" not in self.out_subfmt:
# If we have times before year 0 or after year 9999, we can
# output only in a "long" format, using signed 5-digit years.
jd = self.jd1 + self.jd2
if jd.size and (jd.min() < 1721425.5 or jd.max() >= 5373484.5):
self.out_subfmt = "long" + self.out_subfmt
return super().value
class TimeEpochDate(TimeNumeric):
"""
    Base class to support floating point Besselian and Julian epoch dates.
"""
_default_scale = "tt" # As of astropy 3.2, this is no longer 'utc'.
def set_jds(self, val1, val2):
self._check_scale(self._scale) # validate scale.
epoch_to_jd = getattr(erfa, self.epoch_to_jd)
jd1, jd2 = epoch_to_jd(val1 + val2)
self.jd1, self.jd2 = day_frac(jd1, jd2)
def to_value(self, **kwargs):
jd_to_epoch = getattr(erfa, self.jd_to_epoch)
value = jd_to_epoch(self.jd1, self.jd2)
return super().to_value(jd1=value, jd2=np.float64(0.0), **kwargs)
value = property(to_value)
class TimeBesselianEpoch(TimeEpochDate):
"""Besselian Epoch year as floating point value(s) like 1950.0."""
name = "byear"
epoch_to_jd = "epb2jd"
jd_to_epoch = "epb"
def _check_val_type(self, val1, val2):
"""Input value validation, typically overridden by derived classes."""
if hasattr(val1, "to") and hasattr(val1, "unit") and val1.unit is not None:
raise ValueError(
"Cannot use Quantities for 'byear' format, as the interpretation "
"would be ambiguous. Use float with Besselian year instead."
)
# FIXME: is val2 really okay here?
return super()._check_val_type(val1, val2)
class TimeJulianEpoch(TimeEpochDate):
"""Julian Epoch year as floating point value(s) like 2000.0."""
name = "jyear"
unit = erfa.DJY # 365.25, the Julian year, for conversion to quantities
epoch_to_jd = "epj2jd"
jd_to_epoch = "epj"
class TimeEpochDateString(TimeString):
"""
Base class to support string Besselian and Julian epoch dates
such as 'B1950.0' or 'J2000.0' respectively.
"""
_default_scale = "tt" # As of astropy 3.2, this is no longer 'utc'.
def set_jds(self, val1, val2):
epoch_prefix = self.epoch_prefix
# Be liberal in what we accept: convert bytes to ascii.
to_string = (
str if val1.dtype.kind == "U" else lambda x: str(x.item(), encoding="ascii")
)
iterator = np.nditer(
[val1, None], op_dtypes=[val1.dtype, np.double], flags=["zerosize_ok"]
)
for val, years in iterator:
try:
time_str = to_string(val)
epoch_type, year_str = time_str[0], time_str[1:]
year = float(year_str)
if epoch_type.upper() != epoch_prefix:
raise ValueError
except (IndexError, ValueError, UnicodeEncodeError):
raise ValueError(f"Time {val} does not match {self.name} format")
else:
years[...] = year
self._check_scale(self._scale) # validate scale.
epoch_to_jd = getattr(erfa, self.epoch_to_jd)
jd1, jd2 = epoch_to_jd(iterator.operands[-1])
self.jd1, self.jd2 = day_frac(jd1, jd2)
@property
def value(self):
jd_to_epoch = getattr(erfa, self.jd_to_epoch)
years = jd_to_epoch(self.jd1, self.jd2)
# Use old-style format since it is a factor of 2 faster
str_fmt = self.epoch_prefix + "%." + str(self.precision) + "f"
outs = [str_fmt % year for year in years.flat]
return np.array(outs).reshape(self.jd1.shape)
class TimeBesselianEpochString(TimeEpochDateString):
"""Besselian Epoch year as string value(s) like 'B1950.0'."""
name = "byear_str"
epoch_to_jd = "epb2jd"
jd_to_epoch = "epb"
epoch_prefix = "B"
class TimeJulianEpochString(TimeEpochDateString):
"""Julian Epoch year as string value(s) like 'J2000.0'."""
name = "jyear_str"
epoch_to_jd = "epj2jd"
jd_to_epoch = "epj"
epoch_prefix = "J"
class TimeDeltaFormat(TimeFormat):
"""Base class for time delta representations."""
_registry = TIME_DELTA_FORMATS
def _check_scale(self, scale):
"""
Check that the scale is in the allowed list of scales, or is `None`.
"""
if scale is not None and scale not in TIME_DELTA_SCALES:
raise ScaleValueError(
f"Scale value '{scale}' not in allowed values {TIME_DELTA_SCALES}"
)
return scale
class TimeDeltaNumeric(TimeDeltaFormat, TimeNumeric):
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
self.jd1, self.jd2 = day_frac(val1, val2, divisor=1.0 / self.unit)
def to_value(self, **kwargs):
# Note that 1/unit is always exactly representable, so the
# following multiplications are exact.
factor = 1.0 / self.unit
jd1 = self.jd1 * factor
jd2 = self.jd2 * factor
return super().to_value(jd1=jd1, jd2=jd2, **kwargs)
value = property(to_value)
class TimeDeltaSec(TimeDeltaNumeric):
"""Time delta in SI seconds."""
name = "sec"
unit = 1.0 / erfa.DAYSEC # for quantity input
class TimeDeltaJD(TimeDeltaNumeric):
"""Time delta in Julian days (86400 SI seconds)."""
name = "jd"
unit = 1.0
class TimeDeltaDatetime(TimeDeltaFormat, TimeUnique):
"""Time delta in datetime.timedelta."""
name = "datetime"
def _check_val_type(self, val1, val2):
if not all(isinstance(val, datetime.timedelta) for val in val1.flat):
raise TypeError(
f"Input values for {self.name} class must be datetime.timedelta objects"
)
if val2 is not None:
raise ValueError(
f"{self.name} objects do not accept a val2 but you provided {val2}"
)
return val1, None
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
iterator = np.nditer(
[val1, None, None],
flags=["refs_ok", "zerosize_ok"],
op_dtypes=[None, np.double, np.double],
)
day = datetime.timedelta(days=1)
for val, jd1, jd2 in iterator:
jd1[...], other = divmod(val.item(), day)
jd2[...] = other / day
self.jd1, self.jd2 = day_frac(iterator.operands[-2], iterator.operands[-1])
@property
def value(self):
iterator = np.nditer(
[self.jd1, self.jd2, None],
flags=["refs_ok", "zerosize_ok"],
op_dtypes=[None, None, object],
)
for jd1, jd2, out in iterator:
jd1_, jd2_ = day_frac(jd1, jd2)
out[...] = datetime.timedelta(days=jd1_, microseconds=jd2_ * 86400 * 1e6)
return self.mask_if_needed(iterator.operands[-1])
def _validate_jd_for_storage(jd):
if isinstance(jd, (float, int)):
return np.array(jd, dtype=np.float_)
if isinstance(jd, np.generic) and (
jd.dtype.kind == "f" and jd.dtype.itemsize <= 8 or jd.dtype.kind in "iu"
):
return np.array(jd, dtype=np.float_)
elif isinstance(jd, np.ndarray) and jd.dtype.kind == "f" and jd.dtype.itemsize == 8:
return jd
else:
raise TypeError(
"JD values must be arrays (possibly zero-dimensional) "
f"of floats but we got {jd!r} of type {type(jd)}"
)
def _broadcast_writeable(jd1, jd2):
if jd1.shape == jd2.shape:
return jd1, jd2
# When using broadcast_arrays, *both* are flagged with
# warn-on-write, even the one that wasn't modified, and
# require "C" only clears the flag if it actually copied
# anything.
shape = np.broadcast(jd1, jd2).shape
if jd1.shape == shape:
s_jd1 = jd1
else:
s_jd1 = np.require(np.broadcast_to(jd1, shape), requirements=["C", "W"])
if jd2.shape == shape:
s_jd2 = jd2
else:
s_jd2 = np.require(np.broadcast_to(jd2, shape), requirements=["C", "W"])
return s_jd1, s_jd2
# Import symbols from core.py that are used in this module. This succeeds
# because __init__.py imports formats.py just before core.py.
from .core import TIME_DELTA_SCALES, TIME_SCALES, ScaleValueError, Time # noqa: E402
# Licensed under a 3-clause BSD style license - see LICENSE.rst
__all__ = ["quantity_input"]
import inspect
from collections.abc import Sequence
from functools import wraps
from numbers import Number
import numpy as np
from . import _typing as T
from .core import (
Unit,
UnitBase,
UnitsError,
add_enabled_equivalencies,
dimensionless_unscaled,
)
from .physical import PhysicalType, get_physical_type
from .quantity import Quantity
NoneType = type(None)
def _get_allowed_units(targets):
"""
From a list of target units (either as strings or unit objects) and physical
types, return a list of Unit objects.
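    For example (illustrative), both a unit and a physical-type name resolve
    to `~astropy.units.Unit` objects:
    >>> import astropy.units as u
    >>> _get_allowed_units([u.pc, "speed"])  # doctest: +SKIP
    [Unit("pc"), Unit("m / s")]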
"""
allowed_units = []
for target in targets:
try:
unit = Unit(target)
except (TypeError, ValueError):
try:
unit = get_physical_type(target)._unit
except (TypeError, ValueError, KeyError): # KeyError for Enum
raise ValueError(f"Invalid unit or physical type {target!r}.") from None
allowed_units.append(unit)
return allowed_units
def _validate_arg_value(
param_name, func_name, arg, targets, equivalencies, strict_dimensionless=False
):
"""
Validates the object passed in to the wrapped function, ``arg``, with target
unit or physical type, ``target``.
"""
if len(targets) == 0:
return
allowed_units = _get_allowed_units(targets)
# If dimensionless is an allowed unit and the argument is unit-less,
# allow numbers or numpy arrays with numeric dtypes
if (
dimensionless_unscaled in allowed_units
and not strict_dimensionless
and not hasattr(arg, "unit")
):
if isinstance(arg, Number):
return
elif isinstance(arg, np.ndarray) and np.issubdtype(arg.dtype, np.number):
return
for allowed_unit in allowed_units:
try:
if arg.unit.is_equivalent(allowed_unit, equivalencies=equivalencies):
break
except AttributeError: # Either there is no .unit or no .is_equivalent
if hasattr(arg, "unit"):
error_msg = "a 'unit' attribute without an 'is_equivalent' method"
else:
error_msg = "no 'unit' attribute"
raise TypeError(
f"Argument '{param_name}' to function '{func_name}'"
f" has {error_msg}. You should pass in an astropy "
"Quantity instead."
)
else:
error_msg = (
f"Argument '{param_name}' to function '{func_name}' must "
"be in units convertible to"
)
if len(targets) > 1:
targ_names = ", ".join([f"'{str(targ)}'" for targ in targets])
raise UnitsError(f"{error_msg} one of: {targ_names}.")
else:
raise UnitsError(f"{error_msg} '{str(targets[0])}'.")
def _parse_annotation(target):
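    """Parse a single annotation into a unit, physical type, or list thereof.
    Returns the input unchanged for empty annotations, ``False`` for
    annotations that are not unit-related, and recurses into ``Union`` and
    ``Quantity[...]`` type hints. Illustrative behaviour:
    >>> import astropy.units as u
    >>> _parse_annotation(u.arcsec)  # doctest: +SKIP
    Unit("arcsec")
    >>> _parse_annotation(int)  # doctest: +SKIP
    False
    """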
if target in (None, NoneType, inspect._empty):
return target
# check if unit-like
try:
unit = Unit(target)
except (TypeError, ValueError):
try:
ptype = get_physical_type(target)
except (TypeError, ValueError, KeyError): # KeyError for Enum
if isinstance(target, str):
raise ValueError(f"invalid unit or physical type {target!r}.") from None
else:
return ptype
else:
return unit
# could be a type hint
origin = T.get_origin(target)
if origin is T.Union:
return [_parse_annotation(t) for t in T.get_args(target)]
elif origin is not T.Annotated: # can't be Quantity[]
return False
# parse type hint
cls, *annotations = T.get_args(target)
if not issubclass(cls, Quantity) or not annotations:
return False
# get unit from type hint
unit, *rest = annotations
if not isinstance(unit, (UnitBase, PhysicalType)):
return False
return unit
class QuantityInput:
@classmethod
def as_decorator(cls, func=None, **kwargs):
r"""
A decorator for validating the units of arguments to functions.
Unit specifications can be provided as keyword arguments to the
decorator, or by using function annotation syntax. Arguments to the
decorator take precedence over any function annotations present.
A `~astropy.units.UnitsError` will be raised if the unit attribute of
the argument is not equivalent to the unit specified to the decorator or
in the annotation. If the argument has no unit attribute, i.e. it is not
        a Quantity object, a `TypeError` will be raised unless the unit
        specification came from an annotation that is not unit-related. This
        is to allow non-Quantity annotations to pass through.
Where an equivalency is specified in the decorator, the function will be
executed with that equivalency in force.
Notes
-----
The checking of arguments inside variable arguments to a function is not
supported (i.e. \*arg or \**kwargs).
        The original function is accessible by the attribute ``__wrapped__``.
See :func:`functools.wraps` for details.
Examples
--------
.. code-block:: python
import astropy.units as u
@u.quantity_input(myangle=u.arcsec)
def myfunction(myangle):
return myangle**2
.. code-block:: python
import astropy.units as u
@u.quantity_input
def myfunction(myangle: u.arcsec):
return myangle**2
Or using a unit-aware Quantity annotation.
.. code-block:: python
@u.quantity_input
def myfunction(myangle: u.Quantity[u.arcsec]):
return myangle**2
Also you can specify a return value annotation, which will
cause the function to always return a `~astropy.units.Quantity` in that
unit.
.. code-block:: python
import astropy.units as u
@u.quantity_input
def myfunction(myangle: u.arcsec) -> u.deg**2:
return myangle**2
Using equivalencies::
import astropy.units as u
@u.quantity_input(myenergy=u.eV, equivalencies=u.mass_energy())
def myfunction(myenergy):
return myenergy**2
"""
self = cls(**kwargs)
if func is not None and not kwargs:
return self(func)
else:
return self
def __init__(self, func=None, strict_dimensionless=False, **kwargs):
self.equivalencies = kwargs.pop("equivalencies", [])
self.decorator_kwargs = kwargs
self.strict_dimensionless = strict_dimensionless
def __call__(self, wrapped_function):
# Extract the function signature for the function we are wrapping.
wrapped_signature = inspect.signature(wrapped_function)
# Define a new function to return in place of the wrapped one
@wraps(wrapped_function)
def wrapper(*func_args, **func_kwargs):
# Bind the arguments to our new function to the signature of the original.
bound_args = wrapped_signature.bind(*func_args, **func_kwargs)
# Iterate through the parameters of the original signature
for param in wrapped_signature.parameters.values():
# We do not support variable arguments (*args, **kwargs)
if param.kind in (
inspect.Parameter.VAR_KEYWORD,
inspect.Parameter.VAR_POSITIONAL,
):
continue
# Catch the (never triggered) case where bind relied on a default value.
if (
param.name not in bound_args.arguments
and param.default is not param.empty
):
bound_args.arguments[param.name] = param.default
# Get the value of this parameter (argument to new function)
arg = bound_args.arguments[param.name]
# Get target unit or physical type, either from decorator kwargs
# or annotations
if param.name in self.decorator_kwargs:
targets = self.decorator_kwargs[param.name]
is_annotation = False
else:
targets = param.annotation
is_annotation = True
# parses to unit if it's an annotation (or list thereof)
targets = _parse_annotation(targets)
                # If targets is empty, then no target units or physical
                # types were specified, so we can continue to the next arg
if targets is inspect.Parameter.empty:
continue
# If the argument value is None, and the default value is None,
# pass through the None even if there is a target unit
if arg is None and param.default is None:
continue
# Here, we check whether multiple target unit/physical type's
# were specified in the decorator/annotation, or whether a
# single string (unit or physical type) or a Unit object was
# specified
if isinstance(targets, str) or not isinstance(targets, Sequence):
valid_targets = [targets]
# Check for None in the supplied list of allowed units and, if
# present and the passed value is also None, ignore.
elif None in targets or NoneType in targets:
if arg is None:
continue
else:
valid_targets = [t for t in targets if t is not None]
else:
valid_targets = targets
# If we're dealing with an annotation, skip all the targets that
# are not strings or subclasses of Unit. This is to allow
# non unit related annotations to pass through
if is_annotation:
valid_targets = [
t
for t in valid_targets
if isinstance(t, (str, UnitBase, PhysicalType))
]
# Now we loop over the allowed units/physical types and validate
# the value of the argument:
_validate_arg_value(
param.name,
wrapped_function.__name__,
arg,
valid_targets,
self.equivalencies,
self.strict_dimensionless,
)
# Call the original function with any equivalencies in force.
with add_enabled_equivalencies(self.equivalencies):
return_ = wrapped_function(*func_args, **func_kwargs)
# Return
ra = wrapped_signature.return_annotation
valid_empty = (inspect.Signature.empty, None, NoneType, T.NoReturn)
if ra not in valid_empty:
target = (
ra
if T.get_origin(ra) not in (T.Annotated, T.Union)
else _parse_annotation(ra)
)
if isinstance(target, str) or not isinstance(target, Sequence):
target = [target]
valid_targets = [
t for t in target if isinstance(t, (str, UnitBase, PhysicalType))
]
_validate_arg_value(
"return",
wrapped_function.__name__,
return_,
valid_targets,
self.equivalencies,
self.strict_dimensionless,
)
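                # ``<<=`` converts the result to the first valid target unit
                # (in place for a Quantity), so the function always returns a
                # Quantity in the annotated unit.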
if len(valid_targets) > 0:
return_ <<= valid_targets[0]
return return_
return wrapper
quantity_input = QuantityInput.as_decorator
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Colloquially used Imperial units.
These units are available in the `astropy.units.imperial` namespace, but not in the
top-level `astropy.units` namespace, e.g.::
>>> import astropy.units as u
>>> mph = u.imperial.mile / u.hour
>>> mph
Unit("mi / h")
To include them in `~astropy.units.UnitBase.compose` and the results of
`~astropy.units.UnitBase.find_equivalent_units`, do::
>>> import astropy.units as u
>>> u.imperial.enable() # doctest: +SKIP
"""
from . import si
from .core import UnitBase, def_unit
_ns = globals()
###########################################################################
# LENGTH
def_unit(["inch"], 2.54 * si.cm, namespace=_ns, doc="International inch")
def_unit(["ft", "foot"], 12 * inch, namespace=_ns, doc="International foot")
def_unit(["yd", "yard"], 3 * ft, namespace=_ns, doc="International yard")
def_unit(["mi", "mile"], 5280 * ft, namespace=_ns, doc="International mile")
def_unit(["mil", "thou"], 0.001 * inch, namespace=_ns, doc="Thousandth of an inch")
def_unit(["nmi", "nauticalmile", "NM"], 1852 * si.m, namespace=_ns, doc="Nautical mile")
def_unit(["fur", "furlong"], 660 * ft, namespace=_ns, doc="Furlong")
###########################################################################
# AREAS
def_unit(["ac", "acre"], 43560 * ft**2, namespace=_ns, doc="International acre")
###########################################################################
# VOLUMES
def_unit(["gallon"], si.liter / 0.264172052, namespace=_ns, doc="U.S. liquid gallon")
def_unit(["quart"], gallon / 4, namespace=_ns, doc="U.S. liquid quart")
def_unit(["pint"], quart / 2, namespace=_ns, doc="U.S. liquid pint")
def_unit(["cup"], pint / 2, namespace=_ns, doc="U.S. customary cup")
def_unit(
["foz", "fluid_oz", "fluid_ounce"], cup / 8, namespace=_ns, doc="U.S. fluid ounce"
)
def_unit(
["tbsp", "tablespoon"], foz / 2, namespace=_ns, doc="U.S. customary tablespoon"
)
def_unit(["tsp", "teaspoon"], tbsp / 3, namespace=_ns, doc="U.S. customary teaspoon")
###########################################################################
# MASS
def_unit(
["oz", "ounce"],
28.349523125 * si.g,
namespace=_ns,
doc="International avoirdupois ounce: mass",
)
def_unit(
["lb", "lbm", "pound"],
16 * oz,
namespace=_ns,
doc="International avoirdupois pound: mass",
)
def_unit(
["st", "stone"], 14 * lb, namespace=_ns, doc="International avoirdupois stone: mass"
)
def_unit(["ton"], 2000 * lb, namespace=_ns, doc="International avoirdupois ton: mass")
def_unit(["slug"], 32.174049 * lb, namespace=_ns, doc="slug: mass")
###########################################################################
# SPEED
def_unit(
["kn", "kt", "knot", "NMPH"],
nmi / si.h,
namespace=_ns,
doc="nautical unit of speed: 1 nmi per hour",
)
###########################################################################
# FORCE
def_unit("lbf", slug * ft * si.s**-2, namespace=_ns, doc="Pound: force")
def_unit(["kip", "kilopound"], 1000 * lbf, namespace=_ns, doc="Kilopound: force")
##########################################################################
# ENERGY
def_unit(["BTU", "btu"], 1.05505585 * si.kJ, namespace=_ns, doc="British thermal unit")
def_unit(
["cal", "calorie"],
4.184 * si.J,
namespace=_ns,
doc="Thermochemical calorie: pre-SI metric unit of energy",
)
def_unit(
["kcal", "Cal", "Calorie", "kilocal", "kilocalorie"],
1000 * cal,
namespace=_ns,
doc="Calorie: colloquial definition of Calorie",
)
##########################################################################
# PRESSURE
def_unit("psi", lbf * inch**-2, namespace=_ns, doc="Pound per square inch: pressure")
###########################################################################
# POWER
# Imperial units
def_unit(
["hp", "horsepower"],
si.W / 0.00134102209,
namespace=_ns,
doc="Electrical horsepower",
)
###########################################################################
# TEMPERATURE
def_unit(
["deg_F", "Fahrenheit"],
namespace=_ns,
doc="Degrees Fahrenheit",
format={"latex": r"{}^{\circ}F", "unicode": "°F"},
)
def_unit(
["deg_R", "Rankine"],
namespace=_ns,
doc="Rankine scale: absolute scale of thermodynamic temperature",
)
###########################################################################
# CLEANUP
del UnitBase
del def_unit
###########################################################################
# DOCSTRING
if __doc__ is not None:
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
__doc__ += _generate_unit_summary(globals())
def enable():
"""
Enable Imperial units so they appear in results of
`~astropy.units.UnitBase.find_equivalent_units` and
`~astropy.units.UnitBase.compose`.
This may be used with the ``with`` statement to enable Imperial
units only temporarily.
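    Examples
    --------
    A minimal sketch of temporary enabling (output elided):
    >>> import astropy.units as u
    >>> from astropy.units import imperial
    >>> with imperial.enable():  # doctest: +SKIP
    ...     u.m.find_equivalent_units()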
"""
# Local import to avoid cyclical import
# Local import to avoid polluting namespace
import inspect
from .core import add_enabled_units
return add_enabled_units(inspect.getmodule(enable))
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines units used in the CDS format, both the units
defined in `Centre de Données astronomiques de Strasbourg
<http://cds.u-strasbg.fr/>`_ `Standards for Astronomical Catalogues 2.0
<http://vizier.u-strasbg.fr/vizier/doc/catstd-3.2.htx>`_ format and the `complete
set of supported units <https://vizier.u-strasbg.fr/viz-bin/Unit>`_.
This format is used by VOTable up to version 1.2.
These units are not available in the top-level `astropy.units`
namespace. To use these units, you must import the `astropy.units.cds`
module::
>>> from astropy.units import cds
>>> q = 10. * cds.lyr # doctest: +SKIP
To include them in `~astropy.units.UnitBase.compose` and the results of
`~astropy.units.UnitBase.find_equivalent_units`, do::
>>> from astropy.units import cds
>>> cds.enable() # doctest: +SKIP
"""
_ns = globals()
def _initialize_module():
"""Initialize CDS units module."""
# Local imports to avoid polluting top-level namespace
import numpy as np
from astropy import units as u
from astropy.constants import si as _si
from . import core
# The CDS format also supports power-of-2 prefixes as defined here:
# http://physics.nist.gov/cuu/Units/binary.html
prefixes = core.si_prefixes + core.binary_prefixes
# CDS only uses the short prefixes
prefixes = [(short, short, factor) for (short, long, factor) in prefixes]
# The following units are defined in alphabetical order, directly from
# here: https://vizier.u-strasbg.fr/viz-bin/Unit
mapping = [
(["A"], u.A, "Ampere"),
(["a"], u.a, "year", ["P"]),
(["a0"], _si.a0, "Bohr radius"),
(["al"], u.lyr, "Light year", ["c", "d"]),
(["lyr"], u.lyr, "Light year"),
(["alpha"], _si.alpha, "Fine structure constant"),
((["AA", "Å"], ["Angstrom", "Angstroem"]), u.AA, "Angstrom"),
(["arcmin", "arcm"], u.arcminute, "minute of arc"),
(["arcsec", "arcs"], u.arcsecond, "second of arc"),
(["atm"], _si.atm, "atmosphere"),
(["AU", "au"], u.au, "astronomical unit"),
(["bar"], u.bar, "bar"),
(["barn"], u.barn, "barn"),
(["bit"], u.bit, "bit"),
(["byte"], u.byte, "byte"),
(["C"], u.C, "Coulomb"),
(["c"], _si.c, "speed of light", ["p"]),
(["cal"], 4.1854 * u.J, "calorie"),
(["cd"], u.cd, "candela"),
(["ct"], u.ct, "count"),
(["D"], u.D, "Debye (dipole)"),
(["d"], u.d, "Julian day", ["c"]),
((["deg", "°"], ["degree"]), u.degree, "degree"),
(["dyn"], u.dyn, "dyne"),
(["e"], _si.e, "electron charge", ["m"]),
(["eps0"], _si.eps0, "electric constant"),
(["erg"], u.erg, "erg"),
(["eV"], u.eV, "electron volt"),
(["F"], u.F, "Farad"),
(["G"], _si.G, "Gravitation constant"),
(["g"], u.g, "gram"),
(["gauss"], u.G, "Gauss"),
(["geoMass", "Mgeo"], u.M_earth, "Earth mass"),
(["H"], u.H, "Henry"),
(["h"], u.h, "hour", ["p"]),
(["hr"], u.h, "hour"),
(["\\h"], _si.h, "Planck constant"),
(["Hz"], u.Hz, "Hertz"),
(["inch"], 0.0254 * u.m, "inch"),
(["J"], u.J, "Joule"),
(["JD"], u.d, "Julian day", ["M"]),
(["jovMass", "Mjup"], u.M_jup, "Jupiter mass"),
(["Jy"], u.Jy, "Jansky"),
(["K"], u.K, "Kelvin"),
(["k"], _si.k_B, "Boltzmann"),
(["l"], u.l, "litre", ["a"]),
(["lm"], u.lm, "lumen"),
(["Lsun", "solLum"], u.solLum, "solar luminosity"),
(["lx"], u.lx, "lux"),
(["m"], u.m, "meter"),
(["mag"], u.mag, "magnitude"),
(["me"], _si.m_e, "electron mass"),
(["min"], u.minute, "minute"),
(["MJD"], u.d, "Julian day"),
(["mmHg"], 133.322387415 * u.Pa, "millimeter of mercury"),
(["mol"], u.mol, "mole"),
(["mp"], _si.m_p, "proton mass"),
(["Msun", "solMass"], u.solMass, "solar mass"),
((["mu0", "µ0"], []), _si.mu0, "magnetic constant"),
(["muB"], _si.muB, "Bohr magneton"),
(["N"], u.N, "Newton"),
(["Ohm"], u.Ohm, "Ohm"),
(["Pa"], u.Pa, "Pascal"),
(["pc"], u.pc, "parsec"),
(["ph"], u.ph, "photon"),
(["pi"], u.Unit(np.pi), "π"),
(["pix"], u.pix, "pixel"),
(["ppm"], u.Unit(1e-6), "parts per million"),
(["R"], _si.R, "gas constant"),
(["rad"], u.radian, "radian"),
(["Rgeo"], _si.R_earth, "Earth equatorial radius"),
(["Rjup"], _si.R_jup, "Jupiter equatorial radius"),
(["Rsun", "solRad"], u.solRad, "solar radius"),
(["Ry"], u.Ry, "Rydberg"),
(["S"], u.S, "Siemens"),
(["s", "sec"], u.s, "second"),
(["sr"], u.sr, "steradian"),
(["Sun"], u.Sun, "solar unit"),
(["T"], u.T, "Tesla"),
(["t"], 1e3 * u.kg, "metric tonne", ["c"]),
(["u"], _si.u, "atomic mass", ["da", "a"]),
(["V"], u.V, "Volt"),
(["W"], u.W, "Watt"),
(["Wb"], u.Wb, "Weber"),
(["yr"], u.a, "year"),
]
for entry in mapping:
if len(entry) == 3:
names, unit, doc = entry
excludes = []
else:
names, unit, doc, excludes = entry
core.def_unit(
names,
unit,
prefixes=prefixes,
namespace=_ns,
doc=doc,
exclude_prefixes=excludes,
)
core.def_unit(["µas"], u.microarcsecond, doc="microsecond of arc", namespace=_ns)
core.def_unit(["mas"], u.milliarcsecond, doc="millisecond of arc", namespace=_ns)
core.def_unit(
["---", "-"],
u.dimensionless_unscaled,
doc="dimensionless and unscaled",
namespace=_ns,
)
core.def_unit(["%"], u.percent, doc="percent", namespace=_ns)
# The Vizier "standard" defines this in units of "kg s-3", but
# that may not make a whole lot of sense, so here we just define
# it as its own new disconnected unit.
core.def_unit(["Crab"], prefixes=prefixes, namespace=_ns, doc="Crab (X-ray) flux")
_initialize_module()
###########################################################################
# DOCSTRING
if __doc__ is not None:
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
__doc__ += _generate_unit_summary(globals())
def enable():
"""
Enable CDS units so they appear in results of
`~astropy.units.UnitBase.find_equivalent_units` and
`~astropy.units.UnitBase.compose`. This will disable
all of the "default" `astropy.units` units, since there
are some namespace clashes between the two.
This may be used with the ``with`` statement to enable CDS
units only temporarily.
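    Examples
    --------
    A minimal sketch of temporary enabling (within the block the default
    `astropy.units` units are disabled):
    >>> from astropy.units import cds
    >>> with cds.enable():  # doctest: +SKIP
    ...     q = 10.0 * cds.solMass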
"""
# Local imports to avoid cyclical import and polluting namespace
import inspect
from .core import set_enabled_units
return set_enabled_units(inspect.getmodule(enable))
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Core units classes and functions.
"""
import inspect
import operator
import textwrap
import warnings
import numpy as np
from astropy.utils.decorators import lazyproperty
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.misc import isiterable
from . import format as unit_format
from .utils import (
is_effectively_unity,
resolve_fractions,
sanitize_scale,
validate_power,
)
__all__ = [
"UnitsError",
"UnitsWarning",
"UnitConversionError",
"UnitTypeError",
"UnitBase",
"NamedUnit",
"IrreducibleUnit",
"Unit",
"CompositeUnit",
"PrefixUnit",
"UnrecognizedUnit",
"def_unit",
"get_current_unit_registry",
"set_enabled_units",
"add_enabled_units",
"set_enabled_equivalencies",
"add_enabled_equivalencies",
"set_enabled_aliases",
"add_enabled_aliases",
"dimensionless_unscaled",
"one",
]
UNITY = 1.0
def _flatten_units_collection(items):
"""
Given a list of sequences, modules or dictionaries of units, or
single units, return a flat set of all the units found.
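    For example (illustrative), ``_flatten_units_collection([si, {"pc": u.pc}])``
    returns a flat `set` containing ``u.pc`` together with every unit defined
    in the ``si`` module.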
"""
if not isinstance(items, list):
items = [items]
result = set()
for item in items:
if isinstance(item, UnitBase):
result.add(item)
else:
if isinstance(item, dict):
units = item.values()
elif inspect.ismodule(item):
units = vars(item).values()
elif isiterable(item):
units = item
else:
continue
for unit in units:
if isinstance(unit, UnitBase):
result.add(unit)
return result
def _normalize_equivalencies(equivalencies):
"""Normalizes equivalencies ensuring each is a 4-tuple.
The resulting tuple is of the form::
(from_unit, to_unit, forward_func, backward_func)
Parameters
----------
equivalencies : list of equivalency pairs
Raises
------
ValueError if an equivalency cannot be interpreted
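    Examples
    --------
    Shorter entries are padded with identity functions (illustrative):
    >>> from astropy import units as u
    >>> [len(eq) for eq in _normalize_equivalencies([(u.m, u.km)])]
    [4]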
"""
if equivalencies is None:
return []
normalized = []
for i, equiv in enumerate(equivalencies):
if len(equiv) == 2:
funit, tunit = equiv
a = b = lambda x: x
elif len(equiv) == 3:
funit, tunit, a = equiv
b = a
elif len(equiv) == 4:
funit, tunit, a, b = equiv
else:
raise ValueError(f"Invalid equivalence entry {i}: {equiv!r}")
if not (
funit is Unit(funit)
and (tunit is None or tunit is Unit(tunit))
and callable(a)
and callable(b)
):
raise ValueError(f"Invalid equivalence entry {i}: {equiv!r}")
normalized.append((funit, tunit, a, b))
return normalized
class _UnitRegistry:
"""
Manages a registry of the enabled units.
"""
def __init__(self, init=[], equivalencies=[], aliases={}):
if isinstance(init, _UnitRegistry):
# If passed another registry we don't need to rebuild everything.
# but because these are mutable types we don't want to create
# conflicts so everything needs to be copied.
self._equivalencies = init._equivalencies.copy()
self._aliases = init._aliases.copy()
self._all_units = init._all_units.copy()
self._registry = init._registry.copy()
self._non_prefix_units = init._non_prefix_units.copy()
# The physical type is a dictionary containing sets as values.
# All of these must be copied otherwise we could alter the old
# registry.
self._by_physical_type = {
k: v.copy() for k, v in init._by_physical_type.items()
}
else:
self._reset_units()
self._reset_equivalencies()
self._reset_aliases()
self.add_enabled_units(init)
self.add_enabled_equivalencies(equivalencies)
self.add_enabled_aliases(aliases)
def _reset_units(self):
self._all_units = set()
self._non_prefix_units = set()
self._registry = {}
self._by_physical_type = {}
def _reset_equivalencies(self):
self._equivalencies = set()
def _reset_aliases(self):
self._aliases = {}
@property
def registry(self):
return self._registry
@property
def all_units(self):
return self._all_units
@property
def non_prefix_units(self):
return self._non_prefix_units
def set_enabled_units(self, units):
"""
Sets the units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
Parameters
----------
units : list of sequence, dict, or module
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be "enabled" for searching through by
methods like `UnitBase.find_equivalent_units` and
`UnitBase.compose`.
"""
self._reset_units()
return self.add_enabled_units(units)
def add_enabled_units(self, units):
"""
Adds to the set of units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
Parameters
----------
units : list of sequence, dict, or module
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be added to the "enabled" set for
searching through by methods like
`UnitBase.find_equivalent_units` and `UnitBase.compose`.
"""
units = _flatten_units_collection(units)
for unit in units:
# Loop through all of the names first, to ensure all of them
# are new, then add them all as a single "transaction" below.
for st in unit._names:
if st in self._registry and unit != self._registry[st]:
raise ValueError(
f"Object with name {st!r} already exists in namespace. "
"Filter the set of units to avoid name clashes before "
"enabling them."
)
for st in unit._names:
self._registry[st] = unit
self._all_units.add(unit)
if not isinstance(unit, PrefixUnit):
self._non_prefix_units.add(unit)
hash = unit._get_physical_type_id()
self._by_physical_type.setdefault(hash, set()).add(unit)
def get_units_with_physical_type(self, unit):
"""
Get all units in the registry with the same physical type as
the given unit.
Parameters
----------
unit : UnitBase instance
"""
return self._by_physical_type.get(unit._get_physical_type_id(), set())
@property
def equivalencies(self):
return list(self._equivalencies)
def set_enabled_equivalencies(self, equivalencies):
"""
Sets the equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Use with care.
Parameters
----------
equivalencies : list of tuple
List of equivalent pairs, e.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
"""
self._reset_equivalencies()
return self.add_enabled_equivalencies(equivalencies)
def add_enabled_equivalencies(self, equivalencies):
"""
Adds to the set of equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Use with care.
Parameters
----------
equivalencies : list of tuple
List of equivalent pairs, e.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
"""
# pre-normalize list to help catch mistakes
equivalencies = _normalize_equivalencies(equivalencies)
self._equivalencies |= set(equivalencies)
@property
def aliases(self):
return self._aliases
def set_enabled_aliases(self, aliases):
"""
Set aliases for units.
Parameters
----------
aliases : dict of str, Unit
The aliases to set. The keys must be the string aliases, and values
must be the `astropy.units.Unit` that the alias will be mapped to.
Raises
------
ValueError
If the alias already defines a different unit.
"""
self._reset_aliases()
self.add_enabled_aliases(aliases)
def add_enabled_aliases(self, aliases):
"""
Add aliases for units.
Parameters
----------
aliases : dict of str, Unit
The aliases to add. The keys must be the string aliases, and values
must be the `astropy.units.Unit` that the alias will be mapped to.
Raises
------
ValueError
If the alias already defines a different unit.
"""
for alias, unit in aliases.items():
if alias in self._registry and unit != self._registry[alias]:
raise ValueError(
f"{alias} already means {self._registry[alias]}, so "
f"cannot be used as an alias for {unit}."
)
if alias in self._aliases and unit != self._aliases[alias]:
raise ValueError(
f"{alias} already is an alias for {self._aliases[alias]}, so "
f"cannot be used as an alias for {unit}."
)
for alias, unit in aliases.items():
if alias not in self._registry and alias not in self._aliases:
self._aliases[alias] = unit
class _UnitContext:
def __init__(self, init=[], equivalencies=[]):
_unit_registries.append(_UnitRegistry(init=init, equivalencies=equivalencies))
def __enter__(self):
pass
def __exit__(self, type, value, tb):
_unit_registries.pop()
_unit_registries = [_UnitRegistry()]
def get_current_unit_registry():
return _unit_registries[-1]
def set_enabled_units(units):
"""
Sets the units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
This may be used either permanently, or as a context manager using
the ``with`` statement (see example below).
Parameters
----------
units : list of sequence, dict, or module
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be "enabled" for searching through by methods
like `UnitBase.find_equivalent_units` and `UnitBase.compose`.
Examples
--------
>>> from astropy import units as u
>>> with u.set_enabled_units([u.pc]):
... u.m.find_equivalent_units()
...
Primary name | Unit definition | Aliases
[
pc | 3.08568e+16 m | parsec ,
]
>>> u.m.find_equivalent_units()
Primary name | Unit definition | Aliases
[
AU | 1.49598e+11 m | au, astronomical_unit ,
Angstrom | 1e-10 m | AA, angstrom ,
cm | 0.01 m | centimeter ,
earthRad | 6.3781e+06 m | R_earth, Rearth ,
jupiterRad | 7.1492e+07 m | R_jup, Rjup, R_jupiter, Rjupiter ,
lsec | 2.99792e+08 m | lightsecond ,
lyr | 9.46073e+15 m | lightyear ,
m | irreducible | meter ,
micron | 1e-06 m | ,
pc | 3.08568e+16 m | parsec ,
solRad | 6.957e+08 m | R_sun, Rsun ,
]
"""
# get a context with a new registry, using equivalencies of the current one
context = _UnitContext(equivalencies=get_current_unit_registry().equivalencies)
# in this new current registry, enable the units requested
get_current_unit_registry().set_enabled_units(units)
return context
def add_enabled_units(units):
"""
Adds to the set of units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
This may be used either permanently, or as a context manager using
the ``with`` statement (see example below).
Parameters
----------
units : list of sequence, dict, or module
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be added to the "enabled" set for searching
through by methods like `UnitBase.find_equivalent_units` and
`UnitBase.compose`.
Examples
--------
>>> from astropy import units as u
>>> from astropy.units import imperial
>>> with u.add_enabled_units(imperial):
... u.m.find_equivalent_units()
...
Primary name | Unit definition | Aliases
[
AU | 1.49598e+11 m | au, astronomical_unit ,
Angstrom | 1e-10 m | AA, angstrom ,
cm | 0.01 m | centimeter ,
earthRad | 6.3781e+06 m | R_earth, Rearth ,
ft | 0.3048 m | foot ,
fur | 201.168 m | furlong ,
inch | 0.0254 m | ,
jupiterRad | 7.1492e+07 m | R_jup, Rjup, R_jupiter, Rjupiter ,
lsec | 2.99792e+08 m | lightsecond ,
lyr | 9.46073e+15 m | lightyear ,
m | irreducible | meter ,
mi | 1609.34 m | mile ,
micron | 1e-06 m | ,
mil | 2.54e-05 m | thou ,
nmi | 1852 m | nauticalmile, NM ,
pc | 3.08568e+16 m | parsec ,
solRad | 6.957e+08 m | R_sun, Rsun ,
yd | 0.9144 m | yard ,
]
"""
# get a context with a new registry, which is a copy of the current one
context = _UnitContext(get_current_unit_registry())
# in this new current registry, enable the further units requested
get_current_unit_registry().add_enabled_units(units)
return context
def set_enabled_equivalencies(equivalencies):
"""
Sets the equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Use with care.
Parameters
----------
equivalencies : list of tuple
list of equivalent pairs, e.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
Examples
--------
Exponentiation normally requires dimensionless quantities. To avoid
problems with complex phases::
>>> from astropy import units as u
>>> with u.set_enabled_equivalencies(u.dimensionless_angles()):
... phase = 0.5 * u.cycle
... np.exp(1j*phase) # doctest: +FLOAT_CMP
<Quantity -1.+1.2246468e-16j>
"""
# get a context with a new registry, using all units of the current one
context = _UnitContext(get_current_unit_registry())
# in this new current registry, enable the equivalencies requested
get_current_unit_registry().set_enabled_equivalencies(equivalencies)
return context
def add_enabled_equivalencies(equivalencies):
"""
Adds to the equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Since no equivalencies are enabled by default, generally it is recommended
to use `set_enabled_equivalencies`.
Parameters
----------
equivalencies : list of tuple
list of equivalent pairs, e.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
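    Examples
    --------
    To allow angles to be dimensionless, as in `set_enabled_equivalencies`:
    >>> from astropy import units as u
    >>> with u.add_enabled_equivalencies(u.dimensionless_angles()):
    ...     phase = 0.5 * u.cycle
    ...     np.exp(1j*phase)  # doctest: +FLOAT_CMP
    <Quantity -1.+1.2246468e-16j>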
"""
# get a context with a new registry, which is a copy of the current one
context = _UnitContext(get_current_unit_registry())
# in this new current registry, enable the further equivalencies requested
get_current_unit_registry().add_enabled_equivalencies(equivalencies)
return context
def set_enabled_aliases(aliases):
"""
Set aliases for units.
This is useful for handling alternate spellings for units, or
misspelled units in files one is trying to read.
Parameters
----------
aliases : dict of str, Unit
The aliases to set. The keys must be the string aliases, and values
must be the `astropy.units.Unit` that the alias will be mapped to.
Raises
------
ValueError
If the alias already defines a different unit.
Examples
--------
To temporarily allow for a misspelled 'Angstroem' unit::
>>> from astropy import units as u
>>> with u.set_enabled_aliases({'Angstroem': u.Angstrom}):
... print(u.Unit("Angstroem", parse_strict="raise") == u.Angstrom)
True
"""
# get a context with a new registry, which is a copy of the current one
context = _UnitContext(get_current_unit_registry())
    # in this new current registry, set the requested aliases
get_current_unit_registry().set_enabled_aliases(aliases)
return context
def add_enabled_aliases(aliases):
"""
Add aliases for units.
This is useful for handling alternate spellings for units, or
misspelled units in files one is trying to read.
Since no aliases are enabled by default, generally it is recommended
to use `set_enabled_aliases`.
Parameters
----------
aliases : dict of str, Unit
The aliases to add. The keys must be the string aliases, and values
must be the `astropy.units.Unit` that the alias will be mapped to.
Raises
------
ValueError
If the alias already defines a different unit.
Examples
--------
To temporarily allow for a misspelled 'Angstroem' unit::
>>> from astropy import units as u
>>> with u.add_enabled_aliases({'Angstroem': u.Angstrom}):
... print(u.Unit("Angstroem", parse_strict="raise") == u.Angstrom)
True
"""
# get a context with a new registry, which is a copy of the current one
context = _UnitContext(get_current_unit_registry())
    # in this new current registry, add the further aliases requested
get_current_unit_registry().add_enabled_aliases(aliases)
return context
class UnitsError(Exception):
"""
The base class for unit-specific exceptions.
"""
class UnitScaleError(UnitsError, ValueError):
"""
    Used for errors involving scaled units, which are not recognized by
    the FITS format.
    """
class UnitConversionError(UnitsError, ValueError):
"""
Used specifically for errors related to converting between units or
interpreting units in terms of other units.
"""
class UnitTypeError(UnitsError, TypeError):
"""
Used specifically for errors in setting to units not allowed by a class.
E.g., would be raised if the unit of an `~astropy.coordinates.Angle`
instances were set to a non-angular unit.
"""
class UnitsWarning(AstropyWarning):
"""
The base class for unit-specific warnings.
"""
class UnitBase:
"""
Abstract base class for units.
Most of the arithmetic operations on units are defined in this
base class.
Should not be instantiated by users directly.
"""
# Make sure that __rmul__ of units gets called over the __mul__ of Numpy
# arrays to avoid element-wise multiplication.
__array_priority__ = 1000
_hash = None
_type_id = None
def __deepcopy__(self, memo):
# This may look odd, but the units conversion will be very
# broken after deep-copying if we don't guarantee that a given
# physical unit corresponds to only one instance
return self
def _repr_latex_(self):
"""
Generate latex representation of unit name. This is used by
the IPython notebook to print a unit with a nice layout.
Returns
-------
Latex string
"""
return unit_format.Latex.to_string(self)
def __bytes__(self):
"""Return string representation for unit."""
return unit_format.Generic.to_string(self).encode("unicode_escape")
def __str__(self):
"""Return string representation for unit."""
return unit_format.Generic.to_string(self)
def __repr__(self):
string = unit_format.Generic.to_string(self)
return f'Unit("{string}")'
def _get_physical_type_id(self):
"""
Returns an identifier that uniquely identifies the physical
        type of this unit. It is composed of the bases and powers of
this unit, without the scale. Since it is hashable, it is
useful as a dictionary key.
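        Examples
        --------
        For instance, the identifier of the newton is built from its
        decomposition into kilograms, meters and seconds::
            >>> from astropy import units as u
            >>> u.N._get_physical_type_id()
            (('kg', 1), ('m', 1), ('s', -2))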
"""
if self._type_id is None:
unit = self.decompose()
self._type_id = tuple(zip((base.name for base in unit.bases), unit.powers))
return self._type_id
@property
def names(self):
"""
Returns all of the names associated with this unit.
"""
raise AttributeError(
"Can not get names from unnamed units. Perhaps you meant to_string()?"
)
@property
def name(self):
"""
Returns the canonical (short) name associated with this unit.
"""
raise AttributeError(
"Can not get names from unnamed units. Perhaps you meant to_string()?"
)
@property
def aliases(self):
"""
Returns the alias (long) names for this unit.
"""
raise AttributeError(
"Can not get aliases from unnamed units. Perhaps you meant to_string()?"
)
@property
def scale(self):
"""
Return the scale of the unit.
"""
return 1.0
@property
def bases(self):
"""
Return the bases of the unit.
"""
return [self]
@property
def powers(self):
"""
Return the powers of the unit.
"""
return [1]
def to_string(self, format=unit_format.Generic):
"""
Output the unit in the given format as a string.
Parameters
----------
format : `astropy.units.format.Base` instance or str
The name of a format or a formatter object. If not
provided, defaults to the generic format.
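        Examples
        --------
        >>> from astropy import units as u
        >>> u.Unit('m / s').to_string()
        'm / s'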
"""
f = unit_format.get_format(format)
return f.to_string(self)
def __format__(self, format_spec):
"""Try to format units using a formatter."""
try:
return self.to_string(format=format_spec)
except ValueError:
return format(str(self), format_spec)
@staticmethod
def _normalize_equivalencies(equivalencies):
"""Normalizes equivalencies, ensuring each is a 4-tuple.
The resulting tuple is of the form::
(from_unit, to_unit, forward_func, backward_func)
Parameters
----------
equivalencies : list of equivalency pairs, or None
Returns
-------
        A normalized list, including possible global defaults set by, e.g.,
        `set_enabled_equivalencies`, except when ``equivalencies=None``,
        in which case the returned list is always empty.
        Raises
        ------
        ValueError
            If an equivalency cannot be interpreted.
"""
normalized = _normalize_equivalencies(equivalencies)
if equivalencies is not None:
normalized += get_current_unit_registry().equivalencies
return normalized
def __pow__(self, p):
p = validate_power(p)
return CompositeUnit(1, [self], [p], _error_check=False)
def __truediv__(self, m):
if isinstance(m, (bytes, str)):
m = Unit(m)
if isinstance(m, UnitBase):
if m.is_unity():
return self
return CompositeUnit(1, [self, m], [1, -1], _error_check=False)
try:
# Cannot handle this as Unit, re-try as Quantity
from .quantity import Quantity
return Quantity(1, self) / m
except TypeError:
return NotImplemented
def __rtruediv__(self, m):
if isinstance(m, (bytes, str)):
return Unit(m) / self
try:
# Cannot handle this as Unit. Here, m cannot be a Quantity,
# so we make it into one, fasttracking when it does not have a
# unit, for the common case of <array> / <unit>.
from .quantity import Quantity
if hasattr(m, "unit"):
result = Quantity(m)
result /= self
return result
else:
return Quantity(m, self ** (-1))
except TypeError:
return NotImplemented
def __mul__(self, m):
if isinstance(m, (bytes, str)):
m = Unit(m)
if isinstance(m, UnitBase):
if m.is_unity():
return self
elif self.is_unity():
return m
return CompositeUnit(1, [self, m], [1, 1], _error_check=False)
# Cannot handle this as Unit, re-try as Quantity.
try:
from .quantity import Quantity
return Quantity(1, unit=self) * m
except TypeError:
return NotImplemented
def __rmul__(self, m):
if isinstance(m, (bytes, str)):
return Unit(m) * self
# Cannot handle this as Unit. Here, m cannot be a Quantity,
# so we make it into one, fasttracking when it does not have a unit
# for the common case of <array> * <unit>.
try:
from .quantity import Quantity
if hasattr(m, "unit"):
result = Quantity(m)
result *= self
return result
else:
return Quantity(m, unit=self)
except TypeError:
return NotImplemented
def __rlshift__(self, m):
try:
from .quantity import Quantity
return Quantity(m, self, copy=False, subok=True)
except Exception:
return NotImplemented
def __rrshift__(self, m):
warnings.warn(
">> is not implemented. Did you mean to convert "
f"to a Quantity with unit {m} using '<<'?",
AstropyWarning,
)
return NotImplemented
def __hash__(self):
if self._hash is None:
parts = (
[str(self.scale)]
+ [x.name for x in self.bases]
+ [str(x) for x in self.powers]
)
self._hash = hash(tuple(parts))
return self._hash
def __getstate__(self):
# If we get pickled, we should *not* store the memoized members since
# hashes of strings vary between sessions.
state = self.__dict__.copy()
state.pop("_hash", None)
state.pop("_type_id", None)
return state
def __eq__(self, other):
if self is other:
return True
try:
other = Unit(other, parse_strict="silent")
except (ValueError, UnitsError, TypeError):
return NotImplemented
# Other is unit-like, but the test below requires it is a UnitBase
# instance; if it is not, give up (so that other can try).
if not isinstance(other, UnitBase):
return NotImplemented
try:
return is_effectively_unity(self._to(other))
except UnitsError:
return False
def __ne__(self, other):
return not (self == other)
def __le__(self, other):
scale = self._to(Unit(other))
return scale <= 1.0 or is_effectively_unity(scale)
def __ge__(self, other):
scale = self._to(Unit(other))
return scale >= 1.0 or is_effectively_unity(scale)
def __lt__(self, other):
return not (self >= other)
def __gt__(self, other):
return not (self <= other)
def __neg__(self):
return self * -1.0
def is_equivalent(self, other, equivalencies=[]):
"""
Returns `True` if this unit is equivalent to ``other``.
Parameters
----------
other : `~astropy.units.Unit`, str, or tuple
The unit to convert to. If a tuple of units is specified, this
            method returns `True` if the unit matches any of those in the tuple.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
Returns
-------
bool
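        Examples
        --------
        >>> from astropy import units as u
        >>> u.m.is_equivalent(u.pc)
        True
        >>> u.m.is_equivalent(u.s)
        False
        >>> u.m.is_equivalent(u.Hz, equivalencies=u.spectral())
        True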
"""
equivalencies = self._normalize_equivalencies(equivalencies)
if isinstance(other, tuple):
return any(self.is_equivalent(u, equivalencies) for u in other)
other = Unit(other, parse_strict="silent")
return self._is_equivalent(other, equivalencies)
def _is_equivalent(self, other, equivalencies=[]):
"""Returns `True` if this unit is equivalent to `other`.
See `is_equivalent`, except that a proper Unit object should be
given (i.e., no string) and that the equivalency list should be
normalized using `_normalize_equivalencies`.
"""
if isinstance(other, UnrecognizedUnit):
return False
if self._get_physical_type_id() == other._get_physical_type_id():
return True
elif len(equivalencies):
unit = self.decompose()
other = other.decompose()
for a, b, forward, backward in equivalencies:
if b is None:
# after canceling, is what's left convertible
# to dimensionless (according to the equivalency)?
try:
(other / unit).decompose([a])
return True
except Exception:
pass
elif (a._is_equivalent(unit) and b._is_equivalent(other)) or (
b._is_equivalent(unit) and a._is_equivalent(other)
):
return True
return False
def _apply_equivalencies(self, unit, other, equivalencies):
"""
Internal function (used from `_get_converter`) to apply
equivalence pairs.
"""
def make_converter(scale1, func, scale2):
def convert(v):
return func(_condition_arg(v) / scale1) * scale2
return convert
for funit, tunit, a, b in equivalencies:
if tunit is None:
ratio = other.decompose() / unit.decompose()
try:
ratio_in_funit = ratio.decompose([funit])
return make_converter(ratio_in_funit.scale, a, 1.0)
except UnitsError:
pass
else:
try:
scale1 = funit._to(unit)
scale2 = tunit._to(other)
return make_converter(scale1, a, scale2)
except UnitsError:
pass
try:
scale1 = tunit._to(unit)
scale2 = funit._to(other)
return make_converter(scale1, b, scale2)
except UnitsError:
pass
def get_err_str(unit):
unit_str = unit.to_string("unscaled")
physical_type = unit.physical_type
if physical_type != "unknown":
unit_str = f"'{unit_str}' ({physical_type})"
else:
unit_str = f"'{unit_str}'"
return unit_str
unit_str = get_err_str(unit)
other_str = get_err_str(other)
raise UnitConversionError(f"{unit_str} and {other_str} are not convertible")
def _get_converter(self, other, equivalencies=[]):
"""Get a converter for values in ``self`` to ``other``.
If no conversion is necessary, returns ``unit_scale_converter``
(which is used as a check in quantity helpers).
"""
# First see if it is just a scaling.
try:
scale = self._to(other)
except UnitsError:
pass
else:
if scale == 1.0:
return unit_scale_converter
else:
return lambda val: scale * _condition_arg(val)
# if that doesn't work, maybe we can do it with equivalencies?
try:
return self._apply_equivalencies(
self, other, self._normalize_equivalencies(equivalencies)
)
except UnitsError as exc:
# Last hope: maybe other knows how to do it?
# We assume the equivalencies have the unit itself as first item.
# TODO: maybe better for other to have a `_back_converter` method?
if hasattr(other, "equivalencies"):
for funit, tunit, a, b in other.equivalencies:
if other is funit:
try:
converter = self._get_converter(tunit, equivalencies)
except Exception:
pass
else:
return lambda v: b(converter(v))
raise exc
def _to(self, other):
"""
Returns the scale to the specified unit.
See `to`, except that a Unit object should be given (i.e., no
string), and that all defaults are used, i.e., no
equivalencies and value=1.
"""
# There are many cases where we just want to ensure a Quantity is
# of a particular unit, without checking whether it's already in
# a particular unit. If we're being asked to convert from a unit
# to itself, we can short-circuit all of this.
if self is other:
return 1.0
# Don't presume decomposition is possible; e.g.,
# conversion to function units is through equivalencies.
if isinstance(other, UnitBase):
self_decomposed = self.decompose()
other_decomposed = other.decompose()
# Check quickly whether equivalent. This is faster than
# `is_equivalent`, because it doesn't generate the entire
# physical type list of both units. In other words it "fails
# fast".
if self_decomposed.powers == other_decomposed.powers and all(
self_base is other_base
for (self_base, other_base) in zip(
self_decomposed.bases, other_decomposed.bases
)
):
return self_decomposed.scale / other_decomposed.scale
raise UnitConversionError(f"'{self!r}' is not a scaled version of '{other!r}'")
def to(self, other, value=UNITY, equivalencies=[]):
"""
Return the converted values in the specified unit.
Parameters
----------
other : unit-like
The unit to convert to.
value : int, float, or scalar array-like, optional
Value(s) in the current unit to be converted to the
            specified unit. If not provided, defaults to 1.0.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
Returns
-------
values : scalar or array
Converted value(s). Input value sequences are returned as
numpy arrays.
Raises
------
UnitsError
If units are inconsistent
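        Examples
        --------
        >>> from astropy import units as u
        >>> u.km.to(u.m)
        1000.0
        >>> u.km.to(u.m, [1.0, 2.0])
        array([1000., 2000.])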
"""
if other is self and value is UNITY:
return UNITY
else:
return self._get_converter(Unit(other), equivalencies)(value)
def in_units(self, other, value=1.0, equivalencies=[]):
"""
Alias for `to` for backward compatibility with pynbody.
"""
return self.to(other, value=value, equivalencies=equivalencies)
def decompose(self, bases=set()):
"""
Return a unit object composed of only irreducible units.
Parameters
----------
bases : sequence of UnitBase, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
            This will raise a `UnitsError` if it is not possible
            to do so.
Returns
-------
unit : `~astropy.units.CompositeUnit`
New object containing only irreducible unit objects.
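        Examples
        --------
        >>> from astropy import units as u
        >>> u.N.decompose()
        Unit("kg m / s2")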
"""
raise NotImplementedError()
def _compose(
self, equivalencies=[], namespace=[], max_depth=2, depth=0, cached_results=None
):
def is_final_result(unit):
# Returns True if this result contains only the expected
# units
return all(base in namespace for base in unit.bases)
unit = self.decompose()
key = hash(unit)
cached = cached_results.get(key)
if cached is not None:
if isinstance(cached, Exception):
raise cached
return cached
# Prevent too many levels of recursion
# And special case for dimensionless unit
if depth >= max_depth:
cached_results[key] = [unit]
return [unit]
# Make a list including all of the equivalent units
units = [unit]
for funit, tunit, a, b in equivalencies:
if tunit is not None:
if self._is_equivalent(funit):
scale = funit.decompose().scale / unit.scale
units.append(Unit(a(1.0 / scale) * tunit).decompose())
elif self._is_equivalent(tunit):
scale = tunit.decompose().scale / unit.scale
units.append(Unit(b(1.0 / scale) * funit).decompose())
else:
if self._is_equivalent(funit):
units.append(Unit(unit.scale))
# Store partial results
partial_results = []
# Store final results that reduce to a single unit or pair of
# units
if len(unit.bases) == 0:
final_results = [{unit}, set()]
else:
final_results = [set(), set()]
for tunit in namespace:
tunit_decomposed = tunit.decompose()
for u in units:
# If the unit is a base unit, look for an exact match
# to one of the bases of the target unit. If found,
# factor by the same power as the target unit's base.
# This allows us to factor out fractional powers
# without needing to do an exhaustive search.
if len(tunit_decomposed.bases) == 1:
for base, power in zip(u.bases, u.powers):
if tunit_decomposed._is_equivalent(base):
tunit = tunit**power
tunit_decomposed = tunit_decomposed**power
break
composed = (u / tunit_decomposed).decompose()
factored = composed * tunit
len_bases = len(composed.bases)
if is_final_result(factored) and len_bases <= 1:
final_results[len_bases].add(factored)
else:
partial_results.append((len_bases, composed, tunit))
# Do we have any minimal results?
for final_result in final_results:
if len(final_result):
results = final_results[0].union(final_results[1])
cached_results[key] = results
return results
partial_results.sort(key=operator.itemgetter(0))
# ...we have to recurse and try to further compose
results = []
for len_bases, composed, tunit in partial_results:
try:
composed_list = composed._compose(
equivalencies=equivalencies,
namespace=namespace,
max_depth=max_depth,
depth=depth + 1,
cached_results=cached_results,
)
except UnitsError:
composed_list = []
for subcomposed in composed_list:
results.append((len(subcomposed.bases), subcomposed, tunit))
if len(results):
results.sort(key=operator.itemgetter(0))
min_length = results[0][0]
subresults = set()
for len_bases, composed, tunit in results:
if len_bases > min_length:
break
else:
factored = composed * tunit
if is_final_result(factored):
subresults.add(factored)
if len(subresults):
cached_results[key] = subresults
return subresults
if not is_final_result(self):
result = UnitsError(
f"Cannot represent unit {self} in terms of the given units"
)
cached_results[key] = result
raise result
cached_results[key] = [self]
return [self]
def compose(
self, equivalencies=[], units=None, max_depth=2, include_prefix_units=None
):
"""
Return the simplest possible composite unit(s) that represent
the given unit. Since there may be multiple equally simple
compositions of the unit, a list of units is always returned.
Parameters
----------
equivalencies : list of tuple
A list of equivalence pairs to also list. See
:ref:`astropy:unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
units : set of `~astropy.units.Unit`, optional
If not provided, any known units may be used to compose
into. Otherwise, ``units`` is a dict, module or sequence
containing the units to compose into.
max_depth : int, optional
The maximum recursion depth to use when composing into
composite units.
include_prefix_units : bool, optional
When `True`, include prefixed units in the result.
Default is `True` if a sequence is passed in to ``units``,
`False` otherwise.
Returns
-------
units : list of `CompositeUnit`
A list of candidate compositions. These will all be
equally simple, but it may not be possible to
automatically determine which of the candidates are
better.
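        Examples
        --------
        The exact candidates and their order depend on the currently
        enabled unit registry, hence the skipped doctest::
            >>> from astropy import units as u
            >>> (u.kg * u.m / u.s**2).compose()  # doctest: +SKIP
            [Unit("N"), Unit("100000 dyn")]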
"""
# if units parameter is specified and is a sequence (list|tuple),
# include_prefix_units is turned on by default. Ex: units=[u.kpc]
if include_prefix_units is None:
include_prefix_units = isinstance(units, (list, tuple))
# Pre-normalize the equivalencies list
equivalencies = self._normalize_equivalencies(equivalencies)
# The namespace of units to compose into should be filtered to
# only include units with bases in common with self, otherwise
# they can't possibly provide useful results. Having too many
# destination units greatly increases the search space.
def has_bases_in_common(a, b):
if len(a.bases) == 0 and len(b.bases) == 0:
return True
for ab in a.bases:
for bb in b.bases:
if ab == bb:
return True
return False
def has_bases_in_common_with_equiv(unit, other):
if has_bases_in_common(unit, other):
return True
for funit, tunit, a, b in equivalencies:
if tunit is not None:
if unit._is_equivalent(funit):
if has_bases_in_common(tunit.decompose(), other):
return True
elif unit._is_equivalent(tunit):
if has_bases_in_common(funit.decompose(), other):
return True
else:
if unit._is_equivalent(funit):
if has_bases_in_common(dimensionless_unscaled, other):
return True
return False
def filter_units(units):
filtered_namespace = set()
for tunit in units:
if (
isinstance(tunit, UnitBase)
and (include_prefix_units or not isinstance(tunit, PrefixUnit))
and has_bases_in_common_with_equiv(decomposed, tunit.decompose())
):
filtered_namespace.add(tunit)
return filtered_namespace
decomposed = self.decompose()
if units is None:
units = filter_units(self._get_units_with_same_physical_type(equivalencies))
if len(units) == 0:
units = get_current_unit_registry().non_prefix_units
elif isinstance(units, dict):
units = set(filter_units(units.values()))
elif inspect.ismodule(units):
units = filter_units(vars(units).values())
else:
units = filter_units(_flatten_units_collection(units))
def sort_results(results):
if not len(results):
return []
# Sort the results so the simplest ones appear first.
# Simplest is defined as "the minimum sum of absolute
# powers" (i.e. the fewest bases), and preference should
# be given to results where the sum of powers is positive
# and the scale is exactly equal to 1.0
results = list(results)
results.sort(key=lambda x: np.abs(x.scale))
results.sort(key=lambda x: np.sum(np.abs(x.powers)))
results.sort(key=lambda x: np.sum(x.powers) < 0.0)
results.sort(key=lambda x: not is_effectively_unity(x.scale))
last_result = results[0]
filtered = [last_result]
for result in results[1:]:
if str(result) != str(last_result):
filtered.append(result)
last_result = result
return filtered
return sort_results(
self._compose(
equivalencies=equivalencies,
namespace=units,
max_depth=max_depth,
depth=0,
cached_results={},
)
)
def to_system(self, system):
"""
Converts this unit into ones belonging to the given system.
Since more than one result may be possible, a list is always
returned.
Parameters
----------
system : module
The module that defines the unit system. Commonly used
ones include `astropy.units.si` and `astropy.units.cgs`.
To use your own module it must contain unit objects and a
sequence member named ``bases`` containing the base units of
the system.
Returns
-------
units : list of `CompositeUnit`
The list is ranked so that units containing only the base
units of that system will appear first.
"""
bases = set(system.bases)
def score(compose):
            # If compose.bases has no elements, return np.inf as the
            # score. It does not really matter which number we return;
            # this case occurs, for instance, for dimensionless
            # quantities:
compose_bases = compose.bases
if len(compose_bases) == 0:
return np.inf
else:
sum = 0
for base in compose_bases:
if base in bases:
sum += 1
return sum / float(len(compose_bases))
x = self.decompose(bases=bases)
composed = x.compose(units=system)
composed = sorted(composed, key=score, reverse=True)
return composed
@lazyproperty
def si(self):
"""
Returns a copy of the current `Unit` instance in SI units.
"""
from . import si
return self.to_system(si)[0]
@lazyproperty
def cgs(self):
"""
Returns a copy of the current `Unit` instance with CGS units.
"""
from . import cgs
return self.to_system(cgs)[0]
@property
def physical_type(self):
"""
Physical type(s) dimensionally compatible with the unit.
Returns
-------
`~astropy.units.physical.PhysicalType`
A representation of the physical type(s) of a unit.
Examples
--------
>>> from astropy import units as u
>>> u.m.physical_type
PhysicalType('length')
>>> (u.m ** 2 / u.s).physical_type
PhysicalType({'diffusivity', 'kinematic viscosity'})
Physical types can be compared to other physical types
(recommended in packages) or to strings.
>>> area = (u.m ** 2).physical_type
>>> area == u.m.physical_type ** 2
True
>>> area == "area"
True
`~astropy.units.physical.PhysicalType` objects can be used for
dimensional analysis.
>>> number_density = u.m.physical_type ** -3
>>> velocity = (u.m / u.s).physical_type
>>> number_density * velocity
PhysicalType('particle flux')
"""
from . import physical
return physical.get_physical_type(self)
def _get_units_with_same_physical_type(self, equivalencies=[]):
"""
Return a list of registered units with the same physical type
as this unit.
This function is used by Quantity to add its built-in
conversions to equivalent units.
This is a private method, since end users should be encouraged
to use the more powerful `compose` and `find_equivalent_units`
methods (which use this under the hood).
Parameters
----------
equivalencies : list of tuple
A list of equivalence pairs to also pull options from.
See :ref:`astropy:unit_equivalencies`. It must already be
normalized using `_normalize_equivalencies`.
"""
unit_registry = get_current_unit_registry()
units = set(unit_registry.get_units_with_physical_type(self))
for funit, tunit, a, b in equivalencies:
if tunit is not None:
if self.is_equivalent(funit) and tunit not in units:
units.update(unit_registry.get_units_with_physical_type(tunit))
if self._is_equivalent(tunit) and funit not in units:
units.update(unit_registry.get_units_with_physical_type(funit))
else:
if self.is_equivalent(funit):
units.add(dimensionless_unscaled)
return units
class EquivalentUnitsList(list):
"""
A class to handle pretty-printing the result of
`find_equivalent_units`.
"""
HEADING_NAMES = ("Primary name", "Unit definition", "Aliases")
ROW_LEN = 3 # len(HEADING_NAMES), but hard-code since it is constant
NO_EQUIV_UNITS_MSG = "There are no equivalent units"
def __repr__(self):
if len(self) == 0:
return self.NO_EQUIV_UNITS_MSG
else:
lines = self._process_equivalent_units(self)
lines.insert(0, self.HEADING_NAMES)
widths = [0] * self.ROW_LEN
for line in lines:
for i, col in enumerate(line):
widths[i] = max(widths[i], len(col))
f = " {{0:<{0}s}} | {{1:<{1}s}} | {{2:<{2}s}}".format(*widths)
lines = [f.format(*line) for line in lines]
lines = lines[0:1] + ["["] + [f"{x} ," for x in lines[1:]] + ["]"]
return "\n".join(lines)
def _repr_html_(self):
"""
Outputs a HTML table representation within Jupyter notebooks.
"""
if len(self) == 0:
return f"<p>{self.NO_EQUIV_UNITS_MSG}</p>"
else:
# HTML tags to use to compose the table in HTML
blank_table = '<table style="width:50%">{}</table>'
blank_row_container = "<tr>{}</tr>"
heading_row_content = "<th>{}</th>" * self.ROW_LEN
data_row_content = "<td>{}</td>" * self.ROW_LEN
# The HTML will be rendered & the table is simple, so don't
# bother to include newlines & indentation for the HTML code.
heading_row = blank_row_container.format(
heading_row_content.format(*self.HEADING_NAMES)
)
data_rows = self._process_equivalent_units(self)
all_rows = heading_row
for row in data_rows:
html_row = blank_row_container.format(data_row_content.format(*row))
all_rows += html_row
return blank_table.format(all_rows)
@staticmethod
def _process_equivalent_units(equiv_units_data):
"""
            Extract and sort the attributes of the equivalent units
            before formatting.
"""
processed_equiv_units = []
for u in equiv_units_data:
irred = u.decompose().to_string()
if irred == u.name:
irred = "irreducible"
processed_equiv_units.append((u.name, irred, ", ".join(u.aliases)))
processed_equiv_units.sort()
return processed_equiv_units
def find_equivalent_units(
self, equivalencies=[], units=None, include_prefix_units=False
):
"""
Return a list of all the units that are the same type as ``self``.
Parameters
----------
equivalencies : list of tuple
A list of equivalence pairs to also list. See
:ref:`astropy:unit_equivalencies`.
Any list given, including an empty one, supersedes global defaults
that may be in effect (as set by `set_enabled_equivalencies`)
units : set of `~astropy.units.Unit`, optional
If not provided, all defined units will be searched for
equivalencies. Otherwise, may be a dict, module or
sequence containing the units to search for equivalencies.
include_prefix_units : bool, optional
When `True`, include prefixed units in the result.
Default is `False`.
Returns
-------
units : list of `UnitBase`
            A list of unit objects equivalent to this unit. A subclass of
`list` (``EquivalentUnitsList``) is returned that
pretty-prints the list of units when output.
"""
results = self.compose(
equivalencies=equivalencies,
units=units,
max_depth=1,
include_prefix_units=include_prefix_units,
)
results = {x.bases[0] for x in results if len(x.bases) == 1}
return self.EquivalentUnitsList(results)
def is_unity(self):
"""
Returns `True` if the unit is unscaled and dimensionless.
"""
return False
class NamedUnit(UnitBase):
"""
The base class of units that have a name.
Parameters
----------
st : str, list of str, 2-tuple
The name of the unit. If a list of strings, the first element
is the canonical (short) name, and the rest of the elements
are aliases. If a tuple of lists, the first element is a list
of short names, and the second element is a list of long
names; all but the first short name are considered "aliases".
Each name *should* be a valid Python identifier to make it
easy to access, but this is not required.
namespace : dict, optional
When provided, inject the unit, and all of its aliases, in the
given namespace dictionary. If a unit by the same name is
already in the namespace, a ValueError is raised.
doc : str, optional
A docstring describing the unit.
format : dict, optional
A mapping to format-specific representations of this unit.
For example, for the ``Ohm`` unit, it might be nice to have it
displayed as ``\\Omega`` by the ``latex`` formatter. In that
case, `format` argument should be set to::
{'latex': r'\\Omega'}
Raises
------
ValueError
If any of the given unit names are already in the registry.
ValueError
If any of the given unit names are not valid Python tokens.
"""
def __init__(self, st, doc=None, format=None, namespace=None):
UnitBase.__init__(self)
if isinstance(st, (bytes, str)):
self._names = [st]
self._short_names = [st]
self._long_names = []
elif isinstance(st, tuple):
if not len(st) == 2:
raise ValueError("st must be string, list or 2-tuple")
self._names = st[0] + [n for n in st[1] if n not in st[0]]
if not len(self._names):
raise ValueError("must provide at least one name")
self._short_names = st[0][:]
self._long_names = st[1][:]
else:
if len(st) == 0:
raise ValueError("st list must have at least one entry")
self._names = st[:]
self._short_names = [st[0]]
self._long_names = st[1:]
if format is None:
format = {}
self._format = format
if doc is None:
doc = self._generate_doc()
else:
doc = textwrap.dedent(doc)
doc = textwrap.fill(doc)
self.__doc__ = doc
self._inject(namespace)
def _generate_doc(self):
"""
Generate a docstring for the unit if the user didn't supply
one. This is only used from the constructor and may be
overridden in subclasses.
"""
names = self.names
if len(self.names) > 1:
return f"{names[1]} ({names[0]})"
else:
return names[0]
def get_format_name(self, format):
"""
Get a name for this unit that is specific to a particular
format.
Uses the dictionary passed into the `format` kwarg in the
constructor.
Parameters
----------
format : str
The name of the format
Returns
-------
name : str
The name of the unit for the given format.
"""
return self._format.get(format, self.name)
@property
def names(self):
"""
Returns all of the names associated with this unit.
"""
return self._names
@property
def name(self):
"""
Returns the canonical (short) name associated with this unit.
"""
return self._names[0]
@property
def aliases(self):
"""
Returns the alias (long) names for this unit.
"""
return self._names[1:]
@property
def short_names(self):
"""
Returns all of the short names associated with this unit.
"""
return self._short_names
@property
def long_names(self):
"""
Returns all of the long names associated with this unit.
"""
return self._long_names
def _inject(self, namespace=None):
"""
Injects the unit, and all of its aliases, in the given
namespace dictionary.
"""
if namespace is None:
return
# Loop through all of the names first, to ensure all of them
# are new, then add them all as a single "transaction" below.
for name in self._names:
if name in namespace and self != namespace[name]:
raise ValueError(
f"Object with name {name!r} already exists in "
f"given namespace ({namespace[name]!r})."
)
for name in self._names:
namespace[name] = self
def _recreate_irreducible_unit(cls, names, registered):
"""
This is used to reconstruct units when passed around by
multiprocessing.
"""
registry = get_current_unit_registry().registry
if names[0] in registry:
# If in local registry return that object.
return registry[names[0]]
else:
# otherwise, recreate the unit.
unit = cls(names)
if registered:
# If not in local registry but registered in origin registry,
# enable unit in local registry.
get_current_unit_registry().add_enabled_units([unit])
return unit
class IrreducibleUnit(NamedUnit):
"""
Irreducible units are the units that all other units are defined
in terms of.
Examples are meters, seconds, kilograms, amperes, etc. There is
    only one instance of such a unit per type.
"""
def __reduce__(self):
# When IrreducibleUnit objects are passed to other processes
# over multiprocessing, they need to be recreated to be the
# ones already in the subprocesses' namespace, not new
# objects, or they will be considered "unconvertible".
# Therefore, we have a custom pickler/unpickler that
# understands how to recreate the Unit on the other side.
registry = get_current_unit_registry().registry
return (
_recreate_irreducible_unit,
(self.__class__, list(self.names), self.name in registry),
self.__getstate__(),
)
@property
def represents(self):
"""The unit that this named unit represents.
For an irreducible unit, that is always itself.
"""
return self
def decompose(self, bases=set()):
if len(bases) and self not in bases:
for base in bases:
try:
scale = self._to(base)
except UnitsError:
pass
else:
if is_effectively_unity(scale):
return base
else:
return CompositeUnit(scale, [base], [1], _error_check=False)
raise UnitConversionError(
f"Unit {self} can not be decomposed into the requested bases"
)
return self
class UnrecognizedUnit(IrreducibleUnit):
"""
A unit that did not parse correctly. This allows for
round-tripping it as a string, but no unit operations actually work
on it.
Parameters
----------
st : str
The name of the unit.
"""
# For UnrecognizedUnits, we want to use "standard" Python
# pickling, not the special case that is used for
# IrreducibleUnits.
__reduce__ = object.__reduce__
def __repr__(self):
return f"UnrecognizedUnit({str(self)})"
def __bytes__(self):
return self.name.encode("ascii", "replace")
def __str__(self):
return self.name
def to_string(self, format=None):
return self.name
def _unrecognized_operator(self, *args, **kwargs):
raise ValueError(
f"The unit {self.name!r} is unrecognized, so all arithmetic operations "
"with it are invalid."
)
__pow__ = __truediv__ = __rtruediv__ = __mul__ = __rmul__ = _unrecognized_operator
__lt__ = __gt__ = __le__ = __ge__ = __neg__ = _unrecognized_operator
def __eq__(self, other):
try:
other = Unit(other, parse_strict="silent")
except (ValueError, UnitsError, TypeError):
return NotImplemented
return isinstance(other, type(self)) and self.name == other.name
def __ne__(self, other):
return not (self == other)
def is_equivalent(self, other, equivalencies=None):
self._normalize_equivalencies(equivalencies)
return self == other
def _get_converter(self, other, equivalencies=None):
self._normalize_equivalencies(equivalencies)
raise ValueError(
f"The unit {self.name!r} is unrecognized. It can not be converted "
"to other units."
)
def get_format_name(self, format):
return self.name
def is_unity(self):
return False
class _UnitMetaClass(type):
"""
This metaclass exists because the Unit constructor should
sometimes return instances that already exist. This "overrides"
the constructor before the new instance is actually created, so we
can return an existing one.
"""
def __call__(
self,
s="",
represents=None,
format=None,
namespace=None,
doc=None,
parse_strict="raise",
):
# Short-circuit if we're already a unit
if hasattr(s, "_get_physical_type_id"):
return s
# turn possible Quantity input for s or represents into a Unit
from .quantity import Quantity
if isinstance(represents, Quantity):
if is_effectively_unity(represents.value):
represents = represents.unit
else:
represents = CompositeUnit(
represents.value * represents.unit.scale,
bases=represents.unit.bases,
powers=represents.unit.powers,
_error_check=False,
)
if isinstance(s, Quantity):
if is_effectively_unity(s.value):
s = s.unit
else:
s = CompositeUnit(
s.value * s.unit.scale,
bases=s.unit.bases,
powers=s.unit.powers,
_error_check=False,
)
# now decide what we really need to do; define derived Unit?
if isinstance(represents, UnitBase):
# This has the effect of calling the real __new__ and
# __init__ on the Unit class.
return super().__call__(
s, represents, format=format, namespace=namespace, doc=doc
)
# or interpret a Quantity (now became unit), string or number?
if isinstance(s, UnitBase):
return s
elif isinstance(s, (bytes, str)):
if len(s.strip()) == 0:
# Return the NULL unit
return dimensionless_unscaled
if format is None:
format = unit_format.Generic
f = unit_format.get_format(format)
if isinstance(s, bytes):
s = s.decode("ascii")
try:
return f.parse(s)
except NotImplementedError:
raise
except Exception as e:
if parse_strict == "silent":
pass
else:
# Deliberately not issubclass here. Subclasses
# should use their name.
if f is not unit_format.Generic:
format_clause = f.name + " "
else:
format_clause = ""
msg = (
f"'{s}' did not parse as {format_clause}unit: {str(e)} "
"If this is meant to be a custom unit, "
"define it with 'u.def_unit'. To have it "
"recognized inside a file reader or other code, "
"enable it with 'u.add_enabled_units'. "
"For details, see "
"https://docs.astropy.org/en/latest/units/combining_and_defining.html"
)
if parse_strict == "raise":
raise ValueError(msg)
elif parse_strict == "warn":
warnings.warn(msg, UnitsWarning)
else:
raise ValueError(
"'parse_strict' must be 'warn', 'raise' or 'silent'"
)
return UnrecognizedUnit(s)
elif isinstance(s, (int, float, np.floating, np.integer)):
return CompositeUnit(s, [], [], _error_check=False)
elif isinstance(s, tuple):
from .structured import StructuredUnit
return StructuredUnit(s)
elif s is None:
raise TypeError("None is not a valid Unit")
else:
raise TypeError(f"{s} can not be converted to a Unit")
class Unit(NamedUnit, metaclass=_UnitMetaClass):
"""
The main unit class.
    There are a number of different ways to construct a Unit, but the
    constructor always returns a `UnitBase` instance. If the arguments
    refer to an already-existing unit, that existing unit instance is
    returned rather than a new one.
- From a string::
        Unit(s, format=None, parse_strict='raise')
Construct from a string representing a (possibly compound) unit.
The optional `format` keyword argument specifies the format the
string is in, by default ``"generic"``. For a description of
the available formats, see `astropy.units.format`.
The optional ``parse_strict`` keyword controls what happens when an
unrecognized unit string is passed in. It may be one of the following:
- ``'raise'``: (default) raise a ValueError exception.
- ``'warn'``: emit a Warning, and return an
`UnrecognizedUnit` instance.
- ``'silent'``: return an `UnrecognizedUnit` instance.
- From a number::
Unit(number)
Creates a dimensionless unit.
- From a `UnitBase` instance::
Unit(unit)
Returns the given unit unchanged.
- From no arguments::
Unit()
Returns the dimensionless unit.
    - The last form, which creates a new `Unit`, is described in detail
      below.
See also: https://docs.astropy.org/en/stable/units/
Parameters
----------
st : str or list of str
The name of the unit. If a list, the first element is the
canonical (short) name, and the rest of the elements are
aliases.
represents : UnitBase instance
The unit that this named unit represents.
doc : str, optional
A docstring describing the unit.
format : dict, optional
A mapping to format-specific representations of this unit.
For example, for the ``Ohm`` unit, it might be nice to have it
displayed as ``\\Omega`` by the ``latex`` formatter. In that
case, `format` argument should be set to::
{'latex': r'\\Omega'}
namespace : dict, optional
When provided, inject the unit (and all of its aliases) into
the given namespace.
Raises
------
ValueError
If any of the given unit names are already in the registry.
ValueError
If any of the given unit names are not valid Python tokens.
"""
def __init__(self, st, represents=None, doc=None, format=None, namespace=None):
represents = Unit(represents)
self._represents = represents
NamedUnit.__init__(self, st, namespace=namespace, doc=doc, format=format)
@property
def represents(self):
"""The unit that this named unit represents."""
return self._represents
def decompose(self, bases=set()):
return self._represents.decompose(bases=bases)
def is_unity(self):
return self._represents.is_unity()
def __hash__(self):
if self._hash is None:
self._hash = hash((self.name, self._represents))
return self._hash
@classmethod
def _from_physical_type_id(cls, physical_type_id):
# get string bases and powers from the ID tuple
bases = [cls(base) for base, _ in physical_type_id]
powers = [power for _, power in physical_type_id]
if len(physical_type_id) == 1 and powers[0] == 1:
unit = bases[0]
else:
unit = CompositeUnit(1, bases, powers, _error_check=False)
return unit
class PrefixUnit(Unit):
"""
A unit that is simply a SI-prefixed version of another unit.
For example, ``mm`` is a `PrefixUnit` of ``.001 * m``.
The constructor is the same as for `Unit`.
"""
class CompositeUnit(UnitBase):
"""
Create a composite unit using expressions of previously defined
units.
Direct use of this class is not recommended. Instead use the
factory function `Unit` and arithmetic operators to compose
units.
Parameters
----------
scale : number
A scaling factor for the unit.
bases : sequence of `UnitBase`
A sequence of units this unit is composed of.
powers : sequence of numbers
A sequence of powers (in parallel with ``bases``) for each
of the base units.
"""
_decomposed_cache = None
def __init__(
self,
scale,
bases,
powers,
decompose=False,
decompose_bases=set(),
_error_check=True,
):
# There are many cases internal to astropy.units where we
# already know that all the bases are Unit objects, and the
# powers have been validated. In those cases, we can skip the
# error checking for performance reasons. When the private
# kwarg `_error_check` is False, the error checking is turned
# off.
if _error_check:
for base in bases:
if not isinstance(base, UnitBase):
raise TypeError("bases must be sequence of UnitBase instances")
powers = [validate_power(p) for p in powers]
if not decompose and len(bases) == 1 and powers[0] >= 0:
# Short-cut; with one unit there's nothing to expand and gather,
# as that has happened already when creating the unit. But do only
# positive powers, since for negative powers we need to re-sort.
unit = bases[0]
power = powers[0]
if power == 1:
scale *= unit.scale
self._bases = unit.bases
self._powers = unit.powers
elif power == 0:
self._bases = []
self._powers = []
else:
scale *= unit.scale**power
self._bases = unit.bases
self._powers = [
operator.mul(*resolve_fractions(p, power)) for p in unit.powers
]
self._scale = sanitize_scale(scale)
else:
# Regular case: use inputs as preliminary scale, bases, and powers,
# then "expand and gather" identical bases, sanitize the scale, &c.
self._scale = scale
self._bases = bases
self._powers = powers
self._expand_and_gather(decompose=decompose, bases=decompose_bases)
def __repr__(self):
if len(self._bases):
return super().__repr__()
else:
if self._scale != 1.0:
return f"Unit(dimensionless with a scale of {self._scale})"
else:
return "Unit(dimensionless)"
@property
def scale(self):
"""
Return the scale of the composite unit.
"""
return self._scale
@property
def bases(self):
"""
Return the bases of the composite unit.
"""
return self._bases
@property
def powers(self):
"""
Return the powers of the composite unit.
"""
return self._powers
def _expand_and_gather(self, decompose=False, bases=set()):
def add_unit(unit, power, scale):
if bases and unit not in bases:
for base in bases:
try:
scale *= unit._to(base) ** power
except UnitsError:
pass
else:
unit = base
break
if unit in new_parts:
a, b = resolve_fractions(new_parts[unit], power)
new_parts[unit] = a + b
else:
new_parts[unit] = power
return scale
new_parts = {}
scale = self._scale
for b, p in zip(self._bases, self._powers):
if decompose and b not in bases:
b = b.decompose(bases=bases)
if isinstance(b, CompositeUnit):
scale *= b._scale**p
for b_sub, p_sub in zip(b._bases, b._powers):
a, b = resolve_fractions(p_sub, p)
scale = add_unit(b_sub, a * b, scale)
else:
scale = add_unit(b, p, scale)
new_parts = [x for x in new_parts.items() if x[1] != 0]
new_parts.sort(key=lambda x: (-x[1], getattr(x[0], "name", "")))
self._bases = [x[0] for x in new_parts]
self._powers = [x[1] for x in new_parts]
self._scale = sanitize_scale(scale)
def __copy__(self):
"""
For compatibility with python copy module.
"""
return CompositeUnit(self._scale, self._bases[:], self._powers[:])
def decompose(self, bases=set()):
if len(bases) == 0 and self._decomposed_cache is not None:
return self._decomposed_cache
for base in self.bases:
if not isinstance(base, IrreducibleUnit) or (
len(bases) and base not in bases
):
break
else:
if len(bases) == 0:
self._decomposed_cache = self
return self
x = CompositeUnit(
self.scale, self.bases, self.powers, decompose=True, decompose_bases=bases
)
if len(bases) == 0:
self._decomposed_cache = x
return x
def is_unity(self):
unit = self.decompose()
return len(unit.bases) == 0 and unit.scale == 1.0
si_prefixes = [
(["Q"], ["quetta"], 1e30),
(["R"], ["ronna"], 1e27),
(["Y"], ["yotta"], 1e24),
(["Z"], ["zetta"], 1e21),
(["E"], ["exa"], 1e18),
(["P"], ["peta"], 1e15),
(["T"], ["tera"], 1e12),
(["G"], ["giga"], 1e9),
(["M"], ["mega"], 1e6),
(["k"], ["kilo"], 1e3),
(["h"], ["hecto"], 1e2),
(["da"], ["deka", "deca"], 1e1),
(["d"], ["deci"], 1e-1),
(["c"], ["centi"], 1e-2),
(["m"], ["milli"], 1e-3),
(["u"], ["micro"], 1e-6),
(["n"], ["nano"], 1e-9),
(["p"], ["pico"], 1e-12),
(["f"], ["femto"], 1e-15),
(["a"], ["atto"], 1e-18),
(["z"], ["zepto"], 1e-21),
(["y"], ["yocto"], 1e-24),
(["r"], ["ronto"], 1e-27),
(["q"], ["quecto"], 1e-30),
]
binary_prefixes = [
(["Ki"], ["kibi"], 2**10),
(["Mi"], ["mebi"], 2**20),
(["Gi"], ["gibi"], 2**30),
(["Ti"], ["tebi"], 2**40),
(["Pi"], ["pebi"], 2**50),
(["Ei"], ["exbi"], 2**60),
]
def _add_prefixes(u, excludes=[], namespace=None, prefixes=False):
"""
Set up all of the standard metric prefixes for a unit. This
function should not be used directly, but instead use the
`prefixes` kwarg on `def_unit`.
Parameters
----------
excludes : list of str, optional
Any prefixes to exclude from creation to avoid namespace
collisions.
namespace : dict, optional
When provided, inject the unit (and all of its aliases) into
the given namespace dictionary.
prefixes : list, optional
When provided, it is a list of prefix definitions of the form:
            (short_names, long_names, factor)
"""
if prefixes is True:
prefixes = si_prefixes
elif prefixes is False:
prefixes = []
for short, full, factor in prefixes:
names = []
format = {}
for prefix in short:
if prefix in excludes:
continue
for alias in u.short_names:
names.append(prefix + alias)
# This is a hack to use Greek mu as a prefix
# for some formatters.
if prefix == "u":
format["latex"] = r"\mu " + u.get_format_name("latex")
format["unicode"] = "\N{MICRO SIGN}" + u.get_format_name("unicode")
for key, val in u._format.items():
format.setdefault(key, prefix + val)
for prefix in full:
if prefix in excludes:
continue
for alias in u.long_names:
names.append(prefix + alias)
if len(names):
PrefixUnit(
names,
CompositeUnit(factor, [u], [1], _error_check=False),
namespace=namespace,
format=format,
)
def def_unit(
s,
represents=None,
doc=None,
format=None,
prefixes=False,
exclude_prefixes=[],
namespace=None,
):
"""
Factory function for defining new units.
Parameters
----------
s : str or list of str
The name of the unit. If a list, the first element is the
canonical (short) name, and the rest of the elements are
aliases.
represents : UnitBase instance, optional
The unit that this named unit represents. If not provided,
a new `IrreducibleUnit` is created.
doc : str, optional
A docstring describing the unit.
format : dict, optional
A mapping to format-specific representations of this unit.
For example, for the ``Ohm`` unit, it might be nice to
have it displayed as ``\\Omega`` by the ``latex``
formatter. In that case, `format` argument should be set
to::
{'latex': r'\\Omega'}
prefixes : bool or list, optional
When `True`, generate all of the SI prefixed versions of the
unit as well. For example, for a given unit ``m``, will
generate ``mm``, ``cm``, ``km``, etc. When a list, it is a list of
prefix definitions of the form:
            (short_names, long_names, factor)
Default is `False`. This function always returns the base
unit object, even if multiple scaled versions of the unit were
created.
exclude_prefixes : list of str, optional
If any of the SI prefixes need to be excluded, they may be
listed here. For example, ``Pa`` can be interpreted either as
"petaannum" or "Pascal". Therefore, when defining the
prefixes for ``a``, ``exclude_prefixes`` should be set to
``["P"]``.
namespace : dict, optional
When provided, inject the unit (and all of its aliases and
prefixes), into the given namespace dictionary.
Returns
-------
unit : `~astropy.units.UnitBase`
The newly-defined unit, or a matching unit that was already
defined.
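    Examples
    --------
    >>> from astropy import units as u
    >>> bakers_fortnight = u.def_unit('bakers_fortnight', 13 * u.day)
    >>> (3 * bakers_fortnight).to(u.day)
    <Quantity 39. d>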
"""
if represents is not None:
result = Unit(s, represents, namespace=namespace, doc=doc, format=format)
else:
result = IrreducibleUnit(s, namespace=namespace, doc=doc, format=format)
if prefixes:
_add_prefixes(
result, excludes=exclude_prefixes, namespace=namespace, prefixes=prefixes
)
return result
def _condition_arg(value):
"""
    Validate that the value is acceptable for conversion purposes.
    Will convert into an array if it is not a scalar and can be
    converted into an array.
Parameters
----------
value : int or float value, or sequence of such values
Returns
-------
Scalar value or numpy array
Raises
------
ValueError
If value is not as expected
"""
if isinstance(value, (np.ndarray, float, int, complex, np.void)):
return value
avalue = np.array(value)
if avalue.dtype.kind not in ["i", "f", "c"]:
raise ValueError(
"Value not scalar compatible or convertible to "
"an int, float, or complex array"
)
return avalue
def unit_scale_converter(val):
"""Function that just multiplies the value by unity.
This is a separate function so it can be recognized and
discarded in unit conversion.
"""
return 1.0 * _condition_arg(val)
dimensionless_unscaled = CompositeUnit(1, [], [], _error_check=False)
# Abbreviation of the above, see #1980
one = dimensionless_unscaled
# Maintain error in old location for backward compatibility
# TODO: Is this still needed? Should there be a deprecation warning?
unit_format.fits.UnitScaleError = UnitScaleError
|
07588d32c4a59a7af72b6e0ea5d8d918212862fab7841d5565835b1780d90e3c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Miscellaneous utilities for `astropy.units`.
None of the functions in the module are meant for use outside of the
package.
"""
import io
import re
from fractions import Fraction
import numpy as np
from numpy import finfo
_float_finfo = finfo(float)
# take float here to ensure comparison with another float is fast
# give a little margin since often multiple calculations happened
_JUST_BELOW_UNITY = float(1.0 - 4.0 * _float_finfo.epsneg)
_JUST_ABOVE_UNITY = float(1.0 + 4.0 * _float_finfo.eps)
def _get_first_sentence(s):
"""
Get the first sentence from a string and remove any carriage
returns.
"""
x = re.match(r".*?\S\.\s", s)
if x is not None:
s = x.group(0)
return s.replace("\n", " ")
def _iter_unit_summary(namespace):
"""
Generates the ``(unit, doc, represents, aliases, prefixes)``
tuple used to format the unit summary docs in `generate_unit_summary`.
"""
from . import core
# Get all of the units, and keep track of which ones have SI
# prefixes
units = []
has_prefixes = set()
for key, val in namespace.items():
# Skip non-unit items
if not isinstance(val, core.UnitBase):
continue
# Skip aliases
if key != val.name:
continue
if isinstance(val, core.PrefixUnit):
# This will return the root unit that is scaled by the prefix
# attached to it
has_prefixes.add(val._represents.bases[0].name)
else:
units.append(val)
# Sort alphabetically, case insensitive
units.sort(key=lambda x: x.name.lower())
for unit in units:
doc = _get_first_sentence(unit.__doc__).strip()
represents = ""
if isinstance(unit, core.Unit):
represents = f":math:`{unit._represents.to_string('latex')[1:-1]}`"
aliases = ", ".join(f"``{x}``" for x in unit.aliases)
yield (
unit,
doc,
represents,
aliases,
"Yes" if unit.name in has_prefixes else "No",
)
def generate_unit_summary(namespace):
"""
Generates a summary of units from a given namespace. This is used
to generate the docstring for the modules that define the actual
units.
Parameters
----------
namespace : dict
A namespace containing units.
Returns
-------
docstring : str
A docstring containing a summary table of the units.
"""
docstring = io.StringIO()
docstring.write(
"""
.. list-table:: Available Units
:header-rows: 1
:widths: 10 20 20 20 1
* - Unit
- Description
- Represents
- Aliases
- SI Prefixes
"""
)
template = """
* - ``{}``
- {}
- {}
- {}
- {}
"""
for unit_summary in _iter_unit_summary(namespace):
docstring.write(template.format(*unit_summary))
return docstring.getvalue()
def generate_prefixonly_unit_summary(namespace):
"""
Generates table entries for units in a namespace that are just prefixes
without the base unit. Note that this is intended to be used *after*
`generate_unit_summary` and therefore does not include the table header.
Parameters
----------
namespace : dict
A namespace containing units that are prefixes but do *not* have the
base unit in their namespace.
Returns
-------
docstring : str
A docstring containing a summary table of the units.
"""
from . import PrefixUnit
faux_namespace = {}
    for unit in namespace.values():
if isinstance(unit, PrefixUnit):
base_unit = unit.represents.bases[0]
faux_namespace[base_unit.name] = base_unit
docstring = io.StringIO()
template = """
* - Prefixes for ``{}``
- {} prefixes
- {}
- {}
- Only
"""
for unit_summary in _iter_unit_summary(faux_namespace):
docstring.write(template.format(*unit_summary))
return docstring.getvalue()
def is_effectively_unity(value):
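    """Check whether a (possibly complex) value is unity to within 4 ulp."""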
# value is *almost* always real, except, e.g., for u.mag**0.5, when
# it will be complex. Use try/except to ensure normal case is fast
try:
return _JUST_BELOW_UNITY <= value <= _JUST_ABOVE_UNITY
except TypeError: # value is complex
return (
_JUST_BELOW_UNITY <= value.real <= _JUST_ABOVE_UNITY
and _JUST_BELOW_UNITY <= value.imag + 1 <= _JUST_ABOVE_UNITY
)
def sanitize_scale(scale):
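    """Return the scale as a clean float (or complex), mapping values that
    are indistinguishable from unity to exactly 1.0.
    Examples
    --------
    >>> from astropy.units.utils import sanitize_scale
    >>> sanitize_scale(0.9999999999999999)  # within a few ulp of unity
    1.0
    >>> sanitize_scale(3 + 0j)  # purely real complex scales become float
    3.0
    """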
if is_effectively_unity(scale):
return 1.0
# Maximum speed for regular case where scale is a float.
if scale.__class__ is float:
return scale
# We cannot have numpy scalars, since they don't autoconvert to
# complex if necessary. They are also slower.
if hasattr(scale, "dtype"):
scale = scale.item()
# All classes that scale can be (int, float, complex, Fraction)
# have an "imag" attribute.
if scale.imag:
if abs(scale.real) > abs(scale.imag):
if is_effectively_unity(scale.imag / scale.real + 1):
return scale.real
elif is_effectively_unity(scale.real / scale.imag + 1):
return complex(0.0, scale.imag)
return scale
else:
return scale.real
def maybe_simple_fraction(p, max_denominator=100):
"""Fraction very close to x with denominator at most max_denominator.
The fraction has to be such that fraction/x is unity to within 4 ulp.
If such a fraction does not exist, returns the float number.
The algorithm is that of `fractions.Fraction.limit_denominator`, but
sped up by not creating a fraction to start with.
"""
if p == 0 or p.__class__ is int:
return p
n, d = p.as_integer_ratio()
a = n // d
    # Normally, one starts with 0/1 and 1/0; here the first iteration has already been applied.
n0, d0 = 1, 0
n1, d1 = a, 1
while d1 <= max_denominator:
if _JUST_BELOW_UNITY <= n1 / (d1 * p) <= _JUST_ABOVE_UNITY:
return Fraction(n1, d1)
n, d = d, n - a * d
a = n // d
n0, n1 = n1, n0 + a * n1
d0, d1 = d1, d0 + a * d1
return p
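# Illustrative examples (a sketch of the behaviour above):
#     maybe_simple_fraction(1 / 3)        # -> Fraction(1, 3)
#     maybe_simple_fraction(0.4)          # -> Fraction(2, 5)
#     maybe_simple_fraction(0.123456789)  # -> 0.123456789 (no close simple fraction)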
def validate_power(p):
"""Convert a power to a floating point value, an integer, or a Fraction.
If a fractional power can be represented exactly as a floating point
number, convert it to a float, to make the math much faster; otherwise,
retain it as a `fractions.Fraction` object to avoid losing precision.
Conversely, if the value is indistinguishable from a rational number with a
low-numbered denominator, convert to a Fraction object.
Parameters
----------
p : float, int, Rational, Fraction
Power to be converted
"""
denom = getattr(p, "denominator", None)
if denom is None:
try:
p = float(p)
except Exception:
if not np.isscalar(p):
raise ValueError(
"Quantities and Units may only be raised to a scalar power"
)
else:
raise
# This returns either a (simple) Fraction or the same float.
p = maybe_simple_fraction(p)
# If still a float, nothing more to be done.
if isinstance(p, float):
return p
# Otherwise, check for simplifications.
denom = p.denominator
if denom == 1:
p = p.numerator
elif (denom & (denom - 1)) == 0:
# Above is a bit-twiddling hack to see if denom is a power of two.
# If so, float does not lose precision and will speed things up.
p = float(p)
return p
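# Illustrative examples (a sketch of the conversions above):
#     validate_power(2.0)             # -> 2 (integral value becomes an int)
#     validate_power(0.5)             # -> 0.5 (denominator 2 is a power of two)
#     validate_power(Fraction(1, 3))  # -> Fraction(1, 3) (kept exact)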
def resolve_fractions(a, b):
"""
If either input is a Fraction, convert the other to a Fraction
(at least if it does not have a ridiculous denominator).
This ensures that any operation involving a Fraction will use
rational arithmetic and preserve precision.
"""
# We short-circuit on the most common cases of int and float, since
# isinstance(a, Fraction) is very slow for any non-Fraction instances.
a_is_fraction = (
a.__class__ is not int and a.__class__ is not float and isinstance(a, Fraction)
)
b_is_fraction = (
b.__class__ is not int and b.__class__ is not float and isinstance(b, Fraction)
)
if a_is_fraction and not b_is_fraction:
b = maybe_simple_fraction(b)
elif not a_is_fraction and b_is_fraction:
a = maybe_simple_fraction(a)
return a, b
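# Illustrative example (sketch):
#     resolve_fractions(Fraction(1, 3), 0.5)  # -> (Fraction(1, 3), Fraction(1, 2))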
def quantity_asanyarray(a, dtype=None):
from .quantity import Quantity
if (
not isinstance(a, np.ndarray)
and not np.isscalar(a)
and any(isinstance(x, Quantity) for x in a)
):
return Quantity(a, dtype=dtype)
else:
        # skip over the numpy dtype deprecation for np.inexact.
dtype = np.float64 if dtype is np.inexact else dtype
return np.asanyarray(a, dtype=dtype)
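# Illustrative sketch (assuming ``import astropy.units as u``): a sequence that
# contains any Quantity becomes a Quantity, anything else a plain array:
#     quantity_asanyarray([1 * u.m, 2 * u.m])  # -> <Quantity [1., 2.] m>
#     quantity_asanyarray([1, 2])              # -> array([1, 2])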
|
0024f1846e89b0a915b9078add64a6b4456d76c185ce51e19c90e7e75a16a439 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Support for ``typing`` py3.9+ features while the minimum supported version is py3.8.
"""
from typing import *
try: # py 3.9+
from typing import Annotated
except (ImportError, ModuleNotFoundError):  # py3.8: fall back to the optional typing_extensions dependency
try:
from typing_extensions import Annotated
except (ImportError, ModuleNotFoundError):
Annotated = NotImplemented
else: # override typing
from typing_extensions import *
HAS_ANNOTATED = Annotated is not NotImplemented
|
b657974d6d8bbbbc70216e32e162b98a9ade66fe2ba900981e1854b5c340dd3d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines the `Quantity` object, which represents a number with some
associated units. `Quantity` objects support operations like ordinary numbers,
but will deal with unit conversions internally.
"""
# STDLIB
import numbers
import operator
import re
import warnings
from fractions import Fraction
# THIRD PARTY
import numpy as np
# LOCAL
from astropy import config as _config
from astropy.utils.compat import NUMPY_LT_1_22
from astropy.utils.data_info import ParentDtypeInfo
from astropy.utils.decorators import deprecated
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.misc import isiterable
from .core import (
Unit,
UnitBase,
UnitConversionError,
UnitsError,
UnitTypeError,
dimensionless_unscaled,
get_current_unit_registry,
)
from .format import Base, Latex
from .quantity_helper import can_have_arbitrary_unit, check_output, converters_and_unit
from .quantity_helper.function_helpers import (
DISPATCHED_FUNCTIONS,
FUNCTION_HELPERS,
SUBCLASS_SAFE_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
)
from .structured import StructuredUnit, _structured_unit_like_dtype
from .utils import is_effectively_unity
__all__ = [
"Quantity",
"SpecificTypeQuantity",
"QuantityInfoBase",
"QuantityInfo",
"allclose",
"isclose",
]
# We don't want to run doctests in the docstrings we inherit from Numpy
__doctest_skip__ = ["Quantity.*"]
_UNIT_NOT_INITIALISED = "(Unit not initialised)"
_UFUNCS_FILTER_WARNINGS = {np.arcsin, np.arccos, np.arccosh, np.arctanh}
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for Quantity.
"""
latex_array_threshold = _config.ConfigItem(
100,
"The maximum size an array Quantity can be before its LaTeX "
'representation for IPython gets "summarized" (meaning only the first '
'and last few elements are shown with "..." between). Setting this to a '
"negative number means that the value will instead be whatever numpy "
"gets from get_printoptions.",
)
conf = Conf()
class QuantityIterator:
"""
Flat iterator object to iterate over Quantities.
A `QuantityIterator` iterator is returned by ``q.flat`` for any Quantity
``q``. It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in C-contiguous style, with the last index varying the
fastest. The iterator can also be indexed using basic slicing or
advanced indexing.
See Also
--------
Quantity.flatten : Returns a flattened copy of an array.
Notes
-----
`QuantityIterator` is inspired by `~numpy.ma.core.MaskedIterator`. It
is not exported by the `~astropy.units` module. Instead of
instantiating a `QuantityIterator` directly, use `Quantity.flat`.
"""
def __init__(self, q):
self._quantity = q
self._dataiter = q.view(np.ndarray).flat
def __iter__(self):
return self
def __getitem__(self, indx):
out = self._dataiter.__getitem__(indx)
# For single elements, ndarray.flat.__getitem__ returns scalars; these
# need a new view as a Quantity.
if isinstance(out, type(self._quantity)):
return out
else:
return self._quantity._new_view(out)
def __setitem__(self, index, value):
self._dataiter[index] = self._quantity._to_own_unit(value)
def __next__(self):
"""
Return the next value, or raise StopIteration.
"""
out = next(self._dataiter)
# ndarray.flat._dataiter returns scalars, so need a view as a Quantity.
return self._quantity._new_view(out)
next = __next__
def __len__(self):
return len(self._dataiter)
#### properties and methods to match `numpy.ndarray.flatiter` ####
@property
def base(self):
"""A reference to the array that is iterated over."""
return self._quantity
@property
def coords(self):
"""An N-dimensional tuple of current coordinates."""
return self._dataiter.coords
@property
def index(self):
"""Current flat index into the array."""
return self._dataiter.index
def copy(self):
"""Get a copy of the iterator as a 1-D array."""
return self._quantity.flatten()
class QuantityInfoBase(ParentDtypeInfo):
# This is on a base class rather than QuantityInfo directly, so that
# it can be used for EarthLocationInfo yet make clear that that class
# should not be considered a typical Quantity subclass by Table.
attrs_from_parent = {"dtype", "unit"} # dtype and unit taken from parent
_supports_indexing = True
@staticmethod
def default_format(val):
return f"{val.value}"
@staticmethod
def possible_string_format_functions(format_):
"""Iterate through possible string-derived format functions.
A string can either be a format specifier for the format built-in,
a new-style format string, or an old-style format string.
This method is overridden in order to suppress printing the unit
in each row since it is already at the top in the column header.
"""
yield lambda format_, val: format(val.value, format_)
yield lambda format_, val: format_.format(val.value)
yield lambda format_, val: format_ % val.value
class QuantityInfo(QuantityInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
_represent_as_dict_attrs = ("value", "unit")
_construct_from_dict_args = ["value"]
_represent_as_dict_primary_data = "value"
def new_like(self, cols, length, metadata_conflicts="warn", name=None):
"""
Return a new Quantity instance which is consistent with the
input ``cols`` and has ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
cols : list
List of input columns
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : `~astropy.units.Quantity` (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(
cols, metadata_conflicts, name, ("meta", "format", "description")
)
# Make an empty quantity using the unit of the last one.
shape = (length,) + attrs.pop("shape")
dtype = attrs.pop("dtype")
# Use zeros so we do not get problems for Quantity subclasses such
# as Longitude and Latitude, which cannot take arbitrary values.
data = np.zeros(shape=shape, dtype=dtype)
# Get arguments needed to reconstruct class
map = {
key: (data if key == "value" else getattr(cols[-1], key))
for key in self._represent_as_dict_attrs
}
map["copy"] = False
out = self._construct_from_dict(map)
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
def get_sortable_arrays(self):
"""
Return a list of arrays which can be lexically sorted to represent
the order of the parent column.
For Quantity this is just the quantity itself.
Returns
-------
arrays : list of ndarray
"""
return [self._parent]
class Quantity(np.ndarray):
"""A `~astropy.units.Quantity` represents a number with some associated unit.
See also: https://docs.astropy.org/en/stable/units/quantity.html
Parameters
----------
value : number, `~numpy.ndarray`, `~astropy.units.Quantity` (sequence), or str
The numerical value of this quantity in the units given by unit. If a
`Quantity` or sequence of them (or any other valid object with a
``unit`` attribute), creates a new `Quantity` object, converting to
`unit` units as needed. If a string, it is converted to a number or
`Quantity`, depending on whether a unit is present.
unit : unit-like
An object that represents the unit associated with the input value.
Must be an `~astropy.units.UnitBase` object or a string parseable by
the :mod:`~astropy.units` package.
dtype : ~numpy.dtype, optional
The dtype of the resulting Numpy array or scalar that will
hold the value. If not provided, it is determined from the input,
except that any integer and (non-Quantity) object inputs are converted
to float by default.
If `None`, the normal `numpy.dtype` introspection is used, e.g.
preventing upcasting of integers.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy will
only be made if ``__array__`` returns a copy, if value is a nested
sequence, or if a copy is needed to satisfy an explicitly given
``dtype``. (The `False` option is intended mostly for internal use,
to speed up initialization where a copy is known to have been made.
Use with care.)
order : {'C', 'F', 'A'}, optional
Specify the order of the array. As in `~numpy.array`. This parameter
is ignored if the input is a `Quantity` and ``copy=False``.
subok : bool, optional
If `False` (default), the returned array will be forced to be a
`Quantity`. Otherwise, `Quantity` subclasses will be passed through,
or a subclass appropriate for the unit will be used (such as
`~astropy.units.Dex` for ``u.dex(u.AA)``).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting array
should have. Ones will be prepended to the shape as needed to meet
this requirement. This parameter is ignored if the input is a
`Quantity` and ``copy=False``.
Raises
------
TypeError
If the value provided is not a Python numeric type.
TypeError
If the unit provided is not either a :class:`~astropy.units.Unit`
object or a parseable string unit.
Notes
-----
Quantities can also be created by multiplying a number or array with a
:class:`~astropy.units.Unit`. See https://docs.astropy.org/en/latest/units/
Unless the ``dtype`` argument is explicitly specified, integer
or (non-Quantity) object inputs are converted to `float` by default.
"""
# Need to set a class-level default for _equivalencies, or
# Constants can not initialize properly
_equivalencies = []
# Default unit for initialization; can be overridden by subclasses,
# possibly to `None` to indicate there is no default unit.
_default_unit = dimensionless_unscaled
# Ensures views have an undefined unit.
_unit = None
__array_priority__ = 10000
def __class_getitem__(cls, unit_shape_dtype):
"""Quantity Type Hints.
Unit-aware type hints are ``Annotated`` objects that encode the class,
the unit, and possibly shape and dtype information, depending on the
python and :mod:`numpy` versions.
Schematically, ``Annotated[cls[shape, dtype], unit]``
As a classmethod, the type is the class, ie ``Quantity``
produces an ``Annotated[Quantity, ...]`` while a subclass
like :class:`~astropy.coordinates.Angle` returns
``Annotated[Angle, ...]``.
Parameters
----------
unit_shape_dtype : :class:`~astropy.units.UnitBase`, str, `~astropy.units.PhysicalType`, or tuple
Unit specification, can be the physical type (ie str or class).
If tuple, then the first element is the unit specification
and all other elements are for `numpy.ndarray` type annotations.
Whether they are included depends on the python and :mod:`numpy`
versions.
Returns
-------
`typing.Annotated`, `typing_extensions.Annotated`, `astropy.units.Unit`, or `astropy.units.PhysicalType`
Return type in this preference order:
* if python v3.9+ : `typing.Annotated`
* if :mod:`typing_extensions` is installed : `typing_extensions.Annotated`
* `astropy.units.Unit` or `astropy.units.PhysicalType`
Raises
------
TypeError
If the unit/physical_type annotation is not Unit-like or
PhysicalType-like.
Examples
--------
Create a unit-aware Quantity type annotation
>>> Quantity[Unit("s")]
Annotated[Quantity, Unit("s")]
See Also
--------
`~astropy.units.quantity_input`
Use annotations for unit checks on function arguments and results.
Notes
-----
With Python 3.9+ or :mod:`typing_extensions`, |Quantity| types are also
static-type compatible.
"""
# LOCAL
from ._typing import HAS_ANNOTATED, Annotated
# process whether [unit] or [unit, shape, ptype]
if isinstance(unit_shape_dtype, tuple): # unit, shape, dtype
target = unit_shape_dtype[0]
shape_dtype = unit_shape_dtype[1:]
else: # just unit
target = unit_shape_dtype
shape_dtype = ()
# Allowed unit/physical types. Errors if neither.
try:
unit = Unit(target)
except (TypeError, ValueError):
from astropy.units.physical import get_physical_type
try:
unit = get_physical_type(target)
except (TypeError, ValueError, KeyError): # KeyError for Enum
raise TypeError(
"unit annotation is not a Unit or PhysicalType"
) from None
        # Allow this to partially work on python 3.8- / without typing_extensions:
# instead of bailing out, return the unit for `quantity_input`
if not HAS_ANNOTATED:
warnings.warn(
"Quantity annotations are valid static type annotations only"
" if Python is v3.9+ or `typing_extensions` is installed."
)
return unit
# Quantity does not (yet) properly extend the NumPy generics types,
# introduced in numpy v1.22+, instead just including the unit info as
# metadata using Annotated.
# TODO: ensure we do interact with NDArray.__class_getitem__.
return Annotated.__class_getitem__((cls, unit))
def __new__(
cls,
value,
unit=None,
dtype=np.inexact,
copy=True,
order=None,
subok=False,
ndmin=0,
):
if unit is not None:
# convert unit first, to avoid multiple string->unit conversions
unit = Unit(unit)
# inexact -> upcast to float dtype
float_default = dtype is np.inexact
if float_default:
dtype = None
# optimize speed for Quantity with no dtype given, copy=False
if isinstance(value, Quantity):
if unit is not None and unit is not value.unit:
value = value.to(unit)
# the above already makes a copy (with float dtype)
copy = False
if type(value) is not cls and not (subok and isinstance(value, cls)):
value = value.view(cls)
if float_default and value.dtype.kind in "iu":
dtype = float
return np.array(
value, dtype=dtype, copy=copy, order=order, subok=True, ndmin=ndmin
)
# Maybe str, or list/tuple of Quantity? If so, this may set value_unit.
# To ensure array remains fast, we short-circuit it.
value_unit = None
if not isinstance(value, np.ndarray):
if isinstance(value, str):
# The first part of the regex string matches any integer/float;
# the second parts adds possible trailing .+-, which will break
# the float function below and ensure things like 1.2.3deg
# will not work.
pattern = (
r"\s*[+-]?"
r"((\d+\.?\d*)|(\.\d+)|([nN][aA][nN])|"
r"([iI][nN][fF]([iI][nN][iI][tT][yY]){0,1}))"
r"([eE][+-]?\d+)?"
r"[.+-]?"
)
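            # Illustrative matches for the pattern above (sketch):
            #     "1.2"       -> value 1.2, no unit string
            #     "-3e4 km"   -> value -30000.0, unit string "km"
            #     "1.2.3deg"  -> match ends "1.2.", float() fails -> TypeError below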
v = re.match(pattern, value)
unit_string = None
try:
value = float(v.group())
except Exception:
raise TypeError(
f'Cannot parse "{value}" as a {cls.__name__}. It does not '
"start with a number."
)
unit_string = v.string[v.end() :].strip()
if unit_string:
value_unit = Unit(unit_string)
if unit is None:
unit = value_unit # signal no conversion needed below.
elif isiterable(value) and len(value) > 0:
# Iterables like lists and tuples.
if all(isinstance(v, Quantity) for v in value):
# If a list/tuple containing only quantities, convert all
# to the same unit.
if unit is None:
unit = value[0].unit
value = [q.to_value(unit) for q in value]
value_unit = unit # signal below that conversion has been done
elif (
dtype is None
and not hasattr(value, "dtype")
and isinstance(unit, StructuredUnit)
):
# Special case for list/tuple of values and a structured unit:
# ``np.array(value, dtype=None)`` would treat tuples as lower
# levels of the array, rather than as elements of a structured
# array, so we use the structure of the unit to help infer the
# structured dtype of the value.
dtype = unit._recursively_get_dtype(value)
using_default_unit = False
if value_unit is None:
# If the value has a `unit` attribute and if not None
# (for Columns with uninitialized unit), treat it like a quantity.
value_unit = getattr(value, "unit", None)
if value_unit is None:
# Default to dimensionless for no (initialized) unit attribute.
if unit is None:
using_default_unit = True
unit = cls._default_unit
value_unit = unit # signal below that no conversion is needed
else:
try:
value_unit = Unit(value_unit)
except Exception as exc:
raise TypeError(
f"The unit attribute {value.unit!r} of the input could "
"not be parsed as an astropy Unit."
) from exc
if unit is None:
unit = value_unit
elif unit is not value_unit:
copy = False # copy will be made in conversion at end
value = np.array(
value, dtype=dtype, copy=copy, order=order, subok=True, ndmin=ndmin
)
# For no-user-input unit, make sure the constructed unit matches the
# structure of the data.
if using_default_unit and value.dtype.names is not None:
unit = value_unit = _structured_unit_like_dtype(value_unit, value.dtype)
# check that array contains numbers or long int objects
if value.dtype.kind in "OSU" and not (
value.dtype.kind == "O" and isinstance(value.item(0), numbers.Number)
):
raise TypeError("The value must be a valid Python or Numpy numeric type.")
# by default, cast any integer, boolean, etc., to float
if float_default and value.dtype.kind in "iuO":
value = value.astype(float)
# if we allow subclasses, allow a class from the unit.
if subok:
qcls = getattr(unit, "_quantity_class", cls)
if issubclass(qcls, cls):
cls = qcls
value = value.view(cls)
value._set_unit(value_unit)
if unit is value_unit:
return value
else:
# here we had non-Quantity input that had a "unit" attribute
# with a unit different from the desired one. So, convert.
return value.to(unit)
def __array_finalize__(self, obj):
# Check whether super().__array_finalize should be called
# (sadly, ndarray.__array_finalize__ is None; we cannot be sure
# what is above us).
super_array_finalize = super().__array_finalize__
if super_array_finalize is not None:
super_array_finalize(obj)
# If we're a new object or viewing an ndarray, nothing has to be done.
if obj is None or obj.__class__ is np.ndarray:
return
# If our unit is not set and obj has a valid one, use it.
if self._unit is None:
unit = getattr(obj, "_unit", None)
if unit is not None:
self._set_unit(unit)
# Copy info if the original had `info` defined. Because of the way the
# DataInfo works, `'info' in obj.__dict__` is False until the
# `info` attribute is accessed or set.
if "info" in obj.__dict__:
self.info = obj.info
def __array_wrap__(self, obj, context=None):
if context is None:
            # Methods like .squeeze() create a new `ndarray` and then call
# __array_wrap__ to turn the array into self's subclass.
return self._new_view(obj)
raise NotImplementedError(
"__array_wrap__ should not be used with a context any more since all "
"use should go through array_function. Please raise an issue on "
"https://github.com/astropy/astropy"
)
def __array_ufunc__(self, function, method, *inputs, **kwargs):
"""Wrap numpy ufuncs, taking care of units.
Parameters
----------
function : callable
ufunc to wrap.
method : str
Ufunc method: ``__call__``, ``at``, ``reduce``, etc.
inputs : tuple
Input arrays.
kwargs : keyword arguments
As passed on, with ``out`` containing possible quantity output.
Returns
-------
result : `~astropy.units.Quantity` or `NotImplemented`
Results of the ufunc, with the unit set properly.
"""
# Determine required conversion functions -- to bring the unit of the
# input to that expected (e.g., radian for np.sin), or to get
# consistent units between two inputs (e.g., in np.add) --
# and the unit of the result (or tuple of units for nout > 1).
try:
converters, unit = converters_and_unit(function, method, *inputs)
out = kwargs.get("out", None)
# Avoid loop back by turning any Quantity output into array views.
if out is not None:
# If pre-allocated output is used, check it is suitable.
# This also returns array view, to ensure we don't loop back.
if function.nout == 1:
out = out[0]
out_array = check_output(out, unit, inputs, function=function)
# Ensure output argument remains a tuple.
kwargs["out"] = (out_array,) if function.nout == 1 else out_array
if method == "reduce" and "initial" in kwargs and unit is not None:
# Special-case for initial argument for reductions like
# np.add.reduce. This should be converted to the output unit as
# well, which is typically the same as the input unit (but can
# in principle be different: unitless for np.equal, radian
# for np.arctan2, though those are not necessarily useful!)
kwargs["initial"] = self._to_own_unit(
kwargs["initial"], check_precision=False, unit=unit
)
# Same for inputs, but here also convert if necessary.
arrays = []
for input_, converter in zip(inputs, converters):
input_ = getattr(input_, "value", input_)
arrays.append(converter(input_) if converter else input_)
# Call our superclass's __array_ufunc__
result = super().__array_ufunc__(function, method, *arrays, **kwargs)
# If unit is None, a plain array is expected (e.g., comparisons), which
# means we're done.
# We're also done if the result was None (for method 'at') or
# NotImplemented, which can happen if other inputs/outputs override
# __array_ufunc__; hopefully, they can then deal with us.
if unit is None or result is None or result is NotImplemented:
return result
return self._result_as_quantity(result, unit, out)
except (TypeError, ValueError) as e:
out_normalized = kwargs.get("out", tuple())
inputs_and_outputs = inputs + out_normalized
ignored_ufunc = (
None,
np.ndarray.__array_ufunc__,
type(self).__array_ufunc__,
)
if not all(
getattr(type(io), "__array_ufunc__", None) in ignored_ufunc
for io in inputs_and_outputs
):
return NotImplemented
else:
raise e
def _result_as_quantity(self, result, unit, out):
"""Turn result into a quantity with the given unit.
If no output is given, it will take a view of the array as a quantity,
and set the unit. If output is given, those should be quantity views
of the result arrays, and the function will just set the unit.
Parameters
----------
result : ndarray or tuple thereof
Array(s) which need to be turned into quantity.
unit : `~astropy.units.Unit`
Unit for the quantities to be returned (or `None` if the result
should not be a quantity). Should be tuple if result is a tuple.
out : `~astropy.units.Quantity` or None
Possible output quantity. Should be `None` or a tuple if result
is a tuple.
Returns
-------
out : `~astropy.units.Quantity`
With units set.
"""
if isinstance(result, (tuple, list)):
if out is None:
out = (None,) * len(result)
return result.__class__(
self._result_as_quantity(result_, unit_, out_)
for (result_, unit_, out_) in zip(result, unit, out)
)
if out is None:
# View the result array as a Quantity with the proper unit.
return (
result if unit is None else self._new_view(result, unit, finalize=False)
)
elif isinstance(out, Quantity):
# For given Quantity output, just set the unit. We know the unit
# is not None and the output is of the correct Quantity subclass,
# as it was passed through check_output.
# (We cannot do this unconditionally, though, since it is possible
# for out to be ndarray and the unit to be dimensionless.)
out._set_unit(unit)
return out
def __quantity_subclass__(self, unit):
"""
Overridden by subclasses to change what kind of view is
created based on the output unit of an operation.
Parameters
----------
unit : UnitBase
The unit for which the appropriate class should be returned
Returns
-------
tuple :
- `~astropy.units.Quantity` subclass
- bool: True if subclasses of the given class are ok
"""
return Quantity, True
def _new_view(self, obj=None, unit=None, finalize=True):
"""Create a Quantity view of some array-like input, and set the unit.
By default, return a view of ``obj`` of the same class as ``self`` and
with the same unit. Subclasses can override the type of class for a
given unit using ``__quantity_subclass__``, and can ensure properties
other than the unit are copied using ``__array_finalize__``.
If the given unit defines a ``_quantity_class`` of which ``self``
is not an instance, a view using this class is taken.
Parameters
----------
obj : ndarray or scalar, optional
The array to create a view of. If obj is a numpy or python scalar,
it will be converted to an array scalar. By default, ``self``
is converted.
unit : unit-like, optional
The unit of the resulting object. It is used to select a
subclass, and explicitly assigned to the view if given.
If not given, the subclass and unit will be that of ``self``.
finalize : bool, optional
Whether to call ``__array_finalize__`` to transfer properties from
``self`` to the new view of ``obj`` (e.g., ``info`` for all
subclasses, or ``_wrap_angle`` for `~astropy.coordinates.Latitude`).
Default: `True`, as appropriate for, e.g., unit conversions or slicing,
where the nature of the object does not change.
Returns
-------
view : `~astropy.units.Quantity` subclass
"""
# Determine the unit and quantity subclass that we need for the view.
if unit is None:
unit = self.unit
quantity_subclass = self.__class__
elif unit is self.unit and self.__class__ is Quantity:
# The second part is because we should not presume what other
# classes want to do for the same unit. E.g., Constant will
# always want to fall back to Quantity, and relies on going
# through `__quantity_subclass__`.
quantity_subclass = Quantity
else:
unit = Unit(unit)
quantity_subclass = getattr(unit, "_quantity_class", Quantity)
if isinstance(self, quantity_subclass):
quantity_subclass, subok = self.__quantity_subclass__(unit)
if subok:
quantity_subclass = self.__class__
# We only want to propagate information from ``self`` to our new view,
# so obj should be a regular array. By using ``np.array``, we also
# convert python and numpy scalars, which cannot be viewed as arrays
# and thus not as Quantity either, to zero-dimensional arrays.
# (These are turned back into scalar in `.value`)
        # Note that for an ndarray input, the np.array call takes only about
        # double the time of an ``obj.__class__ is np.ndarray`` check, so it
        # is not worth special-casing.
if obj is None:
obj = self.view(np.ndarray)
else:
obj = np.array(obj, copy=False, subok=True)
# Take the view, set the unit, and update possible other properties
# such as ``info``, ``wrap_angle`` in `Longitude`, etc.
view = obj.view(quantity_subclass)
view._set_unit(unit)
if finalize:
view.__array_finalize__(self)
return view
def _set_unit(self, unit):
"""Set the unit.
This is used anywhere the unit is set or modified, i.e., in the
initializer, in ``__imul__`` and ``__itruediv__`` for in-place
multiplication and division by another unit, as well as in
``__array_finalize__`` for wrapping up views. For Quantity, it just
sets the unit, but subclasses can override it to check that, e.g.,
a unit is consistent.
"""
if not isinstance(unit, UnitBase):
if isinstance(self._unit, StructuredUnit) or isinstance(
unit, StructuredUnit
):
unit = StructuredUnit(unit, self.dtype)
else:
# Trying to go through a string ensures that, e.g., Magnitudes with
# dimensionless physical unit become Quantity with units of mag.
unit = Unit(str(unit), parse_strict="silent")
if not isinstance(unit, (UnitBase, StructuredUnit)):
raise UnitTypeError(
f"{self.__class__.__name__} instances require normal units, "
f"not {unit.__class__} instances."
)
self._unit = unit
def __deepcopy__(self, memo):
# If we don't define this, ``copy.deepcopy(quantity)`` will
# return a bare Numpy array.
return self.copy()
def __reduce__(self):
# patch to pickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
object_state = list(super().__reduce__())
object_state[2] = (object_state[2], self.__dict__)
return tuple(object_state)
def __setstate__(self, state):
# patch to unpickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
nd_state, own_state = state
super().__setstate__(nd_state)
self.__dict__.update(own_state)
info = QuantityInfo()
def _to_value(self, unit, equivalencies=[]):
"""Helper method for to and to_value."""
if equivalencies == []:
equivalencies = self._equivalencies
if not self.dtype.names or isinstance(self.unit, StructuredUnit):
# Standard path, let unit to do work.
return self.unit.to(
unit, self.view(np.ndarray), equivalencies=equivalencies
)
else:
# The .to() method of a simple unit cannot convert a structured
# dtype, so we work around it, by recursing.
# TODO: deprecate this?
# Convert simple to Structured on initialization?
result = np.empty_like(self.view(np.ndarray))
for name in self.dtype.names:
result[name] = self[name]._to_value(unit, equivalencies)
return result
def to(self, unit, equivalencies=[], copy=True):
"""
Return a new `~astropy.units.Quantity` object with the specified unit.
Parameters
----------
unit : unit-like
An object that represents the unit to convert to. Must be
an `~astropy.units.UnitBase` object or a string parseable
by the `~astropy.units` package.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
If not provided or ``[]``, class default equivalencies will be used
            (none for `~astropy.units.Quantity`, but may be set for subclasses).
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy
will only be made if necessary.
See also
--------
to_value : get the numerical value in a given unit.
"""
        unit = Unit(unit)
if copy:
# Avoid using to_value to ensure that we make a copy. We also
# don't want to slow down this method (esp. the scalar case).
value = self._to_value(unit, equivalencies)
else:
# to_value only copies if necessary
value = self.to_value(unit, equivalencies)
return self._new_view(value, unit)
def to_value(self, unit=None, equivalencies=[]):
"""
The numerical value, possibly in a different unit.
Parameters
----------
unit : unit-like, optional
The unit in which the value should be given. If not given or `None`,
use the current unit.
equivalencies : list of tuple, optional
A list of equivalence pairs to try if the units are not directly
convertible (see :ref:`astropy:unit_equivalencies`). If not provided
or ``[]``, class default equivalencies will be used (none for
`~astropy.units.Quantity`, but may be set for subclasses).
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
Returns
-------
value : ndarray or scalar
The value in the units specified. For arrays, this will be a view
of the data if no unit conversion was necessary.
See also
--------
to : Get a new instance in a different unit.
"""
if unit is None or unit is self.unit:
value = self.view(np.ndarray)
elif not self.dtype.names:
# For non-structured, we attempt a short-cut, where we just get
# the scale. If that is 1, we do not have to do anything.
unit = Unit(unit)
# We want a view if the unit does not change. One could check
# with "==", but that calculates the scale that we need anyway.
# TODO: would be better for `unit.to` to have an in-place flag.
try:
scale = self.unit._to(unit)
except Exception:
# Short-cut failed; try default (maybe equivalencies help).
value = self._to_value(unit, equivalencies)
else:
value = self.view(np.ndarray)
if not is_effectively_unity(scale):
# not in-place!
value = value * scale
else:
# For structured arrays, we go the default route.
value = self._to_value(unit, equivalencies)
        # Index with an empty tuple to decay array scalars into numpy scalars.
return value if value.shape else value[()]
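    # Sketch (illustrative, assuming ``import astropy.units as u``):
    #     (1.0 * u.km).to_value(u.m)  # -> 1000.0
    #     q = np.arange(3.0) * u.m
    #     q.to_value()                # -> array([0., 1., 2.]), a view of q's data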
value = property(
to_value,
doc="""The numerical value of this instance.
See also
--------
to_value : Get the numerical value in a given unit.
""",
)
@property
def unit(self):
"""
A `~astropy.units.UnitBase` object representing the unit of this
quantity.
"""
return self._unit
@property
def equivalencies(self):
"""
A list of equivalencies that will be applied by default during
unit conversions.
"""
return self._equivalencies
def _recursively_apply(self, func):
"""Apply function recursively to every field.
Returns a copy with the result.
"""
result = np.empty_like(self)
result_value = result.view(np.ndarray)
result_unit = ()
for name in self.dtype.names:
part = func(self[name])
result_value[name] = part.value
result_unit += (part.unit,)
result._set_unit(result_unit)
return result
@property
def si(self):
"""
Returns a copy of the current `Quantity` instance with SI units. The
value of the resulting object will be scaled.
"""
if self.dtype.names:
return self._recursively_apply(operator.attrgetter("si"))
si_unit = self.unit.si
return self._new_view(self.value * si_unit.scale, si_unit / si_unit.scale)
@property
def cgs(self):
"""
Returns a copy of the current `Quantity` instance with CGS units. The
value of the resulting object will be scaled.
"""
if self.dtype.names:
return self._recursively_apply(operator.attrgetter("cgs"))
cgs_unit = self.unit.cgs
return self._new_view(self.value * cgs_unit.scale, cgs_unit / cgs_unit.scale)
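    # Sketch (illustrative, assuming ``import astropy.units as u``):
    #     (1.0 * u.km).si  # -> <Quantity 1000. m>
    #     (1.0 * u.J).cgs  # -> <Quantity 10000000. erg>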
@property
def isscalar(self):
"""
True if the `value` of this quantity is a scalar, or False if it
is an array-like object.
.. note::
This is subtly different from `numpy.isscalar` in that
`numpy.isscalar` returns False for a zero-dimensional array
(e.g. ``np.array(1)``), while this is True for quantities,
since quantities cannot represent true numpy scalars.
"""
return not self.shape
# This flag controls whether convenience conversion members, such
# as `q.m` equivalent to `q.to_value(u.m)` are available. This is
# not turned on on Quantity itself, but is on some subclasses of
# Quantity, such as `astropy.coordinates.Angle`.
_include_easy_conversion_members = False
def __dir__(self):
"""
Quantities are able to directly convert to other units that
have the same physical type. This function is implemented in
order to make autocompletion still work correctly in IPython.
"""
if not self._include_easy_conversion_members:
return super().__dir__()
dir_values = set(super().__dir__())
equivalencies = Unit._normalize_equivalencies(self.equivalencies)
for equivalent in self.unit._get_units_with_same_physical_type(equivalencies):
dir_values.update(equivalent.names)
return sorted(dir_values)
def __getattr__(self, attr):
"""
Quantities are able to directly convert to other units that
have the same physical type.
"""
if not self._include_easy_conversion_members:
raise AttributeError(
f"'{self.__class__.__name__}' object has no '{attr}' member"
)
def get_virtual_unit_attribute():
registry = get_current_unit_registry().registry
to_unit = registry.get(attr, None)
if to_unit is None:
return None
try:
return self.unit.to(
to_unit, self.value, equivalencies=self.equivalencies
)
except UnitsError:
return None
value = get_virtual_unit_attribute()
if value is None:
raise AttributeError(
f"{self.__class__.__name__} instance has no attribute '{attr}'"
)
else:
return value
# Equality needs to be handled explicitly as ndarray.__eq__ gives
# DeprecationWarnings on any error, which is distracting, and does not
# deal well with structured arrays (nor does the ufunc).
def __eq__(self, other):
try:
other_value = self._to_own_unit(other)
except UnitsError:
return False
except Exception:
return NotImplemented
return self.value.__eq__(other_value)
def __ne__(self, other):
try:
other_value = self._to_own_unit(other)
except UnitsError:
return True
except Exception:
return NotImplemented
return self.value.__ne__(other_value)
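    # Sketch (illustrative, assuming ``import astropy.units as u``):
    #     (1.0 * u.m) == (100.0 * u.cm)  # -> True
    #     (1.0 * u.m) == (1.0 * u.s)     # -> False (incompatible units)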
# Unit conversion operator (<<).
def __lshift__(self, other):
try:
other = Unit(other, parse_strict="silent")
except UnitTypeError:
return NotImplemented
return self.__class__(self, other, copy=False, subok=True)
def __ilshift__(self, other):
try:
other = Unit(other, parse_strict="silent")
except UnitTypeError:
return NotImplemented # try other.__rlshift__(self)
try:
factor = self.unit._to(other)
except UnitConversionError: # incompatible, or requires an Equivalency
return NotImplemented
except AttributeError: # StructuredUnit does not have `_to`
# In principle, in-place might be possible.
return NotImplemented
view = self.view(np.ndarray)
try:
view *= factor # operates on view
except TypeError:
# The error is `numpy.core._exceptions._UFuncOutputCastingError`,
# which inherits from `TypeError`.
return NotImplemented
self._set_unit(other)
return self
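    # Sketch of the ``<<`` conversion operator (illustrative, assuming
    # ``import astropy.units as u``):
    #     (1000.0 * u.m) << u.km  # -> <Quantity 1. km>
    #     q = np.arange(3.0) * u.m
    #     q <<= u.cm              # in-place: values scaled, unit set to cm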
def __rlshift__(self, other):
if not self.isscalar:
return NotImplemented
return Unit(self).__rlshift__(other)
# Give warning for other >> self, since probably other << self was meant.
def __rrshift__(self, other):
warnings.warn(
">> is not implemented. Did you mean to convert "
"something to this quantity as a unit using '<<'?",
AstropyWarning,
)
return NotImplemented
# Also define __rshift__ and __irshift__ so we override default ndarray
# behaviour, but instead of emitting a warning here, let it be done by
# other (which likely is a unit if this was a mistake).
def __rshift__(self, other):
return NotImplemented
def __irshift__(self, other):
return NotImplemented
# Arithmetic operations
def __mul__(self, other):
"""Multiplication between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
try:
return self._new_view(
self.value.copy(), other * self.unit, finalize=False
)
except UnitsError: # let other try to deal with it
return NotImplemented
return super().__mul__(other)
def __imul__(self, other):
"""In-place multiplication between `Quantity` objects and others."""
if isinstance(other, (UnitBase, str)):
self._set_unit(other * self.unit)
return self
return super().__imul__(other)
def __rmul__(self, other):
"""
Right Multiplication between `Quantity` objects and other objects.
"""
return self.__mul__(other)
def __truediv__(self, other):
"""Division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
try:
return self._new_view(
self.value.copy(), self.unit / other, finalize=False
)
except UnitsError: # let other try to deal with it
return NotImplemented
return super().__truediv__(other)
def __itruediv__(self, other):
"""Inplace division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
self._set_unit(self.unit / other)
return self
return super().__itruediv__(other)
def __rtruediv__(self, other):
"""Right Division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
return self._new_view(1.0 / self.value, other / self.unit, finalize=False)
return super().__rtruediv__(other)
def __pow__(self, other):
if isinstance(other, Fraction):
# Avoid getting object arrays by raising the value to a Fraction.
return self._new_view(
self.value ** float(other), self.unit**other, finalize=False
)
return super().__pow__(other)
# other overrides of special functions
def __hash__(self):
return hash(self.value) ^ hash(self.unit)
def __iter__(self):
if self.isscalar:
raise TypeError(
f"'{self.__class__.__name__}' object with a scalar value is not"
" iterable"
)
# Otherwise return a generator
def quantity_iter():
for val in self.value:
yield self._new_view(val)
return quantity_iter()
def __getitem__(self, key):
if isinstance(key, str) and isinstance(self.unit, StructuredUnit):
return self._new_view(
self.view(np.ndarray)[key], self.unit[key], finalize=False
)
try:
out = super().__getitem__(key)
except IndexError:
# We want zero-dimensional Quantity objects to behave like scalars,
# so they should raise a TypeError rather than an IndexError.
if self.isscalar:
raise TypeError(
f"'{self.__class__.__name__}' object with a scalar value "
"does not support indexing"
)
else:
raise
# For single elements, ndarray.__getitem__ returns scalars; these
# need a new view as a Quantity.
if not isinstance(out, np.ndarray):
out = self._new_view(out)
return out
def __setitem__(self, i, value):
if isinstance(i, str):
# Indexing will cause a different unit, so by doing this in
# two steps we effectively try with the right unit.
self[i][...] = value
return
# update indices in info if the info property has been accessed
# (in which case 'info' in self.__dict__ is True; this is guaranteed
# to be the case if we're part of a table).
if not self.isscalar and "info" in self.__dict__:
self.info.adjust_indices(i, value, len(self))
self.view(np.ndarray).__setitem__(i, self._to_own_unit(value))
# __contains__ is OK
def __bool__(self):
"""This method raises ValueError, since truthiness of quantities is ambiguous,
especially for logarithmic units and temperatures. Use explicit comparisons.
"""
raise ValueError(
f"{type(self).__name__} truthiness is ambiguous, especially for logarithmic units"
" and temperatures. Use explicit comparisons."
)
def __len__(self):
if self.isscalar:
raise TypeError(
f"'{self.__class__.__name__}' object with a scalar value has no len()"
)
else:
return len(self.value)
# Numerical types
def __float__(self):
try:
return float(self.to_value(dimensionless_unscaled))
except (UnitsError, TypeError):
raise TypeError(
"only dimensionless scalar quantities can be "
"converted to Python scalars"
)
def __int__(self):
try:
return int(self.to_value(dimensionless_unscaled))
except (UnitsError, TypeError):
raise TypeError(
"only dimensionless scalar quantities can be "
"converted to Python scalars"
)
def __index__(self):
# for indices, we do not want to mess around with scaling at all,
# so unlike for float, int, we insist here on unscaled dimensionless
try:
assert self.unit.is_unity()
return self.value.__index__()
except Exception:
raise TypeError(
"only integer dimensionless scalar quantities "
"can be converted to a Python index"
)
# TODO: we may want to add a hook for dimensionless quantities?
@property
def _unitstr(self):
if self.unit is None:
unitstr = _UNIT_NOT_INITIALISED
else:
unitstr = str(self.unit)
if unitstr:
unitstr = " " + unitstr
return unitstr
def to_string(self, unit=None, precision=None, format=None, subfmt=None):
"""
Generate a string representation of the quantity and its unit.
The behavior of this function can be altered via the
`numpy.set_printoptions` function and its various keywords. The
exception to this is the ``threshold`` keyword, which is controlled via
the ``[units.quantity]`` configuration item ``latex_array_threshold``.
This is treated separately because the numpy default of 1000 is too big
for most browsers to handle.
Parameters
----------
unit : unit-like, optional
Specifies the unit. If not provided,
the unit used to initialize the quantity will be used.
precision : number, optional
The level of decimal precision. If `None`, or not provided,
it will be determined from NumPy print options.
format : str, optional
The format of the result. If not provided, an unadorned
string is returned. Supported values are:
- 'latex': Return a LaTeX-formatted string
- 'latex_inline': Return a LaTeX-formatted string that uses
negative exponents instead of fractions
subfmt : str, optional
Subformat of the result. For the moment, only used for
``format='latex'`` and ``format='latex_inline'``. Supported
values are:
- 'inline': Use ``$ ... $`` as delimiters.
- 'display': Use ``$\\displaystyle ... $`` as delimiters.
Returns
-------
str
A string with the contents of this Quantity
"""
if unit is not None and unit != self.unit:
return self.to(unit).to_string(
unit=None, precision=precision, format=format, subfmt=subfmt
)
formats = {
None: None,
"latex": {
None: ("$", "$"),
"inline": ("$", "$"),
"display": (r"$\displaystyle ", r"$"),
},
}
formats["latex_inline"] = formats["latex"]
if format not in formats:
raise ValueError(f"Unknown format '{format}'")
elif format is None:
if precision is None:
# Use default formatting settings
return f"{self.value}{self._unitstr:s}"
else:
# np.array2string properly formats arrays as well as scalars
return (
np.array2string(self.value, precision=precision, floatmode="fixed")
+ self._unitstr
)
# else, for the moment we assume format="latex" or "latex_inline".
# Set the precision if set, otherwise use numpy default
pops = np.get_printoptions()
format_spec = f".{precision if precision is not None else pops['precision']}g"
def float_formatter(value):
return Latex.format_exponential_notation(value, format_spec=format_spec)
def complex_formatter(value):
return "({}{}i)".format(
Latex.format_exponential_notation(value.real, format_spec=format_spec),
Latex.format_exponential_notation(
value.imag, format_spec="+" + format_spec
),
)
# The view is needed for the scalar case - self.value might be float.
latex_value = np.array2string(
self.view(np.ndarray),
threshold=(
conf.latex_array_threshold
if conf.latex_array_threshold > -1
else pops["threshold"]
),
formatter={
"float_kind": float_formatter,
"complex_kind": complex_formatter,
},
max_line_width=np.inf,
separator=",~",
)
latex_value = latex_value.replace("...", r"\dots")
# Format unit
# [1:-1] strips the '$' on either side needed for math mode
if self.unit is None:
latex_unit = _UNIT_NOT_INITIALISED
elif format == "latex":
latex_unit = self.unit._repr_latex_()[1:-1] # note this is unicode
elif format == "latex_inline":
latex_unit = self.unit.to_string(format="latex_inline")[1:-1]
delimiter_left, delimiter_right = formats[format][subfmt]
return rf"{delimiter_left}{latex_value} \; {latex_unit}{delimiter_right}"
def __str__(self):
return self.to_string()
def __repr__(self):
prefixstr = "<" + self.__class__.__name__ + " "
arrstr = np.array2string(
self.view(np.ndarray), separator=", ", prefix=prefixstr
)
return f"{prefixstr}{arrstr}{self._unitstr:s}>"
def _repr_latex_(self):
"""
Generate a latex representation of the quantity and its unit.
Returns
-------
lstr
A LaTeX string with the contents of this Quantity
"""
# NOTE: This should change to display format in a future release
return self.to_string(format="latex", subfmt="inline")
def __format__(self, format_spec):
try:
return self.to_string(format=format_spec)
except ValueError:
# We might have a unit format not implemented in `to_string()`.
if format_spec in Base.registry:
if self.unit is dimensionless_unscaled:
return f"{self.value}"
else:
return f"{self.value} {format(self.unit, format_spec)}"
# Can the value be formatted on its own?
try:
return f"{format(self.value, format_spec)}{self._unitstr:s}"
except ValueError:
# Format the whole thing as a single string.
return format(f"{self.value}{self._unitstr:s}", format_spec)
def decompose(self, bases=[]):
"""
Generates a new `Quantity` with the units
decomposed. Decomposed units have only irreducible units in
them (see `astropy.units.UnitBase.decompose`).
Parameters
----------
bases : sequence of `~astropy.units.UnitBase`, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
            This will raise a `~astropy.units.UnitsError` if it's not possible
            to do so.
Returns
-------
newq : `~astropy.units.Quantity`
A new object equal to this quantity with units decomposed.
"""
return self._decompose(False, bases=bases)
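    # Sketch (illustrative, assuming ``import astropy.units as u``):
    #     (1.0 * u.km).decompose()  # -> <Quantity 1000. m>
    #     (1.0 * u.J).decompose()   # -> <Quantity 1. kg m2 / s2>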
def _decompose(self, allowscaledunits=False, bases=[]):
"""
Generates a new `Quantity` with the units decomposed. Decomposed
units have only irreducible units in them (see
`astropy.units.UnitBase.decompose`).
Parameters
----------
allowscaledunits : bool
If True, the resulting `Quantity` may have a scale factor
associated with it. If False, any scaling in the unit will
be subsumed into the value of the resulting `Quantity`
bases : sequence of UnitBase, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
            This will raise a `~astropy.units.UnitsError` if it's not possible
            to do so.
Returns
-------
newq : `~astropy.units.Quantity`
A new object equal to this quantity with units decomposed.
"""
new_unit = self.unit.decompose(bases=bases)
# Be careful here because self.value usually is a view of self;
# be sure that the original value is not being modified.
if not allowscaledunits and hasattr(new_unit, "scale"):
new_value = self.value * new_unit.scale
new_unit = new_unit / new_unit.scale
return self._new_view(new_value, new_unit)
else:
return self._new_view(self.copy(), new_unit)
# These functions need to be overridden to take into account the units
# Array conversion
# https://numpy.org/doc/stable/reference/arrays.ndarray.html#array-conversion
def item(self, *args):
"""Copy an element of an array to a scalar Quantity and return it.
Like :meth:`~numpy.ndarray.item` except that it always
returns a `Quantity`, not a Python scalar.
"""
return self._new_view(super().item(*args))
def tolist(self):
raise NotImplementedError(
"cannot make a list of Quantities. Get list of values with"
" q.value.tolist()."
)
def _to_own_unit(self, value, check_precision=True, *, unit=None):
"""Convert value to one's own unit (or that given).
Here, non-quantities are treated as dimensionless, and care is taken
for values of 0, infinity or nan, which are allowed to have any unit.
Parameters
----------
value : anything convertible to `~astropy.units.Quantity`
The value to be converted to the requested unit.
check_precision : bool
Whether to forbid conversion of float to integer if that changes
the input number. Default: `True`.
unit : `~astropy.units.Unit` or None
The unit to convert to. By default, the unit of ``self``.
Returns
-------
value : number or `~numpy.ndarray`
In the requested units.
"""
if unit is None:
unit = self.unit
try:
_value = value.to_value(unit)
except AttributeError:
# We're not a Quantity.
# First remove two special cases (with a fast test):
# 1) Maybe masked printing? MaskedArray with quantities does not
# work very well, but no reason to break even repr and str.
# 2) np.ma.masked? useful if we're a MaskedQuantity.
if value is np.ma.masked or (
value is np.ma.masked_print_option and self.dtype.kind == "O"
):
return value
# Now, let's try a more general conversion.
# Plain arrays will be converted to dimensionless in the process,
# but anything with a unit attribute will use that.
try:
as_quantity = Quantity(value)
_value = as_quantity.to_value(unit)
except UnitsError:
                # last chance: if this was not something with a unit
                # and is all 0, inf, or nan, we treat it as having an
                # arbitrary unit.
if not hasattr(value, "unit") and can_have_arbitrary_unit(
as_quantity.value
):
_value = as_quantity.value
else:
raise
if self.dtype.kind == "i" and check_precision:
# If, e.g., we are casting float to int, we want to fail if
# precision is lost, but let things pass if it works.
_value = np.array(_value, copy=False, subok=True)
if not np.can_cast(_value.dtype, self.dtype):
self_dtype_array = np.array(_value, self.dtype, subok=True)
if not np.all((self_dtype_array == _value) | np.isnan(_value)):
raise TypeError(
"cannot convert value type to array type without precision loss"
)
# Setting names to ensure things like equality work (note that
# above will have failed already if units did not match).
if self.dtype.names:
_value.dtype.names = self.dtype.names
return _value
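    # Sketch (illustrative): with q = np.arange(3.0) * u.m (assuming
    # ``import astropy.units as u``),
    #     q._to_own_unit(2.0 * u.km)  # -> 2000.0
    #     q._to_own_unit(0.0)         # -> 0.0 (bare zeros may take any unit)
    #     q._to_own_unit(1.0)         # raises UnitConversionError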
def itemset(self, *args):
if len(args) == 0:
raise ValueError("itemset must have at least one argument")
self.view(np.ndarray).itemset(*(args[:-1] + (self._to_own_unit(args[-1]),)))
def tostring(self, order="C"):
"""Not implemented, use ``.value.tostring()`` instead."""
raise NotImplementedError(
"cannot write Quantities to string. Write array with"
" q.value.tostring(...)."
)
def tobytes(self, order="C"):
"""Not implemented, use ``.value.tobytes()`` instead."""
raise NotImplementedError(
"cannot write Quantities to bytes. Write array with q.value.tobytes(...)."
)
def tofile(self, fid, sep="", format="%s"):
"""Not implemented, use ``.value.tofile()`` instead."""
raise NotImplementedError(
"cannot write Quantities to file. Write array with q.value.tofile(...)"
)
def dump(self, file):
"""Not implemented, use ``.value.dump()`` instead."""
raise NotImplementedError(
"cannot dump Quantities to file. Write array with q.value.dump()"
)
def dumps(self):
"""Not implemented, use ``.value.dumps()`` instead."""
raise NotImplementedError(
"cannot dump Quantities to string. Write array with q.value.dumps()"
)
# astype, byteswap, copy, view, getfield, setflags OK as is
def fill(self, value):
self.view(np.ndarray).fill(self._to_own_unit(value))
# Shape manipulation: resize cannot be done (does not own data), but
# shape, transpose, swapaxes, flatten, ravel, squeeze all OK. Only
# the flat iterator needs to be overwritten, otherwise single items are
# returned as numbers.
@property
def flat(self):
"""A 1-D iterator over the Quantity array.
This returns a ``QuantityIterator`` instance, which behaves the same
as the `~numpy.flatiter` instance returned by `~numpy.ndarray.flat`,
and is similar to, but not a subclass of, Python's built-in iterator
object.
"""
return QuantityIterator(self)
@flat.setter
def flat(self, value):
y = self.ravel()
y[:] = value
# Item selection and manipulation
# repeat, sort, compress, diagonal OK
def take(self, indices, axis=None, out=None, mode="raise"):
out = super().take(indices, axis=axis, out=out, mode=mode)
# For single elements, ndarray.take returns scalars; these
# need a new view as a Quantity.
if type(out) is not type(self):
out = self._new_view(out)
return out
def put(self, indices, values, mode="raise"):
self.view(np.ndarray).put(indices, self._to_own_unit(values), mode)
def choose(self, choices, out=None, mode="raise"):
raise NotImplementedError(
"cannot choose based on quantity. Choose using array with"
" q.value.choose(...)"
)
# ensure we do not return indices as quantities
def argsort(self, axis=-1, kind="quicksort", order=None):
return self.view(np.ndarray).argsort(axis=axis, kind=kind, order=order)
def searchsorted(self, v, *args, **kwargs):
return np.searchsorted(
np.array(self), self._to_own_unit(v, check_precision=False), *args, **kwargs
) # avoid numpy 1.6 problem
if NUMPY_LT_1_22:
def argmax(self, axis=None, out=None):
return self.view(np.ndarray).argmax(axis, out=out)
def argmin(self, axis=None, out=None):
return self.view(np.ndarray).argmin(axis, out=out)
else:
def argmax(self, axis=None, out=None, *, keepdims=False):
return self.view(np.ndarray).argmax(axis=axis, out=out, keepdims=keepdims)
def argmin(self, axis=None, out=None, *, keepdims=False):
return self.view(np.ndarray).argmin(axis=axis, out=out, keepdims=keepdims)
def __array_function__(self, function, types, args, kwargs):
"""Wrap numpy functions, taking care of units.
Parameters
----------
function : callable
Numpy function to wrap
types : iterable of classes
Classes that provide an ``__array_function__`` override. Can
in principle be used to interact with other classes. Below,
mostly passed on to `~numpy.ndarray`, which can only interact
with subclasses.
args : tuple
Positional arguments provided in the function call.
kwargs : dict
Keyword arguments provided in the function call.
Returns
-------
result : `~astropy.units.Quantity` or `~numpy.ndarray`
As appropriate for the function. If the function is not
supported, `NotImplemented` is returned, which will lead to
a `TypeError` unless another argument overrode the function.
Raises
------
~astropy.units.UnitsError
If operands have incompatible units.
"""
# A function should be in one of the following sets or dicts:
# 1. SUBCLASS_SAFE_FUNCTIONS (set), if the numpy implementation
# supports Quantity; we pass on to ndarray.__array_function__.
# 2. FUNCTION_HELPERS (dict), if the numpy implementation is usable
# after converting quantities to arrays with suitable units,
# and possibly setting units on the result.
# 3. DISPATCHED_FUNCTIONS (dict), if the function makes sense but
# requires a Quantity-specific implementation.
# 4. UNSUPPORTED_FUNCTIONS (set), if the function does not make sense.
# For now, since we may not yet have complete coverage, if a
# function is in none of the above, we simply call the numpy
# implementation.
if function in SUBCLASS_SAFE_FUNCTIONS:
return super().__array_function__(function, types, args, kwargs)
elif function in FUNCTION_HELPERS:
function_helper = FUNCTION_HELPERS[function]
try:
args, kwargs, unit, out = function_helper(*args, **kwargs)
except NotImplementedError:
return self._not_implemented_or_raise(function, types)
result = super().__array_function__(function, types, args, kwargs)
# Fall through to return section
elif function in DISPATCHED_FUNCTIONS:
dispatched_function = DISPATCHED_FUNCTIONS[function]
try:
result, unit, out = dispatched_function(*args, **kwargs)
except NotImplementedError:
return self._not_implemented_or_raise(function, types)
# Fall through to return section
elif function in UNSUPPORTED_FUNCTIONS:
return NotImplemented
else:
warnings.warn(
f"function '{function.__name__}' is not known to astropy's Quantity."
" Will run it anyway, hoping it will treat ndarray subclasses"
" correctly. Please raise an issue at"
" https://github.com/astropy/astropy/issues.",
AstropyWarning,
)
return super().__array_function__(function, types, args, kwargs)
# If unit is None, a plain array is expected (e.g., boolean), which
# means we're done.
# We're also done if the result was NotImplemented, which can happen
# if other inputs/outputs override __array_function__;
# hopefully, they can then deal with us.
if unit is None or result is NotImplemented:
return result
return self._result_as_quantity(result, unit, out=out)
def _not_implemented_or_raise(self, function, types):
# Our function helper or dispatcher found that the function does not
# work with Quantity. In principle, there may be another class that
# knows what to do with us, for which we should return NotImplemented.
# But if there is ndarray (or a non-Quantity subclass of it) around,
# it quite likely coerces, so we should just break.
if any(
issubclass(t, np.ndarray) and not issubclass(t, Quantity) for t in types
):
raise TypeError(
f"the Quantity implementation cannot handle {function} "
"with the given arguments."
) from None
else:
return NotImplemented
# Calculation -- override ndarray methods to take into account units.
# We use the corresponding numpy functions to evaluate the results, since
# the methods do not always allow calling with keyword arguments.
# For instance, np.array([0.,2.]).clip(a_min=0., a_max=1.) gives
# TypeError: 'a_max' is an invalid keyword argument for this function.
def _wrap_function(self, function, *args, unit=None, out=None, **kwargs):
"""Wrap a numpy function that processes self, returning a Quantity.
Parameters
----------
function : callable
Numpy function to wrap.
args : positional arguments
Any positional arguments to the function beyond the first argument
(which will be set to ``self``).
kwargs : keyword arguments
Keyword arguments to the function.
If present, the following arguments are treated specially:
unit : `~astropy.units.Unit`
Unit of the output result. If not given, the unit of ``self``.
out : `~astropy.units.Quantity`
A Quantity instance in which to store the output.
Notes
-----
Output should always be assigned via a keyword argument, otherwise
no proper account of the unit is taken.
Returns
-------
out : `~astropy.units.Quantity`
Result of the function call, with the unit set properly.
"""
if unit is None:
unit = self.unit
# Ensure we don't loop back by turning any Quantity into array views.
args = (self.value,) + tuple(
(arg.value if isinstance(arg, Quantity) else arg) for arg in args
)
if out is not None:
# If pre-allocated output is used, check it is suitable.
# This also returns array view, to ensure we don't loop back.
arrays = tuple(arg for arg in args if isinstance(arg, np.ndarray))
kwargs["out"] = check_output(out, unit, arrays, function=function)
# Apply the function and turn it back into a Quantity.
result = function(*args, **kwargs)
return self._result_as_quantity(result, unit, out)
def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
return self._wrap_function(np.trace, offset, axis1, axis2, dtype, out=out)
def var(
self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True
):
return self._wrap_function(
np.var,
axis,
dtype,
out=out,
ddof=ddof,
keepdims=keepdims,
where=where,
unit=self.unit**2,
)
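# Illustrative note (not part of astropy's source): since the variance is
# computed from squared deviations, ``var`` returns a result in the squared
# unit, while ``std`` and ``mean`` keep the original unit. A minimal sketch:
#
# >>> import astropy.units as u
# >>> q = [1.0, 2.0] * u.m
# >>> q.var()  # doctest: +FLOAT_CMP
# <Quantity 0.25 m2>
# >>> q.std()  # doctest: +FLOAT_CMP
# <Quantity 0.5 m>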
def std(
self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True
):
return self._wrap_function(
np.std, axis, dtype, out=out, ddof=ddof, keepdims=keepdims, where=where
)
def mean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
return self._wrap_function(
np.mean, axis, dtype, out=out, keepdims=keepdims, where=where
)
def round(self, decimals=0, out=None):
return self._wrap_function(np.round, decimals, out=out)
def dot(self, b, out=None):
result_unit = self.unit * getattr(b, "unit", dimensionless_unscaled)
return self._wrap_function(np.dot, b, out=out, unit=result_unit)
# Calculation: override methods that do not make sense.
def all(self, axis=None, out=None):
raise TypeError(
"cannot evaluate truth value of quantities. "
"Evaluate array with q.value.all(...)"
)
def any(self, axis=None, out=None):
raise TypeError(
"cannot evaluate truth value of quantities. "
"Evaluate array with q.value.any(...)"
)
# Calculation: numpy functions that can be overridden with methods.
def diff(self, n=1, axis=-1):
return self._wrap_function(np.diff, n, axis)
def ediff1d(self, to_end=None, to_begin=None):
return self._wrap_function(np.ediff1d, to_end, to_begin)
if NUMPY_LT_1_22:
@deprecated("5.3", alternative="np.nansum", obj_type="method")
def nansum(self, axis=None, out=None, keepdims=False):
return self._wrap_function(np.nansum, axis, out=out, keepdims=keepdims)
else:
@deprecated("5.3", alternative="np.nansum", obj_type="method")
def nansum(
self, axis=None, out=None, keepdims=False, *, initial=None, where=True
):
if initial is not None:
initial = self._to_own_unit(initial)
return self._wrap_function(
np.nansum,
axis,
out=out,
keepdims=keepdims,
initial=initial,
where=where,
)
def insert(self, obj, values, axis=None):
"""
Insert values along the given axis before the given indices and return
a new `~astropy.units.Quantity` object.
This is a thin wrapper around the `numpy.insert` function.
Parameters
----------
obj : int, slice or sequence of int
Object that defines the index or indices before which ``values`` is
inserted.
values : array-like
Values to insert. If the type of ``values`` is different
from that of quantity, ``values`` is converted to the matching type.
``values`` should be shaped so that it can be broadcast appropriately.
The unit of ``values`` must be consistent with this quantity.
axis : int, optional
Axis along which to insert ``values``. If ``axis`` is None then
the quantity array is flattened before insertion.
Returns
-------
out : `~astropy.units.Quantity`
A copy of quantity with ``values`` inserted. Note that the
insertion does not occur in-place: a new quantity array is returned.
Examples
--------
>>> import astropy.units as u
>>> q = [1, 2] * u.m
>>> q.insert(0, 50 * u.cm)
<Quantity [ 0.5, 1., 2.] m>
>>> q = [[1, 2], [3, 4]] * u.m
>>> q.insert(1, [10, 20] * u.m, axis=0)
<Quantity [[ 1., 2.],
[ 10., 20.],
[ 3., 4.]] m>
>>> q.insert(1, 10 * u.m, axis=1)
<Quantity [[ 1., 10., 2.],
[ 3., 10., 4.]] m>
"""
out_array = np.insert(self.value, obj, self._to_own_unit(values), axis)
return self._new_view(out_array)
class SpecificTypeQuantity(Quantity):
"""Superclass for Quantities of specific physical type.
Subclasses of these work just like :class:`~astropy.units.Quantity`, except
that they are for specific physical types (and may have methods that are
only appropriate for that type). Astropy examples are
:class:`~astropy.coordinates.Angle` and
:class:`~astropy.coordinates.Distance`
At a minimum, subclasses should set ``_equivalent_unit`` to the unit
associated with the physical type.
"""
# The unit for the specific physical type. Instances can only be created
# with units that are equivalent to this.
_equivalent_unit = None
# The default unit used for views. Even with `None`, views of arrays
# without units are possible, but will have an uninitialized unit.
_unit = None
# Default unit for initialization through the constructor.
_default_unit = None
# ensure that we get precedence over our superclass.
__array_priority__ = Quantity.__array_priority__ + 10
def __quantity_subclass__(self, unit):
if unit.is_equivalent(self._equivalent_unit):
return type(self), True
else:
return super().__quantity_subclass__(unit)[0], False
def _set_unit(self, unit):
if unit is None or not unit.is_equivalent(self._equivalent_unit):
raise UnitTypeError(
"{} instances require units equivalent to '{}'".format(
type(self).__name__, self._equivalent_unit
)
+ (
", but no unit was given."
if unit is None
else f", so cannot set it to '{unit}'."
)
)
super()._set_unit(unit)
def isclose(a, b, rtol=1.0e-5, atol=None, equal_nan=False, **kwargs):
"""
Return a boolean array where two arrays are element-wise equal
within a tolerance.
Parameters
----------
a, b : array-like or `~astropy.units.Quantity`
Input values or arrays to compare
rtol : array-like or `~astropy.units.Quantity`
The relative tolerance for the comparison, which defaults to
``1e-5``. If ``rtol`` is a :class:`~astropy.units.Quantity`,
then it must be dimensionless.
atol : number or `~astropy.units.Quantity`
The absolute tolerance for the comparison. The units (or lack
thereof) of ``a``, ``b``, and ``atol`` must be consistent with
each other. If `None`, ``atol`` defaults to zero in the
appropriate units.
equal_nan : `bool`
Whether to compare NaNs as equal. If `True`, NaNs in ``a`` will
be considered equal to NaNs in ``b``.
Notes
-----
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.isclose`. However, this differs from the `numpy` function in
that the default for the absolute tolerance here is zero instead of
``atol=1e-8`` in `numpy`, as there is no natural way to set a default
*absolute* tolerance given two inputs that may have differently scaled
units.
Raises
------
`~astropy.units.UnitsError`
If the dimensions of ``a``, ``b``, or ``atol`` are incompatible,
or if ``rtol`` is not dimensionless.
See also
--------
allclose
"""
unquantified_args = _unquantify_allclose_arguments(a, b, rtol, atol)
return np.isclose(*unquantified_args, equal_nan=equal_nan, **kwargs)
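# Illustrative example (not part of astropy's source): operands are converted
# to common units before the comparison, so mixed but convertible units work;
# the values below are made up.
#
# >>> import astropy.units as u
# >>> u.isclose([1001.0] * u.m, 1 * u.km, atol=2 * u.m)
# array([ True])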
def allclose(a, b, rtol=1.0e-5, atol=None, equal_nan=False, **kwargs) -> bool:
"""
Whether two arrays are element-wise equal within a tolerance.
Parameters
----------
a, b : array-like or `~astropy.units.Quantity`
Input values or arrays to compare
rtol : array-like or `~astropy.units.Quantity`
The relative tolerance for the comparison, which defaults to
``1e-5``. If ``rtol`` is a :class:`~astropy.units.Quantity`,
then it must be dimensionless.
atol : number or `~astropy.units.Quantity`
The absolute tolerance for the comparison. The units (or lack
thereof) of ``a``, ``b``, and ``atol`` must be consistent with
each other. If `None`, ``atol`` defaults to zero in the
appropriate units.
equal_nan : `bool`
Whether to compare NaNs as equal. If `True`, NaNs in ``a`` will
be considered equal to NaNs in ``b``.
Notes
-----
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.allclose`. However, this differs from the `numpy` function in
that the default for the absolute tolerance here is zero instead of
``atol=1e-8`` in `numpy`, as there is no natural way to set a default
*absolute* tolerance given two inputs that may have differently scaled
units.
Raises
------
`~astropy.units.UnitsError`
If the dimensions of ``a``, ``b``, or ``atol`` are incompatible,
or if ``rtol`` is not dimensionless.
See also
--------
isclose
"""
unquantified_args = _unquantify_allclose_arguments(a, b, rtol, atol)
return np.allclose(*unquantified_args, equal_nan=equal_nan, **kwargs)
def _unquantify_allclose_arguments(actual, desired, rtol, atol):
actual = Quantity(actual, subok=True, copy=False)
desired = Quantity(desired, subok=True, copy=False)
try:
desired = desired.to(actual.unit)
except UnitsError:
raise UnitsError(
f"Units for 'desired' ({desired.unit}) and 'actual' "
f"({actual.unit}) are not convertible"
)
if atol is None:
# By default, we assume an absolute tolerance of zero in the
# appropriate units. The default value of None for atol is
# needed because the units of atol must be consistent with the
# units for a and b.
atol = Quantity(0)
else:
atol = Quantity(atol, subok=True, copy=False)
try:
atol = atol.to(actual.unit)
except UnitsError:
raise UnitsError(
f"Units for 'atol' ({atol.unit}) and 'actual' "
f"({actual.unit}) are not convertible"
)
rtol = Quantity(rtol, subok=True, copy=False)
try:
rtol = rtol.to(dimensionless_unscaled)
except Exception:
raise UnitsError("'rtol' should be dimensionless")
return actual.value, desired.value, rtol.value, atol.value
|
f2076a069cddb78e0b7cd0d53052b861e617b26b50da5945bc10e2b94a8c7568 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""A set of standard astronomical equivalencies."""
import warnings
from collections import UserList
# THIRD-PARTY
import numpy as np
# LOCAL
from astropy.constants import si as _si
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.utils.misc import isiterable
from . import astrophys, cgs, dimensionless_unscaled, misc, si
from .core import Unit, UnitsError
from .function import units as function_units
__all__ = [
"parallax",
"spectral",
"spectral_density",
"doppler_radio",
"doppler_optical",
"doppler_relativistic",
"doppler_redshift",
"mass_energy",
"brightness_temperature",
"thermodynamic_temperature",
"beam_angular_area",
"dimensionless_angles",
"logarithmic",
"temperature",
"temperature_energy",
"molar_mass_amu",
"pixel_scale",
"plate_scale",
"Equivalency",
]
class Equivalency(UserList):
"""
A container for a units equivalency.
Attributes
----------
name : `str`
The name of the equivalency.
kwargs : `dict`
Any positional or keyword arguments used to make the equivalency.
"""
def __init__(self, equiv_list, name="", kwargs=None):
self.data = equiv_list
self.name = [name]
self.kwargs = [kwargs] if kwargs is not None else [dict()]
def __add__(self, other):
if isinstance(other, Equivalency):
new = super().__add__(other)
new.name = self.name[:] + other.name
new.kwargs = self.kwargs[:] + other.kwargs
return new
else:
return self.data.__add__(other)
def __eq__(self, other):
return (
isinstance(other, self.__class__)
and self.name == other.name
and self.kwargs == other.kwargs
)
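# Illustrative example (not part of astropy's source): ``Equivalency``
# instances can be combined with ``+``, which concatenates both the
# equivalency pairs and the bookkeeping metadata.
#
# >>> import astropy.units as u
# >>> eq = u.dimensionless_angles() + u.logarithmic()
# >>> eq.name
# ['dimensionless_angles', 'logarithmic']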
def dimensionless_angles():
"""Allow angles to be equivalent to dimensionless (with 1 rad = 1 m/m = 1).
It is special compared to other equivalency pairs in that it
allows this independent of the power to which the angle is raised,
and independent of whether it is part of a more complicated unit.
"""
return Equivalency([(si.radian, None)], "dimensionless_angles")
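# Illustrative example (not part of astropy's source):
#
# >>> import astropy.units as u
# >>> (1.0 * u.rad).to(u.dimensionless_unscaled, equivalencies=u.dimensionless_angles())
# <Quantity 1.>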
def logarithmic():
"""Allow logarithmic units to be converted to dimensionless fractions."""
return Equivalency(
[(dimensionless_unscaled, function_units.dex, np.log10, lambda x: 10.0**x)],
"logarithmic",
)
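# Illustrative example (not part of astropy's source); output shown
# approximately, since a 2 dex logarithmic quantity corresponds to a
# physical factor of 100:
#
# >>> import astropy.units as u
# >>> (2.0 * u.dex).to(u.dimensionless_unscaled, equivalencies=u.logarithmic())  # doctest: +SKIP
# <Quantity 100.>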
def parallax():
"""
Returns a list of equivalence pairs that handle the conversion
between parallax angle and distance.
"""
def parallax_converter(x):
x = np.asanyarray(x)
d = 1 / x
if isiterable(d):
d[d < 0] = np.nan
return d
else:
if d < 0:
return np.array(np.nan)
else:
return d
return Equivalency(
[(si.arcsecond, astrophys.parsec, parallax_converter)], "parallax"
)
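# Illustrative example (not part of astropy's source): a 10 mas parallax
# corresponds to a distance of 100 pc.
#
# >>> import astropy.units as u
# >>> (10 * u.mas).to(u.pc, equivalencies=u.parallax())  # doctest: +FLOAT_CMP
# <Quantity 100. pc>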
def spectral():
"""
Returns a list of equivalence pairs that handle spectral
wavelength, wave number, frequency, and energy equivalencies.
Allows conversions between wavelength units, wave number units,
frequency units, and energy units as they relate to light.
There are two types of wave number:
* spectroscopic - :math:`1 / \\lambda` (per meter)
* angular - :math:`2 \\pi / \\lambda` (radian per meter)
"""
c = _si.c.value
h = _si.h.value
hc = h * c
two_pi = 2.0 * np.pi
inv_m_spec = si.m**-1
inv_m_ang = si.radian / si.m
return Equivalency(
[
(si.m, si.Hz, lambda x: c / x),
(si.m, si.J, lambda x: hc / x),
(si.Hz, si.J, lambda x: h * x, lambda x: x / h),
(si.m, inv_m_spec, lambda x: 1.0 / x),
(si.Hz, inv_m_spec, lambda x: x / c, lambda x: c * x),
(si.J, inv_m_spec, lambda x: x / hc, lambda x: hc * x),
(inv_m_spec, inv_m_ang, lambda x: x * two_pi, lambda x: x / two_pi),
(si.m, inv_m_ang, lambda x: two_pi / x),
(si.Hz, inv_m_ang, lambda x: two_pi * x / c, lambda x: c * x / two_pi),
(si.J, inv_m_ang, lambda x: x * two_pi / hc, lambda x: hc * x / two_pi),
],
"spectral",
)
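# Illustrative example (not part of astropy's source): a 500 nm photon has an
# energy of about 2.48 eV (E = h c / lambda).
#
# >>> import astropy.units as u
# >>> (500 * u.nm).to(u.eV, equivalencies=u.spectral())  # doctest: +FLOAT_CMP
# <Quantity 2.4796839 eV>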
def spectral_density(wav, factor=None):
"""
Returns a list of equivalence pairs that handle spectral density
with regard to wavelength and frequency.
Parameters
----------
wav : `~astropy.units.Quantity`
`~astropy.units.Quantity` associated with values being converted
(e.g., wavelength or frequency).
Notes
-----
The ``factor`` argument is left for backward-compatibility with the syntax
``spectral_density(unit, factor)`` but users are encouraged to use
``spectral_density(factor * unit)`` instead.
"""
from .core import UnitBase
if isinstance(wav, UnitBase):
if factor is None:
raise ValueError("If `wav` is specified as a unit, `factor` should be set")
wav = factor * wav # Convert to Quantity
c_Aps = _si.c.to_value(si.AA / si.s) # Angstrom/s
h_cgs = _si.h.cgs.value # erg * s
hc = c_Aps * h_cgs
# flux density
f_la = cgs.erg / si.angstrom / si.cm**2 / si.s
f_nu = cgs.erg / si.Hz / si.cm**2 / si.s
nu_f_nu = cgs.erg / si.cm**2 / si.s
la_f_la = nu_f_nu
phot_f_la = astrophys.photon / (si.cm**2 * si.s * si.AA)
phot_f_nu = astrophys.photon / (si.cm**2 * si.s * si.Hz)
la_phot_f_la = astrophys.photon / (si.cm**2 * si.s)
# luminosity density
L_nu = cgs.erg / si.s / si.Hz
L_la = cgs.erg / si.s / si.angstrom
nu_L_nu = cgs.erg / si.s
la_L_la = nu_L_nu
phot_L_la = astrophys.photon / (si.s * si.AA)
phot_L_nu = astrophys.photon / (si.s * si.Hz)
# surface brightness (flux equiv)
S_la = cgs.erg / si.angstrom / si.cm**2 / si.s / si.sr
S_nu = cgs.erg / si.Hz / si.cm**2 / si.s / si.sr
nu_S_nu = cgs.erg / si.cm**2 / si.s / si.sr
la_S_la = nu_S_nu
phot_S_la = astrophys.photon / (si.cm**2 * si.s * si.AA * si.sr)
phot_S_nu = astrophys.photon / (si.cm**2 * si.s * si.Hz * si.sr)
# surface brightness (luminosity equiv)
SL_nu = cgs.erg / si.s / si.Hz / si.sr
SL_la = cgs.erg / si.s / si.angstrom / si.sr
nu_SL_nu = cgs.erg / si.s / si.sr
la_SL_la = nu_SL_nu
phot_SL_la = astrophys.photon / (si.s * si.AA * si.sr)
phot_SL_nu = astrophys.photon / (si.s * si.Hz * si.sr)
def f_la_to_f_nu(x):
return x * (wav.to_value(si.AA, spectral()) ** 2 / c_Aps)
def f_la_from_f_nu(x):
return x / (wav.to_value(si.AA, spectral()) ** 2 / c_Aps)
def f_nu_to_nu_f_nu(x):
return x * wav.to_value(si.Hz, spectral())
def f_nu_from_nu_f_nu(x):
return x / wav.to_value(si.Hz, spectral())
def f_la_to_la_f_la(x):
return x * wav.to_value(si.AA, spectral())
def f_la_from_la_f_la(x):
return x / wav.to_value(si.AA, spectral())
def phot_f_la_to_f_la(x):
return hc * x / wav.to_value(si.AA, spectral())
def phot_f_la_from_f_la(x):
return x * wav.to_value(si.AA, spectral()) / hc
def phot_f_la_to_f_nu(x):
return h_cgs * x * wav.to_value(si.AA, spectral())
def phot_f_la_from_f_nu(x):
return x / (wav.to_value(si.AA, spectral()) * h_cgs)
def phot_f_la_to_phot_f_nu(x):
return x * wav.to_value(si.AA, spectral()) ** 2 / c_Aps
def phot_f_la_from_phot_f_nu(x):
return c_Aps * x / wav.to_value(si.AA, spectral()) ** 2
phot_f_nu_to_f_nu = phot_f_la_to_f_la
phot_f_nu_from_f_nu = phot_f_la_from_f_la
def phot_f_nu_to_f_la(x):
return x * hc * c_Aps / wav.to_value(si.AA, spectral()) ** 3
def phot_f_nu_from_f_la(x):
return x * wav.to_value(si.AA, spectral()) ** 3 / (hc * c_Aps)
# for luminosity density
L_nu_to_nu_L_nu = f_nu_to_nu_f_nu
L_nu_from_nu_L_nu = f_nu_from_nu_f_nu
L_la_to_la_L_la = f_la_to_la_f_la
L_la_from_la_L_la = f_la_from_la_f_la
phot_L_la_to_L_la = phot_f_la_to_f_la
phot_L_la_from_L_la = phot_f_la_from_f_la
phot_L_la_to_L_nu = phot_f_la_to_f_nu
phot_L_la_from_L_nu = phot_f_la_from_f_nu
phot_L_la_to_phot_L_nu = phot_f_la_to_phot_f_nu
phot_L_la_from_phot_L_nu = phot_f_la_from_phot_f_nu
phot_L_nu_to_L_nu = phot_f_nu_to_f_nu
phot_L_nu_from_L_nu = phot_f_nu_from_f_nu
phot_L_nu_to_L_la = phot_f_nu_to_f_la
phot_L_nu_from_L_la = phot_f_nu_from_f_la
return Equivalency(
[
# flux
(f_la, f_nu, f_la_to_f_nu, f_la_from_f_nu),
(f_nu, nu_f_nu, f_nu_to_nu_f_nu, f_nu_from_nu_f_nu),
(f_la, la_f_la, f_la_to_la_f_la, f_la_from_la_f_la),
(phot_f_la, f_la, phot_f_la_to_f_la, phot_f_la_from_f_la),
(phot_f_la, f_nu, phot_f_la_to_f_nu, phot_f_la_from_f_nu),
(phot_f_la, phot_f_nu, phot_f_la_to_phot_f_nu, phot_f_la_from_phot_f_nu),
(phot_f_nu, f_nu, phot_f_nu_to_f_nu, phot_f_nu_from_f_nu),
(phot_f_nu, f_la, phot_f_nu_to_f_la, phot_f_nu_from_f_la),
# integrated flux
(la_phot_f_la, la_f_la, phot_f_la_to_f_la, phot_f_la_from_f_la),
# luminosity
(L_la, L_nu, f_la_to_f_nu, f_la_from_f_nu),
(L_nu, nu_L_nu, L_nu_to_nu_L_nu, L_nu_from_nu_L_nu),
(L_la, la_L_la, L_la_to_la_L_la, L_la_from_la_L_la),
(phot_L_la, L_la, phot_L_la_to_L_la, phot_L_la_from_L_la),
(phot_L_la, L_nu, phot_L_la_to_L_nu, phot_L_la_from_L_nu),
(phot_L_la, phot_L_nu, phot_L_la_to_phot_L_nu, phot_L_la_from_phot_L_nu),
(phot_L_nu, L_nu, phot_L_nu_to_L_nu, phot_L_nu_from_L_nu),
(phot_L_nu, L_la, phot_L_nu_to_L_la, phot_L_nu_from_L_la),
# surface brightness (flux equiv)
(S_la, S_nu, f_la_to_f_nu, f_la_from_f_nu),
(S_nu, nu_S_nu, f_nu_to_nu_f_nu, f_nu_from_nu_f_nu),
(S_la, la_S_la, f_la_to_la_f_la, f_la_from_la_f_la),
(phot_S_la, S_la, phot_f_la_to_f_la, phot_f_la_from_f_la),
(phot_S_la, S_nu, phot_f_la_to_f_nu, phot_f_la_from_f_nu),
(phot_S_la, phot_S_nu, phot_f_la_to_phot_f_nu, phot_f_la_from_phot_f_nu),
(phot_S_nu, S_nu, phot_f_nu_to_f_nu, phot_f_nu_from_f_nu),
(phot_S_nu, S_la, phot_f_nu_to_f_la, phot_f_nu_from_f_la),
# surface brightness (luminosity equiv)
(SL_la, SL_nu, f_la_to_f_nu, f_la_from_f_nu),
(SL_nu, nu_SL_nu, L_nu_to_nu_L_nu, L_nu_from_nu_L_nu),
(SL_la, la_SL_la, L_la_to_la_L_la, L_la_from_la_L_la),
(phot_SL_la, SL_la, phot_L_la_to_L_la, phot_L_la_from_L_la),
(phot_SL_la, SL_nu, phot_L_la_to_L_nu, phot_L_la_from_L_nu),
(phot_SL_la, phot_SL_nu, phot_L_la_to_phot_L_nu, phot_L_la_from_phot_L_nu),
(phot_SL_nu, SL_nu, phot_L_nu_to_L_nu, phot_L_nu_from_L_nu),
(phot_SL_nu, SL_la, phot_L_nu_to_L_la, phot_L_nu_from_L_la),
],
"spectral_density",
{"wav": wav, "factor": factor},
)
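# Illustrative example (not part of astropy's source): converting f_lambda to
# f_nu at 5500 Angstrom uses f_nu = f_lambda * lambda**2 / c, giving roughly
# 1.009e-4 Jy for the made-up input below.
#
# >>> import astropy.units as u
# >>> flam = 1e-16 * u.erg / u.s / u.cm**2 / u.AA
# >>> flam.to(u.Jy, equivalencies=u.spectral_density(5500 * u.AA))  # doctest: +FLOAT_CMP
# <Quantity 0.00010090314 Jy>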
def doppler_radio(rest):
r"""
Return the equivalency pairs for the radio convention for velocity.
The radio convention for the relation between velocity and frequency is:
:math:`V = c \frac{f_0 - f}{f_0} ; f(V) = f_0 ( 1 - V/c )`
Parameters
----------
rest : `~astropy.units.Quantity`
Any quantity supported by the standard spectral equivalencies
(wavelength, energy, frequency, wave number).
References
----------
`NRAO site defining the conventions <https://www.gb.nrao.edu/~fghigo/gbtdoc/doppler.html>`_
Examples
--------
>>> import astropy.units as u
>>> CO_restfreq = 115.27120*u.GHz # rest frequency of 12 CO 1-0 in GHz
>>> radio_CO_equiv = u.doppler_radio(CO_restfreq)
>>> measured_freq = 115.2832*u.GHz
>>> radio_velocity = measured_freq.to(u.km/u.s, equivalencies=radio_CO_equiv)
>>> radio_velocity # doctest: +FLOAT_CMP
<Quantity -31.209092088877583 km / s>
"""
assert_is_spectral_unit(rest)
ckms = _si.c.to_value("km/s")
def to_vel_freq(x):
restfreq = rest.to_value(si.Hz, equivalencies=spectral())
return (restfreq - x) / (restfreq) * ckms
def from_vel_freq(x):
restfreq = rest.to_value(si.Hz, equivalencies=spectral())
voverc = x / ckms
return restfreq * (1 - voverc)
def to_vel_wav(x):
restwav = rest.to_value(si.AA, spectral())
return (x - restwav) / (x) * ckms
def from_vel_wav(x):
restwav = rest.to_value(si.AA, spectral())
return restwav * ckms / (ckms - x)
def to_vel_en(x):
resten = rest.to_value(si.eV, equivalencies=spectral())
return (resten - x) / (resten) * ckms
def from_vel_en(x):
resten = rest.to_value(si.eV, equivalencies=spectral())
voverc = x / ckms
return resten * (1 - voverc)
return Equivalency(
[
(si.Hz, si.km / si.s, to_vel_freq, from_vel_freq),
(si.AA, si.km / si.s, to_vel_wav, from_vel_wav),
(si.eV, si.km / si.s, to_vel_en, from_vel_en),
],
"doppler_radio",
{"rest": rest},
)
def doppler_optical(rest):
r"""
Return the equivalency pairs for the optical convention for velocity.
The optical convention for the relation between velocity and frequency is:
:math:`V = c \frac{f_0 - f}{f } ; f(V) = f_0 ( 1 + V/c )^{-1}`
Parameters
----------
rest : `~astropy.units.Quantity`
Any quantity supported by the standard spectral equivalencies
(wavelength, energy, frequency, wave number).
References
----------
`NRAO site defining the conventions <https://www.gb.nrao.edu/~fghigo/gbtdoc/doppler.html>`_
Examples
--------
>>> import astropy.units as u
>>> CO_restfreq = 115.27120*u.GHz # rest frequency of 12 CO 1-0 in GHz
>>> optical_CO_equiv = u.doppler_optical(CO_restfreq)
>>> measured_freq = 115.2832*u.GHz
>>> optical_velocity = measured_freq.to(u.km/u.s, equivalencies=optical_CO_equiv)
>>> optical_velocity # doctest: +FLOAT_CMP
<Quantity -31.20584348799674 km / s>
"""
assert_is_spectral_unit(rest)
ckms = _si.c.to_value("km/s")
def to_vel_freq(x):
restfreq = rest.to_value(si.Hz, equivalencies=spectral())
return ckms * (restfreq - x) / x
def from_vel_freq(x):
restfreq = rest.to_value(si.Hz, equivalencies=spectral())
voverc = x / ckms
return restfreq / (1 + voverc)
def to_vel_wav(x):
restwav = rest.to_value(si.AA, spectral())
return ckms * (x / restwav - 1)
def from_vel_wav(x):
restwav = rest.to_value(si.AA, spectral())
voverc = x / ckms
return restwav * (1 + voverc)
def to_vel_en(x):
resten = rest.to_value(si.eV, equivalencies=spectral())
return ckms * (resten - x) / x
def from_vel_en(x):
resten = rest.to_value(si.eV, equivalencies=spectral())
voverc = x / ckms
return resten / (1 + voverc)
return Equivalency(
[
(si.Hz, si.km / si.s, to_vel_freq, from_vel_freq),
(si.AA, si.km / si.s, to_vel_wav, from_vel_wav),
(si.eV, si.km / si.s, to_vel_en, from_vel_en),
],
"doppler_optical",
{"rest": rest},
)
def doppler_relativistic(rest):
r"""
Return the equivalency pairs for the relativistic convention for velocity.
The full relativistic convention for the relation between velocity and frequency is:
:math:`V = c \frac{f_0^2 - f^2}{f_0^2 + f^2} ; f(V) = f_0 \frac{\left(1 - (V/c)^2\right)^{1/2}}{(1+V/c)}`
Parameters
----------
rest : `~astropy.units.Quantity`
Any quantity supported by the standard spectral equivalencies
(wavelength, energy, frequency, wave number).
References
----------
`NRAO site defining the conventions <https://www.gb.nrao.edu/~fghigo/gbtdoc/doppler.html>`_
Examples
--------
>>> import astropy.units as u
>>> CO_restfreq = 115.27120*u.GHz # rest frequency of 12 CO 1-0 in GHz
>>> relativistic_CO_equiv = u.doppler_relativistic(CO_restfreq)
>>> measured_freq = 115.2832*u.GHz
>>> relativistic_velocity = measured_freq.to(u.km/u.s, equivalencies=relativistic_CO_equiv)
>>> relativistic_velocity # doctest: +FLOAT_CMP
<Quantity -31.207467619351537 km / s>
>>> measured_velocity = 1250 * u.km/u.s
>>> relativistic_frequency = measured_velocity.to(u.GHz, equivalencies=relativistic_CO_equiv)
>>> relativistic_frequency # doctest: +FLOAT_CMP
<Quantity 114.79156866993588 GHz>
>>> relativistic_wavelength = measured_velocity.to(u.mm, equivalencies=relativistic_CO_equiv)
>>> relativistic_wavelength # doctest: +FLOAT_CMP
<Quantity 2.6116243681798923 mm>
"""
assert_is_spectral_unit(rest)
ckms = _si.c.to_value("km/s")
def to_vel_freq(x):
restfreq = rest.to_value(si.Hz, equivalencies=spectral())
return (restfreq**2 - x**2) / (restfreq**2 + x**2) * ckms
def from_vel_freq(x):
restfreq = rest.to_value(si.Hz, equivalencies=spectral())
voverc = x / ckms
return restfreq * ((1 - voverc) / (1 + (voverc))) ** 0.5
def to_vel_wav(x):
restwav = rest.to_value(si.AA, spectral())
return (x**2 - restwav**2) / (restwav**2 + x**2) * ckms
def from_vel_wav(x):
restwav = rest.to_value(si.AA, spectral())
voverc = x / ckms
return restwav * ((1 + voverc) / (1 - voverc)) ** 0.5
def to_vel_en(x):
resten = rest.to_value(si.eV, spectral())
return (resten**2 - x**2) / (resten**2 + x**2) * ckms
def from_vel_en(x):
resten = rest.to_value(si.eV, spectral())
voverc = x / ckms
return resten * ((1 - voverc) / (1 + (voverc))) ** 0.5
return Equivalency(
[
(si.Hz, si.km / si.s, to_vel_freq, from_vel_freq),
(si.AA, si.km / si.s, to_vel_wav, from_vel_wav),
(si.eV, si.km / si.s, to_vel_en, from_vel_en),
],
"doppler_relativistic",
{"rest": rest},
)
def doppler_redshift():
"""
Returns the equivalence between Doppler redshift (unitless) and radial velocity.
.. note::
This equivalency is not compatible with cosmological
redshift in `astropy.cosmology.units`.
"""
rv_unit = si.km / si.s
C_KMS = _si.c.to_value(rv_unit)
def convert_z_to_rv(z):
zponesq = (1 + z) ** 2
return C_KMS * (zponesq - 1) / (zponesq + 1)
def convert_rv_to_z(rv):
beta = rv / C_KMS
return np.sqrt((1 + beta) / (1 - beta)) - 1
return Equivalency(
[(dimensionless_unscaled, rv_unit, convert_z_to_rv, convert_rv_to_z)],
"doppler_redshift",
)
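# Illustrative example (not part of astropy's source): z = 1 corresponds to a
# recession velocity of 0.6 c.
#
# >>> import astropy.units as u
# >>> (1.0 * u.dimensionless_unscaled).to(u.km / u.s, equivalencies=u.doppler_redshift())  # doctest: +FLOAT_CMP
# <Quantity 179875.4748 km / s>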
def molar_mass_amu():
"""
Returns the equivalence between amu and molar mass.
"""
return Equivalency([(si.g / si.mol, misc.u)], "molar_mass_amu")
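# Illustrative example (not part of astropy's source):
#
# >>> import astropy.units as u
# >>> (12.0 * u.g / u.mol).to(u.u, equivalencies=u.molar_mass_amu())  # doctest: +FLOAT_CMP
# <Quantity 12. u>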
def mass_energy():
"""
Returns a list of equivalence pairs that handle the conversion
between mass and energy.
"""
c2 = _si.c.value**2
return Equivalency(
[
(si.kg, si.J, lambda x: x * c2, lambda x: x / c2),
(si.kg / si.m**2, si.J / si.m**2, lambda x: x * c2, lambda x: x / c2),
(si.kg / si.m**3, si.J / si.m**3, lambda x: x * c2, lambda x: x / c2),
(si.kg / si.s, si.J / si.s, lambda x: x * c2, lambda x: x / c2),
],
"mass_energy",
)
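# Illustrative example (not part of astropy's source): E = m c**2, so one
# kilogram is equivalent to about 9e16 joules.
#
# >>> import astropy.units as u
# >>> (1 * u.kg).to(u.J, equivalencies=u.mass_energy())  # doctest: +FLOAT_CMP
# <Quantity 8.98755179e+16 J>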
def brightness_temperature(frequency, beam_area=None):
r"""
Defines the conversion between Jy/sr and "brightness temperature",
:math:`T_B`, in Kelvins. The brightness temperature is a unit very
commonly used in radio astronomy. See, e.g., "Tools of Radio Astronomy"
(Wilson 2009) eqn 8.16 and eqn 8.19 (these pages are available on `google
books
<https://books.google.com/books?id=9KHw6R8rQEMC&pg=PA179&source=gbs_toc_r&cad=4#v=onepage&q&f=false>`__).
:math:`T_B \equiv S_\nu / \left(2 k \nu^2 / c^2 \right)`
If the input is in Jy/beam or Jy (assuming it came from a single beam), the
beam area is essential for this computation: the brightness temperature is
inversely proportional to the beam area.
Parameters
----------
frequency : `~astropy.units.Quantity`
The observed ``spectral`` equivalent `~astropy.units.Unit` (e.g.,
frequency or wavelength). The variable is named 'frequency' because it
is more commonly used in radio astronomy.
BACKWARD COMPATIBILITY NOTE: previous versions of the brightness
temperature equivalency used the keyword ``disp``, which is no longer
supported.
beam_area : `~astropy.units.Quantity` ['solid angle']
Beam area in angular units, i.e. steradian equivalent
Examples
--------
Arecibo C-band beam::
>>> import numpy as np
>>> from astropy import units as u
>>> beam_sigma = 50*u.arcsec
>>> beam_area = 2*np.pi*(beam_sigma)**2
>>> freq = 5*u.GHz
>>> equiv = u.brightness_temperature(freq)
>>> (1*u.Jy/beam_area).to(u.K, equivalencies=equiv) # doctest: +FLOAT_CMP
<Quantity 3.526295144567176 K>
VLA synthetic beam::
>>> bmaj = 15*u.arcsec
>>> bmin = 15*u.arcsec
>>> fwhm_to_sigma = 1./(8*np.log(2))**0.5
>>> beam_area = 2.*np.pi*(bmaj*bmin*fwhm_to_sigma**2)
>>> freq = 5*u.GHz
>>> equiv = u.brightness_temperature(freq)
>>> (u.Jy/beam_area).to(u.K, equivalencies=equiv) # doctest: +FLOAT_CMP
<Quantity 217.2658703625732 K>
Any generic surface brightness:
>>> surf_brightness = 1e6*u.MJy/u.sr
>>> surf_brightness.to(u.K, equivalencies=u.brightness_temperature(500*u.GHz)) # doctest: +FLOAT_CMP
<Quantity 130.1931904778803 K>
"""
if frequency.unit.is_equivalent(si.sr):
if not beam_area.unit.is_equivalent(si.Hz):
raise ValueError(
"The inputs to `brightness_temperature` are frequency and angular area."
)
warnings.warn(
"The inputs to `brightness_temperature` have changed. "
"Frequency is now the first input, and angular area "
"is the second, optional input.",
AstropyDeprecationWarning,
)
frequency, beam_area = beam_area, frequency
nu = frequency.to(si.GHz, spectral())
factor_Jy = (2 * _si.k_B * si.K * nu**2 / _si.c**2).to(astrophys.Jy).value
factor_K = (astrophys.Jy / (2 * _si.k_B * nu**2 / _si.c**2)).to(si.K).value
if beam_area is not None:
beam = beam_area.to_value(si.sr)
def convert_Jy_to_K(x_jybm):
return x_jybm / beam / factor_Jy
def convert_K_to_Jy(x_K):
return x_K * beam / factor_K
return Equivalency(
[
(astrophys.Jy, si.K, convert_Jy_to_K, convert_K_to_Jy),
(astrophys.Jy / astrophys.beam, si.K, convert_Jy_to_K, convert_K_to_Jy),
],
"brightness_temperature",
{"frequency": frequency, "beam_area": beam_area},
)
else:
def convert_JySr_to_K(x_jysr):
return x_jysr / factor_Jy
def convert_K_to_JySr(x_K):
return x_K / factor_K # multiplied by 1x for 1 steradian
return Equivalency(
[(astrophys.Jy / si.sr, si.K, convert_JySr_to_K, convert_K_to_JySr)],
"brightness_temperature",
{"frequency": frequency, "beam_area": beam_area},
)
def beam_angular_area(beam_area):
"""
Convert between the ``beam`` unit, which is commonly used to express the area
of a radio telescope resolution element, and an area on the sky.
This equivalency also supports direct conversion between ``Jy/beam`` and
``Jy/steradian`` units, since that is a common operation.
Parameters
----------
beam_area : unit-like
The area of the beam in angular area units (e.g., steradians)
Must have angular area equivalent units.
"""
return Equivalency(
[
(astrophys.beam, Unit(beam_area)),
(astrophys.beam**-1, Unit(beam_area) ** -1),
(astrophys.Jy / astrophys.beam, astrophys.Jy / Unit(beam_area)),
],
"beam_angular_area",
{"beam_area": beam_area},
)
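# Illustrative example (not part of astropy's source), with a made-up beam
# area of 1e-8 sr:
#
# >>> import astropy.units as u
# >>> (1 * u.Jy / u.beam).to(u.Jy / u.sr, equivalencies=u.beam_angular_area(1e-8 * u.sr))  # doctest: +FLOAT_CMP
# <Quantity 1.e+08 Jy / sr>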
def thermodynamic_temperature(frequency, T_cmb=None):
r"""Defines the conversion between Jy/sr and "thermodynamic temperature",
:math:`T_{CMB}`, in Kelvins. The thermodynamic temperature is a unit very
commonly used in cosmology. See eqn. 8 in [1]_.
:math:`K_{CMB} \equiv I_\nu / \left(2 k \nu^2 / c^2 f(\nu) \right)`
with :math:`f(\nu) = \frac{ x^2 e^x}{(e^x - 1 )^2}`
where :math:`x = h \nu / k T`
Parameters
----------
frequency : `~astropy.units.Quantity`
The observed `spectral` equivalent `~astropy.units.Unit` (e.g.,
frequency or wavelength). Must have spectral units.
T_cmb : `~astropy.units.Quantity` ['temperature'] or None
The CMB temperature at z=0. If `None`, the default cosmology will be
used to get this temperature. Must have units of temperature.
Notes
-----
For broad-band receivers, this conversion does not hold,
as it depends strongly on the frequency.
References
----------
.. [1] Planck 2013 results. IX. HFI spectral response
https://arxiv.org/abs/1303.5070
Examples
--------
Planck HFI 143 GHz::
>>> from astropy import units as u
>>> from astropy.cosmology import Planck15
>>> freq = 143 * u.GHz
>>> equiv = u.thermodynamic_temperature(freq, Planck15.Tcmb0)
>>> (1. * u.mK).to(u.MJy / u.sr, equivalencies=equiv) # doctest: +FLOAT_CMP
<Quantity 0.37993172 MJy / sr>
"""
nu = frequency.to(si.GHz, spectral())
if T_cmb is None:
from astropy.cosmology import default_cosmology
T_cmb = default_cosmology.get().Tcmb0
def f(nu, T_cmb=T_cmb):
x = _si.h * nu / _si.k_B / T_cmb
return x**2 * np.exp(x) / np.expm1(x) ** 2
def convert_Jy_to_K(x_jybm):
factor = (f(nu) * 2 * _si.k_B * si.K * nu**2 / _si.c**2).to_value(
astrophys.Jy
)
return x_jybm / factor
def convert_K_to_Jy(x_K):
factor = (astrophys.Jy / (f(nu) * 2 * _si.k_B * nu**2 / _si.c**2)).to_value(
si.K
)
return x_K / factor
return Equivalency(
[(astrophys.Jy / si.sr, si.K, convert_Jy_to_K, convert_K_to_Jy)],
"thermodynamic_temperature",
{"frequency": frequency, "T_cmb": T_cmb},
)
def temperature():
"""Convert between Kelvin, Celsius, Rankine and Fahrenheit here because
Unit and CompositeUnit cannot do addition or subtraction properly.
"""
from .imperial import deg_F as F
from .imperial import deg_R as R
K = si.K
C = si.deg_C
return Equivalency(
[
(K, C, lambda x: x - 273.15, lambda x: x + 273.15),
(C, F, lambda x: x * 1.8 + 32.0, lambda x: (x - 32.0) / 1.8),
(K, F, lambda x: x * 1.8 - 459.67, lambda x: (x + 459.67) / 1.8),
(R, F, lambda x: x - 459.67, lambda x: x + 459.67),
(R, C, lambda x: (x - 491.67) * (5 / 9), lambda x: x * 1.8 + 491.67),
(R, K, lambda x: x * (5 / 9), lambda x: x * 1.8),
],
"temperature",
)
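# Illustrative example (not part of astropy's source):
#
# >>> import astropy.units as u
# >>> (0 * u.deg_C).to(u.K, equivalencies=u.temperature())  # doctest: +FLOAT_CMP
# <Quantity 273.15 K>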
def temperature_energy():
"""Convert between Kelvin and keV(eV) to an equivalent amount."""
e = _si.e.value
k_B = _si.k_B.value
return Equivalency(
[(si.K, si.eV, lambda x: x / (e / k_B), lambda x: x * (e / k_B))],
"temperature_energy",
)
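# Illustrative example (not part of astropy's source): with e/k_B of about
# 11604.5 K per eV, 1 keV corresponds to roughly 1.16e7 K.
#
# >>> import astropy.units as u
# >>> (1 * u.keV).to(u.K, equivalencies=u.temperature_energy())  # doctest: +FLOAT_CMP
# <Quantity 11604518.12 K>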
def assert_is_spectral_unit(value):
try:
value.to(si.Hz, spectral())
except (AttributeError, UnitsError) as ex:
raise UnitsError(
"The 'rest' value must be a spectral equivalent "
"(frequency, wavelength, or energy)."
) from ex
def pixel_scale(pixscale):
"""
Convert between pixel distances (in units of ``pix``) and other units,
given a particular ``pixscale``.
Parameters
----------
pixscale : `~astropy.units.Quantity`
The pixel scale either in units of <unit>/pixel or pixel/<unit>.
"""
decomposed = pixscale.unit.decompose()
dimensions = dict(zip(decomposed.bases, decomposed.powers))
pix_power = dimensions.get(misc.pix, 0)
if pix_power == -1:
physical_unit = Unit(pixscale * misc.pix)
elif pix_power == 1:
physical_unit = Unit(misc.pix / pixscale)
else:
raise UnitsError(
"The pixel scale unit must have pixel dimensionality of 1 or -1."
)
return Equivalency(
[(misc.pix, physical_unit)], "pixel_scale", {"pixscale": pixscale}
)
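# Illustrative example (not part of astropy's source), with a made-up scale
# of 0.1 arcsec per pixel:
#
# >>> import astropy.units as u
# >>> pixscale = u.pixel_scale(0.1 * u.arcsec / u.pix)
# >>> (100 * u.pix).to(u.arcsec, equivalencies=pixscale)  # doctest: +FLOAT_CMP
# <Quantity 10. arcsec>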
def plate_scale(platescale):
"""
Convert between lengths (to be interpreted as lengths in the focal plane)
and angular units with a specified ``platescale``.
Parameters
----------
platescale : `~astropy.units.Quantity`
The plate scale, either in units of angle/distance or distance/angle.
"""
if platescale.unit.is_equivalent(si.arcsec / si.m):
platescale_val = platescale.to_value(si.radian / si.m)
elif platescale.unit.is_equivalent(si.m / si.arcsec):
platescale_val = (1 / platescale).to_value(si.radian / si.m)
else:
raise UnitsError("The pixel scale must be in angle/distance or distance/angle")
return Equivalency(
[(si.m, si.radian, lambda d: d * platescale_val, lambda a: a / platescale_val)],
"plate_scale",
{"platescale": platescale},
)
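# Illustrative example (not part of astropy's source), with a made-up plate
# scale of 20 arcsec per millimetre in the focal plane:
#
# >>> import astropy.units as u
# >>> ps = u.plate_scale(20 * u.arcsec / u.mm)
# >>> (1 * u.mm).to(u.arcsec, equivalencies=ps)  # doctest: +FLOAT_CMP
# <Quantity 20. arcsec>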
# -------------------------------------------------------------------------
def __getattr__(attr):
if attr == "with_H0":
import warnings
from astropy.cosmology.units import with_H0
from astropy.utils.exceptions import AstropyDeprecationWarning
warnings.warn(
"`with_H0` is deprecated from `astropy.units.equivalencies` "
"since astropy 5.0 and may be removed in a future version. "
"Use `astropy.cosmology.units.with_H0` instead.",
AstropyDeprecationWarning,
)
return with_H0
raise AttributeError(f"module {__name__!r} has no attribute {attr!r}.")
|
d9568adcc9cbb67de4009167fef4821983fc7aa71c7fbbab1e225bc2107e0292 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains convenience functions for retrieving solar system
ephemerides from jplephem.
"""
import os.path
import re
from urllib.parse import urlparse
import erfa
import numpy as np
from astropy import units as u
from astropy.constants import c as speed_of_light
from astropy.utils import indent
from astropy.utils.data import download_file
from astropy.utils.decorators import classproperty, deprecated
from astropy.utils.state import ScienceState
from .builtin_frames import GCRS, ICRS, ITRS, TETE
from .builtin_frames.utils import get_jd12
from .representation import CartesianDifferential, CartesianRepresentation
from .sky_coordinate import SkyCoord
__all__ = [
"get_body",
"get_moon",
"get_body_barycentric",
"get_body_barycentric_posvel",
"solar_system_ephemeris",
]
DEFAULT_JPL_EPHEMERIS = "de430"
"""List of kernel pairs needed to calculate positions of a given object."""
BODY_NAME_TO_KERNEL_SPEC = {
"sun": [(0, 10)],
"mercury": [(0, 1), (1, 199)],
"venus": [(0, 2), (2, 299)],
"earth-moon-barycenter": [(0, 3)],
"earth": [(0, 3), (3, 399)],
"moon": [(0, 3), (3, 301)],
"mars": [(0, 4)],
"jupiter": [(0, 5)],
"saturn": [(0, 6)],
"uranus": [(0, 7)],
"neptune": [(0, 8)],
"pluto": [(0, 9)],
}
"""Indices to the plan94 routine for the given object."""
PLAN94_BODY_NAME_TO_PLANET_INDEX = {
"mercury": 1,
"venus": 2,
"earth-moon-barycenter": 3,
"mars": 4,
"jupiter": 5,
"saturn": 6,
"uranus": 7,
"neptune": 8,
}
_EPHEMERIS_NOTE = """
You can either give an explicit ephemeris or use a default, which is normally
a built-in ephemeris that does not require ephemeris files. To change
the default to be the JPL ephemeris::
>>> from astropy.coordinates import solar_system_ephemeris
>>> solar_system_ephemeris.set('jpl') # doctest: +SKIP
Use of any JPL ephemeris requires the jplephem package
(https://pypi.org/project/jplephem/).
If needed, the ephemeris file will be downloaded (and cached).
One can check which bodies are covered by a given ephemeris using::
>>> solar_system_ephemeris.bodies
('earth', 'sun', 'moon', 'mercury', 'venus', 'earth-moon-barycenter', 'mars', 'jupiter', 'saturn', 'uranus', 'neptune')
"""[
1:-1
]
class solar_system_ephemeris(ScienceState):
"""Default ephemerides for calculating positions of Solar-System bodies.
This can be one of the following:
- 'builtin': polynomial approximations to the orbital elements.
- 'dexxx[s]', for a JPL dynamical model, where xxx is the three digit
version number (e.g. de430), and the 's' is optional to specify the
'small' version of a kernel. The version number must correspond to an
ephemeris file available at:
https://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/
- 'jpl': Alias for the default JPL ephemeris (currently, 'de430').
- URL: (str) The url to a SPK ephemeris in SPICE binary (.bsp) format.
- PATH: (str) File path to a SPK ephemeris in SPICE binary (.bsp) format.
- `None`: Ensure an Exception is raised without an explicit ephemeris.
The default is 'builtin', which uses the ``epv00`` and ``plan94``
routines from the ``erfa`` implementation of the Standards Of Fundamental
Astronomy library.
Notes
-----
Any file required will be downloaded (and cached) when the state is set.
The default Satellite Planet Kernel (SPK) file from NASA JPL (de430) is
~120MB, and covers years ~1550-2650 CE [1]_. The smaller de432s file is
~10MB, and covers years 1950-2050 [2]_ (and similarly for the newer de440
and de440s). Older versions of the JPL ephemerides (such as the widely
used de200) can be used via their URL [3]_.
.. [1] https://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/aareadme_de430-de431.txt
.. [2] https://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/aareadme_de432s.txt
.. [3] https://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/a_old_versions/
"""
_value = "builtin"
_kernel = None
@classmethod
def validate(cls, value):
# make no changes if value is None
if value is None:
return cls._value
# Set up Kernel; if the file is not in cache, this will download it.
cls.get_kernel(value)
return value
@classmethod
def get_kernel(cls, value):
# ScienceState only ensures the `_value` attribute is up to date,
# so we need to be sure any kernel returned is consistent.
if cls._kernel is None or cls._kernel.origin != value:
if cls._kernel is not None:
cls._kernel.daf.file.close()
cls._kernel = None
kernel = _get_kernel(value)
if kernel is not None:
kernel.origin = value
cls._kernel = kernel
return cls._kernel
@classproperty
def kernel(cls):
return cls.get_kernel(cls._value)
@classproperty
def bodies(cls):
if cls._value is None:
return None
if cls._value.lower() == "builtin":
return ("earth", "sun", "moon") + tuple(
PLAN94_BODY_NAME_TO_PLANET_INDEX.keys()
)
else:
return tuple(BODY_NAME_TO_KERNEL_SPEC.keys())
def _get_kernel(value):
"""
Try importing jplephem, download/retrieve from cache the Satellite Planet
Kernel corresponding to the given ephemeris.
"""
if value is None or value.lower() == "builtin":
return None
try:
from jplephem.spk import SPK
except ImportError:
raise ImportError(
"Solar system JPL ephemeris calculations require the jplephem package "
"(https://pypi.org/project/jplephem/)"
)
if value.lower() == "jpl":
# Get the default JPL ephemeris URL
value = DEFAULT_JPL_EPHEMERIS
if re.compile(r"de[0-9][0-9][0-9]s?").match(value.lower()):
value = (
"https://naif.jpl.nasa.gov/pub/naif/generic_kernels"
f"/spk/planets/{value.lower():s}.bsp"
)
elif os.path.isfile(value):
return SPK.open(value)
else:
try:
urlparse(value)
except Exception:
raise ValueError(
f"{value} was not one of the standard strings and "
"could not be parsed as a file path or URL"
)
return SPK.open(download_file(value, cache=True))
def _get_body_barycentric_posvel(body, time, ephemeris=None, get_velocity=True):
"""Calculate the barycentric position (and velocity) of a solar system body.
Parameters
----------
body : str or other
The solar system body for which to calculate positions. Can also be a
kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
kernel.
time : `~astropy.time.Time`
Time of observation.
ephemeris : str, optional
Ephemeris to use. By default, use the one set with
``astropy.coordinates.solar_system_ephemeris.set``
get_velocity : bool, optional
Whether or not to calculate the velocity as well as the position.
Returns
-------
position : `~astropy.coordinates.CartesianRepresentation` or tuple
Barycentric (ICRS) position or tuple of position and velocity.
Notes
-----
Whether or not velocities are calculated makes little difference for the
built-in ephemerides, but for most JPL ephemeris files, the execution time
roughly doubles.
"""
# If the ephemeris is to be taken from solar_system_ephemeris, or the one
# it already contains, use the kernel there. Otherwise, open the ephemeris,
# possibly downloading it, but make sure the file is closed at the end.
default_kernel = ephemeris is None or ephemeris is solar_system_ephemeris._value
kernel = None
try:
if default_kernel:
if solar_system_ephemeris.get() is None:
raise ValueError(_EPHEMERIS_NOTE)
kernel = solar_system_ephemeris.kernel
else:
kernel = _get_kernel(ephemeris)
jd1, jd2 = get_jd12(time, "tdb")
if kernel is None:
body = body.lower()
earth_pv_helio, earth_pv_bary = erfa.epv00(jd1, jd2)
if body == "earth":
body_pv_bary = earth_pv_bary
elif body == "moon":
# The moon98 documentation notes that it takes TT, but that TDB leads
# to errors smaller than the uncertainties in the algorithm.
# moon98 returns the astrometric position relative to the Earth.
moon_pv_geo = erfa.moon98(jd1, jd2)
body_pv_bary = erfa.pvppv(moon_pv_geo, earth_pv_bary)
else:
sun_pv_bary = erfa.pvmpv(earth_pv_bary, earth_pv_helio)
if body == "sun":
body_pv_bary = sun_pv_bary
else:
try:
body_index = PLAN94_BODY_NAME_TO_PLANET_INDEX[body]
except KeyError:
raise KeyError(
f"{body}'s position and velocity cannot be "
f"calculated with the '{ephemeris}' ephemeris."
)
body_pv_helio = erfa.plan94(jd1, jd2, body_index)
body_pv_bary = erfa.pvppv(body_pv_helio, sun_pv_bary)
body_pos_bary = CartesianRepresentation(
body_pv_bary["p"], unit=u.au, xyz_axis=-1, copy=False
)
if get_velocity:
body_vel_bary = CartesianRepresentation(
body_pv_bary["v"], unit=u.au / u.day, xyz_axis=-1, copy=False
)
else:
if isinstance(body, str):
# Look up kernel chain for JPL ephemeris, based on name
try:
kernel_spec = BODY_NAME_TO_KERNEL_SPEC[body.lower()]
except KeyError:
raise KeyError(
f"{body}'s position cannot be calculated with "
f"the {ephemeris} ephemeris."
)
else:
# otherwise, assume the user knows what they're doing and intentionally
# passed in a kernel chain
kernel_spec = body
# jplephem cannot handle multi-D arrays, so convert to 1D here.
jd1_shape = getattr(jd1, "shape", ())
if len(jd1_shape) > 1:
jd1, jd2 = jd1.ravel(), jd2.ravel()
# Note that we use the new jd1.shape here to create a 1D result array.
# It is reshaped below.
body_posvel_bary = np.zeros(
(2 if get_velocity else 1, 3) + getattr(jd1, "shape", ())
)
for pair in kernel_spec:
spk = kernel[pair]
if spk.data_type == 3:
# Type 3 kernels contain both position and velocity.
posvel = spk.compute(jd1, jd2)
if get_velocity:
body_posvel_bary += posvel.reshape(body_posvel_bary.shape)
else:
# Keep only the position, i.e., the first three components.
body_posvel_bary[0] += posvel[:3]
else:
# spk.generate first yields the position and then the
# derivative. If no velocities are desired, body_posvel_bary
# has only one element and thus the loop ends after a single
# iteration, avoiding the velocity calculation.
for body_p_or_v, p_or_v in zip(
body_posvel_bary, spk.generate(jd1, jd2)
):
body_p_or_v += p_or_v
body_posvel_bary.shape = body_posvel_bary.shape[:2] + jd1_shape
body_pos_bary = CartesianRepresentation(
body_posvel_bary[0], unit=u.km, copy=False
)
if get_velocity:
body_vel_bary = CartesianRepresentation(
body_posvel_bary[1], unit=u.km / u.day, copy=False
)
return (body_pos_bary, body_vel_bary) if get_velocity else body_pos_bary
finally:
if not default_kernel and kernel is not None:
kernel.daf.file.close()
def get_body_barycentric_posvel(body, time, ephemeris=None):
"""Calculate the barycentric position and velocity of a solar system body.
Parameters
----------
body : str or list of tuple
The solar system body for which to calculate positions. Can also be a
kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
kernel.
time : `~astropy.time.Time`
Time of observation.
ephemeris : str, optional
Ephemeris to use. By default, use the one set with
``astropy.coordinates.solar_system_ephemeris.set``
Returns
-------
position, velocity : tuple of `~astropy.coordinates.CartesianRepresentation`
Tuple of barycentric (ICRS) position and velocity.
See Also
--------
get_body_barycentric : to calculate position only.
This is faster by about a factor two for JPL kernels, but has no
speed advantage for the built-in ephemeris.
Notes
-----
{_EPHEMERIS_NOTE}
"""
return _get_body_barycentric_posvel(body, time, ephemeris)
def get_body_barycentric(body, time, ephemeris=None):
"""Calculate the barycentric position of a solar system body.
Parameters
----------
body : str or list of tuple
The solar system body for which to calculate positions. Can also be a
kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
kernel.
time : `~astropy.time.Time`
Time of observation.
ephemeris : str, optional
Ephemeris to use. By default, use the one set with
``astropy.coordinates.solar_system_ephemeris.set``
Returns
-------
position : `~astropy.coordinates.CartesianRepresentation`
Barycentric (ICRS) position of the body in cartesian coordinates
See Also
--------
get_body_barycentric_posvel : to calculate both position and velocity.
Notes
-----
{_EPHEMERIS_NOTE}
"""
return _get_body_barycentric_posvel(body, time, ephemeris, get_velocity=False)
def _get_apparent_body_position(body, time, ephemeris, obsgeoloc=None):
"""Calculate the apparent position of body ``body`` relative to Earth.
This corrects for the light-travel time to the object.
Parameters
----------
body : str or other
The solar system body for which to calculate positions. Can also be a
kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
kernel.
time : `~astropy.time.Time`
Time of observation.
ephemeris : str, optional
Ephemeris to use. By default, use the one set with
``~astropy.coordinates.solar_system_ephemeris.set``
obsgeoloc : `~astropy.coordinates.CartesianRepresentation`, optional
The GCRS position of the observer
Returns
-------
cartesian_position : `~astropy.coordinates.CartesianRepresentation`
Barycentric (ICRS) apparent position of the body in cartesian coordinates
Notes
-----
{_EPHEMERIS_NOTE}
"""
if ephemeris is None:
ephemeris = solar_system_ephemeris.get()
# Calculate position given approximate light travel time.
delta_light_travel_time = 20.0 * u.s
emitted_time = time
light_travel_time = 0.0 * u.s
earth_loc = get_body_barycentric("earth", time, ephemeris)
if obsgeoloc is not None:
earth_loc += obsgeoloc
while np.any(np.fabs(delta_light_travel_time) > 1.0e-8 * u.s):
body_loc = get_body_barycentric(body, emitted_time, ephemeris)
earth_distance = (body_loc - earth_loc).norm()
delta_light_travel_time = light_travel_time - earth_distance / speed_of_light
light_travel_time = earth_distance / speed_of_light
emitted_time = time - light_travel_time
return get_body_barycentric(body, emitted_time, ephemeris)
def get_body(body, time, location=None, ephemeris=None):
"""
Get a `~astropy.coordinates.SkyCoord` for a solar system body as observed
from a location on Earth in the `~astropy.coordinates.GCRS` reference
system.
Parameters
----------
body : str or list of tuple
The solar system body for which to calculate positions. Can also be a
kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
kernel.
time : `~astropy.time.Time`
Time of observation.
location : `~astropy.coordinates.EarthLocation`, optional
Location of observer on the Earth. If not given, will be taken from
``time`` (if not present, a geocentric observer will be assumed).
ephemeris : str, optional
Ephemeris to use. If not given, use the one set with
``astropy.coordinates.solar_system_ephemeris.set`` (which is
set to 'builtin' by default).
Returns
-------
skycoord : `~astropy.coordinates.SkyCoord`
GCRS Coordinate for the body
Notes
-----
The coordinate returned is the apparent position, which is the position of
the body at time *t* minus the light travel time from the *body* to the
observing *location*.
{_EPHEMERIS_NOTE}
"""
if location is None:
location = time.location
if location is not None:
obsgeoloc, obsgeovel = location.get_gcrs_posvel(time)
else:
obsgeoloc, obsgeovel = None, None
cartrep = _get_apparent_body_position(body, time, ephemeris, obsgeoloc)
icrs = ICRS(cartrep)
gcrs = icrs.transform_to(
GCRS(obstime=time, obsgeoloc=obsgeoloc, obsgeovel=obsgeovel)
)
return SkyCoord(gcrs)
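# Illustrative usage (not part of astropy's source); outputs are skipped since
# they depend on the chosen ephemeris, and a geocentric observer is assumed:
#
# >>> from astropy.time import Time
# >>> from astropy.coordinates import get_body
# >>> t = Time("2014-09-22 23:22")
# >>> get_body("jupiter", t)  # doctest: +SKIP
# <SkyCoord (GCRS: obstime=2014-09-22 23:22:00.000, ...): (ra, dec, distance) ...>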
def get_moon(time, location=None, ephemeris=None):
"""
Get a `~astropy.coordinates.SkyCoord` for the Earth's Moon as observed
from a location on Earth in the `~astropy.coordinates.GCRS` reference
system.
Parameters
----------
time : `~astropy.time.Time`
Time of observation
location : `~astropy.coordinates.EarthLocation`
Location of observer on the Earth. If none is supplied, taken from
``time`` (if not present, a geocentric observer will be assumed).
ephemeris : str, optional
Ephemeris to use. If not given, use the one set with
``astropy.coordinates.solar_system_ephemeris.set`` (which is
set to 'builtin' by default).
Returns
-------
skycoord : `~astropy.coordinates.SkyCoord`
GCRS Coordinate for the Moon
Notes
-----
The coordinate returned is the apparent position, which is the position of
the moon at time *t* minus the light travel time from the moon to the
observing *location*.
{_EPHEMERIS_NOTE}
"""
return get_body("moon", time, location=location, ephemeris=ephemeris)
# Add note about the ephemeris choices to the docstrings of relevant functions.
# Note: sadly, one cannot use f-strings for docstrings, so we format explicitly.
for f in [
f
for f in locals().values()
if callable(f) and f.__doc__ is not None and "{_EPHEMERIS_NOTE}" in f.__doc__
]:
f.__doc__ = f.__doc__.format(_EPHEMERIS_NOTE=indent(_EPHEMERIS_NOTE)[4:])
deprecation_msg = """
The use of _apparent_position_in_true_coordinates is deprecated because
astropy now implements a True Equator True Equinox Frame (TETE), which
should be used instead.
"""
@deprecated("4.2", deprecation_msg)
def _apparent_position_in_true_coordinates(skycoord):
"""
Convert a SkyCoord in the GCRS frame into one in which RA and Dec
are defined w.r.t. the true equinox and poles of the Earth.
"""
location = getattr(skycoord, "location", None)
if location is None:
gcrs_rep = skycoord.obsgeoloc.with_differentials(
{"s": CartesianDifferential.from_cartesian(skycoord.obsgeovel)}
)
location = (
GCRS(gcrs_rep, obstime=skycoord.obstime)
.transform_to(ITRS(obstime=skycoord.obstime))
.earth_location
)
tete_frame = TETE(obstime=skycoord.obstime, location=location)
return skycoord.transform_to(tete_frame)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module includes files automatically generated from ply (these end in
# _lextab.py and _parsetab.py). To generate these files, remove them from this
# folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/coordinates
#
# You can then commit the changes to the re-generated _lextab.py and
# _parsetab.py files.
"""
This module contains formatting functions that are for internal use in
astropy.coordinates.angles. Mainly they are conversions from one format
of data to another.
"""
import threading
from warnings import warn
import numpy as np
from astropy import units as u
from astropy.utils import format_exception, parsing
from astropy.utils.decorators import deprecated
from .errors import (
IllegalHourError,
IllegalHourWarning,
IllegalMinuteError,
IllegalMinuteWarning,
IllegalSecondError,
IllegalSecondWarning,
)
class _AngleParser:
"""
Parses the various angle formats including:
* 01:02:30.43 degrees
* 1 2 0 hours
* 1°2′3″
* 1d2m3s
* -1h2m3s
* 1°2′3″N
This class should not be used directly. Use `parse_angle`
instead.
"""
# For safe multi-threaded operation all class (but not instance)
# members that carry state should be thread-local. They are stored
# in the following class member
_thread_local = threading.local()
def __init__(self):
# TODO: in principle, the parser should be invalidated if we change unit
# system (from CDS to FITS, say). Might want to keep a link to the
# unit_registry used, and regenerate the parser/lexer if it changes.
# Alternatively, perhaps one should not worry at all and just pre-
# generate the parser for each release (as done for unit formats).
# For some discussion of this problem, see
# https://github.com/astropy/astropy/issues/5350#issuecomment-248770151
if "_parser" not in _AngleParser._thread_local.__dict__:
(
_AngleParser._thread_local._parser,
_AngleParser._thread_local._lexer,
) = self._make_parser()
@classmethod
def _get_simple_unit_names(cls):
simple_units = set(u.radian.find_equivalent_units(include_prefix_units=True))
simple_unit_names = set()
# We filter out degree and hourangle, since those are treated
# separately.
for unit in simple_units:
if unit != u.deg and unit != u.hourangle:
simple_unit_names.update(unit.names)
return sorted(simple_unit_names)
@classmethod
def _make_parser(cls):
# List of token names.
tokens = (
"SIGN",
"UINT",
"UFLOAT",
"COLON",
"DEGREE",
"HOUR",
"MINUTE",
"SECOND",
"SIMPLE_UNIT",
"EASTWEST",
"NORTHSOUTH",
)
# NOTE THE ORDERING OF THESE RULES IS IMPORTANT!!
# Regular expression rules for simple tokens
def t_UFLOAT(t):
r"((\d+\.\d*)|(\.\d+))([eE][+−-]?\d+)?"
# The above includes Unicode "MINUS SIGN" \u2212. It is
# important to include the hyphen last, or the regex will
# treat this as a range.
t.value = float(t.value.replace("−", "-"))
return t
def t_UINT(t):
r"\d+"
t.value = int(t.value)
return t
def t_SIGN(t):
r"[+−-]"
# The above includes Unicode "MINUS SIGN" \u2212. It is
# important to include the hyphen last, or the regex will
# treat this as a range.
if t.value == "+":
t.value = 1.0
else:
t.value = -1.0
return t
def t_EASTWEST(t):
r"[EW]$"
t.value = -1.0 if t.value == "W" else 1.0
return t
def t_NORTHSOUTH(t):
r"[NS]$"
# We cannot use lower-case letters otherwise we'll confuse
# s[outh] with s[econd]
t.value = -1.0 if t.value == "S" else 1.0
return t
def t_SIMPLE_UNIT(t):
t.value = u.Unit(t.value)
return t
t_SIMPLE_UNIT.__doc__ = "|".join(
f"(?:{x})" for x in cls._get_simple_unit_names()
)
t_COLON = ":"
t_DEGREE = r"d(eg(ree(s)?)?)?|°"
t_HOUR = r"hour(s)?|h(r)?|ʰ"
t_MINUTE = r"m(in(ute(s)?)?)?|′|\'|ᵐ"
t_SECOND = r"s(ec(ond(s)?)?)?|″|\"|ˢ"
# A string containing ignored characters (spaces)
t_ignore = " "
# Error handling rule
def t_error(t):
raise ValueError(f"Invalid character at col {t.lexpos}")
lexer = parsing.lex(lextab="angle_lextab", package="astropy/coordinates")
def p_angle(p):
"""
angle : sign hms eastwest
| sign dms dir
| sign arcsecond dir
| sign arcminute dir
| sign simple dir
"""
sign = p[1] * p[3]
value, unit = p[2]
if isinstance(value, tuple):
p[0] = ((sign * value[0],) + value[1:], unit)
else:
p[0] = (sign * value, unit)
def p_sign(p):
"""
sign : SIGN
|
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1.0
def p_eastwest(p):
"""
eastwest : EASTWEST
|
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1.0
def p_dir(p):
"""
dir : EASTWEST
| NORTHSOUTH
|
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1.0
def p_ufloat(p):
"""
ufloat : UFLOAT
| UINT
"""
p[0] = p[1]
def p_colon(p):
"""
colon : UINT COLON ufloat
| UINT COLON UINT COLON ufloat
"""
if len(p) == 4:
p[0] = (p[1], p[3])
elif len(p) == 6:
p[0] = (p[1], p[3], p[5])
def p_spaced(p):
"""
spaced : UINT ufloat
| UINT UINT ufloat
"""
if len(p) == 3:
p[0] = (p[1], p[2])
elif len(p) == 4:
p[0] = (p[1], p[2], p[3])
def p_generic(p):
"""
generic : colon
| spaced
| ufloat
"""
p[0] = p[1]
def p_hms(p):
"""
hms : UINT HOUR
| UINT HOUR ufloat
| UINT HOUR UINT MINUTE
| UINT HOUR UFLOAT MINUTE
| UINT HOUR UINT MINUTE ufloat
| UINT HOUR UINT MINUTE ufloat SECOND
| generic HOUR
"""
if len(p) == 3:
p[0] = (p[1], u.hourangle)
elif len(p) in (4, 5):
p[0] = ((p[1], p[3]), u.hourangle)
elif len(p) in (6, 7):
p[0] = ((p[1], p[3], p[5]), u.hourangle)
def p_dms(p):
"""
dms : UINT DEGREE
| UINT DEGREE ufloat
| UINT DEGREE UINT MINUTE
| UINT DEGREE UFLOAT MINUTE
| UINT DEGREE UINT MINUTE ufloat
| UINT DEGREE UINT MINUTE ufloat SECOND
| generic DEGREE
"""
if len(p) == 3:
p[0] = (p[1], u.degree)
elif len(p) in (4, 5):
p[0] = ((p[1], p[3]), u.degree)
elif len(p) in (6, 7):
p[0] = ((p[1], p[3], p[5]), u.degree)
def p_simple(p):
"""
simple : generic
| generic SIMPLE_UNIT
"""
if len(p) == 2:
p[0] = (p[1], None)
else:
p[0] = (p[1], p[2])
def p_arcsecond(p):
"""
arcsecond : generic SECOND
"""
p[0] = (p[1], u.arcsecond)
def p_arcminute(p):
"""
arcminute : generic MINUTE
"""
p[0] = (p[1], u.arcminute)
def p_error(p):
raise ValueError
parser = parsing.yacc(tabmodule="angle_parsetab", package="astropy/coordinates")
return parser, lexer
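# Sketch of how the grammar above resolves a typical input: for "1h02m03s"
# the lexer emits UINT(1) HOUR UINT(2) MINUTE UINT(3) SECOND; p_hms matches
# the "UINT HOUR UINT MINUTE ufloat SECOND" production, yielding
# ((1, 2, 3), u.hourangle), and p_angle then applies the (implicit) sign.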
def parse(self, angle, unit, debug=False):
try:
found_angle, found_unit = self._thread_local._parser.parse(
angle, lexer=self._thread_local._lexer, debug=debug
)
except ValueError as e:
if str(e):
raise ValueError(f"{str(e)} in angle {angle!r}") from e
else:
raise ValueError(f"Syntax error parsing angle {angle!r}") from e
if unit is None and found_unit is None:
raise u.UnitsError("No unit specified")
return found_angle, found_unit
def _check_hour_range(hrs):
"""
Checks that the given value is in the range (-24, 24).
"""
if np.any(np.abs(hrs) == 24.0):
warn(IllegalHourWarning(hrs, "Treating as 24 hr"))
elif np.any(hrs < -24.0) or np.any(hrs > 24.0):
raise IllegalHourError(hrs)
def _check_minute_range(m):
"""
Checks that the given value is in the range [-60, 60]. If the value
is equal to 60, then a warning is raised.
"""
if np.any(m == 60.0):
warn(IllegalMinuteWarning(m, "Treating as 0 min, +1 hr/deg"))
elif np.any(m < -60.0) or np.any(m > 60.0):
# "Error: minutes not in range [-60,60) ({0}).".format(min))
raise IllegalMinuteError(m)
def _check_second_range(sec):
"""
Checks that the given value is in the range [-60, 60]. If the value
is equal to 60, then a warning is raised.
"""
if np.any(sec == 60.0):
warn(IllegalSecondWarning(sec, "Treating as 0 sec, +1 min"))
elif sec is None:
pass
elif np.any(sec < -60.0) or np.any(sec > 60.0):
# "Error: seconds not in range [-60,60) ({0}).".format(sec))
raise IllegalSecondError(sec)
def check_hms_ranges(h, m, s):
"""
Checks that the given hour, minute and second are all within
reasonable range.
"""
_check_hour_range(h)
_check_minute_range(m)
_check_second_range(s)
return None
def parse_angle(angle, unit=None, debug=False):
"""
Parses an input string value into an angle value.
Parameters
----------
angle : str
A string representing the angle. May be in one of the following forms:
* 01:02:30.43 degrees
* 1 2 0 hours
* 1°2′3″
* 1d2m3s
* -1h2m3s
unit : `~astropy.units.UnitBase` instance, optional
The unit used to interpret the string. If ``unit`` is not
provided, the unit must be explicitly represented in the
string, either at the end or as number separators.
debug : bool, optional
If `True`, print debugging information from the parser.
Returns
-------
value, unit : tuple
``value`` is the value as a floating point number or three-part
tuple, and ``unit`` is a `Unit` instance which is either the
unit passed in or the one explicitly mentioned in the input
string.
"""
return _AngleParser().parse(angle, unit, debug=debug)
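# A minimal usage sketch, following the grammar above (the sign is applied
# to the first component of a multi-part result):
#
#     parse_angle("1d2m3.4s")  # -> ((1.0, 2, 3.4), u.degree)
#     parse_angle("-1h2m3s")   # -> ((-1.0, 2, 3), u.hourangle)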
def degrees_to_dms(d):
"""
Convert a floating-point degree value into a ``(degree, arcminute,
arcsecond)`` tuple.
"""
sign = np.copysign(1.0, d)
(df, d) = np.modf(np.abs(d)) # (degree fraction, degree)
(mf, m) = np.modf(df * 60.0) # (minute fraction, minute)
s = mf * 60.0
return np.floor(sign * d), sign * np.floor(m), sign * s
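# Worked example: degrees_to_dms(-10.5) -> (-10.0, -30.0, -0.0); the sign is
# carried on every component, so the value can be reconstructed as
# d + m / 60 + s / 3600 = -10.5.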
@deprecated(
"dms_to_degrees (or creating an Angle with a tuple) has ambiguous "
"behavior when the degree value is 0",
alternative=(
"another way of creating angles instead (e.g. a less "
"ambiguous string like '-0d1m2.3s'"
),
)
def dms_to_degrees(d, m, s=None):
"""
Convert degrees, arcminute, arcsecond to a float degrees value.
"""
_check_minute_range(m)
_check_second_range(s)
# determine sign
sign = np.copysign(1.0, d)
try:
d = np.floor(np.abs(d))
if s is None:
m = np.abs(m)
s = 0
else:
m = np.floor(np.abs(m))
s = np.abs(s)
except ValueError as err:
raise ValueError(
format_exception(
"{func}: dms values ({1[0]},{2[1]},{3[2]}) could not be "
"converted to numbers.",
d,
m,
s,
)
) from err
return sign * (d + m / 60.0 + s / 3600.0)
@deprecated(
"hms_to_hours (or creating an Angle with a tuple) has ambiguous "
"behavior when the hour value is 0",
alternative=(
"another way of creating angles instead (e.g. a less "
"ambiguous string like '-0h1m2.3s'"
),
)
def hms_to_hours(h, m, s=None):
"""
Convert hour, minute, second to a float hour value.
"""
check_hms_ranges(h, m, s)
# determine sign
sign = np.copysign(1.0, h)
try:
h = np.floor(np.abs(h))
if s is None:
m = np.abs(m)
s = 0
else:
m = np.floor(np.abs(m))
s = np.abs(s)
except ValueError as err:
raise ValueError(
format_exception(
"{func}: HMS values ({1[0]},{2[1]},{3[2]}) could not be "
"converted to numbers.",
h,
m,
s,
)
) from err
return sign * (h + m / 60.0 + s / 3600.0)
def hms_to_degrees(h, m, s):
"""
Convert hour, minute, second to a float degrees value.
"""
return hms_to_hours(h, m, s) * 15.0
def hms_to_radians(h, m, s):
"""
Convert hour, minute, second to a float radians value.
"""
return u.degree.to(u.radian, hms_to_degrees(h, m, s))
def hms_to_dms(h, m, s):
"""
Convert hours, minutes, seconds to a ``(degree, arcminute, arcsecond)``
tuple.
"""
return degrees_to_dms(hms_to_degrees(h, m, s))
def hours_to_decimal(h):
"""
Convert any parseable hour value into a float value.
"""
from . import angles
return angles.Angle(h, unit=u.hourangle).hour
def hours_to_radians(h):
"""
Convert an angle in Hours to Radians.
"""
return u.hourangle.to(u.radian, h)
def hours_to_hms(h):
"""
Convert a floating-point hour value into an ``(hour, minute,
second)`` tuple.
"""
sign = np.copysign(1.0, h)
(hf, h) = np.modf(np.abs(h))  # (hour fraction, hour)
(mf, m) = np.modf(hf * 60.0) # (minute fraction, minute)
s = mf * 60.0
return (np.floor(sign * h), sign * np.floor(m), sign * s)
def radians_to_degrees(r):
"""
Convert an angle in Radians to Degrees.
"""
return u.radian.to(u.degree, r)
def radians_to_hours(r):
"""
Convert an angle in Radians to Hours.
"""
return u.radian.to(u.hourangle, r)
def radians_to_hms(r):
"""
Convert an angle in Radians to an ``(hour, minute, second)`` tuple.
"""
hours = radians_to_hours(r)
return hours_to_hms(hours)
def radians_to_dms(r):
"""
Convert an angle in Radians to a ``(degree, arcminute,
arcsecond)`` tuple.
"""
degrees = u.radian.to(u.degree, r)
return degrees_to_dms(degrees)
def sexagesimal_to_string(values, precision=None, pad=False, sep=(":",), fields=3):
"""
Given an already separated tuple of sexagesimal values, returns
a string.
See `hours_to_string` and `degrees_to_string` for a higher-level
interface to this functionality.
"""
# Check to see if values[0] is negative, using np.copysign to handle -0
sign = np.copysign(1.0, values[0])
# If the coordinates are negative, we need to take the absolute values.
# We use np.abs because abs(-0) is -0
# TODO: Is this true? (MHvK, 2018-02-01: not on my system)
values = [np.abs(value) for value in values]
if pad:
if sign == -1:
pad = 3
else:
pad = 2
else:
pad = 0
if not isinstance(sep, tuple):
sep = tuple(sep)
if fields < 1 or fields > 3:
raise ValueError("fields must be 1, 2, or 3")
if not sep: # empty string, False, or None, etc.
sep = ("", "", "")
elif len(sep) == 1:
if fields == 3:
sep = sep + (sep[0], "")
elif fields == 2:
sep = sep + ("", "")
else:
sep = ("", "", "")
elif len(sep) == 2:
sep = sep + ("",)
elif len(sep) != 3:
raise ValueError(
"Invalid separator specification for converting angle to string."
)
# Simplify the expression based on the requested precision. For
# example, if the seconds will round up to 60, we should convert
# it to 0 and carry upwards. If the field is hidden (by the
# fields kwarg) we round up around the middle, 30.0.
if precision is None:
rounding_thresh = 60.0 - (10.0**-8)
else:
rounding_thresh = 60.0 - (10.0**-precision)
if fields == 3 and values[2] >= rounding_thresh:
values[2] = 0.0
values[1] += 1.0
elif fields < 3 and values[2] >= 30.0:
values[1] += 1.0
if fields >= 2 and values[1] >= 60.0:
values[1] = 0.0
values[0] += 1.0
elif fields < 2 and values[1] >= 30.0:
values[0] += 1.0
literal = []
last_value = ""
literal.append("{0:0{pad}.0f}{sep[0]}")
if fields >= 2:
literal.append("{1:02d}{sep[1]}")
if fields == 3:
if precision is None:
last_value = f"{abs(values[2]):.8f}"
last_value = last_value.rstrip("0").rstrip(".")
else:
last_value = "{0:.{precision}f}".format(abs(values[2]), precision=precision)
if len(last_value) == 1 or last_value[1] == ".":
last_value = "0" + last_value
literal.append("{last_value}{sep[2]}")
literal = "".join(literal)
return literal.format(
np.copysign(values[0], sign),
int(values[1]),
values[2],
sep=sep,
pad=pad,
last_value=last_value,
)
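# Worked example of the carry logic above: with precision=2 the rounding
# threshold is 59.99, so
#
#     sexagesimal_to_string((10, 59, 59.999), precision=2)  # -> '11:00:00.00'
#
# because the seconds round up to 60 and the carry propagates through the
# minutes into the degrees field.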
def hours_to_string(h, precision=5, pad=False, sep=("h", "m", "s"), fields=3):
"""
Takes a decimal hour value and returns a string formatted as hms with
separator specified by the 'sep' parameter.
``h`` must be a scalar.
"""
h, m, s = hours_to_hms(h)
return sexagesimal_to_string(
(h, m, s), precision=precision, pad=pad, sep=sep, fields=fields
)
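# e.g. hours_to_string(1.5) -> '1h30m00.00000s' with the default arguments.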
def degrees_to_string(d, precision=5, pad=False, sep=":", fields=3):
"""
Takes a decimal degree value and returns a string formatted as dms with
separator specified by the 'sep' parameter.
``d`` must be a scalar.
"""
d, m, s = degrees_to_dms(d)
return sexagesimal_to_string(
(d, m, s), precision=precision, pad=pad, sep=sep, fields=fields
)
import warnings
from textwrap import indent
import numpy as np
import astropy.units as u
from astropy.constants import c
from astropy.coordinates import (
ICRS,
CartesianDifferential,
CartesianRepresentation,
SkyCoord,
)
from astropy.coordinates.baseframe import BaseCoordinateFrame, frame_transform_graph
from astropy.coordinates.spectral_quantity import SpectralQuantity
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ["SpectralCoord"]
class NoVelocityWarning(AstropyUserWarning):
pass
class NoDistanceWarning(AstropyUserWarning):
pass
KMS = u.km / u.s
ZERO_VELOCITIES = CartesianDifferential([0, 0, 0] * KMS)
# Default distance to use for target when none is provided
DEFAULT_DISTANCE = 1e6 * u.kpc
# We don't want to run doctests in the docstrings we inherit from Quantity
__doctest_skip__ = ["SpectralCoord.*"]
def _apply_relativistic_doppler_shift(scoord, velocity):
"""
Given a `SpectralQuantity` and a velocity, return a new `SpectralQuantity`
that is Doppler shifted by this amount.
Note that the Doppler shift applied is the full relativistic one, so
a `SpectralQuantity` currently expressed in velocity and not using the
relativistic convention will temporarily be converted to use the
relativistic convention while the shift is applied.
Positive velocities are assumed to redshift the spectral quantity,
while negative velocities blueshift the spectral quantity.
"""
# NOTE: we deliberately don't keep sub-classes of SpectralQuantity intact
# since we can't guarantee that their metadata would be correct/consistent.
squantity = scoord.view(SpectralQuantity)
beta = velocity / c
doppler_factor = np.sqrt((1 + beta) / (1 - beta))
if squantity.unit.is_equivalent(u.m): # wavelength
return squantity * doppler_factor
elif (
squantity.unit.is_equivalent(u.Hz)
or squantity.unit.is_equivalent(u.eV)
or squantity.unit.is_equivalent(1 / u.m)
):
return squantity / doppler_factor
elif squantity.unit.is_equivalent(KMS): # velocity
return (squantity.to(u.Hz) / doppler_factor).to(squantity.unit)
else: # pragma: no cover
raise RuntimeError(
f"Unexpected units in velocity shift: {squantity.unit}. This should not"
" happen, so please report this in the astropy issue tracker!"
)
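# Worked numbers for the factor above: beta = v / c = 0.1 gives
# sqrt(1.1 / 0.9) ~= 1.1055, so a 500 nm wavelength is shifted to ~552.8 nm
# (a redshift), while a frequency is divided by the same factor.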
def update_differentials_to_match(
original, velocity_reference, preserve_observer_frame=False
):
"""
Given an original coordinate object, update the differentials so that
the final coordinate is at the same location as the original coordinate
but co-moving with the velocity reference object.
If ``preserve_observer_frame`` is set to `True`, the resulting object will be in
the frame of the original coordinate, otherwise it will be in the frame of
the velocity reference.
"""
if not velocity_reference.data.differentials:
raise ValueError("Reference frame has no velocities")
# If the reference has an obstime already defined, we should ignore
# it and stick with the original observer obstime.
if "obstime" in velocity_reference.frame_attributes and hasattr(
original, "obstime"
):
velocity_reference = velocity_reference.replicate(obstime=original.obstime)
# We transform both coordinates to ICRS for simplicity and because we know
# it's a simple frame that is not time-dependent (it could be that both
# the original and velocity_reference frame are time-dependent)
original_icrs = original.transform_to(ICRS())
velocity_reference_icrs = velocity_reference.transform_to(ICRS())
differentials = velocity_reference_icrs.data.represent_as(
CartesianRepresentation, CartesianDifferential
).differentials
data_with_differentials = original_icrs.data.represent_as(
CartesianRepresentation
).with_differentials(differentials)
final_icrs = original_icrs.realize_frame(data_with_differentials)
if preserve_observer_frame:
final = final_icrs.transform_to(original)
else:
final = final_icrs.transform_to(velocity_reference)
return final.replicate(
representation_type=CartesianRepresentation,
differential_type=CartesianDifferential,
)
def attach_zero_velocities(coord):
"""
Set the differentials to be stationary on a coordinate object.
"""
new_data = coord.cartesian.with_differentials(ZERO_VELOCITIES)
return coord.realize_frame(new_data)
def _get_velocities(coord):
if "s" in coord.data.differentials:
return coord.velocity
else:
return ZERO_VELOCITIES
class SpectralCoord(SpectralQuantity):
"""
A spectral coordinate with its corresponding unit.
.. note:: The |SpectralCoord| class is new in Astropy v4.1 and should be
considered experimental at this time. Note that we do not fully
support cases where the observer and target are moving
relativistically relative to each other, so care should be taken
in those cases. It is possible that there will be API changes in
future versions of Astropy based on user feedback. If you have
specific ideas for how it might be improved, please let us know
on the `astropy-dev mailing list`_ or at
http://feedback.astropy.org.
Parameters
----------
value : ndarray or `~astropy.units.Quantity` or `SpectralCoord`
Spectral values, which should be either wavelength, frequency,
energy, wavenumber, or velocity values.
unit : unit-like
Unit for the given spectral values.
observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional
The coordinate (position and velocity) of observer. If no velocities
are present on this object, the observer is assumed to be stationary
relative to the frame origin.
target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional
The coordinate (position and velocity) of target. If no velocities
are present on this object, the target is assumed to be stationary
relative to the frame origin.
radial_velocity : `~astropy.units.Quantity` ['speed'], optional
The radial velocity of the target with respect to the observer. This
can only be specified if ``redshift`` is not specified.
redshift : float, optional
The relativistic redshift of the target with respect to the observer.
This can only be specified if ``radial_velocity`` is not specified.
doppler_rest : `~astropy.units.Quantity`, optional
The rest value to use when expressing the spectral value as a velocity.
doppler_convention : str, optional
The Doppler convention to use when expressing the spectral value as a velocity.
"""
@u.quantity_input(radial_velocity=u.km / u.s)
def __new__(
cls,
value,
unit=None,
observer=None,
target=None,
radial_velocity=None,
redshift=None,
**kwargs,
):
obj = super().__new__(cls, value, unit=unit, **kwargs)
# There are two main modes of operation in this class. Either the
# observer and target are both defined, in which case the radial
# velocity and redshift are automatically computed from these, or
# only one of the observer and target are specified, along with a
# manually specified radial velocity or redshift. So if a target and
# observer are both specified, we can't also accept a radial velocity
# or redshift.
if target is not None and observer is not None:
if radial_velocity is not None or redshift is not None:
raise ValueError(
"Cannot specify radial velocity or redshift if both "
"target and observer are specified"
)
# We only deal with redshifts here and in the redshift property.
# Otherwise internally we always deal with velocities.
if redshift is not None:
if radial_velocity is not None:
raise ValueError("Cannot set both a radial velocity and redshift")
redshift = u.Quantity(redshift)
# For now, we can't specify redshift=u.one in quantity_input above
# and have it work with plain floats, but if that is fixed, for
# example as in https://github.com/astropy/astropy/pull/10232, we
# can remove the check here and add redshift=u.one to the decorator
if not redshift.unit.is_equivalent(u.one):
raise u.UnitsError("redshift should be dimensionless")
radial_velocity = redshift.to(u.km / u.s, u.doppler_redshift())
# If we're initializing from an existing SpectralCoord, keep any
# parameters that aren't being overridden
if observer is None:
observer = getattr(value, "observer", None)
if target is None:
target = getattr(value, "target", None)
# As mentioned above, we should only specify the radial velocity
# manually if either or both the observer and target are not
# specified.
if observer is None or target is None:
if radial_velocity is None:
radial_velocity = getattr(value, "radial_velocity", None)
obj._radial_velocity = radial_velocity
obj._observer = cls._validate_coordinate(observer, label="observer")
obj._target = cls._validate_coordinate(target, label="target")
return obj
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
self._radial_velocity = getattr(obj, "_radial_velocity", None)
self._observer = getattr(obj, "_observer", None)
self._target = getattr(obj, "_target", None)
@staticmethod
def _validate_coordinate(coord, label=""):
"""
Checks the type of the frame and whether a velocity differential and a
distance has been defined on the frame object.
If no distance is defined, the target is assumed to be "really far
away", and the observer is assumed to be "in the solar system".
Parameters
----------
coord : `~astropy.coordinates.BaseCoordinateFrame`
The new frame to be used for target or observer.
label : str, optional
The name of the object being validated (e.g. 'target' or 'observer'),
which is then used in error messages.
"""
if coord is None:
return
if not issubclass(coord.__class__, BaseCoordinateFrame):
if isinstance(coord, SkyCoord):
coord = coord.frame
else:
raise TypeError(
f"{label} must be a SkyCoord or coordinate frame instance"
)
# If the distance is not well-defined, ensure that it works properly
# for generating differentials
# TODO: change this to not set the distance and yield a warning once
# there's a good way to address this in astropy.coordinates
# https://github.com/astropy/astropy/issues/10247
with np.errstate(all="ignore"):
distance = getattr(coord, "distance", None)
if distance is not None and distance.unit.physical_type == "dimensionless":
coord = SkyCoord(coord, distance=DEFAULT_DISTANCE)
warnings.warn(
"Distance on coordinate object is dimensionless, an "
f"arbitrary distance value of {DEFAULT_DISTANCE} will be set instead.",
NoDistanceWarning,
)
# If the observer frame does not contain information about the
# velocity of the system, assume that the velocity is zero in the
# system.
if "s" not in coord.data.differentials:
warnings.warn(
f"No velocity defined on frame, assuming {ZERO_VELOCITIES}.",
NoVelocityWarning,
)
coord = attach_zero_velocities(coord)
return coord
def replicate(
self,
value=None,
unit=None,
observer=None,
target=None,
radial_velocity=None,
redshift=None,
doppler_convention=None,
doppler_rest=None,
copy=False,
):
"""
Return a replica of the `SpectralCoord`, optionally changing the
values or attributes.
Note that no conversion is carried out by this method - this keeps
all the values and attributes the same, except for the ones explicitly
passed to this method which are changed.
If ``copy`` is set to `True` then a full copy of the internal arrays
will be made. By default the replica will use a reference to the
original arrays when possible to save memory.
Parameters
----------
value : ndarray or `~astropy.units.Quantity` or `SpectralCoord`, optional
Spectral values, which should be either wavelength, frequency,
energy, wavenumber, or velocity values.
unit : unit-like
Unit for the given spectral values.
observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional
The coordinate (position and velocity) of observer.
target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional
The coordinate (position and velocity) of target.
radial_velocity : `~astropy.units.Quantity` ['speed'], optional
The radial velocity of the target with respect to the observer.
redshift : float, optional
The relativistic redshift of the target with respect to the observer.
doppler_rest : `~astropy.units.Quantity`, optional
The rest value to use when expressing the spectral value as a velocity.
doppler_convention : str, optional
The Doppler convention to use when expressing the spectral value as a velocity.
copy : bool, optional
If `True`, and ``value`` is not specified, the values are copied to
the new `SpectralCoord` - otherwise a reference to the same values is used.
Returns
-------
sc : `SpectralCoord` object
Replica of this object
"""
if isinstance(value, u.Quantity):
if unit is not None:
raise ValueError(
"Cannot specify value as a Quantity and also specify unit"
)
else:
value, unit = value.value, value.unit
value = value if value is not None else self.value
unit = unit or self.unit
observer = self._validate_coordinate(observer) or self.observer
target = self._validate_coordinate(target) or self.target
doppler_convention = doppler_convention or self.doppler_convention
doppler_rest = doppler_rest or self.doppler_rest
# If the value is being taken from self and copy is True, make a copy.
if copy:
value = value.copy()
# Only include radial_velocity if it is not auto-computed from the
# observer and target.
if (
(self.observer is None or self.target is None)
and radial_velocity is None
and redshift is None
):
radial_velocity = self.radial_velocity
with warnings.catch_warnings():
warnings.simplefilter("ignore", NoVelocityWarning)
return self.__class__(
value=value,
unit=unit,
observer=observer,
target=target,
radial_velocity=radial_velocity,
redshift=redshift,
doppler_convention=doppler_convention,
doppler_rest=doppler_rest,
copy=False,
)
@property
def quantity(self):
"""
Convert the ``SpectralCoord`` to a `~astropy.units.Quantity`.
Equivalent to ``self.view(u.Quantity)``.
Returns
-------
`~astropy.units.Quantity`
This object viewed as a `~astropy.units.Quantity`.
"""
return self.view(u.Quantity)
@property
def observer(self):
"""
The coordinates of the observer.
If set, and a target is set as well, this will override any explicit
radial velocity passed in.
Returns
-------
`~astropy.coordinates.BaseCoordinateFrame`
The astropy coordinate frame representing the observation.
"""
return self._observer
@observer.setter
def observer(self, value):
if self.observer is not None:
raise ValueError("observer has already been set")
self._observer = self._validate_coordinate(value, label="observer")
# Switch to auto-computing radial velocity
if self._target is not None:
self._radial_velocity = None
@property
def target(self):
"""
The coordinates of the target being observed.
If set, and an observer is set as well, this will override any explicit
radial velocity passed in.
Returns
-------
`~astropy.coordinates.BaseCoordinateFrame`
The astropy coordinate frame representing the target.
"""
return self._target
@target.setter
def target(self, value):
if self.target is not None:
raise ValueError("target has already been set")
self._target = self._validate_coordinate(value, label="target")
# Switch to auto-computing radial velocity
if self._observer is not None:
self._radial_velocity = None
@property
def radial_velocity(self):
"""
Radial velocity of target relative to the observer.
Returns
-------
`~astropy.units.Quantity` ['speed']
Radial velocity of target.
Notes
-----
This is different from the ``.radial_velocity`` property of a
coordinate frame in that this calculates the radial velocity with
respect to the *observer*, not the origin of the frame.
"""
if self._observer is None or self._target is None:
if self._radial_velocity is None:
return 0 * KMS
else:
return self._radial_velocity
else:
return self._calculate_radial_velocity(
self._observer, self._target, as_scalar=True
)
@property
def redshift(self):
"""
Redshift of target relative to observer. Calculated from the radial
velocity.
Returns
-------
`astropy.units.Quantity`
Redshift of target.
"""
return self.radial_velocity.to(u.dimensionless_unscaled, u.doppler_redshift())
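# The u.doppler_redshift() equivalency used above encodes the relativistic
# relation 1 + z = sqrt((1 + beta) / (1 - beta)) with beta = v / c, so the
# redshift and radial_velocity properties stay mutually consistent.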
@staticmethod
def _calculate_radial_velocity(observer, target, as_scalar=False):
"""
Compute the line-of-sight velocity from the observer to the target.
Parameters
----------
observer : `~astropy.coordinates.BaseCoordinateFrame`
The frame of the observer.
target : `~astropy.coordinates.BaseCoordinateFrame`
The frame of the target.
as_scalar : bool
If `True`, the magnitude of the velocity vector will be returned,
otherwise the full vector will be returned.
Returns
-------
`~astropy.units.Quantity` ['speed']
The radial velocity of the target with respect to the observer.
"""
# Convert observer and target to ICRS to avoid finite differencing
# calculations that lack numerical precision.
observer_icrs = observer.transform_to(ICRS())
target_icrs = target.transform_to(ICRS())
pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs)
d_vel = target_icrs.velocity - observer_icrs.velocity
vel_mag = pos_hat.dot(d_vel)
if as_scalar:
return vel_mag
else:
return vel_mag * pos_hat
@staticmethod
def _normalized_position_vector(observer, target):
"""
Calculate the normalized position vector between two frames.
Parameters
----------
observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The observation frame or coordinate.
target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The target frame or coordinate.
Returns
-------
pos_hat : `~astropy.coordinates.CartesianRepresentation`
Normalized position vector pointing from the observer to the target.
"""
d_pos = (
target.cartesian.without_differentials()
- observer.cartesian.without_differentials()
)
dp_norm = d_pos.norm()
# Reset any that are 0 to 1 to avoid nans from 0/0
dp_norm[dp_norm == 0] = 1 * dp_norm.unit
pos_hat = d_pos / dp_norm
return pos_hat
@u.quantity_input(velocity=u.km / u.s)
def with_observer_stationary_relative_to(
self, frame, velocity=None, preserve_observer_frame=False
):
"""
A new `SpectralCoord` with the velocity of the observer altered,
but not the position.
If a coordinate frame is specified, the observer velocities will be
modified to be stationary in the specified frame. If a coordinate
instance is specified, optionally with non-zero velocities, the
observer velocities will be updated so that the observer is co-moving
with the specified coordinates.
Parameters
----------
frame : str, `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The observation frame in which the observer will be stationary. This
can be the name of a frame (e.g. 'icrs'), a frame class, frame instance
with no data, or instance with data. This can optionally include
velocities.
velocity : `~astropy.units.Quantity` or `~astropy.coordinates.CartesianDifferential`, optional
If ``frame`` does not contain velocities, these can be specified as
a 3-element `~astropy.units.Quantity`. In the case where this is
also not specified, the velocities default to zero.
preserve_observer_frame : bool
If `True`, the final observer frame class will be the same as the
original one, and if `False` it will be the frame of the velocity
reference class.
Returns
-------
new_coord : `SpectralCoord`
The new coordinate object representing the spectral data
transformed based on the observer's new velocity frame.
"""
if self.observer is None or self.target is None:
raise ValueError(
"This method can only be used if both observer "
"and target are defined on the SpectralCoord."
)
# Start off by extracting frame if a SkyCoord was passed in
if isinstance(frame, SkyCoord):
frame = frame.frame
if isinstance(frame, BaseCoordinateFrame):
if not frame.has_data:
frame = frame.realize_frame(
CartesianRepresentation(0 * u.km, 0 * u.km, 0 * u.km)
)
if frame.data.differentials:
if velocity is not None:
raise ValueError(
"frame already has differentials, cannot also specify velocity"
)
# otherwise frame is ready to go
else:
if velocity is None:
differentials = ZERO_VELOCITIES
else:
differentials = CartesianDifferential(velocity)
frame = frame.realize_frame(
frame.data.with_differentials(differentials)
)
if isinstance(frame, (type, str)):
if isinstance(frame, type):
frame_cls = frame
elif isinstance(frame, str):
frame_cls = frame_transform_graph.lookup_name(frame)
if velocity is None:
velocity = 0 * u.m / u.s, 0 * u.m / u.s, 0 * u.m / u.s
elif velocity.shape != (3,):
raise ValueError("velocity should be a Quantity vector with 3 elements")
frame = frame_cls(
0 * u.m,
0 * u.m,
0 * u.m,
*velocity,
representation_type="cartesian",
differential_type="cartesian",
)
observer = update_differentials_to_match(
self.observer, frame, preserve_observer_frame=preserve_observer_frame
)
# Calculate the initial and final los velocity
init_obs_vel = self._calculate_radial_velocity(
self.observer, self.target, as_scalar=True
)
fin_obs_vel = self._calculate_radial_velocity(
observer, self.target, as_scalar=True
)
# Apply transformation to data
new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel)
new_coord = self.replicate(value=new_data, observer=observer)
return new_coord
def with_radial_velocity_shift(self, target_shift=None, observer_shift=None):
"""
Apply a velocity shift to this spectral coordinate.
The shift can be provided as a redshift (float value) or radial
velocity (`~astropy.units.Quantity` with physical type of 'speed').
Parameters
----------
target_shift : float or `~astropy.units.Quantity` ['speed']
Shift value to apply to current target.
observer_shift : float or `~astropy.units.Quantity` ['speed']
Shift value to apply to current observer.
Returns
-------
`SpectralCoord`
New spectral coordinate with the target/observer velocity changed
to incorporate the shift. This is always a new object even if
``target_shift`` and ``observer_shift`` are both `None`.
"""
if observer_shift is not None and (
self.target is None or self.observer is None
):
raise ValueError(
"Both an observer and target must be defined "
"before applying a velocity shift."
)
for arg in [x for x in [target_shift, observer_shift] if x is not None]:
if isinstance(arg, u.Quantity) and not arg.unit.is_equivalent((u.one, KMS)):
raise u.UnitsError(
"Argument must have unit physical type 'speed' for radial velocity"
" or 'dimensionless' for redshift."
)
# The target or observer value is defined but is not a quantity object,
# assume it's a redshift float value and convert to velocity
if target_shift is None:
if self._observer is None or self._target is None:
return self.replicate()
target_shift = 0 * KMS
else:
target_shift = u.Quantity(target_shift)
if target_shift.unit.physical_type == "dimensionless":
target_shift = target_shift.to(u.km / u.s, u.doppler_redshift())
if self._observer is None or self._target is None:
return self.replicate(
value=_apply_relativistic_doppler_shift(self, target_shift),
radial_velocity=self.radial_velocity + target_shift,
)
if observer_shift is None:
observer_shift = 0 * KMS
else:
observer_shift = u.Quantity(observer_shift)
if observer_shift.unit.physical_type == "dimensionless":
observer_shift = observer_shift.to(u.km / u.s, u.doppler_redshift())
target_icrs = self._target.transform_to(ICRS())
observer_icrs = self._observer.transform_to(ICRS())
pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs)
target_velocity = _get_velocities(target_icrs) + target_shift * pos_hat
observer_velocity = _get_velocities(observer_icrs) + observer_shift * pos_hat
target_velocity = CartesianDifferential(target_velocity.xyz)
observer_velocity = CartesianDifferential(observer_velocity.xyz)
new_target = target_icrs.realize_frame(
target_icrs.cartesian.with_differentials(target_velocity)
).transform_to(self._target)
new_observer = observer_icrs.realize_frame(
observer_icrs.cartesian.with_differentials(observer_velocity)
).transform_to(self._observer)
init_obs_vel = self._calculate_radial_velocity(
observer_icrs, target_icrs, as_scalar=True
)
fin_obs_vel = self._calculate_radial_velocity(
new_observer, new_target, as_scalar=True
)
new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel)
return self.replicate(value=new_data, observer=new_observer, target=new_target)
def to_rest(self):
"""
Transforms the spectral axis to the rest frame.
"""
if self.observer is not None and self.target is not None:
return self.with_observer_stationary_relative_to(self.target)
result = _apply_relativistic_doppler_shift(self, -self.radial_velocity)
return self.replicate(value=result, radial_velocity=0.0 * KMS, redshift=None)
def __repr__(self):
prefixstr = "<" + self.__class__.__name__ + " "
try:
radial_velocity = self.radial_velocity
redshift = self.redshift
except ValueError:
radial_velocity = redshift = "Undefined"
repr_items = [f"{prefixstr}"]
if self.observer is not None:
observer_repr = indent(repr(self.observer), 14 * " ").lstrip()
repr_items.append(f" observer: {observer_repr}")
if self.target is not None:
target_repr = indent(repr(self.target), 12 * " ").lstrip()
repr_items.append(f" target: {target_repr}")
if (
self._observer is not None and self._target is not None
) or self._radial_velocity is not None:
if self.observer is not None and self.target is not None:
repr_items.append(" observer to target (computed from above):")
else:
repr_items.append(" observer to target:")
repr_items.append(f" radial_velocity={radial_velocity}")
repr_items.append(f" redshift={redshift}")
if self.doppler_rest is not None or self.doppler_convention is not None:
repr_items.append(f" doppler_rest={self.doppler_rest}")
repr_items.append(f" doppler_convention={self.doppler_convention}")
arrstr = np.array2string(self.view(np.ndarray), separator=", ", prefix=" ")
if len(repr_items) == 1:
repr_items[0] += f"{arrstr}{self._unitstr:s}"
else:
repr_items[1] = " (" + repr_items[1].lstrip()
repr_items[-1] += ")"
repr_items.append(f" {arrstr}{self._unitstr:s}")
return "\n".join(repr_items) + ">"
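# A minimal usage sketch (no observer/target, just a manual radial velocity):
#
#     import astropy.units as u
#     from astropy.coordinates import SpectralCoord
#
#     sc = SpectralCoord(6564.6 * u.AA, radial_velocity=100 * u.km / u.s)
#     sc.to_rest()  # undoes the 100 km/s shift and zeroes radial_velocity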
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module defines custom errors and exceptions used in astropy.coordinates.
"""
from astropy.utils.exceptions import AstropyWarning
__all__ = [
"RangeError",
"BoundsError",
"IllegalHourError",
"IllegalMinuteError",
"IllegalSecondError",
"ConvertError",
"IllegalHourWarning",
"IllegalMinuteWarning",
"IllegalSecondWarning",
"UnknownSiteException",
]
class RangeError(ValueError):
"""
Raised when some part of an angle is out of its valid range.
"""
class BoundsError(RangeError):
"""
Raised when an angle is outside of its user-specified bounds.
"""
class IllegalHourError(RangeError):
"""
Raised when an hour value is not in the range [0,24).
Parameters
----------
hour : int, float
Examples
--------
.. code-block:: python
if not 0 <= hour < 24:
raise IllegalHourError(hour)
"""
def __init__(self, hour):
self.hour = hour
def __str__(self):
return (
f"An invalid value for 'hours' was found ('{self.hour}'); must be in the"
" range [0,24)."
)
class IllegalHourWarning(AstropyWarning):
"""
Raised when an hour value is 24.
Parameters
----------
hour : int, float
"""
def __init__(self, hour, alternativeactionstr=None):
self.hour = hour
self.alternativeactionstr = alternativeactionstr
def __str__(self):
message = (
f"'hour' was found to be '{self.hour}', which is not in range (-24, 24)."
)
if self.alternativeactionstr is not None:
message += " " + self.alternativeactionstr
return message
class IllegalMinuteError(RangeError):
"""
Raised when a minute value is not in the range [0,60).
Parameters
----------
minute : int, float
Examples
--------
.. code-block:: python
if not 0 <= minute < 60:
raise IllegalMinuteError(minute)
"""
def __init__(self, minute):
self.minute = minute
def __str__(self):
return (
f"An invalid value for 'minute' was found ('{self.minute}'); should be in"
" the range [0,60)."
)
class IllegalMinuteWarning(AstropyWarning):
"""
Raised when a minute value is 60.
Parameters
----------
minute : int, float
"""
def __init__(self, minute, alternativeactionstr=None):
self.minute = minute
self.alternativeactionstr = alternativeactionstr
def __str__(self):
message = (
f"'minute' was found to be '{self.minute}', which is not in range [0,60)."
)
if self.alternativeactionstr is not None:
message += " " + self.alternativeactionstr
return message
class IllegalSecondError(RangeError):
"""
Raised when a second value (time) is not in the range [0,60).
Parameters
----------
second : int, float
Examples
--------
.. code-block:: python
if not 0 <= second < 60:
raise IllegalSecondError(second)
"""
def __init__(self, second):
self.second = second
def __str__(self):
return (
f"An invalid value for 'second' was found ('{self.second}'); should be in"
" the range [0,60)."
)
class IllegalSecondWarning(AstropyWarning):
"""
Raised when a second value is 60.
Parameters
----------
second : int, float
"""
def __init__(self, second, alternativeactionstr=None):
self.second = second
self.alternativeactionstr = alternativeactionstr
def __str__(self):
message = (
f"'second' was found to be '{self.second}', which is not in range [0,60)."
)
if self.alternativeactionstr is not None:
message += " " + self.alternativeactionstr
return message
# TODO: consider if this should be moved to `units`?
class UnitsError(ValueError):
"""
Raised if units are missing or invalid.
"""
class ConvertError(Exception):
"""
Raised if a coordinate system cannot be converted to another.
"""
class UnknownSiteException(KeyError):
def __init__(self, site, attribute, close_names=None):
message = (
f"Site '{site}' not in database. Use {attribute} to see available sites."
)
if close_names:
message += " Did you mean one of: '{}'?".format("', '".join(close_names))
self.site = site
self.attribute = attribute
self.close_names = close_names
super().__init__(message)
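# Example (the attribute string below is illustrative):
#
#     raise UnknownSiteException(
#         "grenwich", "EarthLocation.get_site_names()", close_names=["greenwich"]
#     )
#     # -> "Site 'grenwich' not in database. Use EarthLocation.get_site_names()
#     #     to see available sites. Did you mean one of: 'greenwich'?"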
import copy
import operator
import re
import warnings
import erfa
import numpy as np
from astropy import units as u
from astropy.constants import c as speed_of_light
from astropy.table import QTable
from astropy.time import Time
from astropy.utils import ShapedLikeNDArray
from astropy.utils.data_info import MixinInfo
from astropy.utils.exceptions import AstropyUserWarning
from .angles import Angle
from .baseframe import BaseCoordinateFrame, GenericFrame, frame_transform_graph
from .distances import Distance
from .representation import (
RadialDifferential,
SphericalDifferential,
SphericalRepresentation,
UnitSphericalCosLatDifferential,
UnitSphericalDifferential,
UnitSphericalRepresentation,
)
from .sky_coordinate_parsers import (
_get_frame_class,
_get_frame_without_data,
_parse_coordinate_data,
)
__all__ = ["SkyCoord", "SkyCoordInfo"]
class SkyCoordInfo(MixinInfo):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
attrs_from_parent = {"unit"} # Unit is read-only
_supports_indexing = False
@staticmethod
def default_format(val):
repr_data = val.info._repr_data
formats = ["{0." + compname + ".value:}" for compname in repr_data.components]
return ",".join(formats).format(repr_data)
@property
def unit(self):
repr_data = self._repr_data
unit = ",".join(
str(getattr(repr_data, comp).unit) or "None"
for comp in repr_data.components
)
return unit
@property
def _repr_data(self):
if self._parent is None:
return None
sc = self._parent
if issubclass(sc.representation_type, SphericalRepresentation) and isinstance(
sc.data, UnitSphericalRepresentation
):
repr_data = sc.represent_as(sc.data.__class__, in_frame_units=True)
else:
repr_data = sc.represent_as(sc.representation_type, in_frame_units=True)
return repr_data
def _represent_as_dict(self):
sc = self._parent
attrs = list(sc.representation_component_names)
# Don't output distance unless it's actually distance.
if isinstance(sc.data, UnitSphericalRepresentation):
attrs = attrs[:-1]
diff = sc.data.differentials.get("s")
if diff is not None:
diff_attrs = list(sc.get_representation_component_names("s"))
# Don't output proper motions if they haven't been specified.
if isinstance(diff, RadialDifferential):
diff_attrs = diff_attrs[2:]
# Don't output radial velocity unless it's actually velocity.
elif isinstance(
diff, (UnitSphericalDifferential, UnitSphericalCosLatDifferential)
):
diff_attrs = diff_attrs[:-1]
attrs.extend(diff_attrs)
attrs.extend(frame_transform_graph.frame_attributes.keys())
out = super()._represent_as_dict(attrs)
out["representation_type"] = sc.representation_type.get_name()
out["frame"] = sc.frame.name
# Note that sc.info.unit is a fake composite unit (e.g. 'deg,deg,None'
# or None,None,m) and is not stored. The individual attributes have
# units.
return out
def new_like(self, skycoords, length, metadata_conflicts="warn", name=None):
"""
Return a new SkyCoord instance which is consistent with the input
SkyCoord objects ``skycoords`` and has ``length`` rows. Being
"consistent" is defined as being able to set an item from one to each of
the rest without any exception being raised.
This is intended for creating a new SkyCoord instance whose elements can
be set in-place for table operations like join or vstack. This is used
when a SkyCoord object is used as a mixin column in an astropy Table.
The data values are not predictable and it is expected that the consumer
of the object will fill in all values.
Parameters
----------
skycoords : list
List of input SkyCoord objects
length : int
Length of the output skycoord object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output name (sets output skycoord.info.name)
Returns
-------
skycoord : |SkyCoord| (or subclass)
Instance of this class consistent with ``skycoords``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(
skycoords, metadata_conflicts, name, ("meta", "description")
)
skycoord0 = skycoords[0]
# Make a new SkyCoord object with the desired length and attributes
# by using the _apply / __getitem__ machinery to effectively return
# skycoord0[[0, 0, ..., 0, 0]]. This will have all the right frame
# attributes with the right shape.
indexes = np.zeros(length, dtype=np.int64)
out = skycoord0[indexes]
# Use __setitem__ machinery to check for consistency of all skycoords
for skycoord in skycoords[1:]:
try:
out[0] = skycoord[0]
except Exception as err:
raise ValueError("Input skycoords are inconsistent.") from err
# Set (merged) info attributes
for attr in ("name", "meta", "description"):
if attr in attrs:
setattr(out.info, attr, attrs[attr])
return out
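# A minimal sketch of the mixin-column use case SkyCoordInfo supports:
#
#     import astropy.units as u
#     from astropy.table import Table, vstack
#     from astropy.coordinates import SkyCoord
#
#     t1 = Table({"coord": SkyCoord([10, 20] * u.deg, [30, 40] * u.deg)})
#     t2 = Table({"coord": SkyCoord([50] * u.deg, [60] * u.deg)})
#     vstack([t1, t2])  # dispatches to SkyCoordInfo.new_like above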
class SkyCoord(ShapedLikeNDArray):
"""High-level object providing a flexible interface for celestial coordinate
representation, manipulation, and transformation between systems.
The |SkyCoord| class accepts a wide variety of inputs for initialization. At
a minimum these must provide one or more celestial coordinate values with
unambiguous units. Inputs may be scalars or lists/tuples/arrays, yielding
scalar or array coordinates (can be checked via ``SkyCoord.isscalar``).
Typically one also specifies the coordinate frame, though this is not
required. The general pattern for spherical representations is::
SkyCoord(COORD, [FRAME], keyword_args ...)
SkyCoord(LON, LAT, [FRAME], keyword_args ...)
SkyCoord(LON, LAT, [DISTANCE], frame=FRAME, unit=UNIT, keyword_args ...)
SkyCoord([FRAME], <lon_attr>=LON, <lat_attr>=LAT, keyword_args ...)
It is also possible to input coordinate values in other representations
such as cartesian or cylindrical. In this case one includes the keyword
argument ``representation_type='cartesian'`` (for example) along with data
in ``x``, ``y``, and ``z``.
See also: https://docs.astropy.org/en/stable/coordinates/
Examples
--------
The examples below illustrate common ways of initializing a |SkyCoord|
object. For a complete description of the allowed syntax see the
full coordinates documentation. First some imports::
>>> from astropy.coordinates import SkyCoord # High-level coordinates
>>> from astropy.coordinates import ICRS, Galactic, FK4, FK5 # Low-level frames
>>> from astropy.coordinates import Angle, Latitude, Longitude # Angles
>>> import astropy.units as u
The coordinate values and frame specification can now be provided using
positional and keyword arguments::
>>> c = SkyCoord(10, 20, unit="deg") # defaults to ICRS frame
>>> c = SkyCoord([1, 2, 3], [-30, 45, 8], frame="icrs", unit="deg") # 3 coords
>>> coords = ["1:12:43.2 +31:12:43", "1 12 43.2 +31 12 43"]
>>> c = SkyCoord(coords, frame=FK4, unit=(u.hourangle, u.deg), obstime="J1992.21")
>>> c = SkyCoord("1h12m43.2s +1d12m43s", frame=Galactic) # Units from string
>>> c = SkyCoord(frame="galactic", l="1h12m43.2s", b="+1d12m43s")
>>> ra = Longitude([1, 2, 3], unit=u.deg) # Could also use Angle
>>> dec = np.array([4.5, 5.2, 6.3]) * u.deg # Astropy Quantity
>>> c = SkyCoord(ra, dec, frame='icrs')
>>> c = SkyCoord(frame=ICRS, ra=ra, dec=dec, obstime='2001-01-02T12:34:56')
>>> c = FK4(1 * u.deg, 2 * u.deg) # Uses defaults for obstime, equinox
>>> c = SkyCoord(c, obstime='J2010.11', equinox='B1965') # Override defaults
>>> c = SkyCoord(w=0, u=1, v=2, unit='kpc', frame='galactic',
... representation_type='cartesian')
>>> c = SkyCoord([ICRS(ra=1*u.deg, dec=2*u.deg), ICRS(ra=3*u.deg, dec=4*u.deg)])
Velocity components (proper motions or radial velocities) can also be
provided in a similar manner::
>>> c = SkyCoord(ra=1*u.deg, dec=2*u.deg, radial_velocity=10*u.km/u.s)
>>> c = SkyCoord(ra=1*u.deg, dec=2*u.deg, pm_ra_cosdec=2*u.mas/u.yr, pm_dec=1*u.mas/u.yr)
As shown, the frame can be a `~astropy.coordinates.BaseCoordinateFrame`
class or the corresponding string alias -- lower-case versions of the
class name that allow for creating a |SkyCoord| object and transforming
frames without explicitly importing the frame classes.
Parameters
----------
frame : `~astropy.coordinates.BaseCoordinateFrame` class or string, optional
Type of coordinate frame this |SkyCoord| should represent. Defaults to
ICRS if not given or given as None.
unit : `~astropy.units.Unit`, string, or tuple of :class:`~astropy.units.Unit` or str, optional
Units for supplied coordinate values.
If only one unit is supplied then it applies to all values.
Note that passing only one unit might lead to unit conversion errors
if the coordinate values are expected to have mixed physical meanings
(e.g., angles and distances).
obstime : time-like, optional
Time(s) of observation.
equinox : time-like, optional
Coordinate frame equinox time.
representation_type : str or Representation class
Specifies the representation, e.g. 'spherical', 'cartesian', or
'cylindrical'. This affects the positional args and other keyword args
which must correspond to the given representation.
copy : bool, optional
If `True` (default), a copy of any coordinate data is made. This
argument can only be passed in as a keyword argument.
**keyword_args
Other keyword arguments as applicable for user-defined coordinate frames.
Common options include:
ra, dec : angle-like, optional
RA and Dec for frames where ``ra`` and ``dec`` are keys in the
frame's ``representation_component_names``, including ``ICRS``,
``FK5``, ``FK4``, and ``FK4NoETerms``.
pm_ra_cosdec, pm_dec : `~astropy.units.Quantity` ['angular speed'], optional
Proper motion components, in angle per time units.
l, b : angle-like, optional
Galactic ``l`` and ``b`` for frames where ``l`` and ``b`` are
keys in the frame's ``representation_component_names``, including
the ``Galactic`` frame.
pm_l_cosb, pm_b : `~astropy.units.Quantity` ['angular speed'], optional
Proper motion components in the `~astropy.coordinates.Galactic` frame,
in angle per time units.
x, y, z : float or `~astropy.units.Quantity` ['length'], optional
Cartesian coordinates values
u, v, w : float or `~astropy.units.Quantity` ['length'], optional
Cartesian coordinates values for the Galactic frame.
radial_velocity : `~astropy.units.Quantity` ['speed'], optional
The component of the velocity along the line-of-sight (i.e., the
radial direction), in velocity units.
"""
# Declare that SkyCoord can be used as a Table column by defining the
# info property.
info = SkyCoordInfo()
def __init__(self, *args, copy=True, **kwargs):
# these are frame attributes set on this SkyCoord but *not* a part of
# the frame object this SkyCoord contains
self._extra_frameattr_names = set()
# If all that is passed in is a frame instance that already has data,
# we should bypass all of the parsing and logic below. This is here
# to make this the fastest way to create a SkyCoord instance. Many of
# the classmethods implemented for performance enhancements will use
# this as the initialization path
if (
len(args) == 1
and len(kwargs) == 0
and isinstance(args[0], (BaseCoordinateFrame, SkyCoord))
):
coords = args[0]
if isinstance(coords, SkyCoord):
self._extra_frameattr_names = coords._extra_frameattr_names
self.info = coords.info
# Copy over any extra frame attributes
for attr_name in self._extra_frameattr_names:
# Setting it will also validate it.
setattr(self, attr_name, getattr(coords, attr_name))
coords = coords.frame
if not coords.has_data:
raise ValueError(
"Cannot initialize from a coordinate frame "
"instance without coordinate data"
)
if copy:
self._sky_coord_frame = coords.copy()
else:
self._sky_coord_frame = coords
else:
# Get the frame instance without coordinate data but with all frame
# attributes set - these could either have been passed in with the
# frame as an instance, or passed in as kwargs here
frame_cls, frame_kwargs = _get_frame_without_data(args, kwargs)
# Parse the args and kwargs to assemble a sanitized and validated
# kwargs dict for initializing attributes for this object and for
# creating the internal self._sky_coord_frame object
args = list(args) # Make it mutable
skycoord_kwargs, components, info = _parse_coordinate_data(
frame_cls(**frame_kwargs), args, kwargs
)
# In the above two parsing functions, these kwargs were identified
# as valid frame attributes for *some* frame, but not the frame that
# this SkyCoord will have. We keep these attributes as special
# skycoord frame attributes:
for attr in skycoord_kwargs:
# Setting it will also validate it.
setattr(self, attr, skycoord_kwargs[attr])
if info is not None:
self.info = info
# Finally make the internal coordinate object.
frame_kwargs.update(components)
self._sky_coord_frame = frame_cls(copy=copy, **frame_kwargs)
if not self._sky_coord_frame.has_data:
raise ValueError("Cannot create a SkyCoord without data")
@property
def frame(self):
return self._sky_coord_frame
@property
def representation_type(self):
return self.frame.representation_type
@representation_type.setter
def representation_type(self, value):
self.frame.representation_type = value
@property
def shape(self):
return self.frame.shape
def __eq__(self, value):
"""Equality operator for SkyCoord.
This implements strict equality and requires that the frames are
equivalent, extra frame attributes are equivalent, and that the
representation data are exactly equal.
"""
if isinstance(value, BaseCoordinateFrame):
if value._data is None:
raise ValueError("Can only compare SkyCoord to Frame with data")
return self.frame == value
if not isinstance(value, SkyCoord):
return NotImplemented
# Make sure that any extra frame attribute names are equivalent.
for attr in self._extra_frameattr_names | value._extra_frameattr_names:
if not self.frame._frameattr_equiv(
getattr(self, attr), getattr(value, attr)
):
raise ValueError(
f"cannot compare: extra frame attribute '{attr}' is not equivalent"
" (perhaps compare the frames directly to avoid this exception)"
)
return self._sky_coord_frame == value._sky_coord_frame
def __ne__(self, value):
return np.logical_not(self == value)
def _apply(self, method, *args, **kwargs):
"""Create a new instance, applying a method to the underlying data.
In typical usage, the method is any of the shape-changing methods for
`~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those
picking particular elements (``__getitem__``, ``take``, etc.), which
are all defined in `~astropy.utils.shapes.ShapedLikeNDArray`. It will be
applied to the underlying arrays in the representation (e.g., ``x``,
``y``, and ``z`` for `~astropy.coordinates.CartesianRepresentation`),
as well as to any frame attributes that have a shape, with the results
used to create a new instance.
Internally, it is also used to apply functions to the above parts
(in particular, `~numpy.broadcast_to`).
Parameters
----------
method : str or callable
If str, it is the name of a method that is applied to the internal
``components``. If callable, the function is applied.
*args
Any positional arguments for ``method``.
**kwargs : dict
Any keyword arguments for ``method``.
"""
def apply_method(value):
if isinstance(value, ShapedLikeNDArray):
return value._apply(method, *args, **kwargs)
else:
if callable(method):
return method(value, *args, **kwargs)
else:
return getattr(value, method)(*args, **kwargs)
# create a new but empty instance, and copy over stuff
new = super().__new__(self.__class__)
new._sky_coord_frame = self._sky_coord_frame._apply(method, *args, **kwargs)
new._extra_frameattr_names = self._extra_frameattr_names.copy()
for attr in self._extra_frameattr_names:
value = getattr(self, attr)
if getattr(value, "shape", ()):
value = apply_method(value)
elif method == "copy" or method == "flatten":
# flatten should copy also for a single element array, but
# we cannot use it directly for array scalars, since it
# always returns a one-dimensional array. So, just copy.
value = copy.copy(value)
setattr(new, "_" + attr, value)
# Copy other 'info' attr only if it has actually been defined.
# See PR #3898 for further explanation and justification, along
# with Quantity.__array_finalize__
if "info" in self.__dict__:
new.info = self.info
return new
def __setitem__(self, item, value):
"""Implement self[item] = value for SkyCoord.
The right hand ``value`` must be strictly consistent with self:
- Identical class
- Equivalent frames
- Identical representation_types
- Identical representation differentials keys
- Identical frame attributes
- Identical "extra" frame attributes (e.g. obstime for an ICRS coord)
With these caveats the setitem ends up as effectively a setitem on
the representation data.
self.frame.data[item] = value.frame.data
"""
if self.__class__ is not value.__class__:
raise TypeError(
"can only set from object of same class: "
f"{self.__class__.__name__} vs. {value.__class__.__name__}"
)
# Make sure that any extra frame attribute names are equivalent.
for attr in self._extra_frameattr_names | value._extra_frameattr_names:
if not self.frame._frameattr_equiv(
getattr(self, attr), getattr(value, attr)
):
raise ValueError(f"attribute {attr} is not equivalent")
# Set the frame values. This checks frame equivalence and also clears
# the cache to ensure that the object is not in an inconsistent state.
self._sky_coord_frame[item] = value._sky_coord_frame
def insert(self, obj, values, axis=0):
"""
Insert coordinate values before the given indices in the object and
return a new |SkyCoord| object.
The values to be inserted must conform to the rules for in-place setting
of |SkyCoord| objects.
The API signature matches the ``np.insert`` API, but is more limited.
The specification of insert index ``obj`` must be a single integer,
and the ``axis`` must be ``0`` for simple insertion before the index.
Parameters
----------
obj : int
Integer index before which ``values`` is inserted.
values : array-like
Value(s) to insert. If the type of ``values`` is different
from that of this object, ``values`` is converted to the matching type.
axis : int, optional
Axis along which to insert ``values``. Default is 0, which is the
only allowed value and will insert a row.
Returns
-------
out : `~astropy.coordinates.SkyCoord` instance
New coordinate object with inserted value(s)
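Examples
--------
A minimal sketch of inserting one coordinate into an array |SkyCoord|
(output omitted, so the lines are skipped by doctests):
>>> import astropy.units as u
>>> sc = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg)
>>> sc.insert(1, SkyCoord(5*u.deg, 6*u.deg))  # doctest: +SKIP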
"""
# Validate inputs: obj arg is integer, axis=0, self is not a scalar, and
# input index is in bounds.
try:
idx0 = operator.index(obj)
except TypeError:
raise TypeError("obj arg must be an integer")
if axis != 0:
raise ValueError("axis must be 0")
if not self.shape:
raise TypeError(
f"cannot insert into scalar {self.__class__.__name__} object"
)
if abs(idx0) > len(self):
raise IndexError(
f"index {idx0} is out of bounds for axis 0 with size {len(self)}"
)
# Turn negative index into positive
if idx0 < 0:
idx0 = len(self) + idx0
n_values = len(values) if values.shape else 1
# Finally make the new object with the correct length and set values for the
# three sections, before insert, the insert, and after the insert.
out = self.__class__.info.new_like(
[self], len(self) + n_values, name=self.info.name
)
# Set the output values. This is where validation of `values` takes place to ensure
# that it can indeed be inserted.
out[:idx0] = self[:idx0]
out[idx0 : idx0 + n_values] = values
out[idx0 + n_values :] = self[idx0:]
return out
def is_transformable_to(self, new_frame):
"""
Determines if this coordinate frame can be transformed to another
given frame.
Parameters
----------
new_frame : frame class, frame object, or str
The proposed frame to transform into.
Returns
-------
transformable : bool or str
`True` if this can be transformed to ``new_frame``, `False` if
not, or the string 'same' if ``new_frame`` is the same system as
this object but no transformation is defined.
Notes
-----
A return value of 'same' means the transformation will work, but it will
just give back a copy of this object. The intended usage is::
if coord.is_transformable_to(some_unknown_frame):
coord2 = coord.transform_to(some_unknown_frame)
This will work even if ``some_unknown_frame`` turns out to be the same
frame class as ``coord``. This is intended for cases where the frame
is the same regardless of the frame attributes (e.g. ICRS), but be
aware that it *might* also indicate that someone forgot to define the
transformation between two objects of the same frame class but with
different attributes.
"""
# TODO! like matplotlib, do string overrides for modified methods
new_frame = (
_get_frame_class(new_frame) if isinstance(new_frame, str) else new_frame
)
return self.frame.is_transformable_to(new_frame)
def transform_to(self, frame, merge_attributes=True):
"""Transform this coordinate to a new frame.
The precise frame transformed to depends on ``merge_attributes``.
If `False`, the destination frame is used exactly as passed in.
But this is often not quite what one wants. E.g., suppose one wants to
transform an ICRS coordinate that has an obstime attribute to FK4; in
this case, one likely would want to use this information. Thus, the
default for ``merge_attributes`` is `True`, in which case the precedence is
as follows: (1) explicitly set (i.e., non-default) values in the
destination frame; (2) explicitly set values in the source; (3) default
value in the destination frame.
Note that in either case, any explicitly set attributes on the source
|SkyCoord| that are not part of the destination frame's definition are
kept (stored on the resulting |SkyCoord|), and thus one can round-trip
(e.g., from FK4 to ICRS to FK4 without losing obstime).
Parameters
----------
frame : str, `~astropy.coordinates.BaseCoordinateFrame` class or instance, or |SkyCoord| instance
The frame to transform this coordinate into. If a |SkyCoord|, the
underlying frame is extracted, and all other information ignored.
merge_attributes : bool, optional
Whether the default attributes in the destination frame are allowed
to be overridden by explicitly set attributes in the source
(see note above; default: `True`).
Returns
-------
coord : |SkyCoord|
A new object with this coordinate represented in the `frame` frame.
Raises
------
ValueError
If there is no possible transformation route.
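Examples
--------
A minimal sketch of a frame transformation (the output values depend on
the transformation graph, so the example is skipped by doctests):
>>> import astropy.units as u
>>> sc = SkyCoord(ra=10*u.deg, dec=20*u.deg, frame='icrs')
>>> sc.transform_to('galactic')  # doctest: +SKIP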
"""
from astropy.coordinates.errors import ConvertError
frame_kwargs = {}
# Frame name (string) or frame class? Coerce into an instance.
try:
frame = _get_frame_class(frame)()
except Exception:
pass
if isinstance(frame, SkyCoord):
frame = frame.frame # Change to underlying coord frame instance
if isinstance(frame, BaseCoordinateFrame):
new_frame_cls = frame.__class__
# Get frame attributes, allowing defaults to be overridden by
# explicitly set attributes of the source if ``merge_attributes``.
for attr in frame_transform_graph.frame_attributes:
self_val = getattr(self, attr, None)
frame_val = getattr(frame, attr, None)
if frame_val is not None and not (
merge_attributes and frame.is_frame_attr_default(attr)
):
frame_kwargs[attr] = frame_val
elif self_val is not None and not self.is_frame_attr_default(attr):
frame_kwargs[attr] = self_val
elif frame_val is not None:
frame_kwargs[attr] = frame_val
else:
raise ValueError(
"Transform `frame` must be a frame name, class, or instance"
)
# Get the composite transform to the new frame
trans = frame_transform_graph.get_transform(self.frame.__class__, new_frame_cls)
if trans is None:
raise ConvertError(
f"Cannot transform from {self.frame.__class__} to {new_frame_cls}"
)
# Make a generic frame which will accept all the frame kwargs that
# are provided and allow for transforming through intermediate frames
# which may require one or more of those kwargs.
generic_frame = GenericFrame(frame_kwargs)
# Do the transformation, returning a coordinate frame of the desired
# final type (not generic).
new_coord = trans(self.frame, generic_frame)
# Finally make the new SkyCoord object from the `new_coord` and
# remaining frame_kwargs that are not frame_attributes in `new_coord`.
for attr in set(new_coord.frame_attributes) & set(frame_kwargs.keys()):
frame_kwargs.pop(attr)
# Always remove the origin frame attribute, as that attribute only makes
# sense with a SkyOffsetFrame (in which case it will be stored on the frame).
# See gh-11277.
# TODO: Should it be a property of the frame attribute that it can
# or cannot be stored on a SkyCoord?
frame_kwargs.pop("origin", None)
return self.__class__(new_coord, **frame_kwargs)
def apply_space_motion(self, new_obstime=None, dt=None):
"""Compute the position to a new time using the velocities.
Compute the position of the source represented by this coordinate object
to a new time using the velocities stored in this object and assuming
linear space motion (including relativistic corrections). This is
sometimes referred to as an "epoch transformation".
The initial time before the evolution is taken from the ``obstime``
attribute of this coordinate. Note that this method currently does not
support evolving coordinates where the *frame* has an ``obstime`` frame
attribute, so the ``obstime`` is only used for storing the before and
after times, not actually as an attribute of the frame. Alternatively,
if ``dt`` is given, an ``obstime`` need not be provided at all.
Parameters
----------
new_obstime : `~astropy.time.Time`, optional
The time at which to evolve the position to. Requires that the
``obstime`` attribute be present on this frame.
dt : `~astropy.units.Quantity`, `~astropy.time.TimeDelta`, optional
An amount of time to evolve the position of the source. Cannot be
given at the same time as ``new_obstime``.
Returns
-------
new_coord : |SkyCoord|
A new coordinate object with the evolved location of this coordinate
at the new time. ``obstime`` will be set on this object to the new
time only if ``self`` also has ``obstime``.
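Examples
--------
A minimal sketch, assuming proper motions, a distance, and an
``obstime`` have been set (skipped by doctests):
>>> import astropy.units as u
>>> from astropy.time import Time
>>> sc = SkyCoord(ra=10*u.deg, dec=20*u.deg, distance=100*u.pc,
...               pm_ra_cosdec=50*u.mas/u.yr, pm_dec=-30*u.mas/u.yr,
...               obstime=Time('J2000'))
>>> sc.apply_space_motion(dt=10*u.yr)  # doctest: +SKIP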
"""
from .builtin_frames.icrs import ICRS
if (new_obstime is None) == (dt is None):
raise ValueError(
"You must specify one of `new_obstime` or `dt`, but not both."
)
# Validate that we have velocity info
if "s" not in self.frame.data.differentials:
raise ValueError("SkyCoord requires velocity data to evolve the position.")
if "obstime" in self.frame.frame_attributes:
raise NotImplementedError(
"Updating the coordinates in a frame with explicit time dependence is"
" currently not supported. If you would like this functionality, please"
" open an issue on github:\nhttps://github.com/astropy/astropy"
)
if new_obstime is not None and self.obstime is None:
# If no obstime is already on this object, raise an error if a new
# obstime is passed: we need to know the time / epoch at which the
# position / velocity were measured initially
raise ValueError(
"This object has no associated `obstime`. apply_space_motion() must"
" receive a time difference, `dt`, and not a new obstime."
)
# Compute t1 and t2, the times used in the starpm call, which *only*
# uses them to compute a delta-time
t1 = self.obstime
if dt is None:
# self.obstime is not None and new_obstime is not None b/c of above
# checks
t2 = new_obstime
else:
# new_obstime is definitely None b/c of the above checks
if t1 is None:
# MAGIC NUMBER: if the current SkyCoord object has no obstime,
# assume J2000 to do the dt offset. This is not actually used
# for anything except a delta-t in starpm, so it's OK that it's
# not necessarily the "real" obstime
t1 = Time("J2000")
new_obstime = None # we don't actually know the initial obstime
t2 = t1 + dt
else:
t2 = t1 + dt
new_obstime = t2
# starpm wants tdb time
t1 = t1.tdb
t2 = t2.tdb
# proper motion in RA should not include the cos(dec) term, see the
# erfa function eraStarpv, comment (4). So we convert to the regular
# spherical differentials.
icrsrep = self.icrs.represent_as(SphericalRepresentation, SphericalDifferential)
icrsvel = icrsrep.differentials["s"]
parallax_zero = False
try:
plx = icrsrep.distance.to_value(u.arcsecond, u.parallax())
except u.UnitConversionError: # No distance: set to 0 by convention
plx = 0.0
parallax_zero = True
try:
rv = icrsvel.d_distance.to_value(u.km / u.s)
except u.UnitConversionError: # No RV
rv = 0.0
starpm = erfa.pmsafe(
icrsrep.lon.radian,
icrsrep.lat.radian,
icrsvel.d_lon.to_value(u.radian / u.yr),
icrsvel.d_lat.to_value(u.radian / u.yr),
plx,
rv,
t1.jd1,
t1.jd2,
t2.jd1,
t2.jd2,
)
if parallax_zero:
new_distance = None
else:
new_distance = Distance(parallax=starpm[4] << u.arcsec)
icrs2 = ICRS(
ra=u.Quantity(starpm[0], u.radian, copy=False),
dec=u.Quantity(starpm[1], u.radian, copy=False),
pm_ra=u.Quantity(starpm[2], u.radian / u.yr, copy=False),
pm_dec=u.Quantity(starpm[3], u.radian / u.yr, copy=False),
distance=new_distance,
radial_velocity=u.Quantity(starpm[5], u.km / u.s, copy=False),
differential_type=SphericalDifferential,
)
# Update the obstime of the returned SkyCoord, and need to carry along
# the frame attributes
frattrs = {
attrnm: getattr(self, attrnm) for attrnm in self._extra_frameattr_names
}
frattrs["obstime"] = new_obstime
result = self.__class__(icrs2, **frattrs).transform_to(self.frame)
# Without this the output might not have the right differential type.
# Not sure if this fixes the problem or just hides it. See #11932
result.differential_type = self.differential_type
return result
def _is_name(self, string):
"""
Returns whether a string is one of the aliases for the frame.
"""
return self.frame.name == string or (
isinstance(self.frame.name, list) and string in self.frame.name
)
def __getattr__(self, attr):
"""
Overrides getattr to return coordinates that this can be transformed
to, based on the alias attr in the primary transform graph.
"""
if "_sky_coord_frame" in self.__dict__:
if self._is_name(attr):
return self # Should this be a deepcopy of self?
# Anything in the set of all possible frame_attr_names is handled
# here. If the attr is relevant for the current frame then delegate
# to self.frame otherwise get it from self._<attr>.
if attr in frame_transform_graph.frame_attributes:
if attr in self.frame.frame_attributes:
return getattr(self.frame, attr)
else:
return getattr(self, "_" + attr, None)
# Some attributes might not fall in the above category but still
# are available through self._sky_coord_frame.
if not attr.startswith("_") and hasattr(self._sky_coord_frame, attr):
return getattr(self._sky_coord_frame, attr)
# Try to interpret as a new frame for transforming.
frame_cls = frame_transform_graph.lookup_name(attr)
if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
return self.transform_to(attr)
# Call __getattribute__; this will give correct exception.
return self.__getattribute__(attr)
def __setattr__(self, attr, val):
# This is to make anything available through __getattr__ immutable
if "_sky_coord_frame" in self.__dict__:
if self._is_name(attr):
raise AttributeError(f"'{attr}' is immutable")
if not attr.startswith("_") and hasattr(self._sky_coord_frame, attr):
setattr(self._sky_coord_frame, attr, val)
return
frame_cls = frame_transform_graph.lookup_name(attr)
if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
raise AttributeError(f"'{attr}' is immutable")
if attr in frame_transform_graph.frame_attributes:
# All possible frame attributes can be set, but only via a private
# variable. See __getattr__ above.
super().__setattr__("_" + attr, val)
# Validate it
frame_transform_graph.frame_attributes[attr].__get__(self)
# And add to set of extra attributes
self._extra_frameattr_names |= {attr}
else:
# Otherwise, do the standard Python attribute setting
super().__setattr__(attr, val)
def __delattr__(self, attr):
# mirror __setattr__ above
if "_sky_coord_frame" in self.__dict__:
if self._is_name(attr):
raise AttributeError(f"'{attr}' is immutable")
if not attr.startswith("_") and hasattr(self._sky_coord_frame, attr):
delattr(self._sky_coord_frame, attr)
return
frame_cls = frame_transform_graph.lookup_name(attr)
if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
raise AttributeError(f"'{attr}' is immutable")
if attr in frame_transform_graph.frame_attributes:
# All possible frame attributes can be deleted, but need to remove
# the corresponding private variable. See __getattr__ above.
super().__delattr__("_" + attr)
# Also remove it from the set of extra attributes
self._extra_frameattr_names -= {attr}
else:
# Otherwise, do the standard Python attribute deletion
super().__delattr__(attr)
def __dir__(self):
"""Original dir() behavior, plus frame attributes and transforms.
This dir includes:
- All attributes of the SkyCoord class
- Coordinate transforms available by aliases
- Attribute / methods of the underlying self.frame objects
"""
dir_values = set(super().__dir__())
# determine the aliases that this can be transformed to.
for name in frame_transform_graph.get_names():
frame_cls = frame_transform_graph.lookup_name(name)
if self.frame.is_transformable_to(frame_cls):
dir_values.add(name)
# Add public attributes of self.frame
dir_values.update(
{attr for attr in dir(self.frame) if not attr.startswith("_")}
)
# Add all possible frame attributes
dir_values.update(frame_transform_graph.frame_attributes.keys())
return sorted(dir_values)
def __repr__(self):
clsnm = self.__class__.__name__
coonm = self.frame.__class__.__name__
frameattrs = self.frame._frame_attrs_repr()
if frameattrs:
frameattrs = ": " + frameattrs
data = self.frame._data_repr()
if data:
data = ": " + data
return f"<{clsnm} ({coonm}{frameattrs}){data}>"
def to_string(self, style="decimal", **kwargs):
"""
A string representation of the coordinates.
The default style definitions are::
'decimal': 'lat': {'decimal': True, 'unit': "deg"}
'lon': {'decimal': True, 'unit': "deg"}
'dms': 'lat': {'unit': "deg"}
'lon': {'unit': "deg"}
'hmsdms': 'lat': {'alwayssign': True, 'pad': True, 'unit': "deg"}
'lon': {'pad': True, 'unit': "hour"}
See :meth:`~astropy.coordinates.Angle.to_string` for details and
keyword arguments (the two angles forming the coordinates are
both :class:`~astropy.coordinates.Angle` instances). Keyword
arguments have precedence over the style defaults and are passed
to :meth:`~astropy.coordinates.Angle.to_string`.
Parameters
----------
style : {'hmsdms', 'dms', 'decimal'}
The formatting specification to use. These encode the three most
common ways to represent coordinates. The default is ``'decimal'``.
**kwargs
Keyword args passed to :meth:`~astropy.coordinates.Angle.to_string`.
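Examples
--------
A minimal sketch of two of the styles (the outputs shown are
illustrative only, so the lines are skipped by doctests):
>>> import astropy.units as u
>>> sc = SkyCoord(10.5*u.deg, -20.25*u.deg)
>>> sc.to_string('decimal')  # doctest: +SKIP
'10.5 -20.25'
>>> sc.to_string('hmsdms')  # doctest: +SKIP
'00h42m00s -20d15m00s'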
"""
sph_coord = self.frame.represent_as(SphericalRepresentation)
styles = {
"hmsdms": {
"lonargs": {"unit": u.hour, "pad": True},
"latargs": {"unit": u.degree, "pad": True, "alwayssign": True},
},
"dms": {"lonargs": {"unit": u.degree}, "latargs": {"unit": u.degree}},
"decimal": {
"lonargs": {"unit": u.degree, "decimal": True},
"latargs": {"unit": u.degree, "decimal": True},
},
}
lonargs = {}
latargs = {}
if style in styles:
lonargs.update(styles[style]["lonargs"])
latargs.update(styles[style]["latargs"])
else:
raise ValueError(f"Invalid style. Valid options are: {','.join(styles)}")
lonargs.update(kwargs)
latargs.update(kwargs)
if np.isscalar(sph_coord.lon.value):
coord_string = (
f"{sph_coord.lon.to_string(**lonargs)}"
f" {sph_coord.lat.to_string(**latargs)}"
)
else:
coord_string = []
for lonangle, latangle in zip(sph_coord.lon.ravel(), sph_coord.lat.ravel()):
coord_string += [
f"{lonangle.to_string(**lonargs)} {latangle.to_string(**latargs)}"
]
if len(sph_coord.shape) > 1:
coord_string = np.array(coord_string).reshape(sph_coord.shape)
return coord_string
def to_table(self):
"""
Convert this |SkyCoord| to a |QTable|.
Any attributes that have the same length as the |SkyCoord| will be
converted to columns of the |QTable|. All other attributes will be
recorded as metadata.
Returns
-------
`~astropy.table.QTable`
A |QTable| containing the data of this |SkyCoord|.
Examples
--------
>>> sc = SkyCoord(ra=[40, 70]*u.deg, dec=[0, -20]*u.deg,
... obstime=Time([2000, 2010], format='jyear'))
>>> t = sc.to_table()
>>> t
<QTable length=2>
ra dec obstime
deg deg
float64 float64 Time
------- ------- -------
40.0 0.0 2000.0
70.0 -20.0 2010.0
>>> t.meta
{'representation_type': 'spherical', 'frame': 'icrs'}
"""
self_as_dict = self.info._represent_as_dict()
tabledata = {}
metadata = {}
# Record attributes that have the same length as self as columns in the
# table, and the other attributes as table metadata. This matches
# table.serialize._represent_mixin_as_column().
for key, value in self_as_dict.items():
if getattr(value, "shape", ())[:1] == (len(self),):
tabledata[key] = value
else:
metadata[key] = value
return QTable(tabledata, meta=metadata)
def is_equivalent_frame(self, other):
"""
Checks if this object's frame is the same as that of the ``other``
object.
To be the same frame, two objects must be the same frame class and have
the same frame attributes. For two |SkyCoord| objects, *all* of the
frame attributes have to match, not just those relevant for the object's
frame.
Parameters
----------
other : SkyCoord or BaseCoordinateFrame
The other object to check.
Returns
-------
isequiv : bool
True if the frames are the same, False if not.
Raises
------
TypeError
If ``other`` isn't a |SkyCoord| or a subclass of
`~astropy.coordinates.BaseCoordinateFrame`.
"""
if isinstance(other, BaseCoordinateFrame):
return self.frame.is_equivalent_frame(other)
elif isinstance(other, SkyCoord):
if other.frame.name != self.frame.name:
return False
for fattrnm in frame_transform_graph.frame_attributes:
if not BaseCoordinateFrame._frameattr_equiv(
getattr(self, fattrnm), getattr(other, fattrnm)
):
return False
return True
else:
# not a BaseCoordinateFrame nor a SkyCoord object
raise TypeError(
"Tried to do is_equivalent_frame on something that isn't frame-like"
)
# High-level convenience methods
def separation(self, other):
"""
Computes on-sky separation between this coordinate and another.
.. note::
If the ``other`` coordinate object is in a different frame, it is
first transformed to the frame of this object. This can lead to
unintuitive behavior if not accounted for. Particularly of note is
that ``self.separation(other)`` and ``other.separation(self)`` may
not give the same answer in this case.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
other : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to get the separation to.
Returns
-------
sep : `~astropy.coordinates.Angle`
The on-sky separation between this and the ``other`` coordinate.
Notes
-----
The separation is calculated using the Vincenty formula, which
is stable at all locations, including poles and antipodes [1]_.
.. [1] https://en.wikipedia.org/wiki/Great-circle_distance
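Examples
--------
Two coordinates one degree apart in declination:
>>> import astropy.units as u
>>> c1 = SkyCoord(0*u.deg, 0*u.deg)
>>> c2 = SkyCoord(0*u.deg, 1*u.deg)
>>> c1.separation(c2).degree  # doctest: +FLOAT_CMP
1.0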
"""
from . import Angle
from .angle_utilities import angular_separation
if not self.is_equivalent_frame(other):
try:
kwargs = (
{"merge_attributes": False} if isinstance(other, SkyCoord) else {}
)
other = other.transform_to(self, **kwargs)
except TypeError:
raise TypeError(
"Can only get separation to another SkyCoord "
"or a coordinate frame with data"
)
lon1 = self.spherical.lon
lat1 = self.spherical.lat
lon2 = other.spherical.lon
lat2 = other.spherical.lat
# Get the separation as a Quantity, convert to Angle in degrees
sep = angular_separation(lon1, lat1, lon2, lat2)
return Angle(sep, unit=u.degree)
def separation_3d(self, other):
"""
Computes three dimensional separation between this coordinate
and another.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
other : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to get the separation to.
Returns
-------
sep : `~astropy.coordinates.Distance`
The real-space distance between these two coordinates.
Raises
------
ValueError
If this or the other coordinate do not have distances.
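Examples
--------
Two coordinates along the same line of sight, one parsec apart (a
minimal sketch; the repr shown is illustrative, so the example is
skipped by doctests):
>>> import astropy.units as u
>>> c1 = SkyCoord(0*u.deg, 0*u.deg, distance=10*u.pc)
>>> c2 = SkyCoord(0*u.deg, 0*u.deg, distance=11*u.pc)
>>> c1.separation_3d(c2)  # doctest: +SKIP
<Distance 1. pc>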
"""
if not self.is_equivalent_frame(other):
try:
kwargs = (
{"merge_attributes": False} if isinstance(other, SkyCoord) else {}
)
other = other.transform_to(self, **kwargs)
except TypeError:
raise TypeError(
"Can only get separation to another SkyCoord "
"or a coordinate frame with data"
)
if issubclass(self.data.__class__, UnitSphericalRepresentation):
raise ValueError(
"This object does not have a distance; cannot compute 3d separation."
)
if issubclass(other.data.__class__, UnitSphericalRepresentation):
raise ValueError(
"The other object does not have a distance; "
"cannot compute 3d separation."
)
c1 = self.cartesian.without_differentials()
c2 = other.cartesian.without_differentials()
return Distance((c1 - c2).norm())
def spherical_offsets_to(self, tocoord):
r"""
Computes angular offsets to go *from* this coordinate *to* another.
Parameters
----------
tocoord : `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to find the offset to.
Returns
-------
lon_offset : `~astropy.coordinates.Angle`
The angular offset in the longitude direction. The definition of
"longitude" depends on this coordinate's frame (e.g., RA for
equatorial coordinates).
lat_offset : `~astropy.coordinates.Angle`
The angular offset in the latitude direction. The definition of
"latitude" depends on this coordinate's frame (e.g., Dec for
equatorial coordinates).
Raises
------
ValueError
If the ``tocoord`` is not in the same frame as this one. This is
different from the behavior of the `separation`/`separation_3d`
methods because the offset components depend critically on the
specific choice of frame.
Notes
-----
This uses the sky offset frame machinery, and hence will produce a new
sky offset frame if one does not already exist for this object's frame
class.
See Also
--------
separation :
for the *total* angular offset (not broken out into components).
position_angle :
for the direction of the offset.
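Examples
--------
A minimal sketch for two nearby ICRS coordinates (skipped by doctests):
>>> import astropy.units as u
>>> c1 = SkyCoord(1*u.deg, 1*u.deg, frame='icrs')
>>> c2 = SkyCoord(2*u.deg, 2*u.deg, frame='icrs')
>>> dra, ddec = c1.spherical_offsets_to(c2)  # doctest: +SKIP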
"""
if not self.is_equivalent_frame(tocoord):
raise ValueError(
"Tried to use spherical_offsets_to with two non-matching frames!"
)
aframe = self.skyoffset_frame()
acoord = tocoord.transform_to(aframe)
dlon = acoord.spherical.lon.view(Angle)
dlat = acoord.spherical.lat.view(Angle)
return dlon, dlat
def spherical_offsets_by(self, d_lon, d_lat):
"""
Computes the coordinate that is a specified pair of angular offsets away
from this coordinate.
Parameters
----------
d_lon : angle-like
The angular offset in the longitude direction. The definition of
"longitude" depends on this coordinate's frame (e.g., RA for
equatorial coordinates).
d_lat : angle-like
The angular offset in the latitude direction. The definition of
"latitude" depends on this coordinate's frame (e.g., Dec for
equatorial coordinates).
Returns
-------
newcoord : `~astropy.coordinates.SkyCoord`
The coordinates for the location that corresponds to offsetting by
``d_lat`` in the latitude direction and ``d_lon`` in the longitude
direction.
Notes
-----
This internally uses `~astropy.coordinates.SkyOffsetFrame` to do the
transformation. For a more complete set of transform offsets, use
`~astropy.coordinates.SkyOffsetFrame` or `~astropy.wcs.WCS` manually.
This specific method can be reproduced by doing
``SkyCoord(SkyOffsetFrame(d_lon, d_lat, origin=self.frame).transform_to(self))``.
See Also
--------
spherical_offsets_to : compute the angular offsets to another coordinate
directional_offset_by : offset a coordinate by an angle in a direction
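Examples
--------
Offsetting by one arcminute in each direction (a minimal sketch,
skipped by doctests):
>>> import astropy.units as u
>>> c = SkyCoord(10*u.deg, 20*u.deg)
>>> c.spherical_offsets_by(1*u.arcmin, 1*u.arcmin)  # doctest: +SKIP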
"""
from .builtin_frames.skyoffset import SkyOffsetFrame
return self.__class__(
SkyOffsetFrame(d_lon, d_lat, origin=self.frame).transform_to(self)
)
def directional_offset_by(self, position_angle, separation):
"""
Computes coordinates at the given offset from this coordinate.
Parameters
----------
position_angle : `~astropy.coordinates.Angle`
position_angle of offset
separation : `~astropy.coordinates.Angle`
offset angular separation
Returns
-------
newpoints : `~astropy.coordinates.SkyCoord`
The coordinates for the location that corresponds to offsetting by
the given `position_angle` and `separation`.
Notes
-----
The returned SkyCoord frame retains only the frame attributes that are
relevant for the resulting frame type (e.g., if the input frame is
`~astropy.coordinates.ICRS`, an ``equinox`` value will be retained, but
an ``obstime`` will not).
For a more complete set of transform offsets, use `~astropy.wcs.WCS`.
`~astropy.coordinates.SkyCoord.skyoffset_frame()` can also be used to
create a spherical frame with (lat=0, lon=0) at a reference point,
approximating an xy cartesian system for small offsets. This method
is distinct in that it is accurate on the sphere.
See Also
--------
position_angle : inverse operation for the ``position_angle`` component
separation : inverse operation for the ``separation`` component
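Examples
--------
Offsetting one degree due East from the origin (a minimal sketch,
skipped by doctests):
>>> import astropy.units as u
>>> c = SkyCoord(0*u.deg, 0*u.deg)
>>> c.directional_offset_by(90*u.deg, 1*u.deg)  # doctest: +SKIP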
"""
from . import angle_utilities
slat = self.represent_as(UnitSphericalRepresentation).lat
slon = self.represent_as(UnitSphericalRepresentation).lon
newlon, newlat = angle_utilities.offset_by(
lon=slon, lat=slat, posang=position_angle, distance=separation
)
return SkyCoord(newlon, newlat, frame=self.frame)
def match_to_catalog_sky(self, catalogcoord, nthneighbor=1):
"""
Finds the nearest on-sky matches of this coordinate in a set of
catalog coordinates.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
catalogcoord : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The base catalog in which to search for matches. Typically this
will be a coordinate object that is an array (i.e.,
``catalogcoord.isscalar == False``)
nthneighbor : int, optional
Which closest neighbor to search for. Typically ``1`` is
desired here, as that is correct for matching one set of
coordinates to another. The next likely use case is ``2``,
for matching a coordinate catalog against *itself* (``1``
is inappropriate because each point will find itself as the
closest match).
Returns
-------
idx : int array
Indices into ``catalogcoord`` to get the matched points for
each of this object's coordinates. Shape matches this
object.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the closest match for each
element in this object in ``catalogcoord``. Shape matches
this object.
dist3d : `~astropy.units.Quantity` ['length']
The 3D distance between the closest match for each element
in this object in ``catalogcoord``. Shape matches this
object. Unless both this and ``catalogcoord`` have associated
distances, this quantity assumes that all sources are at a
distance of 1 (dimensionless).
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ to be
installed or it will fail.
See Also
--------
astropy.coordinates.match_coordinates_sky
SkyCoord.match_to_catalog_3d
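Examples
--------
A minimal sketch of matching two coordinates against a small catalog
(requires SciPy, so the example is skipped by doctests):
>>> import astropy.units as u
>>> coords = SkyCoord(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg)
>>> catalog = SkyCoord(ra=[1.1, 2.1]*u.deg, dec=[3.1, 4.1]*u.deg)
>>> idx, sep2d, dist3d = coords.match_to_catalog_sky(catalog)  # doctest: +SKIP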
"""
from .matching import match_coordinates_sky
if not (
isinstance(catalogcoord, (SkyCoord, BaseCoordinateFrame))
and catalogcoord.has_data
):
raise TypeError(
"Can only get separation to another SkyCoord or a "
"coordinate frame with data"
)
res = match_coordinates_sky(
self, catalogcoord, nthneighbor=nthneighbor, storekdtree="_kdtree_sky"
)
return res
def match_to_catalog_3d(self, catalogcoord, nthneighbor=1):
"""
Finds the nearest 3-dimensional matches of this coordinate to a set
of catalog coordinates.
This finds the 3-dimensional closest neighbor, which is only different
from the on-sky distance if ``distance`` is set in this object or the
``catalogcoord`` object.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
catalogcoord : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The base catalog in which to search for matches. Typically this
will be a coordinate object that is an array (i.e.,
``catalogcoord.isscalar == False``)
nthneighbor : int, optional
Which closest neighbor to search for. Typically ``1`` is
desired here, as that is correct for matching one set of
coordinates to another. The next likely use case is
``2``, for matching a coordinate catalog against *itself*
(``1`` is inappropriate because each point will find
itself as the closest match).
Returns
-------
idx : int array
Indices into ``catalogcoord`` to get the matched points for
each of this object's coordinates. Shape matches this
object.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the closest match for each
element in this object in ``catalogcoord``. Shape matches
this object.
dist3d : `~astropy.units.Quantity` ['length']
The 3D distance between the closest match for each element
in this object in ``catalogcoord``. Shape matches this
object.
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ to be
installed or it will fail.
See Also
--------
astropy.coordinates.match_coordinates_3d
SkyCoord.match_to_catalog_sky
"""
from .matching import match_coordinates_3d
if not (
isinstance(catalogcoord, (SkyCoord, BaseCoordinateFrame))
and catalogcoord.has_data
):
raise TypeError(
"Can only get separation to another SkyCoord or a "
"coordinate frame with data"
)
res = match_coordinates_3d(
self, catalogcoord, nthneighbor=nthneighbor, storekdtree="_kdtree_3d"
)
return res
def search_around_sky(self, searcharoundcoords, seplimit):
"""
Searches for all coordinates in this object around a supplied set of
points within a given on-sky separation.
This is intended for use on `~astropy.coordinates.SkyCoord` objects
with coordinate arrays, rather than a scalar coordinate. For a scalar
coordinate, it is better to use
`~astropy.coordinates.SkyCoord.separation`.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
searcharoundcoords : coordinate-like
The coordinates to search around to try to find matching points in
this |SkyCoord|. This should be an object with array coordinates,
not a scalar coordinate object.
seplimit : `~astropy.units.Quantity` ['angle']
The on-sky separation to search within.
Returns
-------
idxsearcharound : int array
Indices into ``searcharoundcoords`` that match the
corresponding elements of ``idxself``. Shape matches
``idxself``.
idxself : int array
Indices into ``self`` that match the
corresponding elements of ``idxsearcharound``. Shape matches
``idxsearcharound``.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
dist3d : `~astropy.units.Quantity` ['length']
The 3D distance between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ to be
installed or it will fail.
In the current implementation, the return values are always sorted in
the same order as the ``searcharoundcoords`` (so ``idxsearcharound`` is
in ascending order). This is considered an implementation detail,
though, so it could change in a future release.
See Also
--------
astropy.coordinates.search_around_sky
SkyCoord.search_around_3d
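Examples
--------
A minimal sketch of searching within one degree on the sky (requires
SciPy, so the example is skipped by doctests):
>>> import astropy.units as u
>>> coords = SkyCoord(ra=[1, 2, 3]*u.deg, dec=[0, 0, 0]*u.deg)
>>> around = SkyCoord(ra=[1.5]*u.deg, dec=[0]*u.deg)
>>> idxaround, idxself, sep2d, dist3d = coords.search_around_sky(
...     around, 1*u.deg)  # doctest: +SKIP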
"""
from .matching import search_around_sky
return search_around_sky(
searcharoundcoords, self, seplimit, storekdtree="_kdtree_sky"
)
def search_around_3d(self, searcharoundcoords, distlimit):
"""
Searches for all coordinates in this object around a supplied set of
points within a given 3D radius.
This is intended for use on `~astropy.coordinates.SkyCoord` objects
with coordinate arrays, rather than a scalar coordinate. For a scalar
coordinate, it is better to use
`~astropy.coordinates.SkyCoord.separation_3d`.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
searcharoundcoords : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinates to search around to try to find matching points in
this |SkyCoord|. This should be an object with array coordinates,
not a scalar coordinate object.
distlimit : `~astropy.units.Quantity` ['length']
The physical radius to search within.
Returns
-------
idxsearcharound : int array
Indices into ``searcharoundcoords`` that match the
corresponding elements of ``idxself``. Shape matches
``idxself``.
idxself : int array
Indices into ``self`` that match the
corresponding elements of ``idxsearcharound``. Shape matches
``idxsearcharound``.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
dist3d : `~astropy.units.Quantity` ['length']
The 3D distance between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ to be
installed or it will fail.
In the current implementation, the return values are always sorted in
the same order as the ``searcharoundcoords`` (so ``idxsearcharound`` is
in ascending order). This is considered an implementation detail,
though, so it could change in a future release.
See Also
--------
astropy.coordinates.search_around_3d
SkyCoord.search_around_sky
"""
from .matching import search_around_3d
return search_around_3d(
searcharoundcoords, self, distlimit, storekdtree="_kdtree_3d"
)
def position_angle(self, other):
"""
Computes the on-sky position angle (East of North) between this
SkyCoord and another.
Parameters
----------
other : |SkyCoord|
The other coordinate to compute the position angle to. It is
treated as the "head" of the vector of the position angle.
Returns
-------
pa : `~astropy.coordinates.Angle`
The (positive) position angle of the vector pointing from ``self``
to ``other``. If either ``self`` or ``other`` contain arrays, this
will be an array following the appropriate `numpy` broadcasting
rules.
Examples
--------
>>> c1 = SkyCoord(0*u.deg, 0*u.deg)
>>> c2 = SkyCoord(1*u.deg, 0*u.deg)
>>> c1.position_angle(c2).degree
90.0
>>> c3 = SkyCoord(1*u.deg, 1*u.deg)
>>> c1.position_angle(c3).degree # doctest: +FLOAT_CMP
44.995636455344844
"""
from . import angle_utilities
if not self.is_equivalent_frame(other):
try:
other = other.transform_to(self, merge_attributes=False)
except TypeError:
raise TypeError(
"Can only get position_angle to another "
"SkyCoord or a coordinate frame with data"
)
slat = self.represent_as(UnitSphericalRepresentation).lat
slon = self.represent_as(UnitSphericalRepresentation).lon
olat = other.represent_as(UnitSphericalRepresentation).lat
olon = other.represent_as(UnitSphericalRepresentation).lon
return angle_utilities.position_angle(slon, slat, olon, olat)
def skyoffset_frame(self, rotation=None):
"""
Returns the sky offset frame with this SkyCoord at the origin.
Parameters
----------
rotation : angle-like
The final rotation of the frame about the ``origin``. The sign of
the rotation is the left-hand rule. That is, an object at a
particular position angle in the un-rotated system will be sent to
the positive latitude (z) direction in the final frame.
Returns
-------
astrframe : `~astropy.coordinates.SkyOffsetFrame`
A sky offset frame of the same type as this |SkyCoord| (e.g., if
this object has an ICRS coordinate, the resulting frame is
SkyOffsetICRS, with the origin set to this object)
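Examples
--------
A minimal sketch of building an offset frame centered on this
coordinate and transforming a nearby target into it (skipped by
doctests):
>>> import astropy.units as u
>>> center = SkyCoord(10*u.deg, 45*u.deg)
>>> aframe = center.skyoffset_frame()
>>> target = SkyCoord(11*u.deg, 46*u.deg).transform_to(aframe)  # doctest: +SKIP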
"""
from .builtin_frames.skyoffset import SkyOffsetFrame
return SkyOffsetFrame(origin=self, rotation=rotation)
def get_constellation(self, short_name=False, constellation_list="iau"):
"""
Determines the constellation(s) of the coordinates this SkyCoord contains.
Parameters
----------
short_name : bool
If True, the returned names are the IAU-sanctioned abbreviated
names. Otherwise, full names for the constellations are used.
constellation_list : str
The set of constellations to use. Currently only ``'iau'`` is
supported, meaning the 88 "modern" constellations endorsed by the IAU.
Returns
-------
constellation : str or string array
If this is a scalar coordinate, returns the name of the
constellation. If it is an array |SkyCoord|, it returns an array of
names.
Notes
-----
To determine which constellation a point on the sky is in, this first
precesses to B1875, and then uses the Delporte boundaries of the 88
modern constellations, as tabulated by
`Roman 1987 <http://cdsarc.u-strasbg.fr/viz-bin/Cat?VI/42>`_.
See Also
--------
astropy.coordinates.get_constellation
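Examples
--------
A minimal sketch using the approximate coordinates of Betelgeuse,
which lies in Orion (skipped by doctests):
>>> import astropy.units as u
>>> SkyCoord(88.79*u.deg, 7.41*u.deg).get_constellation()  # doctest: +SKIP
'Orion'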
"""
from .funcs import get_constellation
# because of issue #7028, the conversion to a PrecessedGeocentric
# system fails in some cases. The workaround is to drop the velocities;
# they are not needed here since only position information is used.
extra_frameattrs = {nm: getattr(self, nm) for nm in self._extra_frameattr_names}
novel = SkyCoord(
self.realize_frame(self.data.without_differentials()), **extra_frameattrs
)
return get_constellation(novel, short_name, constellation_list)
# the simpler version below can be used when gh-issue #7028 is resolved
# return get_constellation(self, short_name, constellation_list)
# WCS pixel to/from sky conversions
def to_pixel(self, wcs, origin=0, mode="all"):
"""
Convert this coordinate to pixel coordinates using a `~astropy.wcs.WCS`
object.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS to use for the conversion.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
Whether to do the transformation including distortions (``'all'``) or
only the core WCS transformation (``'wcs'``).
Returns
-------
xp, yp : `numpy.ndarray`
The pixel coordinates
See Also
--------
astropy.wcs.utils.skycoord_to_pixel : the implementation of this method
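Examples
--------
A minimal sketch with a hypothetical, hand-built TAN WCS (skipped by
doctests):
>>> import astropy.units as u
>>> from astropy.wcs import WCS
>>> w = WCS(naxis=2)
>>> w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
>>> w.wcs.crval = [10, 20]
>>> xp, yp = SkyCoord(10*u.deg, 20*u.deg).to_pixel(w)  # doctest: +SKIP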
"""
from astropy.wcs.utils import skycoord_to_pixel
return skycoord_to_pixel(self, wcs=wcs, origin=origin, mode=mode)
@classmethod
def from_pixel(cls, xp, yp, wcs, origin=0, mode="all"):
"""
Create a new SkyCoord from pixel coordinates using a World Coordinate System.
Parameters
----------
xp, yp : float or ndarray
The coordinates to convert.
wcs : `~astropy.wcs.WCS`
The WCS to use for the conversion.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
Whether to do the transformation including distortions (``'all'``) or
only the core WCS transformation (``'wcs'``).
Returns
-------
coord : `~astropy.coordinates.SkyCoord`
A new object with sky coordinates corresponding to the input ``xp``
and ``yp``.
See Also
--------
to_pixel : to do the inverse operation
astropy.wcs.utils.pixel_to_skycoord : the implementation of this method
"""
from astropy.wcs.utils import pixel_to_skycoord
return pixel_to_skycoord(xp, yp, wcs=wcs, origin=origin, mode=mode, cls=cls)
def contained_by(self, wcs, image=None, **kwargs):
"""
Determines if the SkyCoord is contained in the given wcs footprint.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS to check whether the coordinate is contained within.
image : array, optional
The image associated with the WCS object that the coordinate
is being checked against. If not given, the ``naxis`` keywords will be
used to determine if the coordinate falls within the WCS footprint.
**kwargs
Additional arguments to pass to `~astropy.coordinates.SkyCoord.to_pixel`
Returns
-------
response : bool
True means the WCS footprint contains the coordinate, False means it does not.
"""
if image is not None:
ymax, xmax = image.shape
else:
xmax, ymax = wcs._naxis
with warnings.catch_warnings():
# Suppress warnings since they just mean we didn't find the coordinate
warnings.simplefilter("ignore")
try:
x, y = self.to_pixel(wcs, **kwargs)
except Exception:
return False
return (x < xmax) & (x > 0) & (y < ymax) & (y > 0)
def radial_velocity_correction(
self, kind="barycentric", obstime=None, location=None
):
"""
Compute the correction required to convert a radial velocity at a given
time and place on the Earth's Surface to a barycentric or heliocentric
velocity.
Parameters
----------
kind : str
The kind of velocity correction. Must be 'barycentric' or
'heliocentric'.
obstime : `~astropy.time.Time` or None, optional
The time at which to compute the correction. If `None`, the
``obstime`` frame attribute on the |SkyCoord| will be used.
location : `~astropy.coordinates.EarthLocation` or None, optional
The observer location at which to compute the correction. If
`None`, the ``location`` frame attribute on the passed-in
``obstime`` will be used, and if that is None, the ``location``
frame attribute on the |SkyCoord| will be used.
Raises
------
ValueError
If either ``obstime`` or ``location`` are passed in (not ``None``)
when the frame attribute is already set on this |SkyCoord|.
TypeError
If ``obstime`` or ``location`` aren't provided, either as arguments
or as frame attributes.
Returns
-------
vcorr : `~astropy.units.Quantity` ['speed']
The correction with a positive sign. I.e., *add* this
to an observed radial velocity to get the barycentric (or
heliocentric) velocity. If m/s precision or better is needed,
see the notes below.
Notes
-----
The barycentric correction is calculated to higher precision than the
heliocentric correction and includes additional physics (e.g. time dilation).
Use barycentric corrections if m/s precision is required.
The algorithm here is sufficient to perform corrections at the mm/s level, but
care is needed in application. The barycentric correction returned uses the optical
approximation v = z * c. Strictly speaking, the barycentric correction is
multiplicative and should be applied as::
>>> from astropy.time import Time
>>> from astropy.coordinates import SkyCoord, EarthLocation
>>> from astropy.constants import c
>>> t = Time(56370.5, format='mjd', scale='utc')
>>> loc = EarthLocation('149d33m00.5s','-30d18m46.385s',236.87*u.m)
>>> sc = SkyCoord(1*u.deg, 2*u.deg)
>>> vcorr = sc.radial_velocity_correction(kind='barycentric', obstime=t, location=loc) # doctest: +REMOTE_DATA
>>> rv = rv + vcorr + rv * vcorr / c # doctest: +SKIP
Also note that this method returns the correction velocity in the so-called
*optical convention*::
>>> vcorr = zb * c # doctest: +SKIP
where ``zb`` is the barycentric correction redshift as defined in section 3
of Wright & Eastman (2014). The application formula given above follows from their
equation (11) under the assumption that the radial velocity ``rv`` has also been defined
using the same optical convention. Note, this can be regarded as a matter of
velocity definition and does not by itself imply any loss of accuracy, provided
sufficient care has been taken during interpretation of the results. If you need
the barycentric correction expressed as the full relativistic velocity (e.g., to
provide it as input to other software that performs the application), the
following recipe can be used::
>>> zb = vcorr / c # doctest: +REMOTE_DATA
>>> zb_plus_one_squared = (zb + 1) ** 2 # doctest: +REMOTE_DATA
>>> vcorr_rel = c * (zb_plus_one_squared - 1) / (zb_plus_one_squared + 1) # doctest: +REMOTE_DATA
or alternatively using just equivalencies::
>>> vcorr_rel = vcorr.to(u.Hz, u.doppler_optical(1*u.Hz)).to(vcorr.unit, u.doppler_relativistic(1*u.Hz)) # doctest: +REMOTE_DATA
See also `~astropy.units.equivalencies.doppler_optical`,
`~astropy.units.equivalencies.doppler_radio`, and
`~astropy.units.equivalencies.doppler_relativistic` for more information on
the velocity conventions.
The default is for this method to use the builtin ephemeris for
computing the sun and earth location. Other ephemerides can be chosen
by setting the `~astropy.coordinates.solar_system_ephemeris` variable,
either directly or via a ``with`` statement. For example, to use the JPL
ephemeris, do::
>>> from astropy.coordinates import solar_system_ephemeris
>>> sc = SkyCoord(1*u.deg, 2*u.deg)
>>> with solar_system_ephemeris.set('jpl'): # doctest: +REMOTE_DATA
... rv += sc.radial_velocity_correction(obstime=t, location=loc) # doctest: +SKIP
"""
# has to be here to prevent circular imports
from .solar_system import get_body_barycentric_posvel
# location validation
timeloc = getattr(obstime, "location", None)
if location is None:
if self.location is not None:
location = self.location
if timeloc is not None:
raise ValueError(
"`location` cannot be in both the passed-in `obstime` and this"
" `SkyCoord` because it is ambiguous which is meant for the"
" radial_velocity_correction."
)
elif timeloc is not None:
location = timeloc
else:
raise TypeError(
"Must provide a `location` to radial_velocity_correction, either as"
" a SkyCoord frame attribute, as an attribute on the passed in"
" `obstime`, or in the method call."
)
elif self.location is not None or timeloc is not None:
raise ValueError(
"Cannot compute radial velocity correction if `location` argument is"
" passed in and there is also a `location` attribute on this SkyCoord"
" or the passed-in `obstime`."
)
# obstime validation
coo_at_rv_obstime = self # assume we need no space motion for now
if obstime is None:
obstime = self.obstime
if obstime is None:
raise TypeError(
"Must provide an `obstime` to radial_velocity_correction, either as"
" a SkyCoord frame attribute or in the method call."
)
elif self.obstime is not None and self.frame.data.differentials:
# we do need space motion after all
coo_at_rv_obstime = self.apply_space_motion(obstime)
elif self.obstime is None:
# warn the user if the object has differentials set
if "s" in self.data.differentials:
warnings.warn(
"SkyCoord has space motion, and therefore the specified "
"position of the SkyCoord may not be the same as "
"the `obstime` for the radial velocity measurement. "
"This may affect the rv correction at the order of km/s"
"for very high proper motions sources. If you wish to "
"apply space motion of the SkyCoord to correct for this"
"the `obstime` attribute of the SkyCoord must be set",
AstropyUserWarning,
)
pos_earth, v_earth = get_body_barycentric_posvel("earth", obstime)
if kind == "barycentric":
v_origin_to_earth = v_earth
elif kind == "heliocentric":
v_sun = get_body_barycentric_posvel("sun", obstime)[1]
v_origin_to_earth = v_earth - v_sun
else:
raise ValueError(
"`kind` argument to radial_velocity_correction must "
f"be 'barycentric' or 'heliocentric', but got '{kind}'"
)
gcrs_p, gcrs_v = location.get_gcrs_posvel(obstime)
# transforming to GCRS is not the correct thing to do here, since we don't want to
# include aberration (or light deflection). Instead, only apply parallax if necessary.
icrs_cart = coo_at_rv_obstime.icrs.cartesian
icrs_cart_novel = icrs_cart.without_differentials()
if self.data.__class__ is UnitSphericalRepresentation:
targcart = icrs_cart_novel
else:
# skycoord has distances so apply parallax
obs_icrs_cart = pos_earth + gcrs_p
targcart = icrs_cart_novel - obs_icrs_cart
targcart /= targcart.norm()
if kind == "barycentric":
beta_obs = (v_origin_to_earth + gcrs_v) / speed_of_light
gamma_obs = 1 / np.sqrt(1 - beta_obs.norm() ** 2)
gr = location.gravitational_redshift(obstime)
# barycentric redshift according to eq 28 in Wright & Eastman (2014),
# neglecting Shapiro delay and effects of the star's own motion
zb = gamma_obs * (1 + beta_obs.dot(targcart)) / (1 + gr / speed_of_light)
# try and get terms corresponding to stellar motion.
if icrs_cart.differentials:
try:
ro = self.icrs.cartesian
beta_star = ro.differentials["s"].to_cartesian() / speed_of_light
# ICRS unit vector at coordinate epoch
ro = ro.without_differentials()
ro /= ro.norm()
zb *= (1 + beta_star.dot(ro)) / (1 + beta_star.dot(targcart))
except u.UnitConversionError:
warnings.warn(
"SkyCoord contains some velocity information, but not enough to"
" calculate the full space motion of the source, and so this"
" has been ignored for the purposes of calculating the radial"
" velocity correction. This can lead to errors on the order of"
" metres/second.",
AstropyUserWarning,
)
zb = zb - 1
return zb * speed_of_light
else:
# do a simpler correction ignoring time dilation and gravitational redshift
# this is adequate since Heliocentric corrections shouldn't be used if
# cm/s precision is required.
return targcart.dot(v_origin_to_earth + gcrs_v)
# Table interactions
@classmethod
def guess_from_table(cls, table, **coord_kwargs):
r"""
A convenience method to create and return a new SkyCoord from the data
in an astropy Table.
This method matches table columns that start with the case-insensitive
names of the components of the requested frames (including
differentials), if they are also followed by a non-alphanumeric
character. It will also match columns that *end* with the component name
if a non-alphanumeric character is *before* it.
For example, the first rule means columns with names like
``'RA[J2000]'`` or ``'ra'`` will be interpreted as ``ra`` attributes for
`~astropy.coordinates.ICRS` frames, but ``'RAJ2000'`` or ``'radius'``
are *not*. Similarly, the second rule applied to the
`~astropy.coordinates.Galactic` frame means that a column named
``'gal_l'`` will be used as the ``l`` component, but ``'gall'`` or
``'fill'`` will not.
The definition of alphanumeric here is based on Unicode's definition
of alphanumeric, except without ``_`` (which is normally considered
alphanumeric). So for ASCII, this means the non-alphanumeric characters
are ``<space>_!"#$%&'()*+,-./\:;<=>?@[]^`{|}~``.
Parameters
----------
table : `~astropy.table.Table` or subclass
The table to load data from.
**coord_kwargs
Any additional keyword arguments are passed directly to this class's
constructor.
Returns
-------
newsc : `~astropy.coordinates.SkyCoord` or subclass
The new instance.
Raises
------
ValueError
If more than one match is found in the table for a component,
unless the additional matches are also valid frame component names.
If a "coord_kwargs" is provided for a value also found in the table.
"""
_frame_cls, _frame_kwargs = _get_frame_without_data([], coord_kwargs)
frame = _frame_cls(**_frame_kwargs)
coord_kwargs["frame"] = coord_kwargs.get("frame", frame)
representation_component_names = set(
frame.get_representation_component_names()
).union(set(frame.get_representation_component_names("s")))
comp_kwargs = {}
for comp_name in representation_component_names:
            # this matches things like 'ra[...]' but *not* 'rad'.
# note that the "_" must be in there explicitly, because
# "alphanumeric" usually includes underscores.
starts_with_comp = comp_name + r"(\W|\b|_)"
# this part matches stuff like 'center_ra', but *not*
# 'aura'
ends_with_comp = r".*(\W|\b|_)" + comp_name + r"\b"
# the final regex ORs together the two patterns
rex = re.compile(
rf"({starts_with_comp})|({ends_with_comp})", re.IGNORECASE | re.UNICODE
)
# find all matches
matches = {col_name for col_name in table.colnames if rex.match(col_name)}
# now need to select among matches, also making sure we don't have
# an exact match with another component
if len(matches) == 0: # no matches
continue
elif len(matches) == 1: # only one match
col_name = matches.pop()
else: # more than 1 match
# try to sieve out other components
matches -= representation_component_names - {comp_name}
# if there's only one remaining match, it worked.
if len(matches) == 1:
col_name = matches.pop()
else:
raise ValueError(
f'Found at least two matches for component "{comp_name}":'
f' "{matches}". Cannot guess coordinates from a table with this'
" ambiguity."
)
comp_kwargs[comp_name] = table[col_name]
for k, v in comp_kwargs.items():
if k in coord_kwargs:
raise ValueError(
                    f'Found column "{v.name}" in table, but it was already'
                    f' provided as "{k}" keyword to guess_from_table function.'
)
else:
coord_kwargs[k] = v
return cls(**coord_kwargs)
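    # A minimal sketch of the column matching implemented above (column names
    # are hypothetical; 'RA[J2000]' matches the 'ra' component because '[' is
    # non-alphanumeric, whereas 'RAJ2000' would not match):
    #
    #     >>> import astropy.units as u
    #     >>> from astropy.table import Table
    #     >>> t = Table({'RA[J2000]': [10.0, 20.0] * u.deg,
    #     ...            'DEC[J2000]': [41.0, -5.0] * u.deg})
    #     >>> sc = SkyCoord.guess_from_table(t)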
# Name resolve
@classmethod
def from_name(cls, name, frame="icrs", parse=False, cache=True):
"""
Given a name, query the CDS name resolver to attempt to retrieve
coordinate information for that object. The search database, sesame
url, and query timeout can be set through configuration items in
``astropy.coordinates.name_resolve`` -- see docstring for
`~astropy.coordinates.get_icrs_coordinates` for more
information.
Parameters
----------
name : str
The name of the object to get coordinates for, e.g. ``'M42'``.
frame : str or `BaseCoordinateFrame` class or instance
The frame to transform the object to.
parse : bool
Whether to attempt extracting the coordinates from the name by
            parsing with a regex. For object catalog names that have
J-coordinates embedded in their names, e.g.,
'CRTS SSS100805 J194428-420209', this may be much faster than a
Sesame query for the same object name. The coordinates extracted
in this way may differ from the database coordinates by a few
deci-arcseconds, so only use this option if you do not need
sub-arcsecond accuracy for coordinates.
cache : bool, optional
Determines whether to cache the results or not. To update or
overwrite an existing value, pass ``cache='update'``.
Returns
-------
coord : SkyCoord
Instance of the SkyCoord class.
"""
from .name_resolve import get_icrs_coordinates
icrs_coord = get_icrs_coordinates(name, parse, cache=cache)
icrs_sky_coord = cls(icrs_coord)
if frame in ("icrs", icrs_coord.__class__):
return icrs_sky_coord
else:
return icrs_sky_coord.transform_to(frame)
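# A minimal sketch of name resolution (the plain query needs internet access
# for Sesame; ``parse=True`` works offline for J-coordinate catalog names):
#
#     >>> m42 = SkyCoord.from_name('M42')
#     >>> crts = SkyCoord.from_name('CRTS SSS100805 J194428-420209', parse=True)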
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Currently the only site accessible without internet access is the Royal
Greenwich Observatory, as an example (and for testing purposes). In future
releases, a canonical set of sites may be bundled into astropy for when the
online registry is unavailable.
Additions or corrections to the observatory list can be submitted via Pull
Request to the [astropy-data GitHub repository](https://github.com/astropy/astropy-data),
updating the ``sites.json`` file.
"""
import json
from collections.abc import Mapping
from difflib import get_close_matches
from astropy import units as u
from astropy.utils.data import get_file_contents, get_pkg_data_contents
from .earth import EarthLocation
from .errors import UnknownSiteException
class SiteRegistry(Mapping):
"""
A bare-bones registry of EarthLocation objects.
    This acts as a mapping (dict-like object) but with the important caveat
    that it always transforms its inputs to lower-case. So keys are always
    all lower-case, and even if you ask for something with mixed case, it
    will be interpreted as the all lower-case version.
"""
def __init__(self):
# the keys to this are always lower-case
self._lowercase_names_to_locations = {}
# these can be whatever case is appropriate
self._names = []
def __getitem__(self, site_name):
"""
Returns an EarthLocation for a known site in this registry.
Parameters
----------
site_name : str
Name of the observatory (case-insensitive).
Returns
-------
site : `~astropy.coordinates.EarthLocation`
The location of the observatory.
"""
if site_name.lower() not in self._lowercase_names_to_locations:
# If site name not found, find close matches and suggest them in error
close_names = get_close_matches(
site_name, self._lowercase_names_to_locations
)
close_names = sorted(close_names, key=len)
raise UnknownSiteException(
site_name, "the 'names' attribute", close_names=close_names
)
return self._lowercase_names_to_locations[site_name.lower()]
def __len__(self):
return len(self._lowercase_names_to_locations)
def __iter__(self):
return iter(self._lowercase_names_to_locations)
def __contains__(self, site_name):
return site_name.lower() in self._lowercase_names_to_locations
@property
def names(self):
"""
The names in this registry. Note that these are *not* exactly the same
as the keys: keys are always lower-case, while `names` is what you
should use for the actual readable names (which may be case-sensitive).
Returns
-------
site : list of str
The names of the sites in this registry
"""
return sorted(self._names)
def add_site(self, names, locationobj):
"""
Adds a location to the registry.
Parameters
----------
names : list of str
All the names this site should go under
locationobj : `~astropy.coordinates.EarthLocation`
The actual site object
"""
for name in names:
self._lowercase_names_to_locations[name.lower()] = locationobj
self._names.append(name)
@classmethod
def from_json(cls, jsondb):
reg = cls()
for site in jsondb:
site_info = jsondb[site].copy()
location = EarthLocation.from_geodetic(
site_info.pop("longitude") * u.Unit(site_info.pop("longitude_unit")),
site_info.pop("latitude") * u.Unit(site_info.pop("latitude_unit")),
site_info.pop("elevation") * u.Unit(site_info.pop("elevation_unit")),
)
name = site_info.pop("name")
location.info.name = name
aliases = [alias for alias in site_info.pop("aliases") if alias]
if name not in aliases and name != site:
aliases.append(name)
location.info.meta = site_info # whatever is left
reg.add_site([site] + aliases, location)
reg._loaded_jsondb = jsondb
return reg
def get_builtin_sites():
"""
    Load observatory database from data/sites.json and parse it into
a SiteRegistry.
"""
jsondb = json.loads(get_pkg_data_contents("data/sites.json"))
return SiteRegistry.from_json(jsondb)
def get_downloaded_sites(jsonurl=None):
"""
Load observatory database from data.astropy.org and parse into a SiteRegistry.
"""
    # we explicitly set the encoding because the default is to use the user's
    # locale, which may fail to decode the UTF-8 content of the sites file
if jsonurl is None:
content = get_pkg_data_contents("coordinates/sites.json", encoding="UTF-8")
else:
content = get_file_contents(jsonurl, encoding="UTF-8")
jsondb = json.loads(content)
return SiteRegistry.from_json(jsondb)
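# A minimal sketch of building a registry by hand from a JSON-style dict
# (the site data below is illustrative, not an official entry):
#
#     >>> jsondb = {
#     ...     "example": {
#     ...         "name": "Example Observatory",
#     ...         "aliases": ["Example"],
#     ...         "longitude": 0.0, "longitude_unit": "deg",
#     ...         "latitude": 51.477, "latitude_unit": "deg",
#     ...         "elevation": 46.0, "elevation_unit": "m",
#     ...     }
#     ... }
#     >>> reg = SiteRegistry.from_json(jsondb)
#     >>> reg["EXAMPLE"]  # lookups are case-insensitive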
import numpy as np
from astropy.units import Unit
from astropy.units import equivalencies as eq
from astropy.units import si
from astropy.units.decorators import quantity_input
from astropy.units.quantity import Quantity, SpecificTypeQuantity
__all__ = ["SpectralQuantity"]
# We don't want to run doctests in the docstrings we inherit from Quantity
__doctest_skip__ = ["SpectralQuantity.*"]
KMS = si.km / si.s
SPECTRAL_UNITS = (si.Hz, si.m, si.J, si.m**-1, KMS)
DOPPLER_CONVENTIONS = {
"radio": eq.doppler_radio,
"optical": eq.doppler_optical,
"relativistic": eq.doppler_relativistic,
}
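# Each entry above maps a convention name to an equivalency factory that takes
# a rest value. A minimal sketch of direct use with a plain Quantity (the rest
# frequency below is illustrative):
#
#     >>> import astropy.units as u
#     >>> freq = 1.420405 * u.GHz
#     >>> freq.to(u.km / u.s, equivalencies=u.doppler_radio(1.420406 * u.GHz))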
class SpectralQuantity(SpecificTypeQuantity):
"""
One or more value(s) with spectral units.
The spectral units should be those for frequencies, wavelengths, energies,
wavenumbers, or velocities (interpreted as Doppler velocities relative to a
rest spectral value). The advantage of using this class over the regular
`~astropy.units.Quantity` class is that in `SpectralQuantity`, the
``u.spectral`` equivalency is enabled by default (allowing automatic
conversion between spectral units), and a preferred Doppler rest value and
convention can be stored for easy conversion to/from velocities.
Parameters
----------
value : ndarray or `~astropy.units.Quantity` or `SpectralQuantity`
Spectral axis data values.
unit : unit-like
Unit for the given data.
doppler_rest : `~astropy.units.Quantity` ['speed'], optional
The rest value to use for conversions from/to velocities
doppler_convention : str, optional
The convention to use when converting the spectral data to/from
velocities.
"""
_equivalent_unit = SPECTRAL_UNITS
_include_easy_conversion_members = True
def __new__(
cls, value, unit=None, doppler_rest=None, doppler_convention=None, **kwargs
):
obj = super().__new__(cls, value, unit=unit, **kwargs)
# If we're initializing from an existing SpectralQuantity, keep any
# parameters that aren't being overridden
if doppler_rest is None:
doppler_rest = getattr(value, "doppler_rest", None)
if doppler_convention is None:
doppler_convention = getattr(value, "doppler_convention", None)
obj._doppler_rest = doppler_rest
obj._doppler_convention = doppler_convention
return obj
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
self._doppler_rest = getattr(obj, "_doppler_rest", None)
self._doppler_convention = getattr(obj, "_doppler_convention", None)
def __quantity_subclass__(self, unit):
# Always default to just returning a Quantity, unless we explicitly
# choose to return a SpectralQuantity - even if the units match, we
# want to avoid doing things like adding two SpectralQuantity instances
# together and getting a SpectralQuantity back
if unit is self.unit:
return SpectralQuantity, True
else:
return Quantity, False
def __array_ufunc__(self, function, method, *inputs, **kwargs):
# We always return Quantity except in a few specific cases
result = super().__array_ufunc__(function, method, *inputs, **kwargs)
if (
(
function is np.multiply
or function is np.true_divide
and inputs[0] is self
)
and result.unit == self.unit
or (
function in (np.minimum, np.maximum, np.fmax, np.fmin)
and method in ("reduce", "reduceat")
)
):
result = result.view(self.__class__)
result.__array_finalize__(self)
else:
if result is self:
raise TypeError(
"Cannot store the result of this operation in"
f" {self.__class__.__name__}"
)
if result.dtype.kind == "b":
result = result.view(np.ndarray)
else:
result = result.view(Quantity)
return result
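    # Illustrative consequences of the rules above (a sketch, not a doctest):
    # scaling by a dimensionless factor keeps the unit and hence the class,
    # while, e.g., squaring changes the unit and demotes to a plain Quantity:
    #
    #     >>> sq = SpectralQuantity([500, 600], unit='nm')
    #     >>> type(2 * sq).__name__
    #     'SpectralQuantity'
    #     >>> type(sq ** 2).__name__
    #     'Quantity'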
@property
def doppler_rest(self):
"""
The rest value of the spectrum used for transformations to/from
velocity space.
Returns
-------
`~astropy.units.Quantity` ['speed']
Rest value as an astropy `~astropy.units.Quantity` object.
"""
return self._doppler_rest
@doppler_rest.setter
@quantity_input(value=SPECTRAL_UNITS)
def doppler_rest(self, value):
"""
New rest value needed for velocity-space conversions.
Parameters
----------
value : `~astropy.units.Quantity` ['speed']
Rest value.
"""
if self._doppler_rest is not None:
raise AttributeError(
"doppler_rest has already been set, and cannot be changed. Use the"
" ``to`` method to convert the spectral values(s) to use a different"
" rest value"
)
self._doppler_rest = value
@property
def doppler_convention(self):
"""
The defined convention for conversions to/from velocity space.
Returns
-------
str
One of 'optical', 'radio', or 'relativistic' representing the
equivalency used in the unit conversions.
"""
return self._doppler_convention
@doppler_convention.setter
def doppler_convention(self, value):
"""
New velocity convention used for velocity space conversions.
        Parameters
        ----------
        value : str
            The convention to use; one of 'optical', 'radio', or 'relativistic'.
Notes
-----
More information on the equations dictating the transformations can be
found in the astropy documentation [1]_.
References
----------
.. [1] Astropy documentation: https://docs.astropy.org/en/stable/units/equivalencies.html#spectral-doppler-equivalencies
"""
if self._doppler_convention is not None:
raise AttributeError(
"doppler_convention has already been set, and cannot be changed. Use"
" the ``to`` method to convert the spectral values(s) to use a"
" different convention"
)
if value is not None and value not in DOPPLER_CONVENTIONS:
raise ValueError(
"doppler_convention should be one of"
f" {'/'.join(sorted(DOPPLER_CONVENTIONS))}"
)
self._doppler_convention = value
@quantity_input(doppler_rest=SPECTRAL_UNITS)
def to(self, unit, equivalencies=[], doppler_rest=None, doppler_convention=None):
"""
Return a new `~astropy.coordinates.SpectralQuantity` object with the specified unit.
By default, the ``spectral`` equivalency will be enabled, as well as
one of the Doppler equivalencies if converting to/from velocities.
Parameters
----------
unit : unit-like
An object that represents the unit to convert to. Must be
an `~astropy.units.UnitBase` object or a string parseable
by the `~astropy.units` package, and should be a spectral unit.
equivalencies : list of `~astropy.units.equivalencies.Equivalency`, optional
A list of equivalence pairs to try if the units are not
directly convertible (along with spectral).
See :ref:`astropy:unit_equivalencies`.
If not provided or ``[]``, spectral equivalencies will be used.
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
doppler_rest : `~astropy.units.Quantity` ['speed'], optional
The rest value used when converting to/from velocities. This will
            also be set as an attribute on the output
`~astropy.coordinates.SpectralQuantity`.
doppler_convention : {'relativistic', 'optical', 'radio'}, optional
The Doppler convention used when converting to/from velocities.
            This will also be set as an attribute on the output
`~astropy.coordinates.SpectralQuantity`.
Returns
-------
`SpectralQuantity`
New spectral coordinate object with data converted to the new unit.
"""
# Make sure units can be passed as strings
unit = Unit(unit)
# If equivalencies is explicitly set to None, we should just use the
# default Quantity.to with equivalencies also set to None
if equivalencies is None:
result = super().to(unit, equivalencies=None)
result = result.view(self.__class__)
result.__array_finalize__(self)
return result
        # FIXME: need to consider case where a doppler equivalency is passed in
        # the equivalencies list, or if the u.spectral equivalency is already passed
if doppler_rest is None:
doppler_rest = self._doppler_rest
if doppler_convention is None:
doppler_convention = self._doppler_convention
elif doppler_convention not in DOPPLER_CONVENTIONS:
raise ValueError(
"doppler_convention should be one of"
f" {'/'.join(sorted(DOPPLER_CONVENTIONS))}"
)
if self.unit.is_equivalent(KMS) and unit.is_equivalent(KMS):
# Special case: if the current and final units are both velocity,
            # and either the rest value or the convention is different, we
# need to convert back to frequency temporarily.
if doppler_convention is not None and self._doppler_convention is None:
raise ValueError("Original doppler_convention not set")
if doppler_rest is not None and self._doppler_rest is None:
raise ValueError("Original doppler_rest not set")
if doppler_rest is None and doppler_convention is None:
result = super().to(unit, equivalencies=equivalencies)
result = result.view(self.__class__)
result.__array_finalize__(self)
return result
elif (doppler_rest is None) is not (doppler_convention is None):
raise ValueError(
"Either both or neither doppler_rest and doppler_convention should"
" be defined for velocity conversions"
)
vel_equiv1 = DOPPLER_CONVENTIONS[self._doppler_convention](
self._doppler_rest
)
freq = super().to(si.Hz, equivalencies=equivalencies + vel_equiv1)
vel_equiv2 = DOPPLER_CONVENTIONS[doppler_convention](doppler_rest)
result = freq.to(unit, equivalencies=equivalencies + vel_equiv2)
else:
additional_equivalencies = eq.spectral()
if self.unit.is_equivalent(KMS) or unit.is_equivalent(KMS):
if doppler_convention is None:
raise ValueError(
"doppler_convention not set, cannot convert to/from velocities"
)
if doppler_rest is None:
raise ValueError(
"doppler_rest not set, cannot convert to/from velocities"
)
additional_equivalencies = (
additional_equivalencies
+ DOPPLER_CONVENTIONS[doppler_convention](doppler_rest)
)
result = super().to(
unit, equivalencies=equivalencies + additional_equivalencies
)
# Since we have to explicitly specify when we want to keep this as a
# SpectralQuantity, we need to convert it back from a Quantity to
# a SpectralQuantity here. Note that we don't use __array_finalize__
# here since we might need to set the output doppler convention and
# rest based on the parameters passed to 'to'
result = result.view(self.__class__)
result.__array_finalize__(self)
result._doppler_convention = doppler_convention
result._doppler_rest = doppler_rest
return result
def to_value(self, unit=None, *args, **kwargs):
if unit is None:
return self.view(np.ndarray)
return self.to(unit, *args, **kwargs).value
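# A minimal end-to-end sketch of the conversions implemented above (the rest
# value and convention are illustrative):
#
#     >>> sq = SpectralQuantity(1420.406 * si.MHz)
#     >>> v = sq.to(KMS, doppler_rest=1420.406 * si.MHz,
#     ...           doppler_convention='radio')
#     >>> v.doppler_convention
#     'radio'
#     >>> v.to_value(si.Hz)  # converts back using the stored rest/convention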
"""
In this module, we define the coordinate representation classes, which are
used to represent low-level cartesian, spherical, cylindrical, and other
coordinates.
"""
import abc
import functools
import inspect
import operator
import warnings
import numpy as np
from erfa import ufunc as erfa_ufunc
import astropy.units as u
from astropy.utils import ShapedLikeNDArray, classproperty
from astropy.utils.data_info import MixinInfo
from astropy.utils.exceptions import DuplicateRepresentationWarning
from .angles import Angle, Latitude, Longitude
from .distances import Distance
from .matrix_utilities import is_O3
__all__ = [
"BaseRepresentationOrDifferential",
"BaseRepresentation",
"CartesianRepresentation",
"SphericalRepresentation",
"UnitSphericalRepresentation",
"RadialRepresentation",
"PhysicsSphericalRepresentation",
"CylindricalRepresentation",
"BaseDifferential",
"CartesianDifferential",
"BaseSphericalDifferential",
"BaseSphericalCosLatDifferential",
"SphericalDifferential",
"SphericalCosLatDifferential",
"UnitSphericalDifferential",
"UnitSphericalCosLatDifferential",
"RadialDifferential",
"CylindricalDifferential",
"PhysicsSphericalDifferential",
]
# Module-level dict mapping representation string alias names to classes.
# This is populated by __init_subclass__ when called by Representation or
# Differential classes so that they are all registered automatically.
REPRESENTATION_CLASSES = {}
DIFFERENTIAL_CLASSES = {}
# set for tracking duplicates
DUPLICATE_REPRESENTATIONS = set()
# a hash for the content of the above two dicts, cached for speed.
_REPRDIFF_HASH = None
def _fqn_class(cls):
"""Get the fully qualified name of a class."""
return cls.__module__ + "." + cls.__qualname__
def get_reprdiff_cls_hash():
"""
    Returns a hash value that should be invariant as long as the
`REPRESENTATION_CLASSES` and `DIFFERENTIAL_CLASSES` dictionaries have not
changed.
"""
global _REPRDIFF_HASH
if _REPRDIFF_HASH is None:
_REPRDIFF_HASH = hash(tuple(REPRESENTATION_CLASSES.items())) + hash(
tuple(DIFFERENTIAL_CLASSES.items())
)
return _REPRDIFF_HASH
def _invalidate_reprdiff_cls_hash():
global _REPRDIFF_HASH
_REPRDIFF_HASH = None
def _array2string(values, prefix=""):
# Work around version differences for array2string.
kwargs = {"separator": ", ", "prefix": prefix}
kwargs["formatter"] = {}
return np.array2string(values, **kwargs)
class BaseRepresentationOrDifferentialInfo(MixinInfo):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
attrs_from_parent = {"unit"} # Indicates unit is read-only
_supports_indexing = False
@staticmethod
def default_format(val):
# Create numpy dtype so that numpy formatting will work.
components = val.components
values = tuple(getattr(val, component).value for component in components)
a = np.empty(
getattr(val, "shape", ()),
[(component, value.dtype) for component, value in zip(components, values)],
)
for component, value in zip(components, values):
a[component] = value
return str(a)
@property
def _represent_as_dict_attrs(self):
return self._parent.components
@property
def unit(self):
if self._parent is None:
return None
unit = self._parent._unitstr
return unit[1:-1] if unit.startswith("(") else unit
def new_like(self, reps, length, metadata_conflicts="warn", name=None):
"""
Return a new instance like ``reps`` with ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
reps : list
List of input representations or differentials.
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : `~astropy.coordinates.BaseRepresentation` or `~astropy.coordinates.BaseDifferential` subclass instance
            Empty instance of this class consistent with ``reps``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(
reps, metadata_conflicts, name, ("meta", "description")
)
# Make a new representation or differential with the desired length
# using the _apply / __getitem__ machinery to effectively return
# rep0[[0, 0, ..., 0, 0]]. This will have the right shape, and
# include possible differentials.
indexes = np.zeros(length, dtype=np.int64)
out = reps[0][indexes]
# Use __setitem__ machinery to check whether all representations
# can represent themselves as this one without loss of information.
for rep in reps[1:]:
try:
out[0] = rep[0]
except Exception as err:
raise ValueError("input representations are inconsistent.") from err
# Set (merged) info attributes.
for attr in ("name", "meta", "description"):
if attr in attrs:
setattr(out.info, attr, attrs[attr])
return out
class BaseRepresentationOrDifferential(ShapedLikeNDArray):
"""3D coordinate representations and differentials.
Parameters
----------
comp1, comp2, comp3 : `~astropy.units.Quantity` or subclass
The components of the 3D point or differential. The names are the
keys and the subclasses the values of the ``attr_classes`` attribute.
copy : bool, optional
If `True` (default), arrays will be copied; if `False`, they will be
broadcast together but not use new memory.
"""
# Ensure multiplication/division with ndarray or Quantity doesn't lead to
# object arrays.
__array_priority__ = 50000
info = BaseRepresentationOrDifferentialInfo()
def __init__(self, *args, **kwargs):
# make argument a list, so we can pop them off.
args = list(args)
components = self.components
if (
args
and isinstance(args[0], self.__class__)
and all(arg is None for arg in args[1:])
):
rep_or_diff = args[0]
copy = kwargs.pop("copy", True)
attrs = [getattr(rep_or_diff, component) for component in components]
if "info" in rep_or_diff.__dict__:
self.info = rep_or_diff.info
if kwargs:
raise TypeError(
"unexpected keyword arguments for case "
f"where class instance is passed in: {kwargs}"
)
else:
attrs = []
for component in components:
try:
attr = args.pop(0) if args else kwargs.pop(component)
except KeyError:
raise TypeError(
"__init__() missing 1 required positional "
f"argument: {component!r}"
) from None
if attr is None:
raise TypeError(
"__init__() missing 1 required positional argument:"
f" {component!r} (or first argument should be an instance of"
f" {self.__class__.__name__})."
)
attrs.append(attr)
copy = args.pop(0) if args else kwargs.pop("copy", True)
if args:
raise TypeError(f"unexpected arguments: {args}")
if kwargs:
for component in components:
if component in kwargs:
raise TypeError(
f"__init__() got multiple values for argument {component!r}"
)
raise TypeError(f"unexpected keyword arguments: {kwargs}")
# Pass attributes through the required initializing classes.
attrs = [
self.attr_classes[component](attr, copy=copy, subok=True)
for component, attr in zip(components, attrs)
]
try:
bc_attrs = np.broadcast_arrays(*attrs, subok=True)
except ValueError as err:
if len(components) <= 2:
c_str = " and ".join(components)
else:
c_str = ", ".join(components[:2]) + ", and " + components[2]
raise ValueError(f"Input parameters {c_str} cannot be broadcast") from err
# The output of np.broadcast_arrays() has limitations on writeability, so we perform
# additional handling to enable writeability in most situations. This is primarily
# relevant for allowing the changing of the wrap angle of longitude components.
#
# If the shape has changed for a given component, broadcasting is needed:
# If copy=True, we make a copy of the broadcasted array to ensure writeability.
# Note that array had already been copied prior to the broadcasting.
# TODO: Find a way to avoid the double copy.
# If copy=False, we use the broadcasted array, and writeability may still be
# limited.
# If the shape has not changed for a given component, we can proceed with using the
# non-broadcasted array, which avoids writeability issues from np.broadcast_arrays().
attrs = [
(bc_attr.copy() if copy else bc_attr)
if bc_attr.shape != attr.shape
else attr
for attr, bc_attr in zip(attrs, bc_attrs)
]
# Set private attributes for the attributes. (If not defined explicitly
# on the class, the metaclass will define properties to access these.)
for component, attr in zip(components, attrs):
setattr(self, "_" + component, attr)
@classmethod
def get_name(cls):
"""Name of the representation or differential.
In lower case, with any trailing 'representation' or 'differential'
removed. (E.g., 'spherical' for
`~astropy.coordinates.SphericalRepresentation` or
`~astropy.coordinates.SphericalDifferential`.)
"""
name = cls.__name__.lower()
if name.endswith("representation"):
name = name[:-14]
elif name.endswith("differential"):
name = name[:-12]
return name
# The two methods that any subclass has to define.
@classmethod
@abc.abstractmethod
def from_cartesian(cls, other):
"""Create a representation of this class from a supplied Cartesian one.
Parameters
----------
other : `~astropy.coordinates.CartesianRepresentation`
The representation to turn into this class
Returns
-------
representation : `~astropy.coordinates.BaseRepresentation` subclass instance
A new representation of this class's type.
"""
# Note: the above docstring gets overridden for differentials.
raise NotImplementedError()
@abc.abstractmethod
def to_cartesian(self):
"""Convert the representation to its Cartesian form.
Note that any differentials get dropped.
Also note that orientation information at the origin is *not* preserved by
conversions through Cartesian coordinates. For example, transforming
an angular position defined at distance=0 through cartesian coordinates
and back will lose the original angular coordinates::
>>> import astropy.units as u
>>> import astropy.coordinates as coord
>>> rep = coord.SphericalRepresentation(
... lon=15*u.deg,
... lat=-11*u.deg,
... distance=0*u.pc)
>>> rep.to_cartesian().represent_as(coord.SphericalRepresentation)
<SphericalRepresentation (lon, lat, distance) in (rad, rad, pc)
(0., 0., 0.)>
Returns
-------
cartrepr : `~astropy.coordinates.CartesianRepresentation`
The representation in Cartesian form.
"""
# Note: the above docstring gets overridden for differentials.
raise NotImplementedError()
@property
def components(self):
"""A tuple with the in-order names of the coordinate components."""
return tuple(self.attr_classes)
def __eq__(self, value):
"""Equality operator.
This implements strict equality and requires that the representation
classes are identical and that the representation data are exactly equal.
"""
if self.__class__ is not value.__class__:
raise TypeError(
"cannot compare: objects must have same class: "
f"{self.__class__.__name__} vs. {value.__class__.__name__}"
)
try:
np.broadcast(self, value)
except ValueError as exc:
raise ValueError(f"cannot compare: {exc}") from exc
out = True
for comp in self.components:
out &= getattr(self, "_" + comp) == getattr(value, "_" + comp)
return out
def __ne__(self, value):
return np.logical_not(self == value)
def _apply(self, method, *args, **kwargs):
"""Create a new representation or differential with ``method`` applied
to the component data.
In typical usage, the method is any of the shape-changing methods for
`~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those
picking particular elements (``__getitem__``, ``take``, etc.), which
are all defined in `~astropy.utils.shapes.ShapedLikeNDArray`. It will be
applied to the underlying arrays (e.g., ``x``, ``y``, and ``z`` for
`~astropy.coordinates.CartesianRepresentation`), with the results used
to create a new instance.
Internally, it is also used to apply functions to the components
(in particular, `~numpy.broadcast_to`).
Parameters
----------
method : str or callable
If str, it is the name of a method that is applied to the internal
``components``. If callable, the function is applied.
*args : tuple
Any positional arguments for ``method``.
**kwargs : dict
Any keyword arguments for ``method``.
"""
if callable(method):
apply_method = lambda array: method(array, *args, **kwargs)
else:
apply_method = operator.methodcaller(method, *args, **kwargs)
new = super().__new__(self.__class__)
for component in self.components:
setattr(new, "_" + component, apply_method(getattr(self, component)))
# Copy other 'info' attr only if it has actually been defined.
# See PR #3898 for further explanation and justification, along
# with Quantity.__array_finalize__
if "info" in self.__dict__:
new.info = self.info
return new
def __setitem__(self, item, value):
if value.__class__ is not self.__class__:
raise TypeError(
"can only set from object of same class: "
f"{self.__class__.__name__} vs. {value.__class__.__name__}"
)
for component in self.components:
getattr(self, "_" + component)[item] = getattr(value, "_" + component)
@property
def shape(self):
"""The shape of the instance and underlying arrays.
Like `~numpy.ndarray.shape`, can be set to a new shape by assigning a
tuple. Note that if different instances share some but not all
underlying data, setting the shape of one instance can make the other
instance unusable. Hence, it is strongly recommended to get new,
reshaped instances with the ``reshape`` method.
Raises
------
ValueError
If the new shape has the wrong total number of elements.
AttributeError
If the shape of any of the components cannot be changed without the
arrays being copied. For these cases, use the ``reshape`` method
(which copies any arrays that cannot be reshaped in-place).
"""
return getattr(self, self.components[0]).shape
@shape.setter
def shape(self, shape):
# We keep track of arrays that were already reshaped since we may have
# to return those to their original shape if a later shape-setting
# fails. (This can happen since coordinates are broadcast together.)
reshaped = []
oldshape = self.shape
for component in self.components:
val = getattr(self, component)
if val.size > 1:
try:
val.shape = shape
except Exception:
for val2 in reshaped:
val2.shape = oldshape
raise
else:
reshaped.append(val)
# Required to support multiplication and division, and defined by the base
# representation and differential classes.
@abc.abstractmethod
def _scale_operation(self, op, *args):
raise NotImplementedError()
def __mul__(self, other):
return self._scale_operation(operator.mul, other)
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
return self._scale_operation(operator.truediv, other)
def __neg__(self):
return self._scale_operation(operator.neg)
# Follow numpy convention and make an independent copy.
def __pos__(self):
return self.copy()
# Required to support addition and subtraction, and defined by the base
# representation and differential classes.
@abc.abstractmethod
def _combine_operation(self, op, other, reverse=False):
raise NotImplementedError()
def __add__(self, other):
return self._combine_operation(operator.add, other)
def __radd__(self, other):
return self._combine_operation(operator.add, other, reverse=True)
def __sub__(self, other):
return self._combine_operation(operator.sub, other)
def __rsub__(self, other):
return self._combine_operation(operator.sub, other, reverse=True)
# The following are used for repr and str
@property
def _values(self):
"""Turn the coordinates into a record array with the coordinate values.
The record array fields will have the component names.
"""
coo_items = [(c, getattr(self, c)) for c in self.components]
result = np.empty(self.shape, [(c, coo.dtype) for c, coo in coo_items])
for c, coo in coo_items:
result[c] = coo.value
return result
@property
def _units(self):
"""Return a dictionary with the units of the coordinate components."""
return {cmpnt: getattr(self, cmpnt).unit for cmpnt in self.components}
@property
def _unitstr(self):
units_set = set(self._units.values())
if len(units_set) == 1:
unitstr = units_set.pop().to_string()
else:
unitstr = "({})".format(
", ".join(
self._units[component].to_string() for component in self.components
)
)
return unitstr
def __str__(self):
return f"{_array2string(self._values)} {self._unitstr:s}"
def __repr__(self):
prefixstr = " "
arrstr = _array2string(self._values, prefix=prefixstr)
diffstr = ""
if getattr(self, "differentials", None):
diffstr = "\n (has differentials w.r.t.: {})".format(
", ".join([repr(key) for key in self.differentials.keys()])
)
unitstr = ("in " + self._unitstr) if self._unitstr else "[dimensionless]"
return (
f"<{self.__class__.__name__} ({', '.join(self.components)})"
f" {unitstr:s}\n{prefixstr}{arrstr}{diffstr}>"
)
def _make_getter(component):
"""Make an attribute getter for use in a property.
Parameters
----------
component : str
The name of the component that should be accessed. This assumes the
actual value is stored in an attribute of that name prefixed by '_'.
"""
# This has to be done in a function to ensure the reference to component
# is not lost/redirected.
component = "_" + component
def get_component(self):
return getattr(self, component)
return get_component
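# A sketch of the late-binding pitfall the helper above avoids: lambdas that
# close over the loop variable directly all end up reading its final value,
# so every component property would return the last component:
#
#     >>> getters = [lambda self: getattr(self, "_" + c) for c in ("x", "y")]
#     >>> # both lambdas now see c == "y"; _make_getter freezes each name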
class RepresentationInfo(BaseRepresentationOrDifferentialInfo):
@property
def _represent_as_dict_attrs(self):
attrs = super()._represent_as_dict_attrs
if self._parent._differentials:
attrs += ("differentials",)
return attrs
def _represent_as_dict(self, attrs=None):
out = super()._represent_as_dict(attrs)
for key, value in out.pop("differentials", {}).items():
out[f"differentials.{key}"] = value
return out
def _construct_from_dict(self, map):
differentials = {}
for key in list(map.keys()):
if key.startswith("differentials."):
differentials[key[14:]] = map.pop(key)
map["differentials"] = differentials
return super()._construct_from_dict(map)
class BaseRepresentation(BaseRepresentationOrDifferential):
"""Base for representing a point in a 3D coordinate system.
Parameters
----------
comp1, comp2, comp3 : `~astropy.units.Quantity` or subclass
The components of the 3D points. The names are the keys and the
subclasses the values of the ``attr_classes`` attribute.
differentials : dict, `~astropy.coordinates.BaseDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single `~astropy.coordinates.BaseDifferential`
subclass instance, or a dictionary with keys set to a string
representation of the SI unit with which the differential (derivative)
is taken. For example, for a velocity differential on a positional
representation, the key would be ``'s'`` for seconds, indicating that
the derivative is a time derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
Notes
-----
All representation classes should subclass this base representation class,
and define an ``attr_classes`` attribute, a `dict`
which maps component names to the class that creates them. They must also
define a ``to_cartesian`` method and a ``from_cartesian`` class method. By
default, transformations are done via the cartesian system, but classes
that want to define a smarter transformation path can overload the
``represent_as`` method. If one wants to use an associated differential
class, one should also define ``unit_vectors`` and ``scale_factors``
methods (see those methods for details).
"""
info = RepresentationInfo()
def __init_subclass__(cls, **kwargs):
# Register representation name (except for BaseRepresentation)
if cls.__name__ == "BaseRepresentation":
return
if not hasattr(cls, "attr_classes"):
raise NotImplementedError(
'Representations must have an "attr_classes" class attribute.'
)
repr_name = cls.get_name()
# first time a duplicate is added
# remove first entry and add both using their qualnames
if repr_name in REPRESENTATION_CLASSES:
DUPLICATE_REPRESENTATIONS.add(repr_name)
fqn_cls = _fqn_class(cls)
existing = REPRESENTATION_CLASSES[repr_name]
fqn_existing = _fqn_class(existing)
if fqn_cls == fqn_existing:
raise ValueError(f'Representation "{fqn_cls}" already defined')
msg = (
f'Representation "{repr_name}" already defined, removing it to avoid'
                f' confusion. Use qualnames "{fqn_cls}" and "{fqn_existing}" or class'
" instances directly"
)
warnings.warn(msg, DuplicateRepresentationWarning)
del REPRESENTATION_CLASSES[repr_name]
REPRESENTATION_CLASSES[fqn_existing] = existing
repr_name = fqn_cls
# further definitions with the same name, just add qualname
elif repr_name in DUPLICATE_REPRESENTATIONS:
fqn_cls = _fqn_class(cls)
warnings.warn(
f'Representation "{repr_name}" already defined, using qualname '
f'"{fqn_cls}".'
)
repr_name = fqn_cls
if repr_name in REPRESENTATION_CLASSES:
raise ValueError(f'Representation "{repr_name}" already defined')
REPRESENTATION_CLASSES[repr_name] = cls
_invalidate_reprdiff_cls_hash()
# define getters for any component that does not yet have one.
for component in cls.attr_classes:
if not hasattr(cls, component):
setattr(
cls,
component,
property(
_make_getter(component),
doc=f"The '{component}' component of the points(s).",
),
)
super().__init_subclass__(**kwargs)
def __init__(self, *args, differentials=None, **kwargs):
# Handle any differentials passed in.
super().__init__(*args, **kwargs)
if differentials is None and args and isinstance(args[0], self.__class__):
differentials = args[0]._differentials
self._differentials = self._validate_differentials(differentials)
def _validate_differentials(self, differentials):
"""
Validate that the provided differentials are appropriate for this
representation and recast/reshape as necessary and then return.
Note that this does *not* set the differentials on
``self._differentials``, but rather leaves that for the caller.
"""
# Now handle the actual validation of any specified differential classes
if differentials is None:
differentials = dict()
elif isinstance(differentials, BaseDifferential):
# We can't handle auto-determining the key for this combo
if isinstance(differentials, RadialDifferential) and isinstance(
self, UnitSphericalRepresentation
):
raise ValueError(
"To attach a RadialDifferential to a UnitSphericalRepresentation,"
" you must supply a dictionary with an appropriate key."
)
key = differentials._get_deriv_key(self)
differentials = {key: differentials}
for key in differentials:
try:
diff = differentials[key]
except TypeError as err:
raise TypeError(
"'differentials' argument must be a dictionary-like object"
) from err
diff._check_base(self)
if isinstance(diff, RadialDifferential) and isinstance(
self, UnitSphericalRepresentation
):
# We trust the passing of a key for a RadialDifferential
# attached to a UnitSphericalRepresentation because it will not
# have a paired component name (UnitSphericalRepresentation has
# no .distance) to automatically determine the expected key
pass
else:
expected_key = diff._get_deriv_key(self)
if key != expected_key:
raise ValueError(
f"For differential object '{repr(diff)}', expected "
f"unit key = '{expected_key}' but received key = '{key}'"
)
# For now, we are very rigid: differentials must have the same shape
# as the representation. This makes it easier to handle __getitem__
# and any other shape-changing operations on representations that
# have associated differentials
if diff.shape != self.shape:
# TODO: message of IncompatibleShapeError is not customizable,
                # so use a ValueError instead
raise ValueError(
"Shape of differentials must be the same "
f"as the shape of the representation ({diff.shape} vs {self.shape})"
)
return differentials
def _raise_if_has_differentials(self, op_name):
"""
Used to raise a consistent exception for any operation that is not
supported when a representation has differentials attached.
"""
if self.differentials:
raise TypeError(
f"Operation '{op_name}' is not supported when "
f"differentials are attached to a {self.__class__.__name__}."
)
@classproperty
def _compatible_differentials(cls):
return [DIFFERENTIAL_CLASSES[cls.get_name()]]
@property
def differentials(self):
"""A dictionary of differential class instances.
The keys of this dictionary must be a string representation of the SI
unit with which the differential (derivative) is taken. For example, for
a velocity differential on a positional representation, the key would be
``'s'`` for seconds, indicating that the derivative is a time
derivative.
"""
return self._differentials
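    # A minimal sketch of attaching a time differential under the 's' key
    # (values are illustrative):
    #
    #     >>> import astropy.units as u
    #     >>> d = CartesianDifferential(1, 2, 3, unit=u.km / u.s)
    #     >>> rep = CartesianRepresentation(1, 2, 3, unit=u.kpc,
    #     ...                               differentials={'s': d})
    #     >>> list(rep.differentials)
    #     ['s']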
# We do not make unit_vectors and scale_factors abstract methods, since
# they are only necessary if one also defines an associated Differential.
# Also, doing so would break pre-differential representation subclasses.
def unit_vectors(self):
r"""Cartesian unit vectors in the direction of each component.
Given unit vectors :math:`\hat{e}_c` and scale factors :math:`f_c`,
a change in one component of :math:`\delta c` corresponds to a change
in representation of :math:`\delta c \times f_c \times \hat{e}_c`.
Returns
-------
unit_vectors : dict of `~astropy.coordinates.CartesianRepresentation`
The keys are the component names.
"""
raise NotImplementedError(f"{type(self)} has not implemented unit vectors")
def scale_factors(self):
r"""Scale factors for each component's direction.
Given unit vectors :math:`\hat{e}_c` and scale factors :math:`f_c`,
a change in one component of :math:`\delta c` corresponds to a change
in representation of :math:`\delta c \times f_c \times \hat{e}_c`.
Returns
-------
scale_factors : dict of `~astropy.units.Quantity`
The keys are the component names.
"""
raise NotImplementedError(f"{type(self)} has not implemented scale factors.")
def _re_represent_differentials(self, new_rep, differential_class):
"""Re-represent the differentials to the specified classes.
This returns a new dictionary with the same keys but with the
attached differentials converted to the new differential classes.
"""
if differential_class is None:
return dict()
if not self.differentials and differential_class:
raise ValueError("No differentials associated with this representation!")
elif (
len(self.differentials) == 1
and inspect.isclass(differential_class)
and issubclass(differential_class, BaseDifferential)
):
# TODO: is there a better way to do this?
differential_class = {
list(self.differentials.keys())[0]: differential_class
}
elif differential_class.keys() != self.differentials.keys():
raise ValueError(
"Desired differential classes must be passed in as a dictionary with"
" keys equal to a string representation of the unit of the derivative"
" for each differential stored with this "
f"representation object ({self.differentials})"
)
new_diffs = dict()
for k in self.differentials:
diff = self.differentials[k]
try:
new_diffs[k] = diff.represent_as(differential_class[k], base=self)
except Exception as err:
if differential_class[k] not in new_rep._compatible_differentials:
raise TypeError(
f"Desired differential class {differential_class[k]} is not "
"compatible with the desired "
f"representation class {new_rep.__class__}"
) from err
else:
raise
return new_diffs
def represent_as(self, other_class, differential_class=None):
"""Convert coordinates to another representation.
If the instance is of the requested class, it is returned unmodified.
By default, conversion is done via Cartesian coordinates.
Also note that orientation information at the origin is *not* preserved by
conversions through Cartesian coordinates. See the docstring for
:meth:`~astropy.coordinates.BaseRepresentationOrDifferential.to_cartesian`
for an example.
Parameters
----------
other_class : `~astropy.coordinates.BaseRepresentation` subclass
The type of representation to turn the coordinates into.
differential_class : dict of `~astropy.coordinates.BaseDifferential`, optional
Classes in which the differentials should be represented.
Can be a single class if only a single differential is attached,
otherwise it should be a `dict` keyed by the same keys as the
differentials.
"""
if other_class is self.__class__ and not differential_class:
return self.without_differentials()
else:
if isinstance(other_class, str):
raise ValueError(
"Input to a representation's represent_as must be a class, not "
"a string. For strings, use frame objects."
)
if other_class is not self.__class__:
# The default is to convert via cartesian coordinates
new_rep = other_class.from_cartesian(self.to_cartesian())
else:
new_rep = self
new_rep._differentials = self._re_represent_differentials(
new_rep, differential_class
)
return new_rep
def transform(self, matrix):
"""Transform coordinates using a 3x3 matrix in a Cartesian basis.
This returns a new representation and does not modify the original one.
Any differentials attached to this representation will also be
transformed.
Parameters
----------
matrix : (3,3) array-like
A 3x3 (or stack thereof) matrix, such as a rotation matrix.
"""
# route transformation through Cartesian
difs_cls = {k: CartesianDifferential for k in self.differentials.keys()}
crep = self.represent_as(
CartesianRepresentation, differential_class=difs_cls
).transform(matrix)
# move back to original representation
difs_cls = {k: diff.__class__ for k, diff in self.differentials.items()}
rep = crep.represent_as(self.__class__, difs_cls)
return rep
def with_differentials(self, differentials):
"""
Create a new representation with the same positions as this
representation, but with these new differentials.
Differential keys that already exist in this object's differential dict
are overwritten.
Parameters
----------
differentials : sequence of `~astropy.coordinates.BaseDifferential` subclass instance
The differentials for the new representation to have.
Returns
-------
`~astropy.coordinates.BaseRepresentation` subclass instance
A copy of this representation, but with the ``differentials`` as
its differentials.
"""
if not differentials:
return self
args = [getattr(self, component) for component in self.components]
# We shallow copy the differentials dictionary so we don't update the
# current object's dictionary when adding new keys
new_rep = self.__class__(
*args, differentials=self.differentials.copy(), copy=False
)
new_rep._differentials.update(new_rep._validate_differentials(differentials))
return new_rep
def without_differentials(self):
"""Return a copy of the representation without attached differentials.
Returns
-------
`~astropy.coordinates.BaseRepresentation` subclass instance
A shallow copy of this representation, without any differentials.
If no differentials were present, no copy is made.
"""
if not self._differentials:
return self
args = [getattr(self, component) for component in self.components]
return self.__class__(*args, copy=False)
@classmethod
def from_representation(cls, representation):
"""Create a new instance of this representation from another one.
Parameters
----------
representation : `~astropy.coordinates.BaseRepresentation` instance
            The representation that should be converted to this class.
"""
return representation.represent_as(cls)
def __eq__(self, value):
"""Equality operator for BaseRepresentation.
This implements strict equality and requires that the representation
classes are identical, the differentials are identical, and that the
representation data are exactly equal.
"""
        # BaseRepresentationOrDifferential (checks classes and compares components)
out = super().__eq__(value)
# super() checks that the class is identical so can this even happen?
# (same class, different differentials ?)
if self._differentials.keys() != value._differentials.keys():
raise ValueError("cannot compare: objects must have same differentials")
for self_diff, value_diff in zip(
self._differentials.values(), value._differentials.values()
):
out &= self_diff == value_diff
return out
def __ne__(self, value):
return np.logical_not(self == value)
def _apply(self, method, *args, **kwargs):
"""Create a new representation with ``method`` applied to the component
data.
This is not a simple inherit from ``BaseRepresentationOrDifferential``
because we need to call ``._apply()`` on any associated differential
classes.
See docstring for `BaseRepresentationOrDifferential._apply`.
Parameters
----------
method : str or callable
If str, it is the name of a method that is applied to the internal
``components``. If callable, the function is applied.
*args : tuple
Any positional arguments for ``method``.
**kwargs : dict
Any keyword arguments for ``method``.
"""
rep = super()._apply(method, *args, **kwargs)
rep._differentials = {
k: diff._apply(method, *args, **kwargs)
for k, diff in self._differentials.items()
}
return rep
def __setitem__(self, item, value):
if not isinstance(value, BaseRepresentation):
raise TypeError(
f"value must be a representation instance, not {type(value)}."
)
if not (
isinstance(value, self.__class__)
or len(value.attr_classes) == len(self.attr_classes)
):
raise ValueError(
f"value must be representable as {self.__class__.__name__} "
"without loss of information."
)
diff_classes = {}
if self._differentials:
if self._differentials.keys() != value._differentials.keys():
raise ValueError("value must have the same differentials.")
for key, self_diff in self._differentials.items():
diff_classes[key] = self_diff_cls = self_diff.__class__
value_diff_cls = value._differentials[key].__class__
if not (
                    issubclass(value_diff_cls, self_diff_cls)
or (
len(value_diff_cls.attr_classes)
== len(self_diff_cls.attr_classes)
)
):
raise ValueError(
f"value differential {key!r} must be representable as "
f"{self_diff.__class__.__name__} without loss of information."
)
value = value.represent_as(self.__class__, diff_classes)
super().__setitem__(item, value)
for key, differential in self._differentials.items():
differential[item] = value._differentials[key]
def _scale_operation(self, op, *args):
"""Scale all non-angular components, leaving angular ones unchanged.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.mul`, `~operator.neg`, etc.).
*args
Any arguments required for the operator (typically, what is to
be multiplied with, divided by).
"""
results = []
for component, cls in self.attr_classes.items():
value = getattr(self, component)
if issubclass(cls, Angle):
results.append(value)
else:
results.append(op(value, *args))
# try/except catches anything that cannot initialize the class, such
# as operations that returned NotImplemented or a representation
# instead of a quantity (as would happen for, e.g., rep * rep).
try:
result = self.__class__(*results)
except Exception:
return NotImplemented
for key, differential in self.differentials.items():
diff_result = differential._scale_operation(op, *args, scaled_base=True)
result.differentials[key] = diff_result
return result
def _combine_operation(self, op, other, reverse=False):
"""Combine two representation.
By default, operate on the cartesian representations of both.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.).
other : `~astropy.coordinates.BaseRepresentation` subclass instance
The other representation.
reverse : bool
Whether the operands should be reversed (e.g., as we got here via
``self.__rsub__`` because ``self`` is a subclass of ``other``).
"""
self._raise_if_has_differentials(op.__name__)
result = self.to_cartesian()._combine_operation(op, other, reverse)
if result is NotImplemented:
return NotImplemented
else:
return self.from_cartesian(result)
# We need to override this setter to support differentials
@BaseRepresentationOrDifferential.shape.setter
def shape(self, shape):
orig_shape = self.shape
# See: https://stackoverflow.com/questions/3336767/ for an example
BaseRepresentationOrDifferential.shape.fset(self, shape)
# also try to perform shape-setting on any associated differentials
try:
for k in self.differentials:
self.differentials[k].shape = shape
except Exception:
BaseRepresentationOrDifferential.shape.fset(self, orig_shape)
for k in self.differentials:
self.differentials[k].shape = orig_shape
raise
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units.
Note that any associated differentials will be dropped during this
operation.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
return np.sqrt(
sum(
getattr(self, component) ** 2
for component, cls in self.attr_classes.items()
if not issubclass(cls, Angle)
)
)
def mean(self, *args, **kwargs):
"""Vector mean.
Averaging is done by converting the representation to cartesian, and
taking the mean of the x, y, and z components. The result is converted
back to the same representation as the input.
Refer to `~numpy.mean` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
Returns
-------
mean : `~astropy.coordinates.BaseRepresentation` subclass instance
Vector mean, in the same representation as that of the input.
"""
self._raise_if_has_differentials("mean")
return self.from_cartesian(self.to_cartesian().mean(*args, **kwargs))
def sum(self, *args, **kwargs):
"""Vector sum.
Adding is done by converting the representation to cartesian, and
summing the x, y, and z components. The result is converted back to the
same representation as the input.
Refer to `~numpy.sum` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
Returns
-------
sum : `~astropy.coordinates.BaseRepresentation` subclass instance
Vector sum, in the same representation as that of the input.
"""
self._raise_if_has_differentials("sum")
return self.from_cartesian(self.to_cartesian().sum(*args, **kwargs))
def dot(self, other):
"""Dot product of two representations.
The calculation is done by converting both ``self`` and ``other``
to `~astropy.coordinates.CartesianRepresentation`.
Note that any associated differentials will be dropped during this
operation.
Parameters
----------
other : `~astropy.coordinates.BaseRepresentation`
The representation to take the dot product with.
Returns
-------
dot_product : `~astropy.units.Quantity`
The sum of the product of the x, y, and z components of the
cartesian representations of ``self`` and ``other``.
"""
return self.to_cartesian().dot(other)
def cross(self, other):
"""Vector cross product of two representations.
The calculation is done by converting both ``self`` and ``other``
to `~astropy.coordinates.CartesianRepresentation`, and converting the
result back to the type of representation of ``self``.
Parameters
----------
other : `~astropy.coordinates.BaseRepresentation` subclass instance
The representation to take the cross product with.
Returns
-------
cross_product : `~astropy.coordinates.BaseRepresentation` subclass instance
With vectors perpendicular to both ``self`` and ``other``, in the
same type of representation as ``self``.
"""
self._raise_if_has_differentials("cross")
return self.from_cartesian(self.to_cartesian().cross(other))
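# A minimal sketch of ``represent_as`` (values illustrative; conversion goes
# through Cartesian coordinates by default, as described above):
#
#     >>> import astropy.units as u
#     >>> cart = CartesianRepresentation(1, 1, 0, unit=u.kpc)
#     >>> sph = cart.represent_as(SphericalRepresentation)
#     >>> sph.lon.to(u.deg)  # 45 deg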
class CartesianRepresentation(BaseRepresentation):
"""
Representation of points in 3D cartesian coordinates.
Parameters
----------
x, y, z : `~astropy.units.Quantity` or array
The x, y, and z coordinates of the point(s). If ``x``, ``y``, and ``z``
have different shapes, they should be broadcastable. If not quantity,
``unit`` should be set. If only ``x`` is given, it is assumed that it
contains an array with the 3 coordinates stored along ``xyz_axis``.
unit : unit-like
If given, the coordinates will be converted to this unit (or taken to
be in this unit if not given).
xyz_axis : int, optional
The axis along which the coordinates are stored when a single array is
provided rather than distinct ``x``, ``y``, and ``z`` (default: 0).
differentials : dict, `~astropy.coordinates.CartesianDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single
`~astropy.coordinates.CartesianDifferential` instance, or a dictionary of
`~astropy.coordinates.CartesianDifferential` s with keys set to a string representation of
the SI unit with which the differential (derivative) is taken. For
example, for a velocity differential on a positional representation, the
key would be ``'s'`` for seconds, indicating that the derivative is a
time derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
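Examples
--------
A minimal sketch of the two construction modes, from three separate
coordinates or from a single array:
>>> import numpy as np
>>> from astropy import units as u
>>> from astropy.coordinates import CartesianRepresentation
>>> rep = CartesianRepresentation(1 * u.kpc, 2 * u.kpc, 3 * u.kpc)
>>> rep.x
<Quantity 1. kpc>
>>> rep = CartesianRepresentation(np.arange(6.).reshape(3, 2), unit=u.pc)
>>> rep.shape
(2,)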
"""
attr_classes = {"x": u.Quantity, "y": u.Quantity, "z": u.Quantity}
_xyz = None
def __init__(
self, x, y=None, z=None, unit=None, xyz_axis=None, differentials=None, copy=True
):
if y is None and z is None:
if isinstance(x, np.ndarray) and x.dtype.kind not in "OV":
# Short-cut for 3-D array input.
x = u.Quantity(x, unit, copy=copy, subok=True)
# Keep a link to the array with all three coordinates
# so that we can return it quickly if needed in get_xyz.
self._xyz = x
if xyz_axis:
x = np.moveaxis(x, xyz_axis, 0)
self._xyz_axis = xyz_axis
else:
self._xyz_axis = 0
self._x, self._y, self._z = x
self._differentials = self._validate_differentials(differentials)
return
elif (
isinstance(x, CartesianRepresentation)
and unit is None
and xyz_axis is None
):
if differentials is None:
differentials = x._differentials
return super().__init__(x, differentials=differentials, copy=copy)
else:
x, y, z = x
if xyz_axis is not None:
raise ValueError(
"xyz_axis should only be set if x, y, and z are in a single array"
" passed in through x, i.e., y and z should not be not given."
)
if y is None or z is None:
raise ValueError(
f"x, y, and z are required to instantiate {self.__class__.__name__}"
)
if unit is not None:
x = u.Quantity(x, unit, copy=copy, subok=True)
y = u.Quantity(y, unit, copy=copy, subok=True)
z = u.Quantity(z, unit, copy=copy, subok=True)
copy = False
super().__init__(x, y, z, copy=copy, differentials=differentials)
if not (
self._x.unit.is_equivalent(self._y.unit)
and self._x.unit.is_equivalent(self._z.unit)
):
raise u.UnitsError("x, y, and z should have matching physical types")
def unit_vectors(self):
l = np.broadcast_to(1.0 * u.one, self.shape, subok=True)
o = np.broadcast_to(0.0 * u.one, self.shape, subok=True)
return {
"x": CartesianRepresentation(l, o, o, copy=False),
"y": CartesianRepresentation(o, l, o, copy=False),
"z": CartesianRepresentation(o, o, l, copy=False),
}
def scale_factors(self):
l = np.broadcast_to(1.0 * u.one, self.shape, subok=True)
return {"x": l, "y": l, "z": l}
def get_xyz(self, xyz_axis=0):
"""Return a vector array of the x, y, and z coordinates.
Parameters
----------
xyz_axis : int, optional
The axis in the final array along which the x, y, z components
should be stored (default: 0).
Returns
-------
xyz : `~astropy.units.Quantity`
With dimension 3 along ``xyz_axis``. Note that, if possible,
this will be a view.
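Examples
--------
A quick shape check (a minimal sketch):
>>> from astropy import units as u
>>> from astropy.coordinates import CartesianRepresentation
>>> rep = CartesianRepresentation([1, 2] * u.pc, [3, 4] * u.pc, [5, 6] * u.pc)
>>> rep.get_xyz().shape
(3, 2)
>>> rep.get_xyz(xyz_axis=-1).shape
(2, 3)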
"""
if self._xyz is not None:
if self._xyz_axis == xyz_axis:
return self._xyz
else:
return np.moveaxis(self._xyz, self._xyz_axis, xyz_axis)
# Create combined array. TO DO: keep it in _xyz for repeated use?
# But then in-place changes have to cancel it. Likely best to
# also update components.
return np.stack([self._x, self._y, self._z], axis=xyz_axis)
xyz = property(get_xyz)
@classmethod
def from_cartesian(cls, other):
return other
def to_cartesian(self):
return self
def transform(self, matrix):
"""
Transform the cartesian coordinates using a 3x3 matrix.
This returns a new representation and does not modify the original one.
Any differentials attached to this representation will also be
transformed.
Parameters
----------
matrix : ndarray
A 3x3 transformation matrix, such as a rotation matrix.
Examples
--------
We can start off by creating a cartesian representation object:
>>> from astropy import units as u
>>> from astropy.coordinates import CartesianRepresentation
>>> rep = CartesianRepresentation([1, 2] * u.pc,
... [2, 3] * u.pc,
... [3, 4] * u.pc)
We now create a rotation matrix around the z axis:
>>> from astropy.coordinates.matrix_utilities import rotation_matrix
>>> rotation = rotation_matrix(30 * u.deg, axis='z')
Finally, we can apply this transformation:
>>> rep_new = rep.transform(rotation)
>>> rep_new.xyz # doctest: +FLOAT_CMP
<Quantity [[ 1.8660254 , 3.23205081],
[ 1.23205081, 1.59807621],
[ 3. , 4. ]] pc>
"""
# erfa rxp: Multiply a p-vector by an r-matrix.
p = erfa_ufunc.rxp(matrix, self.get_xyz(xyz_axis=-1))
# transformed representation
rep = self.__class__(p, xyz_axis=-1, copy=False)
# Handle differentials attached to this representation
new_diffs = {
k: d.transform(matrix, self, rep) for k, d in self.differentials.items()
}
return rep.with_differentials(new_diffs)
def _combine_operation(self, op, other, reverse=False):
self._raise_if_has_differentials(op.__name__)
try:
other_c = other.to_cartesian()
except Exception:
return NotImplemented
first, second = (self, other_c) if not reverse else (other_c, self)
return self.__class__(
*(
op(getattr(first, component), getattr(second, component))
for component in first.components
)
)
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units.
Note that any associated differentials will be dropped during this
operation.
Returns
-------
norm : `~astropy.units.Quantity`
Vector norm, with the same shape as the representation.
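Examples
--------
A minimal sketch:
>>> from astropy import units as u
>>> from astropy.coordinates import CartesianRepresentation
>>> CartesianRepresentation(3 * u.m, 4 * u.m, 0 * u.m).norm()  # doctest: +FLOAT_CMP
<Quantity 5. m>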
"""
# erfa pm: Modulus of p-vector.
return erfa_ufunc.pm(self.get_xyz(xyz_axis=-1))
def mean(self, *args, **kwargs):
"""Vector mean.
Returns a new CartesianRepresentation instance with the means of the
x, y, and z components.
Refer to `~numpy.mean` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials("mean")
return self._apply("mean", *args, **kwargs)
def sum(self, *args, **kwargs):
"""Vector sum.
Returns a new CartesianRepresentation instance with the sums of the
x, y, and z components.
Refer to `~numpy.sum` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials("sum")
return self._apply("sum", *args, **kwargs)
def dot(self, other):
"""Dot product of two representations.
Note that any associated differentials will be dropped during this
operation.
Parameters
----------
other : `~astropy.coordinates.BaseRepresentation` subclass instance
If not already cartesian, it is converted.
Returns
-------
dot_product : `~astropy.units.Quantity`
The sum of the product of the x, y, and z components of ``self``
and ``other``.
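Examples
--------
A minimal sketch:
>>> from astropy import units as u
>>> from astropy.coordinates import CartesianRepresentation
>>> rep1 = CartesianRepresentation(1 * u.m, 2 * u.m, 3 * u.m)
>>> rep2 = CartesianRepresentation(4 * u.m, 5 * u.m, 6 * u.m)
>>> rep1.dot(rep2)  # doctest: +FLOAT_CMP
<Quantity 32. m2>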
"""
try:
other_c = other.to_cartesian()
except Exception as err:
raise TypeError(
"can only take dot product with another "
f"representation, not a {type(other)} instance."
) from err
# erfa pdp: p-vector inner (=scalar=dot) product.
return erfa_ufunc.pdp(self.get_xyz(xyz_axis=-1), other_c.get_xyz(xyz_axis=-1))
def cross(self, other):
"""Cross product of two representations.
Parameters
----------
other : `~astropy.coordinates.BaseRepresentation` subclass instance
If not already cartesian, it is converted.
Returns
-------
cross_product : `~astropy.coordinates.CartesianRepresentation`
With vectors perpendicular to both ``self`` and ``other``.
"""
self._raise_if_has_differentials("cross")
try:
other_c = other.to_cartesian()
except Exception as err:
raise TypeError(
"cannot only take cross product with another "
f"representation, not a {type(other)} instance."
) from err
# erfa pxp: p-vector outer (=vector=cross) product.
sxo = erfa_ufunc.pxp(self.get_xyz(xyz_axis=-1), other_c.get_xyz(xyz_axis=-1))
return self.__class__(sxo, xyz_axis=-1)
class UnitSphericalRepresentation(BaseRepresentation):
"""
Representation of points on a unit sphere.
Parameters
----------
lon, lat : `~astropy.units.Quantity` ['angle'] or str
The longitude and latitude of the point(s), in angular units. The
latitude should be between -90 and 90 degrees, and the longitude will
be wrapped to an angle between 0 and 360 degrees. These can also be
instances of `~astropy.coordinates.Angle`,
`~astropy.coordinates.Longitude`, or `~astropy.coordinates.Latitude`.
differentials : dict, `~astropy.coordinates.BaseDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single `~astropy.coordinates.BaseDifferential`
instance (see `._compatible_differentials` for valid types), or a
dictionary of differential instances with keys set to a string
representation of the SI unit with which the differential (derivative)
is taken. For example, for a velocity differential on a positional
representation, the key would be ``'s'`` for seconds, indicating that
the derivative is a time derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
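Examples
--------
A minimal sketch; the inputs are validated and wrapped as
`~astropy.coordinates.Longitude` and `~astropy.coordinates.Latitude`:
>>> from astropy import units as u
>>> from astropy.coordinates import UnitSphericalRepresentation
>>> rep = UnitSphericalRepresentation(370 * u.deg, 15 * u.deg)
>>> rep.lon
<Longitude 10. deg>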
"""
attr_classes = {"lon": Longitude, "lat": Latitude}
@classproperty
def _dimensional_representation(cls):
return SphericalRepresentation
def __init__(self, lon, lat=None, differentials=None, copy=True):
super().__init__(lon, lat, differentials=differentials, copy=copy)
@classproperty
def _compatible_differentials(cls):
return [
UnitSphericalDifferential,
UnitSphericalCosLatDifferential,
SphericalDifferential,
SphericalCosLatDifferential,
RadialDifferential,
]
# Could let the metaclass define these automatically, but good to have
# a bit clearer docstrings.
@property
def lon(self):
"""
The longitude of the point(s).
"""
return self._lon
@property
def lat(self):
"""
The latitude of the point(s).
"""
return self._lat
def unit_vectors(self):
sinlon, coslon = np.sin(self.lon), np.cos(self.lon)
sinlat, coslat = np.sin(self.lat), np.cos(self.lat)
return {
"lon": CartesianRepresentation(-sinlon, coslon, 0.0, copy=False),
"lat": CartesianRepresentation(
-sinlat * coslon, -sinlat * sinlon, coslat, copy=False
),
}
def scale_factors(self, omit_coslat=False):
sf_lat = np.broadcast_to(1.0 / u.radian, self.shape, subok=True)
sf_lon = sf_lat if omit_coslat else np.cos(self.lat) / u.radian
return {"lon": sf_lon, "lat": sf_lat}
def to_cartesian(self):
"""
Converts spherical polar coordinates to 3D rectangular cartesian
coordinates.
"""
# erfa s2c: Convert [unit]spherical coordinates to Cartesian.
p = erfa_ufunc.s2c(self.lon, self.lat)
return CartesianRepresentation(p, xyz_axis=-1, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
"""
p = cart.get_xyz(xyz_axis=-1)
# erfa c2s: P-vector to [unit]spherical coordinates.
return cls(*erfa_ufunc.c2s(p), copy=False)
def represent_as(self, other_class, differential_class=None):
# Take a short cut if the other class is a spherical representation
# TODO! for differential_class. This cannot (currently) be implemented
# like in the other Representations since `_re_represent_differentials`
# keeps differentials' unit keys, but this can result in a mismatch
# between the UnitSpherical expected key (e.g. "s") and that expected
# in the other class (here "s / m"). For more info, see PR #11467
if inspect.isclass(other_class) and not differential_class:
if issubclass(other_class, PhysicsSphericalRepresentation):
return other_class(
phi=self.lon, theta=90 * u.deg - self.lat, r=1.0, copy=False
)
elif issubclass(other_class, SphericalRepresentation):
return other_class(lon=self.lon, lat=self.lat, distance=1.0, copy=False)
return super().represent_as(other_class, differential_class)
def transform(self, matrix):
r"""Transform the unit-spherical coordinates using a 3x3 matrix.
This returns a new representation and does not modify the original one.
Any differentials attached to this representation will also be
transformed.
Parameters
----------
matrix : (3,3) array-like
A 3x3 matrix, such as a rotation matrix (or a stack of matrices).
Returns
-------
`~astropy.coordinates.UnitSphericalRepresentation` or `~astropy.coordinates.SphericalRepresentation`
If ``matrix`` is O(3) -- :math:`M \cdot M^T = I` -- like a rotation,
then the result is a `~astropy.coordinates.UnitSphericalRepresentation`.
All other matrices will change the distance, so the dimensional
representation is used instead.
"""
# the transformation matrix does not need to be a rotation matrix,
# so the unit-distance is not guaranteed. For speed, we check if the
# matrix is in O(3) and preserves lengths.
if np.all(is_O3(matrix)): # remain in unit-rep
xyz = erfa_ufunc.s2c(self.lon, self.lat)
p = erfa_ufunc.rxp(matrix, xyz)
lon, lat = erfa_ufunc.c2s(p)
rep = self.__class__(lon=lon, lat=lat)
# handle differentials
new_diffs = {
k: d.transform(matrix, self, rep) for k, d in self.differentials.items()
}
rep = rep.with_differentials(new_diffs)
else: # switch to dimensional representation
rep = self._dimensional_representation(
lon=self.lon, lat=self.lat, distance=1, differentials=self.differentials
).transform(matrix)
return rep
def _scale_operation(self, op, *args):
return self._dimensional_representation(
lon=self.lon, lat=self.lat, distance=1.0, differentials=self.differentials
)._scale_operation(op, *args)
def __neg__(self):
if any(
differential.base_representation is not self.__class__
for differential in self.differentials.values()
):
return super().__neg__()
result = self.__class__(self.lon + 180.0 * u.deg, -self.lat, copy=False)
for key, differential in self.differentials.items():
new_comps = (
op(getattr(differential, comp))
for op, comp in zip(
(operator.pos, operator.neg), differential.components
)
)
result.differentials[key] = differential.__class__(*new_comps, copy=False)
return result
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units, which is
always unity for vectors on the unit sphere.
Returns
-------
norm : `~astropy.units.Quantity` ['dimensionless']
Dimensionless ones, with the same shape as the representation.
"""
return u.Quantity(np.ones(self.shape), u.dimensionless_unscaled, copy=False)
def _combine_operation(self, op, other, reverse=False):
self._raise_if_has_differentials(op.__name__)
result = self.to_cartesian()._combine_operation(op, other, reverse)
if result is NotImplemented:
return NotImplemented
else:
return self._dimensional_representation.from_cartesian(result)
def mean(self, *args, **kwargs):
"""Vector mean.
The representation is converted to cartesian, the means of the x, y,
and z components are calculated, and the result is converted to a
`~astropy.coordinates.SphericalRepresentation`.
Refer to `~numpy.mean` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials("mean")
return self._dimensional_representation.from_cartesian(
self.to_cartesian().mean(*args, **kwargs)
)
def sum(self, *args, **kwargs):
"""Vector sum.
The representation is converted to cartesian, the sums of the x, y,
and z components are calculated, and the result is converted to a
`~astropy.coordinates.SphericalRepresentation`.
Refer to `~numpy.sum` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials("sum")
return self._dimensional_representation.from_cartesian(
self.to_cartesian().sum(*args, **kwargs)
)
def cross(self, other):
"""Cross product of two representations.
The calculation is done by converting both ``self`` and ``other``
to `~astropy.coordinates.CartesianRepresentation`, and converting the
result back to `~astropy.coordinates.SphericalRepresentation`.
Parameters
----------
other : `~astropy.coordinates.BaseRepresentation` subclass instance
The representation to take the cross product with.
Returns
-------
cross_product : `~astropy.coordinates.SphericalRepresentation`
With vectors perpendicular to both ``self`` and ``other``.
"""
self._raise_if_has_differentials("cross")
return self._dimensional_representation.from_cartesian(
self.to_cartesian().cross(other)
)
class RadialRepresentation(BaseRepresentation):
"""
Representation of the distance of points from the origin.
Note that this is mostly intended as an internal helper representation.
It can do little else but be used as a scale in multiplication.
Parameters
----------
distance : `~astropy.units.Quantity` ['length']
The distance of the point(s) from the origin.
differentials : dict, `~astropy.coordinates.BaseDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single `~astropy.coordinates.BaseDifferential`
instance (see `._compatible_differentials` for valid types), or a
dictionary of differential instances with keys set to a string
representation of the SI unit with which the differential (derivative)
is taken. For example, for a velocity differential on a positional
representation, the key would be ``'s'`` for seconds, indicating that
the derivative is a time derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
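Examples
--------
A sketch of the scaling use mentioned above: multiplying a unit-sphere
point by a radial representation yields a dimensional representation:
>>> from astropy import units as u
>>> from astropy.coordinates import (RadialRepresentation,
...                                  UnitSphericalRepresentation)
>>> usph = UnitSphericalRepresentation(10 * u.deg, 20 * u.deg)
>>> rad = RadialRepresentation(2 * u.kpc)
>>> (rad * usph).distance
<Distance 2. kpc>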
"""
attr_classes = {"distance": u.Quantity}
def __init__(self, distance, differentials=None, copy=True):
super().__init__(distance, differentials=differentials, copy=copy)
@property
def distance(self):
"""
The distance from the origin to the point(s).
"""
return self._distance
def unit_vectors(self):
"""Cartesian unit vectors are undefined for radial representation."""
raise NotImplementedError(
f"Cartesian unit vectors are undefined for {self.__class__} instances"
)
def scale_factors(self):
l = np.broadcast_to(1.0 * u.one, self.shape, subok=True)
return {"distance": l}
def to_cartesian(self):
"""Cannot convert radial representation to cartesian."""
raise NotImplementedError(
f"cannot convert {self.__class__} instance to cartesian."
)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to a radial coordinate.
"""
return cls(distance=cart.norm(), copy=False)
def __mul__(self, other):
if isinstance(other, BaseRepresentation):
return self.distance * other
else:
return super().__mul__(other)
def norm(self):
"""Vector norm.
Just the distance itself.
Returns
-------
norm : `~astropy.units.Quantity`
The distance itself, with the same shape as the representation.
"""
return self.distance
def _combine_operation(self, op, other, reverse=False):
return NotImplemented
def transform(self, matrix):
"""Radial representations cannot be transformed by a Cartesian matrix.
Parameters
----------
matrix : array-like
The transformation matrix in a Cartesian basis.
Must be a multiplication: a diagonal matrix with identical elements.
Must have shape (..., 3, 3), where the last two indices are the
matrix axes. Make sure that any leading axes are broadcastable
against the shape of this representation.
Raises
------
ValueError
If the matrix is not a multiplication.
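Examples
--------
A sketch with a scaled identity matrix (any other matrix raises):
>>> import numpy as np
>>> from astropy import units as u
>>> from astropy.coordinates import RadialRepresentation
>>> rep = RadialRepresentation(1 * u.pc)
>>> rep.transform(2 * np.identity(3)).distance  # doctest: +FLOAT_CMP
<Quantity 2. pc>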
"""
scl = matrix[..., 0, 0]
# check that the matrix is a scaled identity matrix on the last 2 axes.
if np.any(matrix != scl[..., np.newaxis, np.newaxis] * np.identity(3)):
raise ValueError(
"Radial representations can only be "
"transformed by a scaled identity matrix"
)
return self * scl
def _spherical_op_funcs(op, *args):
"""For given operator, return functions that adjust lon, lat, distance."""
if op is operator.neg:
return lambda x: x + 180 * u.deg, operator.neg, operator.pos
try:
scale_sign = np.sign(args[0])
except Exception:
# Applying the operator directly should always work, even if it
# yields a negative distance.
return operator.pos, operator.pos, lambda x: op(x, *args)
scale = abs(args[0])
return (
lambda x: x + 180 * u.deg * np.signbit(scale_sign),
lambda x: x * scale_sign,
lambda x: op(x, scale),
)
class SphericalRepresentation(BaseRepresentation):
"""
Representation of points in 3D spherical coordinates.
Parameters
----------
lon, lat : `~astropy.units.Quantity` ['angle']
The longitude and latitude of the point(s), in angular units. The
latitude should be between -90 and 90 degrees, and the longitude will
be wrapped to an angle between 0 and 360 degrees. These can also be
instances of `~astropy.coordinates.Angle`,
`~astropy.coordinates.Longitude`, or `~astropy.coordinates.Latitude`.
distance : `~astropy.units.Quantity` ['length']
The distance to the point(s). If the distance is a length, it is
passed to the :class:`~astropy.coordinates.Distance` class, otherwise
it is passed to the :class:`~astropy.units.Quantity` class.
differentials : dict, `~astropy.coordinates.BaseDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single `~astropy.coordinates.BaseDifferential`
instance (see `._compatible_differentials` for valid types), or a
dictionary of differential instances with keys set to a string
representation of the SI unit with which the differential (derivative)
is taken. For example, for a velocity differential on a positional
representation, the key would be ``'s'`` for seconds, indicating that
the derivative is a time derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
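Examples
--------
A minimal sketch; a length-like ``distance`` is converted to
`~astropy.coordinates.Distance`:
>>> from astropy import units as u
>>> from astropy.coordinates import SphericalRepresentation
>>> rep = SphericalRepresentation(10 * u.deg, 20 * u.deg, 1 * u.kpc)
>>> rep.distance
<Distance 1. kpc>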
"""
attr_classes = {"lon": Longitude, "lat": Latitude, "distance": u.Quantity}
_unit_representation = UnitSphericalRepresentation
def __init__(self, lon, lat=None, distance=None, differentials=None, copy=True):
super().__init__(lon, lat, distance, copy=copy, differentials=differentials)
if (
not isinstance(self._distance, Distance)
and self._distance.unit.physical_type == "length"
):
try:
self._distance = Distance(self._distance, copy=False)
except ValueError as e:
if e.args[0].startswith("distance must be >= 0"):
raise ValueError(
"Distance must be >= 0. To allow negative distance values, you"
" must explicitly pass in a `Distance` object with the the "
"argument 'allow_negative=True'."
) from e
else:
raise
@classproperty
def _compatible_differentials(cls):
return [
UnitSphericalDifferential,
UnitSphericalCosLatDifferential,
SphericalDifferential,
SphericalCosLatDifferential,
RadialDifferential,
]
@property
def lon(self):
"""
The longitude of the point(s).
"""
return self._lon
@property
def lat(self):
"""
The latitude of the point(s).
"""
return self._lat
@property
def distance(self):
"""
The distance from the origin to the point(s).
"""
return self._distance
def unit_vectors(self):
sinlon, coslon = np.sin(self.lon), np.cos(self.lon)
sinlat, coslat = np.sin(self.lat), np.cos(self.lat)
return {
"lon": CartesianRepresentation(-sinlon, coslon, 0.0, copy=False),
"lat": CartesianRepresentation(
-sinlat * coslon, -sinlat * sinlon, coslat, copy=False
),
"distance": CartesianRepresentation(
coslat * coslon, coslat * sinlon, sinlat, copy=False
),
}
def scale_factors(self, omit_coslat=False):
sf_lat = self.distance / u.radian
sf_lon = sf_lat if omit_coslat else sf_lat * np.cos(self.lat)
sf_distance = np.broadcast_to(1.0 * u.one, self.shape, subok=True)
return {"lon": sf_lon, "lat": sf_lat, "distance": sf_distance}
def represent_as(self, other_class, differential_class=None):
# Take a short cut if the other class is a spherical representation
if inspect.isclass(other_class):
if issubclass(other_class, PhysicsSphericalRepresentation):
diffs = self._re_represent_differentials(
other_class, differential_class
)
return other_class(
phi=self.lon,
theta=90 * u.deg - self.lat,
r=self.distance,
differentials=diffs,
copy=False,
)
elif issubclass(other_class, UnitSphericalRepresentation):
diffs = self._re_represent_differentials(
other_class, differential_class
)
return other_class(
lon=self.lon, lat=self.lat, differentials=diffs, copy=False
)
return super().represent_as(other_class, differential_class)
def to_cartesian(self):
"""
Converts spherical polar coordinates to 3D rectangular cartesian
coordinates.
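For example (a minimal sketch):
>>> from astropy import units as u
>>> from astropy.coordinates import SphericalRepresentation
>>> rep = SphericalRepresentation(0 * u.deg, 90 * u.deg, 2 * u.kpc)
>>> rep.to_cartesian().z  # doctest: +FLOAT_CMP
<Quantity 2. kpc>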
"""
# We need to convert Distance to Quantity to allow negative values.
if isinstance(self.distance, Distance):
d = self.distance.view(u.Quantity)
else:
d = self.distance
# erfa s2p: Convert spherical polar coordinates to p-vector.
p = erfa_ufunc.s2p(self.lon, self.lat, d)
return CartesianRepresentation(p, xyz_axis=-1, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
"""
p = cart.get_xyz(xyz_axis=-1)
# erfa p2s: P-vector to spherical polar coordinates.
return cls(*erfa_ufunc.p2s(p), copy=False)
def transform(self, matrix):
"""Transform the spherical coordinates using a 3x3 matrix.
This returns a new representation and does not modify the original one.
Any differentials attached to this representation will also be
transformed.
Parameters
----------
matrix : (3,3) array-like
A 3x3 matrix, such as a rotation matrix (or a stack of matrices).
"""
xyz = erfa_ufunc.s2c(self.lon, self.lat)
p = erfa_ufunc.rxp(matrix, xyz)
lon, lat, ur = erfa_ufunc.p2s(p)
rep = self.__class__(lon=lon, lat=lat, distance=self.distance * ur)
# handle differentials
new_diffs = {
k: d.transform(matrix, self, rep) for k, d in self.differentials.items()
}
return rep.with_differentials(new_diffs)
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units. For
spherical coordinates, this is just the absolute value of the distance.
Returns
-------
norm : `~astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
return np.abs(self.distance)
def _scale_operation(self, op, *args):
# TODO: expand special-casing to UnitSpherical and RadialDifferential.
if any(
differential.base_representation is not self.__class__
for differential in self.differentials.values()
):
return super()._scale_operation(op, *args)
lon_op, lat_op, distance_op = _spherical_op_funcs(op, *args)
result = self.__class__(
lon_op(self.lon), lat_op(self.lat), distance_op(self.distance), copy=False
)
for key, differential in self.differentials.items():
new_comps = (
op(getattr(differential, comp))
for op, comp in zip(
(operator.pos, lat_op, distance_op), differential.components
)
)
result.differentials[key] = differential.__class__(*new_comps, copy=False)
return result
class PhysicsSphericalRepresentation(BaseRepresentation):
"""
Representation of points in 3D spherical coordinates (using the physics
convention of using ``phi`` and ``theta`` for azimuth and inclination
from the pole).
Parameters
----------
phi, theta : `~astropy.units.Quantity` or str
The azimuth and inclination of the point(s), in angular units. The
inclination should be between 0 and 180 degrees, and the azimuth will
be wrapped to an angle between 0 and 360 degrees. These can also be
instances of `~astropy.coordinates.Angle`. If ``copy`` is `False`, ``phi``
will be changed in place if it is not between 0 and 360 degrees.
r : `~astropy.units.Quantity`
The distance to the point(s). If the distance is a length, it is
passed to the :class:`~astropy.coordinates.Distance` class, otherwise
it is passed to the :class:`~astropy.units.Quantity` class.
differentials : dict, `~astropy.coordinates.PhysicsSphericalDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single
`~astropy.coordinates.PhysicsSphericalDifferential` instance, or a dictionary of
differential instances with keys set to a string representation of the
SI unit with which the differential (derivative) is taken. For example,
for a velocity differential on a positional representation, the key
would be ``'s'`` for seconds, indicating that the derivative is a time
derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
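Examples
--------
A minimal sketch; ``theta`` is the inclination from the +z axis, so
the pole is at ``theta = 0``:
>>> from astropy import units as u
>>> from astropy.coordinates import PhysicsSphericalRepresentation
>>> rep = PhysicsSphericalRepresentation(30 * u.deg, 0 * u.deg, 1 * u.kpc)
>>> rep.to_cartesian().z  # doctest: +FLOAT_CMP
<Quantity 1. kpc>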
"""
attr_classes = {"phi": Angle, "theta": Angle, "r": u.Quantity}
def __init__(self, phi, theta=None, r=None, differentials=None, copy=True):
super().__init__(phi, theta, r, copy=copy, differentials=differentials)
# Wrap/validate phi/theta
# Note that _phi already holds our own copy if copy=True.
self._phi.wrap_at(360 * u.deg, inplace=True)
if np.any(self._theta < 0.0 * u.deg) or np.any(self._theta > 180.0 * u.deg):
raise ValueError(
"Inclination angle(s) must be within 0 deg <= angle <= 180 deg, "
f"got {theta.to(u.degree)}"
)
if self._r.unit.physical_type == "length":
self._r = self._r.view(Distance)
@property
def phi(self):
"""
The azimuth of the point(s).
"""
return self._phi
@property
def theta(self):
"""
The inclination of the point(s).
"""
return self._theta
@property
def r(self):
"""
The distance from the origin to the point(s).
"""
return self._r
def unit_vectors(self):
sinphi, cosphi = np.sin(self.phi), np.cos(self.phi)
sintheta, costheta = np.sin(self.theta), np.cos(self.theta)
return {
"phi": CartesianRepresentation(-sinphi, cosphi, 0.0, copy=False),
"theta": CartesianRepresentation(
costheta * cosphi, costheta * sinphi, -sintheta, copy=False
),
"r": CartesianRepresentation(
sintheta * cosphi, sintheta * sinphi, costheta, copy=False
),
}
def scale_factors(self):
r = self.r / u.radian
sintheta = np.sin(self.theta)
l = np.broadcast_to(1.0 * u.one, self.shape, subok=True)
return {"phi": r * sintheta, "theta": r, "r": l}
def represent_as(self, other_class, differential_class=None):
# Take a short cut if the other class is a spherical representation
if inspect.isclass(other_class):
if issubclass(other_class, SphericalRepresentation):
diffs = self._re_represent_differentials(
other_class, differential_class
)
return other_class(
lon=self.phi,
lat=90 * u.deg - self.theta,
distance=self.r,
differentials=diffs,
copy=False,
)
elif issubclass(other_class, UnitSphericalRepresentation):
diffs = self._re_represent_differentials(
other_class, differential_class
)
return other_class(
lon=self.phi,
lat=90 * u.deg - self.theta,
differentials=diffs,
copy=False,
)
return super().represent_as(other_class, differential_class)
def to_cartesian(self):
"""
Converts spherical polar coordinates to 3D rectangular cartesian
coordinates.
"""
# We need to convert Distance to Quantity to allow negative values.
if isinstance(self.r, Distance):
d = self.r.view(u.Quantity)
else:
d = self.r
x = d * np.sin(self.theta) * np.cos(self.phi)
y = d * np.sin(self.theta) * np.sin(self.phi)
z = d * np.cos(self.theta)
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
"""
s = np.hypot(cart.x, cart.y)
r = np.hypot(s, cart.z)
phi = np.arctan2(cart.y, cart.x)
theta = np.arctan2(s, cart.z)
return cls(phi=phi, theta=theta, r=r, copy=False)
def transform(self, matrix):
"""Transform the spherical coordinates using a 3x3 matrix.
This returns a new representation and does not modify the original one.
Any differentials attached to this representation will also be
transformed.
Parameters
----------
matrix : (3,3) array-like
A 3x3 matrix, such as a rotation matrix (or a stack of matrices).
"""
# apply transformation in unit-spherical coordinates
xyz = erfa_ufunc.s2c(self.phi, 90 * u.deg - self.theta)
p = erfa_ufunc.rxp(matrix, xyz)
lon, lat, ur = erfa_ufunc.p2s(p) # `ur` is transformed unit-`r`
# create transformed physics-spherical representation,
# reapplying the distance scaling
rep = self.__class__(phi=lon, theta=90 * u.deg - lat, r=self.r * ur)
new_diffs = {
k: d.transform(matrix, self, rep) for k, d in self.differentials.items()
}
return rep.with_differentials(new_diffs)
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units. For
spherical coordinates, this is just the absolute value of the radius.
Returns
-------
norm : `~astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
return np.abs(self.r)
def _scale_operation(self, op, *args):
if any(
differential.base_representation is not self.__class__
for differential in self.differentials.values()
):
return super()._scale_operation(op, *args)
phi_op, adjust_theta_sign, r_op = _spherical_op_funcs(op, *args)
# Also run phi_op on theta to ensure theta remains between 0 and 180:
# any time the scale is negative, we do -theta + 180 degrees.
result = self.__class__(
phi_op(self.phi),
phi_op(adjust_theta_sign(self.theta)),
r_op(self.r),
copy=False,
)
for key, differential in self.differentials.items():
new_comps = (
op(getattr(differential, comp))
for op, comp in zip(
(operator.pos, adjust_theta_sign, r_op), differential.components
)
)
result.differentials[key] = differential.__class__(*new_comps, copy=False)
return result
class CylindricalRepresentation(BaseRepresentation):
"""
Representation of points in 3D cylindrical coordinates.
Parameters
----------
rho : `~astropy.units.Quantity`
The distance from the z axis to the point(s).
phi : `~astropy.units.Quantity` or str
The azimuth of the point(s), in angular units, which will be wrapped
to an angle between 0 and 360 degrees. This can also be an instance of
`~astropy.coordinates.Angle`.
z : `~astropy.units.Quantity`
The z coordinate(s) of the point(s).
differentials : dict, `~astropy.coordinates.CylindricalDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single
`~astropy.coordinates.CylindricalDifferential` instance, or a dictionary of differential
instances with keys set to a string representation of the SI unit with
which the differential (derivative) is taken. For example, for a
velocity differential on a positional representation, the key would be
``'s'`` for seconds, indicating that the derivative is a time
derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
attr_classes = {"rho": u.Quantity, "phi": Angle, "z": u.Quantity}
def __init__(self, rho, phi=None, z=None, differentials=None, copy=True):
super().__init__(rho, phi, z, copy=copy, differentials=differentials)
if not self._rho.unit.is_equivalent(self._z.unit):
raise u.UnitsError("rho and z should have matching physical types")
@property
def rho(self):
"""
The distance of the point(s) from the z-axis.
"""
return self._rho
@property
def phi(self):
"""
The azimuth of the point(s).
"""
return self._phi
@property
def z(self):
"""
The height of the point(s).
"""
return self._z
def unit_vectors(self):
sinphi, cosphi = np.sin(self.phi), np.cos(self.phi)
l = np.broadcast_to(1.0, self.shape)
return {
"rho": CartesianRepresentation(cosphi, sinphi, 0, copy=False),
"phi": CartesianRepresentation(-sinphi, cosphi, 0, copy=False),
"z": CartesianRepresentation(0, 0, l, unit=u.one, copy=False),
}
def scale_factors(self):
rho = self.rho / u.radian
l = np.broadcast_to(1.0 * u.one, self.shape, subok=True)
return {"rho": l, "phi": rho, "z": l}
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to cylindrical polar
coordinates.
"""
rho = np.hypot(cart.x, cart.y)
phi = np.arctan2(cart.y, cart.x)
z = cart.z
return cls(rho=rho, phi=phi, z=z, copy=False)
def to_cartesian(self):
"""
Converts cylindrical polar coordinates to 3D rectangular cartesian
coordinates.
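For example (a minimal sketch):
>>> from astropy import units as u
>>> from astropy.coordinates import CylindricalRepresentation
>>> rep = CylindricalRepresentation(1 * u.m, 90 * u.deg, 2 * u.m)
>>> rep.to_cartesian().y  # doctest: +FLOAT_CMP
<Quantity 1. m>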
"""
x = self.rho * np.cos(self.phi)
y = self.rho * np.sin(self.phi)
z = self.z
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
def _scale_operation(self, op, *args):
if any(
differential.base_representation is not self.__class__
for differential in self.differentials.values()
):
return super()._scale_operation(op, *args)
phi_op, _, rho_op = _spherical_op_funcs(op, *args)
z_op = lambda x: op(x, *args)
result = self.__class__(
rho_op(self.rho), phi_op(self.phi), z_op(self.z), copy=False
)
for key, differential in self.differentials.items():
new_comps = (
op(getattr(differential, comp))
for op, comp in zip(
(rho_op, operator.pos, z_op), differential.components
)
)
result.differentials[key] = differential.__class__(*new_comps, copy=False)
return result
class BaseDifferential(BaseRepresentationOrDifferential):
r"""A base class representing differentials of representations.
These represent differences or derivatives along each component.
E.g., for physics spherical coordinates, these would be
:math:`\delta r, \delta \theta, \delta \phi`.
Parameters
----------
d_comp1, d_comp2, d_comp3 : `~astropy.units.Quantity` or subclass
The components of the 3D differentials. The names are the keys and the
subclasses the values of the ``attr_classes`` attribute.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
Notes
-----
All differential representation classes should subclass this base class,
and define a ``base_representation`` attribute with the class of the
regular `~astropy.coordinates.BaseRepresentation` for which differential
coordinates are provided. This will set up a default ``attr_classes``
instance with names equal to the base component names prefixed by ``d_``,
and all classes set to `~astropy.units.Quantity`, plus properties to access
those, and a default ``__init__`` for initialization.
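For example, a minimal subclass sketch (``MyDifferential`` is a
hypothetical name) needs nothing beyond the base representation::
    class MyDifferential(BaseDifferential):
        base_representation = CartesianRepresentation
This automatically creates ``attr_classes`` with components ``d_x``,
``d_y``, and ``d_z``, properties to access them, and registers the
class under its name.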
"""
def __init_subclass__(cls, **kwargs):
"""Set default ``attr_classes`` and component getters on a Differential.
For these, the components are those of the base representation prefixed
by 'd_', and the class is `~astropy.units.Quantity`.
"""
# Don't do anything for base helper classes.
if cls.__name__ in (
"BaseDifferential",
"BaseSphericalDifferential",
"BaseSphericalCosLatDifferential",
):
return
if not hasattr(cls, "base_representation"):
raise NotImplementedError(
"Differential representations must have a"
'"base_representation" class attribute.'
)
# If not defined explicitly, create attr_classes.
if not hasattr(cls, "attr_classes"):
base_attr_classes = cls.base_representation.attr_classes
cls.attr_classes = {"d_" + c: u.Quantity for c in base_attr_classes}
repr_name = cls.get_name()
if repr_name in DIFFERENTIAL_CLASSES:
raise ValueError(f"Differential class {repr_name} already defined")
DIFFERENTIAL_CLASSES[repr_name] = cls
_invalidate_reprdiff_cls_hash()
# If not defined explicitly, create properties for the components.
for component in cls.attr_classes:
if not hasattr(cls, component):
setattr(
cls,
component,
property(
_make_getter(component),
doc=f"Component '{component}' of the Differential.",
),
)
super().__init_subclass__(**kwargs)
@classmethod
def _check_base(cls, base):
if cls not in base._compatible_differentials:
raise TypeError(
f"Differential class {cls} is not compatible with the "
f"base (representation) class {base.__class__}"
)
def _get_deriv_key(self, base):
"""Given a base (representation instance), determine the unit of the
derivative by removing the representation unit from the component units
of this differential.
"""
# This check is just a last resort so we don't return a strange unit key
# from accidentally passing in the wrong base.
self._check_base(base)
for name in base.components:
comp = getattr(base, name)
d_comp = getattr(self, f"d_{name}", None)
if d_comp is not None:
d_unit = comp.unit / d_comp.unit
# This is quite a bit faster than using to_system() or going
# through Quantity()
d_unit_si = d_unit.decompose(u.si.bases)
d_unit_si._scale = 1 # remove the scale from the unit
return str(d_unit_si)
else:
raise RuntimeError(
"Invalid representation-differential units! This likely happened "
"because either the representation or the associated differential "
"have non-standard units. Check that the input positional data have "
"positional units, and the input velocity data have velocity units, "
"or are both dimensionless."
)
@classmethod
def _get_base_vectors(cls, base):
"""Get unit vectors and scale factors from base.
Parameters
----------
base : instance of ``self.base_representation``
The points for which the unit vectors and scale factors should be
retrieved.
Returns
-------
unit_vectors : dict of `~astropy.coordinates.CartesianRepresentation`
In the directions of the coordinates of base.
scale_factors : dict of `~astropy.units.Quantity`
Scale factors for each of the coordinates
Raises
------
TypeError : if the base is not of the correct type
"""
cls._check_base(base)
return base.unit_vectors(), base.scale_factors()
def to_cartesian(self, base):
"""Convert the differential to 3D rectangular cartesian coordinates.
Parameters
----------
base : instance of ``self.base_representation``
The points for which the differentials are to be converted: each of
the components is multiplied by its unit vectors and scale factors.
Returns
-------
`~astropy.coordinates.CartesianDifferential`
This object, converted.
"""
base_e, base_sf = self._get_base_vectors(base)
return functools.reduce(
operator.add,
(
getattr(self, d_c) * base_sf[c] * base_e[c]
for d_c, c in zip(self.components, base.components)
),
)
@classmethod
def from_cartesian(cls, other, base):
"""Convert the differential from 3D rectangular cartesian coordinates to
the desired class.
Parameters
----------
other
The object to convert into this differential.
base : `~astropy.coordinates.BaseRepresentation`
The points for which the differentials are to be converted: each of
the components is multiplied by its unit vectors and scale factors.
Will be converted to ``cls.base_representation`` if needed.
Returns
-------
`~astropy.coordinates.BaseDifferential` subclass instance
A new differential object that is this class' type.
"""
base = base.represent_as(cls.base_representation)
base_e, base_sf = cls._get_base_vectors(base)
return cls(
*(other.dot(e / base_sf[component]) for component, e in base_e.items()),
copy=False,
)
def represent_as(self, other_class, base):
"""Convert coordinates to another representation.
If the instance is of the requested class, it is returned unmodified.
By default, conversion is done via cartesian coordinates.
Parameters
----------
other_class : `~astropy.coordinates.BaseRepresentation` subclass
The type of representation to turn the coordinates into.
base : instance of ``self.base_representation``
Base relative to which the differentials are defined. If the other
class is a differential representation, the base will be converted
to its ``base_representation``.
"""
if other_class is self.__class__:
return self
# The default is to convert via cartesian coordinates.
self_cartesian = self.to_cartesian(base)
if issubclass(other_class, BaseDifferential):
return other_class.from_cartesian(self_cartesian, base)
else:
return other_class.from_cartesian(self_cartesian)
@classmethod
def from_representation(cls, representation, base):
"""Create a new instance of this representation from another one.
Parameters
----------
representation : `~astropy.coordinates.BaseRepresentation` instance
The presentation that should be converted to this class.
base : instance of ``cls.base_representation``
The base relative to which the differentials will be defined. If
the representation is a differential itself, the base will be
converted to its ``base_representation`` to help convert it.
"""
if isinstance(representation, BaseDifferential):
cartesian = representation.to_cartesian(
base.represent_as(representation.base_representation)
)
else:
cartesian = representation.to_cartesian()
return cls.from_cartesian(cartesian, base)
def transform(self, matrix, base, transformed_base):
"""Transform differential using a 3x3 matrix in a Cartesian basis.
This returns a new differential and does not modify the original one.
Parameters
----------
matrix : (3,3) array-like
A 3x3 (or stack thereof) matrix, such as a rotation matrix.
base : instance of ``cls.base_representation``
Base relative to which the differentials are defined. If the other
class is a differential representation, the base will be converted
to its ``base_representation``.
transformed_base : instance of ``cls.base_representation``
Base relative to which the transformed differentials are defined.
If the other class is a differential representation, the base will
be converted to its ``base_representation``.
"""
# route transformation through Cartesian
cdiff = self.represent_as(CartesianDifferential, base=base).transform(matrix)
# move back to original representation
diff = cdiff.represent_as(self.__class__, transformed_base)
return diff
def _scale_operation(self, op, *args, scaled_base=False):
"""Scale all components.
Parameters
----------
op : `~operator` callable
Operator to apply (e.g., `~operator.mul`, `~operator.neg`, etc.).
*args
Any arguments required for the operator (typically, what is to
be multiplied with, divided by).
scaled_base : bool, optional
Whether the base was scaled the same way. This affects whether
differential components should be scaled. For instance, a differential
in longitude should not be scaled if its spherical base is scaled
in radius.
"""
scaled_attrs = [op(getattr(self, c), *args) for c in self.components]
return self.__class__(*scaled_attrs, copy=False)
def _combine_operation(self, op, other, reverse=False):
"""Combine two differentials, or a differential with a representation.
If ``other`` is of the same differential type as ``self``, the
components will simply be combined. If ``other`` is a representation,
it will be used as a base for which to evaluate the differential,
and the result is a new representation.
Parameters
----------
op : `~operator` callable
Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.).
other : `~astropy.coordinates.BaseRepresentation` subclass instance
The other differential or representation.
reverse : bool
Whether the operands should be reversed (e.g., as we got here via
``self.__rsub__`` because ``self`` is a subclass of ``other``).
"""
if isinstance(self, type(other)):
first, second = (self, other) if not reverse else (other, self)
return self.__class__(
*[op(getattr(first, c), getattr(second, c)) for c in self.components]
)
else:
try:
self_cartesian = self.to_cartesian(other)
except TypeError:
return NotImplemented
return other._combine_operation(op, self_cartesian, not reverse)
def __sub__(self, other):
# avoid "differential - representation".
if isinstance(other, BaseRepresentation):
return NotImplemented
return super().__sub__(other)
def norm(self, base=None):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units.
Parameters
----------
base : instance of ``self.base_representation``
Base relative to which the differentials are defined. This is
required to calculate the physical size of the differential for
all but Cartesian differentials or radial differentials.
Returns
-------
norm : `~astropy.units.Quantity`
Vector norm, with the same shape as the representation.
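Examples
--------
A minimal sketch with a purely radial velocity (units chosen
arbitrarily; the norm is converted for readability):
>>> from astropy import units as u
>>> from astropy.coordinates import (SphericalDifferential,
...                                  SphericalRepresentation)
>>> base = SphericalRepresentation(0 * u.deg, 0 * u.deg, 1 * u.kpc)
>>> diff = SphericalDifferential(0 * u.mas / u.yr, 0 * u.mas / u.yr,
...                              10 * u.km / u.s)
>>> diff.norm(base).to(u.km / u.s)  # doctest: +FLOAT_CMP
<Quantity 10. km / s>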
"""
# RadialDifferential overrides this function, so there is no handling here
if not isinstance(self, CartesianDifferential) and base is None:
raise ValueError(
"`base` must be provided to calculate the norm of a"
f" {type(self).__name__}"
)
return self.to_cartesian(base).norm()
class CartesianDifferential(BaseDifferential):
"""Differentials in of points in 3D cartesian coordinates.
Parameters
----------
d_x, d_y, d_z : `~astropy.units.Quantity` or array
The x, y, and z coordinates of the differentials. If ``d_x``, ``d_y``,
and ``d_z`` have different shapes, they should be broadcastable. If not
quantities, ``unit`` should be set. If only ``d_x`` is given, it is
assumed that it contains an array with the 3 coordinates stored along
``xyz_axis``.
unit : `~astropy.units.Unit` or str
If given, the differentials will be converted to this unit (or taken to
be in this unit if not given).
xyz_axis : int, optional
The axis along which the coordinates are stored when a single array is
provided instead of distinct ``d_x``, ``d_y``, and ``d_z`` (default: 0).
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
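Examples
--------
A minimal sketch:
>>> from astropy import units as u
>>> from astropy.coordinates import CartesianDifferential
>>> d = CartesianDifferential(1 * u.km / u.s, 2 * u.km / u.s,
...                           2 * u.km / u.s)
>>> d.norm()  # doctest: +FLOAT_CMP
<Quantity 3. km / s>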
"""
base_representation = CartesianRepresentation
_d_xyz = None
def __init__(self, d_x, d_y=None, d_z=None, unit=None, xyz_axis=None, copy=True):
if d_y is None and d_z is None:
if isinstance(d_x, np.ndarray) and d_x.dtype.kind not in "OV":
# Short-cut for 3-D array input.
d_x = u.Quantity(d_x, unit, copy=copy, subok=True)
# Keep a link to the array with all three coordinates
# so that we can return it quickly if needed in get_xyz.
self._d_xyz = d_x
if xyz_axis:
d_x = np.moveaxis(d_x, xyz_axis, 0)
self._xyz_axis = xyz_axis
else:
self._xyz_axis = 0
self._d_x, self._d_y, self._d_z = d_x
return
else:
d_x, d_y, d_z = d_x
if xyz_axis is not None:
raise ValueError(
"xyz_axis should only be set if d_x, d_y, and d_z are in a single array"
" passed in through d_x, i.e., d_y and d_z should not be not given."
)
if d_y is None or d_z is None:
raise ValueError(
"d_x, d_y, and d_z are required to instantiate"
f" {self.__class__.__name__}"
)
if unit is not None:
d_x = u.Quantity(d_x, unit, copy=copy, subok=True)
d_y = u.Quantity(d_y, unit, copy=copy, subok=True)
d_z = u.Quantity(d_z, unit, copy=copy, subok=True)
copy = False
super().__init__(d_x, d_y, d_z, copy=copy)
if not (
self._d_x.unit.is_equivalent(self._d_y.unit)
and self._d_x.unit.is_equivalent(self._d_z.unit)
):
raise u.UnitsError("d_x, d_y and d_z should have equivalent units.")
def to_cartesian(self, base=None):
return CartesianRepresentation(*[getattr(self, c) for c in self.components])
@classmethod
def from_cartesian(cls, other, base=None):
return cls(*[getattr(other, c) for c in other.components])
def transform(self, matrix, base=None, transformed_base=None):
"""Transform differentials using a 3x3 matrix in a Cartesian basis.
This returns a new differential and does not modify the original one.
Parameters
----------
matrix : (3,3) array-like
A 3x3 (or stack thereof) matrix, such as a rotation matrix.
base, transformed_base : `~astropy.coordinates.CartesianRepresentation` or None, optional
Not used in the Cartesian transformation.
"""
# erfa rxp: Multiply a p-vector by an r-matrix.
p = erfa_ufunc.rxp(matrix, self.get_d_xyz(xyz_axis=-1))
return self.__class__(p, xyz_axis=-1, copy=False)
def get_d_xyz(self, xyz_axis=0):
"""Return a vector array of the x, y, and z coordinates.
Parameters
----------
xyz_axis : int, optional
The axis in the final array along which the x, y, z components
should be stored (default: 0).
Returns
-------
d_xyz : `~astropy.units.Quantity`
With dimension 3 along ``xyz_axis``. Note that, if possible,
this will be a view.
"""
if self._d_xyz is not None:
if self._xyz_axis == xyz_axis:
return self._d_xyz
else:
return np.moveaxis(self._d_xyz, self._xyz_axis, xyz_axis)
# Create combined array. TO DO: keep it in _d_xyz for repeated use?
# But then in-place changes have to cancel it. Likely best to
# also update components.
return np.stack([self._d_x, self._d_y, self._d_z], axis=xyz_axis)
d_xyz = property(get_d_xyz)
class BaseSphericalDifferential(BaseDifferential):
def _d_lon_coslat(self, base):
"""Convert longitude differential d_lon to d_lon_coslat.
Parameters
----------
base : instance of ``cls.base_representation``
The base from which the latitude will be taken.
"""
self._check_base(base)
return self.d_lon * np.cos(base.lat)
@classmethod
def _get_d_lon(cls, d_lon_coslat, base):
"""Convert longitude differential d_lon_coslat to d_lon.
Parameters
----------
d_lon_coslat : `~astropy.units.Quantity`
Longitude differential that includes ``cos(lat)``.
base : instance of ``cls.base_representation``
The base from which the latitude will be taken.
"""
cls._check_base(base)
return d_lon_coslat / np.cos(base.lat)
def _combine_operation(self, op, other, reverse=False):
"""Combine two differentials, or a differential with a representation.
If ``other`` is of the same differential type as ``self``, the
components will simply be combined. If both are different parts of
a `~astropy.coordinates.SphericalDifferential` (e.g., a
`~astropy.coordinates.UnitSphericalDifferential` and a
`~astropy.coordinates.RadialDifferential`), they will be combined
appropriately.
If ``other`` is a representation, it will be used as a base for which
to evaluate the differential, and the result is a new representation.
Parameters
----------
op : `~operator` callable
Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.).
other : `~astropy.coordinates.BaseRepresentation` subclass instance
The other differential or representation.
reverse : bool
Whether the operands should be reversed (e.g., as we got here via
``self.__rsub__`` because ``self`` is a subclass of ``other``).
"""
if (
isinstance(other, BaseSphericalDifferential)
and not isinstance(self, type(other))
or isinstance(other, RadialDifferential)
):
all_components = set(self.components) | set(other.components)
first, second = (self, other) if not reverse else (other, self)
result_args = {
c: op(getattr(first, c, 0.0), getattr(second, c, 0.0))
for c in all_components
}
return SphericalDifferential(**result_args)
return super()._combine_operation(op, other, reverse)
class UnitSphericalDifferential(BaseSphericalDifferential):
"""Differential(s) of points on a unit sphere.
Parameters
----------
d_lon, d_lat : `~astropy.units.Quantity`
The longitude and latitude of the differentials.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
base_representation = UnitSphericalRepresentation
@classproperty
def _dimensional_differential(cls):
return SphericalDifferential
def __init__(self, d_lon, d_lat=None, copy=True):
super().__init__(d_lon, d_lat, copy=copy)
if not self._d_lon.unit.is_equivalent(self._d_lat.unit):
raise u.UnitsError("d_lon and d_lat should have equivalent units.")
@classmethod
def from_cartesian(cls, other, base):
# Go via the dimensional equivalent, so that the longitude and latitude
# differentials correctly take into account the norm of the base.
dimensional = cls._dimensional_differential.from_cartesian(other, base)
return dimensional.represent_as(cls)
def to_cartesian(self, base):
if isinstance(base, SphericalRepresentation):
scale = base.distance
elif isinstance(base, PhysicsSphericalRepresentation):
scale = base.r
else:
return super().to_cartesian(base)
base = base.represent_as(UnitSphericalRepresentation)
return scale * super().to_cartesian(base)
def represent_as(self, other_class, base=None):
# Only have enough information to represent other unit-spherical.
if issubclass(other_class, UnitSphericalCosLatDifferential):
return other_class(self._d_lon_coslat(base), self.d_lat)
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# All spherical differentials can be done without going to Cartesian,
# though CosLat needs base for the latitude.
if isinstance(representation, SphericalDifferential):
return cls(representation.d_lon, representation.d_lat)
elif isinstance(
representation,
(SphericalCosLatDifferential, UnitSphericalCosLatDifferential),
):
d_lon = cls._get_d_lon(representation.d_lon_coslat, base)
return cls(d_lon, representation.d_lat)
elif isinstance(representation, PhysicsSphericalDifferential):
return cls(representation.d_phi, -representation.d_theta)
return super().from_representation(representation, base)
def transform(self, matrix, base, transformed_base):
"""Transform differential using a 3x3 matrix in a Cartesian basis.
This returns a new differential and does not modify the original one.
Parameters
----------
matrix : (3,3) array-like
A 3x3 (or stack thereof) matrix, such as a rotation matrix.
base : instance of ``cls.base_representation``
Base relative to which the differentials are defined. If the other
class is a differential representation, the base will be converted
to its ``base_representation``.
transformed_base : instance of ``cls.base_representation``
Base relative to which the transformed differentials are defined.
If the other class is a differential representation, the base will
be converted to its ``base_representation``.
"""
# the transformation matrix does not need to be a rotation matrix,
# so the unit-distance is not guaranteed. For speed, we check if the
# matrix is in O(3) and preserves lengths.
if np.all(is_O3(matrix)): # remain in unit-rep
# TODO! implement without Cartesian intermediate step.
# some of this can be moved to the parent class.
diff = super().transform(matrix, base, transformed_base)
else: # switch to dimensional representation
du = self.d_lon.unit / base.lon.unit # derivative unit
diff = self._dimensional_differential(
d_lon=self.d_lon, d_lat=self.d_lat, d_distance=0 * du
).transform(matrix, base, transformed_base)
return diff
def _scale_operation(self, op, *args, scaled_base=False):
if scaled_base:
return self.copy()
else:
return super()._scale_operation(op, *args)
class SphericalDifferential(BaseSphericalDifferential):
"""Differential(s) of points in 3D spherical coordinates.
Parameters
----------
d_lon, d_lat : `~astropy.units.Quantity`
The differential longitude and latitude.
d_distance : `~astropy.units.Quantity`
The differential distance.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
base_representation = SphericalRepresentation
_unit_differential = UnitSphericalDifferential
def __init__(self, d_lon, d_lat=None, d_distance=None, copy=True):
super().__init__(d_lon, d_lat, d_distance, copy=copy)
if not self._d_lon.unit.is_equivalent(self._d_lat.unit):
raise u.UnitsError("d_lon and d_lat should have equivalent units.")
def represent_as(self, other_class, base=None):
# All spherical differentials can be done without going to Cartesian,
# though CosLat needs base for the latitude.
if issubclass(other_class, UnitSphericalDifferential):
return other_class(self.d_lon, self.d_lat)
elif issubclass(other_class, RadialDifferential):
return other_class(self.d_distance)
elif issubclass(other_class, SphericalCosLatDifferential):
return other_class(self._d_lon_coslat(base), self.d_lat, self.d_distance)
elif issubclass(other_class, UnitSphericalCosLatDifferential):
return other_class(self._d_lon_coslat(base), self.d_lat)
elif issubclass(other_class, PhysicsSphericalDifferential):
return other_class(self.d_lon, -self.d_lat, self.d_distance)
else:
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# Other spherical differentials can be done without going to Cartesian,
# though CosLat needs base for the latitude.
if isinstance(representation, SphericalCosLatDifferential):
d_lon = cls._get_d_lon(representation.d_lon_coslat, base)
return cls(d_lon, representation.d_lat, representation.d_distance)
elif isinstance(representation, PhysicsSphericalDifferential):
return cls(
representation.d_phi, -representation.d_theta, representation.d_r
)
return super().from_representation(representation, base)
def _scale_operation(self, op, *args, scaled_base=False):
if scaled_base:
return self.__class__(self.d_lon, self.d_lat, op(self.d_distance, *args))
else:
return super()._scale_operation(op, *args)
class BaseSphericalCosLatDifferential(BaseDifferential):
"""Differentials from points on a spherical base representation.
With cos(lat) assumed to be included in the longitude differential.
"""
@classmethod
def _get_base_vectors(cls, base):
"""Get unit vectors and scale factors from (unit)spherical base.
Parameters
----------
base : instance of ``self.base_representation``
The points for which the unit vectors and scale factors should be
retrieved.
Returns
-------
unit_vectors : dict of `~astropy.coordinates.CartesianRepresentation`
In the directions of the coordinates of base.
scale_factors : dict of `~astropy.units.Quantity`
Scale factors for each of the coordinates. The scale factor for
longitude does not include the cos(lat) factor.
Raises
------
TypeError : if the base is not of the correct type
"""
cls._check_base(base)
return base.unit_vectors(), base.scale_factors(omit_coslat=True)
def _d_lon(self, base):
"""Convert longitude differential with cos(lat) to one without.
Parameters
----------
base : instance of ``cls.base_representation``
The base from which the latitude will be taken.
"""
self._check_base(base)
return self.d_lon_coslat / np.cos(base.lat)
@classmethod
def _get_d_lon_coslat(cls, d_lon, base):
"""Convert longitude differential d_lon to d_lon_coslat.
Parameters
----------
d_lon : `~astropy.units.Quantity`
Value of the longitude differential without ``cos(lat)``.
base : instance of ``cls.base_representation``
The base from which the latitude will be taken.
"""
cls._check_base(base)
return d_lon * np.cos(base.lat)
def _combine_operation(self, op, other, reverse=False):
"""Combine two differentials, or a differential with a representation.
If ``other`` is of the same differential type as ``self``, the
components will simply be combined. If both are different parts of
a `~astropy.coordinates.SphericalDifferential` (e.g., a
`~astropy.coordinates.UnitSphericalDifferential` and a
        `~astropy.coordinates.RadialDifferential`), they will be combined
appropriately.
If ``other`` is a representation, it will be used as a base for which
to evaluate the differential, and the result is a new representation.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.).
other : `~astropy.coordinates.BaseRepresentation` subclass instance
The other differential or representation.
reverse : bool
Whether the operands should be reversed (e.g., as we got here via
``self.__rsub__`` because ``self`` is a subclass of ``other``).
"""
if (
isinstance(other, BaseSphericalCosLatDifferential)
and not isinstance(self, type(other))
or isinstance(other, RadialDifferential)
):
all_components = set(self.components) | set(other.components)
first, second = (self, other) if not reverse else (other, self)
result_args = {
c: op(getattr(first, c, 0.0), getattr(second, c, 0.0))
for c in all_components
}
return SphericalCosLatDifferential(**result_args)
return super()._combine_operation(op, other, reverse)
class UnitSphericalCosLatDifferential(BaseSphericalCosLatDifferential):
"""Differential(s) of points on a unit sphere.
Parameters
----------
d_lon_coslat, d_lat : `~astropy.units.Quantity`
        The differential longitude (with cos(lat) included) and latitude.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
base_representation = UnitSphericalRepresentation
attr_classes = {"d_lon_coslat": u.Quantity, "d_lat": u.Quantity}
@classproperty
def _dimensional_differential(cls):
return SphericalCosLatDifferential
def __init__(self, d_lon_coslat, d_lat=None, copy=True):
super().__init__(d_lon_coslat, d_lat, copy=copy)
if not self._d_lon_coslat.unit.is_equivalent(self._d_lat.unit):
raise u.UnitsError("d_lon_coslat and d_lat should have equivalent units.")
@classmethod
def from_cartesian(cls, other, base):
# Go via the dimensional equivalent, so that the longitude and latitude
# differentials correctly take into account the norm of the base.
dimensional = cls._dimensional_differential.from_cartesian(other, base)
return dimensional.represent_as(cls)
def to_cartesian(self, base):
if isinstance(base, SphericalRepresentation):
scale = base.distance
elif isinstance(base, PhysicsSphericalRepresentation):
scale = base.r
else:
return super().to_cartesian(base)
base = base.represent_as(UnitSphericalRepresentation)
return scale * super().to_cartesian(base)
def represent_as(self, other_class, base=None):
# Only have enough information to represent other unit-spherical.
if issubclass(other_class, UnitSphericalDifferential):
return other_class(self._d_lon(base), self.d_lat)
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# All spherical differentials can be done without going to Cartesian,
        # though those without CosLat need the base for the latitude.
if isinstance(representation, SphericalCosLatDifferential):
return cls(representation.d_lon_coslat, representation.d_lat)
elif isinstance(
representation, (SphericalDifferential, UnitSphericalDifferential)
):
d_lon_coslat = cls._get_d_lon_coslat(representation.d_lon, base)
return cls(d_lon_coslat, representation.d_lat)
elif isinstance(representation, PhysicsSphericalDifferential):
d_lon_coslat = cls._get_d_lon_coslat(representation.d_phi, base)
return cls(d_lon_coslat, -representation.d_theta)
return super().from_representation(representation, base)
def transform(self, matrix, base, transformed_base):
"""Transform differential using a 3x3 matrix in a Cartesian basis.
This returns a new differential and does not modify the original one.
Parameters
----------
matrix : (3,3) array-like
A 3x3 (or stack thereof) matrix, such as a rotation matrix.
base : instance of ``cls.base_representation``
Base relative to which the differentials are defined. If the other
class is a differential representation, the base will be converted
to its ``base_representation``.
transformed_base : instance of ``cls.base_representation``
Base relative to which the transformed differentials are defined.
If the other class is a differential representation, the base will
be converted to its ``base_representation``.
"""
# the transformation matrix does not need to be a rotation matrix,
# so the unit-distance is not guaranteed. For speed, we check if the
# matrix is in O(3) and preserves lengths.
if np.all(is_O3(matrix)): # remain in unit-rep
# TODO! implement without Cartesian intermediate step.
diff = super().transform(matrix, base, transformed_base)
else: # switch to dimensional representation
du = self.d_lat.unit / base.lat.unit # derivative unit
diff = self._dimensional_differential(
d_lon_coslat=self.d_lon_coslat, d_lat=self.d_lat, d_distance=0 * du
).transform(matrix, base, transformed_base)
return diff
def _scale_operation(self, op, *args, scaled_base=False):
if scaled_base:
return self.copy()
else:
return super()._scale_operation(op, *args)
class SphericalCosLatDifferential(BaseSphericalCosLatDifferential):
"""Differential(s) of points in 3D spherical coordinates.
Parameters
----------
d_lon_coslat, d_lat : `~astropy.units.Quantity`
The differential longitude (with cos(lat) included) and latitude.
d_distance : `~astropy.units.Quantity`
The differential distance.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
base_representation = SphericalRepresentation
_unit_differential = UnitSphericalCosLatDifferential
attr_classes = {
"d_lon_coslat": u.Quantity,
"d_lat": u.Quantity,
"d_distance": u.Quantity,
}
def __init__(self, d_lon_coslat, d_lat=None, d_distance=None, copy=True):
super().__init__(d_lon_coslat, d_lat, d_distance, copy=copy)
if not self._d_lon_coslat.unit.is_equivalent(self._d_lat.unit):
raise u.UnitsError("d_lon_coslat and d_lat should have equivalent units.")
def represent_as(self, other_class, base=None):
# All spherical differentials can be done without going to Cartesian,
# though some need base for the latitude to remove cos(lat).
if issubclass(other_class, UnitSphericalCosLatDifferential):
return other_class(self.d_lon_coslat, self.d_lat)
elif issubclass(other_class, RadialDifferential):
return other_class(self.d_distance)
elif issubclass(other_class, SphericalDifferential):
return other_class(self._d_lon(base), self.d_lat, self.d_distance)
elif issubclass(other_class, UnitSphericalDifferential):
return other_class(self._d_lon(base), self.d_lat)
elif issubclass(other_class, PhysicsSphericalDifferential):
return other_class(self._d_lon(base), -self.d_lat, self.d_distance)
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# Other spherical differentials can be done without going to Cartesian,
# though we need base for the latitude to remove coslat.
if isinstance(representation, SphericalDifferential):
d_lon_coslat = cls._get_d_lon_coslat(representation.d_lon, base)
return cls(d_lon_coslat, representation.d_lat, representation.d_distance)
elif isinstance(representation, PhysicsSphericalDifferential):
d_lon_coslat = cls._get_d_lon_coslat(representation.d_phi, base)
return cls(d_lon_coslat, -representation.d_theta, representation.d_r)
return super().from_representation(representation, base)
def _scale_operation(self, op, *args, scaled_base=False):
if scaled_base:
return self.__class__(
self.d_lon_coslat, self.d_lat, op(self.d_distance, *args)
)
else:
return super()._scale_operation(op, *args)
class RadialDifferential(BaseDifferential):
"""Differential(s) of radial distances.
Parameters
----------
d_distance : `~astropy.units.Quantity`
The differential distance.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
base_representation = RadialRepresentation
def to_cartesian(self, base):
unit_vec = base.represent_as(UnitSphericalRepresentation).to_cartesian()
return self.d_distance * unit_vec
def norm(self, base=None):
return self.d_distance
@classmethod
def from_cartesian(cls, other, base):
return cls(
other.dot(base.represent_as(UnitSphericalRepresentation)), copy=False
)
@classmethod
def from_representation(cls, representation, base=None):
if isinstance(
representation, (SphericalDifferential, SphericalCosLatDifferential)
):
return cls(representation.d_distance)
elif isinstance(representation, PhysicsSphericalDifferential):
return cls(representation.d_r)
else:
return super().from_representation(representation, base)
def _combine_operation(self, op, other, reverse=False):
if isinstance(other, self.base_representation):
if reverse:
first, second = other.distance, self.d_distance
else:
first, second = self.d_distance, other.distance
return other.__class__(op(first, second), copy=False)
elif isinstance(
other, (BaseSphericalDifferential, BaseSphericalCosLatDifferential)
):
all_components = set(self.components) | set(other.components)
first, second = (self, other) if not reverse else (other, self)
result_args = {
c: op(getattr(first, c, 0.0), getattr(second, c, 0.0))
for c in all_components
}
return SphericalDifferential(**result_args)
else:
return super()._combine_operation(op, other, reverse)
class PhysicsSphericalDifferential(BaseDifferential):
"""Differential(s) of 3D spherical coordinates using physics convention.
Parameters
----------
d_phi, d_theta : `~astropy.units.Quantity`
The differential azimuth and inclination.
d_r : `~astropy.units.Quantity`
The differential radial distance.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
base_representation = PhysicsSphericalRepresentation
def __init__(self, d_phi, d_theta=None, d_r=None, copy=True):
super().__init__(d_phi, d_theta, d_r, copy=copy)
if not self._d_phi.unit.is_equivalent(self._d_theta.unit):
raise u.UnitsError("d_phi and d_theta should have equivalent units.")
def represent_as(self, other_class, base=None):
# All spherical differentials can be done without going to Cartesian,
# though CosLat needs base for the latitude. For those, explicitly
# do the equivalent of self._d_lon_coslat in SphericalDifferential.
if issubclass(other_class, SphericalDifferential):
return other_class(self.d_phi, -self.d_theta, self.d_r)
elif issubclass(other_class, UnitSphericalDifferential):
return other_class(self.d_phi, -self.d_theta)
elif issubclass(other_class, SphericalCosLatDifferential):
self._check_base(base)
d_lon_coslat = self.d_phi * np.sin(base.theta)
return other_class(d_lon_coslat, -self.d_theta, self.d_r)
elif issubclass(other_class, UnitSphericalCosLatDifferential):
self._check_base(base)
d_lon_coslat = self.d_phi * np.sin(base.theta)
return other_class(d_lon_coslat, -self.d_theta)
elif issubclass(other_class, RadialDifferential):
return other_class(self.d_r)
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# Other spherical differentials can be done without going to Cartesian,
# though we need base for the latitude to remove coslat. For that case,
# do the equivalent of cls._d_lon in SphericalDifferential.
if isinstance(representation, SphericalDifferential):
return cls(
representation.d_lon, -representation.d_lat, representation.d_distance
)
elif isinstance(representation, SphericalCosLatDifferential):
cls._check_base(base)
d_phi = representation.d_lon_coslat / np.sin(base.theta)
return cls(d_phi, -representation.d_lat, representation.d_distance)
return super().from_representation(representation, base)
def _scale_operation(self, op, *args, scaled_base=False):
if scaled_base:
return self.__class__(self.d_phi, self.d_theta, op(self.d_r, *args))
else:
return super()._scale_operation(op, *args)
class CylindricalDifferential(BaseDifferential):
"""Differential(s) of points in cylindrical coordinates.
Parameters
----------
d_rho : `~astropy.units.Quantity` ['speed']
The differential cylindrical radius.
d_phi : `~astropy.units.Quantity` ['angular speed']
The differential azimuth.
d_z : `~astropy.units.Quantity` ['speed']
The differential height.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
base_representation = CylindricalRepresentation
    def __init__(self, d_rho, d_phi=None, d_z=None, copy=True):
super().__init__(d_rho, d_phi, d_z, copy=copy)
if not self._d_rho.unit.is_equivalent(self._d_z.unit):
raise u.UnitsError("d_rho and d_z should have equivalent units.")
|
86453ef42493ce11e189012ccb3fbf4e07da027200126720fa7c4a3e2bbb500d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains convenience functions for getting a coordinate object
for a named object by querying SESAME and getting the first returned result.
Note that this is intended to be a convenience, and is very simple. If you
need precise coordinates for an object you should find the appropriate
reference for that measurement and input the coordinates manually.
"""
# Standard library
import os
import re
import socket
import urllib.error
import urllib.parse
import urllib.request
# Astropy
from astropy import units as u
from astropy.utils import data
from astropy.utils.data import download_file, get_file_contents
from astropy.utils.state import ScienceState
from .sky_coordinate import SkyCoord
__all__ = ["get_icrs_coordinates"]
class sesame_url(ScienceState):
"""
The URL(s) to Sesame's web-queryable database.
"""
_value = [
"http://cdsweb.u-strasbg.fr/cgi-bin/nph-sesame/",
"http://vizier.cfa.harvard.edu/viz-bin/nph-sesame/",
]
@classmethod
def validate(cls, value):
# TODO: Implement me
return value
class sesame_database(ScienceState):
"""
This specifies the default database that SESAME will query when
using the name resolve mechanism in the coordinates
subpackage. Default is to search all databases, but this can be
'all', 'simbad', 'ned', or 'vizier'.
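    Examples
    --------
    A usage sketch (skipped, since it requires network access):
    >>> from astropy.coordinates import get_icrs_coordinates, name_resolve
    >>> with name_resolve.sesame_database.set('simbad'):  # doctest: +SKIP
    ...     coord = get_icrs_coordinates('M42')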
"""
_value = "all"
@classmethod
def validate(cls, value):
if value not in ["all", "simbad", "ned", "vizier"]:
raise ValueError(f"Unknown database '{value}'")
return value
class NameResolveError(Exception):
pass
def _parse_response(resp_data):
"""
Given a string response from SESAME, parse out the coordinates by looking
    for a line starting with ``%J``, indicating ICRS J2000 coordinates.
Parameters
----------
resp_data : str
The string HTTP response from SESAME.
Returns
-------
ra : str
The string Right Ascension parsed from the HTTP response.
dec : str
The string Declination parsed from the HTTP response.
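    Examples
    --------
    A minimal sketch with a made-up response line in the SESAME ``%J`` format:
    >>> _parse_response("%J 083.82208 -05.39111 = NAME Orion Nebula")
    ('083.82208', '-05.39111')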
"""
pattr = re.compile(r"%J\s*([0-9\.]+)\s*([\+\-\.0-9]+)")
matched = pattr.search(resp_data)
if matched is None:
return None, None
else:
ra, dec = matched.groups()
return ra, dec
def get_icrs_coordinates(name, parse=False, cache=False):
"""
Retrieve an ICRS object by using an online name resolving service to
retrieve coordinates for the specified name. By default, this will
search all available databases until a match is found. If you would like
to specify the database, use the science state
``astropy.coordinates.name_resolve.sesame_database``. You can also
specify a list of servers to use for querying Sesame using the science
state ``astropy.coordinates.name_resolve.sesame_url``. This will try
each one in order until a valid response is returned. By default, this
list includes the main Sesame host and a mirror at vizier. The
configuration item `astropy.utils.data.Conf.remote_timeout` controls the
number of seconds to wait for a response from the server before giving
up.
Parameters
----------
name : str
The name of the object to get coordinates for, e.g. ``'M42'``.
parse : bool
Whether to attempt extracting the coordinates from the name by
        parsing with a regex. For object catalog names that have
        J-coordinates embedded in them, e.g.
        'CRTS SSS100805 J194428-420209', this may be much faster than a
sesame query for the same object name. The coordinates extracted
in this way may differ from the database coordinates by a few
deci-arcseconds, so only use this option if you do not need
sub-arcsecond accuracy for coordinates.
cache : bool, str, optional
Determines whether to cache the results or not. Passed through to
`~astropy.utils.data.download_file`, so pass "update" to update the
cached value.
Returns
-------
coord : `astropy.coordinates.ICRS` object
The object's coordinates in the ICRS frame.
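    Examples
    --------
    Usage sketches (skipped, since they require network access):
    >>> coord = get_icrs_coordinates('M42')  # doctest: +SKIP
    >>> coord = get_icrs_coordinates('CRTS SSS100805 J194428-420209', parse=True)  # doctest: +SKIP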
"""
    # if requested, first try to extract coordinates embedded in the object name.
# Do this first since it may be much faster than doing the sesame query
if parse:
from . import jparser
if jparser.search(name):
return jparser.to_skycoord(name)
else:
# if the parser failed, fall back to sesame query.
pass
# maybe emit a warning instead of silently falling back to sesame?
database = sesame_database.get()
# The web API just takes the first letter of the database name
db = database.upper()[0]
# Make sure we don't have duplicates in the url list
urls = []
domains = []
for url in sesame_url.get():
domain = urllib.parse.urlparse(url).netloc
# Check for duplicates
if domain not in domains:
domains.append(domain)
# Add the query to the end of the url, add to url list
            # Plain string joining: os.path.join is not URL-safe (it would
            # use backslashes on Windows).
            fmt_url = url.rstrip("/") + "/{db}?{name}"
fmt_url = fmt_url.format(name=urllib.parse.quote(name), db=db)
urls.append(fmt_url)
exceptions = []
for url in urls:
try:
resp_data = get_file_contents(
download_file(url, cache=cache, show_progress=False)
)
break
except urllib.error.URLError as e:
exceptions.append(e)
continue
except socket.timeout as e:
# There are some cases where urllib2 does not catch socket.timeout
# especially while receiving response data on an already previously
# working request
e.reason = (
"Request took longer than the allowed "
f"{data.conf.remote_timeout:.1f} seconds"
)
exceptions.append(e)
continue
    # All Sesame URLs failed...
else:
messages = [f"{url}: {e.reason}" for url, e in zip(urls, exceptions)]
raise NameResolveError(
"All Sesame queries failed. Unable to retrieve coordinates. See errors per"
f" URL below: \n {os.linesep.join(messages)}"
)
ra, dec = _parse_response(resp_data)
if ra is None or dec is None:
if db == "A":
err = f"Unable to find coordinates for name '{name}' using {url}"
else:
err = (
f"Unable to find coordinates for name '{name}' in database"
f" {database} using {url}"
)
raise NameResolveError(err)
# Return SkyCoord object
sc = SkyCoord(ra=ra, dec=dec, unit=(u.degree, u.degree), frame="icrs")
return sc
|
4f9414817a533a9b28a08efe77859956092b5a0779197d8e4523b8a86342562b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains utility functions for working with angles. These are both
used internally in `astropy.coordinates.angles`, and of possible use externally.
"""
__all__ = [
"angular_separation",
"position_angle",
"offset_by",
"golden_spiral_grid",
"uniform_spherical_random_surface",
"uniform_spherical_random_volume",
]
# Third-party
import numpy as np
# Astropy
import astropy.units as u
from astropy.coordinates.representation import (
SphericalRepresentation,
UnitSphericalRepresentation,
)
_TWOPI = 2 * np.pi
def angular_separation(lon1, lat1, lon2, lat2):
"""
Angular separation between two points on a sphere.
Parameters
----------
lon1, lat1, lon2, lat2 : `~astropy.coordinates.Angle`, `~astropy.units.Quantity` or float
Longitude and latitude of the two points. Quantities should be in
angular units; floats in radians.
Returns
-------
angular separation : `~astropy.units.Quantity` ['angle'] or float
Type depends on input; ``Quantity`` in angular units, or float in
radians.
Notes
-----
The angular separation is calculated using the Vincenty formula [1]_,
which is slightly more complex and computationally expensive than
    some alternatives, but is stable at all distances, including the
poles and antipodes.
.. [1] https://en.wikipedia.org/wiki/Great-circle_distance
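    Examples
    --------
    A quick numerical check with plain floats in radians (illustrative):
    >>> import numpy as np
    >>> float(angular_separation(0.0, 0.0, np.pi / 2, 0.0))  # doctest: +FLOAT_CMP
    1.5707963267948966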
"""
sdlon = np.sin(lon2 - lon1)
cdlon = np.cos(lon2 - lon1)
slat1 = np.sin(lat1)
slat2 = np.sin(lat2)
clat1 = np.cos(lat1)
clat2 = np.cos(lat2)
num1 = clat2 * sdlon
num2 = clat1 * slat2 - slat1 * clat2 * cdlon
denominator = slat1 * slat2 + clat1 * clat2 * cdlon
return np.arctan2(np.hypot(num1, num2), denominator)
def position_angle(lon1, lat1, lon2, lat2):
"""
Position Angle (East of North) between two points on a sphere.
Parameters
----------
lon1, lat1, lon2, lat2 : `~astropy.coordinates.Angle`, `~astropy.units.Quantity` or float
Longitude and latitude of the two points. Quantities should be in
angular units; floats in radians.
Returns
-------
pa : `~astropy.coordinates.Angle`
The (positive) position angle of the vector pointing from position 1 to
position 2. If any of the angles are arrays, this will contain an array
following the appropriate `numpy` broadcasting rules.
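    Examples
    --------
    A point due east of another is at a position angle of 90 degrees
    (a minimal check with Quantity inputs):
    >>> import astropy.units as u
    >>> position_angle(0 * u.deg, 0 * u.deg, 1 * u.deg, 0 * u.deg).to(u.deg)  # doctest: +FLOAT_CMP
    <Angle 90. deg>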
"""
from .angles import Angle
deltalon = lon2 - lon1
colat = np.cos(lat2)
x = np.sin(lat2) * np.cos(lat1) - colat * np.sin(lat1) * np.cos(deltalon)
y = np.sin(deltalon) * colat
return Angle(np.arctan2(y, x), u.radian).wrap_at(360 * u.deg)
def offset_by(lon, lat, posang, distance):
"""
Point with the given offset from the given point.
Parameters
----------
lon, lat, posang, distance : `~astropy.coordinates.Angle`, `~astropy.units.Quantity` or float
Longitude and latitude of the starting point,
position angle and distance to the final point.
Quantities should be in angular units; floats in radians.
        Polar points at lat = +/-90 are treated as the limit of +/-(90-epsilon)
        with the same lon.
Returns
-------
lon, lat : `~astropy.coordinates.Angle`
The position of the final point. If any of the angles are arrays,
these will contain arrays following the appropriate `numpy` broadcasting rules.
0 <= lon < 2pi.
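    Examples
    --------
    Starting on the equator and moving 45 degrees due north (position
    angle 0; a minimal check with Quantity inputs):
    >>> import astropy.units as u
    >>> lon, lat = offset_by(0 * u.deg, 0 * u.deg, 0 * u.deg, 45 * u.deg)
    >>> lat  # doctest: +FLOAT_CMP
    <Angle 45. deg>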
"""
from .angles import Angle
# Calculations are done using the spherical trigonometry sine and cosine rules
# of the triangle A at North Pole, B at starting point, C at final point
# with angles A (change in lon), B (posang), C (not used, but negative reciprocal posang)
# with sides a (distance), b (final co-latitude), c (starting colatitude)
# B, a, c are knowns; A and b are unknowns
# https://en.wikipedia.org/wiki/Spherical_trigonometry
cos_a = np.cos(distance)
sin_a = np.sin(distance)
cos_c = np.sin(lat)
sin_c = np.cos(lat)
cos_B = np.cos(posang)
sin_B = np.sin(posang)
# cosine rule: Know two sides: a,c and included angle: B; get unknown side b
cos_b = cos_c * cos_a + sin_c * sin_a * cos_B
# sin_b = np.sqrt(1 - cos_b**2)
# sine rule and cosine rule for A (using both lets arctan2 pick quadrant).
# multiplying both sin_A and cos_A by x=sin_b * sin_c prevents /0 errors
# at poles. Correct for the x=0 multiplication a few lines down.
# sin_A/sin_a == sin_B/sin_b # Sine rule
xsin_A = sin_a * sin_B * sin_c
# cos_a == cos_b * cos_c + sin_b * sin_c * cos_A # cosine rule
xcos_A = cos_a - cos_b * cos_c
A = Angle(np.arctan2(xsin_A, xcos_A), u.radian)
# Treat the poles as if they are infinitesimally far from pole but at given lon
small_sin_c = sin_c < 1e-12
if small_sin_c.any():
# For south pole (cos_c = -1), A = posang; for North pole, A=180 deg - posang
A_pole = (90 * u.deg + cos_c * (90 * u.deg - Angle(posang, u.radian))).to(u.rad)
if A.shape:
# broadcast to ensure the shape is like that of A, which is also
# affected by the (possible) shapes of lat, posang, and distance.
small_sin_c = np.broadcast_to(small_sin_c, A.shape)
A[small_sin_c] = A_pole[small_sin_c]
else:
A = A_pole
outlon = (Angle(lon, u.radian) + A).wrap_at(360.0 * u.deg).to(u.deg)
outlat = Angle(np.arcsin(cos_b), u.radian).to(u.deg)
return outlon, outlat
def golden_spiral_grid(size):
"""Generate a grid of points on the surface of the unit sphere using the
Fibonacci or Golden Spiral method.
.. seealso::
`Evenly distributing points on a sphere <https://stackoverflow.com/questions/9600801/evenly-distributing-n-points-on-a-sphere>`_
Parameters
----------
size : int
The number of points to generate.
Returns
-------
rep : `~astropy.coordinates.UnitSphericalRepresentation`
The grid of points.
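    Examples
    --------
    A minimal sketch; only the shape is checked here:
    >>> usph = golden_spiral_grid(32)
    >>> usph.shape
    (32,)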
"""
golden_r = (1 + 5**0.5) / 2
grid = np.arange(0, size, dtype=float) + 0.5
lon = _TWOPI / golden_r * grid * u.rad
lat = np.arcsin(1 - 2 * grid / size) * u.rad
return UnitSphericalRepresentation(lon, lat)
def uniform_spherical_random_surface(size=1):
"""Generate a random sampling of points on the surface of the unit sphere.
Parameters
----------
size : int
The number of points to generate.
Returns
-------
rep : `~astropy.coordinates.UnitSphericalRepresentation`
The random points.
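    Examples
    --------
    A minimal sketch (the values are random, so only the shape is checked):
    >>> pts = uniform_spherical_random_surface(size=100)
    >>> pts.shape
    (100,)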
"""
rng = np.random # can maybe switch to this being an input later - see #11628
lon = rng.uniform(0, _TWOPI, size) * u.rad
lat = np.arcsin(rng.uniform(-1, 1, size=size)) * u.rad
return UnitSphericalRepresentation(lon, lat)
def uniform_spherical_random_volume(size=1, max_radius=1):
"""Generate a random sampling of points that follow a uniform volume
density distribution within a sphere.
Parameters
----------
size : int
The number of points to generate.
max_radius : number, quantity-like, optional
A dimensionless or unit-ful factor to scale the random distances.
Returns
-------
rep : `~astropy.coordinates.SphericalRepresentation`
The random points.
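    Examples
    --------
    A minimal sketch with a unit-ful maximum radius (the values are random,
    so only the unit is checked):
    >>> import astropy.units as u
    >>> pts = uniform_spherical_random_volume(size=16, max_radius=10 * u.pc)
    >>> pts.distance.unit
    Unit("pc")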
"""
rng = np.random # can maybe switch to this being an input later - see #11628
usph = uniform_spherical_random_surface(size=size)
r = np.cbrt(rng.uniform(size=size)) * u.Quantity(max_radius, copy=False)
return SphericalRepresentation(usph.lon, usph.lat, r)
from astropy.coordinates import angle_formats # noqa: E402
# below here can be deleted in v5.0
from astropy.utils.decorators import deprecated # noqa: E402
__old_angle_utilities_funcs = [
"check_hms_ranges",
"degrees_to_dms",
"degrees_to_string",
"dms_to_degrees",
"format_exception",
"hms_to_degrees",
"hms_to_dms",
"hms_to_hours",
"hms_to_radians",
"hours_to_decimal",
"hours_to_hms",
"hours_to_radians",
"hours_to_string",
"parse_angle",
"radians_to_degrees",
"radians_to_dms",
"radians_to_hms",
"radians_to_hours",
"sexagesimal_to_string",
]
for funcname in __old_angle_utilities_funcs:
vars()[funcname] = deprecated(
name="astropy.coordinates.angle_utilities." + funcname,
alternative="astropy.coordinates.angle_formats." + funcname,
since="v4.3",
)(getattr(angle_formats, funcname))
|
7a94e29ded42f22b460e734ea699434cff9eae93f2b667921b59b42dd39b76bc | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains a general framework for defining graphs of transformations
between coordinates, suitable for either spatial coordinates or more generalized
coordinate systems.
The fundamental idea is that each class is a node in the transformation graph,
and transitions from one node to another are defined as functions (or methods)
wrapped in transformation objects.
This module also includes more specific transformation classes for
celestial/spatial coordinate frames, generally focused around matrix-style
transformations that are typically how the algorithms are defined.
"""
import heapq
import inspect
import subprocess
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from contextlib import contextmanager, suppress
from inspect import Parameter, signature
from warnings import warn
import numpy as np
from astropy import units as u
from astropy.utils.exceptions import AstropyWarning
__all__ = [
"TransformGraph",
"CoordinateTransform",
"FunctionTransform",
"BaseAffineTransform",
"AffineTransform",
"StaticMatrixTransform",
"DynamicMatrixTransform",
"FunctionTransformWithFiniteDifference",
"CompositeTransform",
]
def frame_attrs_from_set(frame_set):
"""
A `dict` of all the attributes of all frame classes in this
`~astropy.coordinates.TransformGraph`.
Broken out of the class so this can be called on a temporary frame set to
validate new additions to the transform graph before actually adding them.
"""
result = {}
for frame_cls in frame_set:
result.update(frame_cls.frame_attributes)
return result
def frame_comps_from_set(frame_set):
"""
    A `set` of all component names ever defined within any frame class in
this `~astropy.coordinates.TransformGraph`.
Broken out of the class so this can be called on a temporary frame set to
validate new additions to the transform graph before actually adding them.
"""
result = set()
for frame_cls in frame_set:
rep_info = frame_cls._frame_specific_representation_info
for mappings in rep_info.values():
for rep_map in mappings:
result.update([rep_map.framename])
return result
class TransformGraph:
"""
A graph representing the paths between coordinate frames.
"""
def __init__(self):
self._graph = defaultdict(dict)
self.invalidate_cache() # generates cache entries
@property
def _cached_names(self):
if self._cached_names_dct is None:
self._cached_names_dct = dct = {}
for c in self.frame_set:
nm = getattr(c, "name", None)
if nm is not None:
if not isinstance(nm, list):
nm = [nm]
for name in nm:
dct[name] = c
return self._cached_names_dct
@property
def frame_set(self):
"""
A `set` of all the frame classes present in this TransformGraph.
"""
if self._cached_frame_set is None:
self._cached_frame_set = set()
for a in self._graph:
self._cached_frame_set.add(a)
for b in self._graph[a]:
self._cached_frame_set.add(b)
return self._cached_frame_set.copy()
@property
def frame_attributes(self):
"""
A `dict` of all the attributes of all frame classes in this TransformGraph.
"""
if self._cached_frame_attributes is None:
self._cached_frame_attributes = frame_attrs_from_set(self.frame_set)
return self._cached_frame_attributes
@property
def frame_component_names(self):
"""
        A `set` of all component names ever defined within any frame class in
this TransformGraph.
"""
if self._cached_component_names is None:
self._cached_component_names = frame_comps_from_set(self.frame_set)
return self._cached_component_names
def invalidate_cache(self):
"""
Invalidates the cache that stores optimizations for traversing the
transform graph. This is called automatically when transforms
are added or removed, but will need to be called manually if
        weights on transforms are modified in place.
"""
self._cached_names_dct = None
self._cached_frame_set = None
self._cached_frame_attributes = None
self._cached_component_names = None
self._shortestpaths = {}
self._composite_cache = {}
def add_transform(self, fromsys, tosys, transform):
"""Add a new coordinate transformation to the graph.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
transform : `~astropy.coordinates.CoordinateTransform`
The transformation object. Typically a
`~astropy.coordinates.CoordinateTransform` object, although it may
be some other callable that is called with the same signature.
Raises
------
TypeError
If ``fromsys`` or ``tosys`` are not classes or ``transform`` is
not callable.
"""
if not inspect.isclass(fromsys):
raise TypeError("fromsys must be a class")
if not inspect.isclass(tosys):
raise TypeError("tosys must be a class")
if not callable(transform):
raise TypeError("transform must be callable")
frame_set = self.frame_set.copy()
frame_set.add(fromsys)
frame_set.add(tosys)
# Now we check to see if any attributes on the proposed frames override
# *any* component names, which we can't allow for some of the logic in
# the SkyCoord initializer to work
attrs = set(frame_attrs_from_set(frame_set).keys())
comps = frame_comps_from_set(frame_set)
invalid_attrs = attrs.intersection(comps)
if invalid_attrs:
invalid_frames = set()
for attr in invalid_attrs:
if attr in fromsys.frame_attributes:
invalid_frames.update([fromsys])
if attr in tosys.frame_attributes:
invalid_frames.update([tosys])
raise ValueError(
f"Frame(s) {list(invalid_frames)} contain invalid attribute names:"
f" {invalid_attrs}\nFrame attributes can not conflict with *any* of"
" the frame data component names (see"
" `frame_transform_graph.frame_component_names`)."
)
self._graph[fromsys][tosys] = transform
self.invalidate_cache()
def remove_transform(self, fromsys, tosys, transform):
"""
Removes a coordinate transform from the graph.
Parameters
----------
fromsys : class or None
The coordinate frame *class* to start from. If `None`,
``transform`` will be searched for and removed (``tosys`` must
also be `None`).
tosys : class or None
The coordinate frame *class* to transform into. If `None`,
``transform`` will be searched for and removed (``fromsys`` must
also be `None`).
transform : callable or None
The transformation object to be removed or `None`. If `None`
and ``tosys`` and ``fromsys`` are supplied, there will be no
check to ensure the correct object is removed.
"""
if fromsys is None or tosys is None:
if not (tosys is None and fromsys is None):
raise ValueError("fromsys and tosys must both be None if either are")
if transform is None:
raise ValueError("cannot give all Nones to remove_transform")
# search for the requested transform by brute force and remove it
for a in self._graph:
agraph = self._graph[a]
for b in agraph:
if agraph[b] is transform:
del agraph[b]
fromsys = a
break
# If the transform was found, need to break out of the outer for loop too
if fromsys:
break
else:
raise ValueError(f"Could not find transform {transform} in the graph")
else:
if transform is None:
self._graph[fromsys].pop(tosys, None)
else:
curr = self._graph[fromsys].get(tosys, None)
if curr is transform:
self._graph[fromsys].pop(tosys)
else:
raise ValueError(
f"Current transform from {fromsys} to {tosys} is not"
f" {transform}"
)
# Remove the subgraph if it is now empty
if self._graph[fromsys] == {}:
self._graph.pop(fromsys)
self.invalidate_cache()
def find_shortest_path(self, fromsys, tosys):
"""
Computes the shortest distance along the transform graph from
one system to another.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
Returns
-------
path : list of class or None
The path from ``fromsys`` to ``tosys`` as an in-order sequence
of classes. This list includes *both* ``fromsys`` and
``tosys``. Is `None` if there is no possible path.
distance : float or int
The total distance/priority from ``fromsys`` to ``tosys``. If
priorities are not set this is the number of transforms
needed. Is ``inf`` if there is no possible path.
"""
inf = float("inf")
# special-case the 0 or 1-path
if tosys is fromsys:
if tosys not in self._graph[fromsys]:
# Means there's no transform necessary to go from it to itself.
return [tosys], 0
if tosys in self._graph[fromsys]:
# this will also catch the case where tosys is fromsys, but has
# a defined transform.
t = self._graph[fromsys][tosys]
return [fromsys, tosys], float(t.priority if hasattr(t, "priority") else 1)
# otherwise, need to construct the path:
if fromsys in self._shortestpaths:
# already have a cached result
fpaths = self._shortestpaths[fromsys]
if tosys in fpaths:
return fpaths[tosys]
else:
return None, inf
# use Dijkstra's algorithm to find shortest path in all other cases
nodes = []
# first make the list of nodes
for a in self._graph:
if a not in nodes:
nodes.append(a)
for b in self._graph[a]:
if b not in nodes:
nodes.append(b)
if fromsys not in nodes or tosys not in nodes:
# fromsys or tosys are isolated or not registered, so there's
# certainly no way to get from one to the other
return None, inf
edgeweights = {}
# construct another graph that is a dict of dicts of priorities
# (used as edge weights in Dijkstra's algorithm)
for a in self._graph:
edgeweights[a] = aew = {}
agraph = self._graph[a]
for b in agraph:
aew[b] = float(getattr(agraph[b], "priority", 1))
# entries in q are [distance, count, nodeobj, pathlist]
        # count is needed because nodes (frame classes) are not orderable, so
        # tie-breaking on equal distances would fail; the counter preserves
        # insertion order when the weights are the same.
q = [[inf, i, n, []] for i, n in enumerate(nodes) if n is not fromsys]
q.insert(0, [0, -1, fromsys, []])
# this dict will store the distance to node from ``fromsys`` and the path
result = {}
# definitely starts as a valid heap because of the insert line; from the
# node to itself is always the shortest distance
while len(q) > 0:
d, orderi, n, path = heapq.heappop(q)
if d == inf:
# everything left is unreachable from fromsys, just copy them to
# the results and jump out of the loop
result[n] = (None, d)
for d, orderi, n, path in q:
result[n] = (None, d)
break
else:
result[n] = (path, d)
path.append(n)
if n not in edgeweights:
# this is a system that can be transformed to, but not from.
continue
for n2 in edgeweights[n]:
                if n2 not in result:  # i.e., n2 not yet visited
# find where n2 is in the heap
for i in range(len(q)):
if q[i][2] == n2:
break
else:
raise ValueError(
"n2 not in heap - this should be impossible!"
)
newd = d + edgeweights[n][n2]
if newd < q[i][0]:
q[i][0] = newd
q[i][3] = list(path)
heapq.heapify(q)
# cache for later use
self._shortestpaths[fromsys] = result
return result[tosys]
def get_transform(self, fromsys, tosys):
"""Generates and returns the CompositeTransform for a transformation
between two coordinate systems.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
Returns
-------
trans : `~astropy.coordinates.CompositeTransform` or None
If there is a path from ``fromsys`` to ``tosys``, this is a
transform object for that path. If no path could be found, this is
`None`.
Notes
-----
A `~astropy.coordinates.CompositeTransform` is always returned, because
`~astropy.coordinates.CompositeTransform` is slightly more adaptable in
the way it can be called than other transform classes. Specifically, it
takes care of intermediate steps of transformations in a way that is
consistent with 1-hop transformations.
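        Examples
        --------
        An illustrative lookup on the built-in graph (a sketch; skipped since
        the returned object depends on the registered transforms):
        >>> from astropy.coordinates import FK5, ICRS, frame_transform_graph
        >>> trans = frame_transform_graph.get_transform(ICRS, FK5)  # doctest: +SKIP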
"""
if not inspect.isclass(fromsys):
raise TypeError("fromsys is not a class")
if not inspect.isclass(tosys):
raise TypeError("tosys is not a class")
path, distance = self.find_shortest_path(fromsys, tosys)
if path is None:
return None
transforms = []
currsys = fromsys
for p in path[1:]: # first element is fromsys so we skip it
transforms.append(self._graph[currsys][p])
currsys = p
fttuple = (fromsys, tosys)
if fttuple not in self._composite_cache:
comptrans = CompositeTransform(
transforms, fromsys, tosys, register_graph=False
)
self._composite_cache[fttuple] = comptrans
return self._composite_cache[fttuple]
def lookup_name(self, name):
"""
Tries to locate the coordinate class with the provided alias.
Parameters
----------
name : str
The alias to look up.
Returns
-------
`BaseCoordinateFrame` subclass
The coordinate class corresponding to the ``name`` or `None` if
no such class exists.
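        Examples
        --------
        A minimal sketch using the built-in graph:
        >>> from astropy.coordinates import frame_transform_graph
        >>> frame_transform_graph.lookup_name('icrs')
        <class 'astropy.coordinates.builtin_frames.icrs.ICRS'>
        >>> frame_transform_graph.lookup_name('not a frame') is None
        True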
"""
return self._cached_names.get(name, None)
def get_names(self):
"""
Returns all available transform names. They will all be
valid arguments to `lookup_name`.
Returns
-------
nms : list
The aliases for coordinate systems.
"""
return list(self._cached_names.keys())
def to_dot_graph(
self,
priorities=True,
addnodes=[],
savefn=None,
savelayout="plain",
saveformat=None,
color_edges=True,
):
"""
Converts this transform graph to the graphviz_ DOT format.
Optionally saves it (requires `graphviz`_ be installed and on your path).
.. _graphviz: http://www.graphviz.org/
Parameters
----------
priorities : bool
If `True`, show the priority values for each transform. Otherwise,
            they will not be included in the graph.
addnodes : sequence of str
Additional coordinate systems to add (this can include systems
already in the transform graph, but they will only appear once).
savefn : None or str
The file name to save this graph to or `None` to not save
to a file.
savelayout : str
The graphviz program to use to layout the graph (see
graphviz_ for details) or 'plain' to just save the DOT graph
content. Ignored if ``savefn`` is `None`.
saveformat : str
The graphviz output format. (e.g. the ``-Txxx`` option for
the command line program - see graphviz docs for details).
Ignored if ``savefn`` is `None`.
color_edges : bool
Color the edges between two nodes (frames) based on the type of
transform. ``FunctionTransform``: red, ``StaticMatrixTransform``:
blue, ``DynamicMatrixTransform``: green.
Returns
-------
dotgraph : str
A string with the DOT format graph.
"""
nodes = []
# find the node names
for a in self._graph:
if a not in nodes:
nodes.append(a)
for b in self._graph[a]:
if b not in nodes:
nodes.append(b)
for node in addnodes:
if node not in nodes:
nodes.append(node)
nodenames = []
invclsaliases = {
f: [k for k, v in self._cached_names.items() if v == f]
for f in self.frame_set
}
for n in nodes:
if n in invclsaliases:
aliases = "`\\n`".join(invclsaliases[n])
nodenames.append(
'{0} [shape=oval label="{0}\\n`{1}`"]'.format(n.__name__, aliases)
)
else:
nodenames.append(n.__name__ + "[ shape=oval ]")
edgenames = []
# Now the edges
for a in self._graph:
agraph = self._graph[a]
for b in agraph:
transform = agraph[b]
pri = transform.priority if hasattr(transform, "priority") else 1
color = trans_to_color[transform.__class__] if color_edges else "black"
edgenames.append((a.__name__, b.__name__, pri, color))
# generate simple dot format graph
lines = ["digraph AstropyCoordinateTransformGraph {"]
lines.append("graph [rankdir=LR]")
lines.append("; ".join(nodenames) + ";")
for enm1, enm2, weights, color in edgenames:
labelstr_fmt = "[ {0} {1} ]"
if priorities:
priority_part = f'label = "{weights}"'
else:
priority_part = ""
color_part = f'color = "{color}"'
labelstr = labelstr_fmt.format(priority_part, color_part)
lines.append(f"{enm1} -> {enm2}{labelstr};")
lines.append("")
lines.append("overlap=false")
lines.append("}")
dotgraph = "\n".join(lines)
if savefn is not None:
if savelayout == "plain":
with open(savefn, "w") as f:
f.write(dotgraph)
else:
args = [savelayout]
if saveformat is not None:
args.append("-T" + saveformat)
                proc = subprocess.Popen(
                    args,
                    stdin=subprocess.PIPE,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                    # text mode, so we can feed/read the DOT graph as `str`
                    encoding="utf-8",
                )
stdout, stderr = proc.communicate(dotgraph)
if proc.returncode != 0:
raise OSError("problem running graphviz: \n" + stderr)
with open(savefn, "w") as f:
f.write(stdout)
return dotgraph
def to_networkx_graph(self):
"""
Converts this transform graph into a networkx graph.
.. note::
You must have the `networkx <https://networkx.github.io/>`_
package installed for this to work.
Returns
-------
nxgraph : ``networkx.Graph``
This `~astropy.coordinates.TransformGraph` as a
`networkx.Graph <https://networkx.github.io/documentation/stable/reference/classes/graph.html>`_.
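        Examples
        --------
        A usage sketch (skipped, since networkx is an optional dependency):
        >>> from astropy.coordinates import frame_transform_graph
        >>> nxgraph = frame_transform_graph.to_networkx_graph()  # doctest: +SKIP
        >>> len(nxgraph.nodes) > 0  # doctest: +SKIP
        True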
"""
import networkx as nx
nxgraph = nx.Graph()
# first make the nodes
for a in self._graph:
if a not in nxgraph:
nxgraph.add_node(a)
for b in self._graph[a]:
if b not in nxgraph:
nxgraph.add_node(b)
# Now the edges
for a in self._graph:
agraph = self._graph[a]
for b in agraph:
transform = agraph[b]
pri = transform.priority if hasattr(transform, "priority") else 1
color = trans_to_color[transform.__class__]
nxgraph.add_edge(a, b, weight=pri, color=color)
return nxgraph
def transform(self, transcls, fromsys, tosys, priority=1, **kwargs):
"""A function decorator for defining transformations.
.. note::
If decorating a static method of a class, ``@staticmethod``
should be added *above* this decorator.
Parameters
----------
transcls : class
The class of the transformation object to create.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
            The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
Additional keyword arguments are passed into the ``transcls``
constructor.
Returns
-------
deco : function
A function that can be called on another function as a decorator
(see example).
Notes
-----
This decorator assumes the first argument of the ``transcls``
initializer accepts a callable, and that the second and third are
``fromsys`` and ``tosys``. If this is not true, you should just
initialize the class manually and use
`~astropy.coordinates.TransformGraph.add_transform` instead of this
decorator.
Examples
--------
::
graph = TransformGraph()
class Frame1(BaseCoordinateFrame):
...
class Frame2(BaseCoordinateFrame):
...
@graph.transform(FunctionTransform, Frame1, Frame2)
def f1_to_f2(f1_obj):
... do something with f1_obj ...
return f2_obj
"""
def deco(func):
# this doesn't do anything directly with the transform because
# ``register_graph=self`` stores it in the transform graph
# automatically
transcls(
func, fromsys, tosys, priority=priority, register_graph=self, **kwargs
)
return func
return deco
def _add_merged_transform(self, fromsys, tosys, *furthersys, priority=1):
"""
Add a single-step transform that encapsulates a multi-step transformation path,
using the transforms that already exist in the graph.
The created transform internally calls the existing transforms. If all of the
transforms are affine, the merged transform is
`~astropy.coordinates.DynamicMatrixTransform` (if there are no
origin shifts) or `~astropy.coordinates.AffineTransform`
(otherwise). If at least one of the transforms is not affine, the merged
transform is
`~astropy.coordinates.FunctionTransformWithFiniteDifference`.
This method is primarily useful for defining loopback transformations
(i.e., where ``fromsys`` and the final ``tosys`` are the same).
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform to.
*furthersys : class
Additional coordinate frame classes to transform to in order.
priority : number
The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
Notes
-----
Even though the created transform is a single step in the graph, it
will still internally call the constituent transforms. Thus, there is
        no performance benefit to using this created transform.
For Astropy's built-in frames, loopback transformations typically use
`~astropy.coordinates.ICRS` to be safe. Transforming through an inertial
frame ensures that changes in observation time and observer
location/velocity are properly accounted for.
An error will be raised if a direct transform between ``fromsys`` and
        ``tosys`` already exists.
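        Examples
        --------
        An illustrative loopback registration (a sketch; ``graph``, ``MyFrame``
        and the intermediate transforms are assumed to exist already):
        >>> graph._add_merged_transform(MyFrame, ICRS, MyFrame)  # doctest: +SKIP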
"""
frames = [fromsys, tosys, *furthersys]
lastsys = frames[-1]
full_path = self.get_transform(fromsys, lastsys)
transforms = [
self.get_transform(frame_a, frame_b)
for frame_a, frame_b in zip(frames[:-1], frames[1:])
]
if None in transforms:
raise ValueError("This transformation path is not possible")
if len(full_path.transforms) == 1:
raise ValueError(
f"A direct transform for {fromsys.__name__}->{lastsys.__name__} already"
" exists"
)
self.add_transform(
fromsys,
lastsys,
CompositeTransform(
transforms, fromsys, lastsys, priority=priority
)._as_single_transform(),
)
@contextmanager
def impose_finite_difference_dt(self, dt):
"""
Context manager to impose a finite-difference time step on all applicable transformations.
For each transformation in this transformation graph that has the attribute
``finite_difference_dt``, that attribute is set to the provided value. The only standard
transformation with this attribute is
`~astropy.coordinates.FunctionTransformWithFiniteDifference`.
Parameters
----------
dt : `~astropy.units.Quantity` ['time'] or callable
If a quantity, this is the size of the differential used to do the finite difference.
If a callable, should accept ``(fromcoord, toframe)`` and return the ``dt`` value.
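        Examples
        --------
        A usage sketch of the context-manager pattern on the built-in graph:
        >>> import astropy.units as u
        >>> from astropy.coordinates import frame_transform_graph
        >>> with frame_transform_graph.impose_finite_difference_dt(1 * u.min):  # doctest: +SKIP
        ...     pass  # transformations in this block use a 1-minute step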
"""
key = "finite_difference_dt"
saved_settings = []
try:
for to_frames in self._graph.values():
for transform in to_frames.values():
if hasattr(transform, key):
old_setting = (transform, key, getattr(transform, key))
saved_settings.append(old_setting)
setattr(transform, key, dt)
yield
finally:
for setting in saved_settings:
setattr(*setting)
# <-------------------Define the builtin transform classes-------------------->
class CoordinateTransform(metaclass=ABCMeta):
"""
An object that transforms a coordinate from one system to another.
Subclasses must implement `__call__` with the provided signature.
They should also call this superclass's ``__init__`` in their
``__init__``.
Parameters
----------
fromsys : `~astropy.coordinates.BaseCoordinateFrame` subclass
The coordinate frame class to start from.
tosys : `~astropy.coordinates.BaseCoordinateFrame` subclass
The coordinate frame class to transform into.
priority : float or int
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `~astropy.coordinates.TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
"""
def __init__(self, fromsys, tosys, priority=1, register_graph=None):
if not inspect.isclass(fromsys):
raise TypeError("fromsys must be a class")
if not inspect.isclass(tosys):
raise TypeError("tosys must be a class")
self.fromsys = fromsys
self.tosys = tosys
self.priority = float(priority)
if register_graph:
# this will do the type-checking when it adds to the graph
self.register(register_graph)
else:
if not inspect.isclass(fromsys) or not inspect.isclass(tosys):
raise TypeError("fromsys and tosys must be classes")
self.overlapping_frame_attr_names = overlap = []
if hasattr(fromsys, "frame_attributes") and hasattr(tosys, "frame_attributes"):
# the if statement is there so that non-frame things might be usable
# if it makes sense
for from_nm in fromsys.frame_attributes:
if from_nm in tosys.frame_attributes:
overlap.append(from_nm)
def register(self, graph):
"""
Add this transformation to the requested Transformation graph,
replacing anything already connecting these two coordinates.
Parameters
----------
graph : `~astropy.coordinates.TransformGraph` object
The graph to register this transformation with.
"""
graph.add_transform(self.fromsys, self.tosys, self)
def unregister(self, graph):
"""
Remove this transformation from the requested transformation
graph.
Parameters
----------
graph : a TransformGraph object
The graph to unregister this transformation from.
Raises
------
ValueError
If this is not currently in the transform graph.
"""
graph.remove_transform(self.fromsys, self.tosys, self)
@abstractmethod
def __call__(self, fromcoord, toframe):
"""
Does the actual coordinate transformation from the ``fromsys`` class to
the ``tosys`` class.
Parameters
----------
fromcoord : `~astropy.coordinates.BaseCoordinateFrame` subclass instance
An object of class matching ``fromsys`` that is to be transformed.
toframe : object
An object that has the attributes necessary to fully specify the
frame. That is, it must have attributes with names that match the
keys of the dictionary ``tosys.frame_attributes``.
Typically this is of class ``tosys``, but it *might* be
some other class as long as it has the appropriate attributes.
Returns
-------
tocoord : `~astropy.coordinates.BaseCoordinateFrame` subclass instance
The new coordinate after the transform has been applied.
"""
class FunctionTransform(CoordinateTransform):
"""
A coordinate transformation defined by a function that accepts a
coordinate object and returns the transformed coordinate object.
Parameters
----------
func : callable
The transformation function. Should have a call signature
        ``func(fromcoord, toframe)``. Note that, unlike
`CoordinateTransform.__call__`, ``toframe`` is assumed to be of type
``tosys`` for this function.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `~astropy.coordinates.TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
TypeError
If ``func`` is not callable.
ValueError
If ``func`` cannot accept two arguments.
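    Examples
    --------
    A minimal sketch of wiring up a function transform. ``MyFrame1`` and
    ``MyFrame2`` stand in for `~astropy.coordinates.BaseCoordinateFrame`
    subclasses defined elsewhere; they are not classes in this module::

        def identity_transform(fromcoord, toframe):
            # realize the target frame with the (unchanged) data
            return toframe.realize_frame(fromcoord.data)

        trans = FunctionTransform(identity_transform, MyFrame1, MyFrame2,
                                  register_graph=frame_transform_graph)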
"""
def __init__(self, func, fromsys, tosys, priority=1, register_graph=None):
if not callable(func):
raise TypeError("func must be callable")
with suppress(TypeError):
sig = signature(func)
kinds = [x.kind for x in sig.parameters.values()]
            # count the parameters that can be passed positionally; note that
            # if ``signature`` raised TypeError above, the surrounding
            # ``suppress`` skips this whole check
            n_positional = sum(
                kind
                in (
                    inspect.Parameter.POSITIONAL_ONLY,
                    inspect.Parameter.POSITIONAL_OR_KEYWORD,
                )
                for kind in kinds
            )
            if n_positional != 2 and inspect.Parameter.VAR_POSITIONAL not in kinds:
                raise ValueError("provided function does not accept two arguments")
self.func = func
super().__init__(
fromsys, tosys, priority=priority, register_graph=register_graph
)
def __call__(self, fromcoord, toframe):
res = self.func(fromcoord, toframe)
if not isinstance(res, self.tosys):
raise TypeError(
f"the transformation function yielded {res} but "
f"should have been of type {self.tosys}"
)
if fromcoord.data.differentials and not res.data.differentials:
warn(
"Applied a FunctionTransform to a coordinate frame with "
"differentials, but the FunctionTransform does not handle "
"differentials, so they have been dropped.",
AstropyWarning,
)
return res
class FunctionTransformWithFiniteDifference(FunctionTransform):
    r"""Transformation based on functions using finite difference for velocities.
A coordinate transformation that works like a
`~astropy.coordinates.FunctionTransform`, but computes velocity shifts
based on the finite-difference relative to one of the frame attributes.
Note that the transform function should *not* change the differential at
all in this case, as any differentials will be overridden.
When a differential is in the from coordinate, the finite difference
    calculation has two components. The first part is simply the existing
    differential, re-oriented (using finite-difference techniques) to
    point in the direction the velocity vector has in the *new* frame. The
second component is the "induced" velocity. That is, the velocity
intrinsic to the frame itself, estimated by shifting the frame using the
``finite_difference_frameattr_name`` frame attribute a small amount
(``finite_difference_dt``) in time and re-calculating the position.
Parameters
----------
finite_difference_frameattr_name : str or None
The name of the frame attribute on the frames to use for the finite
difference. Both the to and the from frame will be checked for this
attribute, but only one needs to have it. If None, no velocity
component induced from the frame itself will be included - only the
re-orientation of any existing differential.
finite_difference_dt : `~astropy.units.Quantity` ['time'] or callable
If a quantity, this is the size of the differential used to do the
finite difference. If a callable, should accept
``(fromcoord, toframe)`` and return the ``dt`` value.
symmetric_finite_difference : bool
        If True, the finite difference is computed as
        :math:`\frac{x(t + \Delta t / 2) - x(t - \Delta t / 2)}{\Delta t}`, or
        if False, :math:`\frac{x(t + \Delta t) - x(t)}{\Delta t}`. The former
        is more accurate (and has more stable finite-difference behavior), but
        needs one extra evaluation of the transform; the latter is slightly
        faster.
All other parameters are identical to the initializer for
`~astropy.coordinates.FunctionTransform`.
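    Examples
    --------
    A hedged sketch using the ``frame_transform_graph.transform`` decorator
    (``MyFrame1`` and ``MyFrame2`` are hypothetical frame classes that have
    an ``obstime`` attribute)::

        @frame_transform_graph.transform(
            FunctionTransformWithFiniteDifference, MyFrame1, MyFrame2,
            finite_difference_dt=2 * u.second,
        )
        def myframe1_to_myframe2(fromcoord, toframe):
            # transform positions only; velocities are reconstructed by the
            # finite-difference machinery, so no differentials are set here
            return toframe.realize_frame(fromcoord.data.without_differentials())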
"""
def __init__(
self,
func,
fromsys,
tosys,
priority=1,
register_graph=None,
finite_difference_frameattr_name="obstime",
finite_difference_dt=1 * u.second,
symmetric_finite_difference=True,
):
super().__init__(func, fromsys, tosys, priority, register_graph)
self.finite_difference_frameattr_name = finite_difference_frameattr_name
self.finite_difference_dt = finite_difference_dt
self.symmetric_finite_difference = symmetric_finite_difference
@property
def finite_difference_frameattr_name(self):
return self._finite_difference_frameattr_name
@finite_difference_frameattr_name.setter
def finite_difference_frameattr_name(self, value):
if value is None:
self._diff_attr_in_fromsys = self._diff_attr_in_tosys = False
else:
diff_attr_in_fromsys = value in self.fromsys.frame_attributes
diff_attr_in_tosys = value in self.tosys.frame_attributes
if diff_attr_in_fromsys or diff_attr_in_tosys:
self._diff_attr_in_fromsys = diff_attr_in_fromsys
self._diff_attr_in_tosys = diff_attr_in_tosys
else:
raise ValueError(
f"Frame attribute name {value} is not a frame attribute of"
f" {self.fromsys} or {self.tosys}"
)
self._finite_difference_frameattr_name = value
def __call__(self, fromcoord, toframe):
from .representation import CartesianDifferential, CartesianRepresentation
supcall = self.func
if fromcoord.data.differentials:
# this is the finite difference case
if callable(self.finite_difference_dt):
dt = self.finite_difference_dt(fromcoord, toframe)
else:
dt = self.finite_difference_dt
halfdt = dt / 2
from_diffless = fromcoord.realize_frame(
fromcoord.data.without_differentials()
)
reprwithoutdiff = supcall(from_diffless, toframe)
# first we use the existing differential to compute an offset due to
# the already-existing velocity, but in the new frame
fromcoord_cart = fromcoord.cartesian
if self.symmetric_finite_difference:
fwdxyz = (
fromcoord_cart.xyz
+ fromcoord_cart.differentials["s"].d_xyz * halfdt
)
fwd = supcall(
fromcoord.realize_frame(CartesianRepresentation(fwdxyz)), toframe
)
backxyz = (
fromcoord_cart.xyz
- fromcoord_cart.differentials["s"].d_xyz * halfdt
)
back = supcall(
fromcoord.realize_frame(CartesianRepresentation(backxyz)), toframe
)
else:
fwdxyz = (
fromcoord_cart.xyz + fromcoord_cart.differentials["s"].d_xyz * dt
)
fwd = supcall(
fromcoord.realize_frame(CartesianRepresentation(fwdxyz)), toframe
)
back = reprwithoutdiff
diffxyz = (fwd.cartesian - back.cartesian).xyz / dt
# now we compute the "induced" velocities due to any movement in
# the frame itself over time
attrname = self.finite_difference_frameattr_name
if attrname is not None:
if self.symmetric_finite_difference:
if self._diff_attr_in_fromsys:
kws = {attrname: getattr(from_diffless, attrname) + halfdt}
from_diffless_fwd = from_diffless.replicate(**kws)
else:
from_diffless_fwd = from_diffless
if self._diff_attr_in_tosys:
kws = {attrname: getattr(toframe, attrname) + halfdt}
fwd_frame = toframe.replicate_without_data(**kws)
else:
fwd_frame = toframe
fwd = supcall(from_diffless_fwd, fwd_frame)
if self._diff_attr_in_fromsys:
kws = {attrname: getattr(from_diffless, attrname) - halfdt}
from_diffless_back = from_diffless.replicate(**kws)
else:
from_diffless_back = from_diffless
if self._diff_attr_in_tosys:
kws = {attrname: getattr(toframe, attrname) - halfdt}
back_frame = toframe.replicate_without_data(**kws)
else:
back_frame = toframe
back = supcall(from_diffless_back, back_frame)
else:
if self._diff_attr_in_fromsys:
kws = {attrname: getattr(from_diffless, attrname) + dt}
from_diffless_fwd = from_diffless.replicate(**kws)
else:
from_diffless_fwd = from_diffless
if self._diff_attr_in_tosys:
kws = {attrname: getattr(toframe, attrname) + dt}
fwd_frame = toframe.replicate_without_data(**kws)
else:
fwd_frame = toframe
fwd = supcall(from_diffless_fwd, fwd_frame)
back = reprwithoutdiff
diffxyz += (fwd.cartesian - back.cartesian).xyz / dt
newdiff = CartesianDifferential(diffxyz)
reprwithdiff = reprwithoutdiff.data.to_cartesian().with_differentials(
newdiff
)
return reprwithoutdiff.realize_frame(reprwithdiff)
else:
return supcall(fromcoord, toframe)
class BaseAffineTransform(CoordinateTransform):
"""Base class for common functionality between the ``AffineTransform``-type
subclasses.
This base class is needed because `~astropy.coordinates.AffineTransform`
and the matrix transform classes share the ``__call__()`` method, but
differ in how they generate the affine parameters.
`~astropy.coordinates.StaticMatrixTransform` passes in a matrix stored as a
class attribute, and both of the matrix transforms pass in ``None`` for the
offset. Hence, user subclasses would likely want to subclass this (rather
than `~astropy.coordinates.AffineTransform`) if they want to provide
alternative transformations using this machinery.
"""
def _apply_transform(self, fromcoord, matrix, offset):
from .representation import (
CartesianDifferential,
RadialDifferential,
SphericalCosLatDifferential,
SphericalDifferential,
UnitSphericalRepresentation,
)
data = fromcoord.data
has_velocity = "s" in data.differentials
# Bail out if no transform is actually requested
if matrix is None and offset is None:
return data
# list of unit differentials
_unit_diffs = (
SphericalDifferential._unit_differential,
SphericalCosLatDifferential._unit_differential,
)
unit_vel_diff = has_velocity and isinstance(
data.differentials["s"], _unit_diffs
)
rad_vel_diff = has_velocity and isinstance(
data.differentials["s"], RadialDifferential
)
# Some initial checking to short-circuit doing any re-representation if
# we're going to fail anyways:
if isinstance(data, UnitSphericalRepresentation) and offset is not None:
            raise TypeError(
                "Position information stored on coordinate frame "
                "is insufficient to do a full-space position "
                f"transformation (representation class: {data.__class__})"
            )
elif (
has_velocity
and (unit_vel_diff or rad_vel_diff)
and offset is not None
and "s" in offset.differentials
):
# Coordinate has a velocity, but it is not a full-space velocity
# that we need to do a velocity offset
raise TypeError(
"Velocity information stored on coordinate frame is insufficient to do"
" a full-space velocity transformation (differential class:"
f" {data.differentials['s'].__class__})"
)
elif len(data.differentials) > 1:
# We should never get here because the frame initializer shouldn't
# allow more differentials, but this just adds protection for
# subclasses that somehow skip the checks
raise ValueError(
"Representation passed to AffineTransform contains multiple associated"
" differentials. Only a single differential with velocity units is"
f" presently supported (differentials: {data.differentials})."
)
# If the representation is a UnitSphericalRepresentation, and this is
# just a MatrixTransform, we have to try to turn the differential into a
# Unit version of the differential (if no radial velocity) or a
# sphericaldifferential with zero proper motion (if only a radial
# velocity) so that the matrix operation works
if (
has_velocity
and isinstance(data, UnitSphericalRepresentation)
and not unit_vel_diff
and not rad_vel_diff
):
# retrieve just velocity differential
unit_diff = data.differentials["s"].represent_as(
data.differentials["s"]._unit_differential, data
)
data = data.with_differentials({"s": unit_diff}) # updates key
# If it's a RadialDifferential, we flat-out ignore the differentials
# This is because, by this point (past the validation above), we can
# only possibly be doing a rotation-only transformation, and that
# won't change the radial differential. We later add it back in
elif rad_vel_diff:
data = data.without_differentials()
# Convert the representation and differentials to cartesian without
# having them attached to a frame
rep = data.to_cartesian()
diffs = {
k: diff.represent_as(CartesianDifferential, data)
for k, diff in data.differentials.items()
}
rep = rep.with_differentials(diffs)
# Only do transform if matrix is specified. This is for speed in
# transformations that only specify an offset (e.g., LSR)
if matrix is not None:
# Note: this applies to both representation and differentials
rep = rep.transform(matrix)
# TODO: if we decide to allow arithmetic between representations that
# contain differentials, this can be tidied up
if offset is not None:
newrep = rep.without_differentials() + offset.without_differentials()
else:
newrep = rep.without_differentials()
# We need a velocity (time derivative) and, for now, are strict: the
# representation can only contain a velocity differential and no others.
if has_velocity and not rad_vel_diff:
veldiff = rep.differentials["s"] # already in Cartesian form
if offset is not None and "s" in offset.differentials:
veldiff = veldiff + offset.differentials["s"]
newrep = newrep.with_differentials({"s": veldiff})
if isinstance(fromcoord.data, UnitSphericalRepresentation):
# Special-case this because otherwise the return object will think
# it has a valid distance with the default return (a
# CartesianRepresentation instance)
if has_velocity and not unit_vel_diff and not rad_vel_diff:
# We have to first represent as the Unit types we converted to,
# then put the d_distance information back in to the
# differentials and re-represent as their original forms
newdiff = newrep.differentials["s"]
_unit_cls = fromcoord.data.differentials["s"]._unit_differential
newdiff = newdiff.represent_as(_unit_cls, newrep)
kwargs = {comp: getattr(newdiff, comp) for comp in newdiff.components}
kwargs["d_distance"] = fromcoord.data.differentials["s"].d_distance
diffs = {
"s": fromcoord.data.differentials["s"].__class__(
copy=False, **kwargs
)
}
elif has_velocity and unit_vel_diff:
newdiff = newrep.differentials["s"].represent_as(
fromcoord.data.differentials["s"].__class__, newrep
)
diffs = {"s": newdiff}
else:
diffs = newrep.differentials
newrep = newrep.represent_as(fromcoord.data.__class__) # drops diffs
newrep = newrep.with_differentials(diffs)
elif has_velocity and unit_vel_diff:
# Here, we're in the case where the representation is not
# UnitSpherical, but the differential *is* one of the UnitSpherical
# types. We have to convert back to that differential class or the
# resulting frame will think it has a valid radial_velocity. This
# can probably be cleaned up: we currently have to go through the
# dimensional version of the differential before representing as the
# unit differential so that the units work out (the distance length
# unit shouldn't appear in the resulting proper motions)
diff_cls = fromcoord.data.differentials["s"].__class__
newrep = newrep.represent_as(
fromcoord.data.__class__, diff_cls._dimensional_differential
)
newrep = newrep.represent_as(fromcoord.data.__class__, diff_cls)
# We pulled the radial differential off of the representation
# earlier, so now we need to put it back. But, in order to do that, we
# have to turn the representation into a repr that is compatible with
# having a RadialDifferential
if has_velocity and rad_vel_diff:
newrep = newrep.represent_as(fromcoord.data.__class__)
newrep = newrep.with_differentials({"s": fromcoord.data.differentials["s"]})
return newrep
def __call__(self, fromcoord, toframe):
params = self._affine_params(fromcoord, toframe)
newrep = self._apply_transform(fromcoord, *params)
return toframe.realize_frame(newrep)
@abstractmethod
def _affine_params(self, fromcoord, toframe):
pass
class AffineTransform(BaseAffineTransform):
"""
A coordinate transformation specified as a function that yields a 3 x 3
cartesian transformation matrix and a tuple of displacement vectors.
See `~astropy.coordinates.Galactocentric` for
an example.
Parameters
----------
transform_func : callable
A callable that has the signature ``transform_func(fromcoord, toframe)``
and returns: a (3, 3) matrix that operates on ``fromcoord`` in a
Cartesian representation, and a ``CartesianRepresentation`` with
(optionally) an attached velocity ``CartesianDifferential`` to represent
a translation and offset in velocity to apply after the matrix
operation.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `~astropy.coordinates.TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
TypeError
If ``transform_func`` is not callable
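    Examples
    --------
    A hedged sketch of a ``transform_func``; the rotation angle and offset
    are made up for illustration, and the frame classes are hypothetical::

        from astropy.coordinates import CartesianRepresentation
        from astropy.coordinates.matrix_utilities import rotation_matrix

        def my_affine_params(fromcoord, toframe):
            matrix = rotation_matrix(30 * u.deg, "z")
            offset = CartesianRepresentation([1, 0, 0] * u.kpc)
            return matrix, offset

        trans = AffineTransform(my_affine_params, MyFrame1, MyFrame2)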
"""
def __init__(self, transform_func, fromsys, tosys, priority=1, register_graph=None):
if not callable(transform_func):
raise TypeError("transform_func is not callable")
self.transform_func = transform_func
super().__init__(
fromsys, tosys, priority=priority, register_graph=register_graph
)
def _affine_params(self, fromcoord, toframe):
return self.transform_func(fromcoord, toframe)
class StaticMatrixTransform(BaseAffineTransform):
"""
A coordinate transformation defined as a 3 x 3 cartesian
transformation matrix.
This is distinct from DynamicMatrixTransform in that this kind of matrix is
independent of frame attributes. That is, it depends *only* on the class of
the frame.
Parameters
----------
matrix : array-like or callable
A 3 x 3 matrix for transforming 3-vectors. In most cases will
be unitary (although this is not strictly required). If a callable,
will be called *with no arguments* to get the matrix.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `~astropy.coordinates.TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
ValueError
If the matrix is not 3 x 3
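    Examples
    --------
    A minimal sketch using the
    `~astropy.coordinates.matrix_utilities.rotation_matrix` helper (the
    frame classes are hypothetical)::

        from astropy.coordinates.matrix_utilities import rotation_matrix

        trans = StaticMatrixTransform(rotation_matrix(30 * u.deg, "z"),
                                      MyFrame1, MyFrame2)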
"""
def __init__(self, matrix, fromsys, tosys, priority=1, register_graph=None):
if callable(matrix):
matrix = matrix()
self.matrix = np.array(matrix)
if self.matrix.shape != (3, 3):
raise ValueError("Provided matrix is not 3 x 3")
super().__init__(
fromsys, tosys, priority=priority, register_graph=register_graph
)
def _affine_params(self, fromcoord, toframe):
return self.matrix, None
class DynamicMatrixTransform(BaseAffineTransform):
"""
A coordinate transformation specified as a function that yields a
3 x 3 cartesian transformation matrix.
This is similar to, but distinct from StaticMatrixTransform, in that the
matrix for this class might depend on frame attributes.
Parameters
----------
matrix_func : callable
A callable that has the signature ``matrix_func(fromcoord, toframe)`` and
returns a 3 x 3 matrix that converts ``fromcoord`` in a cartesian
representation to the new coordinate system.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `~astropy.coordinates.TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
TypeError
If ``matrix_func`` is not callable
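    Examples
    --------
    A hedged sketch where the matrix depends on a frame attribute
    (``equinox`` is assumed to be an attribute of ``MyFrame2``, and the
    rotation itself is made up for illustration)::

        def my_matrix_func(fromcoord, toframe):
            # a made-up rotation that depends on the target frame's equinox
            return rotation_matrix(toframe.equinox.jyear * u.deg, "z")

        trans = DynamicMatrixTransform(my_matrix_func, MyFrame1, MyFrame2)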
"""
def __init__(self, matrix_func, fromsys, tosys, priority=1, register_graph=None):
if not callable(matrix_func):
raise TypeError("matrix_func is not callable")
self.matrix_func = matrix_func
super().__init__(
fromsys, tosys, priority=priority, register_graph=register_graph
)
def _affine_params(self, fromcoord, toframe):
return self.matrix_func(fromcoord, toframe), None
class CompositeTransform(CoordinateTransform):
"""
A transformation constructed by combining together a series of single-step
transformations.
Note that the intermediate frame objects are constructed using any frame
attributes in ``toframe`` or ``fromframe`` that overlap with the intermediate
frame (``toframe`` favored over ``fromframe`` if there's a conflict). Any frame
attributes that are not present use the defaults.
Parameters
----------
transforms : sequence of `~astropy.coordinates.CoordinateTransform` object
The sequence of transformations to apply.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `~astropy.coordinates.TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
collapse_static_mats : bool
If `True`, consecutive `~astropy.coordinates.StaticMatrixTransform`
will be collapsed into a single transformation to speed up the
calculation.
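    Examples
    --------
    A hedged sketch chaining two matrix transforms through an intermediate
    frame (the three frame classes and the matrices are hypothetical). With
    ``collapse_static_mats=True`` the two static matrices below would be
    merged into a single step::

        t1 = StaticMatrixTransform(m12, MyFrame1, MyFrame2)
        t2 = StaticMatrixTransform(m23, MyFrame2, MyFrame3)
        trans = CompositeTransform([t1, t2], MyFrame1, MyFrame3)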
"""
def __init__(
self,
transforms,
fromsys,
tosys,
priority=1,
register_graph=None,
collapse_static_mats=True,
):
super().__init__(
fromsys, tosys, priority=priority, register_graph=register_graph
)
if collapse_static_mats:
transforms = self._combine_statics(transforms)
self.transforms = tuple(transforms)
def _combine_statics(self, transforms):
"""
        Combines consecutive `StaticMatrixTransform` objects into single
        transforms and returns the resulting list of transforms.
"""
newtrans = []
for currtrans in transforms:
lasttrans = newtrans[-1] if len(newtrans) > 0 else None
if isinstance(lasttrans, StaticMatrixTransform) and isinstance(
currtrans, StaticMatrixTransform
):
newtrans[-1] = StaticMatrixTransform(
currtrans.matrix @ lasttrans.matrix,
lasttrans.fromsys,
currtrans.tosys,
)
else:
newtrans.append(currtrans)
return newtrans
def __call__(self, fromcoord, toframe):
curr_coord = fromcoord
for t in self.transforms:
# build an intermediate frame with attributes taken from either
# `toframe`, or if not there, `fromcoord`, or if not there, use
# the defaults
# TODO: caching this information when creating the transform may
# speed things up a lot
frattrs = {}
for inter_frame_attr_nm in t.tosys.frame_attributes:
if hasattr(toframe, inter_frame_attr_nm):
attr = getattr(toframe, inter_frame_attr_nm)
frattrs[inter_frame_attr_nm] = attr
elif hasattr(fromcoord, inter_frame_attr_nm):
attr = getattr(fromcoord, inter_frame_attr_nm)
frattrs[inter_frame_attr_nm] = attr
curr_toframe = t.tosys(**frattrs)
curr_coord = t(curr_coord, curr_toframe)
# this is safe even in the case where self.transforms is empty, because
# coordinate objects are immutable, so copying is not needed
return curr_coord
def _as_single_transform(self):
"""
Return an encapsulated version of the composite transform so that it appears to
be a single transform.
The returned transform internally calls the constituent transforms. If all of
the transforms are affine, the merged transform is
`~astropy.coordinates.DynamicMatrixTransform` (if there are no
origin shifts) or `~astropy.coordinates.AffineTransform`
(otherwise). If at least one of the transforms is not affine, the merged
transform is
`~astropy.coordinates.FunctionTransformWithFiniteDifference`.
"""
# Create a list of the transforms including flattening any constituent CompositeTransform
transforms = [
t if not isinstance(t, CompositeTransform) else t._as_single_transform()
for t in self.transforms
]
        if all(isinstance(t, BaseAffineTransform) for t in transforms):
# Check if there may be an origin shift
fixed_origin = all(
isinstance(t, (StaticMatrixTransform, DynamicMatrixTransform))
for t in transforms
)
# Dynamically define the transformation function
def single_transform(from_coo, to_frame):
if from_coo.is_equivalent_frame(to_frame): # loopback to the same frame
return None if fixed_origin else (None, None)
# Create a merged attribute dictionary for any intermediate frames
# For any attributes shared by the "from"/"to" frames, the "to" frame takes
# precedence because this is the same choice implemented in __call__()
merged_attr = {
name: getattr(from_coo, name) for name in from_coo.frame_attributes
}
merged_attr.update(
{
name: getattr(to_frame, name)
for name in to_frame.frame_attributes
}
)
affine_params = (None, None)
# Step through each transform step (frame A -> frame B)
for i, t in enumerate(transforms):
# Extract the relevant attributes for frame A
if i == 0:
# If frame A is actually the initial frame, preserve its attributes
a_attr = {
name: getattr(from_coo, name)
for name in from_coo.frame_attributes
}
else:
a_attr = {
k: v
for k, v in merged_attr.items()
if k in t.fromsys.frame_attributes
}
# Extract the relevant attributes for frame B
b_attr = {
k: v
for k, v in merged_attr.items()
if k in t.tosys.frame_attributes
}
# Obtain the affine parameters for the transform
# Note that we insert some dummy data into frame A because the transformation
# machinery requires there to be data present. Removing that limitation
# is a possible TODO, but some care would need to be taken because some affine
# transforms have branching code depending on the presence of differentials.
next_affine_params = t._affine_params(
t.fromsys(from_coo.data, **a_attr), t.tosys(**b_attr)
)
# Combine the affine parameters with the running set
affine_params = _combine_affine_params(
affine_params, next_affine_params
)
# If there is no origin shift, return only the matrix
return affine_params[0] if fixed_origin else affine_params
# The return type depends on whether there is any origin shift
transform_type = DynamicMatrixTransform if fixed_origin else AffineTransform
else:
# Dynamically define the transformation function
def single_transform(from_coo, to_frame):
if from_coo.is_equivalent_frame(to_frame): # loopback to the same frame
return to_frame.realize_frame(from_coo.data)
return self(from_coo, to_frame)
transform_type = FunctionTransformWithFiniteDifference
return transform_type(
single_transform, self.fromsys, self.tosys, priority=self.priority
)
def _combine_affine_params(params, next_params):
"""
Combine two sets of affine parameters.
The parameters for an affine transformation are a 3 x 3 Cartesian
transformation matrix and a displacement vector, which can include an
attached velocity. Either type of parameter can be ``None``.
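    Composing ``x -> M @ x + vec`` with a subsequent ``x -> next_M @ x +
    next_vec`` gives ``x -> (next_M @ M) @ x + (next_M @ vec + next_vec)``,
    which is what is computed below, with ``None`` standing in for an
    identity matrix or a zero displacement.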
"""
M, vec = params
next_M, next_vec = next_params
# Multiply the transformation matrices if they both exist
if M is not None and next_M is not None:
new_M = next_M @ M
else:
new_M = M if M is not None else next_M
if vec is not None:
# Transform the first displacement vector by the second transformation matrix
if next_M is not None:
vec = vec.transform(next_M)
# Calculate the new displacement vector
if next_vec is not None:
if "s" in vec.differentials and "s" in next_vec.differentials:
# Adding vectors with velocities takes more steps
# TODO: Add support in representation.py
new_vec_velocity = vec.differentials["s"] + next_vec.differentials["s"]
new_vec = vec.without_differentials() + next_vec.without_differentials()
new_vec = new_vec.with_differentials({"s": new_vec_velocity})
else:
new_vec = vec + next_vec
else:
new_vec = vec
else:
new_vec = next_vec
return new_M, new_vec
# map transform classes to colorblind-safe colors
trans_to_color = {}
trans_to_color[AffineTransform] = "#555555" # gray
trans_to_color[FunctionTransform] = "#783001" # dark red-ish/brown
trans_to_color[FunctionTransformWithFiniteDifference] = "#d95f02" # red-ish
trans_to_color[StaticMatrixTransform] = "#7570b3" # blue-ish
trans_to_color[DynamicMatrixTransform] = "#1b9e77" # green-ish
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains the classes and utility functions for distance and
cartesian coordinates.
"""
import warnings
import numpy as np
from astropy import units as u
from astropy.utils.exceptions import AstropyWarning
from .angles import Angle
__all__ = ["Distance"]
__doctest_requires__ = {"*": ["scipy"]}
class Distance(u.SpecificTypeQuantity):
"""
A one-dimensional distance.
This can be initialized by providing one of the following:
* Distance ``value`` (array or float) and a ``unit``
* |Quantity| object with dimensionality of length
* Redshift and (optionally) a `~astropy.cosmology.Cosmology`
* Distance modulus
* Parallax
Parameters
----------
value : scalar or `~astropy.units.Quantity` ['length']
The value of this distance.
unit : `~astropy.units.UnitBase` ['length']
The unit for this distance.
z : float
A redshift for this distance. It will be converted to a distance
by computing the luminosity distance for this redshift given the
cosmology specified by ``cosmology``. Must be given as a keyword
argument.
cosmology : `~astropy.cosmology.Cosmology` or None
A cosmology that will be used to compute the distance from ``z``.
If `None`, the current cosmology will be used (see
`astropy.cosmology` for details).
distmod : float or `~astropy.units.Quantity`
The distance modulus for this distance. Note that if ``unit`` is not
provided, a guess will be made at the unit between AU, pc, kpc, and Mpc.
parallax : `~astropy.units.Quantity` or `~astropy.coordinates.Angle`
The parallax in angular units.
dtype : `~numpy.dtype`, optional
See `~astropy.units.Quantity`.
copy : bool, optional
See `~astropy.units.Quantity`.
order : {'C', 'F', 'A'}, optional
See `~astropy.units.Quantity`.
subok : bool, optional
See `~astropy.units.Quantity`.
ndmin : int, optional
See `~astropy.units.Quantity`.
allow_negative : bool, optional
Whether to allow negative distances (which are possible in some
cosmologies). Default: `False`.
Raises
------
`~astropy.units.UnitsError`
If the ``unit`` is not a length unit.
ValueError
If value specified is less than 0 and ``allow_negative=False``.
If ``cosmology`` is provided when ``z`` is *not* given.
If either none or more than one of ``value``, ``z``, ``distmod``,
or ``parallax`` were given.
Examples
--------
>>> from astropy import units as u
>>> from astropy.cosmology import WMAP5
>>> Distance(10, u.Mpc)
<Distance 10. Mpc>
>>> Distance(40*u.pc, unit=u.kpc)
<Distance 0.04 kpc>
>>> Distance(z=0.23) # doctest: +FLOAT_CMP
<Distance 1184.01657566 Mpc>
>>> Distance(z=0.23, cosmology=WMAP5) # doctest: +FLOAT_CMP
<Distance 1147.78831918 Mpc>
>>> Distance(distmod=24.47*u.mag) # doctest: +FLOAT_CMP
<Distance 783.42964277 kpc>
>>> Distance(parallax=21.34*u.mas) # doctest: +FLOAT_CMP
<Distance 46.86035614 pc>
"""
_equivalent_unit = u.m
_include_easy_conversion_members = True
def __new__(
cls,
value=None,
unit=None,
z=None,
cosmology=None,
distmod=None,
parallax=None,
dtype=np.inexact,
copy=True,
order=None,
subok=False,
ndmin=0,
allow_negative=False,
):
n_not_none = sum(x is not None for x in [value, z, distmod, parallax])
if n_not_none == 0:
raise ValueError(
"none of `value`, `z`, `distmod`, or `parallax` "
"were given to Distance constructor"
)
elif n_not_none > 1:
raise ValueError(
"more than one of `value`, `z`, `distmod`, or "
"`parallax` were given to Distance constructor"
)
if value is None:
# If something else but `value` was provided then a new array will
# be created anyways and there is no need to copy that.
copy = False
if z is not None:
if cosmology is None:
from astropy.cosmology import default_cosmology
cosmology = default_cosmology.get()
value = cosmology.luminosity_distance(z)
elif cosmology is not None:
raise ValueError(
"a `cosmology` was given but `z` was not "
"provided in Distance constructor"
)
elif distmod is not None:
value = cls._distmod_to_pc(distmod)
if unit is None:
# if the unit is not specified, guess based on the mean of
# the log of the distance
meanlogval = np.log10(value.value).mean()
if meanlogval > 6:
unit = u.Mpc
elif meanlogval > 3:
unit = u.kpc
elif meanlogval < -3: # ~200 AU
unit = u.AU
else:
unit = u.pc
elif parallax is not None:
if unit is None:
unit = u.pc
value = parallax.to_value(unit, equivalencies=u.parallax())
if np.any(parallax < 0):
if allow_negative:
warnings.warn(
"negative parallaxes are converted to NaN distances even when"
" `allow_negative=True`, because negative parallaxes cannot be"
" transformed into distances. See the discussion in this paper:"
" https://arxiv.org/abs/1507.02105",
AstropyWarning,
)
else:
raise ValueError(
"some parallaxes are negative, which are not "
"interpretable as distances. See the discussion in "
"this paper: https://arxiv.org/abs/1507.02105 . You "
"can convert negative parallaxes to NaN distances by "
"providing the `allow_negative=True` argument."
)
# now we have arguments like for a Quantity, so let it do the work
distance = super().__new__(
cls,
value,
unit,
dtype=dtype,
copy=copy,
order=order,
subok=subok,
ndmin=ndmin,
)
if not allow_negative and np.any(distance.value < 0):
raise ValueError(
"distance must be >= 0. Use the argument "
"`allow_negative=True` to allow negative values."
)
return distance
@property
def z(self):
"""Short for ``self.compute_z()``."""
return self.compute_z()
def compute_z(self, cosmology=None, **atzkw):
"""
The redshift for this distance assuming its physical distance is
a luminosity distance.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology` or None
The cosmology to assume for this calculation, or `None` to use the
current cosmology (see `astropy.cosmology` for details).
**atzkw
keyword arguments for :func:`~astropy.cosmology.z_at_value`
Returns
-------
z : `~astropy.units.Quantity`
The redshift of this distance given the provided ``cosmology``.
Warnings
--------
This method can be slow for large arrays.
The redshift is determined using :func:`astropy.cosmology.z_at_value`,
which handles vector inputs (e.g. an array of distances) by
element-wise calling of :func:`scipy.optimize.minimize_scalar`.
For faster results consider using an interpolation table;
:func:`astropy.cosmology.z_at_value` provides details.
See Also
--------
:func:`astropy.cosmology.z_at_value` : Find the redshift corresponding to a
:meth:`astropy.cosmology.FLRW.luminosity_distance`.
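        Examples
        --------
        A short sketch, skipped under doctest because the inversion needs
        scipy and can be slow::

            >>> from astropy import units as u
            >>> from astropy.cosmology import WMAP5
            >>> Distance(1000 * u.Mpc).compute_z(WMAP5)  # doctest: +SKIP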
"""
from astropy.cosmology import z_at_value
if cosmology is None:
from astropy.cosmology import default_cosmology
cosmology = default_cosmology.get()
atzkw.setdefault("ztol", 1.0e-10)
return z_at_value(cosmology.luminosity_distance, self, **atzkw)
@property
def distmod(self):
"""The distance modulus as a `~astropy.units.Quantity`."""
val = 5.0 * np.log10(self.to_value(u.pc)) - 5.0
return u.Quantity(val, u.mag, copy=False)
@classmethod
def _distmod_to_pc(cls, dm):
dm = u.Quantity(dm, u.mag)
return cls(10 ** ((dm.value + 5) / 5.0), u.pc, copy=False)
@property
def parallax(self):
"""The parallax angle as an `~astropy.coordinates.Angle` object."""
return Angle(self.to(u.milliarcsecond, u.parallax()))
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import inspect
import re
from collections.abc import Sequence
import numpy as np
from astropy import units as u
from astropy.units import IrreducibleUnit, Unit
from .baseframe import (
BaseCoordinateFrame,
_get_diff_cls,
_get_repr_cls,
frame_transform_graph,
)
from .representation import (
BaseRepresentation,
SphericalRepresentation,
UnitSphericalRepresentation,
)
"""
This module contains utility functions to make the SkyCoord initializer more modular
and maintainable. No functionality here should be in the public API, but rather used as
part of creating SkyCoord objects.
"""
PLUS_MINUS_RE = re.compile(r"(\+|\-)")
J_PREFIXED_RA_DEC_RE = re.compile(
r"""J # J prefix
([0-9]{6,7}\.?[0-9]{0,2}) # RA as HHMMSS.ss or DDDMMSS.ss, optional decimal digits
([\+\-][0-9]{6}\.?[0-9]{0,2})\s*$ # Dec as DDMMSS.ss, optional decimal digits
""",
re.VERBOSE,
)
def _get_frame_class(frame):
"""
Get a frame class from the input `frame`, which could be a frame name
string, or frame class.
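    Examples
    --------
    For example, with the built-in ICRS frame:
    >>> from astropy.coordinates import ICRS
    >>> _get_frame_class("icrs") is ICRS
    True
    >>> _get_frame_class(ICRS) is ICRS
    True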
"""
if isinstance(frame, str):
frame_names = frame_transform_graph.get_names()
if frame not in frame_names:
raise ValueError(
f'Coordinate frame name "{frame}" is not a known '
f"coordinate frame ({sorted(frame_names)})"
)
frame_cls = frame_transform_graph.lookup_name(frame)
elif inspect.isclass(frame) and issubclass(frame, BaseCoordinateFrame):
frame_cls = frame
else:
raise ValueError(
"Coordinate frame must be a frame name or frame class, not a"
f" '{frame.__class__.__name__}'"
)
return frame_cls
_conflict_err_msg = (
"Coordinate attribute '{0}'={1!r} conflicts with keyword argument '{0}'={2!r}. This"
" usually means an attribute was set on one of the input objects and also in the "
"keyword arguments to {3}"
)
def _get_frame_without_data(args, kwargs):
"""
Determines the coordinate frame from input SkyCoord args and kwargs.
This function extracts (removes) all frame attributes from the kwargs and
determines the frame class either using the kwargs, or using the first
element in the args (if a single frame object is passed in, for example).
This function allows a frame to be specified as a string like 'icrs' or a
frame class like ICRS, or an instance ICRS(), as long as the instance frame
attributes don't conflict with kwargs passed in (which could require a
three-way merge with the coordinate data possibly specified via the args).
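    Examples
    --------
    A small sketch of the string-frame path (note that ``kwargs`` is
    consumed in place):
    >>> frame_cls, frame_kwargs = _get_frame_without_data([], {"frame": "fk5"})
    >>> frame_cls.__name__, frame_kwargs
    ('FK5', {})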
"""
from .sky_coordinate import SkyCoord
# We eventually (hopefully) fill and return these by extracting the frame
# and frame attributes from the input:
frame_cls = None
frame_cls_kwargs = {}
# The first place to check: the frame could be specified explicitly
frame = kwargs.pop("frame", None)
if frame is not None:
# Here the frame was explicitly passed in as a keyword argument.
# If the frame is an instance or SkyCoord, we extract the attributes
# and split the instance into the frame class and an attributes dict
if isinstance(frame, SkyCoord):
# If the frame was passed as a SkyCoord, we also want to preserve
# any extra attributes (e.g., obstime) if they are not already
# specified in the kwargs. We preserve these extra attributes by
# adding them to the kwargs dict:
for attr in frame._extra_frameattr_names:
if attr in kwargs and np.any(getattr(frame, attr) != kwargs[attr]):
# This SkyCoord attribute passed in with the frame= object
# conflicts with an attribute passed in directly to the
# SkyCoord initializer as a kwarg:
raise ValueError(
_conflict_err_msg.format(
attr, getattr(frame, attr), kwargs[attr], "SkyCoord"
)
)
else:
kwargs[attr] = getattr(frame, attr)
frame = frame.frame
if isinstance(frame, BaseCoordinateFrame):
# Extract any frame attributes
for attr in frame.frame_attributes:
# If the frame was specified as an instance, we have to make
# sure that no frame attributes were specified as kwargs - this
# would require a potential three-way merge:
if attr in kwargs:
raise ValueError(
f"Cannot specify frame attribute '{attr}' directly as an"
" argument to SkyCoord because a frame instance was passed in."
" Either pass a frame class, or modify the frame attributes of"
" the input frame instance."
)
elif not frame.is_frame_attr_default(attr):
kwargs[attr] = getattr(frame, attr)
frame_cls = frame.__class__
# Make sure we propagate representation/differential _type choices,
# unless these are specified directly in the kwargs:
kwargs.setdefault("representation_type", frame.representation_type)
kwargs.setdefault("differential_type", frame.differential_type)
if frame_cls is None: # frame probably a string
frame_cls = _get_frame_class(frame)
# Check that the new frame doesn't conflict with existing coordinate frame
# if a coordinate is supplied in the args list. If the frame still had not
# been set by this point and a coordinate was supplied, then use that frame.
for arg in args:
# this catches the "single list passed in" case. For that case we want
# to allow the first argument to set the class. That's OK because
# _parse_coordinate_arg goes and checks that the frames match between
# the first and all the others
if isinstance(arg, (Sequence, np.ndarray)) and len(args) == 1 and len(arg) > 0:
arg = arg[0]
coord_frame_obj = coord_frame_cls = None
if isinstance(arg, BaseCoordinateFrame):
coord_frame_obj = arg
elif isinstance(arg, SkyCoord):
coord_frame_obj = arg.frame
if coord_frame_obj is not None:
coord_frame_cls = coord_frame_obj.__class__
frame_diff = coord_frame_obj.get_representation_cls("s")
if frame_diff is not None:
# we do this check because otherwise if there's no default
            # differential (i.e. it is None), the code below chokes, but
# None still gets through if the user *requests* it
kwargs.setdefault("differential_type", frame_diff)
for attr in coord_frame_obj.frame_attributes:
if (
attr in kwargs
and not coord_frame_obj.is_frame_attr_default(attr)
and np.any(kwargs[attr] != getattr(coord_frame_obj, attr))
):
raise ValueError(
f"Frame attribute '{attr}' has conflicting values between the"
" input coordinate data and either keyword arguments or the "
"frame specification (frame=...):"
f" {getattr(coord_frame_obj, attr)} =/= {kwargs[attr]}"
)
elif attr not in kwargs and not coord_frame_obj.is_frame_attr_default(
attr
):
kwargs[attr] = getattr(coord_frame_obj, attr)
if coord_frame_cls is not None:
if frame_cls is None:
frame_cls = coord_frame_cls
elif frame_cls is not coord_frame_cls:
raise ValueError(
f"Cannot override frame='{coord_frame_cls.__name__}' of input "
f"coordinate with new frame='{frame_cls.__name__}'. Instead, "
"transform the coordinate."
)
if frame_cls is None:
from .builtin_frames import ICRS
frame_cls = ICRS
# By now, frame_cls should be set - if it's not, something went wrong
if not issubclass(frame_cls, BaseCoordinateFrame):
# We should hopefully never get here...
raise ValueError(f"Frame class has unexpected type: {frame_cls.__name__}")
for attr in frame_cls.frame_attributes:
if attr in kwargs:
frame_cls_kwargs[attr] = kwargs.pop(attr)
if "representation_type" in kwargs:
frame_cls_kwargs["representation_type"] = _get_repr_cls(
kwargs.pop("representation_type")
)
differential_type = kwargs.pop("differential_type", None)
if differential_type is not None:
frame_cls_kwargs["differential_type"] = _get_diff_cls(differential_type)
return frame_cls, frame_cls_kwargs
def _parse_coordinate_data(frame, args, kwargs):
"""
Extract coordinate data from the args and kwargs passed to SkyCoord.
By this point, we assume that all of the frame attributes have been
extracted from kwargs (see _get_frame_without_data()), so all that are left
are (1) extra SkyCoord attributes, and (2) the coordinate data, specified in
any of the valid ways.
"""
valid_skycoord_kwargs = {}
valid_components = {}
info = None
# Look through the remaining kwargs to see if any are valid attribute names
# by asking the frame transform graph:
attr_names = list(kwargs.keys())
for attr in attr_names:
if attr in frame_transform_graph.frame_attributes:
valid_skycoord_kwargs[attr] = kwargs.pop(attr)
# By this point in parsing the arguments, anything left in the args and
# kwargs should be data. Either as individual components, or a list of
# objects, or a representation, etc.
# Get units of components
units = _get_representation_component_units(args, kwargs)
# Grab any frame-specific attr names like `ra` or `l` or `distance` from
# kwargs and move them to valid_components.
valid_components.update(_get_representation_attrs(frame, units, kwargs))
# Error if anything is still left in kwargs
if kwargs:
# The next few lines add a more user-friendly error message to a
# common and confusing situation when the user specifies, e.g.,
# `pm_ra` when they really should be passing `pm_ra_cosdec`. The
# extra error should only turn on when the positional representation
# is spherical, and when the component 'pm_<lon>' is passed.
pm_message = ""
if frame.representation_type == SphericalRepresentation:
frame_names = list(frame.get_representation_component_names().keys())
lon_name = frame_names[0]
lat_name = frame_names[1]
if f"pm_{lon_name}" in list(kwargs.keys()):
pm_message = (
"\n\n By default, most frame classes expect the longitudinal proper"
" motion to include the cos(latitude) term, named"
f" `pm_{lon_name}_cos{lat_name}`. Did you mean to pass in this"
" component?"
)
raise ValueError(
"Unrecognized keyword argument(s) {}{}".format(
", ".join(f"'{key}'" for key in kwargs), pm_message
)
)
# Finally deal with the unnamed args. This figures out what the arg[0]
# is and returns a dict with appropriate key/values for initializing
# frame class. Note that differentials are *never* valid args, only
# kwargs. So they are not accounted for here (unless they're in a frame
# or SkyCoord object)
if args:
if len(args) == 1:
# One arg which must be a coordinate. In this case coord_kwargs
# will contain keys like 'ra', 'dec', 'distance' along with any
# frame attributes like equinox or obstime which were explicitly
# specified in the coordinate object (i.e. non-default).
_skycoord_kwargs, _components = _parse_coordinate_arg(
args[0], frame, units, kwargs
)
# Copy other 'info' attr only if it has actually been defined.
if "info" in getattr(args[0], "__dict__", ()):
info = args[0].info
elif len(args) <= 3:
_skycoord_kwargs = {}
_components = {}
frame_attr_names = frame.representation_component_names.keys()
repr_attr_names = frame.representation_component_names.values()
for arg, frame_attr_name, repr_attr_name, unit in zip(
args, frame_attr_names, repr_attr_names, units
):
attr_class = frame.representation_type.attr_classes[repr_attr_name]
_components[frame_attr_name] = attr_class(arg, unit=unit)
else:
raise ValueError(
f"Must supply no more than three positional arguments, got {len(args)}"
)
# The next two loops copy the component and skycoord attribute data into
# their final, respective "valid_" dictionaries. For each, we check that
# there are no relevant conflicts with values specified by the user
# through other means:
# First validate the component data
for attr, coord_value in _components.items():
if attr in valid_components:
raise ValueError(
_conflict_err_msg.format(
attr, coord_value, valid_components[attr], "SkyCoord"
)
)
valid_components[attr] = coord_value
# Now validate the custom SkyCoord attributes
for attr, value in _skycoord_kwargs.items():
if attr in valid_skycoord_kwargs and np.any(
valid_skycoord_kwargs[attr] != value
):
raise ValueError(
_conflict_err_msg.format(
attr, value, valid_skycoord_kwargs[attr], "SkyCoord"
)
)
valid_skycoord_kwargs[attr] = value
return valid_skycoord_kwargs, valid_components, info
def _get_representation_component_units(args, kwargs):
"""
Get the unit from kwargs for the *representation* components (not the
differentials).
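    Examples
    --------
    For example:
    >>> _get_representation_component_units((), {"unit": "deg,deg"})
    [Unit("deg"), Unit("deg"), None]
    >>> _get_representation_component_units((), {})
    [None, None, None]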
"""
if "unit" not in kwargs:
units = [None, None, None]
else:
units = kwargs.pop("unit")
if isinstance(units, str):
units = [x.strip() for x in units.split(",")]
# Allow for input like unit='deg' or unit='m'
if len(units) == 1:
units = [units[0], units[0], units[0]]
elif isinstance(units, (Unit, IrreducibleUnit)):
units = [units, units, units]
try:
units = [(Unit(x) if x else None) for x in units]
units.extend(None for x in range(3 - len(units)))
if len(units) > 3:
raise ValueError()
except Exception as err:
raise ValueError(
"Unit keyword must have one to three unit values as "
"tuple or comma-separated string."
) from err
return units
def _parse_coordinate_arg(coords, frame, units, init_kwargs):
"""
Single unnamed arg supplied. This must be:
- Coordinate frame with data
- Representation
- SkyCoord
- List or tuple of:
- String which splits into two values
- Iterable with two values
- SkyCoord, frame, or representation objects.
Returns a dict mapping coordinate attribute names to values (or lists of
values)
"""
from .sky_coordinate import SkyCoord
is_scalar = False # Differentiate between scalar and list input
# valid_kwargs = {} # Returned dict of lon, lat, and distance (optional)
components = {}
skycoord_kwargs = {}
frame_attr_names = list(frame.representation_component_names.keys())
repr_attr_names = list(frame.representation_component_names.values())
repr_attr_classes = list(frame.representation_type.attr_classes.values())
n_attr_names = len(repr_attr_names)
# Turn a single string into a list of strings for convenience
if isinstance(coords, str):
is_scalar = True
coords = [coords]
if isinstance(coords, (SkyCoord, BaseCoordinateFrame)):
# Note that during parsing of `frame` it is checked that any coordinate
# args have the same frame as explicitly supplied, so don't worry here.
if not coords.has_data:
raise ValueError("Cannot initialize from a frame without coordinate data")
data = coords.data.represent_as(frame.representation_type)
values = [] # List of values corresponding to representation attrs
repr_attr_name_to_drop = []
for repr_attr_name in repr_attr_names:
# If coords did not have an explicit distance then don't include in initializers.
if (
isinstance(coords.data, UnitSphericalRepresentation)
and repr_attr_name == "distance"
):
repr_attr_name_to_drop.append(repr_attr_name)
continue
# Get the value from `data` in the eventual representation
values.append(getattr(data, repr_attr_name))
# drop the ones that were skipped because they were distances
for nametodrop in repr_attr_name_to_drop:
nameidx = repr_attr_names.index(nametodrop)
del repr_attr_names[nameidx]
del units[nameidx]
del frame_attr_names[nameidx]
del repr_attr_classes[nameidx]
if coords.data.differentials and "s" in coords.data.differentials:
orig_vel = coords.data.differentials["s"]
vel = coords.data.represent_as(
frame.representation_type, frame.get_representation_cls("s")
).differentials["s"]
for frname, reprname in frame.get_representation_component_names(
"s"
).items():
if (
reprname == "d_distance"
and not hasattr(orig_vel, reprname)
and "unit" in orig_vel.get_name()
):
continue
values.append(getattr(vel, reprname))
units.append(None)
frame_attr_names.append(frname)
repr_attr_names.append(reprname)
repr_attr_classes.append(vel.attr_classes[reprname])
for attr in frame_transform_graph.frame_attributes:
value = getattr(coords, attr, None)
use_value = (
isinstance(coords, SkyCoord) or attr not in coords.frame_attributes
)
if use_value and value is not None:
skycoord_kwargs[attr] = value
elif isinstance(coords, BaseRepresentation):
if coords.differentials and "s" in coords.differentials:
diffs = frame.get_representation_cls("s")
data = coords.represent_as(frame.representation_type, diffs)
values = [
getattr(data, repr_attr_name) for repr_attr_name in repr_attr_names
]
for frname, reprname in frame.get_representation_component_names(
"s"
).items():
values.append(getattr(data.differentials["s"], reprname))
units.append(None)
frame_attr_names.append(frname)
repr_attr_names.append(reprname)
repr_attr_classes.append(data.differentials["s"].attr_classes[reprname])
else:
data = coords.represent_as(frame.representation_type)
values = [
getattr(data, repr_attr_name) for repr_attr_name in repr_attr_names
]
elif (
isinstance(coords, np.ndarray)
and coords.dtype.kind in "if"
and coords.ndim == 2
and coords.shape[1] <= 3
):
# 2-d array of coordinate values. Handle specially for efficiency.
values = coords.transpose() # Iterates over repr attrs
elif isinstance(coords, (Sequence, np.ndarray)):
# Handles list-like input.
vals = []
is_ra_dec_representation = (
"ra" in frame.representation_component_names
and "dec" in frame.representation_component_names
)
coord_types = (SkyCoord, BaseCoordinateFrame, BaseRepresentation)
if any(isinstance(coord, coord_types) for coord in coords):
# this parsing path is used when there are coordinate-like objects
# in the list - instead of creating lists of values, we create
# SkyCoords from the list elements and then combine them.
scs = [SkyCoord(coord, **init_kwargs) for coord in coords]
# Check that all frames are equivalent
for sc in scs[1:]:
if not sc.is_equivalent_frame(scs[0]):
raise ValueError(
f"List of inputs don't have equivalent frames: {sc} != {scs[0]}"
)
# Now use the first to determine if they are all UnitSpherical
allunitsphrepr = isinstance(scs[0].data, UnitSphericalRepresentation)
# get the frame attributes from the first coord in the list, because
# from the above we know it matches all the others. First copy over
# the attributes that are in the frame itself, then copy over any
# extras in the SkyCoord
for fattrnm in scs[0].frame.frame_attributes:
skycoord_kwargs[fattrnm] = getattr(scs[0].frame, fattrnm)
for fattrnm in scs[0]._extra_frameattr_names:
skycoord_kwargs[fattrnm] = getattr(scs[0], fattrnm)
# Now combine the values, to be used below
values = []
for data_attr_name, repr_attr_name in zip(
frame_attr_names, repr_attr_names
):
if allunitsphrepr and repr_attr_name == "distance":
# if they are *all* UnitSpherical, don't give a distance
continue
data_vals = []
for sc in scs:
data_val = getattr(sc, data_attr_name)
data_vals.append(
data_val.reshape((1,)) if sc.isscalar else data_val
)
concat_vals = np.concatenate(data_vals)
# Hack because np.concatenate doesn't fully work with Quantity
if isinstance(concat_vals, u.Quantity):
concat_vals._unit = data_val.unit
values.append(concat_vals)
else:
# none of the elements are "frame-like"
# turn into a list of lists like [[v1_0, v2_0, v3_0], ... [v1_N, v2_N, v3_N]]
for coord in coords:
if isinstance(coord, str):
coord1 = coord.split()
if len(coord1) == 6:
coord = (" ".join(coord1[:3]), " ".join(coord1[3:]))
elif is_ra_dec_representation:
coord = _parse_ra_dec(coord)
else:
coord = coord1
vals.append(coord) # Assumes coord is a sequence at this point
# Do some basic validation of the list elements: all have a length and all
# lengths the same
try:
n_coords = sorted({len(x) for x in vals})
except Exception as err:
raise ValueError(
"One or more elements of input sequence does not have a length."
) from err
if len(n_coords) > 1:
raise ValueError(
"Input coordinate values must have same number of elements, found"
f" {n_coords}"
)
n_coords = n_coords[0]
# Must have no more coord inputs than representation attributes
if n_coords > n_attr_names:
raise ValueError(
f"Input coordinates have {n_coords} values but representation"
f" {frame.representation_type.get_name()} only accepts"
f" {n_attr_names}"
)
# Now transpose vals to get [(v1_0 .. v1_N), (v2_0 .. v2_N), (v3_0 .. v3_N)]
            # (ok since we know it is exactly rectangular). (Note: can't just use
            # zip(*vals) because Longitude et al distinguish list from tuple, so
            # [a1, a2, ..] is needed while (a1, a2, ..) doesn't work.)
values = [list(x) for x in zip(*vals)]
if is_scalar:
values = [x[0] for x in values]
else:
raise ValueError("Cannot parse coordinates from first argument")
# Finally we have a list of values from which to create the keyword args
# for the frame initialization. Validate by running through the appropriate
# class initializer and supply units (which might be None).
try:
for frame_attr_name, repr_attr_class, value, unit in zip(
frame_attr_names, repr_attr_classes, values, units
):
components[frame_attr_name] = repr_attr_class(value, unit=unit, copy=False)
except Exception as err:
raise ValueError(
f'Cannot parse first argument data "{value}" for attribute'
f" {frame_attr_name}"
) from err
return skycoord_kwargs, components
def _get_representation_attrs(frame, units, kwargs):
"""
Find instances of the "representation attributes" for specifying data
for this frame. Pop them off of kwargs, run through the appropriate class
constructor (to validate and apply unit), and put into the output
valid_kwargs. "Representation attributes" are the frame-specific aliases
for the underlying data values in the representation, e.g. "ra" for "lon"
for many equatorial spherical representations, or "w" for "x" in the
cartesian representation of Galactic.
This also gets any *differential* kwargs, because they go into the same
frame initializer later on.
"""
frame_attr_names = frame.representation_component_names.keys()
repr_attr_classes = frame.representation_type.attr_classes.values()
valid_kwargs = {}
for frame_attr_name, repr_attr_class, unit in zip(
frame_attr_names, repr_attr_classes, units
):
value = kwargs.pop(frame_attr_name, None)
if value is not None:
try:
valid_kwargs[frame_attr_name] = repr_attr_class(value, unit=unit)
except u.UnitConversionError as err:
error_message = (
f"Unit '{unit}' ({unit.physical_type}) could not be applied to"
f" '{frame_attr_name}'. This can occur when passing units for some"
" coordinate components when other components are specified as"
" Quantity objects. Either pass a list of units for all components"
" (and unit-less coordinate data), or pass Quantities for all"
" components."
)
raise u.UnitConversionError(error_message) from err
# also check the differentials. They aren't included in the units keyword,
# so we only look for the names.
differential_type = frame.differential_type
if differential_type is not None:
for frame_name, repr_name in frame.get_representation_component_names(
"s"
).items():
diff_attr_class = differential_type.attr_classes[repr_name]
value = kwargs.pop(frame_name, None)
if value is not None:
valid_kwargs[frame_name] = diff_attr_class(value)
return valid_kwargs
def _parse_ra_dec(coord_str):
"""Parse RA and Dec values from a coordinate string.
Currently the following formats are supported:
* space separated 6-value format
* space separated <6-value format, this requires a plus or minus sign
separation between RA and Dec
* sign separated format
* JHHMMSS.ss+DDMMSS.ss format, with up to two optional decimal digits
* JDDDMMSS.ss+DDMMSS.ss format, with up to two optional decimal digits
Parameters
----------
coord_str : str
Coordinate string to parse.
Returns
-------
coord : str or list of str
Parsed coordinate values.
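    Examples
    --------
    For example, the 6-value and J-prefixed formats parse as:
    >>> _parse_ra_dec("01 23 45.6 +12 34 56.7")
    ('01 23 45.6', '+12 34 56.7')
    >>> _parse_ra_dec("J012345.6+123456.7")
    ('01 23 45.6', '+12 34 56.7')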
"""
if isinstance(coord_str, str):
coord1 = coord_str.split()
else:
# This exception should never be raised from SkyCoord
raise TypeError("coord_str must be a single str")
if len(coord1) == 6:
coord = (" ".join(coord1[:3]), " ".join(coord1[3:]))
elif len(coord1) > 2:
coord = PLUS_MINUS_RE.split(coord_str)
coord = (coord[0], " ".join(coord[1:]))
elif len(coord1) == 1:
match_j = J_PREFIXED_RA_DEC_RE.match(coord_str)
if match_j:
coord = match_j.groups()
if len(coord[0].split(".")[0]) == 7:
coord = (
f"{coord[0][0:3]} {coord[0][3:5]} {coord[0][5:]}",
f"{coord[1][0:3]} {coord[1][3:5]} {coord[1][5:]}",
)
else:
coord = (
f"{coord[0][0:2]} {coord[0][2:4]} {coord[0][4:]}",
f"{coord[1][0:3]} {coord[1][3:5]} {coord[1][5:]}",
)
else:
coord = PLUS_MINUS_RE.split(coord_str)
coord = (coord[0], " ".join(coord[1:]))
else:
coord = coord1
return coord
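# A minimal illustration of the two most common branches (input values
# below are made up):
#
#     >>> _parse_ra_dec("1 2 3 +4 5 6")
#     ('1 2 3', '+4 5 6')
#     >>> _parse_ra_dec("1 2 3 4 5 6")
#     ('1 2 3', '4 5 6')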
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Framework and base classes for coordinate frames/"low-level" coordinate
classes.
"""
# Standard library
import copy
import inspect
import warnings
from collections import defaultdict, namedtuple
# Dependencies
import numpy as np
from astropy import units as u
from astropy.utils import ShapedLikeNDArray, check_broadcast
# Project
from astropy.utils.decorators import deprecated, format_doc, lazyproperty
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
from . import representation as r
from .angles import Angle
from .attributes import Attribute
from .transformations import TransformGraph
__all__ = [
"BaseCoordinateFrame",
"frame_transform_graph",
"GenericFrame",
"RepresentationMapping",
]
# the graph used for all transformations between frames
frame_transform_graph = TransformGraph()
def _get_repr_cls(value):
"""
Return a valid representation class from ``value`` or raise exception.
"""
if value in r.REPRESENTATION_CLASSES:
value = r.REPRESENTATION_CLASSES[value]
elif not isinstance(value, type) or not issubclass(value, r.BaseRepresentation):
raise ValueError(
f"Representation is {value!r} but must be a BaseRepresentation class "
f"or one of the string aliases {list(r.REPRESENTATION_CLASSES)}"
)
return value
def _get_diff_cls(value):
"""
Return a valid differential class from ``value`` or raise exception.
As originally created, this is only used in the SkyCoord initializer, so if
that is refactored, this function may no longer be necessary.
"""
if value in r.DIFFERENTIAL_CLASSES:
value = r.DIFFERENTIAL_CLASSES[value]
elif not isinstance(value, type) or not issubclass(value, r.BaseDifferential):
raise ValueError(
f"Differential is {value!r} but must be a BaseDifferential class "
f"or one of the string aliases {list(r.DIFFERENTIAL_CLASSES)}"
)
return value
def _get_repr_classes(base, **differentials):
"""Get valid representation and differential classes.
Parameters
----------
base : str or `~astropy.coordinates.BaseRepresentation` subclass
class for the representation of the base coordinates. If a string,
it is looked up among the known representation classes.
**differentials : dict of str or `~astropy.coordinates.BaseDifferential`
Keys are like for normal differentials, i.e., 's' for a first
derivative in time, etc. If an item is set to `None`, it will be
guessed from the base class.
Returns
-------
repr_classes : dict of subclasses
The base class is keyed by 'base'; the others by the keys of
``differentials``.
"""
base = _get_repr_cls(base)
repr_classes = {"base": base}
for name, differential_type in differentials.items():
if differential_type == "base":
# We don't want to fail for this case.
differential_type = r.DIFFERENTIAL_CLASSES.get(base.get_name(), None)
elif differential_type in r.DIFFERENTIAL_CLASSES:
differential_type = r.DIFFERENTIAL_CLASSES[differential_type]
elif differential_type is not None and (
not isinstance(differential_type, type)
or not issubclass(differential_type, r.BaseDifferential)
):
raise ValueError(
"Differential is {differential_type!r} but must be a BaseDifferential"
f" class or one of the string aliases {list(r.DIFFERENTIAL_CLASSES)}"
)
repr_classes[name] = differential_type
return repr_classes
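# A minimal sketch of the expected output (shown with abbreviated class
# names; the actual dict values are the representation/differential classes):
#
#     >>> _get_repr_classes("spherical", s="base")  # doctest: +SKIP
#     {'base': SphericalRepresentation, 's': SphericalDifferential}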
_RepresentationMappingBase = namedtuple(
"RepresentationMapping", ("reprname", "framename", "defaultunit")
)
class RepresentationMapping(_RepresentationMappingBase):
"""
This `~collections.namedtuple` is used with the
``frame_specific_representation_info`` attribute to tell frames what
attribute names (and default units) to use for a particular representation.
``reprname`` and ``framename`` should be strings, while ``defaultunit`` can
be either an astropy unit, the string ``'recommended'`` (which is degrees
for Angles, nothing otherwise), or None (to indicate that no unit mapping
should be done).
"""
def __new__(cls, reprname, framename, defaultunit="recommended"):
# this trick just provides some defaults
return super().__new__(cls, reprname, framename, defaultunit)
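# For example, a frame that exposes a representation's "lon" component as
# "ra" in degrees would use the following mapping:
#
#     >>> RepresentationMapping("lon", "ra", u.deg)
#     RepresentationMapping(reprname='lon', framename='ra', defaultunit=Unit("deg"))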
base_doc = """{__doc__}
Parameters
----------
data : `~astropy.coordinates.BaseRepresentation` subclass instance
A representation object or ``None`` to have no data (or use the
coordinate component arguments, see below).
{components}
representation_type : `~astropy.coordinates.BaseRepresentation` subclass, str, optional
A representation class or string name of a representation class. This
sets the expected input representation class, thereby changing the
expected keyword arguments for the data passed in. For example, passing
``representation_type='cartesian'`` will make the classes expect
position data with cartesian names, i.e. ``x, y, z`` in most cases
unless overridden via ``frame_specific_representation_info``. To see this
frame's names, check out ``<this frame>().representation_info``.
differential_type : `~astropy.coordinates.BaseDifferential` subclass, str, dict, optional
A differential class or dictionary of differential classes (currently
only a velocity differential with key 's' is supported). This sets the
expected input differential class, thereby changing the expected keyword
arguments of the data passed in. For example, passing
``differential_type='cartesian'`` will make the classes expect velocity
data with the argument names ``v_x, v_y, v_z`` unless overridden via
``frame_specific_representation_info``. To see this frame's names,
check out ``<this frame>().representation_info``.
copy : bool, optional
If `True` (default), make copies of the input coordinate arrays.
Can only be passed in as a keyword argument.
{footer}
"""
_components = """
*args, **kwargs
Coordinate components, with names that depend on the subclass.
"""
@format_doc(base_doc, components=_components, footer="")
class BaseCoordinateFrame(ShapedLikeNDArray):
"""
The base class for coordinate frames.
This class is intended to be subclassed to create instances of specific
systems. Subclasses can implement the following attributes:
* `default_representation`
A subclass of `~astropy.coordinates.BaseRepresentation` that will be
treated as the default representation of this frame. This is the
representation assumed by default when the frame is created.
* `default_differential`
A subclass of `~astropy.coordinates.BaseDifferential` that will be
treated as the default differential class of this frame. This is the
differential class assumed by default when the frame is created.
* `~astropy.coordinates.Attribute` class attributes
Frame attributes such as ``FK4.equinox`` or ``FK4.obstime`` are defined
using a descriptor class. See the narrative documentation or
built-in classes code for details.
* `frame_specific_representation_info`
A dictionary mapping the name or class of a representation to a list of
`~astropy.coordinates.RepresentationMapping` objects that tell what
names and default units should be used on this frame for the components
of that representation.
Unless overridden via `frame_specific_representation_info`, velocity name
defaults are:
* ``pm_{lon}_cos{lat}``, ``pm_{lat}`` for `~astropy.coordinates.SphericalCosLatDifferential` velocity components
* ``pm_{lon}``, ``pm_{lat}`` for `~astropy.coordinates.SphericalDifferential` velocity components
* ``radial_velocity`` for any ``d_distance`` component
* ``v_{x,y,z}`` for `~astropy.coordinates.CartesianDifferential` velocity components
where ``{lon}`` and ``{lat}`` are the frame names of the angular components.
"""
default_representation = None
default_differential = None
# Specifies special names and units for representation and differential
# attributes.
frame_specific_representation_info = {}
frame_attributes = {}
# Default empty frame_attributes dict
def __init_subclass__(cls, **kwargs):
# We first check for explicitly set values for these:
default_repr = getattr(cls, "default_representation", None)
default_diff = getattr(cls, "default_differential", None)
repr_info = getattr(cls, "frame_specific_representation_info", None)
# Then, to make sure this works for subclasses-of-subclasses, we also
# have to check for cases where the attribute names have already been
# replaced by underscore-prefaced equivalents by the logic below:
if default_repr is None or isinstance(default_repr, property):
default_repr = getattr(cls, "_default_representation", None)
if default_diff is None or isinstance(default_diff, property):
default_diff = getattr(cls, "_default_differential", None)
if repr_info is None or isinstance(repr_info, property):
repr_info = getattr(cls, "_frame_specific_representation_info", None)
repr_info = cls._infer_repr_info(repr_info)
# Make read-only properties for the frame class attributes that should
# be read-only to make them immutable after creation.
# We copy attributes instead of linking to make sure there's no
# accidental cross-talk between classes
cls._create_readonly_property(
"default_representation",
default_repr,
"Default representation for position data",
)
cls._create_readonly_property(
"default_differential",
default_diff,
"Default representation for differential data (e.g., velocity)",
)
cls._create_readonly_property(
"frame_specific_representation_info",
copy.deepcopy(repr_info),
"Mapping for frame-specific component names",
)
# Set the frame attributes. We first construct the attributes from
# superclasses, going in reverse order to keep insertion order,
# and then add any attributes from the frame now being defined
# (if any old definitions are overridden, this keeps the order).
# Note that we cannot simply start with the inherited frame_attributes
# since we could be a mixin between multiple coordinate frames.
# TODO: Should this be made to use readonly_prop_factory as well or
# would it be inconvenient for getting the frame_attributes from
# classes?
frame_attrs = {}
for basecls in reversed(cls.__bases__):
if issubclass(basecls, BaseCoordinateFrame):
frame_attrs.update(basecls.frame_attributes)
for k, v in cls.__dict__.items():
if isinstance(v, Attribute):
frame_attrs[k] = v
cls.frame_attributes = frame_attrs
# Deal with setting the name of the frame:
if not hasattr(cls, "name"):
cls.name = cls.__name__.lower()
elif BaseCoordinateFrame not in cls.__bases__ and cls.name in [
getattr(base, "name", None) for base in cls.__bases__
]:
# This may be a subclass of a subclass of BaseCoordinateFrame,
# like ICRS(BaseRADecFrame). In this case, cls.name will have been
# set by a base class's __init_subclass__, so we reset it here.
cls.name = cls.__name__.lower()
# A cache that *must be unique to each frame class* - it is
# insufficient to share them with superclasses, hence the need to put
# them in the meta
cls._frame_class_cache = {}
super().__init_subclass__(**kwargs)
# call this once here to initialize defaults
# (via FrameAttribute.__get__/convert_input)
cls.get_frame_attr_defaults()
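# A minimal sketch of what a concrete subclass looks like in practice (the
# frame and attribute names here are hypothetical; real frames live in
# astropy.coordinates.builtin_frames):
#
#     >>> class MyFrame(BaseCoordinateFrame):
#     ...     default_representation = r.SphericalRepresentation
#     ...     default_differential = r.SphericalCosLatDifferential
#     ...     obstime = Attribute(default=None)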
def __init__(
self,
*args,
copy=True,
representation_type=None,
differential_type=None,
**kwargs,
):
self._attr_names_with_defaults = []
self._representation = self._infer_representation(
representation_type, differential_type
)
self._data = self._infer_data(args, copy, kwargs) # possibly None.
# Set frame attributes, if any
values = {}
for fnm, fdefault in self.get_frame_attr_defaults().items():
# Read-only frame attributes are defined as FrameAttribute
# descriptors which are not settable, so set 'real' attributes as
# the name prefaced with an underscore.
if fnm in kwargs:
value = kwargs.pop(fnm)
setattr(self, "_" + fnm, value)
# Validate attribute by getting it. If the instance has data,
# this also checks its shape is OK. If not, we do it below.
values[fnm] = getattr(self, fnm)
else:
setattr(self, "_" + fnm, fdefault)
self._attr_names_with_defaults.append(fnm)
if kwargs:
raise TypeError(
f"Coordinate frame {self.__class__.__name__} got unexpected "
f"keywords: {list(kwargs)}"
)
# We do ``is None`` because self._data might evaluate to false for
# empty arrays or data == 0
if self._data is None:
# No data: we still need to check that any non-scalar attributes
# have consistent shapes. Collect them for all attributes with
# size > 1 (which should be array-like and thus have a shape).
shapes = {
fnm: value.shape
for fnm, value in values.items()
if getattr(value, "shape", ())
}
if shapes:
if len(shapes) > 1:
try:
self._no_data_shape = check_broadcast(*shapes.values())
except ValueError as err:
raise ValueError(
f"non-scalar attributes with inconsistent shapes: {shapes}"
) from err
# Above, we checked that it is possible to broadcast all
# shapes. By getting and thus validating the attributes,
# we verify that the attributes can in fact be broadcast.
for fnm in shapes:
getattr(self, fnm)
else:
self._no_data_shape = shapes.popitem()[1]
else:
self._no_data_shape = ()
# The logic of this block is not related to the previous one
if self._data is not None:
# This makes the cache keys backwards-compatible, but also adds
# support for having differentials attached to the frame data
# representation object.
if "s" in self._data.differentials:
# TODO: assumes a velocity unit differential
key = (
self._data.__class__.__name__,
self._data.differentials["s"].__class__.__name__,
False,
)
else:
key = (self._data.__class__.__name__, False)
# Set up representation cache.
self.cache["representation"][key] = self._data
def _infer_representation(self, representation_type, differential_type):
if representation_type is None and differential_type is None:
return {"base": self.default_representation, "s": self.default_differential}
if representation_type is None:
representation_type = self.default_representation
if inspect.isclass(differential_type) and issubclass(
differential_type, r.BaseDifferential
):
# TODO: assumes the differential class is for the velocity
# differential
differential_type = {"s": differential_type}
elif isinstance(differential_type, str):
# TODO: assumes the differential class is for the velocity
# differential
diff_cls = r.DIFFERENTIAL_CLASSES[differential_type]
differential_type = {"s": diff_cls}
elif differential_type is None:
if representation_type == self.default_representation:
differential_type = {"s": self.default_differential}
else:
differential_type = {"s": "base"} # see set_representation_cls()
return _get_repr_classes(representation_type, **differential_type)
def _infer_data(self, args, copy, kwargs):
# if not set below, this is a frame with no data
representation_data = None
differential_data = None
args = list(args) # need to be able to pop them
if args and (isinstance(args[0], r.BaseRepresentation) or args[0] is None):
representation_data = args.pop(0) # This can still be None
if len(args) > 0:
raise TypeError(
"Cannot create a frame with both a representation object "
"and other positional arguments"
)
if representation_data is not None:
diffs = representation_data.differentials
differential_data = diffs.get("s", None)
if (differential_data is None and len(diffs) > 0) or (
differential_data is not None and len(diffs) > 1
):
raise ValueError(
"Multiple differentials are associated with the representation"
" object passed in to the frame initializer. Only a single"
f" velocity differential is supported. Got: {diffs}"
)
else:
representation_cls = self.get_representation_cls()
# Get any representation data passed in to the frame initializer
# using keyword or positional arguments for the component names
repr_kwargs = {}
for nmkw, nmrep in self.representation_component_names.items():
if len(args) > 0:
# first gather up positional args
repr_kwargs[nmrep] = args.pop(0)
elif nmkw in kwargs:
repr_kwargs[nmrep] = kwargs.pop(nmkw)
# special-case the Spherical->UnitSpherical if no `distance`
if repr_kwargs:
# TODO: determine how to get rid of the part before the "try" -
# currently removing it has a performance regression for
# unitspherical because of the try-related overhead.
# Also frames have no way to indicate what the "distance" is
if repr_kwargs.get("distance", True) is None:
del repr_kwargs["distance"]
if (
issubclass(representation_cls, r.SphericalRepresentation)
and "distance" not in repr_kwargs
):
representation_cls = representation_cls._unit_representation
try:
representation_data = representation_cls(copy=copy, **repr_kwargs)
except TypeError as e:
# this except clause is here to make the names of the
# attributes more human-readable. Without this the names
# come from the representation instead of the frame's
# attribute names.
try:
representation_data = representation_cls._unit_representation(
copy=copy, **repr_kwargs
)
except Exception:
msg = str(e)
names = self.get_representation_component_names()
for frame_name, repr_name in names.items():
msg = msg.replace(repr_name, frame_name)
msg = msg.replace("__init__()", f"{self.__class__.__name__}()")
e.args = (msg,)
raise e
# Now we handle the Differential data:
# Get any differential data passed in to the frame initializer
# using keyword or positional arguments for the component names
differential_cls = self.get_representation_cls("s")
diff_component_names = self.get_representation_component_names("s")
diff_kwargs = {}
for nmkw, nmrep in diff_component_names.items():
if len(args) > 0:
# first gather up positional args
diff_kwargs[nmrep] = args.pop(0)
elif nmkw in kwargs:
diff_kwargs[nmrep] = kwargs.pop(nmkw)
if diff_kwargs:
if (
hasattr(differential_cls, "_unit_differential")
and "d_distance" not in diff_kwargs
):
differential_cls = differential_cls._unit_differential
elif len(diff_kwargs) == 1 and "d_distance" in diff_kwargs:
differential_cls = r.RadialDifferential
try:
differential_data = differential_cls(copy=copy, **diff_kwargs)
except TypeError as e:
# this except clause is here to make the names of the
# attributes more human-readable. Without this the names
# come from the representation instead of the frame's
# attribute names.
msg = str(e)
names = self.get_representation_component_names("s")
for frame_name, repr_name in names.items():
msg = msg.replace(repr_name, frame_name)
msg = msg.replace("__init__()", f"{self.__class__.__name__}()")
e.args = (msg,)
raise
if len(args) > 0:
raise TypeError(
"{}.__init__ had {} remaining unhandled arguments".format(
self.__class__.__name__, len(args)
)
)
if representation_data is None and differential_data is not None:
raise ValueError(
"Cannot pass in differential component data "
"without positional (representation) data."
)
if differential_data:
# Check that differential data provided has units compatible
# with time-derivative of representation data.
# NOTE: there is no dimensionless time while lengths can be
# dimensionless (u.dimensionless_unscaled).
for comp in representation_data.components:
if (diff_comp := f"d_{comp}") in differential_data.components:
current_repr_unit = representation_data._units[comp]
current_diff_unit = differential_data._units[diff_comp]
expected_unit = current_repr_unit / u.s
if not current_diff_unit.is_equivalent(expected_unit):
for (
key,
val,
) in self.get_representation_component_names().items():
if val == comp:
current_repr_name = key
break
for key, val in self.get_representation_component_names(
"s"
).items():
if val == diff_comp:
current_diff_name = key
break
raise ValueError(
f'{current_repr_name} has unit "{current_repr_unit}" with'
f' physical type "{current_repr_unit.physical_type}", but'
f" {current_diff_name} has incompatible unit"
f' "{current_diff_unit}" with physical type'
f' "{current_diff_unit.physical_type}" instead of the'
f' expected "{(expected_unit).physical_type}".'
)
representation_data = representation_data.with_differentials(
{"s": differential_data}
)
return representation_data
@classmethod
def _infer_repr_info(cls, repr_info):
# Unless overridden via `frame_specific_representation_info`, velocity
# name defaults are (see also docstring for BaseCoordinateFrame):
# * ``pm_{lon}_cos{lat}``, ``pm_{lat}`` for
# `SphericalCosLatDifferential` proper motion components
# * ``pm_{lon}``, ``pm_{lat}`` for `SphericalDifferential` proper
# motion components
# * ``radial_velocity`` for any `d_distance` component
# * ``v_{x,y,z}`` for `CartesianDifferential` velocity components
# where `{lon}` and `{lat}` are the frame names of the angular
# components.
if repr_info is None:
repr_info = {}
# the tuple() call below is necessary because if it is not there,
# the iteration proceeds in a difficult-to-predict manner in the
# case that the hash of one of the class objects causes it to be
# revisited by the iteration. The tuple() call prevents this by
# making the items iterated over fixed regardless of how the dict
# changes
for cls_or_name in tuple(repr_info.keys()):
if isinstance(cls_or_name, str):
# TODO: this provides a layer of backwards compatibility in
# case the key is a string, but now we want explicit classes.
_cls = _get_repr_cls(cls_or_name)
repr_info[_cls] = repr_info.pop(cls_or_name)
# The default spherical names are 'lon' and 'lat'
repr_info.setdefault(
r.SphericalRepresentation,
[RepresentationMapping("lon", "lon"), RepresentationMapping("lat", "lat")],
)
sph_component_map = {
m.reprname: m.framename for m in repr_info[r.SphericalRepresentation]
}
repr_info.setdefault(
r.SphericalCosLatDifferential,
[
RepresentationMapping(
"d_lon_coslat",
"pm_{lon}_cos{lat}".format(**sph_component_map),
u.mas / u.yr,
),
RepresentationMapping(
"d_lat", "pm_{lat}".format(**sph_component_map), u.mas / u.yr
),
RepresentationMapping("d_distance", "radial_velocity", u.km / u.s),
],
)
repr_info.setdefault(
r.SphericalDifferential,
[
RepresentationMapping(
"d_lon", "pm_{lon}".format(**sph_component_map), u.mas / u.yr
),
RepresentationMapping(
"d_lat", "pm_{lat}".format(**sph_component_map), u.mas / u.yr
),
RepresentationMapping("d_distance", "radial_velocity", u.km / u.s),
],
)
repr_info.setdefault(
r.CartesianDifferential,
[
RepresentationMapping("d_x", "v_x", u.km / u.s),
RepresentationMapping("d_y", "v_y", u.km / u.s),
RepresentationMapping("d_z", "v_z", u.km / u.s),
],
)
# Unit* classes should follow the same naming conventions
# TODO: this adds some unnecessary mappings for the Unit classes, so
# this could be cleaned up, but in practice doesn't seem to have any
# negative side effects
repr_info.setdefault(
r.UnitSphericalRepresentation, repr_info[r.SphericalRepresentation]
)
repr_info.setdefault(
r.UnitSphericalCosLatDifferential, repr_info[r.SphericalCosLatDifferential]
)
repr_info.setdefault(
r.UnitSphericalDifferential, repr_info[r.SphericalDifferential]
)
return repr_info
@classmethod
def _create_readonly_property(cls, attr_name, value, doc=None):
private_attr = "_" + attr_name
def getter(self):
return getattr(self, private_attr)
setattr(cls, private_attr, value)
setattr(cls, attr_name, property(getter, doc=doc))
@lazyproperty
def cache(self):
"""Cache for this frame, a dict.
It stores anything that should be computed from the coordinate data (*not* from
the frame attributes). This can be used in functions to store anything that
might be expensive to compute but might be re-used by some other function.
E.g.::
if 'user_data' in myframe.cache:
data = myframe.cache['user_data']
else:
myframe.cache['user_data'] = data = expensive_func(myframe.lat)
If in-place modifications are made to the frame data, the cache should
be cleared::
myframe.cache.clear()
"""
return defaultdict(dict)
@property
def data(self):
"""
The coordinate data for this object. If this frame has no data, a
`ValueError` will be raised. Use `has_data` to
check if data is present on this frame object.
"""
if self._data is None:
raise ValueError(
f'The frame object "{self!r}" does not have associated data'
)
return self._data
@property
def has_data(self):
"""
True if this frame has `data`, False otherwise.
"""
return self._data is not None
@property
def shape(self):
return self.data.shape if self.has_data else self._no_data_shape
# We have to override the ShapedLikeNDArray definitions, since our shape
# does not have to be that of the data.
def __len__(self):
return len(self.data)
def __bool__(self):
return self.has_data and self.size > 0
@property
def size(self):
return self.data.size
@property
def isscalar(self):
return self.has_data and self.data.isscalar
@classmethod
def get_frame_attr_defaults(cls):
"""Return a dict with the defaults for each frame attribute."""
return {name: getattr(cls, name) for name in cls.frame_attributes}
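# For example (using a frame defined elsewhere in this package; ICRS has no
# frame attributes, so its defaults dict is empty):
#
#     >>> from astropy.coordinates import ICRS
#     >>> ICRS.get_frame_attr_defaults()
#     {}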
@deprecated(
"5.2",
alternative="get_frame_attr_defaults",
message=(
"The {func}() {obj_type} is deprecated and may be removed in a future"
" version. Use {alternative}() to obtain a dict of frame attribute names"
" and default values."
" The fastest way to obtain the names is frame_attributes.keys()"
),
)
@classmethod
def get_frame_attr_names(cls):
"""Return a dict with the defaults for each frame attribute."""
return cls.get_frame_attr_defaults()
def get_representation_cls(self, which="base"):
"""The class used for part of this frame's data.
Parameters
----------
which : ('base', 's', `None`)
The class of which part to return. 'base' means the class used to
represent the coordinates; 's' the first derivative to time, i.e.,
the class representing the proper motion and/or radial velocity.
If `None`, return a dict with both.
Returns
-------
representation : `~astropy.coordinates.BaseRepresentation` or `~astropy.coordinates.BaseDifferential`.
"""
if which is not None:
return self._representation[which]
else:
return self._representation
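# For example (output abbreviated; the return value is the actual class
# object for this frame's default representation):
#
#     >>> from astropy.coordinates import ICRS
#     >>> ICRS().get_representation_cls()  # doctest: +SKIP
#     <class 'astropy.coordinates...SphericalRepresentation'>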
def set_representation_cls(self, base=None, s="base"):
"""Set representation and/or differential class for this frame's data.
Parameters
----------
base : str, `~astropy.coordinates.BaseRepresentation` subclass, optional
The name or subclass to use to represent the coordinate data.
s : `~astropy.coordinates.BaseDifferential` subclass, optional
The differential subclass to use to represent any velocities,
such as proper motion and radial velocity. If equal to 'base',
which is the default, it will be inferred from the representation.
If `None`, the representation will drop any differentials.
"""
if base is None:
base = self._representation["base"]
self._representation = _get_repr_classes(base=base, s=s)
representation_type = property(
fget=get_representation_cls,
fset=set_representation_cls,
doc="""The representation class used for this frame's data.
This will be a subclass from `~astropy.coordinates.BaseRepresentation`.
Can also be *set* using the string name of the representation. If you
wish to set an explicit differential class (rather than have it be
inferred), use the ``set_representation_cls`` method.
""",
)
@property
def differential_type(self):
"""
The differential used for this frame's data.
This will be a subclass from `~astropy.coordinates.BaseDifferential`.
For simultaneous setting of representation and differentials, see the
``set_representation_cls`` method.
"""
return self.get_representation_cls("s")
@differential_type.setter
def differential_type(self, value):
self.set_representation_cls(s=value)
@classmethod
def _get_representation_info(cls):
# This exists as a class method only to support handling frame inputs
# without units, which are deprecated and will be removed. This can be
# moved into the representation_info property at that time.
# note that if so moved, the cache should be accessed as
# self.__class__._frame_class_cache
if (
cls._frame_class_cache.get("last_reprdiff_hash", None)
!= r.get_reprdiff_cls_hash()
):
repr_attrs = {}
for repr_diff_cls in list(r.REPRESENTATION_CLASSES.values()) + list(
r.DIFFERENTIAL_CLASSES.values()
):
repr_attrs[repr_diff_cls] = {"names": [], "units": []}
for c, c_cls in repr_diff_cls.attr_classes.items():
repr_attrs[repr_diff_cls]["names"].append(c)
rec_unit = u.deg if issubclass(c_cls, Angle) else None
repr_attrs[repr_diff_cls]["units"].append(rec_unit)
for (
repr_diff_cls,
mappings,
) in cls._frame_specific_representation_info.items():
# take the 'names' and 'units' tuples from repr_attrs,
# and then use the RepresentationMapping objects
# to update as needed for this frame.
nms = repr_attrs[repr_diff_cls]["names"]
uns = repr_attrs[repr_diff_cls]["units"]
comptomap = {m.reprname: m for m in mappings}
for i, c in enumerate(repr_diff_cls.attr_classes.keys()):
if c in comptomap:
mapp = comptomap[c]
nms[i] = mapp.framename
# need the isinstance because otherwise if it's a unit it
# will try to compare to the unit string representation
if not (
isinstance(mapp.defaultunit, str)
and mapp.defaultunit == "recommended"
):
uns[i] = mapp.defaultunit
# else we just leave it as recommended_units says above
# Convert to tuples so that this can't mess with frame internals
repr_attrs[repr_diff_cls]["names"] = tuple(nms)
repr_attrs[repr_diff_cls]["units"] = tuple(uns)
cls._frame_class_cache["representation_info"] = repr_attrs
cls._frame_class_cache["last_reprdiff_hash"] = r.get_reprdiff_cls_hash()
return cls._frame_class_cache["representation_info"]
@lazyproperty
def representation_info(self):
"""
A dictionary with the information of what attribute names for this frame
apply to particular representations.
"""
return self._get_representation_info()
def get_representation_component_names(self, which="base"):
out = {}
repr_or_diff_cls = self.get_representation_cls(which)
if repr_or_diff_cls is None:
return out
data_names = repr_or_diff_cls.attr_classes.keys()
repr_names = self.representation_info[repr_or_diff_cls]["names"]
for repr_name, data_name in zip(repr_names, data_names):
out[repr_name] = data_name
return out
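# For example, for an equatorial frame the mapping is roughly as below
# (shown for the ICRS frame defined elsewhere in this package; the mapping
# type is abbreviated):
#
#     >>> from astropy.coordinates import ICRS
#     >>> ICRS().get_representation_component_names()  # doctest: +SKIP
#     {'ra': 'lon', 'dec': 'lat', 'distance': 'distance'}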
def get_representation_component_units(self, which="base"):
out = {}
repr_or_diff_cls = self.get_representation_cls(which)
if repr_or_diff_cls is None:
return out
repr_attrs = self.representation_info[repr_or_diff_cls]
repr_names = repr_attrs["names"]
repr_units = repr_attrs["units"]
for repr_name, repr_unit in zip(repr_names, repr_units):
if repr_unit:
out[repr_name] = repr_unit
return out
representation_component_names = property(get_representation_component_names)
representation_component_units = property(get_representation_component_units)
def _replicate(self, data, copy=False, **kwargs):
"""Base for replicating a frame, with possibly different attributes.
Produces a new instance of the frame using the attributes of the old
frame (unless overridden) and with the data given.
Parameters
----------
data : `~astropy.coordinates.BaseRepresentation` or None
Data to use in the new frame instance. If `None`, it will be
a data-less frame.
copy : bool, optional
Whether data and the attributes on the old frame should be copied
(default), or passed on by reference.
**kwargs
Any attributes that should be overridden.
"""
# This is to provide a slightly nicer error message if the user tries
# to use frame_obj.representation instead of frame_obj.data to get the
# underlying representation object [e.g., #2890]
if inspect.isclass(data):
raise TypeError(
"Class passed as data instead of a representation instance. If you"
" called frame.representation, this returns the representation class."
" frame.data returns the instantiated object - you may want to use"
" this instead."
)
if copy and data is not None:
data = data.copy()
for attr in self.frame_attributes:
if attr not in self._attr_names_with_defaults and attr not in kwargs:
value = getattr(self, attr)
if copy:
value = value.copy()
kwargs[attr] = value
return self.__class__(data, copy=False, **kwargs)
def replicate(self, copy=False, **kwargs):
"""
Return a replica of the frame, optionally with new frame attributes.
The replica is a new frame object that has the same data as this frame
object and with frame attributes overridden if they are provided as extra
keyword arguments to this method. If ``copy`` is set to `True` then a
copy of the internal arrays will be made. Otherwise the replica will
use a reference to the original arrays when possible to save memory. The
internal arrays are normally not changeable by the user so in most cases
it should not be necessary to set ``copy`` to `True`.
Parameters
----------
copy : bool, optional
If True, the resulting object is a copy of the data. When False,
references are used where possible. This rule also applies to the
frame attributes.
**kwargs
Any additional keywords are treated as frame attributes to be set on the
new frame object.
Returns
-------
frameobj : `~astropy.coordinates.BaseCoordinateFrame` subclass instance
Replica of this object, but possibly with new frame attributes.
"""
return self._replicate(self.data, copy=copy, **kwargs)
def replicate_without_data(self, copy=False, **kwargs):
"""
Return a replica without data, optionally with new frame attributes.
The replica is a new frame object without data but with the same frame
attributes as this object, except where overridden by extra keyword
arguments to this method. The ``copy`` keyword determines if the frame
attributes are truly copied vs being references (which saves memory for
cases where frame attributes are large).
This method is essentially the converse of `realize_frame`.
Parameters
----------
copy : bool, optional
If True, the resulting object has copies of the frame attributes.
When False, references are used where possible.
**kwargs
Any additional keywords are treated as frame attributes to be set on the
new frame object.
Returns
-------
frameobj : `~astropy.coordinates.BaseCoordinateFrame` subclass instance
Replica of this object, but without data and possibly with new frame
attributes.
"""
return self._replicate(None, copy=copy, **kwargs)
def realize_frame(self, data, **kwargs):
"""
Generates a new frame with new data from another frame (which may or
may not have data). Roughly speaking, the converse of
`replicate_without_data`.
Parameters
----------
data : `~astropy.coordinates.BaseRepresentation`
The representation to use as the data for the new frame.
**kwargs
Any additional keywords are treated as frame attributes to be set on the
new frame object. In particular, `representation_type` can be specified.
Returns
-------
frameobj : `~astropy.coordinates.BaseCoordinateFrame` subclass instance
A new object in *this* frame, with the same frame attributes as
this one, but with the ``data`` as the coordinate data.
"""
return self._replicate(data, **kwargs)
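# A minimal sketch (assuming the ICRS frame defined elsewhere in this
# package):
#
#     >>> from astropy.coordinates import ICRS
#     >>> import astropy.units as u
#     >>> frame = ICRS()
#     >>> coords = frame.realize_frame(r.UnitSphericalRepresentation(1 * u.deg, 2 * u.deg))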
def represent_as(self, base, s="base", in_frame_units=False):
"""
Generate and return a new representation of this frame's `data`
as a Representation object.
Note: In order to make an in-place change of the representation
of a Frame or SkyCoord object, set the ``representation``
attribute of that object to the desired new representation, or
use the ``set_representation_cls`` method to also set the differential.
Parameters
----------
base : subclass of BaseRepresentation or string
The type of representation to generate. Must be a *class*
(not an instance), or the string name of the representation
class.
s : subclass of `~astropy.coordinates.BaseDifferential`, str, optional
Class in which any velocities should be represented. Must be
a *class* (not an instance), or the string name of the
differential class. If equal to 'base' (default), inferred from
the base class. If `None`, all velocity information is dropped.
in_frame_units : bool, keyword-only
Force the representation units to match the specified units
particular to this frame
Returns
-------
newrep : BaseRepresentation-derived object
A new representation object of this frame's `data`.
Raises
------
AttributeError
If this object had no `data`
Examples
--------
>>> from astropy import units as u
>>> from astropy.coordinates import SkyCoord, CartesianRepresentation
>>> coord = SkyCoord(0*u.deg, 0*u.deg)
>>> coord.represent_as(CartesianRepresentation) # doctest: +FLOAT_CMP
<CartesianRepresentation (x, y, z) [dimensionless]
(1., 0., 0.)>
>>> coord.representation_type = CartesianRepresentation
>>> coord # doctest: +FLOAT_CMP
<SkyCoord (ICRS): (x, y, z) [dimensionless]
(1., 0., 0.)>
"""
# For backwards compatibility (because in_frame_units used to be the
# 2nd argument), we check to see if `s` is a boolean. If it is, we use it
# as `in_frame_units` and warn about the position change.
if isinstance(s, bool):
warnings.warn(
"The argument position for `in_frame_units` in `represent_as` has"
" changed. Use as a keyword argument if needed.",
AstropyWarning,
)
in_frame_units = s
s = "base"
# In the future, we may want to support more differentials, in which
# case one probably needs to define **kwargs above and use it here.
# But for now, we only care about the velocity.
repr_classes = _get_repr_classes(base=base, s=s)
representation_cls = repr_classes["base"]
# We only keep velocity information
if "s" in self.data.differentials:
# For the default 'base' option in which _get_repr_classes has
# given us a best guess based on the representation class, we only
# use it if the class we had already is incompatible.
if s == "base" and (
self.data.differentials["s"].__class__
in representation_cls._compatible_differentials
):
differential_cls = self.data.differentials["s"].__class__
else:
differential_cls = repr_classes["s"]
elif s is None or s == "base":
differential_cls = None
else:
raise TypeError(
"Frame data has no associated differentials (i.e. the frame has no"
" velocity data) - represent_as() only accepts a new representation."
)
if differential_cls:
cache_key = (
representation_cls.__name__,
differential_cls.__name__,
in_frame_units,
)
else:
cache_key = (representation_cls.__name__, in_frame_units)
cached_repr = self.cache["representation"].get(cache_key)
if not cached_repr:
if differential_cls:
# Sanity check to ensure we do not just drop radial
# velocity. TODO: should Representation.represent_as
# allow this transformation in the first place?
if (
isinstance(self.data, r.UnitSphericalRepresentation)
and issubclass(representation_cls, r.CartesianRepresentation)
and not isinstance(
self.data.differentials["s"],
(
r.UnitSphericalDifferential,
r.UnitSphericalCosLatDifferential,
r.RadialDifferential,
),
)
):
raise u.UnitConversionError(
"need a distance to retrieve a cartesian representation "
"when both radial velocity and proper motion are present, "
"since otherwise the units cannot match."
)
# TODO NOTE: only supports a single differential
data = self.data.represent_as(representation_cls, differential_cls)
diff = data.differentials["s"] # TODO: assumes velocity
else:
data = self.data.represent_as(representation_cls)
# If the new representation is known to this frame and has a defined
# set of names and units, then use that.
new_attrs = self.representation_info.get(representation_cls)
if new_attrs and in_frame_units:
datakwargs = {comp: getattr(data, comp) for comp in data.components}
for comp, new_attr_unit in zip(data.components, new_attrs["units"]):
if new_attr_unit:
datakwargs[comp] = datakwargs[comp].to(new_attr_unit)
data = data.__class__(copy=False, **datakwargs)
if differential_cls:
# the original differential
data_diff = self.data.differentials["s"]
# If the new differential is known to this frame and has a
# defined set of names and units, then use that.
new_attrs = self.representation_info.get(differential_cls)
if new_attrs and in_frame_units:
diffkwargs = {comp: getattr(diff, comp) for comp in diff.components}
for comp, new_attr_unit in zip(diff.components, new_attrs["units"]):
# Some special-casing to treat a situation where the
# input data has a UnitSphericalDifferential or a
# RadialDifferential. It is re-represented to the
# frame's differential class (which might be, e.g., a
# dimensional Differential), so we don't want to try to
# convert the empty component units
if (
isinstance(
data_diff,
(
r.UnitSphericalDifferential,
r.UnitSphericalCosLatDifferential,
),
)
and comp not in data_diff.__class__.attr_classes
):
continue
elif (
isinstance(data_diff, r.RadialDifferential)
and comp not in data_diff.__class__.attr_classes
):
continue
# Try to convert to requested units. Since that might
# not be possible (e.g., for a coordinate with proper
# motion but without distance, one cannot convert to a
# cartesian differential in km/s), we allow the unit
# conversion to fail. See gh-7028 for discussion.
if new_attr_unit and hasattr(diff, comp):
try:
diffkwargs[comp] = diffkwargs[comp].to(new_attr_unit)
except Exception:
pass
diff = diff.__class__(copy=False, **diffkwargs)
# Here we have to bypass using with_differentials() because
# it has a validation check. But because
# .representation_type and .differential_type don't point to
# the original classes, if the input differential is a
# RadialDifferential, it usually gets turned into a
# SphericalCosLatDifferential (or whatever the default is)
# with strange units for the d_lon and d_lat attributes.
# This then causes the dictionary key check to fail (i.e.
# comparison against `diff._get_deriv_key()`)
data._differentials.update({"s": diff})
self.cache["representation"][cache_key] = data
return self.cache["representation"][cache_key]
def transform_to(self, new_frame):
"""
Transform this object's coordinate data to a new frame.
Parameters
----------
new_frame : coordinate-like or `~astropy.coordinates.BaseCoordinateFrame` subclass instance
The frame to transform this coordinate frame into.
The frame class option is deprecated.
Returns
-------
transframe : coordinate-like
A new object with the coordinate data represented in the
``newframe`` system.
Raises
------
ValueError
If there is no possible transformation route.
"""
from .errors import ConvertError
if self._data is None:
raise ValueError("Cannot transform a frame with no data")
if (
getattr(self.data, "differentials", None)
and hasattr(self, "obstime")
and hasattr(new_frame, "obstime")
and np.any(self.obstime != new_frame.obstime)
):
raise NotImplementedError(
"You cannot transform a frame that has velocities to another frame at a"
" different obstime. If you think this should (or should not) be"
" possible, please comment at"
" https://github.com/astropy/astropy/issues/6280"
)
if inspect.isclass(new_frame):
warnings.warn(
"Transforming a frame instance to a frame class (as opposed to another "
"frame instance) will not be supported in the future. Either "
"explicitly instantiate the target frame, or first convert the source "
"frame instance to a `astropy.coordinates.SkyCoord` and use its "
"`transform_to()` method.",
AstropyDeprecationWarning,
)
# Use the default frame attributes for this class
new_frame = new_frame()
if hasattr(new_frame, "_sky_coord_frame"):
# Input new_frame is not a frame instance or class and is most
# likely a SkyCoord object.
new_frame = new_frame._sky_coord_frame
trans = frame_transform_graph.get_transform(self.__class__, new_frame.__class__)
if trans is None:
if new_frame is self.__class__:
# no special transform needed, but should update frame info
return new_frame.realize_frame(self.data)
msg = "Cannot transform from {0} to {1}"
raise ConvertError(msg.format(self.__class__, new_frame.__class__))
return trans(self, new_frame)
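# A minimal sketch (assuming the ICRS and Galactic frames defined elsewhere
# in this package; output elided):
#
#     >>> from astropy.coordinates import ICRS, Galactic
#     >>> import astropy.units as u
#     >>> ICRS(ra=10 * u.deg, dec=20 * u.deg).transform_to(Galactic())  # doctest: +SKIP
#     <Galactic Coordinate: (l, b) in deg ...>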
def is_transformable_to(self, new_frame):
"""
Determines if this coordinate frame can be transformed to another
given frame.
Parameters
----------
new_frame : `~astropy.coordinates.BaseCoordinateFrame` subclass or instance
The proposed frame to transform into.
Returns
-------
transformable : bool or str
`True` if this can be transformed to ``new_frame``, `False` if
not, or the string 'same' if ``new_frame`` is the same system as
this object but no transformation is defined.
Notes
-----
A return value of 'same' means the transformation will work, but it will
just give back a copy of this object. The intended usage is::
if coord.is_transformable_to(some_unknown_frame):
coord2 = coord.transform_to(some_unknown_frame)
This will work even if ``some_unknown_frame`` turns out to be the same
frame class as ``coord``. This is intended for cases where the frame
is the same regardless of the frame attributes (e.g. ICRS), but be
aware that it *might* also indicate that someone forgot to define the
transformation between two objects of the same frame class but with
different attributes.
"""
new_frame_cls = new_frame if inspect.isclass(new_frame) else new_frame.__class__
trans = frame_transform_graph.get_transform(self.__class__, new_frame_cls)
if trans is None:
if new_frame_cls is self.__class__:
return "same"
else:
return False
else:
return True
def is_frame_attr_default(self, attrnm):
"""
Determine whether or not a frame attribute has its value because it's
the default value, or because this frame was created with that value
explicitly requested.
Parameters
----------
attrnm : str
The name of the attribute to check.
Returns
-------
isdefault : bool
True if the attribute ``attrnm`` has its value by default, False if
it was specified at creation of this frame.
"""
return attrnm in self._attr_names_with_defaults
@staticmethod
def _frameattr_equiv(left_fattr, right_fattr):
"""
Determine if two frame attributes are equivalent. Implemented as a
staticmethod mainly as a convenient location, although conceivably it
might be desirable for subclasses to override this behavior.
Primary purpose is to check for equality of representations. This
aspect can actually be simplified/removed now that representations have
equality defined.
Secondary purpose is to check for equality of coordinate attributes,
which first checks whether they themselves are in equivalent frames
before checking for equality in the normal fashion. This is because
checking for equality with non-equivalent frames raises an error.
"""
if left_fattr is right_fattr:
# shortcut if it's exactly the same object
return True
elif left_fattr is None or right_fattr is None:
# shortcut if one attribute is unspecified and the other isn't
return False
left_is_repr = isinstance(left_fattr, r.BaseRepresentationOrDifferential)
right_is_repr = isinstance(right_fattr, r.BaseRepresentationOrDifferential)
if left_is_repr and right_is_repr:
# both are representations.
if getattr(left_fattr, "differentials", False) or getattr(
right_fattr, "differentials", False
):
warnings.warn(
"Two representation frame attributes were checked for equivalence"
" when at least one of them has differentials. This yields False"
" even if the underlying representations are equivalent (although"
" this may change in future versions of Astropy)",
AstropyWarning,
)
return False
if isinstance(right_fattr, left_fattr.__class__):
# if same representation type, compare components.
return np.all(
[
(getattr(left_fattr, comp) == getattr(right_fattr, comp))
for comp in left_fattr.components
]
)
else:
# convert to cartesian and see if they match
return np.all(
left_fattr.to_cartesian().xyz == right_fattr.to_cartesian().xyz
)
elif left_is_repr or right_is_repr:
return False
left_is_coord = isinstance(left_fattr, BaseCoordinateFrame)
right_is_coord = isinstance(right_fattr, BaseCoordinateFrame)
if left_is_coord and right_is_coord:
# both are coordinates
if left_fattr.is_equivalent_frame(right_fattr):
return np.all(left_fattr == right_fattr)
else:
return False
elif left_is_coord or right_is_coord:
return False
return np.all(left_fattr == right_fattr)
def is_equivalent_frame(self, other):
"""
Checks if this object is the same frame as the ``other`` object.
To be the same frame, two objects must be the same frame class and have
the same frame attributes. Note that it does *not* matter what, if any,
data either object has.
Parameters
----------
other : :class:`~astropy.coordinates.BaseCoordinateFrame`
the other frame to check
Returns
-------
isequiv : bool
True if the frames are the same, False if not.
Raises
------
TypeError
If ``other`` isn't a `~astropy.coordinates.BaseCoordinateFrame` or subclass.
"""
if self.__class__ == other.__class__:
for frame_attr_name in self.frame_attributes:
if not self._frameattr_equiv(
getattr(self, frame_attr_name), getattr(other, frame_attr_name)
):
return False
return True
elif not isinstance(other, BaseCoordinateFrame):
raise TypeError(
"Tried to do is_equivalent_frame on something that isn't a frame"
)
else:
return False
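# For example (using frames defined elsewhere in this package):
#
#     >>> from astropy.coordinates import ICRS, FK5
#     >>> ICRS().is_equivalent_frame(ICRS())
#     True
#     >>> ICRS().is_equivalent_frame(FK5())
#     False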
def __repr__(self):
frameattrs = self._frame_attrs_repr()
data_repr = self._data_repr()
if frameattrs:
frameattrs = f" ({frameattrs})"
if data_repr:
return f"<{self.__class__.__name__} Coordinate{frameattrs}: {data_repr}>"
else:
return f"<{self.__class__.__name__} Frame{frameattrs}>"
def _data_repr(self):
"""Returns a string representation of the coordinate data."""
if not self.has_data:
return ""
if self.representation_type:
if hasattr(self.representation_type, "_unit_representation") and isinstance(
self.data, self.representation_type._unit_representation
):
rep_cls = self.data.__class__
else:
rep_cls = self.representation_type
if "s" in self.data.differentials:
dif_cls = self.get_representation_cls("s")
dif_data = self.data.differentials["s"]
if isinstance(
dif_data,
(
r.UnitSphericalDifferential,
r.UnitSphericalCosLatDifferential,
r.RadialDifferential,
),
):
dif_cls = dif_data.__class__
else:
dif_cls = None
data = self.represent_as(rep_cls, dif_cls, in_frame_units=True)
data_repr = repr(data)
# Generate the list of component names out of the repr string
part1, _, remainder = data_repr.partition("(")
if remainder != "":
comp_str, _, part2 = remainder.partition(")")
comp_names = comp_str.split(", ")
# Swap in frame-specific component names
invnames = {
nmrepr: nmpref
for nmpref, nmrepr in self.representation_component_names.items()
}
for i, name in enumerate(comp_names):
comp_names[i] = invnames.get(name, name)
# Reassemble the repr string
data_repr = part1 + "(" + ", ".join(comp_names) + ")" + part2
else:
data = self.data
data_repr = repr(self.data)
if data_repr.startswith("<" + data.__class__.__name__):
# remove both the leading "<" and the space after the name, as well
# as the trailing ">"
data_repr = data_repr[(len(data.__class__.__name__) + 2) : -1]
else:
data_repr = "Data:\n" + data_repr
if "s" in self.data.differentials:
data_repr_spl = data_repr.split("\n")
if "has differentials" in data_repr_spl[-1]:
diffrepr = repr(data.differentials["s"]).split("\n")
if diffrepr[0].startswith("<"):
diffrepr[0] = " " + " ".join(diffrepr[0].split(" ")[1:])
for frm_nm, rep_nm in self.get_representation_component_names(
"s"
).items():
diffrepr[0] = diffrepr[0].replace(rep_nm, frm_nm)
if diffrepr[-1].endswith(">"):
diffrepr[-1] = diffrepr[-1][:-1]
data_repr_spl[-1] = "\n".join(diffrepr)
data_repr = "\n".join(data_repr_spl)
return data_repr
def _frame_attrs_repr(self):
"""
Returns a string representation of the frame's attributes, if any.
"""
attr_strs = []
for attribute_name in self.frame_attributes:
attr = getattr(self, attribute_name)
# Check to see if this object has a way of representing itself
# specific to being an attribute of a frame. (Note, this is not the
# Attribute class, it's the actual object).
if hasattr(attr, "_astropy_repr_in_frame"):
attrstr = attr._astropy_repr_in_frame()
else:
attrstr = str(attr)
attr_strs.append(f"{attribute_name}={attrstr}")
return ", ".join(attr_strs)
def _apply(self, method, *args, **kwargs):
"""Create a new instance, applying a method to the underlying data.
In typical usage, the method is any of the shape-changing methods for
`~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those
picking particular elements (``__getitem__``, ``take``, etc.), which
are all defined in `~astropy.utils.shapes.ShapedLikeNDArray`. It will be
applied to the underlying arrays in the representation (e.g., ``x``,
``y``, and ``z`` for `~astropy.coordinates.CartesianRepresentation`),
as well as to any frame attributes that have a shape, with the results
used to create a new instance.
Internally, it is also used to apply functions to the above parts
(in particular, `~numpy.broadcast_to`).
Parameters
----------
method : str or callable
If str, it is the name of a method that is applied to the internal
``components``. If callable, the function is applied.
*args : tuple
Any positional arguments for ``method``.
**kwargs : dict
Any keyword arguments for ``method``.
"""
def apply_method(value):
if isinstance(value, ShapedLikeNDArray):
return value._apply(method, *args, **kwargs)
else:
if callable(method):
return method(value, *args, **kwargs)
else:
return getattr(value, method)(*args, **kwargs)
new = super().__new__(self.__class__)
if hasattr(self, "_representation"):
new._representation = self._representation.copy()
new._attr_names_with_defaults = self._attr_names_with_defaults.copy()
for attr in self.frame_attributes:
_attr = "_" + attr
if attr in self._attr_names_with_defaults:
setattr(new, _attr, getattr(self, _attr))
else:
value = getattr(self, _attr)
if getattr(value, "shape", ()):
value = apply_method(value)
elif method == "copy" or method == "flatten":
# flatten should copy also for a single element array, but
# we cannot use it directly for array scalars, since it
# always returns a one-dimensional array. So, just copy.
value = copy.copy(value)
setattr(new, _attr, value)
if self.has_data:
new._data = apply_method(self.data)
else:
new._data = None
shapes = [
getattr(new, "_" + attr).shape
for attr in new.frame_attributes
if (
attr not in new._attr_names_with_defaults
and getattr(getattr(new, "_" + attr), "shape", ())
)
]
if shapes:
new._no_data_shape = (
check_broadcast(*shapes) if len(shapes) > 1 else shapes[0]
)
else:
new._no_data_shape = ()
return new
def __setitem__(self, item, value):
if self.__class__ is not value.__class__:
raise TypeError(
f"can only set from object of same class: {self.__class__.__name__} vs."
f" {value.__class__.__name__}"
)
if not self.is_equivalent_frame(value):
raise ValueError("can only set frame item from an equivalent frame")
if value._data is None:
raise ValueError("can only set frame with value that has data")
if self._data is None:
raise ValueError("cannot set frame which has no data")
if self.shape == ():
raise TypeError(
f"scalar '{self.__class__.__name__}' frame object "
"does not support item assignment"
)
if self._data.__class__ is not value._data.__class__:
raise TypeError(
"can only set from object of same class: "
f"{self._data.__class__.__name__} vs. {value._data.__class__.__name__}"
)
if self._data._differentials:
# Can this ever occur? (Same class but different differential keys).
# This exception is not tested since it is not clear how to generate it.
if self._data._differentials.keys() != value._data._differentials.keys():
raise ValueError("setitem value must have same differentials")
for key, self_diff in self._data._differentials.items():
if self_diff.__class__ is not value._data._differentials[key].__class__:
raise TypeError(
"can only set from object of same class: "
f"{self_diff.__class__.__name__} vs. "
f"{value._data._differentials[key].__class__.__name__}"
)
# Set representation data
self._data[item] = value._data
# Frame attributes required to be identical by is_equivalent_frame,
# no need to set them here.
self.cache.clear()
def __dir__(self):
"""
Override the builtin `dir` behavior to include representation
names.
TODO: dynamic representation transforms (i.e. include cylindrical et al.).
"""
return sorted(
set(super().__dir__())
| set(self.representation_component_names)
| set(self.get_representation_component_names("s"))
)
def __getattr__(self, attr):
"""
Allow access to attributes on the representation and differential as
found via ``self.get_representation_component_names``.
TODO: We should handle dynamic representation transforms here (e.g.,
`.cylindrical`) instead of defining properties as below.
"""
# attr == '_representation' is likely from the hasattr() test in the
# representation property which is used for
# self.representation_component_names.
#
# Prevent infinite recursion here.
if attr.startswith("_"):
return self.__getattribute__(attr) # Raise AttributeError.
repr_names = self.representation_component_names
if attr in repr_names:
if self._data is None:
self.data # this raises the "no data" error by design - doing it
# this way means we don't have to replicate the error message here
rep = self.represent_as(self.representation_type, in_frame_units=True)
val = getattr(rep, repr_names[attr])
return val
diff_names = self.get_representation_component_names("s")
if attr in diff_names:
if self._data is None:
self.data # see above.
# TODO: this doesn't work for the case when there is only
# unitspherical information. The differential_type gets set to the
# default_differential, which expects full information, so the
# units don't work out
rep = self.represent_as(
in_frame_units=True, **self.get_representation_cls(None)
)
val = getattr(rep.differentials["s"], diff_names[attr])
return val
return self.__getattribute__(attr) # Raise AttributeError.
def __setattr__(self, attr, value):
# Don't slow down access of private attributes!
if not attr.startswith("_"):
if hasattr(self, "representation_info"):
repr_attr_names = set()
for representation_attr in self.representation_info.values():
repr_attr_names.update(representation_attr["names"])
if attr in repr_attr_names:
raise AttributeError(f"Cannot set any frame attribute {attr}")
super().__setattr__(attr, value)
def __eq__(self, value):
"""Equality operator for frame.
This implements strict equality and requires that the frames are
equivalent and that the representation data are exactly equal.
"""
if not isinstance(value, BaseCoordinateFrame):
return NotImplemented
is_equiv = self.is_equivalent_frame(value)
if self._data is None and value._data is None:
# For Frame with no data, == compare is same as is_equivalent_frame()
return is_equiv
if not is_equiv:
raise TypeError(
"cannot compare: objects must have equivalent frames: "
f"{self.replicate_without_data()} vs. {value.replicate_without_data()}"
)
if (value._data is None) != (self._data is None):
raise ValueError(
"cannot compare: one frame has data and the other does not"
)
return self._data == value._data
def __ne__(self, value):
return np.logical_not(self == value)
def separation(self, other):
"""
Computes on-sky separation between this coordinate and another.
.. note::
If the ``other`` coordinate object is in a different frame, it is
first transformed to the frame of this object. This can lead to
unintuitive behavior if not accounted for. Particularly of note is
that ``self.separation(other)`` and ``other.separation(self)`` may
not give the same answer in this case.
Parameters
----------
other : `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to get the separation to.
Returns
-------
sep : `~astropy.coordinates.Angle`
The on-sky separation between this and the ``other`` coordinate.
Notes
-----
The separation is calculated using the Vincenty formula, which
is stable at all locations, including poles and antipodes [1]_.
.. [1] https://en.wikipedia.org/wiki/Great-circle_distance
"""
from .angle_utilities import angular_separation
from .angles import Angle
self_unit_sph = self.represent_as(r.UnitSphericalRepresentation)
other_transformed = other.transform_to(self)
other_unit_sph = other_transformed.represent_as(r.UnitSphericalRepresentation)
# Get the separation as a Quantity, convert to Angle in degrees
sep = angular_separation(
self_unit_sph.lon, self_unit_sph.lat, other_unit_sph.lon, other_unit_sph.lat
)
return Angle(sep, unit=u.degree)
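# Illustrative usage sketch (hypothetical coordinates; any frame with data
# works the same way):
#
#     >>> from astropy.coordinates import ICRS
#     >>> import astropy.units as u
#     >>> c1 = ICRS(ra=10 * u.deg, dec=9 * u.deg)
#     >>> c2 = ICRS(ra=11 * u.deg, dec=10 * u.deg)
#     >>> c1.separation(c2)  # doctest: +SKIP  (roughly 1.4 deg)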
def separation_3d(self, other):
"""
Computes three dimensional separation between this coordinate
and another.
Parameters
----------
other : `~astropy.coordinates.BaseCoordinateFrame`
The coordinate system to get the distance to.
Returns
-------
sep : `~astropy.coordinates.Distance`
The real-space distance between these two coordinates.
Raises
------
ValueError
If this or the other coordinate do not have distances.
"""
from .distances import Distance
if issubclass(self.data.__class__, r.UnitSphericalRepresentation):
raise ValueError(
"This object does not have a distance; cannot compute 3d separation."
)
# do this first just in case the conversion somehow creates a distance
other_in_self_system = other.transform_to(self)
if issubclass(other_in_self_system.data.__class__, r.UnitSphericalRepresentation):
raise ValueError(
"The other object does not have a distance; "
"cannot compute 3d separation."
)
# drop the differentials to ensure they don't do anything odd in the
# subtraction
self_car = self.data.without_differentials().represent_as(
r.CartesianRepresentation
)
other_car = other_in_self_system.data.without_differentials().represent_as(
r.CartesianRepresentation
)
dist = (self_car - other_car).norm()
if dist.unit == u.one:
return dist
else:
return Distance(dist)
@property
def cartesian(self):
"""
Shorthand for a cartesian representation of the coordinates in this
object.
"""
# TODO: if representations are updated to use a full transform graph,
# the representation aliases should not be hard-coded like this
return self.represent_as("cartesian", in_frame_units=True)
@property
def cylindrical(self):
"""
Shorthand for a cylindrical representation of the coordinates in this
object.
"""
# TODO: if representations are updated to use a full transform graph,
# the representation aliases should not be hard-coded like this
return self.represent_as("cylindrical", in_frame_units=True)
@property
def spherical(self):
"""
Shorthand for a spherical representation of the coordinates in this
object.
"""
# TODO: if representations are updated to use a full transform graph,
# the representation aliases should not be hard-coded like this
return self.represent_as("spherical", in_frame_units=True)
@property
def sphericalcoslat(self):
"""
Shorthand for a spherical representation of the positional data and a
`~astropy.coordinates.SphericalCosLatDifferential` for the velocity
data in this object.
"""
# TODO: if representations are updated to use a full transform graph,
# the representation aliases should not be hard-coded like this
return self.represent_as("spherical", "sphericalcoslat", in_frame_units=True)
@property
def velocity(self):
"""
Shorthand for retrieving the Cartesian space-motion as a
`~astropy.coordinates.CartesianDifferential` object.
This is equivalent to calling ``self.cartesian.differentials['s']``.
"""
if "s" not in self.data.differentials:
raise ValueError(
"Frame has no associated velocity (Differential) data information."
)
return self.cartesian.differentials["s"]
@property
def proper_motion(self):
"""
Shorthand for the two-dimensional proper motion as a
`~astropy.units.Quantity` object with angular velocity units. In the
returned `~astropy.units.Quantity`, ``axis=0`` is the longitude/latitude
dimension so that ``.proper_motion[0]`` is the longitudinal proper
motion and ``.proper_motion[1]`` is latitudinal. The longitudinal proper
motion already includes the cos(latitude) term.
"""
if "s" not in self.data.differentials:
raise ValueError(
"Frame has no associated velocity (Differential) data information."
)
sph = self.represent_as("spherical", "sphericalcoslat", in_frame_units=True)
pm_lon = sph.differentials["s"].d_lon_coslat
pm_lat = sph.differentials["s"].d_lat
return (
np.stack((pm_lon.value, pm_lat.to(pm_lon.unit).value), axis=0) * pm_lon.unit
)
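# Illustrative sketch of the stacking convention (hypothetical values):
#
#     >>> icrs = ICRS(ra=1 * u.deg, dec=2 * u.deg,
#     ...             pm_ra_cosdec=3 * u.mas / u.yr,
#     ...             pm_dec=4 * u.mas / u.yr)
#     >>> icrs.proper_motion[0]  # doctest: +SKIP  (longitudinal, 3 mas / yr)
#     >>> icrs.proper_motion[1]  # doctest: +SKIP  (latitudinal, 4 mas / yr)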
@property
def radial_velocity(self):
"""
Shorthand for the radial or line-of-sight velocity as a
`~astropy.units.Quantity` object.
"""
if "s" not in self.data.differentials:
raise ValueError(
"Frame has no associated velocity (Differential) data information."
)
sph = self.represent_as("spherical", in_frame_units=True)
return sph.differentials["s"].d_distance
class GenericFrame(BaseCoordinateFrame):
"""
A frame object that can't store data but can hold any arbitrary frame
attributes. Mostly useful as a utility for the high-level class to store
intermediate frame attributes.
Parameters
----------
frame_attrs : dict
A dictionary of attributes to be used as the frame attributes for this
frame.
"""
name = None # it's not a "real" frame so it doesn't have a name
def __init__(self, frame_attrs):
self.frame_attributes = {}
for name, default in frame_attrs.items():
self.frame_attributes[name] = Attribute(default)
setattr(self, "_" + name, default)
super().__init__(None)
def __getattr__(self, name):
if "_" + name in self.__dict__:
return getattr(self, "_" + name)
else:
raise AttributeError(f"no {name}")
def __setattr__(self, name, value):
if name in self.frame_attributes:
raise AttributeError(f"can't set frame attribute '{name}'")
else:
super().__setattr__(name, value)
|
e5377a5214016c43d55f7e4e8b69cfb433a2dffa46a7ff640e1be5b6eecb54cc | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains convenience functions implementing some of the
algorithms contained within Jean Meeus, 'Astronomical Algorithms',
second edition, 1998, Willmann-Bell.
"""
import erfa
import numpy as np
from numpy.polynomial.polynomial import polyval
from astropy import units as u
from astropy.utils import deprecated
from . import ICRS, GeocentricTrueEcliptic, SkyCoord
from .builtin_frames.utils import get_jd12
__all__ = ["calc_moon"]
# Meeus 1998: table 47.A
# D M M' F l r
_MOON_L_R = (
(0, 0, 1, 0, 6288774, -20905355),
(2, 0, -1, 0, 1274027, -3699111),
(2, 0, 0, 0, 658314, -2955968),
(0, 0, 2, 0, 213618, -569925),
(0, 1, 0, 0, -185116, 48888),
(0, 0, 0, 2, -114332, -3149),
(2, 0, -2, 0, 58793, 246158),
(2, -1, -1, 0, 57066, -152138),
(2, 0, 1, 0, 53322, -170733),
(2, -1, 0, 0, 45758, -204586),
(0, 1, -1, 0, -40923, -129620),
(1, 0, 0, 0, -34720, 108743),
(0, 1, 1, 0, -30383, 104755),
(2, 0, 0, -2, 15327, 10321),
(0, 0, 1, 2, -12528, 0),
(0, 0, 1, -2, 10980, 79661),
(4, 0, -1, 0, 10675, -34782),
(0, 0, 3, 0, 10034, -23210),
(4, 0, -2, 0, 8548, -21636),
(2, 1, -1, 0, -7888, 24208),
(2, 1, 0, 0, -6766, 30824),
(1, 0, -1, 0, -5163, -8379),
(1, 1, 0, 0, 4987, -16675),
(2, -1, 1, 0, 4036, -12831),
(2, 0, 2, 0, 3994, -10445),
(4, 0, 0, 0, 3861, -11650),
(2, 0, -3, 0, 3665, 14403),
(0, 1, -2, 0, -2689, -7003),
(2, 0, -1, 2, -2602, 0),
(2, -1, -2, 0, 2390, 10056),
(1, 0, 1, 0, -2348, 6322),
(2, -2, 0, 0, 2236, -9884),
(0, 1, 2, 0, -2120, 5751),
(0, 2, 0, 0, -2069, 0),
(2, -2, -1, 0, 2048, -4950),
(2, 0, 1, -2, -1773, 4130),
(2, 0, 0, 2, -1595, 0),
(4, -1, -1, 0, 1215, -3958),
(0, 0, 2, 2, -1110, 0),
(3, 0, -1, 0, -892, 3258),
(2, 1, 1, 0, -810, 2616),
(4, -1, -2, 0, 759, -1897),
(0, 2, -1, 0, -713, -2117),
(2, 2, -1, 0, -700, 2354),
(2, 1, -2, 0, 691, 0),
(2, -1, 0, -2, 596, 0),
(4, 0, 1, 0, 549, -1423),
(0, 0, 4, 0, 537, -1117),
(4, -1, 0, 0, 520, -1571),
(1, 0, -2, 0, -487, -1739),
(2, 1, 0, -2, -399, 0),
(0, 0, 2, -2, -381, -4421),
(1, 1, 1, 0, 351, 0),
(3, 0, -2, 0, -340, 0),
(4, 0, -3, 0, 330, 0),
(2, -1, 2, 0, 327, 0),
(0, 2, 1, 0, -323, 1165),
(1, 1, -1, 0, 299, 0),
(2, 0, 3, 0, 294, 0),
(2, 0, -1, -2, 0, 8752),
)
# Meeus 1998: table 47.B
# D M M' F b
_MOON_B = (
(0, 0, 0, 1, 5128122),
(0, 0, 1, 1, 280602),
(0, 0, 1, -1, 277693),
(2, 0, 0, -1, 173237),
(2, 0, -1, 1, 55413),
(2, 0, -1, -1, 46271),
(2, 0, 0, 1, 32573),
(0, 0, 2, 1, 17198),
(2, 0, 1, -1, 9266),
(0, 0, 2, -1, 8822),
(2, -1, 0, -1, 8216),
(2, 0, -2, -1, 4324),
(2, 0, 1, 1, 4200),
(2, 1, 0, -1, -3359),
(2, -1, -1, 1, 2463),
(2, -1, 0, 1, 2211),
(2, -1, -1, -1, 2065),
(0, 1, -1, -1, -1870),
(4, 0, -1, -1, 1828),
(0, 1, 0, 1, -1794),
(0, 0, 0, 3, -1749),
(0, 1, -1, 1, -1565),
(1, 0, 0, 1, -1491),
(0, 1, 1, 1, -1475),
(0, 1, 1, -1, -1410),
(0, 1, 0, -1, -1344),
(1, 0, 0, -1, -1335),
(0, 0, 3, 1, 1107),
(4, 0, 0, -1, 1021),
(4, 0, -1, 1, 833),
# second column
(0, 0, 1, -3, 777),
(4, 0, -2, 1, 671),
(2, 0, 0, -3, 607),
(2, 0, 2, -1, 596),
(2, -1, 1, -1, 491),
(2, 0, -2, 1, -451),
(0, 0, 3, -1, 439),
(2, 0, 2, 1, 422),
(2, 0, -3, -1, 421),
(2, 1, -1, 1, -366),
(2, 1, 0, 1, -351),
(4, 0, 0, 1, 331),
(2, -1, 1, 1, 315),
(2, -2, 0, -1, 302),
(0, 0, 1, 3, -283),
(2, 1, 1, -1, -229),
(1, 1, 0, -1, 223),
(1, 1, 0, 1, 223),
(0, 1, -2, -1, -220),
(2, 1, -1, -1, -220),
(1, 0, 1, 1, -185),
(2, -1, -2, -1, 181),
(0, 1, 2, 1, -177),
(4, 0, -2, -1, 176),
(4, -1, -1, -1, 166),
(1, 0, 1, -1, -164),
(4, 0, 1, -1, 132),
(1, 0, -1, -1, -119),
(4, -1, 0, -1, 115),
(2, -2, 0, 1, 107),
)
"""
Coefficients of polynomials for various terms:
Lc : Mean longitude of Moon, w.r.t mean Equinox of date
D : Mean elongation of the Moon
M : Sun's mean anomaly
Mc : Moon's mean anomaly
F : Moon's argument of latitude (mean distance of Moon from its ascending node).
"""
_coLc = (2.18316448e02, 4.81267881e05, -1.57860000e-03, 1.85583502e-06, -1.53388349e-08)
_coD = (2.97850192e02, 4.45267111e05, -1.88190000e-03, 1.83194472e-06, -8.84447000e-09)
_coM = (3.57529109e02, 3.59990503e04, -1.53600000e-04, 4.08329931e-08)
_coMc = (1.34963396e02, 4.77198868e05, 8.74140000e-03, 1.43474081e-05, -6.79717238e-08)
_coF = (9.32720950e01, 4.83202018e05, -3.65390000e-03, -2.83607487e-07, 1.15833246e-09)
_coA1 = (119.75, 131.849)
_coA2 = (53.09, 479264.290)
_coA3 = (313.45, 481266.484)
_coE = (1.0, -0.002516, -0.0000074)
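# A quick sketch of how the coefficient tuples above are used: each is a
# polynomial (lowest order first, in degrees) evaluated at T, the number of
# Julian centuries since J2000.0. The epoch below is hypothetical:
#
#     >>> from numpy.polynomial.polynomial import polyval
#     >>> T = 0.2  # i.e., the year 2020
#     >>> polyval(T, _coD) % 360  # Moon's mean elongation D, in degrees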
@deprecated(
since="5.0",
alternative="astropy.coordinates.get_moon",
message=(
"The private calc_moon function has been deprecated, as its functionality is"
" now available in ERFA. Note that the coordinate system was not interpreted"
" quite correctly, leading to small inaccuracies. Please use the public"
" get_moon or get_body functions instead."
),
)
def calc_moon(t):
"""
Lunar position model ELP2000-82 of (Chapront-Touze' and Chapront, 1983, 124, 50).
This is the simplified version of Jean Meeus, Astronomical Algorithms,
second edition, 1998, Willmann-Bell. Meeus claims approximate accuracy of 10"
in longitude and 4" in latitude, with no specified time range.
Tests against JPL ephemerides show accuracy of 10 arcseconds and 50 km over the
date range CE 1950-2050.
Parameters
----------
t : `~astropy.time.Time`
Time of observation.
Returns
-------
skycoord : `~astropy.coordinates.SkyCoord`
ICRS Coordinate for the body
"""
# number of centuries since J2000.0.
# This should strictly speaking be in Ephemeris Time, but TDB or TT
# will introduce error smaller than intrinsic accuracy of algorithm.
T = (t.tdb.jyear - 2000.0) / 100.0
# constants that are needed for all calculations
Lc = u.Quantity(polyval(T, _coLc), u.deg)
D = u.Quantity(polyval(T, _coD), u.deg)
M = u.Quantity(polyval(T, _coM), u.deg)
Mc = u.Quantity(polyval(T, _coMc), u.deg)
F = u.Quantity(polyval(T, _coF), u.deg)
A1 = u.Quantity(polyval(T, _coA1), u.deg)
A2 = u.Quantity(polyval(T, _coA2), u.deg)
A3 = u.Quantity(polyval(T, _coA3), u.deg)
E = polyval(T, _coE)
suml = sumr = 0.0
for DNum, MNum, McNum, FNum, LFac, RFac in _MOON_L_R:
corr = E ** abs(MNum)
suml += LFac * corr * np.sin(D * DNum + M * MNum + Mc * McNum + F * FNum)
sumr += RFac * corr * np.cos(D * DNum + M * MNum + Mc * McNum + F * FNum)
sumb = 0.0
for DNum, MNum, McNum, FNum, BFac in _MOON_B:
corr = E ** abs(MNum)
sumb += BFac * corr * np.sin(D * DNum + M * MNum + Mc * McNum + F * FNum)
suml += 3958 * np.sin(A1) + 1962 * np.sin(Lc - F) + 318 * np.sin(A2)
sumb += (
-2235 * np.sin(Lc)
+ 382 * np.sin(A3)
+ 175 * np.sin(A1 - F)
+ 175 * np.sin(A1 + F)
+ 127 * np.sin(Lc - Mc)
- 115 * np.sin(Lc + Mc)
)
# ensure units
suml = suml * u.microdegree
sumb = sumb * u.microdegree
# nutation of longitude
jd1, jd2 = get_jd12(t, "tt")
nut, _ = erfa.nut06a(jd1, jd2)
nut = nut * u.rad
# calculate ecliptic coordinates
lon = Lc + suml + nut
lat = sumb
dist = (385000.56 + sumr / 1000) * u.km
# Meeus algorithm gives GeocentricTrueEcliptic coordinates
ecliptic_coo = GeocentricTrueEcliptic(lon, lat, distance=dist, obstime=t, equinox=t)
return SkyCoord(ecliptic_coo.transform_to(ICRS()))
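# Illustrative call of the deprecated function above (hypothetical date;
# emits an AstropyDeprecationWarning):
#
#     >>> from astropy.time import Time
#     >>> calc_moon(Time("2020-01-01"))  # doctest: +SKIP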
|
009211c93533edbb58ec7532a456fec2b9ae6127a57e54378ff8890735d9a2bc | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import collections
import json
import socket
import urllib.error
import urllib.parse
import urllib.request
from warnings import warn
import erfa
import numpy as np
from astropy import constants as consts
from astropy import units as u
from astropy.units.quantity import QuantityInfoBase
from astropy.utils import data
from astropy.utils.decorators import format_doc
from astropy.utils.exceptions import AstropyUserWarning
from .angles import Angle, Latitude, Longitude
from .errors import UnknownSiteException
from .matrix_utilities import matrix_transpose
from .representation import (
BaseRepresentation,
CartesianDifferential,
CartesianRepresentation,
)
__all__ = [
"EarthLocation",
"BaseGeodeticRepresentation",
"WGS84GeodeticRepresentation",
"WGS72GeodeticRepresentation",
"GRS80GeodeticRepresentation",
]
GeodeticLocation = collections.namedtuple("GeodeticLocation", ["lon", "lat", "height"])
ELLIPSOIDS = {}
"""Available ellipsoids (defined in erfam.h, with numbers exposed in erfa)."""
# Note: they get filled by the creation of the geodetic classes.
OMEGA_EARTH = (1.002_737_811_911_354_48 * u.cycle / u.day).to(
1 / u.s, u.dimensionless_angles()
)
"""
Rotational velocity of Earth, following SOFA's pvtob.
In UT1 seconds, this would be 2 pi / (24 * 3600), but we need the value
in SI seconds, so multiply by the ratio of stellar to solar day.
See Explanatory Supplement to the Astronomical Almanac, ed. P. Kenneth
Seidelmann (1992), University Science Books. The constant is the
conventional, exact one (IERS conventions 2003); see
http://hpiers.obspm.fr/eop-pc/index.php?index=constants.
"""
def _check_ellipsoid(ellipsoid=None, default="WGS84"):
if ellipsoid is None:
ellipsoid = default
if ellipsoid not in ELLIPSOIDS:
raise ValueError(f"Ellipsoid {ellipsoid} not among known ones ({ELLIPSOIDS})")
return ellipsoid
def _get_json_result(url, err_str, use_google):
# need to do this here to prevent a series of complicated circular imports
from .name_resolve import NameResolveError
try:
# Retrieve JSON response from Google maps API
resp = urllib.request.urlopen(url, timeout=data.conf.remote_timeout)
resp_data = json.loads(resp.read().decode("utf8"))
except urllib.error.URLError as e:
# This catches a timeout error, see:
# http://stackoverflow.com/questions/2712524/handling-urllib2s-timeout-python
if isinstance(e.reason, socket.timeout):
raise NameResolveError(err_str.format(msg="connection timed out")) from e
else:
raise NameResolveError(err_str.format(msg=e.reason)) from e
except socket.timeout:
# There are some cases where urllib2 does not catch socket.timeout
# especially while receiving response data on an already previously
# working request
raise NameResolveError(err_str.format(msg="connection timed out"))
if use_google:
results = resp_data.get("results", [])
if resp_data.get("status", None) != "OK":
raise NameResolveError(
err_str.format(msg="unknown failure with Google API")
)
else: # OpenStreetMap returns a list
results = resp_data
if not results:
raise NameResolveError(err_str.format(msg="no results returned"))
return results
class EarthLocationInfo(QuantityInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
_represent_as_dict_attrs = ("x", "y", "z", "ellipsoid")
def _construct_from_dict(self, map):
# Need to pop ellipsoid off and update post-instantiation. This is
# on the to-fix list in #4261.
ellipsoid = map.pop("ellipsoid")
out = self._parent_cls(**map)
out.ellipsoid = ellipsoid
return out
def new_like(self, cols, length, metadata_conflicts="warn", name=None):
"""
Return a new EarthLocation instance which is consistent with the
input ``cols`` and has ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
cols : list
List of input columns
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : EarthLocation (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Very similar to QuantityInfo.new_like, but the creation of the
# map is different enough that this needs its own routine.
# Get merged info attributes shape, dtype, format, description.
attrs = self.merge_cols_attributes(
cols, metadata_conflicts, name, ("meta", "format", "description")
)
# The above raises an error if the dtypes do not match, but returns
# just the string representation, which is not useful, so remove.
attrs.pop("dtype")
# Make empty EarthLocation using the dtype and unit of the last column.
# Use zeros so we do not get problems for possible conversion to
# geodetic coordinates.
shape = (length,) + attrs.pop("shape")
data = u.Quantity(
np.zeros(shape=shape, dtype=cols[0].dtype), unit=cols[0].unit, copy=False
)
# Get arguments needed to reconstruct class
map = {
key: (data[key] if key in "xyz" else getattr(cols[-1], key))
for key in self._represent_as_dict_attrs
}
out = self._construct_from_dict(map)
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
class EarthLocation(u.Quantity):
"""
Location on the Earth.
Initialization is first attempted assuming geocentric (x, y, z) coordinates
are given; if that fails, another attempt is made assuming geodetic
coordinates (longitude, latitude, height above a reference ellipsoid).
When using the geodetic forms, Longitudes are measured increasing to the
east, so west longitudes are negative. Internally, the coordinates are
stored as geocentric.
To ensure a specific type of coordinates is used, use the corresponding
class methods (`from_geocentric` and `from_geodetic`) or initialize the
arguments with names (``x``, ``y``, ``z`` for geocentric; ``lon``, ``lat``,
``height`` for geodetic). See the class methods for details.
Notes
-----
This class fits into the coordinates transformation framework in that it
encodes a position on the `~astropy.coordinates.ITRS` frame. To get a
proper `~astropy.coordinates.ITRS` object from this object, use the ``itrs``
property.
"""
_ellipsoid = "WGS84"
_location_dtype = np.dtype({"names": ["x", "y", "z"], "formats": [np.float64] * 3})
_array_dtype = np.dtype((np.float64, (3,)))
info = EarthLocationInfo()
def __new__(cls, *args, **kwargs):
# TODO: needs copy argument and better dealing with inputs.
if len(args) == 1 and len(kwargs) == 0 and isinstance(args[0], EarthLocation):
return args[0].copy()
try:
self = cls.from_geocentric(*args, **kwargs)
except (u.UnitsError, TypeError) as exc_geocentric:
try:
self = cls.from_geodetic(*args, **kwargs)
except Exception as exc_geodetic:
raise TypeError(
"Coordinates could not be parsed as either "
"geocentric or geodetic, with respective "
f'exceptions "{exc_geocentric}" and "{exc_geodetic}"'
)
return self
@classmethod
def from_geocentric(cls, x, y, z, unit=None):
"""
Location on Earth, initialized from geocentric coordinates.
Parameters
----------
x, y, z : `~astropy.units.Quantity` or array-like
Cartesian coordinates. If not quantities, ``unit`` should be given.
unit : unit-like or None
Physical unit of the coordinate values. If ``x``, ``y``, and/or
``z`` are quantities, they will be converted to this unit.
Raises
------
astropy.units.UnitsError
If the units on ``x``, ``y``, and ``z`` do not match or an invalid
unit is given.
ValueError
If the shapes of ``x``, ``y``, and ``z`` do not match.
TypeError
If ``x`` is not a `~astropy.units.Quantity` and no unit is given.
"""
if unit is None:
try:
unit = x.unit
except AttributeError:
raise TypeError(
"Geocentric coordinates should be Quantities "
"unless an explicit unit is given."
) from None
else:
unit = u.Unit(unit)
if unit.physical_type != "length":
raise u.UnitsError("Geocentric coordinates should be in units of length.")
try:
x = u.Quantity(x, unit, copy=False)
y = u.Quantity(y, unit, copy=False)
z = u.Quantity(z, unit, copy=False)
except u.UnitsError:
raise u.UnitsError("Geocentric coordinate units should all be consistent.")
x, y, z = np.broadcast_arrays(x, y, z)
struc = np.empty(x.shape, cls._location_dtype)
struc["x"], struc["y"], struc["z"] = x, y, z
return super().__new__(cls, struc, unit, copy=False)
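# Illustrative sketch (hypothetical geocentric position, roughly one Earth
# radius from the geocenter):
#
#     >>> import astropy.units as u
#     >>> loc = EarthLocation.from_geocentric(1378, -4831, 3994, unit=u.km)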
@classmethod
def from_geodetic(cls, lon, lat, height=0.0, ellipsoid=None):
"""
Location on Earth, initialized from geodetic coordinates.
Parameters
----------
lon : `~astropy.coordinates.Longitude` or float
Earth East longitude. Can be anything that initialises an
`~astropy.coordinates.Angle` object (if float, in degrees).
lat : `~astropy.coordinates.Latitude` or float
Earth latitude. Can be anything that initialises an
`~astropy.coordinates.Latitude` object (if float, in degrees).
height : `~astropy.units.Quantity` ['length'] or float, optional
Height above reference ellipsoid (if float, in meters; default: 0).
ellipsoid : str, optional
Name of the reference ellipsoid to use (default: 'WGS84').
Available ellipsoids are: 'WGS84', 'GRS80', 'WGS72'.
Raises
------
astropy.units.UnitsError
If the units on ``lon`` and ``lat`` are inconsistent with angular
ones, or that on ``height`` with a length.
ValueError
If ``lon``, ``lat``, and ``height`` do not have the same shape, or
if ``ellipsoid`` is not recognized as among the ones implemented.
Notes
-----
For the conversion to geocentric coordinates, the ERFA routine
``gd2gc`` is used. See https://github.com/liberfa/erfa
"""
ellipsoid = _check_ellipsoid(ellipsoid, default=cls._ellipsoid)
# As wrapping fails on readonly input, we do so manually
lon = Angle(lon, u.degree, copy=False).wrap_at(180 * u.degree)
lat = Latitude(lat, u.degree, copy=False)
# don't convert to m by default, so we can use the height unit below.
if not isinstance(height, u.Quantity):
height = u.Quantity(height, u.m, copy=False)
# get geocentric coordinates.
geodetic = ELLIPSOIDS[ellipsoid](lon, lat, height, copy=False)
xyz = geodetic.to_cartesian().get_xyz(xyz_axis=-1) << height.unit
self = xyz.view(cls._location_dtype, cls).reshape(geodetic.shape)
self._ellipsoid = ellipsoid
return self
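# Illustrative sketch (approximate coordinates of Greenwich):
#
#     >>> greenwich = EarthLocation.from_geodetic(
#     ...     lon=0.0 * u.deg, lat=51.477 * u.deg, height=46 * u.m)
#     >>> greenwich.geocentric  # doctest: +SKIP  (x, y, z Quantities)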
@classmethod
def of_site(cls, site_name):
"""
Return an object of this class for a known observatory/site by name.
This is intended as a quick convenience function to get basic site
information, not a fully-featured exhaustive registry of observatories
and all their properties.
Additional information about the site is stored in the ``.info.meta``
dictionary of sites obtained using this method (see the examples below).
.. note::
When this function is called, it will attempt to download site
information from the astropy data server. If you would like a site
to be added, issue a pull request to the
`astropy-data repository <https://github.com/astropy/astropy-data>`_ .
If a site cannot be found in the registry (i.e., an internet
connection is not available), it will fall back on a built-in list.
In the future, this bundled list might include a version-controlled
list of canonical observatories extracted from the online version,
but it currently only contains the Greenwich Royal Observatory as an
example case.
Parameters
----------
site_name : str
Name of the observatory (case-insensitive).
Returns
-------
site : `~astropy.coordinates.EarthLocation` (or subclass) instance
The location of the observatory. The returned class will be the same
as this class.
Examples
--------
>>> from astropy.coordinates import EarthLocation
>>> keck = EarthLocation.of_site('Keck Observatory') # doctest: +REMOTE_DATA
>>> keck.geodetic # doctest: +REMOTE_DATA +FLOAT_CMP
GeodeticLocation(lon=<Longitude -155.47833333 deg>, lat=<Latitude 19.82833333 deg>, height=<Quantity 4160. m>)
>>> keck.info # doctest: +REMOTE_DATA
name = W. M. Keck Observatory
dtype = (float64, float64, float64)
unit = m
class = EarthLocation
n_bad = 0
>>> keck.info.meta # doctest: +REMOTE_DATA
{'source': 'IRAF Observatory Database', 'timezone': 'US/Hawaii'}
See Also
--------
get_site_names : the list of sites that this function can access
"""
registry = cls._get_site_registry()
try:
el = registry[site_name]
except UnknownSiteException as e:
raise UnknownSiteException(
e.site, "EarthLocation.get_site_names", close_names=e.close_names
) from e
if cls is el.__class__:
return el
else:
newel = cls.from_geodetic(*el.to_geodetic())
newel.info.name = el.info.name
return newel
@classmethod
def of_address(cls, address, get_height=False, google_api_key=None):
"""
Return an object of this class for a given address by querying either
the OpenStreetMap Nominatim tool [1]_ (default) or the Google geocoding
API [2]_, which requires a specified API key.
This is intended as a quick convenience function to get easy access to
locations. If you need to specify a precise location, you should use the
initializer directly and pass in a longitude, latitude, and elevation.
In the background, this just issues a web query to either of
the APIs noted above. This is not meant to be abused! Both
OpenStreetMap and Google use IP-based query limiting and will ban your
IP if you send more than a few thousand queries per hour [2]_.
.. warning::
If the query returns more than one location (e.g., searching on
``address='springfield'``), this function will use the **first**
returned location.
Parameters
----------
address : str
The address to get the location for. As per the Google maps API,
this can be a fully specified street address (e.g., 123 Main St.,
New York, NY) or a city name (e.g., Danbury, CT), or etc.
get_height : bool, optional
This only works when using the Google API! See the ``google_api_key``
block below. Use the retrieved location to perform a second query to
the Google maps elevation API to retrieve the height of the input
address [3]_.
google_api_key : str, optional
A Google API key with the Geocoding API and (optionally) the
elevation API enabled. See [4]_ for more information.
Returns
-------
location : `~astropy.coordinates.EarthLocation` (or subclass) instance
The location of the input address.
The returned object will be of the same type as this class.
References
----------
.. [1] https://nominatim.openstreetmap.org/
.. [2] https://developers.google.com/maps/documentation/geocoding/start
.. [3] https://developers.google.com/maps/documentation/elevation/start
.. [4] https://developers.google.com/maps/documentation/geocoding/get-api-key
"""
use_google = google_api_key is not None
# Fail fast if invalid options are passed:
if not use_google and get_height:
raise ValueError(
"Currently, `get_height` only works when using the Google geocoding"
" API, which requires passing a Google API key with `google_api_key`."
" See:"
" https://developers.google.com/maps/documentation/geocoding/get-api-key"
" for information on obtaining an API key."
)
if use_google: # Google
pars = urllib.parse.urlencode({"address": address, "key": google_api_key})
geo_url = f"https://maps.googleapis.com/maps/api/geocode/json?{pars}"
else: # OpenStreetMap
pars = urllib.parse.urlencode({"q": address, "format": "json"})
geo_url = f"https://nominatim.openstreetmap.org/search?{pars}"
# get longitude and latitude location
err_str = f"Unable to retrieve coordinates for address '{address}'; {{msg}}"
geo_result = _get_json_result(geo_url, err_str=err_str, use_google=use_google)
if use_google:
loc = geo_result[0]["geometry"]["location"]
lat = loc["lat"]
lon = loc["lng"]
else:
loc = geo_result[0]
lat = float(loc["lat"]) # strings are returned by OpenStreetMap
lon = float(loc["lon"])
if get_height:
pars = {"locations": f"{lat:.8f},{lon:.8f}", "key": google_api_key}
pars = urllib.parse.urlencode(pars)
ele_url = f"https://maps.googleapis.com/maps/api/elevation/json?{pars}"
err_str = f"Unable to retrieve elevation for address '{address}'; {{msg}}"
ele_result = _get_json_result(
ele_url, err_str=err_str, use_google=use_google
)
height = ele_result[0]["elevation"] * u.meter
else:
height = 0.0
return cls.from_geodetic(lon=lon * u.deg, lat=lat * u.deg, height=height)
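# Illustrative sketch (requires network access; the address is hypothetical):
#
#     >>> loc = EarthLocation.of_address("Greenwich, UK")  # doctest: +SKIP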
@classmethod
def get_site_names(cls):
"""
Get list of names of observatories for use with
`~astropy.coordinates.EarthLocation.of_site`.
.. note::
When this function is called, it will first attempt to
download site information from the astropy data server. If it
cannot (i.e., an internet connection is not available), it will fall
back on the list included with astropy (which is a limited and dated
set of sites). If you think a site should be added, issue a pull
request to the
`astropy-data repository <https://github.com/astropy/astropy-data>`_ .
Returns
-------
names : list of str
List of valid observatory names
See Also
--------
of_site : Gets the actual location object for one of the sites names
this returns.
"""
return cls._get_site_registry().names
@classmethod
def _get_site_registry(cls, force_download=False, force_builtin=False):
"""
Gets the site registry. The first time this either downloads or loads
from the data file packaged with astropy. Subsequent calls will use the
cached version unless explicitly overridden.
Parameters
----------
force_download : bool or str
If not False, force replacement of the cached registry with a
downloaded version. If a str, that will be used as the URL to
download from (if just True, the default URL will be used).
force_builtin : bool
If True, load from the data file bundled with astropy and set the
cache to that.
Returns
-------
reg : astropy.coordinates.sites.SiteRegistry
"""
# need to do this here at the bottom to avoid circular dependencies
from .sites import get_builtin_sites, get_downloaded_sites
if force_builtin and force_download:
raise ValueError("Cannot have both force_builtin and force_download True")
if force_builtin:
reg = cls._site_registry = get_builtin_sites()
else:
reg = getattr(cls, "_site_registry", None)
if force_download or not reg:
try:
if isinstance(force_download, str):
reg = get_downloaded_sites(force_download)
else:
reg = get_downloaded_sites()
except OSError:
if force_download:
raise
msg = (
"Could not access the online site list. Falling "
"back on the built-in version, which is rather "
"limited. If you want to retry the download, do "
"{0}._get_site_registry(force_download=True)"
)
warn(AstropyUserWarning(msg.format(cls.__name__)))
reg = get_builtin_sites()
cls._site_registry = reg
return reg
@property
def ellipsoid(self):
"""The default ellipsoid used to convert to geodetic coordinates."""
return self._ellipsoid
@ellipsoid.setter
def ellipsoid(self, ellipsoid):
self._ellipsoid = _check_ellipsoid(ellipsoid)
@property
def geodetic(self):
"""Convert to geodetic coordinates for the default ellipsoid."""
return self.to_geodetic()
def to_geodetic(self, ellipsoid=None):
"""Convert to geodetic coordinates.
Parameters
----------
ellipsoid : str, optional
Reference ellipsoid to use. Default is the one the coordinates
were initialized with. Available are: 'WGS84', 'GRS80', 'WGS72'
Returns
-------
lon, lat, height : `~astropy.units.Quantity`
The tuple is a ``GeodeticLocation`` namedtuple and is comprised of
instances of `~astropy.coordinates.Longitude`,
`~astropy.coordinates.Latitude`, and `~astropy.units.Quantity`.
Raises
------
ValueError
if ``ellipsoid`` is not recognized as among the ones implemented.
Notes
-----
For the conversion to geodetic coordinates, the ERFA routine
``gc2gd`` is used. See https://github.com/liberfa/erfa
"""
ellipsoid = _check_ellipsoid(ellipsoid, default=self.ellipsoid)
xyz = self.view(self._array_dtype, u.Quantity)
llh = CartesianRepresentation(xyz, xyz_axis=-1, copy=False).represent_as(
ELLIPSOIDS[ellipsoid]
)
return GeodeticLocation(
Longitude(llh.lon, u.deg, wrap_angle=180 * u.deg, copy=False),
llh.lat << u.deg,
llh.height << self.unit,
)
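# Illustrative sketch, reusing a hypothetical ``loc`` instance:
#
#     >>> loc.to_geodetic()         # doctest: +SKIP  (default ellipsoid)
#     >>> loc.to_geodetic("GRS80")  # doctest: +SKIP  (explicit ellipsoid)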
@property
def lon(self):
"""Longitude of the location, for the default ellipsoid."""
return self.geodetic[0]
@property
def lat(self):
"""Latitude of the location, for the default ellipsoid."""
return self.geodetic[1]
@property
def height(self):
"""Height of the location, for the default ellipsoid."""
return self.geodetic[2]
# mostly for symmetry with geodetic and to_geodetic.
@property
def geocentric(self):
"""Convert to a tuple with X, Y, and Z as quantities."""
return self.to_geocentric()
def to_geocentric(self):
"""Convert to a tuple with X, Y, and Z as quantities."""
return (self.x, self.y, self.z)
def get_itrs(self, obstime=None):
"""
Generates an `~astropy.coordinates.ITRS` object with the location of
this object at the requested ``obstime``.
Parameters
----------
obstime : `~astropy.time.Time` or None
The ``obstime`` to apply to the new `~astropy.coordinates.ITRS`, or
if None, the default ``obstime`` will be used.
Returns
-------
itrs : `~astropy.coordinates.ITRS`
The new object in the ITRS frame
"""
# Broadcast for a single position at multiple times, but don't attempt
# to be more general here.
if obstime and self.size == 1 and obstime.shape:
self = np.broadcast_to(self, obstime.shape, subok=True)
# do this here to prevent a series of complicated circular imports
from .builtin_frames import ITRS
return ITRS(x=self.x, y=self.y, z=self.z, obstime=obstime)
itrs = property(
get_itrs,
doc="""An `~astropy.coordinates.ITRS` object
for the location of this object at the
default ``obstime``.""",
)
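# Illustrative sketch (hypothetical location and time):
#
#     >>> from astropy.time import Time
#     >>> itrs = loc.get_itrs(obstime=Time("2020-01-01"))  # doctest: +SKIP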
def get_gcrs(self, obstime):
"""GCRS position with velocity at ``obstime`` as a GCRS coordinate.
Parameters
----------
obstime : `~astropy.time.Time`
The ``obstime`` to calculate the GCRS position/velocity at.
Returns
-------
gcrs : `~astropy.coordinates.GCRS` instance
With velocity included.
"""
# do this here to prevent a series of complicated circular imports
from .builtin_frames import GCRS
loc, vel = self.get_gcrs_posvel(obstime)
loc.differentials["s"] = CartesianDifferential.from_cartesian(vel)
return GCRS(loc, obstime=obstime)
def _get_gcrs_posvel(self, obstime, ref_to_itrs, gcrs_to_ref):
"""Calculate GCRS position and velocity given transformation matrices.
The reference frame z axis must point to the Celestial Intermediate Pole
(as is the case for CIRS and TETE).
This private method is used in intermediate_rotation_transforms,
where some of the matrices are already available for the coordinate
transformation.
The method is faster by an order of magnitude than just adding a zero
velocity to ITRS and transforming to GCRS, because it avoids calculating
the velocity via finite differencing of the results of the transformation
at three separate times.
"""
# The simplest route is to transform to the reference frame where the
# z axis is properly aligned with the Earth's rotation axis (CIRS or
# TETE), then calculate the velocity, and then transform this
# reference position and velocity to GCRS. For speed, though, we
# transform the coordinates to GCRS in one step, and calculate the
# velocities by rotating around the earth's axis transformed to GCRS.
ref_to_gcrs = matrix_transpose(gcrs_to_ref)
itrs_to_gcrs = ref_to_gcrs @ matrix_transpose(ref_to_itrs)
# Earth's rotation vector in the ref frame is rot_vec_ref = (0,0,OMEGA_EARTH),
# so in GCRS it is rot_vec_gcrs[..., 2] @ OMEGA_EARTH.
rot_vec_gcrs = CartesianRepresentation(
ref_to_gcrs[..., 2] * OMEGA_EARTH, xyz_axis=-1, copy=False
)
# Get the position in the GCRS frame.
# Since we just need the cartesian representation of ITRS, avoid get_itrs().
itrs_cart = CartesianRepresentation(self.x, self.y, self.z, copy=False)
pos = itrs_cart.transform(itrs_to_gcrs)
vel = rot_vec_gcrs.cross(pos)
return pos, vel
def get_gcrs_posvel(self, obstime):
"""
Calculate the GCRS position and velocity of this object at the
requested ``obstime``.
Parameters
----------
obstime : `~astropy.time.Time`
The ``obstime`` to calculate the GCRS position/velocity at.
Returns
-------
obsgeoloc : `~astropy.coordinates.CartesianRepresentation`
The GCRS position of the object
obsgeovel : `~astropy.coordinates.CartesianRepresentation`
The GCRS velocity of the object
"""
# Local import to prevent circular imports.
from .builtin_frames.intermediate_rotation_transforms import (
cirs_to_itrs_mat,
gcrs_to_cirs_mat,
)
# Get gcrs_posvel by transforming via CIRS (slightly faster than TETE).
return self._get_gcrs_posvel(
obstime, cirs_to_itrs_mat(obstime), gcrs_to_cirs_mat(obstime)
)
def gravitational_redshift(
self, obstime, bodies=["sun", "jupiter", "moon"], masses={}
):
"""Return the gravitational redshift at this EarthLocation.
Calculates the gravitational redshift, of order 3 m/s, due to the
requested solar system bodies.
Parameters
----------
obstime : `~astropy.time.Time`
The ``obstime`` to calculate the redshift at.
bodies : iterable, optional
The bodies (other than the Earth) to include in the redshift
calculation. List elements should be any body name
`get_body_barycentric` accepts. Defaults to the Sun, Jupiter, and
the Moon. Earth is always included (because the class represents
an *Earth* location).
masses : dict[str, `~astropy.units.Quantity`], optional
The mass or gravitational parameters (G * mass) to assume for the
bodies requested in ``bodies``. Can be used to override the
defaults for the Sun, Jupiter, the Moon, and the Earth, or to
pass in masses for other bodies.
Returns
-------
redshift : `~astropy.units.Quantity`
Gravitational redshift in velocity units at given obstime.
"""
# needs to be here to avoid circular imports
from .solar_system import get_body_barycentric
bodies = list(bodies)
# Ensure earth is included and last in the list.
if "earth" in bodies:
bodies.remove("earth")
bodies.append("earth")
_masses = {
"sun": consts.GM_sun,
"jupiter": consts.GM_jup,
"moon": consts.G * 7.34767309e22 * u.kg,
"earth": consts.GM_earth,
}
_masses.update(masses)
GMs = []
M_GM_equivalency = (u.kg, u.Unit(consts.G * u.kg))
for body in bodies:
try:
GMs.append(_masses[body].to(u.m**3 / u.s**2, [M_GM_equivalency]))
except KeyError as err:
raise KeyError(f'body "{body}" does not have a mass.') from err
except u.UnitsError as exc:
exc.args += (
(
'"masses" argument values must be masses or '
"gravitational parameters."
),
)
raise
positions = [get_body_barycentric(name, obstime) for name in bodies]
# Calculate distances to objects other than earth.
distances = [(pos - positions[-1]).norm() for pos in positions[:-1]]
# Append distance from Earth's center for Earth's contribution.
distances.append(CartesianRepresentation(self.geocentric).norm())
# Get redshifts due to all objects.
redshifts = [
-GM / consts.c / distance for (GM, distance) in zip(GMs, distances)
]
# Reverse order of summing, to go from small to big, and to get
# "earth" first, which gives m/s as unit.
return sum(redshifts[::-1])
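# Illustrative sketch (hypothetical location and time; the result is of
# order 3 m/s, as noted in the docstring above):
#
#     >>> loc.gravitational_redshift(Time("2020-01-01"))  # doctest: +SKIP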
@property
def x(self):
"""The X component of the geocentric coordinates."""
return self["x"]
@property
def y(self):
"""The Y component of the geocentric coordinates."""
return self["y"]
@property
def z(self):
"""The Z component of the geocentric coordinates."""
return self["z"]
def __getitem__(self, item):
result = super().__getitem__(item)
if result.dtype is self.dtype:
return result.view(self.__class__)
else:
return result.view(u.Quantity)
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
if hasattr(obj, "_ellipsoid"):
self._ellipsoid = obj._ellipsoid
def __len__(self):
if self.shape == ():
raise IndexError("0-d EarthLocation arrays cannot be indexed")
else:
return super().__len__()
def _to_value(self, unit, equivalencies=[]):
"""Helper method for to and to_value."""
# Conversion to another unit in both ``to`` and ``to_value`` goes
# via this routine. To make the regular quantity routines work, we
# temporarily turn the structured array into a regular one.
array_view = self.view(self._array_dtype, np.ndarray)
if equivalencies == []:
equivalencies = self._equivalencies
new_array = self.unit.to(unit, array_view, equivalencies=equivalencies)
return new_array.view(self.dtype).reshape(self.shape)
geodetic_base_doc = """{__doc__}
Parameters
----------
lon, lat : angle-like
The longitude and latitude of the point(s), in angular units. The
latitude should be between -90 and 90 degrees, and the longitude will
be wrapped to an angle between 0 and 360 degrees. These can also be
instances of `~astropy.coordinates.Angle` and either
`~astropy.coordinates.Longitude` or `~astropy.coordinates.Latitude`,
depending on the parameter.
height : `~astropy.units.Quantity` ['length']
The height to the point(s).
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
@format_doc(geodetic_base_doc)
class BaseGeodeticRepresentation(BaseRepresentation):
"""Base geodetic representation."""
attr_classes = {"lon": Longitude, "lat": Latitude, "height": u.Quantity}
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
if "_ellipsoid" in cls.__dict__:
ELLIPSOIDS[cls._ellipsoid] = cls
def __init__(self, lon, lat=None, height=None, copy=True):
if height is None and not isinstance(lon, self.__class__):
height = 0 << u.m
super().__init__(lon, lat, height, copy=copy)
if not self.height.unit.is_equivalent(u.m):
raise u.UnitTypeError(
f"{self.__class__.__name__} requires height with units of length."
)
def to_cartesian(self):
"""
Converts WGS84 geodetic coordinates to 3D rectangular (geocentric)
cartesian coordinates.
"""
xyz = erfa.gd2gc(
getattr(erfa, self._ellipsoid), self.lon, self.lat, self.height
)
return CartesianRepresentation(xyz, xyz_axis=-1, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates (assumed geocentric) to
WGS84 geodetic coordinates.
"""
lon, lat, height = erfa.gc2gd(
getattr(erfa, cls._ellipsoid), cart.get_xyz(xyz_axis=-1)
)
return cls(lon, lat, height, copy=False)
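# Round-trip sketch for the concrete geodetic representations defined below
# (hypothetical point):
#
#     >>> rep = WGS84GeodeticRepresentation(21 * u.deg, 52 * u.deg, 100 * u.m)
#     >>> WGS84GeodeticRepresentation.from_cartesian(rep.to_cartesian())  # doctest: +SKIP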
@format_doc(geodetic_base_doc)
class WGS84GeodeticRepresentation(BaseGeodeticRepresentation):
"""Representation of points in WGS84 3D geodetic coordinates."""
_ellipsoid = "WGS84"
@format_doc(geodetic_base_doc)
class WGS72GeodeticRepresentation(BaseGeodeticRepresentation):
"""Representation of points in WGS72 3D geodetic coordinates."""
_ellipsoid = "WGS72"
@format_doc(geodetic_base_doc)
class GRS80GeodeticRepresentation(BaseGeodeticRepresentation):
"""Representation of points in GRS80 3D geodetic coordinates."""
_ellipsoid = "GRS80"
|
71c08d66c134fb21cc4c8046002ac6f796843b0a0ee9ccecd880dacdc383a42b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains the fundamental classes used for representing
coordinates in astropy.
"""
import functools
from collections import namedtuple
import numpy as np
from astropy import units as u
from astropy.utils import isiterable
from . import angle_formats as form
__all__ = ["Angle", "Latitude", "Longitude"]
# these are used by the `hms` and `dms` attributes
hms_tuple = namedtuple("hms_tuple", ("h", "m", "s"))
dms_tuple = namedtuple("dms_tuple", ("d", "m", "s"))
signed_dms_tuple = namedtuple("signed_dms_tuple", ("sign", "d", "m", "s"))
class Angle(u.SpecificTypeQuantity):
"""
One or more angular value(s) with units equivalent to radians or degrees.
An angle can be specified either as an array, scalar, tuple (see
below), string, `~astropy.units.Quantity` or another
:class:`~astropy.coordinates.Angle`.
The input parser is flexible and supports a variety of formats.
The examples below illustrate common ways of initializing an
`~astropy.coordinates.Angle` object. First some imports::
>>> from astropy.coordinates import Angle
>>> from astropy import units as u
The angle values can now be provided::
>>> Angle('10.2345d')
<Angle 10.2345 deg>
>>> Angle(['10.2345d', '-20d'])
<Angle [ 10.2345, -20. ] deg>
>>> Angle('1:2:30.43 degrees')
<Angle 1.04178611 deg>
>>> Angle('1 2 0 hours')
<Angle 1.03333333 hourangle>
>>> Angle(np.arange(1, 8), unit=u.deg)
<Angle [1., 2., 3., 4., 5., 6., 7.] deg>
>>> Angle('1°2′3″')
<Angle 1.03416667 deg>
>>> Angle('1°2′3″N')
<Angle 1.03416667 deg>
>>> Angle('1d2m3.4s')
<Angle 1.03427778 deg>
>>> Angle('1d2m3.4sS')
<Angle -1.03427778 deg>
>>> Angle('-1h2m3s')
<Angle -1.03416667 hourangle>
>>> Angle('-1h2m3sE')
<Angle -1.03416667 hourangle>
>>> Angle('-1h2.5m')
<Angle -1.04166667 hourangle>
>>> Angle('-1h2.5mW')
<Angle 1.04166667 hourangle>
>>> Angle('-1:2.5', unit=u.deg)
<Angle -1.04166667 deg>
>>> Angle(10.2345 * u.deg)
<Angle 10.2345 deg>
>>> Angle(Angle(10.2345 * u.deg))
<Angle 10.2345 deg>
Parameters
----------
angle : `~numpy.array`, scalar, `~astropy.units.Quantity`, `~astropy.coordinates.Angle`
The angle value. If a tuple, will be interpreted as ``(h, m,
s)`` or ``(d, m, s)`` depending on ``unit``. If a string, it
will be interpreted following the rules described above.
If ``angle`` is a sequence or array of strings, the resulting
values will be in the given ``unit``, or if `None` is provided,
the unit will be taken from the first given value.
unit : unit-like, optional
The unit of the value specified for the angle. This may be
any string that `~astropy.units.Unit` understands, but it is
better to give an actual unit object. Must be an angular
unit.
dtype : `~numpy.dtype`, optional
See `~astropy.units.Quantity`.
copy : bool, optional
See `~astropy.units.Quantity`.
Raises
------
`~astropy.units.UnitsError`
If a unit is not provided or it is not an angular unit.
"""
_equivalent_unit = u.radian
_include_easy_conversion_members = True
def __new__(cls, angle, unit=None, dtype=np.inexact, copy=True, **kwargs):
if not isinstance(angle, u.Quantity):
if unit is not None:
unit = cls._convert_unit_to_angle_unit(u.Unit(unit))
if isinstance(angle, tuple):
angle = cls._tuple_to_float(angle, unit)
elif isinstance(angle, str):
angle, angle_unit = form.parse_angle(angle, unit)
if angle_unit is None:
angle_unit = unit
if isinstance(angle, tuple):
if angle_unit == u.hourangle:
form._check_hour_range(angle[0])
form._check_minute_range(angle[1])
a = np.abs(angle[0]) + angle[1] / 60.0
if len(angle) == 3:
form._check_second_range(angle[2])
a += angle[2] / 3600.0
angle = np.copysign(a, angle[0])
if angle_unit is not unit:
# Possible conversion to `unit` will be done below.
angle = u.Quantity(angle, angle_unit, copy=False)
elif isiterable(angle) and not (
isinstance(angle, np.ndarray) and angle.dtype.kind not in "SUVO"
):
angle = [Angle(x, unit, copy=False) for x in angle]
return super().__new__(cls, angle, unit, dtype=dtype, copy=copy, **kwargs)
@staticmethod
def _tuple_to_float(angle, unit):
"""
Converts an angle represented as a 3-tuple or 2-tuple into a floating
point number in the given unit.
"""
# TODO: Numpy array of tuples?
if unit == u.hourangle:
return form.hms_to_hours(*angle)
elif unit == u.degree:
return form.dms_to_degrees(*angle)
else:
raise u.UnitsError(f"Can not parse '{angle}' as unit '{unit}'")
@staticmethod
def _convert_unit_to_angle_unit(unit):
return u.hourangle if unit == u.hour else unit
def _set_unit(self, unit):
super()._set_unit(self._convert_unit_to_angle_unit(unit))
@property
def hour(self):
"""
The angle's value in hours (read-only property).
"""
return self.hourangle
@property
def hms(self):
"""The angle's value in hours, as a named tuple with ``(h, m, s)`` members."""
return hms_tuple(*form.hours_to_hms(self.hourangle))
@property
def dms(self):
"""The angle's value in degrees, as a ``(d, m, s)`` named tuple."""
return dms_tuple(*form.degrees_to_dms(self.degree))
@property
def signed_dms(self):
"""The angle's value in degrees, as a ``(sign, d, m, s)`` named tuple.
The ``d``, ``m``, ``s`` are thus always positive, and the sign of
the angle is given by ``sign``.
This is primarily intended for use with `dms` to generate string
representations of coordinates that are correct for negative angles.
"""
return signed_dms_tuple(
np.sign(self.degree), *form.degrees_to_dms(np.abs(self.degree))
)
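# Illustrative sketch of the three tuple views (simple arithmetic: -1.5 deg
# is 1 deg 30 arcmin, carrying a negative sign):
#
#     >>> Angle(-1.5, u.deg).dms         # doctest: +SKIP  (d=-1, m=-30, s=-0)
#     >>> Angle(-1.5, u.deg).signed_dms  # doctest: +SKIP  (sign=-1, d=1, m=30, s=0)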
def to_string(
self,
unit=None,
decimal=False,
sep="fromunit",
precision=None,
alwayssign=False,
pad=False,
fields=3,
format=None,
):
"""A string representation of the angle.
Parameters
----------
unit : `~astropy.units.UnitBase`, optional
Specifies the unit. Must be an angular unit. If not
provided, the unit used to initialize the angle will be
used.
decimal : bool, optional
If `False`, the returned string will be in sexagesimal form
if possible (for units of degrees or hourangle). If `True`,
a decimal representation will be used. In that case, no unit
will be appended if ``format`` is not explicitly given.
sep : str, optional
The separator between numbers in a sexagesimal
representation. E.g., if it is ':', the result is
``'12:41:11.1241'``. Also accepts 2 or 3 separators. E.g.,
``sep='hms'`` would give the result ``'12h41m11.1241s'``, or
sep='-:' would yield ``'11-21:17.124'``. Alternatively, the
special string 'fromunit' means 'dms' if the unit is
degrees, or 'hms' if the unit is hours.
precision : int, optional
The level of decimal precision. If ``decimal`` is `True`,
this is the raw precision, otherwise it gives the
precision of the last place of the sexagesimal
representation (seconds). If `None`, or not provided, the
number of decimal places is determined by the value, and
will be between 0-8 decimal places as required.
alwayssign : bool, optional
If `True`, include the sign no matter what. If `False`,
only include the sign if it is negative.
pad : bool, optional
If `True`, include leading zeros when needed to ensure a
fixed number of characters for sexagesimal representation.
fields : int, optional
Specifies the number of fields to display when outputting
sexagesimal notation. For example:
- fields == 1: ``'5d'``
- fields == 2: ``'5d45m'``
- fields == 3: ``'5d45m32.5s'``
By default, all fields are displayed.
format : str, optional
The format of the result. If not provided, an unadorned
string is returned. Supported values are:
- 'latex': Return a LaTeX-formatted string
- 'latex_inline': Return a LaTeX-formatted string which is the
same as with ``format='latex'`` for |Angle| instances
- 'unicode': Return a string containing non-ASCII unicode
characters, such as the degree symbol
Returns
-------
strrepr : str or array
A string representation of the angle. If the angle is an array, this
will be an array with a unicode dtype.
"""
if unit is None:
unit = self.unit
else:
unit = self._convert_unit_to_angle_unit(u.Unit(unit))
separators = {
"generic": {u.degree: "dms", u.hourangle: "hms"},
"latex": {
u.degree: [r"^\circ", r"{}^\prime", r"{}^{\prime\prime}"],
u.hourangle: [r"^{\mathrm{h}}", r"^{\mathrm{m}}", r"^{\mathrm{s}}"],
},
"unicode": {u.degree: "°′″", u.hourangle: "ʰᵐˢ"},
}
# 'latex_inline' provides no functionality beyond what 'latex' offers,
# but it should be implemented to avoid ValueErrors in user code.
separators["latex_inline"] = separators["latex"]
# Default separators are as for generic.
separators[None] = separators["generic"]
# Create an iterator so we can format each element of what
# might be an array.
unit_is_deg = unit == u.degree
if not decimal and (unit_is_deg or unit == u.hourangle):
# Sexagesimal.
if sep == "fromunit":
if format not in separators:
raise ValueError(f"Unknown format '{format}'")
sep = separators[format][unit]
func = functools.partial(
form.degrees_to_string if unit_is_deg else form.hours_to_string,
precision=precision,
sep=sep,
pad=pad,
fields=fields,
)
else:
if sep != "fromunit":
raise ValueError(
f"'{unit}' can not be represented in sexagesimal notation"
)
func = ("{:g}" if precision is None else f"{{0:0.{precision}f}}").format
# Don't add unit by default for decimal.
if not (decimal and format is None):
unit_string = unit.to_string(format=format)
if format == "latex" or format == "latex_inline":
unit_string = unit_string[1:-1]
format_func = func
func = lambda x: format_func(x) + unit_string
def do_format(val):
# Check if value is not nan to avoid ValueErrors when turning it into
# a sexagesimal string.
if not np.isnan(val):
s = func(float(val))
if alwayssign and not s.startswith("-"):
s = "+" + s
if format == "latex" or format == "latex_inline":
s = f"${s}$"
return s
s = f"{val}"
return s
values = self.to_value(unit)
format_ufunc = np.vectorize(do_format, otypes=["U"])
result = format_ufunc(values)
if result.ndim == 0:
result = result[()]
return result
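# Illustrative sketch (10.2345 deg is 10 deg 14 arcmin 4.2 arcsec):
#
#     >>> Angle(10.2345 * u.deg).to_string(sep=":")  # doctest: +SKIP  ('10:14:04.2')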
def _wrap_at(self, wrap_angle):
"""
Implementation that assumes ``angle`` is already validated
and that wrapping is inplace.
"""
# Convert the wrap angle and 360 degrees to the native unit of
# this Angle, then do all the math on raw Numpy arrays rather
# than Quantity objects for speed.
a360 = u.degree.to(self.unit, 360.0)
wrap_angle = wrap_angle.to_value(self.unit)
wrap_angle_floor = wrap_angle - a360
self_angle = self.view(np.ndarray)
# Do the wrapping, but only if any angles need to be wrapped
#
# Catch any invalid warnings from the floor division.
with np.errstate(invalid="ignore"):
wraps = (self_angle - wrap_angle_floor) // a360
valid = np.isfinite(wraps) & (wraps != 0)
if np.any(valid):
self_angle -= wraps * a360
# Rounding errors can cause problems.
self_angle[self_angle >= wrap_angle] -= a360
self_angle[self_angle < wrap_angle_floor] += a360
def wrap_at(self, wrap_angle, inplace=False):
"""
Wrap the `~astropy.coordinates.Angle` object at the given ``wrap_angle``.
This method forces all the angle values to be within a contiguous
360 degree range so that ``wrap_angle - 360d <= angle <
wrap_angle``. By default a new Angle object is returned, but if the
``inplace`` argument is `True` then the `~astropy.coordinates.Angle`
object is wrapped in place and nothing is returned.
For instance::
>>> from astropy.coordinates import Angle
>>> import astropy.units as u
>>> a = Angle([-20.0, 150.0, 350.0] * u.deg)
>>> a.wrap_at(360 * u.deg).degree # Wrap into range 0 to 360 degrees # doctest: +FLOAT_CMP
array([340., 150., 350.])
>>> a.wrap_at('180d', inplace=True) # Wrap into range -180 to 180 degrees # doctest: +FLOAT_CMP
>>> a.degree # doctest: +FLOAT_CMP
array([-20., 150., -10.])
Parameters
----------
wrap_angle : angle-like
Specifies a single value for the wrap angle. This can be any
object that can initialize an `~astropy.coordinates.Angle` object,
e.g. ``'180d'``, ``180 * u.deg``, or ``Angle(180, unit=u.deg)``.
inplace : bool
If `True` then wrap the object in place instead of returning
a new `~astropy.coordinates.Angle`
Returns
-------
out : Angle or None
If ``inplace is False`` (default), return new
`~astropy.coordinates.Angle` object with angles wrapped accordingly.
Otherwise wrap in place and return `None`.
"""
wrap_angle = Angle(wrap_angle, copy=False) # Convert to an Angle
if not inplace:
self = self.copy()
self._wrap_at(wrap_angle)
return None if inplace else self
def is_within_bounds(self, lower=None, upper=None):
"""
Check if all angle(s) satisfy ``lower <= angle < upper``.
If ``lower`` is not specified (or `None`) then no lower bounds check is
performed. Likewise ``upper`` can be left unspecified. For example::
>>> from astropy.coordinates import Angle
>>> import astropy.units as u
>>> a = Angle([-20, 150, 350] * u.deg)
>>> a.is_within_bounds('0d', '360d')
False
>>> a.is_within_bounds(None, '360d')
True
>>> a.is_within_bounds(-30 * u.deg, None)
True
Parameters
----------
lower : angle-like or None
Specifies lower bound for checking. This can be any object
that can initialize an `~astropy.coordinates.Angle` object, e.g. ``'180d'``,
``180 * u.deg``, or ``Angle(180, unit=u.deg)``.
upper : angle-like or None
Specifies upper bound for checking. This can be any object
that can initialize an `~astropy.coordinates.Angle` object, e.g. ``'180d'``,
``180 * u.deg``, or ``Angle(180, unit=u.deg)``.
Returns
-------
is_within_bounds : bool
`True` if all angles satisfy ``lower <= angle < upper``
"""
ok = True
if lower is not None:
ok &= np.all(Angle(lower) <= self)
if ok and upper is not None:
ok &= np.all(self < Angle(upper))
return bool(ok)
def _str_helper(self, format=None):
if self.isscalar:
return self.to_string(format=format)
def formatter(x):
return x.to_string(format=format)
return np.array2string(self, formatter={"all": formatter})
def __str__(self):
return self._str_helper()
def _repr_latex_(self):
return self._str_helper(format="latex")
def _no_angle_subclass(obj):
"""Return any Angle subclass objects as an Angle objects.
This is used to ensure that Latitude and Longitude change to Angle
objects when they are used in calculations (such as lon/2.)
"""
if isinstance(obj, tuple):
return tuple(_no_angle_subclass(_obj) for _obj in obj)
return obj.view(Angle) if isinstance(obj, (Latitude, Longitude)) else obj
class Latitude(Angle):
"""
Latitude-like angle(s) which must be in the range -90 to +90 deg.
A Latitude object is distinguished from a pure
:class:`~astropy.coordinates.Angle` by virtue of being constrained
so that::
-90.0 * u.deg <= angle(s) <= +90.0 * u.deg
Any attempt to set a value outside that range will result in a
`ValueError`.
The input angle(s) can be specified either as an array, list,
scalar, tuple (see below), string,
:class:`~astropy.units.Quantity` or another
:class:`~astropy.coordinates.Angle`.
The input parser is flexible and supports all of the input formats
supported by :class:`~astropy.coordinates.Angle`.
Parameters
----------
angle : array, list, scalar, `~astropy.units.Quantity`, `~astropy.coordinates.Angle`
The angle value(s). If a tuple, will be interpreted as ``(h, m, s)``
or ``(d, m, s)`` depending on ``unit``. If a string, it will be
interpreted following the rules described for
:class:`~astropy.coordinates.Angle`.
If ``angle`` is a sequence or array of strings, the resulting
values will be in the given ``unit``, or if `None` is provided,
the unit will be taken from the first given value.
unit : unit-like, optional
The unit of the value specified for the angle. This may be
any string that `~astropy.units.Unit` understands, but it is
better to give an actual unit object. Must be an angular
unit.
Raises
------
`~astropy.units.UnitsError`
If a unit is not provided or it is not an angular unit.
`TypeError`
If the angle parameter is an instance of :class:`~astropy.coordinates.Longitude`.
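    Examples
    --------
    For instance::
        >>> from astropy.coordinates import Latitude
        >>> import astropy.units as u
        >>> Latitude('45d30m').degree  # doctest: +FLOAT_CMP
        45.5
    Out-of-range values raise a `ValueError` (output elided here)::
        >>> Latitude(91 * u.deg)  # doctest: +SKIP
        ValueError: Latitude angle(s) must be within -90 deg <= angle <= 90 deg...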
"""
def __new__(cls, angle, unit=None, **kwargs):
# Forbid creating a Lat from a Long.
if isinstance(angle, Longitude):
raise TypeError("A Latitude angle cannot be created from a Longitude angle")
self = super().__new__(cls, angle, unit=unit, **kwargs)
self._validate_angles()
return self
def _validate_angles(self, angles=None):
"""Check that angles are between -90 and 90 degrees.
If not given, the check is done on the object itself.
"""
# Convert the lower and upper bounds to the "native" unit of
# this angle. This limits multiplication to two values,
# rather than the N values in `self.value`. Also, the
# comparison is performed on raw arrays, rather than Quantity
# objects, for speed.
if angles is None:
angles = self
# For speed, compare using "is", which is not strictly guaranteed to hold,
# but if it doesn't we'll just convert correctly in the 'else' clause.
if angles.unit is u.deg:
limit = 90
elif angles.unit is u.rad:
limit = self.dtype.type(0.5 * np.pi)
else:
limit = u.degree.to(angles.unit, 90.0)
invalid_angles = np.any(angles.value < -limit) or np.any(angles.value > limit)
if invalid_angles:
raise ValueError(
"Latitude angle(s) must be within -90 deg <= angle <= 90 deg, "
f"got {angles.to(u.degree)}"
)
def __setitem__(self, item, value):
# Forbid assigning a Long to a Lat.
if isinstance(value, Longitude):
raise TypeError("A Longitude angle cannot be assigned to a Latitude angle")
# first check bounds
if value is not np.ma.masked:
self._validate_angles(value)
super().__setitem__(item, value)
# Any calculation should drop to Angle
def __array_ufunc__(self, *args, **kwargs):
results = super().__array_ufunc__(*args, **kwargs)
return _no_angle_subclass(results)
class LongitudeInfo(u.QuantityInfo):
_represent_as_dict_attrs = u.QuantityInfo._represent_as_dict_attrs + ("wrap_angle",)
class Longitude(Angle):
"""
Longitude-like angle(s) which are wrapped within a contiguous 360 degree range.
A ``Longitude`` object is distinguished from a pure
:class:`~astropy.coordinates.Angle` by virtue of a ``wrap_angle``
property. The ``wrap_angle`` specifies that all angle values
represented by the object will be in the range::
wrap_angle - 360 * u.deg <= angle(s) < wrap_angle
The default ``wrap_angle`` is 360 deg. Setting ``wrap_angle=180 *
u.deg`` would instead result in values between -180 and +180 deg.
Setting the ``wrap_angle`` attribute of an existing ``Longitude``
object will result in re-wrapping the angle values in-place.
The input angle(s) can be specified either as an array, list,
scalar, tuple, string, :class:`~astropy.units.Quantity`
or another :class:`~astropy.coordinates.Angle`.
The input parser is flexible and supports all of the input formats
supported by :class:`~astropy.coordinates.Angle`.
Parameters
----------
angle : tuple or angle-like
        The angle value(s). If a tuple, will be interpreted as ``(h, m, s)`` or
``(d, m, s)`` depending on ``unit``. If a string, it will be interpreted
following the rules described for :class:`~astropy.coordinates.Angle`.
If ``angle`` is a sequence or array of strings, the resulting
values will be in the given ``unit``, or if `None` is provided,
the unit will be taken from the first given value.
unit : unit-like ['angle'], optional
The unit of the value specified for the angle. This may be
any string that `~astropy.units.Unit` understands, but it is
better to give an actual unit object. Must be an angular
unit.
wrap_angle : angle-like or None, optional
Angle at which to wrap back to ``wrap_angle - 360 deg``.
If ``None`` (default), it will be taken to be 360 deg unless ``angle``
has a ``wrap_angle`` attribute already (i.e., is a ``Longitude``),
in which case it will be taken from there.
Raises
------
`~astropy.units.UnitsError`
If a unit is not provided or it is not an angular unit.
`TypeError`
If the angle parameter is an instance of :class:`~astropy.coordinates.Latitude`.
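    Examples
    --------
    For instance, with the default and a custom ``wrap_angle``::
        >>> from astropy.coordinates import Longitude
        >>> import astropy.units as u
        >>> Longitude(370 * u.deg).degree  # doctest: +FLOAT_CMP
        10.0
        >>> Longitude(190 * u.deg, wrap_angle=180 * u.deg).degree  # doctest: +FLOAT_CMP
        -170.0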
"""
_wrap_angle = None
_default_wrap_angle = Angle(360 * u.deg)
info = LongitudeInfo()
def __new__(cls, angle, unit=None, wrap_angle=None, **kwargs):
# Forbid creating a Long from a Lat.
if isinstance(angle, Latitude):
raise TypeError(
"A Longitude angle cannot be created from a Latitude angle."
)
self = super().__new__(cls, angle, unit=unit, **kwargs)
if wrap_angle is None:
wrap_angle = getattr(angle, "wrap_angle", self._default_wrap_angle)
self.wrap_angle = wrap_angle # angle-like b/c property setter
return self
def __setitem__(self, item, value):
# Forbid assigning a Lat to a Long.
if isinstance(value, Latitude):
raise TypeError("A Latitude angle cannot be assigned to a Longitude angle")
super().__setitem__(item, value)
self._wrap_at(self.wrap_angle)
@property
def wrap_angle(self):
return self._wrap_angle
@wrap_angle.setter
def wrap_angle(self, value):
self._wrap_angle = Angle(value, copy=False)
self._wrap_at(self.wrap_angle)
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
self._wrap_angle = getattr(obj, "_wrap_angle", self._default_wrap_angle)
# Any calculation should drop to Angle
def __array_ufunc__(self, *args, **kwargs):
results = super().__array_ufunc__(*args, **kwargs)
return _no_angle_subclass(results)
|
98cbb7d902e2547f7a5f89c021b3ad08a3a3294b26ee364c0782fb2100c61272 | """
Module for parsing astronomical object names to extract embedded coordinates
e.g. '2MASS J06495091-0737408'.
"""
import re
import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
RA_REGEX = r"()([0-2]\d)([0-5]\d)([0-5]\d)\.?(\d{0,3})"
DEC_REGEX = r"([+-])(\d{1,2})([0-5]\d)([0-5]\d)\.?(\d{0,3})"
JCOORD_REGEX = "(.*?J)" + RA_REGEX + DEC_REGEX
JPARSER = re.compile(JCOORD_REGEX)
def _sexagesimal(g):
    # Convert matched regex groups to a sexagesimal array.
sign, h, m, s, frac = g
sign = -1 if (sign == "-") else 1
s = ".".join((s, frac))
return sign * np.array([h, m, s], float)
def search(name, raise_=False):
"""Regex match for coordinates in name."""
# extract the coordinate data from name
match = JPARSER.search(name)
if match is None and raise_:
raise ValueError("No coordinate match found!")
return match
def to_ra_dec_angles(name):
"""get RA in hourangle and DEC in degrees by parsing name."""
groups = search(name, True).groups()
prefix, hms, dms = np.split(groups, [1, 6])
ra = (_sexagesimal(hms) / (1, 60, 60 * 60) * u.hourangle).sum()
dec = (_sexagesimal(dms) * (u.deg, u.arcmin, u.arcsec)).sum()
return ra, dec
def to_skycoord(name, frame="icrs"):
"""Convert to `name` to `SkyCoords` object."""
return SkyCoord(*to_ra_dec_angles(name), frame=frame)
def shorten(name):
"""Produce a shortened version of the full object name.
The shortened name is built from the prefix (usually the survey name) and RA (hour,
minute), DEC (deg, arcmin) parts.
e.g.: '2MASS J06495091-0737408' --> '2MASS J0649-0737'
Parameters
----------
name : str
Full object name with J-coords embedded.
Returns
-------
    shortName : str
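    Examples
    --------
    For example, using the name from the module docstring::
        >>> shorten('2MASS J06495091-0737408')
        '2MASS J0649-0737'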
"""
match = search(name)
return "".join(match.group(1, 3, 4, 7, 8, 9))
|
84255d65737e3ce877a55dee19fc0c20b87977254577042ec0bbd3a068042937 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# Dependencies
import numpy as np
# Project
from astropy import units as u
from astropy.utils import ShapedLikeNDArray
__all__ = [
"Attribute",
"TimeAttribute",
"QuantityAttribute",
"EarthLocationAttribute",
"CoordinateAttribute",
"CartesianRepresentationAttribute",
"DifferentialAttribute",
]
class Attribute:
"""A non-mutable data descriptor to hold a frame attribute.
This class must be used to define frame attributes (e.g. ``equinox`` or
``obstime``) that are included in a frame class definition.
Examples
--------
The `~astropy.coordinates.FK4` class uses the following class attributes::
class FK4(BaseCoordinateFrame):
equinox = TimeAttribute(default=_EQUINOX_B1950)
obstime = TimeAttribute(default=None,
secondary_attribute='equinox')
This means that ``equinox`` and ``obstime`` are available to be set as
keyword arguments when creating an ``FK4`` class instance and are then
accessible as instance attributes. The instance value for the attribute
must be stored in ``'_' + <attribute_name>`` by the frame ``__init__``
method.
Note in this example that ``equinox`` and ``obstime`` are time attributes
    and use the ``TimeAttribute`` class. This subclass overrides the
``convert_input`` method to validate and convert inputs into a ``Time``
object.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
name = "<unbound>"
def __init__(self, default=None, secondary_attribute=""):
self.default = default
self.secondary_attribute = secondary_attribute
super().__init__()
def __set_name__(self, owner, name):
self.name = name
def convert_input(self, value):
"""
Validate the input ``value`` and convert to expected attribute class.
The base method here does nothing, but subclasses can implement this
as needed. The method should catch any internal exceptions and raise
ValueError with an informative message.
The method returns the validated input along with a boolean that
indicates whether the input value was actually converted. If the input
value was already the correct type then the ``converted`` return value
should be ``False``.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
output_value : object
The ``value`` converted to the correct type (or just ``value`` if
``converted`` is False)
converted : bool
True if the conversion was actually performed, False otherwise.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
return value, False
def __get__(self, instance, frame_cls=None):
if instance is None:
out = self.default
else:
out = getattr(instance, "_" + self.name, self.default)
if out is None:
out = getattr(instance, self.secondary_attribute, self.default)
out, converted = self.convert_input(out)
if instance is not None:
# None if instance (frame) has no data!
instance_shape = getattr(instance, "shape", None)
if instance_shape is not None and (
getattr(out, "shape", ()) and out.shape != instance_shape
):
# If the shapes do not match, try broadcasting.
try:
if isinstance(out, ShapedLikeNDArray):
out = out._apply(
np.broadcast_to, shape=instance_shape, subok=True
)
else:
out = np.broadcast_to(out, instance_shape, subok=True)
except ValueError:
# raise more informative exception.
raise ValueError(
f"attribute {self.name} should be scalar or have shape"
f" {instance_shape}, but it has shape {out.shape} and could not"
" be broadcast."
)
converted = True
if converted:
setattr(instance, "_" + self.name, out)
return out
def __set__(self, instance, val):
raise AttributeError("Cannot set frame attribute")
class TimeAttribute(Attribute):
"""
Frame attribute descriptor for quantities that are Time objects.
See the `~astropy.coordinates.Attribute` API doc for further
information.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def convert_input(self, value):
"""
        Convert input value to a Time object and validate by running through
        the Time constructor.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
from astropy.time import Time
if value is None:
return None, False
if isinstance(value, Time):
out = value
converted = False
else:
try:
out = Time(value)
except Exception as err:
raise ValueError(f"Invalid time input {self.name}={value!r}.") from err
converted = True
# Set attribute as read-only for arrays (not allowed by numpy
# for array scalars)
if out.shape:
out.writeable = False
return out, converted
class CartesianRepresentationAttribute(Attribute):
"""
A frame attribute that is a CartesianRepresentation with specified units.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
unit : unit-like or None
Name of a unit that the input will be converted into. If None, no
unit-checking or conversion is performed
"""
def __init__(self, default=None, secondary_attribute="", unit=None):
super().__init__(default, secondary_attribute)
self.unit = unit
def convert_input(self, value):
"""
Checks that the input is a CartesianRepresentation with the correct
unit, or the special value ``[0, 0, 0]``.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out : object
The correctly-typed object.
converted : boolean
A boolean which indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if (
isinstance(value, list)
and len(value) == 3
and all(v == 0 for v in value)
and self.unit is not None
):
return CartesianRepresentation(np.zeros(3) * self.unit), True
else:
# is it a CartesianRepresentation with correct unit?
if hasattr(value, "xyz") and value.xyz.unit == self.unit:
return value, False
converted = True
# if it's a CartesianRepresentation, get the xyz Quantity
value = getattr(value, "xyz", value)
if not hasattr(value, "unit"):
raise TypeError(
f"tried to set a {self.__class__.__name__} with something that does"
" not have a unit."
)
value = value.to(self.unit)
# now try and make a CartesianRepresentation.
cartrep = CartesianRepresentation(value, copy=False)
return cartrep, converted
class QuantityAttribute(Attribute):
"""
A frame attribute that is a quantity with specified units and shape
(optionally).
Can be `None`, which should be used for special cases in associated
frame transformations like "this quantity should be ignored" or similar.
Parameters
----------
default : number or `~astropy.units.Quantity` or None, optional
Default value for the attribute if the user does not supply one. If a
Quantity, it must be consistent with ``unit``, or if a value, ``unit``
cannot be None.
secondary_attribute : str, optional
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
unit : unit-like or None, optional
Name of a unit that the input will be converted into. If None, no
unit-checking or conversion is performed
shape : tuple or None, optional
If given, specifies the shape the attribute must be
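    Examples
    --------
    A minimal sketch of typical use in a frame definition (``MyFrame`` and
    its ``obs_height`` attribute are hypothetical)::
        class MyFrame(BaseCoordinateFrame):
            obs_height = QuantityAttribute(default=0 * u.m, unit=u.m)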
"""
def __init__(self, default=None, secondary_attribute="", unit=None, shape=None):
if default is None and unit is None:
raise ValueError(
"Either a default quantity value must be provided, or a unit must "
"be provided to define a QuantityAttribute."
)
if default is not None and unit is None:
unit = default.unit
self.unit = unit
self.shape = shape
default = self.convert_input(default)[0]
super().__init__(default, secondary_attribute)
def convert_input(self, value):
"""
Checks that the input is a Quantity with the necessary units (or the
special value ``0``).
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if value is None:
return None, False
if (
not hasattr(value, "unit")
and self.unit != u.dimensionless_unscaled
and np.any(value != 0)
):
raise TypeError(
"Tried to set a QuantityAttribute with "
"something that does not have a unit."
)
oldvalue = value
value = u.Quantity(oldvalue, self.unit, copy=False)
if self.shape is not None and value.shape != self.shape:
if value.shape == () and oldvalue == 0:
# Allow a single 0 to fill whatever shape is needed.
value = np.broadcast_to(value, self.shape, subok=True)
else:
raise ValueError(
f'The provided value has shape "{value.shape}", but '
f'should have shape "{self.shape}"'
)
converted = oldvalue is not value
return value, converted
class EarthLocationAttribute(Attribute):
"""
A frame attribute that can act as a `~astropy.coordinates.EarthLocation`.
It can be created as anything that can be transformed to the
`~astropy.coordinates.ITRS` frame, but always presents as an `EarthLocation`
when accessed after creation.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def convert_input(self, value):
"""
        Checks that the input is an `~astropy.coordinates.EarthLocation`, or
        something that can be transformed to the `~astropy.coordinates.ITRS`
        frame (or the special value ``None``).
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if value is None:
return None, False
elif isinstance(value, EarthLocation):
return value, False
else:
# we have to do the import here because of some tricky circular deps
from .builtin_frames import ITRS
if not hasattr(value, "transform_to"):
raise ValueError(
f'"{value}" was passed into an EarthLocationAttribute, but it does'
                    ' not have a "transform_to" method'
)
itrsobj = value.transform_to(ITRS())
return itrsobj.earth_location, True
class CoordinateAttribute(Attribute):
"""
A frame attribute which is a coordinate object. It can be given as a
`~astropy.coordinates.SkyCoord` or a low-level frame instance. If a
low-level frame instance is provided, it will always be upgraded to be a
`~astropy.coordinates.SkyCoord` to ensure consistent transformation
behavior. The coordinate object will always be returned as a low-level
frame instance when accessed.
Parameters
----------
frame : `~astropy.coordinates.BaseCoordinateFrame` class
The type of frame this attribute can be
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
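    Examples
    --------
    A minimal sketch of typical use in a frame definition (``MyFrame`` and
    its ``target`` attribute are hypothetical)::
        class MyFrame(BaseCoordinateFrame):
            target = CoordinateAttribute(frame=ICRS, default=None)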
"""
def __init__(self, frame, default=None, secondary_attribute=""):
self._frame = frame
super().__init__(default, secondary_attribute)
def convert_input(self, value):
"""
        Checks that the input is a SkyCoord or frame instance of the expected
        frame class (or the special value ``None``), transforming to the
        expected frame if necessary.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
from .sky_coordinate import SkyCoord
if value is None:
return None, False
elif isinstance(value, SkyCoord) and isinstance(value.frame, self._frame):
return value.frame, True
elif isinstance(value, self._frame):
return value, False
else:
value = SkyCoord(value) # always make the value a SkyCoord
transformedobj = value.transform_to(self._frame)
return transformedobj.frame, True
class DifferentialAttribute(Attribute):
"""A frame attribute which is a differential instance.
The optional ``allowed_classes`` argument allows specifying a restricted
set of valid differential classes to check the input against. Otherwise,
any `~astropy.coordinates.BaseDifferential` subclass instance is valid.
Parameters
----------
default : object
Default value for the attribute if not provided
allowed_classes : tuple, optional
A list of allowed differential classes for this attribute to have.
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def __init__(self, default=None, allowed_classes=None, secondary_attribute=""):
if allowed_classes is not None:
self.allowed_classes = tuple(allowed_classes)
else:
self.allowed_classes = BaseDifferential
super().__init__(default, secondary_attribute)
def convert_input(self, value):
"""
Checks that the input is a differential object and is one of the
allowed class types.
Parameters
----------
value : object
Input value.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if value is None:
return None, False
if not isinstance(value, self.allowed_classes):
if len(self.allowed_classes) == 1:
value = self.allowed_classes[0](value)
else:
raise TypeError(
"Tried to set a DifferentialAttribute with an unsupported"
f" Differential type {value.__class__}. Allowed classes are:"
f" {self.allowed_classes}"
)
return value, True
# do this here to prevent a series of complicated circular imports
from .earth import EarthLocation # noqa: E402
from .representation import BaseDifferential, CartesianRepresentation # noqa: E402
|
c81c168c1bc9a9a8aa7b5d079b8b2d61b0771657913b3402d6044542aeec36f0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains helper classes to fill the erfa.astrom struct and a
ScienceState that makes it possible to speed up coordinate transformations
at the expense of accuracy.
"""
import warnings
import erfa
import numpy as np
import astropy.units as u
from astropy.time import Time
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.state import ScienceState
from .builtin_frames.utils import (
get_cip,
get_jd12,
get_polar_motion,
pav2pv,
prepare_earth_position_vel,
)
from .matrix_utilities import rotation_matrix
__all__ = []
class ErfaAstrom:
"""
The default provider for astrometry values.
A utility class to extract the necessary arguments for
erfa functions from frame attributes, call the corresponding
erfa functions and return the astrom object.
"""
@staticmethod
def apco(frame_or_coord):
"""
Wrapper for ``erfa.apco``, used in conversions AltAz <-> ICRS and CIRS <-> ICRS.
Parameters
----------
frame_or_coord : ``astropy.coordinates.BaseCoordinateFrame`` or ``astropy.coordinates.SkyCoord``
Frame or coordinate instance in the corresponding frame
            for which to calculate the astrom values.
For this function, an AltAz or CIRS frame is expected.
"""
lon, lat, height = frame_or_coord.location.to_geodetic("WGS84")
obstime = frame_or_coord.obstime
jd1_tt, jd2_tt = get_jd12(obstime, "tt")
xp, yp = get_polar_motion(obstime)
sp = erfa.sp00(jd1_tt, jd2_tt)
x, y, s = get_cip(jd1_tt, jd2_tt)
era = erfa.era00(*get_jd12(obstime, "ut1"))
earth_pv, earth_heliocentric = prepare_earth_position_vel(obstime)
# refraction constants
if hasattr(frame_or_coord, "pressure"):
# this is an AltAz like frame. Calculate refraction
refa, refb = erfa.refco(
frame_or_coord.pressure.to_value(u.hPa),
frame_or_coord.temperature.to_value(u.deg_C),
frame_or_coord.relative_humidity.value,
frame_or_coord.obswl.to_value(u.micron),
)
else:
# This is not an AltAz frame, so don't bother computing refraction
refa, refb = 0.0, 0.0
return erfa.apco(
jd1_tt,
jd2_tt,
earth_pv,
earth_heliocentric,
x,
y,
s,
era,
lon.to_value(u.radian),
lat.to_value(u.radian),
height.to_value(u.m),
xp,
yp,
sp,
refa,
refb,
)
@staticmethod
def apcs(frame_or_coord):
"""
Wrapper for ``erfa.apcs``, used in conversions GCRS <-> ICRS.
Parameters
----------
frame_or_coord : ``astropy.coordinates.BaseCoordinateFrame`` or ``astropy.coordinates.SkyCoord``
Frame or coordinate instance in the corresponding frame
            for which to calculate the astrom values.
For this function, a GCRS frame is expected.
"""
jd1_tt, jd2_tt = get_jd12(frame_or_coord.obstime, "tt")
obs_pv = pav2pv(
frame_or_coord.obsgeoloc.get_xyz(xyz_axis=-1).value,
frame_or_coord.obsgeovel.get_xyz(xyz_axis=-1).value,
)
earth_pv, earth_heliocentric = prepare_earth_position_vel(
frame_or_coord.obstime
)
return erfa.apcs(jd1_tt, jd2_tt, obs_pv, earth_pv, earth_heliocentric)
@staticmethod
def apio(frame_or_coord):
"""
Slightly modified equivalent of ``erfa.apio``, used in conversions AltAz <-> CIRS.
Since we use a topocentric CIRS frame, we have dropped the steps needed to calculate
diurnal aberration.
Parameters
----------
frame_or_coord : ``astropy.coordinates.BaseCoordinateFrame`` or ``astropy.coordinates.SkyCoord``
Frame or coordinate instance in the corresponding frame
            for which to calculate the astrom values.
For this function, an AltAz frame is expected.
"""
# Calculate erfa.apio input parameters.
# TIO locator s'
sp = erfa.sp00(*get_jd12(frame_or_coord.obstime, "tt"))
# Earth rotation angle.
theta = erfa.era00(*get_jd12(frame_or_coord.obstime, "ut1"))
# Longitude and latitude in radians.
lon, lat, height = frame_or_coord.location.to_geodetic("WGS84")
elong = lon.to_value(u.radian)
phi = lat.to_value(u.radian)
# Polar motion, rotated onto local meridian
xp, yp = get_polar_motion(frame_or_coord.obstime)
# we need an empty astrom structure before we fill in the required sections
astrom = np.zeros(frame_or_coord.obstime.shape, dtype=erfa.dt_eraASTROM)
# Form the rotation matrix, CIRS to apparent [HA,Dec].
r = (
rotation_matrix(elong, "z", unit=u.radian)
@ rotation_matrix(-yp, "x", unit=u.radian)
@ rotation_matrix(-xp, "y", unit=u.radian)
@ rotation_matrix(theta + sp, "z", unit=u.radian)
)
# Solve for local Earth rotation angle.
a = r[..., 0, 0]
b = r[..., 0, 1]
eral = np.arctan2(b, a)
astrom["eral"] = eral
# Solve for polar motion [X,Y] with respect to local meridian.
c = r[..., 0, 2]
astrom["xpl"] = np.arctan2(c, np.sqrt(a * a + b * b))
a = r[..., 1, 2]
b = r[..., 2, 2]
astrom["ypl"] = -np.arctan2(a, b)
# Adjusted longitude.
astrom["along"] = erfa.anpm(eral - theta)
# Functions of latitude.
astrom["sphi"] = np.sin(phi)
astrom["cphi"] = np.cos(phi)
# Omit two steps that are zero for a geocentric observer:
# Observer's geocentric position and velocity (m, m/s, CIRS).
# Magnitude of diurnal aberration vector.
# Refraction constants.
astrom["refa"], astrom["refb"] = erfa.refco(
frame_or_coord.pressure.to_value(u.hPa),
frame_or_coord.temperature.to_value(u.deg_C),
frame_or_coord.relative_humidity.value,
frame_or_coord.obswl.to_value(u.micron),
)
return astrom
class ErfaAstromInterpolator(ErfaAstrom):
"""
A provider for astrometry values that does not call erfa
for each individual timestamp but interpolates linearly
between support points.
For the interpolation, float64 MJD values are used, so time precision
for the interpolation will be around a microsecond.
This can dramatically speed up coordinate transformations,
e.g. between CIRS and ICRS,
when obstime is an array of many values (factors of 10 to > 100 depending
on the selected resolution, number of points and the time range of the values).
    The precision of the transformation will still be on the order of microseconds
for reasonable values of time_resolution, e.g. ``300 * u.s``.
Users should benchmark performance and accuracy with the default transformation
for their specific use case and then choose a suitable ``time_resolution``
from there.
    This class is intended to be used together with the ``erfa_astrom`` science
    state, e.g. in a context manager like this
    Examples
    --------
>>> from astropy.coordinates import SkyCoord, CIRS
>>> from astropy.coordinates.erfa_astrom import erfa_astrom, ErfaAstromInterpolator
>>> import astropy.units as u
>>> from astropy.time import Time
>>> import numpy as np
>>> obstime = Time('2010-01-01T20:00:00') + np.linspace(0, 4, 1000) * u.hour
>>> crab = SkyCoord(ra='05h34m31.94s', dec='22d00m52.2s')
>>> with erfa_astrom.set(ErfaAstromInterpolator(300 * u.s)):
... cirs = crab.transform_to(CIRS(obstime=obstime))
"""
@u.quantity_input(time_resolution=u.day)
def __init__(self, time_resolution):
if time_resolution.to_value(u.us) < 10:
warnings.warn(
f"Using {self.__class__.__name__} with `time_resolution`"
" below 10 microseconds might lead to numerical inaccuracies"
" as the MJD-based interpolation is limited by floating point "
" precision to about a microsecond of precision",
AstropyWarning,
)
self.mjd_resolution = time_resolution.to_value(u.day)
def _get_support_points(self, obstime):
"""
Calculate support points for the interpolation.
We divide the MJD by the time resolution (as single float64 values),
and calculate ceil and floor.
Then we take the unique and sorted values and scale back to MJD.
This will create a sparse support for non-regular input obstimes.
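        For example, with a ``time_resolution`` of 0.5 day, an obstime at
        MJD 59000.3 scales to 118000.6; taking floor and ceil gives support
        points at MJD 59000.0 and 59000.5.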
"""
mjd_scaled = np.ravel(obstime.mjd / self.mjd_resolution)
# unique already does sorting
mjd_u = np.unique(np.concatenate([np.floor(mjd_scaled), np.ceil(mjd_scaled)]))
return Time(
mjd_u * self.mjd_resolution,
format="mjd",
scale=obstime.scale,
)
@staticmethod
def _prepare_earth_position_vel(support, obstime):
"""
Calculate Earth's position and velocity.
Uses the coarser grid ``support`` to do the calculation, and interpolates
onto the finer grid ``obstime``.
"""
pv_support, heliocentric_support = prepare_earth_position_vel(support)
# do interpolation
earth_pv = np.empty(obstime.shape, dtype=erfa.dt_pv)
earth_heliocentric = np.empty(obstime.shape + (3,))
for dim in range(3):
for key in "pv":
earth_pv[key][..., dim] = np.interp(
obstime.mjd, support.mjd, pv_support[key][..., dim]
)
earth_heliocentric[..., dim] = np.interp(
obstime.mjd, support.mjd, heliocentric_support[..., dim]
)
return earth_pv, earth_heliocentric
@staticmethod
def _get_c2i(support, obstime):
"""
Calculate the Celestial-to-Intermediate rotation matrix.
Uses the coarser grid ``support`` to do the calculation, and interpolates
onto the finer grid ``obstime``.
"""
jd1_tt_support, jd2_tt_support = get_jd12(support, "tt")
c2i_support = erfa.c2i06a(jd1_tt_support, jd2_tt_support)
c2i = np.empty(obstime.shape + (3, 3))
for dim1 in range(3):
for dim2 in range(3):
c2i[..., dim1, dim2] = np.interp(
obstime.mjd, support.mjd, c2i_support[..., dim1, dim2]
)
return c2i
@staticmethod
def _get_cip(support, obstime):
"""
Find the X, Y coordinates of the CIP and the CIO locator, s.
Uses the coarser grid ``support`` to do the calculation, and interpolates
onto the finer grid ``obstime``.
"""
jd1_tt_support, jd2_tt_support = get_jd12(support, "tt")
cip_support = get_cip(jd1_tt_support, jd2_tt_support)
return tuple(
np.interp(obstime.mjd, support.mjd, cip_component)
for cip_component in cip_support
)
@staticmethod
def _get_polar_motion(support, obstime):
"""
Find the two polar motion components in radians.
Uses the coarser grid ``support`` to do the calculation, and interpolates
onto the finer grid ``obstime``.
"""
polar_motion_support = get_polar_motion(support)
return tuple(
np.interp(obstime.mjd, support.mjd, polar_motion_component)
for polar_motion_component in polar_motion_support
)
def apco(self, frame_or_coord):
"""
Wrapper for ``erfa.apco``, used in conversions AltAz <-> ICRS and CIRS <-> ICRS.
Parameters
----------
frame_or_coord : ``astropy.coordinates.BaseCoordinateFrame`` or ``astropy.coordinates.SkyCoord``
Frame or coordinate instance in the corresponding frame
            for which to calculate the astrom values.
For this function, an AltAz or CIRS frame is expected.
"""
lon, lat, height = frame_or_coord.location.to_geodetic("WGS84")
obstime = frame_or_coord.obstime
support = self._get_support_points(obstime)
jd1_tt, jd2_tt = get_jd12(obstime, "tt")
        # Get Earth's position and velocity, interpolated from the support
        # points onto the obstime grid.
earth_pv, earth_heliocentric = self._prepare_earth_position_vel(
support, obstime
)
xp, yp = self._get_polar_motion(support, obstime)
sp = erfa.sp00(jd1_tt, jd2_tt)
x, y, s = self._get_cip(support, obstime)
era = erfa.era00(*get_jd12(obstime, "ut1"))
# refraction constants
if hasattr(frame_or_coord, "pressure"):
# an AltAz like frame. Include refraction
refa, refb = erfa.refco(
frame_or_coord.pressure.to_value(u.hPa),
frame_or_coord.temperature.to_value(u.deg_C),
frame_or_coord.relative_humidity.value,
frame_or_coord.obswl.to_value(u.micron),
)
else:
# a CIRS like frame - no refraction
refa, refb = 0.0, 0.0
return erfa.apco(
jd1_tt,
jd2_tt,
earth_pv,
earth_heliocentric,
x,
y,
s,
era,
lon.to_value(u.radian),
lat.to_value(u.radian),
height.to_value(u.m),
xp,
yp,
sp,
refa,
refb,
)
def apcs(self, frame_or_coord):
"""
        Wrapper for ``erfa.apcs``, used in conversions GCRS <-> ICRS.
Parameters
----------
frame_or_coord : ``astropy.coordinates.BaseCoordinateFrame`` or ``astropy.coordinates.SkyCoord``
Frame or coordinate instance in the corresponding frame
            for which to calculate the astrom values.
For this function, a GCRS frame is expected.
"""
obstime = frame_or_coord.obstime
support = self._get_support_points(obstime)
        # Get Earth's position and velocity, interpolated from the support
        # points. The observatory position/velocity is built below, with xyz
        # in the last dimension and pos/vel in the one before.
earth_pv, earth_heliocentric = self._prepare_earth_position_vel(
support, obstime
)
pv = pav2pv(
frame_or_coord.obsgeoloc.get_xyz(xyz_axis=-1).value,
frame_or_coord.obsgeovel.get_xyz(xyz_axis=-1).value,
)
jd1_tt, jd2_tt = get_jd12(obstime, "tt")
return erfa.apcs(jd1_tt, jd2_tt, pv, earth_pv, earth_heliocentric)
class erfa_astrom(ScienceState):
"""
    ScienceState to select which astrom provider is used in
coordinate transformations.
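    For example, to use interpolated astrometry values for a block of
    transformations (the ``300 * u.s`` resolution is an arbitrary,
    illustrative choice)::
        >>> from astropy.coordinates.erfa_astrom import erfa_astrom, ErfaAstromInterpolator
        >>> import astropy.units as u
        >>> with erfa_astrom.set(ErfaAstromInterpolator(300 * u.s)):
        ...     pass  # transformations inside this block use interpolation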
"""
_value = ErfaAstrom()
@classmethod
def validate(cls, value):
if not isinstance(value, ErfaAstrom):
raise TypeError(f"Must be an instance of {ErfaAstrom!r}")
return value
|
5ac4a6865b7871afa640860283e649b1a86a1ceedfe165c55feb5c7cbfe7c984 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains standard functions for earth orientation, such as
precession and nutation.
This module is (currently) not intended to be part of the public API, but
is instead primarily for internal use in `coordinates`
"""
import erfa
import numpy as np
from astropy.time import Time
from .builtin_frames.utils import get_jd12
from .matrix_utilities import matrix_transpose, rotation_matrix
jd1950 = Time("B1950").jd
jd2000 = Time("J2000").jd
def eccentricity(jd):
"""
Eccentricity of the Earth's orbit at the requested Julian Date.
Parameters
----------
jd : scalar or array-like
Julian date at which to compute the eccentricity
Returns
-------
eccentricity : scalar or array
The eccentricity (or array of eccentricities)
References
----------
* Explanatory Supplement to the Astronomical Almanac: P. Kenneth
Seidelmann (ed), University Science Books (1992).
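    Examples
    --------
    For example, near J2000 (rounded for display)::
        >>> from astropy.time import Time
        >>> round(eccentricity(Time('J2000').jd), 6)
        0.016709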
"""
T = (jd - jd1950) / 36525.0
p = (-0.000000126, -0.00004193, 0.01673011)
return np.polyval(p, T)
def mean_lon_of_perigee(jd):
"""
Computes the mean longitude of perigee of the Earth's orbit at the
requested Julian Date.
Parameters
----------
jd : scalar or array-like
Julian date at which to compute the mean longitude of perigee
Returns
-------
mean_lon_of_perigee : scalar or array
Mean longitude of perigee in degrees (or array of mean longitudes)
References
----------
* Explanatory Supplement to the Astronomical Almanac: P. Kenneth
Seidelmann (ed), University Science Books (1992).
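    Examples
    --------
    For example, at J2000 (rounded for display)::
        >>> from astropy.time import Time
        >>> round(mean_lon_of_perigee(Time('J2000').jd), 2)
        282.94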
"""
T = (jd - jd1950) / 36525.0
p = (0.012, 1.65, 6190.67, 1015489.951)
return np.polyval(p, T) / 3600.0
def obliquity(jd, algorithm=2006):
"""
Computes the obliquity of the Earth at the requested Julian Date.
Parameters
----------
jd : scalar or array-like
Julian date (TT) at which to compute the obliquity
algorithm : int
Year of algorithm based on IAU adoption. Can be 2006, 2000 or 1980.
The IAU 2006 algorithm is based on Hilton et al. 2006.
The IAU 1980 algorithm is based on the Explanatory Supplement to the
Astronomical Almanac (1992).
The IAU 2000 algorithm starts with the IAU 1980 algorithm and applies a
precession-rate correction from the IAU 2000 precession model.
Returns
-------
obliquity : scalar or array
Mean obliquity in degrees (or array of obliquities)
References
----------
* Hilton, J. et al., 2006, Celest.Mech.Dyn.Astron. 94, 351
* Capitaine, N., et al., 2003, Astron.Astrophys. 400, 1145-1154
* Explanatory Supplement to the Astronomical Almanac: P. Kenneth
Seidelmann (ed), University Science Books (1992).
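    Examples
    --------
    For example, the mean obliquity at J2000 with the default IAU 2006
    algorithm (rounded for display)::
        >>> round(obliquity(2451545.0), 4)
        23.4393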
"""
if algorithm == 2006:
return np.rad2deg(erfa.obl06(jd, 0))
elif algorithm == 2000:
return np.rad2deg(erfa.obl80(jd, 0) + erfa.pr00(jd, 0)[1])
elif algorithm == 1980:
return np.rad2deg(erfa.obl80(jd, 0))
else:
raise ValueError("invalid algorithm year for computing obliquity")
def precession_matrix_Capitaine(fromepoch, toepoch):
"""
Computes the precession matrix from one Julian epoch to another, per IAU 2006.
Parameters
----------
fromepoch : `~astropy.time.Time`
The epoch to precess from.
toepoch : `~astropy.time.Time`
The epoch to precess to.
Returns
-------
pmatrix : 3x3 array
Precession matrix to get from ``fromepoch`` to ``toepoch``
References
----------
Hilton, J. et al., 2006, Celest.Mech.Dyn.Astron. 94, 351
"""
# Multiply the two precession matrices (without frame bias) through J2000.0
fromepoch_to_J2000 = matrix_transpose(erfa.bp06(*get_jd12(fromepoch, "tt"))[1])
J2000_to_toepoch = erfa.bp06(*get_jd12(toepoch, "tt"))[1]
return J2000_to_toepoch @ fromepoch_to_J2000
def _precession_matrix_besselian(epoch1, epoch2):
"""
Computes the precession matrix from one Besselian epoch to another using
Newcomb's method.
``epoch1`` and ``epoch2`` are in Besselian year numbers.
"""
# tropical years
t1 = (epoch1 - 1850.0) / 1000.0
t2 = (epoch2 - 1850.0) / 1000.0
dt = t2 - t1
zeta1 = 23035.545 + t1 * 139.720 + 0.060 * t1 * t1
zeta2 = 30.240 - 0.27 * t1
zeta3 = 17.995
pzeta = (zeta3, zeta2, zeta1, 0)
zeta = np.polyval(pzeta, dt) / 3600
z1 = 23035.545 + t1 * 139.720 + 0.060 * t1 * t1
z2 = 109.480 + 0.39 * t1
z3 = 18.325
pz = (z3, z2, z1, 0)
z = np.polyval(pz, dt) / 3600
theta1 = 20051.12 - 85.29 * t1 - 0.37 * t1 * t1
theta2 = -42.65 - 0.37 * t1
theta3 = -41.8
ptheta = (theta3, theta2, theta1, 0)
theta = np.polyval(ptheta, dt) / 3600
return (
rotation_matrix(-z, "z")
@ rotation_matrix(theta, "y")
@ rotation_matrix(-zeta, "z")
)
def nutation_components2000B(jd):
"""
Computes nutation components following the IAU 2000B specification.
Parameters
----------
jd : scalar
Julian date (TT) at which to compute the nutation components
Returns
-------
eps : float
epsilon in radians
dpsi : float
dpsi in radians
deps : float
        depsilon in radians
"""
dpsi, deps, epsa, _, _, _, _, _ = erfa.pn00b(jd, 0)
return epsa, dpsi, deps
def nutation_matrix(epoch):
"""
Nutation matrix generated from nutation components, IAU 2000B model.
Matrix converts from mean coordinate to true coordinate as
r_true = M * r_mean
Parameters
----------
epoch : `~astropy.time.Time`
The epoch at which to compute the nutation matrix
Returns
-------
nmatrix : 3x3 array
Nutation matrix for the specified epoch
References
----------
* Explanatory Supplement to the Astronomical Almanac: P. Kenneth
Seidelmann (ed), University Science Books (1992).
"""
# TODO: implement higher precision 2006/2000A model if requested/needed
return erfa.num00b(*get_jd12(epoch, "tt"))
|
ad70630025d0825ca8cb1d5398883578d2b97ee14ecb0e0ca0992535782afa30 | """Implements the wrapper for the Astropy test runner.
This is for backward-compatibility for other downstream packages and can be removed
once astropy-helpers has reached end-of-life.
"""
import os
import shutil
import stat
import subprocess
import sys
import tempfile
from contextlib import contextmanager
from setuptools import Command
from astropy.logger import log
@contextmanager
def _suppress_stdout():
"""
A context manager to temporarily disable stdout.
Used later when installing a temporary copy of astropy to avoid a
very verbose output.
"""
with open(os.devnull, "w") as devnull:
old_stdout = sys.stdout
sys.stdout = devnull
try:
yield
finally:
sys.stdout = old_stdout
class FixRemoteDataOption(type):
"""
This metaclass is used to catch cases where the user is running the tests
with --remote-data. We've now changed the --remote-data option so that it
takes arguments, but we still want --remote-data to work as before and to
enable all remote tests. With this metaclass, we can modify sys.argv
    before setuptools tries to parse the command-line options.
"""
def __init__(cls, name, bases, dct):
try:
idx = sys.argv.index("--remote-data")
except ValueError:
pass
else:
sys.argv[idx] = "--remote-data=any"
try:
idx = sys.argv.index("-R")
except ValueError:
pass
else:
sys.argv[idx] = "-R=any"
return super().__init__(name, bases, dct)
class AstropyTest(Command, metaclass=FixRemoteDataOption):
description = "Run the tests for this package"
user_options = [
(
"package=",
"P",
"The name of a specific package to test, e.g. 'io.fits' or 'utils'. "
"Accepts comma separated string to specify multiple packages. "
"If nothing is specified, all default tests are run.",
),
(
"test-path=",
"t",
"Specify a test location by path. If a relative path to a .py file, "
'it is relative to the built package, so e.g., a leading "astropy/" '
"is necessary. If a relative path to a .rst file, it is relative to "
"the directory *below* the --docs-path directory, so a leading "
'"docs/" is usually necessary. May also be an absolute path.',
),
("verbose-results", "V", "Turn on verbose output from pytest."),
("plugins=", "p", "Plugins to enable when running pytest."),
("pastebin=", "b", "Enable pytest pastebin output. Either 'all' or 'failed'."),
("args=", "a", "Additional arguments to be passed to pytest."),
(
"remote-data=",
"R",
"Run tests that download remote data. Should be "
"one of none/astropy/any (defaults to none).",
),
(
"pep8",
"8",
"Enable PEP8 checking and disable regular tests. "
"Requires the pytest-pep8 plugin.",
),
("pdb", "d", "Start the interactive Python debugger on errors."),
("coverage", "c", "Create a coverage report. Requires the coverage package."),
(
"open-files",
"o",
"Fail if any tests leave files open. Requires the psutil package.",
),
(
"parallel=",
"j",
"Run the tests in parallel on the specified number of "
'CPUs. If "auto", all the cores on the machine will be '
"used. Requires the pytest-xdist plugin.",
),
(
"docs-path=",
None,
"The path to the documentation .rst files. If not provided, and "
'the current directory contains a directory called "docs", that '
"will be used.",
),
("skip-docs", None, "Don't test the documentation .rst files."),
(
"repeat=",
None,
"How many times to repeat each test (can be used to check for "
"sporadic failures).",
),
(
"temp-root=",
None,
"The root directory in which to create the temporary testing files. "
"If unspecified the system default is used (e.g. /tmp) as explained "
"in the documentation for tempfile.mkstemp.",
),
(
"verbose-install",
None,
"Turn on terminal output from the installation of astropy in a "
"temporary folder.",
),
("readonly", None, "Make the temporary installation being tested read-only."),
]
package_name = ""
def initialize_options(self):
self.package = None
self.test_path = None
self.verbose_results = False
self.plugins = None
self.pastebin = None
self.args = None
self.remote_data = "none"
self.pep8 = False
self.pdb = False
self.coverage = False
self.open_files = False
self.parallel = 0
self.docs_path = None
self.skip_docs = False
self.repeat = None
self.temp_root = None
self.verbose_install = False
self.readonly = False
def finalize_options(self):
# Normally we would validate the options here, but that's handled in
# run_tests
pass
def generate_testing_command(self):
"""
Build a Python script to run the tests.
"""
cmd_pre = "" # Commands to run before the test function
cmd_post = "" # Commands to run after the test function
if self.coverage:
pre, post = self._generate_coverage_commands()
cmd_pre += pre
cmd_post += post
set_flag = "import builtins; builtins._ASTROPY_TEST_ = True"
cmd = ( # see _build_temp_install below
"{cmd_pre}{0}; import {1.package_name}, sys; result = ("
"{1.package_name}.test("
"package={1.package!r}, "
"test_path={1.test_path!r}, "
"args={1.args!r}, "
"plugins={1.plugins!r}, "
"verbose={1.verbose_results!r}, "
"pastebin={1.pastebin!r}, "
"remote_data={1.remote_data!r}, "
"pep8={1.pep8!r}, "
"pdb={1.pdb!r}, "
"open_files={1.open_files!r}, "
"parallel={1.parallel!r}, "
"docs_path={1.docs_path!r}, "
"skip_docs={1.skip_docs!r}, "
"add_local_eggs_to_path=True, "
"repeat={1.repeat!r})); "
"{cmd_post}"
"sys.exit(result)"
)
return cmd.format(set_flag, self, cmd_pre=cmd_pre, cmd_post=cmd_post)
def run(self):
"""Run the tests!"""
# Install the runtime dependencies.
if self.distribution.install_requires:
self.distribution.fetch_build_eggs(self.distribution.install_requires)
# Ensure there is a doc path
if self.docs_path is None:
cfg_docs_dir = self.distribution.get_option_dict("build_docs").get(
"source_dir", None
)
# Some affiliated packages use this.
# See astropy/package-template#157
if cfg_docs_dir is not None and os.path.exists(cfg_docs_dir[1]):
self.docs_path = os.path.abspath(cfg_docs_dir[1])
# fall back on a default path of "docs"
elif os.path.exists("docs"): # pragma: no cover
self.docs_path = os.path.abspath("docs")
# Build a testing install of the package
self._build_temp_install()
# Install the test dependencies
# NOTE: we do this here after _build_temp_install because there is
        # a weird bug that occurs if psutil is installed this way before
        # astropy is built: Cython can have a segmentation fault. Strange, eh?
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
# Copy any additional dependencies that may have been installed via
        # tests_require or install_requires. We then pass the
# add_local_eggs_to_path=True option to package.test() to make sure the
# eggs get included in the path.
if os.path.exists(".eggs"):
shutil.copytree(".eggs", os.path.join(self.testing_path, ".eggs"))
# This option exists so that we can make sure that the tests don't
# write to an installed location.
if self.readonly:
log.info("changing permissions of temporary installation to read-only")
self._change_permissions_testing_path(writable=False)
# Run everything in a try: finally: so that the tmp dir gets deleted.
try:
# Construct this modules testing command
cmd = self.generate_testing_command()
# Run the tests in a subprocess--this is necessary since
# new extension modules may have appeared, and this is the
# easiest way to set up a new environment
testproc = subprocess.Popen(
[sys.executable, "-c", cmd], cwd=self.testing_path, close_fds=False
)
retcode = testproc.wait()
except KeyboardInterrupt:
import signal
# If a keyboard interrupt is handled, pass it to the test
# subprocess to prompt pytest to initiate its teardown
testproc.send_signal(signal.SIGINT)
retcode = testproc.wait()
finally:
# Remove temporary directory
if self.readonly:
self._change_permissions_testing_path(writable=True)
shutil.rmtree(self.tmp_dir)
raise SystemExit(retcode)
def _build_temp_install(self):
"""
        Install the package into a temporary directory for the purposes of
testing. This allows us to test the install command, include the
entry points, and also avoids creating pyc and __pycache__ directories
inside the build directory.
"""
# On OSX the default path for temp files is under /var, but in most
# cases on OSX /var is actually a symlink to /private/var; ensure we
# dereference that link, because pytest is very sensitive to relative
# paths...
tmp_dir = tempfile.mkdtemp(
prefix=self.package_name + "-test-", dir=self.temp_root
)
self.tmp_dir = os.path.realpath(tmp_dir)
log.info(f"installing to temporary directory: {self.tmp_dir}")
# We now install the package to the temporary directory. We do this
# rather than build and copy because this will ensure that e.g. entry
# points work.
self.reinitialize_command("install")
install_cmd = self.distribution.get_command_obj("install")
install_cmd.prefix = self.tmp_dir
if self.verbose_install:
self.run_command("install")
else:
with _suppress_stdout():
self.run_command("install")
# We now get the path to the site-packages directory that was created
# inside self.tmp_dir
install_cmd = self.get_finalized_command("install")
self.testing_path = install_cmd.install_lib
# Ideally, docs_path is set properly in run(), but if it is still
# not set here, do not pretend it is, otherwise bad things happen.
# See astropy/package-template#157
if self.docs_path is not None:
new_docs_path = os.path.join(
self.testing_path, os.path.basename(self.docs_path)
)
shutil.copytree(self.docs_path, new_docs_path)
self.docs_path = new_docs_path
shutil.copy("setup.cfg", self.testing_path)
def _change_permissions_testing_path(self, writable=False):
if writable:
basic_flags = stat.S_IRUSR | stat.S_IWUSR
else:
basic_flags = stat.S_IRUSR
for root, dirs, files in os.walk(self.testing_path):
for dirname in dirs:
os.chmod(os.path.join(root, dirname), basic_flags | stat.S_IXUSR)
for filename in files:
os.chmod(os.path.join(root, filename), basic_flags)
def _generate_coverage_commands(self):
"""
This method creates the post and pre commands if coverage is to be
generated.
"""
if self.parallel != 0:
raise ValueError("--coverage can not be used with --parallel")
try:
import coverage # noqa: F401
except ImportError:
raise ImportError(
"--coverage requires that the coverage package is installed."
)
# Don't use get_pkg_data_filename here, because it
# requires importing astropy.config and thus screwing
# up coverage results for those packages.
coveragerc = os.path.join(
self.testing_path,
self.package_name.replace(".", "/"),
"tests",
"coveragerc",
)
with open(coveragerc) as fd:
coveragerc_content = fd.read()
coveragerc_content = coveragerc_content.replace(
"{packagename}", self.package_name.replace(".", "/")
)
tmp_coveragerc = os.path.join(self.tmp_dir, "coveragerc")
with open(tmp_coveragerc, "wb") as tmp:
tmp.write(coveragerc_content.encode("utf-8"))
cmd_pre = (
"import coverage; cov ="
f' coverage.coverage(data_file=r"{os.path.abspath(".coverage")}",'
f' config_file=r"{os.path.abspath(tmp_coveragerc)}"); cov.start();'
)
cmd_post = (
"cov.stop(); from astropy.tests.helper import _save_coverage;"
f' _save_coverage(cov, result, r"{os.path.abspath(".")}",'
f' r"{os.path.abspath(self.testing_path)}");'
)
return cmd_pre, cmd_post
|
4985990b09a0d493662ca03d8dd6f87c1d07244310b79eb563ba8ccc6f91154d | """Implements the Astropy TestRunner which is a thin wrapper around pytest."""
import copy
import glob
import inspect
import os
import shlex
import sys
import tempfile
import warnings
from collections import OrderedDict
from functools import wraps
from importlib.util import find_spec
from astropy.config.paths import set_temp_cache, set_temp_config
from astropy.utils import find_current_module
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
__all__ = ["TestRunner", "TestRunnerBase", "keyword"]
class keyword:
"""
    A decorator to mark a method as a keyword argument for the ``TestRunner``.
Parameters
----------
default_value : `object`
The default value for the keyword argument. (Default: `None`)
priority : `int`
keyword argument methods are executed in order of descending priority.
"""
def __init__(self, default_value=None, priority=0):
self.default_value = default_value
self.priority = priority
def __call__(self, f):
def keyword(*args, **kwargs):
return f(*args, **kwargs)
keyword._default_value = self.default_value
keyword._priority = self.priority
# Set __doc__ explicitly here rather than using wraps because we want
# to keep the function name as keyword so we can inspect it later.
keyword.__doc__ = f.__doc__
return keyword
class TestRunnerBase:
"""
The base class for the TestRunner.
A test runner can be constructed by creating a subclass of this class and
defining 'keyword' methods. These are methods that have the
    :class:`~astropy.tests.runner.keyword` decorator; they are used to
    construct the allowed keyword arguments of the ``run_tests`` method,
    as a way to allow customization of individual keyword arguments (and
    the associated logic) without having to re-implement the whole
    ``run_tests`` method.
Examples
--------
A simple keyword method::
class MyRunner(TestRunnerBase):
            @keyword('default_value')
def spam(self, spam, kwargs):
\"\"\"
spam : `str`
The parameter description for the run_tests docstring.
\"\"\"
# Return value must be a list with a CLI parameter for pytest.
return ['--spam={}'.format(spam)]
"""
def __init__(self, base_path):
self.base_path = os.path.abspath(base_path)
def __new__(cls, *args, **kwargs):
# Before constructing the class parse all the methods that have been
# decorated with ``keyword``.
# The objective of this method is to construct a default set of keyword
# arguments to the ``run_tests`` method. It does this by inspecting the
# methods of the class for functions with the name ``keyword`` which is
# the name of the decorator wrapping function. Once it has created this
# dictionary, it also formats the docstring of ``run_tests`` to be
# comprised of the docstrings for the ``keyword`` methods.
# To add a keyword argument to the ``run_tests`` method, define a new
# method decorated with ``@keyword`` and with the ``self, name, kwargs``
# signature.
# Get all 'function' members as the wrapped methods are functions
functions = inspect.getmembers(cls, predicate=inspect.isfunction)
# Filter out anything that's not got the name 'keyword'
keywords = filter(lambda func: func[1].__name__ == "keyword", functions)
# Sort all keywords based on the priority flag.
sorted_keywords = sorted(keywords, key=lambda x: x[1]._priority, reverse=True)
cls.keywords = OrderedDict()
doc_keywords = ""
for name, func in sorted_keywords:
# Here we test if the function has been overloaded to return
# NotImplemented which is the way to disable arguments on
# subclasses. If it has been disabled we need to remove it from the
# default keywords dict. We do it in the try except block because
# we do not have access to an instance of the class, so this is
# going to error unless the method is just doing `return
# NotImplemented`.
try:
# Second argument is False, as it is normally a bool.
# The other two are placeholders for objects.
if func(None, False, None) is NotImplemented:
continue
except Exception:
pass
# Construct the default kwargs dict and docstring
cls.keywords[name] = func._default_value
if func.__doc__:
doc_keywords += " " * 8
doc_keywords += func.__doc__.strip()
doc_keywords += "\n\n"
cls.run_tests.__doc__ = cls.RUN_TESTS_DOCSTRING.format(keywords=doc_keywords)
return super().__new__(cls)
def _generate_args(self, **kwargs):
# Update default values with passed kwargs
# but don't modify the defaults
keywords = copy.deepcopy(self.keywords)
keywords.update(kwargs)
# Iterate through the keywords (in order of priority)
args = []
for keyword in keywords.keys():
func = getattr(self, keyword)
result = func(keywords[keyword], keywords)
# Allow disabling of options in a subclass
if result is NotImplemented:
raise TypeError(
f"run_tests() got an unexpected keyword argument {keyword}"
)
# keyword methods must return a list
if not isinstance(result, list):
raise TypeError(f"{keyword} keyword method must return a list")
args += result
return args
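    # For example (hypothetical values), ``runner._generate_args(verbose=True,
    # remote_data='any')`` would produce something like
    # ``['-v', '--remote-data=any', ...]``, with entries ordered by the
    # priority of the corresponding keyword methods.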
RUN_TESTS_DOCSTRING = """
Run the tests for the package.
This method builds arguments for and then calls ``pytest.main``.
Parameters
----------
{keywords}
"""
_required_dependencies = [
"pytest",
"pytest_remotedata",
"pytest_doctestplus",
"pytest_astropy_header",
]
    _missing_dependency_error = (
"Test dependencies are missing: {module}. You should install the "
"'pytest-astropy' package (you may need to update the package if you "
"have a previous version installed, e.g., "
"'pip install pytest-astropy --upgrade' or the equivalent with conda)."
)
@classmethod
def _has_test_dependencies(cls): # pragma: no cover
# Using the test runner will not work without these dependencies, but
# pytest-openfiles is optional, so it's not listed here.
for module in cls._required_dependencies:
spec = find_spec(module)
# Checking loader accounts for packages that were uninstalled
if spec is None or spec.loader is None:
                raise RuntimeError(cls._missing_dependency_error.format(module=module))
def run_tests(self, **kwargs):
# The following option will include eggs inside a .eggs folder in
# sys.path when running the tests. This is possible so that when
# running pytest, test dependencies installed via e.g.
        # tests_require are available here. This is not an advertised option
# since it is only for internal use
if kwargs.pop("add_local_eggs_to_path", False):
# Add each egg to sys.path individually
for egg in glob.glob(os.path.join(".eggs", "*.egg")):
sys.path.insert(0, egg)
self._has_test_dependencies() # pragma: no cover
# The docstring for this method is defined as a class variable.
# This allows it to be built for each subclass in __new__.
# Don't import pytest until it's actually needed to run the tests
import pytest
# Raise error for undefined kwargs
allowed_kwargs = set(self.keywords.keys())
passed_kwargs = set(kwargs.keys())
if not passed_kwargs.issubset(allowed_kwargs):
wrong_kwargs = list(passed_kwargs.difference(allowed_kwargs))
raise TypeError(
f"run_tests() got an unexpected keyword argument {wrong_kwargs[0]}"
)
args = self._generate_args(**kwargs)
if kwargs.get("plugins", None) is not None:
plugins = kwargs.pop("plugins")
elif self.keywords.get("plugins", None) is not None:
plugins = self.keywords["plugins"]
else:
plugins = []
# Override the config locations to not make a new directory nor use
# existing cache or config. Note that we need to do this here in
# addition to in conftest.py - for users running tests interactively
# in e.g. IPython, conftest.py would get read in too late, so we need
# to do it here - but at the same time the code here doesn't work when
# running tests in parallel mode because this uses subprocesses which
# don't know about the temporary config/cache.
astropy_config = tempfile.mkdtemp("astropy_config")
astropy_cache = tempfile.mkdtemp("astropy_cache")
# Have to use nested with statements for cross-Python support
# Note, using these context managers here is superfluous if the
# config_dir or cache_dir options to pytest are in use, but it's
# also harmless to nest the contexts
with set_temp_config(astropy_config, delete=True):
with set_temp_cache(astropy_cache, delete=True):
return pytest.main(args=args, plugins=plugins)
@classmethod
def make_test_runner_in(cls, path):
"""
Constructs a `TestRunner` to run in the given path, and returns a
``test()`` function which takes the same arguments as
``TestRunner.run_tests``.
The returned ``test()`` function will be defined in the module this
was called from. This is used to implement the ``astropy.test()``
function (or the equivalent for affiliated packages).
"""
runner = cls(path)
@wraps(runner.run_tests, ("__doc__",))
def test(**kwargs):
return runner.run_tests(**kwargs)
module = find_current_module(2)
if module is not None:
test.__module__ = module.__name__
# A somewhat unusual hack, but delete the attached __wrapped__
# attribute--although this is normally used to tell if the function
# was wrapped with wraps, on some version of Python this is also
# used to determine the signature to display in help() which is
# not useful in this case. We don't really care in this case if the
# function was wrapped either
if hasattr(test, "__wrapped__"):
del test.__wrapped__
test.__test__ = False
return test
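    # A typical use in a package's ``__init__`` module (a sketch; the path
    # shown is illustrative):
    #
    #     import os
    #     from astropy.tests.runner import TestRunner
    #
    #     test = TestRunner.make_test_runner_in(os.path.dirname(__file__))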
class TestRunner(TestRunnerBase):
"""
A test runner for astropy tests.
"""
def packages_path(self, packages, base_path, error=None, warning=None):
"""
        Generates the paths for multiple packages.
Parameters
----------
packages : str
Comma separated string of packages.
base_path : str
Base path to the source code or documentation.
error : str
Error message to be raised as ``ValueError``. Individual package
name and path can be accessed by ``{name}`` and ``{path}``
respectively. No error is raised if `None`. (Default: `None`)
warning : str
Warning message to be issued. Individual package
name and path can be accessed by ``{name}`` and ``{path}``
            respectively. No warning is issued if `None`. (Default: `None`)
Returns
-------
paths : list of str
List of strings of existing package paths.
"""
packages = packages.split(",")
paths = []
for package in packages:
path = os.path.join(base_path, package.replace(".", os.path.sep))
if not os.path.isdir(path):
info = {"name": package, "path": path}
if error is not None:
raise ValueError(error.format(**info))
if warning is not None:
warnings.warn(warning.format(**info))
else:
paths.append(path)
return paths
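    # For instance (a sketch), ``self.packages_path('io.fits,utils', base)``
    # returns ``[<base>/io/fits, <base>/utils]`` for the packages whose
    # directories exist, raising or warning for the missing ones depending on
    # whether ``error`` or ``warning`` is given.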
# Increase priority so this warning is displayed first.
@keyword(priority=1000)
def coverage(self, coverage, kwargs):
if coverage:
warnings.warn(
"The coverage option is ignored on run_tests, since it "
"can not be made to work in that context. Use "
"'python setup.py test --coverage' instead.",
AstropyWarning,
)
return []
# test_path depends on self.package_path so make sure this runs before
# test_path.
@keyword(priority=1)
def package(self, package, kwargs):
"""
package : str, optional
The name of a specific package to test, e.g. 'io.fits' or
'utils'. Accepts comma separated string to specify multiple
packages. If nothing is specified all default tests are run.
"""
if package is None:
self.package_path = [self.base_path]
else:
error_message = "package to test is not found: {name} (at path {path})."
self.package_path = self.packages_path(
package, self.base_path, error=error_message
)
if not kwargs["test_path"]:
return self.package_path
return []
@keyword()
def test_path(self, test_path, kwargs):
"""
test_path : str, optional
Specify location to test by path. May be a single file or
directory. Must be specified absolutely or relative to the
calling directory.
"""
all_args = []
# Ensure that the package kwarg has been run.
self.package(kwargs["package"], kwargs)
if test_path:
base, ext = os.path.splitext(test_path)
if ext in (".rst", ""):
if kwargs["docs_path"] is None:
# This shouldn't happen from "python setup.py test"
raise ValueError(
"Can not test .rst files without a docs_path specified."
)
abs_docs_path = os.path.abspath(kwargs["docs_path"])
abs_test_path = os.path.abspath(
os.path.join(abs_docs_path, os.pardir, test_path)
)
common = os.path.commonprefix((abs_docs_path, abs_test_path))
if os.path.exists(abs_test_path) and common == abs_docs_path:
# Turn on the doctest_rst plugin
all_args.append("--doctest-rst")
test_path = abs_test_path
# Check that the extensions are in the path and not at the end to
# support specifying the name of the test, i.e.
# test_quantity.py::test_unit
if not (
os.path.isdir(test_path) or (".py" in test_path or ".rst" in test_path)
):
raise ValueError(
"Test path must be a directory or a path to a .py or .rst file"
)
return all_args + [test_path]
return []
@keyword()
def args(self, args, kwargs):
"""
args : str, optional
Additional arguments to be passed to ``pytest.main`` in the ``args``
keyword argument.
"""
if args:
return shlex.split(args, posix=not sys.platform.startswith("win"))
return []
@keyword(default_value=[])
def plugins(self, plugins, kwargs):
"""
plugins : list, optional
Plugins to be passed to ``pytest.main`` in the ``plugins`` keyword
argument.
"""
# Plugins are handled independently by `run_tests` so we define this
# keyword just for the docstring
return []
@keyword()
def verbose(self, verbose, kwargs):
"""
verbose : bool, optional
Convenience option to turn on verbose output from pytest. Passing
True is the same as specifying ``-v`` in ``args``.
"""
if verbose:
return ["-v"]
return []
@keyword()
def pastebin(self, pastebin, kwargs):
"""
pastebin : ('failed', 'all', None), optional
Convenience option for turning on pytest pastebin output. Set to
'failed' to upload info for failed tests, or 'all' to upload info
for all tests.
"""
if pastebin is not None:
if pastebin in ["failed", "all"]:
return [f"--pastebin={pastebin}"]
else:
raise ValueError("pastebin should be 'failed' or 'all'")
return []
@keyword(default_value="none")
def remote_data(self, remote_data, kwargs):
"""
remote_data : {'none', 'astropy', 'any'}, optional
Controls whether to run tests marked with @pytest.mark.remote_data. This can be
set to run no tests with remote data (``none``), only ones that use
data from http://data.astropy.org (``astropy``), or all tests that
use remote data (``any``). The default is ``none``.
"""
if remote_data is True:
remote_data = "any"
elif remote_data is False:
remote_data = "none"
elif remote_data not in ("none", "astropy", "any"):
warnings.warn(
"The remote_data option should be one of "
f"none/astropy/any (found {remote_data}). For backward-compatibility, "
"assuming 'any', but you should change the option to be "
"one of the supported ones to avoid issues in "
"future.",
AstropyDeprecationWarning,
)
remote_data = "any"
return [f"--remote-data={remote_data}"]
@keyword()
def pep8(self, pep8, kwargs):
"""
pep8 : bool, optional
Turn on PEP8 checking via the pytest-pep8 plugin and disable normal
tests. Same as specifying ``--pep8 -k pep8`` in ``args``.
"""
if pep8:
try:
import pytest_pep8 # noqa: F401
except ImportError:
raise ImportError(
"PEP8 checking requires pytest-pep8 plugin: "
"https://pypi.org/project/pytest-pep8"
)
else:
return ["--pep8", "-k", "pep8"]
return []
@keyword()
def pdb(self, pdb, kwargs):
"""
pdb : bool, optional
Turn on PDB post-mortem analysis for failing tests. Same as
specifying ``--pdb`` in ``args``.
"""
if pdb:
return ["--pdb"]
return []
@keyword()
def open_files(self, open_files, kwargs):
"""
open_files : bool, optional
Fail when any tests leave files open. Off by default, because
this adds extra run time to the test suite. Requires the
``psutil`` package.
"""
if open_files:
if kwargs["parallel"] != 0:
raise SystemError(
"open file detection may not be used in conjunction with "
"parallel testing."
)
try:
import psutil # noqa: F401
except ImportError:
raise SystemError(
"open file detection requested, but psutil package "
"is not installed."
)
return ["--open-files"]
print("Checking for unclosed files")
return []
@keyword(0)
def parallel(self, parallel, kwargs):
"""
parallel : int or 'auto', optional
When provided, run the tests in parallel on the specified
            number of CPUs. If parallel is ``'auto'``, it will use all
the cores on the machine. Requires the ``pytest-xdist`` plugin.
"""
if parallel != 0:
try:
from xdist import plugin # noqa: F401
except ImportError:
raise SystemError(
"running tests in parallel requires the pytest-xdist package"
)
return ["-n", str(parallel)]
return []
@keyword()
def docs_path(self, docs_path, kwargs):
"""
docs_path : str, optional
The path to the documentation .rst files.
"""
paths = []
if docs_path is not None and not kwargs["skip_docs"]:
if kwargs["package"] is not None:
warning_message = (
"Can not test .rst docs for {name}, since "
"docs path ({path}) does not exist."
)
paths = self.packages_path(
kwargs["package"], docs_path, warning=warning_message
)
elif not kwargs["test_path"]:
paths = [docs_path]
if len(paths) and not kwargs["test_path"]:
paths.append("--doctest-rst")
return paths
@keyword()
def skip_docs(self, skip_docs, kwargs):
"""
skip_docs : `bool`, optional
When `True`, skips running the doctests in the .rst files.
"""
# Skip docs is a bool used by docs_path only.
return []
@keyword()
def repeat(self, repeat, kwargs):
"""
repeat : `int`, optional
If set, specifies how many times each test should be run. This is
useful for diagnosing sporadic failures.
"""
if repeat:
return [f"--repeat={repeat}"]
return []
# Override run_tests for astropy-specific fixes
def run_tests(self, **kwargs):
# This prevents cyclical import problems that make it
# impossible to test packages that define Table types on their
# own.
from astropy.table import Table # noqa: F401
return super().run_tests(**kwargs)
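# Example invocation through the public entry point built on this runner
# (a sketch; requires the pytest-astropy test dependencies to be installed):
#
#     import astropy
#     astropy.test(package='units', verbose=True, remote_data='none')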
|
b68fac154d05043b57d2d60cd69238423708851a92aa6a39725b1f696475048f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides the tools used to internally run the astropy test suite
from the installed astropy. It makes use of the `pytest`_ testing framework.
"""
import functools
import inspect
import os
import pickle
import sys
import warnings
import pytest
from astropy.units import allclose as quantity_allclose # noqa: F401
from astropy.utils.compat import PYTHON_LT_3_11
from astropy.utils.decorators import deprecated
from astropy.utils.exceptions import (
AstropyDeprecationWarning,
AstropyPendingDeprecationWarning,
)
# For backward-compatibility with affiliated packages
from .runner import TestRunner # noqa: F401
__all__ = [
"assert_follows_unicode_guidelines",
"assert_quantity_allclose",
"check_pickling_recovery",
"pickle_protocol",
"generic_recursive_equality_test",
]
def _save_coverage(cov, result, rootdir, testing_path):
"""
This method is called after the tests have been run in coverage mode
to cleanup and then save the coverage data and report.
"""
from astropy.utils.console import color_print
if result != 0:
return
# The coverage report includes the full path to the temporary
# directory, so we replace all the paths with the true source
# path. Note that this will not work properly for packages that still
# rely on 2to3.
try:
# Coverage 4.0: _harvest_data has been renamed to get_data, the
# lines dict is private
cov.get_data()
except AttributeError:
# Coverage < 4.0
cov._harvest_data()
lines = cov.data.lines
else:
lines = cov.data._lines
for key in list(lines.keys()):
new_path = os.path.relpath(
os.path.realpath(key), os.path.realpath(testing_path)
)
new_path = os.path.abspath(os.path.join(rootdir, new_path))
lines[new_path] = lines.pop(key)
color_print("Saving coverage data in .coverage...", "green")
cov.save()
color_print("Saving HTML coverage report in htmlcov...", "green")
cov.html_report(directory=os.path.join(rootdir, "htmlcov"))
@deprecated("5.1", alternative="pytest.raises")
class raises:
"""A decorator to mark that a test should raise a given exception.
Use as follows::
@raises(ZeroDivisionError)
def test_foo():
x = 1/0
    This can also be used as a context manager, in which case it is just
    an alias for the ``pytest.raises`` context manager (because the
    two have the same name, this helps avoid confusion by being
    flexible).
.. note:: Usage of ``pytest.raises`` is preferred.
"""
# pep-8 naming exception -- this is a decorator class
def __init__(self, exc):
self._exc = exc
self._ctx = None
def __call__(self, func):
@functools.wraps(func)
def run_raises_test(*args, **kwargs):
pytest.raises(self._exc, func, *args, **kwargs)
return run_raises_test
def __enter__(self):
self._ctx = pytest.raises(self._exc)
return self._ctx.__enter__()
def __exit__(self, *exc_info):
return self._ctx.__exit__(*exc_info)
# TODO: Remove these when deprecation period of things deprecated in PR 12633 are removed.
_deprecations_as_exceptions = False
_include_astropy_deprecations = True
_modules_to_ignore_on_import = {
r"compiler", # A deprecated stdlib module used by pytest
r"scipy",
r"pygments",
r"ipykernel",
r"IPython", # deprecation warnings for async and await
r"setuptools",
}
_warnings_to_ignore_entire_module = set()
_warnings_to_ignore_by_pyver = {
None: { # Python version agnostic
# https://github.com/astropy/astropy/pull/7372
(
r"Importing from numpy\.testing\.decorators is deprecated, "
r"import from numpy\.testing instead\.",
DeprecationWarning,
),
# inspect raises this slightly different warning on Python 3.7.
# Keeping it since e.g. lxml as of 3.8.0 is still calling getargspec()
(
r"inspect\.getargspec\(\) is deprecated, use "
r"inspect\.signature\(\) or inspect\.getfullargspec\(\)",
DeprecationWarning,
),
# https://github.com/astropy/pytest-doctestplus/issues/29
(r"split\(\) requires a non-empty pattern match", FutureWarning),
# Package resolution warning that we can do nothing about
(
r"can't resolve package from __spec__ or __package__, "
r"falling back on __name__ and __path__",
ImportWarning,
),
},
(3, 7): {
# Deprecation warning for collections.abc, fixed in Astropy but still
# used in lxml, and maybe others
(r"Using or importing the ABCs from 'collections'", DeprecationWarning)
},
}
@deprecated("5.1", alternative="https://docs.pytest.org/en/stable/warnings.html")
def enable_deprecations_as_exceptions(
include_astropy_deprecations=True,
modules_to_ignore_on_import=[],
warnings_to_ignore_entire_module=[],
warnings_to_ignore_by_pyver={},
):
"""
Turn on the feature that turns deprecations into exceptions.
Parameters
----------
include_astropy_deprecations : bool
If set to `True`, ``AstropyDeprecationWarning`` and
``AstropyPendingDeprecationWarning`` are also turned into exceptions.
modules_to_ignore_on_import : list of str
List of additional modules that generate deprecation warnings
on import, which are to be ignored. By default, these are already
        included: ``compiler``, ``scipy``, ``pygments``, ``ipykernel``,
        ``IPython``, and ``setuptools``.
warnings_to_ignore_entire_module : list of str
List of modules with deprecation warnings to ignore completely,
not just during import. If ``include_astropy_deprecations=True``
is given, ``AstropyDeprecationWarning`` and
``AstropyPendingDeprecationWarning`` are also ignored for the modules.
warnings_to_ignore_by_pyver : dict
Dictionary mapping tuple of ``(major, minor)`` Python version to
a list of ``(warning_message, warning_class)`` to ignore.
Python version-agnostic warnings should be mapped to `None` key.
        This is in addition to those already ignored by default
(see ``_warnings_to_ignore_by_pyver`` values).
"""
global _deprecations_as_exceptions
_deprecations_as_exceptions = True
global _include_astropy_deprecations
_include_astropy_deprecations = include_astropy_deprecations
global _modules_to_ignore_on_import
_modules_to_ignore_on_import.update(modules_to_ignore_on_import)
global _warnings_to_ignore_entire_module
_warnings_to_ignore_entire_module.update(warnings_to_ignore_entire_module)
global _warnings_to_ignore_by_pyver
for key, val in warnings_to_ignore_by_pyver.items():
if key in _warnings_to_ignore_by_pyver:
_warnings_to_ignore_by_pyver[key].update(val)
else:
_warnings_to_ignore_by_pyver[key] = set(val)
@deprecated("5.1", alternative="https://docs.pytest.org/en/stable/warnings.html")
def treat_deprecations_as_exceptions():
"""
Turn all DeprecationWarnings (which indicate deprecated uses of
Python itself or Numpy, but not within Astropy, where we use our
own deprecation warning class) into exceptions so that we find
out about them early.
This completely resets the warning filters and any "already seen"
warning state.
"""
# First, totally reset the warning state. The modules may change during
# this iteration thus we copy the original state to a list to iterate
# on. See https://github.com/astropy/astropy/pull/5513.
for module in list(sys.modules.values()):
try:
del module.__warningregistry__
except Exception:
pass
if not _deprecations_as_exceptions:
return
warnings.resetwarnings()
# Hide the next couple of DeprecationWarnings
warnings.simplefilter("ignore", DeprecationWarning)
# Here's the wrinkle: a couple of our third-party dependencies
# (pytest and scipy) are still using deprecated features
# themselves, and we'd like to ignore those. Fortunately, those
# show up only at import time, so if we import those things *now*,
# before we turn the warnings into exceptions, we're golden.
for m in _modules_to_ignore_on_import:
try:
__import__(m)
except ImportError:
pass
# Now, start over again with the warning filters
warnings.resetwarnings()
# Now, turn these warnings into exceptions
_all_warns = [DeprecationWarning, FutureWarning, ImportWarning]
# Only turn astropy deprecation warnings into exceptions if requested
if _include_astropy_deprecations:
_all_warns += [AstropyDeprecationWarning, AstropyPendingDeprecationWarning]
for w in _all_warns:
warnings.filterwarnings("error", ".*", w)
# This ignores all specified warnings from given module(s),
# not just on import, for use of Astropy affiliated packages.
for m in _warnings_to_ignore_entire_module:
for w in _all_warns:
warnings.filterwarnings("ignore", category=w, module=m)
# This ignores only specified warnings by Python version, if applicable.
for v in _warnings_to_ignore_by_pyver:
if v is None or sys.version_info[:2] == v:
for s in _warnings_to_ignore_by_pyver[v]:
warnings.filterwarnings("ignore", s[0], s[1])
@deprecated("5.1", alternative="pytest.warns")
class catch_warnings(warnings.catch_warnings):
"""
A high-powered version of warnings.catch_warnings to use for testing
and to make sure that there is no dependence on the order in which
the tests are run.
This completely blitzes any memory of any warnings that have
appeared before so that all warnings will be caught and displayed.
``*args`` is a set of warning classes to collect. If no arguments are
provided, all warnings are collected.
Use as follows::
with catch_warnings(MyCustomWarning) as w:
do.something.bad()
assert len(w) > 0
.. note:: Usage of :ref:`pytest.warns <pytest:warns>` is preferred.
"""
def __init__(self, *classes):
super().__init__(record=True)
self.classes = classes
def __enter__(self):
warning_list = super().__enter__()
treat_deprecations_as_exceptions()
if len(self.classes) == 0:
warnings.simplefilter("always")
else:
warnings.simplefilter("ignore")
for cls in self.classes:
warnings.simplefilter("always", cls)
return warning_list
def __exit__(self, type, value, traceback):
treat_deprecations_as_exceptions()
@deprecated("5.1", alternative="pytest.mark.filterwarnings")
class ignore_warnings(catch_warnings):
"""
This can be used either as a context manager or function decorator to
ignore all warnings that occur within a function or block of code.
An optional category option can be supplied to only ignore warnings of a
certain category or categories (if a list is provided).
"""
def __init__(self, category=None):
super().__init__()
if isinstance(category, type) and issubclass(category, Warning):
self.category = [category]
else:
self.category = category
def __call__(self, func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
# Originally this just reused self, but that doesn't work if the
# function is called more than once so we need to make a new
# context manager instance for each call
with self.__class__(category=self.category):
return func(*args, **kwargs)
return wrapper
def __enter__(self):
retval = super().__enter__()
if self.category is not None:
for category in self.category:
warnings.simplefilter("ignore", category)
else:
warnings.simplefilter("ignore")
return retval
def assert_follows_unicode_guidelines(x, roundtrip=None):
"""
Test that an object follows our Unicode policy. See
"Unicode guidelines" in the coding guidelines.
Parameters
----------
x : object
The instance to test
roundtrip : module, optional
When provided, this namespace will be used to evaluate
``repr(x)`` and ensure that it roundtrips. It will also
        ensure that ``__bytes__(x)`` roundtrips.
If not provided, no roundtrip testing will be performed.
"""
from astropy import conf
with conf.set_temp("unicode_output", False):
bytes_x = bytes(x)
unicode_x = str(x)
repr_x = repr(x)
assert isinstance(bytes_x, bytes)
bytes_x.decode("ascii")
assert isinstance(unicode_x, str)
unicode_x.encode("ascii")
assert isinstance(repr_x, str)
if isinstance(repr_x, bytes):
repr_x.decode("ascii")
else:
repr_x.encode("ascii")
if roundtrip is not None:
assert x.__class__(bytes_x) == x
assert x.__class__(unicode_x) == x
assert eval(repr_x, roundtrip) == x
with conf.set_temp("unicode_output", True):
bytes_x = bytes(x)
unicode_x = str(x)
repr_x = repr(x)
assert isinstance(bytes_x, bytes)
bytes_x.decode("ascii")
assert isinstance(unicode_x, str)
assert isinstance(repr_x, str)
if isinstance(repr_x, bytes):
repr_x.decode("ascii")
else:
repr_x.encode("ascii")
if roundtrip is not None:
assert x.__class__(bytes_x) == x
assert x.__class__(unicode_x) == x
assert eval(repr_x, roundtrip) == x
@pytest.fixture(params=[0, 1, -1])
def pickle_protocol(request):
"""
    Fixture to run all the tests for protocols 0, 1, and -1 (the most
    advanced).
(Originally from astropy.table.tests.test_pickle).
"""
return request.param
def generic_recursive_equality_test(a, b, class_history):
"""
Check if the attributes of a and b are equal. Then,
check if the attributes of the attributes are equal.
"""
if PYTHON_LT_3_11:
dict_a = a.__getstate__() if hasattr(a, "__getstate__") else a.__dict__
else:
# NOTE: The call may need to be adapted if other objects implementing a __getstate__
# with required argument(s) are passed to this function.
# For a class with `__slots__` the default state is not a `dict`;
# with neither `__dict__` nor `__slots__` it is `None`.
state = a.__getstate__(a) if inspect.isclass(a) else a.__getstate__()
dict_a = state if isinstance(state, dict) else getattr(a, "__dict__", dict())
dict_b = b.__dict__
for key in dict_a:
assert key in dict_b, f"Did not pickle {key}"
if dict_a[key].__class__.__eq__ is not object.__eq__:
# Only compare if the class defines a proper equality test.
# E.g., info does not define __eq__, and hence defers to
# object.__eq__, which is equivalent to checking that two
# instances are the same. This will generally not be true
# after pickling.
eq = dict_a[key] == dict_b[key]
if "__iter__" in dir(eq):
eq = False not in eq
assert eq, f"Value of {key} changed by pickling"
if hasattr(dict_a[key], "__dict__"):
if dict_a[key].__class__ in class_history:
# attempt to prevent infinite recursion
pass
else:
new_class_history = [dict_a[key].__class__]
new_class_history.extend(class_history)
generic_recursive_equality_test(
dict_a[key], dict_b[key], new_class_history
)
def check_pickling_recovery(original, protocol):
"""
Try to pickle an object. If successful, make sure
the object's attributes survived pickling and unpickling.
"""
f = pickle.dumps(original, protocol=protocol)
unpickled = pickle.loads(f)
class_history = [original.__class__]
generic_recursive_equality_test(original, unpickled, class_history)
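# A minimal sketch of how these helpers combine with the ``pickle_protocol``
# fixture in a test (the ``Simple`` class is hypothetical):
#
#     class Simple:
#         def __init__(self, x):
#             self.x = x
#
#         def __eq__(self, other):
#             return self.x == other.x
#
#     def test_simple_roundtrip(pickle_protocol):
#         check_pickling_recovery(Simple(1), pickle_protocol)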
def assert_quantity_allclose(actual, desired, rtol=1.0e-7, atol=None, **kwargs):
"""
Raise an assertion if two objects are not equal up to desired tolerance.
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.testing.assert_allclose`.
"""
import numpy as np
from astropy.units.quantity import _unquantify_allclose_arguments
np.testing.assert_allclose(
*_unquantify_allclose_arguments(actual, desired, rtol, atol), **kwargs
)
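# For example, quantities with compatible units compare after unit
# conversion (a sketch):
#
#     >>> from astropy import units as u
#     >>> assert_quantity_allclose(1000 * u.m, 1 * u.km)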
|
2ad32613e11daa3c5dd5b79f0e2ef4cb2172db925860bc94a297d7a00cbee88b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from copy import deepcopy
import numpy as np
from astropy import units as u
from astropy.table import QTable, Table, groups
from astropy.time import Time, TimeDelta
from astropy.timeseries.core import BaseTimeSeries, autocheck_required_columns
from astropy.units import Quantity, UnitsError
from astropy.utils.decorators import deprecated_renamed_argument
__all__ = ["TimeSeries"]
@autocheck_required_columns
class TimeSeries(BaseTimeSeries):
"""
A class to represent time series data in tabular form.
`~astropy.timeseries.TimeSeries` provides a class for representing time
series as a collection of values of different quantities measured at specific
points in time (for time series with finite time bins, see the
`~astropy.timeseries.BinnedTimeSeries` class).
`~astropy.timeseries.TimeSeries` is a sub-class of `~astropy.table.QTable`
    and thus provides all the standard table manipulation methods available to
tables, but it also provides additional conveniences for dealing with time
series, such as a flexible initializer for setting up the times, a method
for folding time series, and a ``time`` attribute for easy access to the
time values.
See also: https://docs.astropy.org/en/stable/timeseries/
Parameters
----------
data : numpy ndarray, dict, list, `~astropy.table.Table`, or table-like object, optional
Data to initialize time series. This does not need to contain the times,
which can be provided separately, but if it does contain the times they
should be in a column called ``'time'`` to be automatically recognized.
time : `~astropy.time.Time`, `~astropy.time.TimeDelta` or iterable
The times at which the values are sampled - this can be either given
directly as a `~astropy.time.Time` or `~astropy.time.TimeDelta` array
or as any iterable that initializes the `~astropy.time.Time` class. If
this is given, then the remaining time-related arguments should not be used.
time_start : `~astropy.time.Time` or str
The time of the first sample in the time series. This is an alternative
to providing ``time`` and requires that ``time_delta`` is also provided.
time_delta : `~astropy.time.TimeDelta` or `~astropy.units.Quantity` ['time']
The step size in time for the series. This can either be a scalar if
the time series is evenly sampled, or an array of values if it is not.
n_samples : int
The number of time samples for the series. This is only used if both
``time_start`` and ``time_delta`` are provided and are scalar values.
**kwargs : dict, optional
Additional keyword arguments are passed to `~astropy.table.QTable`.
"""
_required_columns = ["time"]
def __init__(
self,
data=None,
*,
time=None,
time_start=None,
time_delta=None,
n_samples=None,
**kwargs,
):
super().__init__(data=data, **kwargs)
# For some operations, an empty time series needs to be created, then
# columns added one by one. We should check that when columns are added
# manually, time is added first and is of the right type.
if data is None and time is None and time_start is None and time_delta is None:
self._required_columns_relax = True
return
# First if time has been given in the table data, we should extract it
# and treat it as if it had been passed as a keyword argument.
if data is not None:
if n_samples is not None:
if n_samples != len(self):
raise TypeError(
"'n_samples' has been given both and it is not the "
"same length as the input data."
)
else:
n_samples = len(self)
if "time" in self.colnames:
if time is None:
time = self.columns["time"]
else:
raise TypeError(
"'time' has been given both in the table and as a keyword argument"
)
if time is None and time_start is None:
raise TypeError("Either 'time' or 'time_start' should be specified")
elif time is not None and time_start is not None:
raise TypeError("Cannot specify both 'time' and 'time_start'")
if time is not None and not isinstance(time, (Time, TimeDelta)):
time = Time(time)
if time_start is not None and not isinstance(time_start, (Time, TimeDelta)):
time_start = Time(time_start)
if time_delta is not None and not isinstance(time_delta, (Quantity, TimeDelta)):
raise TypeError("'time_delta' should be a Quantity or a TimeDelta")
if isinstance(time_delta, TimeDelta):
time_delta = time_delta.sec * u.s
if time_start is not None:
# We interpret this as meaning that time is that of the first
# sample and that the interval is given by time_delta.
if time_delta is None:
raise TypeError("'time' is scalar, so 'time_delta' is required")
if time_delta.isscalar:
time_delta = np.repeat(time_delta, n_samples)
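            # Turn the per-sample steps into offsets from time_start:
            # cumulative-sum the deltas, then shift right by one so the
            # first sample sits at offset zero.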
time_delta = np.cumsum(time_delta)
time_delta = np.roll(time_delta, 1)
time_delta[0] = 0.0 * u.s
time = time_start + time_delta
elif len(self.colnames) > 0 and len(time) != len(self):
raise ValueError(
f"Length of 'time' ({len(time)}) should match data length ({n_samples})"
)
elif time_delta is not None:
raise TypeError(
"'time_delta' should not be specified since 'time' is an array"
)
with self._delay_required_column_checks():
if "time" in self.colnames:
self.remove_column("time")
self.add_column(time, index=0, name="time")
@property
def time(self):
"""
The time values.
"""
return self["time"]
@deprecated_renamed_argument("midpoint_epoch", "epoch_time", "4.0")
def fold(
self,
period=None,
epoch_time=None,
epoch_phase=0,
wrap_phase=None,
normalize_phase=False,
):
"""
Return a new `~astropy.timeseries.TimeSeries` folded with a period and
epoch.
Parameters
----------
period : `~astropy.units.Quantity` ['time']
The period to use for folding
epoch_time : `~astropy.time.Time`
The time to use as the reference epoch, at which the relative time
offset / phase will be ``epoch_phase``. Defaults to the first time
in the time series.
epoch_phase : float or `~astropy.units.Quantity` ['dimensionless', 'time']
Phase of ``epoch_time``. If ``normalize_phase`` is `True`, this
should be a dimensionless value, while if ``normalize_phase`` is
``False``, this should be a `~astropy.units.Quantity` with time
units. Defaults to 0.
wrap_phase : float or `~astropy.units.Quantity` ['dimensionless', 'time']
The value of the phase above which values are wrapped back by one
period. If ``normalize_phase`` is `True`, this should be a
dimensionless value, while if ``normalize_phase`` is ``False``,
this should be a `~astropy.units.Quantity` with time units.
Defaults to half the period, so that the resulting time series goes
from ``-period / 2`` to ``period / 2`` (if ``normalize_phase`` is
`False`) or -0.5 to 0.5 (if ``normalize_phase`` is `True`).
normalize_phase : bool
If `False` phase is returned as `~astropy.time.TimeDelta`,
otherwise as a dimensionless `~astropy.units.Quantity`.
Returns
-------
folded_timeseries : `~astropy.timeseries.TimeSeries`
The folded time series object with phase as the ``time`` column.
"""
if not isinstance(period, Quantity) or period.unit.physical_type != "time":
raise UnitsError("period should be a Quantity in units of time")
folded = self.copy()
if epoch_time is None:
epoch_time = self.time[0]
else:
epoch_time = Time(epoch_time)
period_sec = period.to_value(u.s)
if normalize_phase:
if (
isinstance(epoch_phase, Quantity)
and epoch_phase.unit.physical_type != "dimensionless"
):
raise UnitsError(
"epoch_phase should be a dimensionless Quantity "
"or a float when normalize_phase=True"
)
epoch_phase_sec = epoch_phase * period_sec
else:
if epoch_phase == 0:
epoch_phase_sec = 0.0
else:
if (
not isinstance(epoch_phase, Quantity)
or epoch_phase.unit.physical_type != "time"
):
raise UnitsError(
"epoch_phase should be a Quantity in units "
"of time when normalize_phase=False"
)
epoch_phase_sec = epoch_phase.to_value(u.s)
if wrap_phase is None:
wrap_phase = period_sec / 2
else:
if normalize_phase:
if isinstance(
wrap_phase, Quantity
) and not wrap_phase.unit.is_equivalent(u.one):
raise UnitsError(
"wrap_phase should be dimensionless when normalize_phase=True"
)
else:
if wrap_phase < 0 or wrap_phase > 1:
raise ValueError("wrap_phase should be between 0 and 1")
else:
wrap_phase = wrap_phase * period_sec
else:
if (
isinstance(wrap_phase, Quantity)
and wrap_phase.unit.physical_type == "time"
):
if wrap_phase < 0 or wrap_phase > period:
raise ValueError(
"wrap_phase should be between 0 and the period"
)
else:
wrap_phase = wrap_phase.to_value(u.s)
else:
raise UnitsError(
"wrap_phase should be a Quantity in units "
"of time when normalize_phase=False"
)
relative_time_sec = (
(self.time - epoch_time).sec + epoch_phase_sec + (period_sec - wrap_phase)
) % period_sec - (period_sec - wrap_phase)
folded_time = TimeDelta(relative_time_sec * u.s)
if normalize_phase:
folded_time = (folded_time / period).decompose()
period = period_sec = 1
with folded._delay_required_column_checks():
folded.remove_column("time")
folded.add_column(folded_time, name="time", index=0)
return folded
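    # A small folding sketch (values are illustrative):
    #
    #     >>> from astropy import units as u
    #     >>> from astropy.timeseries import TimeSeries
    #     >>> ts = TimeSeries(time_start='2016-03-22T12:30:31',
    #     ...                 time_delta=3 * u.s, n_samples=5)
    #     >>> folded = ts.fold(period=6 * u.s)  # 'time' becomes a phase TimeDelta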
def __getitem__(self, item):
if self._is_list_or_tuple_of_str(item):
if "time" not in item:
out = QTable(
[self[x] for x in item],
meta=deepcopy(self.meta),
copy_indices=self._copy_indices,
)
out._groups = groups.TableGroups(
out, indices=self.groups._indices, keys=self.groups._keys
)
return out
return super().__getitem__(item)
def add_column(self, *args, **kwargs):
"""
See :meth:`~astropy.table.Table.add_column`.
"""
# Note that the docstring is inherited from QTable
result = super().add_column(*args, **kwargs)
if len(self.indices) == 0 and "time" in self.colnames:
self.add_index("time")
return result
def add_columns(self, *args, **kwargs):
"""
See :meth:`~astropy.table.Table.add_columns`.
"""
# Note that the docstring is inherited from QTable
result = super().add_columns(*args, **kwargs)
if len(self.indices) == 0 and "time" in self.colnames:
self.add_index("time")
return result
@classmethod
    def from_pandas(cls, df, time_scale="utc"):
"""
Convert a :class:`~pandas.DataFrame` to a
:class:`astropy.timeseries.TimeSeries`.
Parameters
----------
df : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance.
time_scale : str
The time scale to pass into `astropy.time.Time`.
Defaults to ``UTC``.
"""
from pandas import DataFrame, DatetimeIndex
if not isinstance(df, DataFrame):
raise TypeError("Input should be a pandas DataFrame")
if not isinstance(df.index, DatetimeIndex):
raise TypeError("DataFrame does not have a DatetimeIndex")
time = Time(df.index, scale=time_scale)
table = Table.from_pandas(df)
return TimeSeries(time=time, data=table)
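    # A sketch of the pandas round trip (requires pandas to be installed):
    #
    #     >>> import pandas as pd
    #     >>> df = pd.DataFrame({'flux': [1.0, 2.0]},
    #     ...                   index=pd.DatetimeIndex(['2020-01-01',
    #     ...                                           '2020-01-02']))
    #     >>> ts = TimeSeries.from_pandas(df)
    #     >>> df2 = ts.to_pandas()  # back to a DatetimeIndex'ed DataFrame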
def to_pandas(self):
"""
Convert this :class:`~astropy.timeseries.TimeSeries` to a
:class:`~pandas.DataFrame` with a :class:`~pandas.DatetimeIndex` index.
Returns
-------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
"""
return Table(self).to_pandas(index="time")
@classmethod
def read(
        cls,
filename,
time_column=None,
time_format=None,
time_scale=None,
format=None,
*args,
**kwargs,
):
"""
        Read and parse a file and return an `astropy.timeseries.TimeSeries`.
This method uses the unified I/O infrastructure in Astropy which makes
it easy to define readers/writers for various classes
(https://docs.astropy.org/en/stable/io/unified.html). By default, this
        method will try to use readers defined specifically for the
        `astropy.timeseries.TimeSeries` class - however, it is also
        possible to use the ``format`` keyword to specify formats defined for
        the `astropy.table.Table` class - in this case, you will need to also
        provide the name of the column containing the times, as well as any
        other relevant column names (see the Parameters section below for
        details)::
>>> from astropy.timeseries import TimeSeries
>>> ts = TimeSeries.read('sampled.dat', format='ascii.ecsv',
... time_column='date') # doctest: +SKIP
Parameters
----------
filename : str
File to parse.
format : str
File format specifier.
time_column : str, optional
The name of the time column.
time_format : str, optional
The time format for the time column.
time_scale : str, optional
The time scale for the time column.
*args : tuple, optional
Positional arguments passed through to the data reader.
**kwargs : dict, optional
Keyword arguments passed through to the data reader.
Returns
-------
out : `astropy.timeseries.sampled.TimeSeries`
TimeSeries corresponding to file contents.
Notes
-----
"""
try:
            # First we try the readers defined for the TimeSeries class
return super().read(filename, format=format, *args, **kwargs)
except TypeError:
# Otherwise we fall back to the default Table readers
if time_column is None:
raise ValueError(
"``time_column`` should be provided since the default Table readers"
" are being used."
)
table = Table.read(filename, format=format, *args, **kwargs)
if time_column in table.colnames:
time = Time(
table.columns[time_column], scale=time_scale, format=time_format
)
table.remove_column(time_column)
else:
raise ValueError(
f"Time column '{time_column}' not found in the input data."
)
return TimeSeries(time=time, data=table)
|
666ee2a10fdcdc4613c70db695285881e48d1f56589ce30e040d4d0bfb8d99fb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import numpy as np
from astropy import units as u
from astropy.time import Time, TimeDelta
from astropy.timeseries.binned import BinnedTimeSeries
from astropy.timeseries.sampled import TimeSeries
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ["aggregate_downsample"]
def reduceat(array, indices, function):
"""
    Manual reduceat functionality for cases where Numpy functions don't have
    a reduceat. If the input function has a ``reduceat`` method it is called
    directly; otherwise each slice is reduced in a Python loop.
"""
if len(indices) == 0:
return np.array([])
elif hasattr(function, "reduceat"):
return np.array(function.reduceat(array, indices))
else:
result = []
for i in range(len(indices) - 1):
if indices[i + 1] <= indices[i] + 1:
result.append(function(array[indices[i]]))
else:
result.append(function(array[indices[i] : indices[i + 1]]))
result.append(function(array[indices[-1] :]))
return np.array(result)
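# For example, ``reduceat(np.arange(8), [0, 4], np.nanmean)`` takes the
# Python-loop fallback (``np.nanmean`` has no ``reduceat``) and returns the
# mean of each slice: ``array([1.5, 5.5])``.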
def _to_relative_longdouble(time: Time, rel_base: Time) -> np.longdouble:
# Convert the time objects into plain ndarray
# so that they be used to make various operations faster, including
# - `np.searchsorted()`
# - time comparison.
#
# Relative time in seconds with np.longdouble type is used to:
# - a consistent format for search, irrespective of the format/scale of the inputs,
# - retain the best precision possible
return (time - rel_base).to_value(format="sec", subfmt="long")
def aggregate_downsample(
time_series,
*,
time_bin_size=None,
time_bin_start=None,
time_bin_end=None,
n_bins=None,
aggregate_func=None
):
"""
Downsample a time series by binning values into bins with a fixed size or
custom sizes, using a single function to combine the values in the bin.
Parameters
----------
time_series : :class:`~astropy.timeseries.TimeSeries`
The time series to downsample.
time_bin_size : `~astropy.units.Quantity` or `~astropy.time.TimeDelta` ['time'], optional
The time interval for the binned time series - this is either a scalar
value (in which case all time bins will be assumed to have the same
        duration) or an array of values (in which case each time bin can
have a different duration). If this argument is provided,
``time_bin_end`` should not be provided.
time_bin_start : `~astropy.time.Time` or iterable, optional
The start time for the binned time series - this can be either given
directly as a `~astropy.time.Time` array or as any iterable that
initializes the `~astropy.time.Time` class. This can also be a scalar
value if ``time_bin_size`` or ``time_bin_end`` is provided.
Defaults to the first time in the sampled time series.
time_bin_end : `~astropy.time.Time` or iterable, optional
The times of the end of each bin - this can be either given directly as
a `~astropy.time.Time` array or as any iterable that initializes the
`~astropy.time.Time` class. This can only be given if ``time_bin_start``
is provided or its default is used. If ``time_bin_end`` is scalar and
``time_bin_start`` is an array, time bins are assumed to be contiguous;
the end of each bin is the start of the next one, and ``time_bin_end`` gives
the end time for the last bin. If ``time_bin_end`` is an array and
``time_bin_start`` is scalar, bins will be contiguous. If both ``time_bin_end``
and ``time_bin_start`` are arrays, bins do not need to be contiguous.
If this argument is provided, ``time_bin_size`` should not be provided.
n_bins : int, optional
The number of bins to use. Defaults to the number needed to fit all
the original points. If both ``time_bin_start`` and ``time_bin_size``
are provided and are scalar values, this determines the total bins
within that interval. If ``time_bin_start`` is an iterable, this
parameter will be ignored.
aggregate_func : callable, optional
The function to use for combining points in the same bin. Defaults
to np.nanmean.
Returns
-------
binned_time_series : :class:`~astropy.timeseries.BinnedTimeSeries`
The downsampled time series.
"""
if not isinstance(time_series, TimeSeries):
raise TypeError("time_series should be a TimeSeries")
if time_bin_size is not None and not isinstance(
time_bin_size, (u.Quantity, TimeDelta)
):
raise TypeError("'time_bin_size' should be a Quantity or a TimeDelta")
if time_bin_start is not None and not isinstance(time_bin_start, (Time, TimeDelta)):
time_bin_start = Time(time_bin_start)
if time_bin_end is not None and not isinstance(time_bin_end, (Time, TimeDelta)):
time_bin_end = Time(time_bin_end)
# Use the table sorted by time
ts_sorted = time_series.iloc[:]
# If start time is not provided, it is assumed to be the start of the timeseries
if time_bin_start is None:
time_bin_start = ts_sorted.time[0]
# Total duration of the timeseries is needed for determining either
    # `time_bin_size` or `n_bins` in the case of scalar `time_bin_start`
if time_bin_start.isscalar:
time_duration = (ts_sorted.time[-1] - time_bin_start).sec
if time_bin_size is None and time_bin_end is None:
if time_bin_start.isscalar:
if n_bins is None:
raise TypeError(
"With single 'time_bin_start' either 'n_bins', "
"'time_bin_size' or time_bin_end' must be provided"
)
else:
                # `n_bins` defaults to the number needed to fit all points
time_bin_size = time_duration / n_bins * u.s
else:
time_bin_end = np.maximum(ts_sorted.time[-1], time_bin_start[-1])
if time_bin_start.isscalar:
if time_bin_size is not None:
if time_bin_size.isscalar:
# Determine the number of bins
if n_bins is None:
bin_size_sec = time_bin_size.to_value(u.s)
n_bins = int(np.ceil(time_duration / bin_size_sec))
elif time_bin_end is not None:
if not time_bin_end.isscalar:
# Convert start time to an array and populate using `time_bin_end`
scalar_start_time = time_bin_start
time_bin_start = time_bin_end.replicate(copy=True)
time_bin_start[0] = scalar_start_time
time_bin_start[1:] = time_bin_end[:-1]
# Check for overlapping bins, and warn if they are present
if time_bin_end is not None:
if (
not time_bin_end.isscalar
and not time_bin_start.isscalar
and np.any(time_bin_start[1:] < time_bin_end[:-1])
):
warnings.warn(
"Overlapping bins should be avoided since they "
"can lead to double-counting of data during binning.",
AstropyUserWarning,
)
binned = BinnedTimeSeries(
time_bin_size=time_bin_size,
time_bin_start=time_bin_start,
time_bin_end=time_bin_end,
n_bins=n_bins,
)
if aggregate_func is None:
aggregate_func = np.nanmean
# Start and end times of the binned timeseries
bin_start = binned.time_bin_start
bin_end = binned.time_bin_end
# Set `n_bins` to match the length of `time_bin_start` if
# `n_bins` is unspecified or if `time_bin_start` is an iterable
if n_bins is None or not time_bin_start.isscalar:
n_bins = len(bin_start)
# Find the subset of the table that is inside the union of all bins
# - output: `keep` a mask to create the subset
    # - use relative time in seconds (as `np.longdouble`) in creating `keep`
    #   to speed up the comparison
# (`Time` object comparison is rather slow)
# - tiny sacrifice on precision (< 0.01ns on 64 bit platform)
rel_base = ts_sorted.time[0]
rel_bin_start = _to_relative_longdouble(bin_start, rel_base)
rel_bin_end = _to_relative_longdouble(bin_end, rel_base)
rel_ts_sorted_time = _to_relative_longdouble(ts_sorted.time, rel_base)
keep = (rel_ts_sorted_time >= rel_bin_start[0]) & (
rel_ts_sorted_time <= rel_bin_end[-1]
)
# Find out indices to be removed because of noncontiguous bins
#
# Only need to check when adjacent bins have gaps, i.e.,
# bin_start[ind + 1] > bin_end[ind]
# - see: https://github.com/astropy/astropy/issues/13058#issuecomment-1090846697
# on thoughts on how to reduce the number of times to loop
noncontiguous_bins_indices = np.where(rel_bin_start[1:] > rel_bin_end[:-1])[0]
for ind in noncontiguous_bins_indices:
delete_indices = np.where(
np.logical_and(
rel_ts_sorted_time > rel_bin_end[ind],
rel_ts_sorted_time < rel_bin_start[ind + 1],
)
)
keep[delete_indices] = False
rel_subset_time = rel_ts_sorted_time[keep]
# Figure out which bin each row falls in by sorting with respect
# to the bin end times
indices = np.searchsorted(rel_bin_end, rel_subset_time)
    # For time == bin_start[i+1] == bin_end[i], let bin_start take precedence
if len(indices) and np.all(rel_bin_start[1:] >= rel_bin_end[:-1]):
indices_start = np.searchsorted(
rel_subset_time, rel_bin_start[rel_bin_start <= rel_ts_sorted_time[-1]]
)
indices[indices_start] = np.arange(len(indices_start))
# Determine rows where values are defined
if len(indices):
groups = np.hstack([0, np.nonzero(np.diff(indices))[0] + 1])
else:
groups = np.array([])
# Find unique indices to determine which rows in the final time series
# will not be empty.
unique_indices = np.unique(indices)
# Add back columns
subset = ts_sorted[keep]
for colname in subset.colnames:
if colname == "time":
continue
values = subset[colname]
# FIXME: figure out how to avoid the following, if possible
if not isinstance(values, (np.ndarray, u.Quantity)):
warnings.warn(
"Skipping column {0} since it has a mix-in type", AstropyUserWarning
)
continue
if isinstance(values, u.Quantity):
data = u.Quantity(np.repeat(np.nan, n_bins), unit=values.unit)
data[unique_indices] = u.Quantity(
reduceat(values.value, groups, aggregate_func), values.unit, copy=False
)
else:
data = np.ma.zeros(n_bins, dtype=values.dtype)
data.mask = 1
data[unique_indices] = reduceat(values, groups, aggregate_func)
data.mask[unique_indices] = 0
binned[colname] = data
return binned
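# A minimal downsampling sketch (values are illustrative):
#
#     >>> import astropy.units as u
#     >>> from astropy.timeseries import TimeSeries, aggregate_downsample
#     >>> ts = TimeSeries(time_start='2016-03-22T12:30:31',
#     ...                 time_delta=1 * u.s, n_samples=6)
#     >>> ts['flux'] = [1, 2, 3, 4, 5, 6] * u.mJy
#     >>> down = aggregate_downsample(ts, time_bin_size=2 * u.s)
#     >>> # each bin holds the nanmean of the samples falling inside it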
|
e8d516479e7ea83a0caeb5a8e129b4cbd82ca94ac2049fffa6b8e78c4e83b0ce | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from copy import deepcopy
import numpy as np
from astropy import units as u
from astropy.table import QTable, Table, groups
from astropy.time import Time, TimeDelta
from astropy.timeseries.core import BaseTimeSeries, autocheck_required_columns
from astropy.units import Quantity
__all__ = ["BinnedTimeSeries"]
@autocheck_required_columns
class BinnedTimeSeries(BaseTimeSeries):
"""
A class to represent binned time series data in tabular form.
`~astropy.timeseries.BinnedTimeSeries` provides a class for
representing time series as a collection of values of different
quantities measured in time bins (for time series with values
sampled at specific times, see the `~astropy.timeseries.TimeSeries`
class). `~astropy.timeseries.BinnedTimeSeries` is a sub-class of
`~astropy.table.QTable` and thus provides all the standard table
    manipulation methods available to tables, but it also provides
additional conveniences for dealing with time series, such as a
flexible initializer for setting up the times, and attributes to
access the start/center/end time of bins.
See also: https://docs.astropy.org/en/stable/timeseries/
Parameters
----------
data : numpy ndarray, dict, list, table-like object, optional
Data to initialize time series. This does not need to contain the
times, which can be provided separately, but if it does contain the
times they should be in columns called ``'time_bin_start'`` and
``'time_bin_size'`` to be automatically recognized.
time_bin_start : `~astropy.time.Time` or iterable
The times of the start of each bin - this can be either given
directly as a `~astropy.time.Time` array or as any iterable that
initializes the `~astropy.time.Time` class. If this is given, then
the remaining time-related arguments should not be used. This can also
be a scalar value if ``time_bin_size`` is provided.
time_bin_end : `~astropy.time.Time` or iterable
The times of the end of each bin - this can be either given directly
as a `~astropy.time.Time` array or as any value or iterable that
initializes the `~astropy.time.Time` class. If this is given, then the
remaining time-related arguments should not be used. This can only be
given if ``time_bin_start`` is an array of values. If ``time_bin_end``
is a scalar, time bins are assumed to be contiguous, such that the end
of each bin is the start of the next one, and ``time_bin_end`` gives
the end time for the last bin. If ``time_bin_end`` is an array, the
time bins do not need to be contiguous. If this argument is provided,
``time_bin_size`` should not be provided.
time_bin_size : `~astropy.time.TimeDelta` or `~astropy.units.Quantity`
The size of the time bins, either as a scalar value (in which case all
time bins will be assumed to have the same duration) or as an array of
values (in which case each time bin can have a different duration).
If this argument is provided, ``time_bin_end`` should not be provided.
n_bins : int
The number of time bins for the series. This is only used if both
``time_bin_start`` and ``time_bin_size`` are provided and are scalar
values.
**kwargs : dict, optional
Additional keyword arguments are passed to `~astropy.table.QTable`.
"""
_required_columns = ["time_bin_start", "time_bin_size"]
def __init__(
self,
data=None,
*,
time_bin_start=None,
time_bin_end=None,
time_bin_size=None,
n_bins=None,
**kwargs,
):
super().__init__(data=data, **kwargs)
# For some operations, an empty time series needs to be created, then
# columns added one by one. We should check that when columns are added
# manually, time is added first and is of the right type.
if (
data is None
and time_bin_start is None
and time_bin_end is None
and time_bin_size is None
and n_bins is None
):
self._required_columns_relax = True
return
# First if time_bin_start and time_bin_end have been given in the table data, we
# should extract them and treat them as if they had been passed as
# keyword arguments.
if "time_bin_start" in self.colnames:
if time_bin_start is None:
time_bin_start = self.columns["time_bin_start"]
else:
raise TypeError(
"'time_bin_start' has been given both in the table "
"and as a keyword argument"
)
if "time_bin_size" in self.colnames:
if time_bin_size is None:
time_bin_size = self.columns["time_bin_size"]
else:
raise TypeError(
"'time_bin_size' has been given both in the table "
"and as a keyword argument"
)
if time_bin_start is None:
raise TypeError("'time_bin_start' has not been specified")
if time_bin_end is None and time_bin_size is None:
raise TypeError(
"Either 'time_bin_size' or 'time_bin_end' should be specified"
)
if not isinstance(time_bin_start, (Time, TimeDelta)):
time_bin_start = Time(time_bin_start)
if time_bin_end is not None and not isinstance(time_bin_end, (Time, TimeDelta)):
time_bin_end = Time(time_bin_end)
if time_bin_size is not None and not isinstance(
time_bin_size, (Quantity, TimeDelta)
):
raise TypeError("'time_bin_size' should be a Quantity or a TimeDelta")
if isinstance(time_bin_size, TimeDelta):
time_bin_size = time_bin_size.sec * u.s
if n_bins is not None and time_bin_size is not None:
if not (time_bin_start.isscalar and time_bin_size.isscalar):
raise TypeError(
"'n_bins' cannot be specified if 'time_bin_start' or "
                    "'time_bin_size' are not scalar"
)
if time_bin_start.isscalar:
# We interpret this as meaning that this is the start of the
# first bin and that the bins are contiguous. In this case,
# we require time_bin_size to be specified.
if time_bin_size is None:
raise TypeError(
"'time_bin_start' is scalar, so 'time_bin_size' is required"
)
if time_bin_size.isscalar:
if data is not None:
if n_bins is not None:
if n_bins != len(self):
raise TypeError(
"'n_bins' has been given and it is not the "
"same length as the input data."
)
else:
n_bins = len(self)
time_bin_size = np.repeat(time_bin_size, n_bins)
time_delta = np.cumsum(time_bin_size)
time_bin_end = time_bin_start + time_delta
# Now shift the array so that the first entry is 0
time_delta = np.roll(time_delta, 1)
time_delta[0] = 0.0 * u.s
# Make time_bin_start into an array
time_bin_start = time_bin_start + time_delta
else:
if len(self.colnames) > 0 and len(time_bin_start) != len(self):
raise ValueError(
f"Length of 'time_bin_start' ({len(time_bin_start)}) should match "
f"table length ({len(self)})"
)
if time_bin_end is not None:
if time_bin_end.isscalar:
times = time_bin_start.copy()
times[:-1] = times[1:]
times[-1] = time_bin_end
time_bin_end = times
time_bin_size = (time_bin_end - time_bin_start).sec * u.s
if time_bin_size.isscalar:
time_bin_size = np.repeat(time_bin_size, len(self))
with self._delay_required_column_checks():
if "time_bin_start" in self.colnames:
self.remove_column("time_bin_start")
if "time_bin_size" in self.colnames:
self.remove_column("time_bin_size")
self.add_column(time_bin_start, index=0, name="time_bin_start")
self.add_index("time_bin_start")
self.add_column(time_bin_size, index=1, name="time_bin_size")
@property
def time_bin_start(self):
"""
The start times of all the time bins.
"""
return self["time_bin_start"]
@property
def time_bin_center(self):
"""
The center times of all the time bins.
"""
return self["time_bin_start"] + self["time_bin_size"] * 0.5
@property
def time_bin_end(self):
"""
The end times of all the time bins.
"""
return self["time_bin_start"] + self["time_bin_size"]
@property
def time_bin_size(self):
"""
The sizes of all the time bins.
"""
return self["time_bin_size"]
def __getitem__(self, item):
if self._is_list_or_tuple_of_str(item):
if "time_bin_start" not in item or "time_bin_size" not in item:
out = QTable(
[self[x] for x in item],
meta=deepcopy(self.meta),
copy_indices=self._copy_indices,
)
out._groups = groups.TableGroups(
out, indices=self.groups._indices, keys=self.groups._keys
)
return out
return super().__getitem__(item)
@classmethod
def read(
        cls,
filename,
time_bin_start_column=None,
time_bin_end_column=None,
time_bin_size_column=None,
time_bin_size_unit=None,
time_format=None,
time_scale=None,
format=None,
*args,
**kwargs,
):
"""
        Read and parse a file, returning an `astropy.timeseries.BinnedTimeSeries`.
This method uses the unified I/O infrastructure in Astropy which makes
it easy to define readers/writers for various classes
(https://docs.astropy.org/en/stable/io/unified.html). By default, this
        method will try to use readers defined specifically for the
        `astropy.timeseries.BinnedTimeSeries` class - however, it is also
        possible to use the ``format`` keyword to specify formats defined for
        the `astropy.table.Table` class - in this case, you will also need to
        provide the name of the column containing the start times for the
        bins, as well as other column names (see the Parameters section below
        for details)::
>>> from astropy.timeseries.binned import BinnedTimeSeries
>>> ts = BinnedTimeSeries.read('binned.dat', format='ascii.ecsv',
... time_bin_start_column='date_start',
... time_bin_end_column='date_end') # doctest: +SKIP
Parameters
----------
filename : str
File to parse.
format : str
File format specifier.
time_bin_start_column : str
The name of the column with the start time for each bin.
time_bin_end_column : str, optional
The name of the column with the end time for each bin. Either this
option or ``time_bin_size_column`` should be specified.
time_bin_size_column : str, optional
The name of the column with the size for each bin. Either this
option or ``time_bin_end_column`` should be specified.
time_bin_size_unit : `astropy.units.Unit`, optional
If ``time_bin_size_column`` is specified but does not have a unit
set in the table, you can specify the unit manually.
time_format : str, optional
The time format for the start and end columns.
time_scale : str, optional
The time scale for the start and end columns.
*args : tuple, optional
Positional arguments passed through to the data reader.
**kwargs : dict, optional
Keyword arguments passed through to the data reader.
Returns
-------
out : `astropy.timeseries.binned.BinnedTimeSeries`
BinnedTimeSeries corresponding to the file.
"""
try:
# First we try the readers defined for the BinnedTimeSeries class
return super().read(filename, format=format, *args, **kwargs)
except TypeError:
# Otherwise we fall back to the default Table readers
if time_bin_start_column is None:
raise ValueError(
"``time_bin_start_column`` should be provided since the default"
" Table readers are being used."
)
if time_bin_end_column is None and time_bin_size_column is None:
raise ValueError(
"Either `time_bin_end_column` or `time_bin_size_column` should be"
" provided."
)
elif time_bin_end_column is not None and time_bin_size_column is not None:
raise ValueError(
"Cannot specify both `time_bin_end_column` and"
" `time_bin_size_column`."
)
table = Table.read(filename, format=format, *args, **kwargs)
if time_bin_start_column in table.colnames:
time_bin_start = Time(
table.columns[time_bin_start_column],
scale=time_scale,
format=time_format,
)
table.remove_column(time_bin_start_column)
else:
raise ValueError(
f"Bin start time column '{time_bin_start_column}' not found in the"
" input data."
)
if time_bin_end_column is not None:
if time_bin_end_column in table.colnames:
time_bin_end = Time(
table.columns[time_bin_end_column],
scale=time_scale,
format=time_format,
)
table.remove_column(time_bin_end_column)
else:
raise ValueError(
f"Bin end time column '{time_bin_end_column}' not found in the"
" input data."
)
time_bin_size = None
elif time_bin_size_column is not None:
if time_bin_size_column in table.colnames:
time_bin_size = table.columns[time_bin_size_column]
table.remove_column(time_bin_size_column)
else:
raise ValueError(
f"Bin size column '{time_bin_size_column}' not found in the"
" input data."
)
if time_bin_size.unit is None:
if time_bin_size_unit is None or not isinstance(
time_bin_size_unit, u.UnitBase
):
raise ValueError(
"The bin size unit should be specified as an astropy Unit"
" using ``time_bin_size_unit``."
)
time_bin_size = time_bin_size * time_bin_size_unit
else:
time_bin_size = u.Quantity(time_bin_size)
time_bin_end = None
if time_bin_start.isscalar and time_bin_size.isscalar:
return BinnedTimeSeries(
data=table,
time_bin_start=time_bin_start,
time_bin_end=time_bin_end,
time_bin_size=time_bin_size,
n_bins=len(table),
)
else:
return BinnedTimeSeries(
data=table,
time_bin_start=time_bin_start,
time_bin_end=time_bin_end,
time_bin_size=time_bin_size,
)
|
a7d672f77e37011b146d6780a5653789fe4ec24bbb16d5e1816bd462de16f48d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Utility functions for ``constants`` sub-package."""
import itertools
__all__ = []
def _get_c(codata, iaudata, module, not_in_module_only=True):
"""
Generator to return a Constant object.
Parameters
----------
codata, iaudata : obj
Modules containing CODATA and IAU constants of interest.
module : obj
Namespace module of interest.
not_in_module_only : bool
If ``True``, ignore constants that are already in the
namespace of ``module``.
    Yields
    ------
    _c : Constant
        Constant object to process.
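
    Examples
    --------
    A sketch (module choices are illustrative) of collecting the constants
    that are not yet present in a target namespace::

        >>> import astropy.constants as module  # doctest: +SKIP
        >>> from astropy.constants import codata2018, iau2015  # doctest: +SKIP
        >>> abbrevs = [c.abbrev for c in _get_c(codata2018, iau2015, module)]  # doctest: +SKIP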
"""
from .constant import Constant
for _nm, _c in itertools.chain(
sorted(vars(codata).items()), sorted(vars(iaudata).items())
):
if not isinstance(_c, Constant):
continue
elif (not not_in_module_only) or (_c.abbrev not in module.__dict__):
yield _c
def _set_c(
codata, iaudata, module, not_in_module_only=True, doclines=None, set_class=False
):
"""
Set constants in a given module namespace.
Parameters
----------
codata, iaudata : obj
Modules containing CODATA and IAU constants of interest.
module : obj
Namespace module to modify with the given ``codata`` and ``iaudata``.
not_in_module_only : bool
If ``True``, constants that are already in the namespace
of ``module`` will not be modified.
doclines : list or None
If a list is given, this list will be modified in-place to include
documentation of modified constants. This can be used to update
docstring of ``module``.
    set_class : bool
        If `True`, populate the namespace of ``module`` with new instances
        of ``_c.__class__`` instead of just ``_c`` from :func:`_get_c`.
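
    Examples
    --------
    A sketch (module choices are illustrative) of populating a namespace
    while collecting documentation lines::

        >>> import sys
        >>> from astropy.constants import codata2018, iau2015  # doctest: +SKIP
        >>> doclines = []
        >>> _set_c(codata2018, iau2015, sys.modules[__name__],
        ...        doclines=doclines)  # doctest: +SKIP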
"""
for _c in _get_c(codata, iaudata, module, not_in_module_only=not_in_module_only):
if set_class:
value = _c.__class__(
_c.abbrev,
_c.name,
_c.value,
_c._unit_string,
_c.uncertainty,
_c.reference,
)
else:
value = _c
setattr(module, _c.abbrev, value)
if doclines is not None:
doclines.append(
f"{_c.abbrev:^10} {_c.value:^14.9g} {_c._unit_string:^16} {_c.name}"
)
|
0542d2c966179e9369baacc239e19f5b9eed809c34c9c6b668c5dad6097a1f7e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import types
import warnings
import numpy as np
from astropy.units.core import Unit, UnitsError
from astropy.units.quantity import Quantity
from astropy.utils import lazyproperty
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ["Constant", "EMConstant"]
class ConstantMeta(type):
"""Metaclass for `~astropy.constants.Constant`. The primary purpose of this
is to wrap the double-underscore methods of `~astropy.units.Quantity`
which is the superclass of `~astropy.constants.Constant`.
    In particular this wraps the operator overloads such as `__add__` to
    prevent constants such as ``e`` from being used in expressions without
    specifying a system. The wrapper checks whether the constant is listed
    (by name) in ``Constant._has_incompatible_units``, a set of those
    constants whose definitions in different systems of units are
    physically incompatible. It also performs this check on each `Constant` if
it hasn't already been performed (the check is deferred until the
`Constant` is actually used in an expression to speed up import times,
among other reasons).
"""
def __new__(mcls, name, bases, d):
def wrap(meth):
@functools.wraps(meth)
def wrapper(self, *args, **kwargs):
name_lower = self.name.lower()
instances = self._registry[name_lower]
if not self._checked_units:
for inst in instances.values():
try:
self.unit.to(inst.unit)
except UnitsError:
self._has_incompatible_units.add(name_lower)
self._checked_units = True
if not self.system and name_lower in self._has_incompatible_units:
systems = sorted(x for x in instances if x)
raise TypeError(
f"Constant {self.abbrev!r} does not have physically compatible "
"units across all systems of units and cannot be "
"combined with other values without specifying a "
f"system (eg. {self.abbrev}.{systems[0]})"
)
return meth(self, *args, **kwargs)
return wrapper
# The wrapper applies to so many of the __ methods that it's easier to
# just exclude the ones it doesn't apply to
exclude = {
"__new__",
"__array_finalize__",
"__array_wrap__",
"__dir__",
"__getattr__",
"__init__",
"__str__",
"__repr__",
"__hash__",
"__iter__",
"__getitem__",
"__len__",
"__bool__",
"__quantity_subclass__",
"__setstate__",
}
for attr, value in vars(Quantity).items():
if (
isinstance(value, types.FunctionType)
and attr.startswith("__")
and attr.endswith("__")
and attr not in exclude
):
d[attr] = wrap(value)
return super().__new__(mcls, name, bases, d)
class Constant(Quantity, metaclass=ConstantMeta):
"""A physical or astronomical constant.
These objects are quantities that are meant to represent physical
constants.
Parameters
----------
abbrev : str
A typical ASCII text abbreviation of the constant, generally
the same as the Python variable used for this constant.
name : str
Full constant name.
value : numbers.Real
Constant value. Note that this should be a bare number, not a
|Quantity|.
unit : str
String representation of the constant units.
uncertainty : numbers.Real
Absolute uncertainty in constant value. Note that this should be
a bare number, not a |Quantity|.
reference : str, optional
Reference where the value is taken from.
system : str
System of units in which the constant is defined. This can be
`None` when the constant's units can be directly converted
between systems.
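
    Examples
    --------
    A minimal sketch with illustrative values (not an official definition)::

        >>> from astropy.constants import Constant
        >>> g0 = Constant('g0', 'Standard gravity', 9.80665, 'm / s2',
        ...               0.0, 'CODATA 2018', system='si')  # doctest: +SKIP
        >>> g0.si is g0  # doctest: +SKIP
        True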
"""
_registry = {}
_has_incompatible_units = set()
def __new__(
cls, abbrev, name, value, unit, uncertainty, reference=None, system=None
):
if reference is None:
reference = getattr(cls, "default_reference", None)
if reference is None:
raise TypeError(f"{cls} requires a reference.")
name_lower = name.lower()
instances = cls._registry.setdefault(name_lower, {})
# By-pass Quantity initialization, since units may not yet be
# initialized here, and we store the unit in string form.
inst = np.array(value).view(cls)
if system in instances:
warnings.warn(
f"Constant {name!r} already has a definition in "
f"the {system!r} system from {reference!r} reference",
AstropyUserWarning,
)
for c in instances.values():
if system is not None and not hasattr(c.__class__, system):
setattr(c, system, inst)
if c.system is not None and not hasattr(inst.__class__, c.system):
setattr(inst, c.system, c)
instances[system] = inst
inst._abbrev = abbrev
inst._name = name
inst._value = value
inst._unit_string = unit
inst._uncertainty = uncertainty
inst._reference = reference
inst._system = system
inst._checked_units = False
return inst
def __repr__(self):
return (
f"<{self.__class__} "
f"name={self.name!r} "
f"value={self.value} "
f"uncertainty={self.uncertainty} "
f"unit={str(self.unit)!r} "
f"reference={self.reference!r}>"
)
def __str__(self):
return (
f" Name = {self.name}\n"
f" Value = {self.value}\n"
f" Uncertainty = {self.uncertainty}\n"
f" Unit = {self.unit}\n"
f" Reference = {self.reference}"
)
def __quantity_subclass__(self, unit):
return super().__quantity_subclass__(unit)[0], False
def copy(self):
"""
Return a copy of this `Constant` instance. Since they are by
definition immutable, this merely returns another reference to
``self``.
"""
return self
__deepcopy__ = __copy__ = copy
@property
def abbrev(self):
"""A typical ASCII text abbreviation of the constant, also generally
the same as the Python variable used for this constant.
"""
return self._abbrev
@property
def name(self):
"""The full name of the constant."""
return self._name
@lazyproperty
def _unit(self):
"""The unit(s) in which this constant is defined."""
return Unit(self._unit_string)
@property
def uncertainty(self):
"""The known absolute uncertainty in this constant's value."""
return self._uncertainty
@property
def reference(self):
"""The source used for the value of this constant."""
return self._reference
@property
def system(self):
"""The system of units in which this constant is defined (typically
`None` so long as the constant's units can be directly converted
between systems).
"""
return self._system
def _instance_or_super(self, key):
instances = self._registry[self.name.lower()]
inst = instances.get(key)
if inst is not None:
return inst
else:
return getattr(super(), key)
@property
def si(self):
"""If the Constant is defined in the SI system return that instance of
the constant, else convert to a Quantity in the appropriate SI units.
"""
return self._instance_or_super("si")
@property
def cgs(self):
"""If the Constant is defined in the CGS system return that instance of
the constant, else convert to a Quantity in the appropriate CGS units.
"""
return self._instance_or_super("cgs")
def __array_finalize__(self, obj):
for attr in (
"_abbrev",
"_name",
"_value",
"_unit_string",
"_uncertainty",
"_reference",
"_system",
):
setattr(self, attr, getattr(obj, attr, None))
self._checked_units = getattr(obj, "_checked_units", False)
class EMConstant(Constant):
"""An electromagnetic constant."""
@property
def cgs(self):
"""Overridden for EMConstant to raise a `TypeError`
emphasizing that there are multiple EM extensions to CGS.
"""
raise TypeError(
"Cannot convert EM constants to cgs because there "
"are different systems for E.M constants within the "
"c.g.s system (ESU, Gaussian, etc.). Instead, "
"directly use the constant with the appropriate "
"suffix (e.g. e.esu, e.gauss, etc.)."
)
|
37723d9547d3f850bd13306a4594f47f337b4b3532dfba99629afdbc90b5de9e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""``astropy.cosmology`` contains classes and functions for cosmological
distance measures and other cosmology-related calculations.
See the `Astropy documentation
<https://docs.astropy.org/en/latest/cosmology/index.html>`_ for more
detailed usage examples and references.
"""
from . import core, flrw, funcs, parameter, units, utils
from . import io # needed before 'realizations' # isort: split
from . import realizations
from .core import *
from .flrw import *
from .funcs import *
from .parameter import *
from .realizations import available, default_cosmology
from .utils import *
__all__ = (
core.__all__
+ flrw.__all__ # cosmology classes
+ realizations.__all__ # instances thereof
+ ["units"]
# utils
+ funcs.__all__
+ parameter.__all__
+ utils.__all__
)
def __getattr__(name):
"""Get realizations using lazy import from
`PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_.
Raises
------
AttributeError
If "name" is not in :mod:`astropy.cosmology.realizations`
"""
if name not in available:
raise AttributeError(f"module {__name__!r} has no attribute {name!r}.")
return getattr(realizations, name)
def __dir__():
"""Directory, including lazily-imported objects."""
return __all__
|
7bc4c9109d80d3c0e5a7ef58d412ee5528edb1a7d3ffb0d95a6e8f1c50d63a40 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_equal
from astropy import units as u
from astropy.stats import funcs
from astropy.utils.compat.optional_deps import HAS_MPMATH, HAS_SCIPY
from astropy.utils.misc import NumpyRNGContext
def test_median_absolute_deviation():
with NumpyRNGContext(12345):
# test that it runs
randvar = np.random.randn(10000)
mad = funcs.median_absolute_deviation(randvar)
# test whether an array is returned if an axis is used
randvar = randvar.reshape((10, 1000))
mad = funcs.median_absolute_deviation(randvar, axis=1)
assert len(mad) == 10
assert mad.size < randvar.size
mad = funcs.median_absolute_deviation(randvar, axis=0)
assert len(mad) == 1000
assert mad.size < randvar.size
# Test some actual values in a 3 dimensional array
x = np.arange(3 * 4 * 5)
a = np.array([sum(x[: i + 1]) for i in range(len(x))]).reshape(3, 4, 5)
mad = funcs.median_absolute_deviation(a)
assert mad == 389.5
mad = funcs.median_absolute_deviation(a, axis=0)
assert_allclose(
mad,
[
[210.0, 230.0, 250.0, 270.0, 290.0],
[310.0, 330.0, 350.0, 370.0, 390.0],
[410.0, 430.0, 450.0, 470.0, 490.0],
[510.0, 530.0, 550.0, 570.0, 590.0],
],
)
mad = funcs.median_absolute_deviation(a, axis=1)
assert_allclose(
mad,
[
[27.5, 32.5, 37.5, 42.5, 47.5],
[127.5, 132.5, 137.5, 142.5, 147.5],
[227.5, 232.5, 237.5, 242.5, 247.5],
],
)
mad = funcs.median_absolute_deviation(a, axis=2)
assert_allclose(
mad,
[
[3.0, 8.0, 13.0, 18.0],
[23.0, 28.0, 33.0, 38.0],
[43.0, 48.0, 53.0, 58.0],
],
)
def test_median_absolute_deviation_masked():
    # Based on the changes introduced in #4658
# normal masked arrays without masked values are handled like normal
# numpy arrays
array = np.ma.array([1, 2, 3])
assert funcs.median_absolute_deviation(array) == 1
# masked numpy arrays return something different (rank 0 masked array)
# but one can still compare it without np.all!
array = np.ma.array([1, 4, 3], mask=[0, 1, 0])
assert funcs.median_absolute_deviation(array) == 1
# Just cross check if that's identical to the function on the unmasked
# values only
assert funcs.median_absolute_deviation(array) == (
funcs.median_absolute_deviation(array[~array.mask])
)
# Multidimensional masked array
array = np.ma.array([[1, 4], [2, 2]], mask=[[1, 0], [0, 0]])
funcs.median_absolute_deviation(array)
assert funcs.median_absolute_deviation(array) == 0
# Just to compare it with the data without mask:
assert funcs.median_absolute_deviation(array.data) == 0.5
# And check if they are also broadcasted correctly
np.testing.assert_array_equal(
funcs.median_absolute_deviation(array, axis=0).data, [0, 1]
)
np.testing.assert_array_equal(
funcs.median_absolute_deviation(array, axis=1).data, [0, 0]
)
def test_median_absolute_deviation_nans():
array = np.array([[1, 4, 3, np.nan], [2, 5, np.nan, 4]])
assert_equal(
funcs.median_absolute_deviation(array, func=np.nanmedian, axis=1), [1, 1]
)
array = np.ma.masked_invalid(array)
assert funcs.median_absolute_deviation(array) == 1
def test_median_absolute_deviation_nans_masked():
"""
    Regression test to ensure ignore_nan=True gives the same results for
ndarray and masked arrays that contain +/-inf.
"""
data1 = np.array([1.0, np.nan, 2, np.inf])
data2 = np.ma.masked_array(data1, mask=False)
mad1 = funcs.median_absolute_deviation(data1, ignore_nan=True)
mad2 = funcs.median_absolute_deviation(data2, ignore_nan=True)
assert_equal(mad1, mad2)
# ensure that input masked array is not modified
assert np.isnan(data2[1])
def test_median_absolute_deviation_multidim_axis():
array = np.ones((5, 4, 3)) * np.arange(5)[:, np.newaxis, np.newaxis]
mad1 = funcs.median_absolute_deviation(array, axis=(1, 2))
mad2 = funcs.median_absolute_deviation(array, axis=(2, 1))
assert_equal(mad1, np.zeros(5))
assert_equal(mad1, mad2)
def test_median_absolute_deviation_quantity():
    # Based on the changes introduced in #4658
# Just a small test that this function accepts Quantities and returns a
# quantity
a = np.array([1, 16, 5]) * u.m
mad = funcs.median_absolute_deviation(a)
# Check for the correct unit and that the result is identical to the
# result without units.
assert mad.unit == a.unit
assert mad.value == funcs.median_absolute_deviation(a.value)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_binom_conf_interval():
# Test Wilson and Jeffreys interval for corner cases:
# Corner cases: k = 0, k = n, confidence_level = 0., confidence_level = 1.
n = 5
k = [0, 4, 5]
for conf in [0.0, 0.5, 1.0]:
res = funcs.binom_conf_interval(k, n, confidence_level=conf, interval="wilson")
assert ((res >= 0.0) & (res <= 1.0)).all()
res = funcs.binom_conf_interval(
k, n, confidence_level=conf, interval="jeffreys"
)
assert ((res >= 0.0) & (res <= 1.0)).all()
# Test Jeffreys interval accuracy against table in Brown et al. (2001).
# (See `binom_conf_interval` docstring for reference.)
k = [0, 1, 2, 3, 4]
n = 7
conf = 0.95
result = funcs.binom_conf_interval(k, n, confidence_level=conf, interval="jeffreys")
table = np.array(
[[0.000, 0.016, 0.065, 0.139, 0.234], [0.292, 0.501, 0.648, 0.766, 0.861]]
)
assert_allclose(result, table, atol=1.0e-3, rtol=0.0)
# Test scalar version
result = np.array(
[
funcs.binom_conf_interval(
kval, n, confidence_level=conf, interval="jeffreys"
)
for kval in k
]
).transpose()
assert_allclose(result, table, atol=1.0e-3, rtol=0.0)
# Test flat
result = funcs.binom_conf_interval(k, n, confidence_level=conf, interval="flat")
table = np.array(
[
[0.0, 0.03185, 0.08523, 0.15701, 0.24486],
[0.36941, 0.52650, 0.65085, 0.75513, 0.84298],
]
)
assert_allclose(result, table, atol=1.0e-3, rtol=0.0)
# Test Wald interval
result = funcs.binom_conf_interval(0, 5, interval="wald")
assert_allclose(result, 0.0) # conf interval is [0, 0] when k = 0
result = funcs.binom_conf_interval(5, 5, interval="wald")
assert_allclose(result, 1.0) # conf interval is [1, 1] when k = n
result = funcs.binom_conf_interval(
500, 1000, confidence_level=0.68269, interval="wald"
)
assert_allclose(result[0], 0.5 - 0.5 / np.sqrt(1000.0))
assert_allclose(result[1], 0.5 + 0.5 / np.sqrt(1000.0))
# Test shapes
k = 3
n = 7
for interval in ["wald", "wilson", "jeffreys", "flat"]:
result = funcs.binom_conf_interval(k, n, interval=interval)
assert result.shape == (2,)
k = np.array(k)
for interval in ["wald", "wilson", "jeffreys", "flat"]:
result = funcs.binom_conf_interval(k, n, interval=interval)
assert result.shape == (2,)
n = np.array(n)
for interval in ["wald", "wilson", "jeffreys", "flat"]:
result = funcs.binom_conf_interval(k, n, interval=interval)
assert result.shape == (2,)
k = np.array([1, 3, 5])
for interval in ["wald", "wilson", "jeffreys", "flat"]:
result = funcs.binom_conf_interval(k, n, interval=interval)
assert result.shape == (2, 3)
n = np.array([5, 5, 5])
for interval in ["wald", "wilson", "jeffreys", "flat"]:
result = funcs.binom_conf_interval(k, n, interval=interval)
assert result.shape == (2, 3)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_binned_binom_proportion():
# Check that it works.
nbins = 20
x = np.linspace(0.0, 10.0, 100) # Guarantee an `x` in every bin.
success = np.ones(len(x), dtype=bool)
bin_ctr, bin_hw, p, perr = funcs.binned_binom_proportion(x, success, bins=nbins)
# Check shape of outputs
assert bin_ctr.shape == (nbins,)
assert bin_hw.shape == (nbins,)
assert p.shape == (nbins,)
assert perr.shape == (2, nbins)
# Check that p is 1 in all bins, since success = True for all `x`.
assert (p == 1.0).all()
# Check that p is 0 in all bins if success = False for all `x`.
success[:] = False
bin_ctr, bin_hw, p, perr = funcs.binned_binom_proportion(x, success, bins=nbins)
assert (p == 0.0).all()
def test_binned_binom_proportion_exception():
with pytest.raises(ValueError):
funcs.binned_binom_proportion([0], [1, 2], confidence_level=0.75)
def test_signal_to_noise_oir_ccd():
result = funcs.signal_to_noise_oir_ccd(1, 25, 0, 0, 0, 1)
assert result == 5.0
# check to make sure gain works
result = funcs.signal_to_noise_oir_ccd(1, 5, 0, 0, 0, 1, 5)
assert result == 5.0
# now add in sky, dark current, and read noise
# make sure the snr goes down
result = funcs.signal_to_noise_oir_ccd(1, 25, 1, 0, 0, 1)
assert result < 5.0
result = funcs.signal_to_noise_oir_ccd(1, 25, 0, 1, 0, 1)
assert result < 5.0
result = funcs.signal_to_noise_oir_ccd(1, 25, 0, 0, 1, 1)
assert result < 5.0
# make sure snr increases with time
result = funcs.signal_to_noise_oir_ccd(2, 25, 0, 0, 0, 1)
assert result > 5.0
def test_bootstrap():
bootarr = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
# test general bootstrapping
answer = np.array([[7, 4, 8, 5, 7, 0, 3, 7, 8, 5], [4, 8, 8, 3, 6, 5, 2, 8, 6, 2]])
with NumpyRNGContext(42):
assert_equal(answer, funcs.bootstrap(bootarr, 2))
# test with a bootfunction
with NumpyRNGContext(42):
bootresult = np.mean(funcs.bootstrap(bootarr, 10000, bootfunc=np.mean))
assert_allclose(np.mean(bootarr), bootresult, atol=0.01)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_bootstrap_multiple_outputs():
from scipy.stats import spearmanr
# test a bootfunc with several output values
# return just bootstrapping with one output from bootfunc
with NumpyRNGContext(42):
bootarr = np.array(
[[1, 2, 3, 4, 5, 6, 7, 8, 9, 0], [4, 8, 8, 3, 6, 5, 2, 8, 6, 2]]
).T
answer = np.array((0.19425, 0.02094))
def bootfunc(x):
return spearmanr(x)[0]
bootresult = funcs.bootstrap(bootarr, 2, bootfunc=bootfunc)
assert_allclose(answer, bootresult, atol=1e-3)
# test a bootfunc with several output values
# return just bootstrapping with the second output from bootfunc
with NumpyRNGContext(42):
bootarr = np.array(
[[1, 2, 3, 4, 5, 6, 7, 8, 9, 0], [4, 8, 8, 3, 6, 5, 2, 8, 6, 2]]
).T
answer = np.array((0.5907, 0.9541))
def bootfunc(x):
return spearmanr(x)[1]
bootresult = funcs.bootstrap(bootarr, 2, bootfunc=bootfunc)
assert_allclose(answer, bootresult, atol=1e-3)
# return just bootstrapping with two outputs from bootfunc
with NumpyRNGContext(42):
answer = np.array(((0.1942, 0.5907), (0.0209, 0.9541), (0.4286, 0.2165)))
def bootfunc(x):
return spearmanr(x)
bootresult = funcs.bootstrap(bootarr, 3, bootfunc=bootfunc)
assert bootresult.shape == (3, 2)
assert_allclose(answer, bootresult, atol=1e-3)
def test_mad_std():
with NumpyRNGContext(12345):
data = np.random.normal(5, 2, size=(100, 100))
assert_allclose(funcs.mad_std(data), 2.0, rtol=0.05)
def test_mad_std_scalar_return():
with NumpyRNGContext(12345):
data = np.random.normal(5, 2, size=(10, 10))
# make a masked array with no masked points
data = np.ma.masked_where(np.isnan(data), data)
rslt = funcs.mad_std(data)
# want a scalar result, NOT a masked array
assert np.isscalar(rslt)
data[5, 5] = np.nan
rslt = funcs.mad_std(data, ignore_nan=True)
assert np.isscalar(rslt)
rslt = funcs.mad_std(data)
assert np.isscalar(rslt)
assert np.isnan(rslt)
def test_mad_std_warns():
with NumpyRNGContext(12345):
data = np.random.normal(5, 2, size=(10, 10))
data[5, 5] = np.nan
rslt = funcs.mad_std(data, ignore_nan=False)
assert np.isnan(rslt)
@pytest.mark.filterwarnings("ignore:Invalid value encountered in median")
def test_mad_std_withnan():
with NumpyRNGContext(12345):
data = np.empty([102, 102])
data[:] = np.nan
data[1:-1, 1:-1] = np.random.normal(5, 2, size=(100, 100))
assert_allclose(funcs.mad_std(data, ignore_nan=True), 2.0, rtol=0.05)
assert np.isnan(funcs.mad_std([1, 2, 3, 4, 5, np.nan]))
assert_allclose(
funcs.mad_std([1, 2, 3, 4, 5, np.nan], ignore_nan=True), 1.482602218505602
)
def test_mad_std_with_axis():
data = np.array([[1, 2, 3, 4], [4, 3, 2, 1]])
# results follow data symmetry
result_axis0 = np.array([2.22390333, 0.74130111, 0.74130111, 2.22390333])
result_axis1 = np.array([1.48260222, 1.48260222])
assert_allclose(funcs.mad_std(data, axis=0), result_axis0)
assert_allclose(funcs.mad_std(data, axis=1), result_axis1)
def test_mad_std_with_axis_and_nan():
data = np.array([[1, 2, 3, 4, np.nan], [4, 3, 2, 1, np.nan]])
# results follow data symmetry
result_axis0 = np.array([2.22390333, 0.74130111, 0.74130111, 2.22390333, np.nan])
result_axis1 = np.array([1.48260222, 1.48260222])
with pytest.warns(RuntimeWarning, match=r"All-NaN slice encountered"):
assert_allclose(funcs.mad_std(data, axis=0, ignore_nan=True), result_axis0)
assert_allclose(funcs.mad_std(data, axis=1, ignore_nan=True), result_axis1)
def test_mad_std_with_axis_and_nan_array_type():
# mad_std should return a masked array if given one, and not otherwise
data = np.array([[1, 2, 3, 4, np.nan], [4, 3, 2, 1, np.nan]])
with pytest.warns(RuntimeWarning, match=r"All-NaN slice encountered"):
result = funcs.mad_std(data, axis=0, ignore_nan=True)
assert not np.ma.isMaskedArray(result)
data = np.ma.masked_where(np.isnan(data), data)
result = funcs.mad_std(data, axis=0, ignore_nan=True)
assert np.ma.isMaskedArray(result)
def test_gaussian_fwhm_to_sigma():
fwhm = 2.0 * np.sqrt(2.0 * np.log(2.0))
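    # For a Gaussian, FWHM = 2 * sqrt(2 * ln(2)) * sigma (~2.3548 * sigma),
    # so this FWHM corresponds to sigma = 1, which the factor should recover.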
assert_allclose(funcs.gaussian_fwhm_to_sigma * fwhm, 1.0, rtol=1.0e-6)
def test_gaussian_sigma_to_fwhm():
sigma = 1.0 / (2.0 * np.sqrt(2.0 * np.log(2.0)))
assert_allclose(funcs.gaussian_sigma_to_fwhm * sigma, 1.0, rtol=1.0e-6)
def test_gaussian_sigma_to_fwhm_to_sigma():
assert_allclose(funcs.gaussian_fwhm_to_sigma * funcs.gaussian_sigma_to_fwhm, 1.0)
def test_poisson_conf_interval_rootn():
assert_allclose(funcs.poisson_conf_interval(16, interval="root-n"), (12, 20))
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize(
"interval", ["root-n-0", "pearson", "sherpagehrels", "frequentist-confidence"]
)
def test_poisson_conf_large(interval):
n = 100
assert_allclose(
funcs.poisson_conf_interval(n, interval="root-n"),
funcs.poisson_conf_interval(n, interval=interval),
rtol=2e-2,
)
def test_poisson_conf_array_rootn0_zero():
n = np.zeros((3, 4, 5))
assert_allclose(
funcs.poisson_conf_interval(n, interval="root-n-0"),
funcs.poisson_conf_interval(n[0, 0, 0], interval="root-n-0")[
:, None, None, None
]
* np.ones_like(n),
)
assert not np.any(np.isnan(funcs.poisson_conf_interval(n, interval="root-n-0")))
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_poisson_conf_array_frequentist_confidence_zero():
n = np.zeros((3, 4, 5))
assert_allclose(
funcs.poisson_conf_interval(n, interval="frequentist-confidence"),
funcs.poisson_conf_interval(n[0, 0, 0], interval="frequentist-confidence")[
:, None, None, None
]
* np.ones_like(n),
)
assert not np.any(np.isnan(funcs.poisson_conf_interval(n, interval="root-n-0")))
def test_poisson_conf_list_rootn0_zero():
n = [0, 0, 0]
assert_allclose(
funcs.poisson_conf_interval(n, interval="root-n-0"), [[0, 0, 0], [1, 1, 1]]
)
assert not np.any(np.isnan(funcs.poisson_conf_interval(n, interval="root-n-0")))
def test_poisson_conf_array_rootn0():
n = 7 * np.ones((3, 4, 5))
assert_allclose(
funcs.poisson_conf_interval(n, interval="root-n-0"),
funcs.poisson_conf_interval(n[0, 0, 0], interval="root-n-0")[
:, None, None, None
]
* np.ones_like(n),
)
n[1, 2, 3] = 0
assert not np.any(np.isnan(funcs.poisson_conf_interval(n, interval="root-n-0")))
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_poisson_conf_array_fc():
n = 7 * np.ones((3, 4, 5))
assert_allclose(
funcs.poisson_conf_interval(n, interval="frequentist-confidence"),
funcs.poisson_conf_interval(n[0, 0, 0], interval="frequentist-confidence")[
:, None, None, None
]
* np.ones_like(n),
)
n[1, 2, 3] = 0
assert not np.any(
np.isnan(funcs.poisson_conf_interval(n, interval="frequentist-confidence"))
)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_poisson_conf_frequentist_confidence_gehrels():
"""Test intervals against those published in Gehrels 1986"""
nlh = np.array(
[
(0, 0, 1.841),
(1, 0.173, 3.300),
(2, 0.708, 4.638),
(3, 1.367, 5.918),
(4, 2.086, 7.163),
(5, 2.840, 8.382),
(6, 3.620, 9.584),
(7, 4.419, 10.77),
(8, 5.232, 11.95),
(9, 6.057, 13.11),
(10, 6.891, 14.27),
]
)
assert_allclose(
funcs.poisson_conf_interval(nlh[:, 0], interval="frequentist-confidence"),
nlh[:, 1:].T,
rtol=0.001,
atol=0.001,
)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_poisson_conf_frequentist_confidence_gehrels_2sigma():
"""Test intervals against those published in Gehrels 1986
Note: I think there's a typo (transposition of digits) in Gehrels 1986,
specifically for the two-sigma lower limit for 3 events; they claim
0.569 but this function returns 0.59623...
"""
nlh = np.array(
[
(0, 2, 0, 3.783),
(1, 2, 2.30e-2, 5.683),
(2, 2, 0.230, 7.348),
(3, 2, 0.596, 8.902),
(4, 2, 1.058, 10.39),
(5, 2, 1.583, 11.82),
(6, 2, 2.153, 13.22),
(7, 2, 2.758, 14.59),
(8, 2, 3.391, 15.94),
(9, 2, 4.046, 17.27),
(10, 2, 4.719, 18.58),
]
)
assert_allclose(
funcs.poisson_conf_interval(
nlh[:, 0], sigma=2, interval="frequentist-confidence"
).T,
nlh[:, 2:],
rtol=0.01,
)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_poisson_conf_frequentist_confidence_gehrels_3sigma():
"""Test intervals against those published in Gehrels 1986"""
nlh = np.array(
[
(0, 3, 0, 6.608),
(1, 3, 1.35e-3, 8.900),
(2, 3, 5.29e-2, 10.87),
(3, 3, 0.212, 12.68),
(4, 3, 0.465, 14.39),
(5, 3, 0.792, 16.03),
(6, 3, 1.175, 17.62),
(7, 3, 1.603, 19.17),
(8, 3, 2.068, 20.69),
(9, 3, 2.563, 22.18),
(10, 3, 3.084, 23.64),
]
)
assert_allclose(
funcs.poisson_conf_interval(
nlh[:, 0], sigma=3, interval="frequentist-confidence"
).T,
nlh[:, 2:],
rtol=0.01,
verbose=True,
)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("n", [0, 1, 2, 3, 10, 20, 100])
def test_poisson_conf_gehrels86(n):
assert_allclose(
funcs.poisson_conf_interval(n, interval="sherpagehrels")[1],
funcs.poisson_conf_interval(n, interval="frequentist-confidence")[1],
rtol=0.02,
)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_scipy_poisson_limit():
"""Test that the lower-level routine gives the snae number.
Test numbers are from table1 1, 3 in
Kraft, Burrows and Nousek in
`ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_
"""
assert_allclose(
funcs._scipy_kraft_burrows_nousek(5, 2.5, 0.99), (0, 10.67), rtol=1e-3
)
assert_allclose(
funcs._scipy_kraft_burrows_nousek(np.int32(5.0), 2.5, 0.99),
(0, 10.67),
rtol=1e-3,
)
assert_allclose(
funcs._scipy_kraft_burrows_nousek(np.int64(5.0), 2.5, 0.99),
(0, 10.67),
rtol=1e-3,
)
assert_allclose(
funcs._scipy_kraft_burrows_nousek(5, np.float32(2.5), 0.99),
(0, 10.67),
rtol=1e-3,
)
assert_allclose(
funcs._scipy_kraft_burrows_nousek(5, np.float64(2.5), 0.99),
(0, 10.67),
rtol=1e-3,
)
assert_allclose(
funcs._scipy_kraft_burrows_nousek(5, 2.5, np.float32(0.99)),
(0, 10.67),
rtol=1e-3,
)
assert_allclose(
funcs._scipy_kraft_burrows_nousek(5, 2.5, np.float64(0.99)),
(0, 10.67),
rtol=1e-3,
)
conf = funcs.poisson_conf_interval(
[5, 6],
"kraft-burrows-nousek",
background=[2.5, 2.0],
confidence_level=[0.99, 0.9],
)
assert_allclose(conf[:, 0], (0, 10.67), rtol=1e-3)
assert_allclose(conf[:, 1], (0.81, 8.99), rtol=5e-3)
@pytest.mark.skipif(not HAS_MPMATH, reason="requires mpmath")
def test_mpmath_poisson_limit():
assert_allclose(
funcs._mpmath_kraft_burrows_nousek(1.0, 0.1, 0.99), (0.00, 6.54), rtol=5e-3
)
assert_allclose(
funcs._mpmath_kraft_burrows_nousek(1.0, 0.5, 0.95), (0.00, 4.36), rtol=5e-3
)
assert_allclose(
funcs._mpmath_kraft_burrows_nousek(5.0, 0.0, 0.99), (1.17, 13.32), rtol=5e-3
)
assert_allclose(
funcs._mpmath_kraft_burrows_nousek(5.0, 2.5, 0.99), (0, 10.67), rtol=1e-3
)
assert_allclose(
funcs._mpmath_kraft_burrows_nousek(np.int32(6), 2.0, 0.9),
(0.81, 8.99),
rtol=5e-3,
)
assert_allclose(
funcs._mpmath_kraft_burrows_nousek(np.int64(6), 2.0, 0.9),
(0.81, 8.99),
rtol=5e-3,
)
assert_allclose(
funcs._mpmath_kraft_burrows_nousek(6.0, np.float32(2.0), 0.9),
(0.81, 8.99),
rtol=5e-3,
)
assert_allclose(
funcs._mpmath_kraft_burrows_nousek(6.0, np.float64(2.0), 0.9),
(0.81, 8.99),
rtol=5e-3,
)
assert_allclose(
funcs._mpmath_kraft_burrows_nousek(6.0, 2.0, np.float32(0.9)),
(0.81, 8.99),
rtol=5e-3,
)
assert_allclose(
funcs._mpmath_kraft_burrows_nousek(6.0, 2.0, np.float64(0.9)),
(0.81, 8.99),
rtol=5e-3,
)
assert_allclose(
funcs._mpmath_kraft_burrows_nousek(5.0, 2.5, 0.99), (0, 10.67), rtol=1e-3
)
assert_allclose(
funcs.poisson_conf_interval(
n=160,
background=154.543,
confidence_level=0.95,
interval="kraft-burrows-nousek",
)[:, 0],
(0, 30.30454909),
)
# For this one we do not have the "true" answer from the publication,
# but we want to make sure that it at least runs without error
# see https://github.com/astropy/astropy/issues/9596
_ = funcs._mpmath_kraft_burrows_nousek(1000.0, 900.0, 0.9)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_poisson_conf_value_errors():
with pytest.raises(ValueError, match="Only sigma=1 supported"):
funcs.poisson_conf_interval([5, 6], "root-n", sigma=2)
with pytest.raises(ValueError, match="background not supported"):
funcs.poisson_conf_interval([5, 6], "pearson", background=[2.5, 2.0])
with pytest.raises(ValueError, match="confidence_level not supported"):
funcs.poisson_conf_interval(
[5, 6], "sherpagehrels", confidence_level=[2.5, 2.0]
)
with pytest.raises(ValueError, match="Invalid method"):
funcs.poisson_conf_interval(1, "foo")
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_poisson_conf_kbn_value_errors():
with pytest.raises(ValueError, match="number between 0 and 1"):
funcs.poisson_conf_interval(
5, "kraft-burrows-nousek", background=2.5, confidence_level=99
)
with pytest.raises(ValueError, match="Set confidence_level for method"):
funcs.poisson_conf_interval(5, "kraft-burrows-nousek", background=2.5)
with pytest.raises(ValueError, match="Background must be"):
funcs.poisson_conf_interval(
5, "kraft-burrows-nousek", background=-2.5, confidence_level=0.99
)
with pytest.raises(TypeError, match="Number of counts must be integer"):
funcs.poisson_conf_interval(
5.0, "kraft-burrows-nousek", background=2.5, confidence_level=0.99
)
with pytest.raises(TypeError, match="Number of counts must be integer"):
funcs.poisson_conf_interval(
[5.0, 6.0],
"kraft-burrows-nousek",
background=[2.5, 2.0],
confidence_level=[0.99, 0.9],
)
@pytest.mark.skipif(HAS_SCIPY or HAS_MPMATH, reason="requires neither scipy nor mpmath")
def test_poisson_limit_nodependencies():
with pytest.raises(ImportError):
funcs.poisson_conf_interval(
20, interval="kraft-burrows-nousek", background=10.0, confidence_level=0.95
)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("N", [10, 100, 1000, 10000])
def test_uniform(N):
with NumpyRNGContext(12345):
assert funcs.kuiper(np.random.random(N))[1] > 0.01
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize(
"N,M", [(100, 100), (20, 100), (100, 20), (10, 20), (5, 5), (1000, 100)]
)
def test_kuiper_two_uniform(N, M):
with NumpyRNGContext(12345):
assert funcs.kuiper_two(np.random.random(N), np.random.random(M))[1] > 0.01
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize(
"N,M", [(100, 100), (20, 100), (100, 20), (10, 20), (5, 5), (1000, 100)]
)
def test_kuiper_two_nonuniform(N, M):
with NumpyRNGContext(12345):
assert (
funcs.kuiper_two(np.random.random(N) ** 2, np.random.random(M) ** 2)[1]
> 0.01
)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_detect_kuiper_two_different():
with NumpyRNGContext(12345):
D, f = funcs.kuiper_two(np.random.random(500) * 0.5, np.random.random(500))
assert f < 0.01
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize(
"N,M", [(100, 100), (20, 100), (100, 20), (10, 20), (5, 5), (1000, 100)]
)
def test_fpp_kuiper_two(N, M):
from scipy.stats import binom
with NumpyRNGContext(12345):
R = 100
fpp = 0.05
fps = 0
for i in range(R):
D, f = funcs.kuiper_two(np.random.random(N), np.random.random(M))
if f < fpp:
fps += 1
assert binom(R, fpp).sf(fps - 1) > 0.005
assert binom(R, fpp).cdf(fps - 1) > 0.005
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_kuiper_false_positive_probability():
fpp = funcs.kuiper_false_positive_probability(0.5353333333333409, 1500.0)
assert fpp == 0
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_histogram():
from scipy.stats import chi2
with NumpyRNGContext(1234):
a, b = 0.3, 3.14
s = np.random.uniform(a, b, 10000) % 1
b, w = funcs.fold_intervals([(a, b, 1.0 / (b - a))])
h = funcs.histogram_intervals(16, b, w)
nn, bb = np.histogram(s, bins=len(h), range=(0, 1))
uu = np.sqrt(nn)
nn, uu = len(h) * nn / h / len(s), len(h) * uu / h / len(s)
c2 = np.sum(((nn - 1) / uu) ** 2)
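        # Two-sided goodness-of-fit check: the chi-square statistic should be
        # neither implausibly large nor implausibly small at the 1% level.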
assert chi2(len(h)).cdf(c2) > 0.01
assert chi2(len(h)).sf(c2) > 0.01
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize(
"ii,rr",
[
((4, (0, 1), (1,)), (1, 1, 1, 1)),
((2, (0, 1), (1,)), (1, 1)),
((4, (0, 0.5, 1), (1, 1)), (1, 1, 1, 1)),
((4, (0, 0.5, 1), (1, 2)), (1, 1, 2, 2)),
((3, (0, 0.5, 1), (1, 2)), (1, 1.5, 2)),
],
)
def test_histogram_intervals_known(ii, rr):
with NumpyRNGContext(1234):
assert_allclose(funcs.histogram_intervals(*ii), rr)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize(
"N,m,p",
[
pytest.param(100, 10000, 0.01, marks=pytest.mark.skip("Test too slow")),
pytest.param(300, 10000, 0.001, marks=pytest.mark.skip("Test too slow")),
(10, 10000, 0.001),
(3, 10000, 0.001),
],
)
def test_uniform_binomial(N, m, p):
"""Check that the false positive probability is right
In particular, run m trials with N uniformly-distributed photons
and check that the number of false positives is consistent with
a binomial distribution. The more trials, the tighter the bounds
but the longer the runtime.
"""
from scipy.stats import binom
with NumpyRNGContext(1234):
fpps = np.array([funcs.kuiper(np.random.random(N))[1] for i in range(m)])
assert (fpps >= 0).all()
assert (fpps <= 1).all()
low = binom(n=m, p=p).ppf(0.01)
high = binom(n=m, p=p).ppf(0.99)
assert low < sum(fpps < p) < high
|
4951c63a14f55c56e028ae03123a542245a9974529a8042ee9a2a2c22c0701dd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy import coordinates, time
from astropy import units as u
from astropy.table import Column, NdarrayMixin, QTable, Table, table_helpers, unique
from astropy.utils.compat import NUMPY_LT_1_22, NUMPY_LT_1_22_1
from astropy.utils.exceptions import AstropyUserWarning
def sort_eq(list1, list2):
return sorted(list1) == sorted(list2)
def test_column_group_by(T1):
for masked in (False, True):
t1 = QTable(T1, masked=masked)
t1a = t1["a"].copy()
# Group by a Column (i.e. numpy array)
t1ag = t1a.group_by(t1["a"])
assert np.all(t1ag.groups.indices == np.array([0, 1, 4, 8]))
# Group by a Table
t1ag = t1a.group_by(t1["a", "b"])
assert np.all(t1ag.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8]))
# Group by a numpy structured array
t1ag = t1a.group_by(t1["a", "b"].as_array())
assert np.all(t1ag.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8]))
def test_table_group_by(T1):
"""
Test basic table group_by functionality for possible key types and for
masked/unmasked tables.
"""
for masked in (False, True):
t1 = QTable(T1, masked=masked)
# Group by a single column key specified by name
tg = t1.group_by("a")
assert np.all(tg.groups.indices == np.array([0, 1, 4, 8]))
assert str(tg.groups) == "<TableGroups indices=[0 1 4 8]>"
assert str(tg["a"].groups) == "<ColumnGroups indices=[0 1 4 8]>"
# Sorted by 'a' and in original order for rest
assert tg.pformat() == [
" a b c d q ",
" m ",
"--- --- --- --- ---",
" 0 a 0.0 4 4.0",
" 1 b 3.0 5 5.0",
" 1 a 2.0 6 6.0",
" 1 a 1.0 7 7.0",
" 2 c 7.0 0 0.0",
" 2 b 5.0 1 1.0",
" 2 b 6.0 2 2.0",
" 2 a 4.0 3 3.0",
]
assert tg.meta["ta"] == 1
assert tg["c"].meta["a"] == 1
assert tg["c"].description == "column c"
# Group by a table column
tg2 = t1.group_by(t1["a"])
assert tg.pformat() == tg2.pformat()
# Group by two columns spec'd by name
for keys in (["a", "b"], ("a", "b")):
tg = t1.group_by(keys)
assert np.all(tg.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8]))
# Sorted by 'a', 'b' and in original order for rest
assert tg.pformat() == [
" a b c d q ",
" m ",
"--- --- --- --- ---",
" 0 a 0.0 4 4.0",
" 1 a 2.0 6 6.0",
" 1 a 1.0 7 7.0",
" 1 b 3.0 5 5.0",
" 2 a 4.0 3 3.0",
" 2 b 5.0 1 1.0",
" 2 b 6.0 2 2.0",
" 2 c 7.0 0 0.0",
]
# Group by a Table
tg2 = t1.group_by(t1["a", "b"])
assert tg.pformat() == tg2.pformat()
# Group by a structured array
tg2 = t1.group_by(t1["a", "b"].as_array())
assert tg.pformat() == tg2.pformat()
# Group by a simple ndarray
tg = t1.group_by(np.array([0, 1, 0, 1, 2, 1, 0, 0]))
assert np.all(tg.groups.indices == np.array([0, 4, 7, 8]))
assert tg.pformat() == [
" a b c d q ",
" m ",
"--- --- --- --- ---",
" 2 c 7.0 0 0.0",
" 2 b 6.0 2 2.0",
" 1 a 2.0 6 6.0",
" 1 a 1.0 7 7.0",
" 2 b 5.0 1 1.0",
" 2 a 4.0 3 3.0",
" 1 b 3.0 5 5.0",
" 0 a 0.0 4 4.0",
]
def test_groups_keys(T1):
tg = T1.group_by("a")
keys = tg.groups.keys
assert keys.dtype.names == ("a",)
assert np.all(keys["a"] == np.array([0, 1, 2]))
tg = T1.group_by(["a", "b"])
keys = tg.groups.keys
assert keys.dtype.names == ("a", "b")
assert np.all(keys["a"] == np.array([0, 1, 1, 2, 2, 2]))
assert np.all(keys["b"] == np.array(["a", "a", "b", "a", "b", "c"]))
# Grouping by Column ignores column name
tg = T1.group_by(T1["b"])
keys = tg.groups.keys
assert keys.dtype.names is None
def test_groups_iterator(T1):
tg = T1.group_by("a")
for ii, group in enumerate(tg.groups):
assert group.pformat() == tg.groups[ii].pformat()
assert group["a"][0] == tg["a"][tg.groups.indices[ii]]
def test_grouped_copy(T1):
"""
Test that copying a table or column copies the groups properly
"""
for masked in (False, True):
t1 = QTable(T1, masked=masked)
tg = t1.group_by("a")
tgc = tg.copy()
assert np.all(tgc.groups.indices == tg.groups.indices)
assert np.all(tgc.groups.keys == tg.groups.keys)
tac = tg["a"].copy()
assert np.all(tac.groups.indices == tg["a"].groups.indices)
c1 = t1["a"].copy()
gc1 = c1.group_by(t1["a"])
gc1c = gc1.copy()
assert np.all(gc1c.groups.indices == np.array([0, 1, 4, 8]))
def test_grouped_slicing(T1):
"""
Test that slicing a table removes previous grouping
"""
for masked in (False, True):
t1 = QTable(T1, masked=masked)
# Regular slice of a table
tg = t1.group_by("a")
tg2 = tg[3:5]
assert np.all(tg2.groups.indices == np.array([0, len(tg2)]))
assert tg2.groups.keys is None
def test_group_column_from_table(T1):
"""
Group a column that is part of a table
"""
cg = T1["c"].group_by(np.array(T1["a"]))
assert np.all(cg.groups.keys == np.array([0, 1, 2]))
assert np.all(cg.groups.indices == np.array([0, 1, 4, 8]))
def test_table_groups_mask_index(T1):
"""
Use boolean mask as item in __getitem__ for groups
"""
for masked in (False, True):
t1 = Table(T1, masked=masked).group_by("a")
t2 = t1.groups[np.array([True, False, True])]
assert len(t2.groups) == 2
assert t2.groups[0].pformat() == t1.groups[0].pformat()
assert t2.groups[1].pformat() == t1.groups[2].pformat()
assert np.all(t2.groups.keys["a"] == np.array([0, 2]))
def test_table_groups_array_index(T1):
"""
Use numpy array as item in __getitem__ for groups
"""
for masked in (False, True):
t1 = Table(T1, masked=masked).group_by("a")
t2 = t1.groups[np.array([0, 2])]
assert len(t2.groups) == 2
assert t2.groups[0].pformat() == t1.groups[0].pformat()
assert t2.groups[1].pformat() == t1.groups[2].pformat()
assert np.all(t2.groups.keys["a"] == np.array([0, 2]))
def test_table_groups_slicing(T1):
"""
Test that slicing table groups works
"""
for masked in (False, True):
t1 = Table(T1, masked=masked).group_by("a")
# slice(0, 2)
t2 = t1.groups[0:2]
assert len(t2.groups) == 2
assert t2.groups[0].pformat() == t1.groups[0].pformat()
assert t2.groups[1].pformat() == t1.groups[1].pformat()
assert np.all(t2.groups.keys["a"] == np.array([0, 1]))
# slice(1, 2)
t2 = t1.groups[1:2]
assert len(t2.groups) == 1
assert t2.groups[0].pformat() == t1.groups[1].pformat()
assert np.all(t2.groups.keys["a"] == np.array([1]))
# slice(0, 3, 2)
t2 = t1.groups[0:3:2]
assert len(t2.groups) == 2
assert t2.groups[0].pformat() == t1.groups[0].pformat()
assert t2.groups[1].pformat() == t1.groups[2].pformat()
assert np.all(t2.groups.keys["a"] == np.array([0, 2]))
def test_grouped_item_access(T1):
"""
Test that column slicing preserves grouping
"""
for masked in (False, True):
t1 = Table(T1, masked=masked)
# Regular slice of a table
tg = t1.group_by("a")
tgs = tg["a", "c", "d"]
assert np.all(tgs.groups.keys == tg.groups.keys)
assert np.all(tgs.groups.indices == tg.groups.indices)
tgsa = tgs.groups.aggregate(np.sum)
assert tgsa.pformat() == [
" a c d ",
"--- ---- ---",
" 0 0.0 4",
" 1 6.0 18",
" 2 22.0 6",
]
tgs = tg["c", "d"]
assert np.all(tgs.groups.keys == tg.groups.keys)
assert np.all(tgs.groups.indices == tg.groups.indices)
tgsa = tgs.groups.aggregate(np.sum)
assert tgsa.pformat() == [
" c d ",
"---- ---",
" 0.0 4",
" 6.0 18",
"22.0 6",
]
def test_mutable_operations(T1):
"""
    Operations like adding or deleting a row should remove grouping,
    while adding, removing, or renaming a column should retain grouping.
"""
for masked in (False, True):
t1 = QTable(T1, masked=masked)
# add row
tg = t1.group_by("a")
tg.add_row((0, "a", 3.0, 4, 4 * u.m))
assert np.all(tg.groups.indices == np.array([0, len(tg)]))
assert tg.groups.keys is None
# remove row
tg = t1.group_by("a")
tg.remove_row(4)
assert np.all(tg.groups.indices == np.array([0, len(tg)]))
assert tg.groups.keys is None
# add column
tg = t1.group_by("a")
indices = tg.groups.indices.copy()
tg.add_column(Column(name="e", data=np.arange(len(tg))))
assert np.all(tg.groups.indices == indices)
assert np.all(tg["e"].groups.indices == indices)
assert np.all(tg["e"].groups.keys == tg.groups.keys)
# remove column (not key column)
tg = t1.group_by("a")
tg.remove_column("b")
assert np.all(tg.groups.indices == indices)
# Still has original key col names
assert tg.groups.keys.dtype.names == ("a",)
assert np.all(tg["a"].groups.indices == indices)
# remove key column
tg = t1.group_by("a")
tg.remove_column("a")
assert np.all(tg.groups.indices == indices)
assert tg.groups.keys.dtype.names == ("a",)
assert np.all(tg["b"].groups.indices == indices)
# rename key column
tg = t1.group_by("a")
tg.rename_column("a", "aa")
assert np.all(tg.groups.indices == indices)
assert tg.groups.keys.dtype.names == ("a",)
assert np.all(tg["aa"].groups.indices == indices)
def test_group_by_masked(T1):
t1m = QTable(T1, masked=True)
t1m["c"].mask[4] = True
t1m["d"].mask[5] = True
assert t1m.group_by("a").pformat() == [
" a b c d q ",
" m ",
"--- --- --- --- ---",
" 0 a -- 4 4.0",
" 1 b 3.0 -- 5.0",
" 1 a 2.0 6 6.0",
" 1 a 1.0 7 7.0",
" 2 c 7.0 0 0.0",
" 2 b 5.0 1 1.0",
" 2 b 6.0 2 2.0",
" 2 a 4.0 3 3.0",
]
def test_group_by_errors(T1):
"""
Appropriate errors get raised.
"""
# Bad column name as string
with pytest.raises(ValueError):
T1.group_by("f")
# Bad column names in list
with pytest.raises(ValueError):
T1.group_by(["f", "g"])
# Wrong length array
with pytest.raises(ValueError):
T1.group_by(np.array([1, 2]))
# Wrong type
with pytest.raises(TypeError):
T1.group_by(None)
# Masked key column
t1 = QTable(T1, masked=True)
t1["a"].mask[4] = True
with pytest.raises(ValueError):
t1.group_by("a")
def test_groups_keys_meta(T1):
"""
    Make sure that ``keys.meta['grouped_by_table_cols']`` is set correctly.
"""
# Group by column in this table
tg = T1.group_by("a")
assert tg.groups.keys.meta["grouped_by_table_cols"] is True
assert tg["c"].groups.keys.meta["grouped_by_table_cols"] is True
assert tg.groups[1].groups.keys.meta["grouped_by_table_cols"] is True
assert (
tg["d"]
.groups[np.array([False, True, True])]
.groups.keys.meta["grouped_by_table_cols"]
is True
)
# Group by external Table
tg = T1.group_by(T1["a", "b"])
assert tg.groups.keys.meta["grouped_by_table_cols"] is False
assert tg["c"].groups.keys.meta["grouped_by_table_cols"] is False
assert tg.groups[1].groups.keys.meta["grouped_by_table_cols"] is False
# Group by external numpy array
tg = T1.group_by(T1["a", "b"].as_array())
assert not hasattr(tg.groups.keys, "meta")
assert not hasattr(tg["c"].groups.keys, "meta")
# Group by Column
tg = T1.group_by(T1["a"])
assert "grouped_by_table_cols" not in tg.groups.keys.meta
assert "grouped_by_table_cols" not in tg["c"].groups.keys.meta
def test_table_aggregate(T1):
"""
Aggregate a table
"""
# Table with only summable cols
t1 = T1["a", "c", "d"]
tg = t1.group_by("a")
tga = tg.groups.aggregate(np.sum)
assert tga.pformat() == [
" a c d ",
"--- ---- ---",
" 0 0.0 4",
" 1 6.0 18",
" 2 22.0 6",
]
# Reverts to default groups
assert np.all(tga.groups.indices == np.array([0, 3]))
assert tga.groups.keys is None
# metadata survives
assert tga.meta["ta"] == 1
assert tga["c"].meta["a"] == 1
assert tga["c"].description == "column c"
# Aggregate with np.sum with masked elements. This results
# in one group with no elements, hence a nan result and conversion
# to float for the 'd' column.
t1m = QTable(T1, masked=True)
t1m["c"].mask[4:6] = True
t1m["d"].mask[4:6] = True
tg = t1m.group_by("a")
with pytest.warns(UserWarning, match="converting a masked element to nan"):
tga = tg.groups.aggregate(np.sum)
assert tga.pformat() == [
" a c d q ",
" m ",
"--- ---- ---- ----",
" 0 nan nan 4.0",
" 1 3.0 13.0 18.0",
" 2 22.0 6.0 6.0",
]
# Aggregate with np.sum with masked elements, but where every
# group has at least one remaining (unmasked) element. Then
# the int column stays as an int.
t1m = QTable(t1, masked=True)
t1m["c"].mask[5] = True
t1m["d"].mask[5] = True
tg = t1m.group_by("a")
tga = tg.groups.aggregate(np.sum)
assert tga.pformat() == [
" a c d ",
"--- ---- ---",
" 0 0.0 4",
" 1 3.0 13",
" 2 22.0 6",
]
    # Aggregate with a column type that cannot be supplied to the aggregating
# function. This raises a warning but still works.
tg = T1.group_by("a")
with pytest.warns(AstropyUserWarning, match="Cannot aggregate column"):
tga = tg.groups.aggregate(np.sum)
assert tga.pformat() == [
" a c d q ",
" m ",
"--- ---- --- ----",
" 0 0.0 4 4.0",
" 1 6.0 18 18.0",
" 2 22.0 6 6.0",
]
def test_table_aggregate_reduceat(T1):
"""
Aggregate table with functions which have a reduceat method
"""
# Comparison functions without reduceat
def np_mean(x):
return np.mean(x)
def np_sum(x):
return np.sum(x)
def np_add(x):
return np.add(x)
# Table with only summable cols
t1 = T1["a", "c", "d"]
tg = t1.group_by("a")
# Comparison
tga_r = tg.groups.aggregate(np.sum)
tga_a = tg.groups.aggregate(np.add)
tga_n = tg.groups.aggregate(np_sum)
assert np.all(tga_r == tga_n)
assert np.all(tga_a == tga_n)
assert tga_n.pformat() == [
" a c d ",
"--- ---- ---",
" 0 0.0 4",
" 1 6.0 18",
" 2 22.0 6",
]
tga_r = tg.groups.aggregate(np.mean)
tga_n = tg.groups.aggregate(np_mean)
assert np.all(tga_r == tga_n)
assert tga_n.pformat() == [
" a c d ",
"--- --- ---",
" 0 0.0 4.0",
" 1 2.0 6.0",
" 2 5.5 1.5",
]
# Binary ufunc np_add should raise warning without reduceat
t2 = T1["a", "c"]
tg = t2.group_by("a")
with pytest.warns(AstropyUserWarning, match="Cannot aggregate column"):
tga = tg.groups.aggregate(np_add)
assert tga.pformat() == [" a ", "---", " 0", " 1", " 2"]
def test_column_aggregate(T1):
"""
Aggregate a single table column
"""
for masked in (False, True):
tg = QTable(T1, masked=masked).group_by("a")
tga = tg["c"].groups.aggregate(np.sum)
assert tga.pformat() == [" c ", "----", " 0.0", " 6.0", "22.0"]
@pytest.mark.skipif(
not NUMPY_LT_1_22 and NUMPY_LT_1_22_1,
reason="https://github.com/numpy/numpy/issues/20699",
)
def test_column_aggregate_f8():
"""https://github.com/astropy/astropy/issues/12706"""
# Just want to make sure it does not crash again.
for masked in (False, True):
tg = Table({"a": np.arange(2, dtype=">f8")}, masked=masked).group_by("a")
tga = tg["a"].groups.aggregate(np.sum)
assert tga.pformat() == [" a ", "---", "0.0", "1.0"]
def test_table_filter():
"""
Table groups filtering
"""
def all_positive(table, key_colnames):
return all(
np.all(table[colname] >= 0)
for colname in table.colnames
if colname not in key_colnames
)
# Negative value in 'a' column should not filter because it is a key col
t = Table.read(
[
" a c d",
" -2 7.0 0",
" -2 5.0 1",
" 0 0.0 4",
" 1 3.0 5",
" 1 2.0 -6",
" 1 1.0 7",
" 3 3.0 5",
" 3 -2.0 6",
" 3 1.0 7",
],
format="ascii",
)
tg = t.group_by("a")
t2 = tg.groups.filter(all_positive)
assert t2.groups[0].pformat() == [
" a c d ",
"--- --- ---",
" -2 7.0 0",
" -2 5.0 1",
]
assert t2.groups[1].pformat() == [" a c d ", "--- --- ---", " 0 0.0 4"]
def test_column_filter():
"""
    Column groups filtering
"""
def all_positive(column):
if np.any(column < 0):
return False
return True
# Negative value in 'a' column should not filter because it is a key col
t = Table.read(
[
" a c d",
" -2 7.0 0",
" -2 5.0 1",
" 0 0.0 4",
" 1 3.0 5",
" 1 2.0 -6",
" 1 1.0 7",
" 3 3.0 5",
" 3 -2.0 6",
" 3 1.0 7",
],
format="ascii",
)
tg = t.group_by("a")
c2 = tg["c"].groups.filter(all_positive)
assert len(c2.groups) == 3
assert c2.groups[0].pformat() == [" c ", "---", "7.0", "5.0"]
assert c2.groups[1].pformat() == [" c ", "---", "0.0"]
assert c2.groups[2].pformat() == [" c ", "---", "3.0", "2.0", "1.0"]
def test_group_mixins():
"""
Test grouping a table with mixin columns
"""
# Setup mixins
idx = np.arange(4)
x = np.array([3.0, 1.0, 2.0, 1.0])
q = x * u.m
lon = coordinates.Longitude(x * u.deg)
lat = coordinates.Latitude(x * u.deg)
# For Time do J2000.0 + few * 0.1 ns (this requires > 64 bit precision)
tm = time.Time(2000, format="jyear") + time.TimeDelta(x * 1e-10, format="sec")
sc = coordinates.SkyCoord(ra=lon, dec=lat)
aw = table_helpers.ArrayWrapper(x)
nd = np.array([(3, "c"), (1, "a"), (2, "b"), (1, "a")], dtype="<i4,|S1").view(
NdarrayMixin
)
qt = QTable(
[idx, x, q, lon, lat, tm, sc, aw, nd],
names=["idx", "x", "q", "lon", "lat", "tm", "sc", "aw", "nd"],
)
# Test group_by with each supported mixin type
mixin_keys = ["x", "q", "lon", "lat", "tm", "sc", "aw", "nd"]
for key in mixin_keys:
qtg = qt.group_by(key)
# Test that it got the sort order correct
assert np.all(qtg["idx"] == [1, 3, 2, 0])
# Test that the groups are right
# Note: skip testing SkyCoord column because that doesn't have equality
for name in ["x", "q", "lon", "lat", "tm", "aw", "nd"]:
assert np.all(qt[name][[1, 3]] == qtg.groups[0][name])
assert np.all(qt[name][[2]] == qtg.groups[1][name])
assert np.all(qt[name][[0]] == qtg.groups[2][name])
# Test that unique also works with mixins since most of the work is
# done with group_by(). This is using *every* mixin as key.
uqt = unique(qt, keys=mixin_keys)
assert len(uqt) == 3
assert np.all(uqt["idx"] == [1, 2, 0])
assert np.all(uqt["x"] == [1.0, 2.0, 3.0])
# Column group_by() with mixins
idxg = qt["idx"].group_by(qt[mixin_keys])
assert np.all(idxg == [1, 3, 2, 0])
@pytest.mark.parametrize(
"col",
[
time.TimeDelta([1, 2], format="sec"),
time.Time([1, 2], format="cxcsec"),
coordinates.SkyCoord([1, 2], [3, 4], unit="deg,deg"),
],
)
def test_group_mixins_unsupported(col):
"""Test that aggregating unsupported mixins produces a warning only"""
t = Table([[1, 1], [3, 4], col], names=["a", "b", "mix"])
tg = t.group_by("a")
with pytest.warns(AstropyUserWarning, match="Cannot aggregate column 'mix'"):
tg.groups.aggregate(np.sum)
|
aa221e7801fae23aa46f638cccf412851ca5a35e7d2413e1ab4e5e26f38e5b72 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides functions to help with testing against iraf tasks.
"""
import numpy as np
from astropy.logger import log
iraf_models_map = {1.0: "Chebyshev", 2.0: "Legendre", 3.0: "Spline3", 4.0: "Spline1"}
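# IRAF database records encode the fit function as a numeric code (the first
# element of the record's coefficients/surface array); this maps those codes
# to the corresponding function names.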
def get_records(fname):
"""
Read the records of an IRAF database file into a python list.
Parameters
----------
fname : str
name of an IRAF database file
Returns
-------
A list of records
"""
    with open(fname) as f:
        dtb = f.read()
recs = dtb.split("begin")[1:]
records = [Record(r) for r in recs]
return records
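# Illustrative usage (the file name below is hypothetical; any text-format
# IRAF database file with "begin"-delimited records should work):
#
#     records = get_records("database/idimage")
#     for rec in records:
#         print(rec.taskname, sorted(rec.fields))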
def get_database_string(fname):
"""
Read an IRAF database file.
Parameters
----------
fname : str
name of an IRAF database file
Returns
-------
the database file as a string
"""
    with open(fname) as f:
        dtb = f.read()
return dtb
class Record:
"""
A base class for all records - represents an IRAF database record.
Attributes
----------
recstr: string
the record as a string
fields: dict
the fields in the record
taskname: string
the name of the task which created the database file
"""
def __init__(self, recstr):
self.recstr = recstr
self.fields = self.get_fields()
self.taskname = self.get_task_name()
def aslist(self):
reclist = self.recstr.split("\n")
reclist = [entry.strip() for entry in reclist]
        reclist = [entry for entry in reclist if len(entry) > 0]
return reclist
def get_fields(self):
        # parse the record fields into a dict; array fields become numpy arrays
fields = {}
flist = self.aslist()
numfields = len(flist)
for i in range(numfields):
line = flist[i]
if line and line[0].isalpha():
field = line.split()
if i + 1 < numfields:
if not flist[i + 1][0].isalpha():
fields[field[0]] = self.read_array_field(
flist[i : i + int(field[1]) + 1]
)
else:
fields[field[0]] = " ".join(s for s in field[1:])
else:
fields[field[0]] = " ".join(s for s in field[1:])
else:
continue
return fields
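    # Sketch of the record layout assumed by get_fields: scalar fields are
    # "name value ..." on a single line, while array fields give a length on
    # the first line followed by that many indented numeric lines, e.g.
    #
    #     task    identify
    #     features        2
    #             10.5   4500.0   4500.1
    #             20.5   4600.0   4600.2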
def get_task_name(self):
try:
return self.fields["task"]
except KeyError:
return None
def read_array_field(self, fieldlist):
# Turn an iraf record array field into a numpy array
fieldline = [entry.split() for entry in fieldlist[1:]]
        # take only the first 3 columns;
        # the identify task also writes strings at the end of some field lines
xyz = [entry[:3] for entry in fieldline]
        try:
            farr = np.array(xyz)
        except Exception:
            log.debug(f"Could not read array field {fieldlist[0].split()[0]}")
            raise
        return farr.astype(np.float64)
class IdentifyRecord(Record):
"""
Represents a database record for the onedspec.identify task.
Attributes
----------
x: array
the X values of the identified features
this represents values on axis1 (image rows)
y: int
        the Y value at which the features were identified
        (image column)
z: array
the values which X maps into
modelname: string
the function used to fit the data
nterms: int
        number of terms of the polynomial which was fit to the data
        (in IRAF this is the number of coefficients, not the order)
mrange: list
the range of the data
coeff: array
function (modelname) coefficients
"""
def __init__(self, recstr):
super().__init__(recstr)
self._flatcoeff = self.fields["coefficients"].flatten()
self.x = self.fields["features"][:, 0]
self.y = self.get_ydata()
self.z = self.fields["features"][:, 1]
self.modelname = self.get_model_name()
self.nterms = self.get_nterms()
self.mrange = self.get_range()
self.coeff = self.get_coeff()
def get_model_name(self):
return iraf_models_map[self._flatcoeff[0]]
def get_nterms(self):
return self._flatcoeff[1]
def get_range(self):
low = self._flatcoeff[2]
high = self._flatcoeff[3]
return [low, high]
def get_coeff(self):
return self._flatcoeff[4:]
def get_ydata(self):
image = self.fields["image"]
left = image.find("[") + 1
right = image.find("]")
section = image[left:right]
if "," in section:
yind = image.find(",") + 1
return int(image[yind:-1])
else:
return int(section)
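# Minimal sketch of using IdentifyRecord, assuming ``recstr`` holds the text
# of a single identify record (everything after its "begin" marker):
#
#     rec = IdentifyRecord(recstr)
#     print(rec.modelname, rec.nterms, rec.mrange)
#     print(rec.x, rec.z)  # feature positions and the values they map to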
class FitcoordsRecord(Record):
"""
    Represents a database record for the longslit.fitcoords task.
Attributes
----------
modelname: string
the function used to fit the data
xorder: int
number of terms in x
yorder: int
number of terms in y
xbounds: list
data range in x
ybounds: list
data range in y
coeff: array
function coefficients
"""
def __init__(self, recstr):
super().__init__(recstr)
self._surface = self.fields["surface"].flatten()
self.modelname = iraf_models_map[self._surface[0]]
self.xorder = self._surface[1]
self.yorder = self._surface[2]
self.xbounds = [self._surface[4], self._surface[5]]
self.ybounds = [self._surface[6], self._surface[7]]
self.coeff = self.get_coeff()
def get_coeff(self):
return self._surface[8:]
class IDB:
"""
Base class for an IRAF identify database.
Attributes
----------
records: list
a list of all `IdentifyRecord` in the database
numrecords: int
number of records
"""
def __init__(self, dtbstr):
self.records = [IdentifyRecord(rstr) for rstr in self.aslist(dtbstr)]
self.numrecords = len(self.records)
def aslist(self, dtb):
# return a list of records
# if the first one is a comment remove it from the list
rl = dtb.split("begin")
try:
rl0 = rl[0].split("\n")
except Exception:
return rl
if len(rl0) == 2 and rl0[0].startswith("#") and not rl0[1].strip():
return rl[1:]
else:
return rl
class ReidentifyRecord(IDB):
"""
Represents a database record for the onedspec.reidentify task.
"""
def __init__(self, databasestr):
super().__init__(databasestr)
self.x = np.array([r.x for r in self.records])
self.y = self.get_ydata()
self.z = np.array([r.z for r in self.records])
def get_ydata(self):
y = np.ones(self.x.shape)
y = y * np.array([r.y for r in self.records])[:, np.newaxis]
return y
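# Sketch of reading a whole reidentify database (file name hypothetical):
#
#     dbstr = get_database_string("database/idimage")
#     rr = ReidentifyRecord(dbstr)
#     print(rr.numrecords, rr.x.shape, rr.y.shape)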
|
3df9e30d1478a26284fac538213a426d647d66e00cb0187559938641427765bc | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Module to test fitting routines
"""
# pylint: disable=invalid-name
import os.path
import unittest.mock as mk
from importlib.metadata import EntryPoint
from itertools import combinations
from unittest import mock
import numpy as np
import pytest
from numpy import linalg
from numpy.testing import assert_allclose, assert_almost_equal, assert_equal
from astropy.modeling import models
from astropy.modeling.core import Fittable2DModel, Parameter
from astropy.modeling.fitting import (
DogBoxLSQFitter,
Fitter,
FittingWithOutlierRemoval,
JointFitter,
LevMarLSQFitter,
LinearLSQFitter,
LMLSQFitter,
NonFiniteValueError,
SimplexLSQFitter,
SLSQPLSQFitter,
TRFLSQFitter,
_NLLSQFitter,
populate_entry_points,
)
from astropy.modeling.optimizers import Optimization
from astropy.stats import sigma_clip
from astropy.utils import NumpyRNGContext
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyUserWarning
from . import irafutil
if HAS_SCIPY:
from scipy import optimize
fitters = [SimplexLSQFitter, SLSQPLSQFitter]
non_linear_fitters = [LevMarLSQFitter, TRFLSQFitter, LMLSQFitter, DogBoxLSQFitter]
_RANDOM_SEED = 0x1337
class TestPolynomial2D:
"""Tests for 2D polynomial fitting."""
def setup_class(self):
self.model = models.Polynomial2D(2)
self.y, self.x = np.mgrid[:5, :5]
def poly2(x, y):
return 1 + 2 * x + 3 * x**2 + 4 * y + 5 * y**2 + 6 * x * y
self.z = poly2(self.x, self.y)
def test_poly2D_fitting(self):
fitter = LinearLSQFitter()
v = self.model.fit_deriv(x=self.x, y=self.y)
p = linalg.lstsq(v, self.z.flatten(), rcond=-1)[0]
new_model = fitter(self.model, self.x, self.y, self.z)
assert_allclose(new_model.parameters, p)
def test_eval(self):
fitter = LinearLSQFitter()
new_model = fitter(self.model, self.x, self.y, self.z)
assert_allclose(new_model(self.x, self.y), self.z)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_nonlinear_fitting(self, fitter):
fitter = fitter()
self.model.parameters = [0.6, 1.8, 2.9, 3.7, 4.9, 6.7]
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
new_model = fitter(self.model, self.x, self.y, self.z)
assert_allclose(new_model.parameters, [1, 2, 3, 4, 5, 6])
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_compare_nonlinear_fitting(self):
self.model.parameters = [0.6, 1.8, 2.9, 3.7, 4.9, 6.7]
fit_models = []
for fitter in non_linear_fitters:
fitter = fitter()
with pytest.warns(
AstropyUserWarning, match=r"Model is linear in parameters"
):
fit_models.append(fitter(self.model, self.x, self.y, self.z))
for pair in combinations(fit_models, 2):
assert_allclose(pair[0].parameters, pair[1].parameters)
class TestICheb2D:
"""
Tests 2D Chebyshev polynomial fitting
Create a 2D polynomial (z) using Polynomial2DModel and default coefficients
Fit z using a ICheb2D model
Evaluate the ICheb2D polynomial and compare with the initial z
"""
def setup_class(self):
self.pmodel = models.Polynomial2D(2)
self.y, self.x = np.mgrid[:5, :5]
self.z = self.pmodel(self.x, self.y)
self.cheb2 = models.Chebyshev2D(2, 2)
self.fitter = LinearLSQFitter()
def test_default_params(self):
self.cheb2.parameters = np.arange(9)
p = np.array(
[1344.0, 1772.0, 400.0, 1860.0, 2448.0, 552.0, 432.0, 568.0, 128.0]
)
z = self.cheb2(self.x, self.y)
model = self.fitter(self.cheb2, self.x, self.y, z)
assert_almost_equal(model.parameters, p)
def test_poly2D_cheb2D(self):
model = self.fitter(self.cheb2, self.x, self.y, self.z)
z1 = model(self.x, self.y)
assert_almost_equal(self.z, z1)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_chebyshev2D_nonlinear_fitting(self, fitter):
fitter = fitter()
cheb2d = models.Chebyshev2D(2, 2)
cheb2d.parameters = np.arange(9)
z = cheb2d(self.x, self.y)
cheb2d.parameters = [0.1, 0.6, 1.8, 2.9, 3.7, 4.9, 6.7, 7.5, 8.9]
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
model = fitter(cheb2d, self.x, self.y, z)
assert_allclose(model.parameters, [0, 1, 2, 3, 4, 5, 6, 7, 8], atol=10**-9)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_chebyshev2D_nonlinear_fitting_with_weights(self, fitter):
fitter = fitter()
cheb2d = models.Chebyshev2D(2, 2)
cheb2d.parameters = np.arange(9)
z = cheb2d(self.x, self.y)
cheb2d.parameters = [0.1, 0.6, 1.8, 2.9, 3.7, 4.9, 6.7, 7.5, 8.9]
weights = np.ones_like(self.y)
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
model = fitter(cheb2d, self.x, self.y, z, weights=weights)
assert_allclose(model.parameters, [0, 1, 2, 3, 4, 5, 6, 7, 8], atol=10**-9)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
class TestJointFitter:
"""
    Tests the joint fitting routine using two Gaussian1D models
"""
def setup_class(self):
"""
        Create two Gaussian1D models and some data with noise.
        Create a joint fitter for the two models, keeping the amplitude
        parameter common between them.
"""
self.g1 = models.Gaussian1D(10, mean=14.9, stddev=0.3)
self.g2 = models.Gaussian1D(10, mean=13, stddev=0.4)
self.jf = JointFitter(
[self.g1, self.g2], {self.g1: ["amplitude"], self.g2: ["amplitude"]}, [9.8]
)
self.x = np.arange(10, 20, 0.1)
y1 = self.g1(self.x)
y2 = self.g2(self.x)
with NumpyRNGContext(_RANDOM_SEED):
n = np.random.randn(100)
self.ny1 = y1 + 2 * n
self.ny2 = y2 + 2 * n
self.jf(self.x, self.ny1, self.x, self.ny2)
def test_joint_parameter(self):
"""
Tests that the amplitude of the two models is the same
"""
assert_allclose(self.jf.fitparams[0], self.g1.parameters[0])
assert_allclose(self.jf.fitparams[0], self.g2.parameters[0])
def test_joint_fitter(self):
"""
        Tests the fitting routine against an equivalent direct
        `scipy.optimize.leastsq` procedure and compares the fitted parameters.
"""
p1 = [14.9, 0.3]
p2 = [13, 0.4]
A = 9.8
p = np.r_[A, p1, p2]
def model(A, p, x):
return A * np.exp(-0.5 / p[1] ** 2 * (x - p[0]) ** 2)
def errfunc(p, x1, y1, x2, y2):
return np.ravel(
np.r_[model(p[0], p[1:3], x1) - y1, model(p[0], p[3:], x2) - y2]
)
coeff, _ = optimize.leastsq(
errfunc, p, args=(self.x, self.ny1, self.x, self.ny2)
)
assert_allclose(coeff, self.jf.fitparams, rtol=10 ** (-2))
class TestLinearLSQFitter:
def test_compound_model_raises_error(self):
"""Test that if an user tries to use a compound model, raises an error"""
MESSAGE = r"Model must be simple, not compound"
with pytest.raises(ValueError, match=MESSAGE):
init_model1 = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
init_model2 = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
init_model_comp = init_model1 + init_model2
x = np.arange(10)
y = init_model_comp(x, model_set_axis=False)
fitter = LinearLSQFitter()
fitter(init_model_comp, x, y)
def test_chebyshev1D(self):
"""Tests fitting a 1D Chebyshev polynomial to some real world data."""
test_file = get_pkg_data_filename(os.path.join("data", "idcompspec.fits"))
with open(test_file) as f:
lines = f.read()
reclist = lines.split("begin")
record = irafutil.IdentifyRecord(reclist[1])
coeffs = record.coeff
order = int(record.fields["order"])
initial_model = models.Chebyshev1D(order - 1, domain=record.get_range())
fitter = LinearLSQFitter()
fitted_model = fitter(initial_model, record.x, record.z)
assert_allclose(fitted_model.parameters, np.array(coeffs), rtol=10e-2)
def test_linear_fit_model_set(self):
"""Tests fitting multiple models simultaneously."""
init_model = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
x = np.arange(10)
y_expected = init_model(x, model_set_axis=False)
assert y_expected.shape == (2, 10)
# Add a bit of random noise
with NumpyRNGContext(_RANDOM_SEED):
y = y_expected + np.random.normal(0, 0.01, size=y_expected.shape)
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model(x, model_set_axis=False), y_expected, rtol=1e-1)
def test_linear_fit_2d_model_set(self):
"""Tests fitted multiple 2-D models simultaneously."""
init_model = models.Polynomial2D(degree=2, c0_0=[1, 1], n_models=2)
x = np.arange(10)
y = np.arange(10)
z_expected = init_model(x, y, model_set_axis=False)
assert z_expected.shape == (2, 10)
# Add a bit of random noise
with NumpyRNGContext(_RANDOM_SEED):
z = z_expected + np.random.normal(0, 0.01, size=z_expected.shape)
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, z)
assert_allclose(fitted_model(x, y, model_set_axis=False), z_expected, rtol=1e-1)
def test_linear_fit_fixed_parameter(self):
"""
Tests fitting a polynomial model with a fixed parameter (issue #6135).
"""
init_model = models.Polynomial1D(degree=2, c1=1)
init_model.c1.fixed = True
x = np.arange(10)
y = 2 + x + 0.5 * x * x
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.parameters, [2.0, 1.0, 0.5], atol=1e-14)
def test_linear_fit_model_set_fixed_parameter(self):
"""
Tests fitting a polynomial model set with a fixed parameter (#6135).
"""
init_model = models.Polynomial1D(degree=2, c1=[1, -2], n_models=2)
init_model.c1.fixed = True
x = np.arange(10)
yy = np.array([2 + x + 0.5 * x * x, -2 * x])
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, yy)
assert_allclose(fitted_model.c0, [2.0, 0.0], atol=1e-14)
assert_allclose(fitted_model.c1, [1.0, -2.0], atol=1e-14)
assert_allclose(fitted_model.c2, [0.5, 0.0], atol=1e-14)
def test_linear_fit_2d_model_set_fixed_parameters(self):
"""
Tests fitting a 2d polynomial model set with fixed parameters (#6135).
"""
init_model = models.Polynomial2D(
degree=2,
c1_0=[1, 2],
c0_1=[-0.5, 1],
n_models=2,
fixed={"c1_0": True, "c0_1": True},
)
x, y = np.mgrid[0:5, 0:5]
zz = np.array([1 + x - 0.5 * y + 0.1 * x * x, 2 * x + y - 0.2 * y * y])
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, zz)
assert_allclose(fitted_model(x, y, model_set_axis=False), zz, atol=1e-14)
def test_linear_fit_model_set_masked_values(self):
"""
Tests model set fitting with masked value(s) (#4824, #6819).
"""
# NB. For single models, there is an equivalent doctest.
init_model = models.Polynomial1D(degree=1, n_models=2)
x = np.arange(10)
y = np.ma.masked_array([2 * x + 1, x - 2], mask=np.zeros_like([x, x]))
y[0, 7] = 100.0 # throw off fit coefficients if unmasked
y.mask[0, 7] = True
y[1, 1:3] = -100.0
y.mask[1, 1:3] = True
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.c0, [1.0, -2.0], atol=1e-14)
assert_allclose(fitted_model.c1, [2.0, 1.0], atol=1e-14)
def test_linear_fit_2d_model_set_masked_values(self):
"""
Tests 2D model set fitting with masked value(s) (#4824, #6819).
"""
init_model = models.Polynomial2D(1, n_models=2)
x, y = np.mgrid[0:5, 0:5]
z = np.ma.masked_array(
[2 * x + 3 * y + 1, x - 0.5 * y - 2], mask=np.zeros_like([x, x])
)
z[0, 3, 1] = -1000.0 # throw off fit coefficients if unmasked
z.mask[0, 3, 1] = True
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, z)
assert_allclose(fitted_model.c0_0, [1.0, -2.0], atol=1e-14)
assert_allclose(fitted_model.c1_0, [2.0, 1.0], atol=1e-14)
assert_allclose(fitted_model.c0_1, [3.0, -0.5], atol=1e-14)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
class TestNonLinearFitters:
"""Tests non-linear least squares fitting and the SLSQP algorithm."""
def setup_class(self):
self.initial_values = [100, 5, 1]
self.xdata = np.arange(0, 10, 0.1)
sigma = 4.0 * np.ones_like(self.xdata)
with NumpyRNGContext(_RANDOM_SEED):
yerror = np.random.normal(0, sigma)
def func(p, x):
return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)
self.ydata = func(self.initial_values, self.xdata) + yerror
self.gauss = models.Gaussian1D(100, 5, stddev=1)
@pytest.mark.parametrize("fitter0", non_linear_fitters)
@pytest.mark.parametrize("fitter1", non_linear_fitters)
def test_estimated_vs_analytic_deriv(self, fitter0, fitter1):
"""
        Runs each pair of non-linear fitters with estimated and
        analytic derivatives of a `Gaussian1D` and compares the results.
"""
fitter0 = fitter0()
model = fitter0(self.gauss, self.xdata, self.ydata)
g1e = models.Gaussian1D(100, 5.0, stddev=1)
fitter1 = fitter1()
emodel = fitter1(g1e, self.xdata, self.ydata, estimate_jacobian=True)
assert_allclose(model.parameters, emodel.parameters, rtol=10 ** (-3))
@pytest.mark.parametrize("fitter0", non_linear_fitters)
@pytest.mark.parametrize("fitter1", non_linear_fitters)
def test_estimated_vs_analytic_deriv_with_weights(self, fitter0, fitter1):
"""
        Runs each pair of non-linear fitters, with weights, using estimated
        and analytic derivatives of a `Gaussian1D`, and compares the results.
"""
weights = 1.0 / (self.ydata / 10.0)
fitter0 = fitter0()
model = fitter0(self.gauss, self.xdata, self.ydata, weights=weights)
g1e = models.Gaussian1D(100, 5.0, stddev=1)
fitter1 = fitter1()
emodel = fitter1(
g1e, self.xdata, self.ydata, weights=weights, estimate_jacobian=True
)
assert_allclose(model.parameters, emodel.parameters, rtol=10 ** (-3))
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_with_optimize(self, fitter):
"""
        Tests results from the non-linear fitters against
        `scipy.optimize.leastsq`.
"""
fitter = fitter()
model = fitter(self.gauss, self.xdata, self.ydata, estimate_jacobian=True)
def func(p, x):
return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)
def errfunc(p, x, y):
return func(p, x) - y
result = optimize.leastsq(
errfunc, self.initial_values, args=(self.xdata, self.ydata)
)
assert_allclose(model.parameters, result[0], rtol=10 ** (-3))
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_with_weights(self, fitter):
"""
        Tests results from the non-linear fitters with weights.
"""
fitter = fitter()
# part 1: weights are equal to 1
model = fitter(self.gauss, self.xdata, self.ydata, estimate_jacobian=True)
withw = fitter(
self.gauss,
self.xdata,
self.ydata,
estimate_jacobian=True,
weights=np.ones_like(self.xdata),
)
assert_allclose(model.parameters, withw.parameters, rtol=10 ** (-4))
# part 2: weights are 0 or 1 (effectively, they are a mask)
weights = np.zeros_like(self.xdata)
weights[::2] = 1.0
mask = weights >= 1.0
model = fitter(
self.gauss, self.xdata[mask], self.ydata[mask], estimate_jacobian=True
)
withw = fitter(
self.gauss, self.xdata, self.ydata, estimate_jacobian=True, weights=weights
)
assert_allclose(model.parameters, withw.parameters, rtol=10 ** (-4))
@pytest.mark.filterwarnings(r"ignore:.* Maximum number of iterations reached")
@pytest.mark.filterwarnings(
r"ignore:Values in x were outside bounds during a minimize step, "
r"clipping to bounds"
)
@pytest.mark.parametrize("fitter_class", fitters)
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_fitter_against_LevMar(self, fitter_class, fitter):
"""
Tests results from non-linear fitters against `LevMarLSQFitter`
and `TRFLSQFitter`
"""
fitter = fitter()
fitter_cls = fitter_class()
# This emits a warning from fitter that we need to ignore with
# pytest.mark.filterwarnings above.
new_model = fitter_cls(self.gauss, self.xdata, self.ydata)
model = fitter(self.gauss, self.xdata, self.ydata)
assert_allclose(model.parameters, new_model.parameters, rtol=10 ** (-4))
@pytest.mark.filterwarnings(
r"ignore:Values in x were outside bounds during a minimize step, "
r"clipping to bounds"
)
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_LSQ_SLSQP_with_constraints(self, fitter):
"""
Runs `LevMarLSQFitter`/`TRFLSQFitter` and `SLSQPLSQFitter` on a
model with constraints.
"""
fitter = fitter()
g1 = models.Gaussian1D(100, 5, stddev=1)
g1.mean.fixed = True
fslsqp = SLSQPLSQFitter()
slsqp_model = fslsqp(g1, self.xdata, self.ydata)
model = fitter(g1, self.xdata, self.ydata)
assert_allclose(model.parameters, slsqp_model.parameters, rtol=10 ** (-4))
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_non_linear_lsq_fitter_with_weights(self, fitter):
"""
Tests that issue #11581 has been solved.
"""
fitter = fitter()
np.random.seed(42)
norder = 2
fitter2 = LinearLSQFitter()
model = models.Polynomial1D(norder)
npts = 10000
c = [2.0, -10.0, 7.0]
tw = np.random.uniform(0.0, 10.0, npts)
tx = np.random.uniform(0.0, 10.0, npts)
ty = c[0] + c[1] * tx + c[2] * (tx**2)
ty += np.random.normal(0.0, 1.5, npts)
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
tf1 = fitter(model, tx, ty, weights=tw)
tf2 = fitter2(model, tx, ty, weights=tw)
assert_allclose(tf1.parameters, tf2.parameters, atol=10 ** (-16))
assert_allclose(tf1.parameters, c, rtol=10 ** (-2), atol=10 ** (-2))
model = models.Gaussian1D()
if isinstance(fitter, (TRFLSQFitter, LMLSQFitter)):
with pytest.warns(
AstropyUserWarning, match=r"The fit may be unsuccessful; *."
):
fitter(model, tx, ty, weights=tw)
else:
fitter(model, tx, ty, weights=tw)
model = models.Polynomial2D(norder)
nxpts = 100
nypts = 150
npts = nxpts * nypts
c = [1.0, 4.0, 7.0, -8.0, -9.0, -3.0]
tw = np.random.uniform(0.0, 10.0, npts).reshape(nxpts, nypts)
tx = np.random.uniform(0.0, 10.0, npts).reshape(nxpts, nypts)
ty = np.random.uniform(0.0, 10.0, npts).reshape(nxpts, nypts)
tz = (
c[0]
+ c[1] * tx
+ c[2] * (tx**2)
+ c[3] * ty
+ c[4] * (ty**2)
+ c[5] * tx * ty
)
tz += np.random.normal(0.0, 1.5, npts).reshape(nxpts, nypts)
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
tf1 = fitter(model, tx, ty, tz, weights=tw)
tf2 = fitter2(model, tx, ty, tz, weights=tw)
assert_allclose(tf1.parameters, tf2.parameters, atol=10 ** (-16))
assert_allclose(tf1.parameters, c, rtol=10 ** (-2), atol=10 ** (-2))
def test_simplex_lsq_fitter(self):
"""A basic test for the `SimplexLSQ` fitter."""
class Rosenbrock(Fittable2DModel):
a = Parameter()
b = Parameter()
@staticmethod
def evaluate(x, y, a, b):
return (a - x) ** 2 + b * (y - x**2) ** 2
x = y = np.linspace(-3.0, 3.0, 100)
with NumpyRNGContext(_RANDOM_SEED):
z = Rosenbrock.evaluate(x, y, 1.0, 100.0)
z += np.random.normal(0.0, 0.1, size=z.shape)
fitter = SimplexLSQFitter()
r_i = Rosenbrock(1, 100)
r_f = fitter(r_i, x, y, z)
assert_allclose(r_f.parameters, [1.0, 100.0], rtol=1e-2)
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_param_cov(self, fitter):
"""
Tests that the 'param_cov' fit_info entry gets the right answer for
*linear* least squares, where the answer is exact
"""
fitter = fitter()
a = 2
b = 100
with NumpyRNGContext(_RANDOM_SEED):
x = np.linspace(0, 1, 100)
            # y scatter is amplitude ~1 to make sure covariance is
            # non-negligible
y = x * a + b + np.random.randn(len(x))
# first compute the ordinary least squares covariance matrix
X = np.vstack([x, np.ones(len(x))]).T
beta = np.matmul(np.matmul(np.linalg.inv(np.matmul(X.T, X)), X.T), y.T)
s2 = np.sum((y - np.matmul(X, beta).ravel()) ** 2) / (len(y) - len(beta))
olscov = np.linalg.inv(np.matmul(X.T, X)) * s2
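        # (This is the textbook OLS result: beta = (X^T X)^-1 X^T y,
        # s2 = RSS / (n - p), cov(beta) = s2 * (X^T X)^-1.)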
# now do the non-linear least squares fit
mod = models.Linear1D(a, b)
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
fmod = fitter(mod, x, y)
assert_allclose(fmod.parameters, beta.ravel())
assert_allclose(olscov, fitter.fit_info["param_cov"])
class TestEntryPoint:
"""Tests population of fitting with entry point fitters"""
def successfulimport(self):
# This should work
class goodclass(Fitter):
__name__ = "GoodClass"
return goodclass
def raiseimporterror(self):
        # This should fail as it raises an ImportError
raise ImportError
def returnbadfunc(self):
def badfunc():
            # This should import, but it should fail the type check
pass
return badfunc
def returnbadclass(self):
        # This should import, but it should fail the subclass type check
class badclass:
pass
return badclass
def test_working(self):
"""This should work fine"""
mock_entry_working = mock.create_autospec(EntryPoint)
mock_entry_working.name = "Working"
mock_entry_working.load = self.successfulimport
populate_entry_points([mock_entry_working])
def test_import_error(self):
"""This raises an import error on load to test that it is handled correctly"""
mock_entry_importerror = mock.create_autospec(EntryPoint)
mock_entry_importerror.name = "IErr"
mock_entry_importerror.load = self.raiseimporterror
with pytest.warns(AstropyUserWarning, match=r".*ImportError.*"):
populate_entry_points([mock_entry_importerror])
def test_bad_func(self):
"""This returns a function which fails the type check"""
mock_entry_badfunc = mock.create_autospec(EntryPoint)
mock_entry_badfunc.name = "BadFunc"
mock_entry_badfunc.load = self.returnbadfunc
with pytest.warns(AstropyUserWarning, match=r".*Class.*"):
populate_entry_points([mock_entry_badfunc])
def test_bad_class(self):
"""This returns a class which doesn't inherient from fitter"""
mock_entry_badclass = mock.create_autospec(EntryPoint)
mock_entry_badclass.name = "BadClass"
mock_entry_badclass.load = self.returnbadclass
with pytest.warns(AstropyUserWarning, match=r".*BadClass.*"):
populate_entry_points([mock_entry_badclass])
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
class Test1DFittingWithOutlierRemoval:
def setup_class(self):
self.x = np.linspace(-5.0, 5.0, 200)
self.model_params = (3.0, 1.3, 0.8)
def func(p, x):
return p[0] * np.exp(-0.5 * (x - p[1]) ** 2 / p[2] ** 2)
self.y = func(self.model_params, self.x)
@pytest.mark.filterwarnings("ignore:The fit may be unsuccessful")
@pytest.mark.filterwarnings(
r"ignore:Values in x were outside bounds during a minimize step, "
r"clipping to bounds"
)
@pytest.mark.parametrize("fitter", non_linear_fitters + fitters)
def test_with_fitters_and_sigma_clip(self, fitter):
import scipy.stats as stats
fitter = fitter()
np.random.seed(0)
c = stats.bernoulli.rvs(0.25, size=self.x.shape)
y = self.y + (
np.random.normal(0.0, 0.2, self.x.shape)
+ c * np.random.normal(3.0, 5.0, self.x.shape)
)
g_init = models.Gaussian1D(amplitude=1.0, mean=0, stddev=1.0)
fit = FittingWithOutlierRemoval(fitter, sigma_clip, niter=3, sigma=3.0)
fitted_model, _ = fit(g_init, self.x, y)
assert_allclose(fitted_model.parameters, self.model_params, rtol=1e-1)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
class Test2DFittingWithOutlierRemoval:
def setup_class(self):
self.y, self.x = np.mgrid[-3:3:128j, -3:3:128j]
self.model_params = (3.0, 1.0, 0.0, 0.8, 0.8)
def Gaussian_2D(p, pos):
return p[0] * np.exp(
-0.5 * (pos[0] - p[2]) ** 2 / p[4] ** 2
- 0.5 * (pos[1] - p[1]) ** 2 / p[3] ** 2
)
self.z = Gaussian_2D(self.model_params, np.array([self.y, self.x]))
    def initial_guess(self, data, pos):
        """Computes the centroid of the data as the initial guess for the
        center position."""
        y = pos[0]
        x = pos[1]
wx = x * data
wy = y * data
total_intensity = np.sum(data)
x_mean = np.sum(wx) / total_intensity
y_mean = np.sum(wy) / total_intensity
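        # Convert the centroid from world to pixel coordinates, assuming a
        # uniform grid centered on zero (scale = npix / full coordinate range).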
x_to_pixel = x[0].size / (x[x[0].size - 1][x[0].size - 1] - x[0][0])
y_to_pixel = y[0].size / (y[y[0].size - 1][y[0].size - 1] - y[0][0])
x_pos = np.around(x_mean * x_to_pixel + x[0].size / 2.0).astype(int)
y_pos = np.around(y_mean * y_to_pixel + y[0].size / 2.0).astype(int)
amplitude = data[y_pos][x_pos]
return amplitude, x_mean, y_mean
@pytest.mark.filterwarnings("ignore:The fit may be unsuccessful")
@pytest.mark.filterwarnings(
r"ignore:Values in x were outside bounds during a minimize step, "
r"clipping to bounds"
)
@pytest.mark.parametrize("fitter", non_linear_fitters + fitters)
def test_with_fitters_and_sigma_clip(self, fitter):
import scipy.stats as stats
fitter = fitter()
np.random.seed(0)
c = stats.bernoulli.rvs(0.25, size=self.z.shape)
z = self.z + (
np.random.normal(0.0, 0.2, self.z.shape)
+ c * np.random.normal(self.z, 2.0, self.z.shape)
)
guess = self.initial_guess(self.z, np.array([self.y, self.x]))
g2_init = models.Gaussian2D(
amplitude=guess[0],
x_mean=guess[1],
y_mean=guess[2],
x_stddev=0.75,
y_stddev=1.25,
)
fit = FittingWithOutlierRemoval(fitter, sigma_clip, niter=3, sigma=3.0)
fitted_model, _ = fit(g2_init, self.x, self.y, z)
assert_allclose(fitted_model.parameters[0:5], self.model_params, atol=1e-1)
def test_1d_set_fitting_with_outlier_removal():
"""Test model set fitting with outlier removal (issue #6819)"""
poly_set = models.Polynomial1D(2, n_models=2)
fitter = FittingWithOutlierRemoval(
LinearLSQFitter(),
sigma_clip,
sigma=2.5,
niter=3,
cenfunc=np.ma.mean,
stdfunc=np.ma.std,
)
x = np.arange(10)
y = np.array([2.5 * x - 4, 2 * x * x + x + 10])
y[1, 5] = -1000 # outlier
poly_set, filt_y = fitter(poly_set, x, y)
assert_allclose(poly_set.c0, [-4.0, 10.0], atol=1e-14)
assert_allclose(poly_set.c1, [2.5, 1.0], atol=1e-14)
assert_allclose(poly_set.c2, [0.0, 2.0], atol=1e-14)
def test_2d_set_axis_2_fitting_with_outlier_removal():
"""Test fitting 2D model set (axis 2) with outlier removal (issue #6819)"""
poly_set = models.Polynomial2D(1, n_models=2, model_set_axis=2)
fitter = FittingWithOutlierRemoval(
LinearLSQFitter(),
sigma_clip,
sigma=2.5,
niter=3,
cenfunc=np.ma.mean,
stdfunc=np.ma.std,
)
y, x = np.mgrid[0:5, 0:5]
z = np.rollaxis(np.array([x + y, 1 - 0.1 * x + 0.2 * y]), 0, 3)
z[3, 3:5, 0] = 100.0 # outliers
poly_set, filt_z = fitter(poly_set, x, y, z)
assert_allclose(poly_set.c0_0, [[[0.0, 1.0]]], atol=1e-14)
assert_allclose(poly_set.c1_0, [[[1.0, -0.1]]], atol=1e-14)
assert_allclose(poly_set.c0_1, [[[1.0, 0.2]]], atol=1e-14)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
class TestWeightedFittingWithOutlierRemoval:
"""Issue #7020"""
def setup_class(self):
# values of x,y not important as we fit y(x,y) = p0 model here
self.y, self.x = np.mgrid[0:20, 0:20]
self.z = np.mod(self.x + self.y, 2) * 2 - 1 # -1,1 chessboard
self.weights = np.mod(self.x + self.y, 2) * 2 + 1 # 1,3 chessboard
self.z[0, 0] = 1000.0 # outlier
self.z[0, 1] = 1000.0 # outlier
self.x1d = self.x.flatten()
self.z1d = self.z.flatten()
self.weights1d = self.weights.flatten()
def test_1d_without_weights_without_sigma_clip(self):
model = models.Polynomial1D(0)
fitter = LinearLSQFitter()
fit = fitter(model, self.x1d, self.z1d)
assert_allclose(fit.parameters[0], self.z1d.mean(), atol=10 ** (-2))
def test_1d_without_weights_with_sigma_clip(self):
model = models.Polynomial1D(0)
fitter = FittingWithOutlierRemoval(
LinearLSQFitter(), sigma_clip, niter=3, sigma=3.0
)
fit, mask = fitter(model, self.x1d, self.z1d)
assert (~mask).sum() == self.z1d.size - 2
assert mask[0] and mask[1]
assert_allclose(
fit.parameters[0], 0.0, atol=10 ** (-2)
) # with removed outliers mean is 0.0
def test_1d_with_weights_without_sigma_clip(self):
model = models.Polynomial1D(0)
fitter = LinearLSQFitter()
fit = fitter(model, self.x1d, self.z1d, weights=self.weights1d)
assert fit.parameters[0] > 1.0 # outliers pulled it high
def test_1d_with_weights_with_sigma_clip(self):
"""
smoke test for #7020 - fails without fitting.py
patch because weights does not propagate
"""
model = models.Polynomial1D(0)
fitter = FittingWithOutlierRemoval(
LinearLSQFitter(), sigma_clip, niter=3, sigma=3.0
)
fit, filtered = fitter(model, self.x1d, self.z1d, weights=self.weights1d)
assert fit.parameters[0] > 10 ** (-2) # weights pulled it > 0
# outliers didn't pull it out of [-1:1] because they had been removed
assert fit.parameters[0] < 1.0
def test_1d_set_with_common_weights_with_sigma_clip(self):
"""added for #6819 (1D model set with weights in common)"""
model = models.Polynomial1D(0, n_models=2)
fitter = FittingWithOutlierRemoval(
LinearLSQFitter(), sigma_clip, niter=3, sigma=3.0
)
z1d = np.array([self.z1d, self.z1d])
fit, filtered = fitter(model, self.x1d, z1d, weights=self.weights1d)
assert_allclose(fit.parameters, [0.8, 0.8], atol=1e-14)
def test_1d_set_with_weights_with_sigma_clip(self):
"""1D model set with separate weights"""
model = models.Polynomial1D(0, n_models=2)
fitter = FittingWithOutlierRemoval(
LinearLSQFitter(), sigma_clip, niter=3, sigma=3.0
)
z1d = np.array([self.z1d, self.z1d])
weights = np.array([self.weights1d, self.weights1d])
fit, filtered = fitter(model, self.x1d, z1d, weights=weights)
assert_allclose(fit.parameters, [0.8, 0.8], atol=1e-14)
def test_2d_without_weights_without_sigma_clip(self):
model = models.Polynomial2D(0)
fitter = LinearLSQFitter()
fit = fitter(model, self.x, self.y, self.z)
assert_allclose(fit.parameters[0], self.z.mean(), atol=10 ** (-2))
def test_2d_without_weights_with_sigma_clip(self):
model = models.Polynomial2D(0)
fitter = FittingWithOutlierRemoval(
LinearLSQFitter(), sigma_clip, niter=3, sigma=3.0
)
fit, mask = fitter(model, self.x, self.y, self.z)
assert (~mask).sum() == self.z.size - 2
assert mask[0, 0] and mask[0, 1]
assert_allclose(fit.parameters[0], 0.0, atol=10 ** (-2))
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_2d_with_weights_without_sigma_clip(self, fitter):
fitter = fitter()
model = models.Polynomial2D(0)
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
fit = fitter(model, self.x, self.y, self.z, weights=self.weights)
assert fit.parameters[0] > 1.0 # outliers pulled it high
def test_2d_linear_with_weights_without_sigma_clip(self):
model = models.Polynomial2D(0)
# LinearLSQFitter doesn't handle weights properly in 2D
fitter = LinearLSQFitter()
fit = fitter(model, self.x, self.y, self.z, weights=self.weights)
assert fit.parameters[0] > 1.0 # outliers pulled it high
@pytest.mark.parametrize("base_fitter", non_linear_fitters)
def test_2d_with_weights_with_sigma_clip(self, base_fitter):
"""smoke test for #7020 - fails without fitting.py patch because
weights does not propagate"""
base_fitter = base_fitter()
model = models.Polynomial2D(0)
fitter = FittingWithOutlierRemoval(base_fitter, sigma_clip, niter=3, sigma=3.0)
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
fit, _ = fitter(model, self.x, self.y, self.z, weights=self.weights)
assert fit.parameters[0] > 10 ** (-2) # weights pulled it > 0
# outliers didn't pull it out of [-1:1] because they had been removed
assert fit.parameters[0] < 1.0
def test_2d_linear_with_weights_with_sigma_clip(self):
"""same as test above with a linear fitter."""
model = models.Polynomial2D(0)
fitter = FittingWithOutlierRemoval(
LinearLSQFitter(), sigma_clip, niter=3, sigma=3.0
)
fit, _ = fitter(model, self.x, self.y, self.z, weights=self.weights)
assert fit.parameters[0] > 10 ** (-2) # weights pulled it > 0
# outliers didn't pull it out of [-1:1] because they had been removed
assert fit.parameters[0] < 1.0
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_fitters_with_weights(fitter):
"""Issue #5737"""
fitter = fitter()
if isinstance(fitter, _NLLSQFitter):
pytest.xfail(
"This test is poorly designed and causes issues for "
"scipy.optimize.least_squares based fitters"
)
Xin, Yin = np.mgrid[0:21, 0:21]
with NumpyRNGContext(_RANDOM_SEED):
zsig = np.random.normal(0, 0.01, size=Xin.shape)
# Non-linear model
g2 = models.Gaussian2D(10, 10, 9, 2, 3)
z = g2(Xin, Yin)
gmod = fitter(models.Gaussian2D(15, 7, 8, 1.3, 1.2), Xin, Yin, z + zsig)
assert_allclose(gmod.parameters, g2.parameters, atol=10 ** (-2))
# Linear model
p2 = models.Polynomial2D(3)
p2.parameters = np.arange(10) / 1.2
z = p2(Xin, Yin)
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig)
assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))
def test_linear_fitter_with_weights():
"""Regression test for #7035"""
Xin, Yin = np.mgrid[0:21, 0:21]
fitter = LinearLSQFitter()
with NumpyRNGContext(_RANDOM_SEED):
zsig = np.random.normal(0, 0.01, size=Xin.shape)
p2 = models.Polynomial2D(3)
p2.parameters = np.arange(10) / 1.2
z = p2(Xin, Yin)
pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig, weights=zsig ** (-2))
assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))
@pytest.mark.parametrize(
"fixed, warns",
[
({}, True), # tests fitting non-fixed parameters models produces warnings
(
{"c1_0": True},
True,
), # tests fitting fixed par models produces warnings - #14037
(
{"c0_1": True},
False,
), # https://github.com/astropy/astropy/pull/14037#pullrequestreview-1191726872
],
)
def test_polynomial_poorly_conditioned(fixed, warns):
p0 = models.Polynomial2D(degree=1, c0_0=3, c1_0=5, c0_1=0, fixed=fixed)
fitter = LinearLSQFitter()
x = [1, 2, 3, 4, 5]
y = [1, 1, 1, 1, 1]
values = p0(x, y)
if warns:
with pytest.warns(
AstropyUserWarning, match="The fit may be poorly conditioned"
):
p = fitter(p0, x, y, values)
else:
p = fitter(p0, x, y, values)
assert np.allclose(p0.parameters, p.parameters, rtol=0, atol=1e-14)
def test_linear_fitter_with_weights_flat():
"""Same as the above #7035 test but with flattened inputs"""
Xin, Yin = np.mgrid[0:21, 0:21]
Xin, Yin = Xin.flatten(), Yin.flatten()
fitter = LinearLSQFitter()
with NumpyRNGContext(_RANDOM_SEED):
zsig = np.random.normal(0, 0.01, size=Xin.shape)
p2 = models.Polynomial2D(3)
p2.parameters = np.arange(10) / 1.2
z = p2(Xin, Yin)
pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig, weights=zsig ** (-2))
assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.filterwarnings("ignore:The fit may be unsuccessful")
@pytest.mark.parametrize("fitter", non_linear_fitters + fitters)
def test_fitters_interface(fitter):
"""
Test that ``**kwargs`` work with all optimizers.
This is a basic smoke test.
"""
fitter = fitter()
model = models.Gaussian1D(10, 4, 0.3)
x = np.arange(21)
y = model(x)
if isinstance(fitter, SimplexLSQFitter):
kwargs = {"maxiter": 79, "verblevel": 1, "acc": 1e-6}
else:
kwargs = {"maxiter": 77, "verblevel": 1, "epsilon": 1e-2, "acc": 1e-6}
if isinstance(fitter, (LevMarLSQFitter, _NLLSQFitter)):
kwargs.pop("verblevel")
_ = fitter(model, x, y, **kwargs)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter_class", [SLSQPLSQFitter, SimplexLSQFitter])
def test_optimizers(fitter_class):
fitter = fitter_class()
# Test maxiter
assert fitter._opt_method.maxiter == 100
fitter._opt_method.maxiter = 1000
assert fitter._opt_method.maxiter == 1000
# Test eps
assert fitter._opt_method.eps == np.sqrt(np.finfo(float).eps)
fitter._opt_method.eps = 1e-16
assert fitter._opt_method.eps == 1e-16
# Test acc
assert fitter._opt_method.acc == 1e-7
fitter._opt_method.acc = 1e-16
assert fitter._opt_method.acc == 1e-16
# Test repr
assert repr(fitter._opt_method) == f"{fitter._opt_method.__class__.__name__}()"
fitparams = mk.MagicMock()
final_func_val = mk.MagicMock()
numiter = mk.MagicMock()
funcalls = mk.MagicMock()
exit_mode = 1
mess = mk.MagicMock()
xtol = mk.MagicMock()
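    # The mocked return tuples mirror the scipy optimizers' full_output
    # results: fmin_slsqp returns (out, fx, its, imode, smode), while fmin
    # returns (xopt, fopt, iter, funcalls, warnflag).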
if fitter_class == SLSQPLSQFitter:
return_value = (fitparams, final_func_val, numiter, exit_mode, mess)
fit_info = {
"final_func_val": final_func_val,
"numiter": numiter,
"exit_mode": exit_mode,
"message": mess,
}
else:
return_value = (fitparams, final_func_val, numiter, funcalls, exit_mode)
fit_info = {
"final_func_val": final_func_val,
"numiter": numiter,
"exit_mode": exit_mode,
"num_function_calls": funcalls,
}
with mk.patch.object(
fitter._opt_method.__class__, "opt_method", return_value=return_value
):
with pytest.warns(AstropyUserWarning, match=r"The fit may be unsuccessful; .*"):
assert (fitparams, fit_info) == fitter._opt_method(
mk.MagicMock(), mk.MagicMock(), mk.MagicMock(), xtol=xtol
)
assert fit_info == fitter._opt_method.fit_info
    if isinstance(fitter, SLSQPLSQFitter):
        assert fitter._opt_method.acc == 1e-16
    else:
        assert fitter._opt_method.acc == xtol
@mk.patch.multiple(Optimization, __abstractmethods__=set())
def test_Optimization_abstract_call():
optimization = Optimization(mk.MagicMock())
MESSAGE = r"Subclasses should implement this method"
with pytest.raises(NotImplementedError, match=MESSAGE):
optimization()
def test_fitting_with_outlier_removal_niter():
"""
Test that FittingWithOutlierRemoval stops prior to reaching niter if the
set of masked points has converged and correctly reports the actual number
of iterations performed.
"""
# 2 rows with some noise around a constant level and 1 deviant point:
x = np.arange(25)
with NumpyRNGContext(_RANDOM_SEED):
y = np.random.normal(loc=10.0, scale=1.0, size=(2, 25))
y[0, 14] = 100.0
# Fit 2 models with up to 5 iterations (should only take 2):
fitter = FittingWithOutlierRemoval(
fitter=LinearLSQFitter(),
outlier_func=sigma_clip,
niter=5,
sigma_lower=3.0,
sigma_upper=3.0,
maxiters=1,
)
model, mask = fitter(models.Chebyshev1D(2, n_models=2), x, y)
# Confirm that only the deviant point was rejected, in 2 iterations:
assert_equal(np.where(mask), [[0], [14]])
assert fitter.fit_info["niter"] == 2
# Refit just the first row without any rejection iterations, to ensure
# there are no regressions for that special case:
fitter = FittingWithOutlierRemoval(
fitter=LinearLSQFitter(),
outlier_func=sigma_clip,
niter=0,
sigma_lower=3.0,
sigma_upper=3.0,
maxiters=1,
)
model, mask = fitter(models.Chebyshev1D(2), x, y[0])
# Confirm that there were no iterations or rejected points:
assert mask.sum() == 0
assert fitter.fit_info["niter"] == 0
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
class TestFittingUncertainties:
"""
Test that parameter covariance is calculated correctly for the fitters
that do so (currently LevMarLSQFitter, LinearLSQFitter).
"""
example_1D_models = [models.Polynomial1D(2), models.Linear1D()]
example_1D_sets = [
models.Polynomial1D(2, n_models=2, model_set_axis=False),
models.Linear1D(n_models=2, slope=[1.0, 1.0], intercept=[0, 0]),
]
def setup_class(self):
np.random.seed(619)
self.x = np.arange(10)
self.x_grid = np.random.randint(0, 100, size=100).reshape(10, 10)
self.y_grid = np.random.randint(0, 100, size=100).reshape(10, 10)
self.rand_grid = np.random.random(100).reshape(10, 10)
self.rand = self.rand_grid[0]
@pytest.mark.parametrize(
("single_model", "model_set"), list(zip(example_1D_models, example_1D_sets))
)
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_1d_models(self, single_model, model_set, fitter):
"""Test that fitting uncertainties are computed correctly for 1D models
        and 1D model sets. Use the covariance/stds given by the non-linear
        fitter as a benchmark, checking them against the numpy-based
        `LinearLSQFitter`.
"""
fitter = fitter(calc_uncertainties=True)
linlsq_fitter = LinearLSQFitter(calc_uncertainties=True)
# test 1D single models
# fit single model w/ nonlinear fitter
y = single_model(self.x) + self.rand
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
fit_model = fitter(single_model, self.x, y)
cov_model = fit_model.cov_matrix.cov_matrix
# fit single model w/ linlsq fitter
fit_model_linlsq = linlsq_fitter(single_model, self.x, y)
cov_model_linlsq = fit_model_linlsq.cov_matrix.cov_matrix
        # check that covariance and stds are computed correctly
assert_allclose(cov_model_linlsq, cov_model)
assert_allclose(np.sqrt(np.diag(cov_model_linlsq)), fit_model_linlsq.stds.stds)
# now test 1D model sets
# fit set of models w/ linear fitter
y = model_set(self.x, model_set_axis=False) + np.array([self.rand, self.rand])
fit_1d_set_linlsq = linlsq_fitter(model_set, self.x, y)
cov_1d_set_linlsq = [j.cov_matrix for j in fit_1d_set_linlsq.cov_matrix]
# make sure cov matrix from single model fit w/ levmar fitter matches
# the cov matrix of first model in the set
assert_allclose(cov_1d_set_linlsq[0], cov_model)
assert_allclose(
np.sqrt(np.diag(cov_1d_set_linlsq[0])), fit_1d_set_linlsq.stds[0].stds
)
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_2d_models(self, fitter):
"""
Test that fitting uncertainties are computed correctly for 2D models
        and 2D model sets. Use the covariance/stds given by the non-linear
        fitter as a benchmark, checking them against the numpy-based
        `LinearLSQFitter`.
"""
fitter = fitter(calc_uncertainties=True)
linlsq_fitter = LinearLSQFitter(calc_uncertainties=True)
single_model = models.Polynomial2D(2, c0_0=2)
model_set = models.Polynomial2D(
degree=2, n_models=2, c0_0=[2, 3], model_set_axis=False
)
# fit single model w/ nonlinear fitter
z_grid = single_model(self.x_grid, self.y_grid) + self.rand_grid
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
fit_model = fitter(single_model, self.x_grid, self.y_grid, z_grid)
cov_model = fit_model.cov_matrix.cov_matrix
        # fit single model w/ linlsq fitter
fit_model_linlsq = linlsq_fitter(single_model, self.x_grid, self.y_grid, z_grid)
cov_model_linlsq = fit_model_linlsq.cov_matrix.cov_matrix
assert_allclose(cov_model, cov_model_linlsq)
assert_allclose(np.sqrt(np.diag(cov_model_linlsq)), fit_model_linlsq.stds.stds)
# fit 2d model set
z_grid = model_set(self.x_grid, self.y_grid) + np.array(
(self.rand_grid, self.rand_grid)
)
fit_2d_set_linlsq = linlsq_fitter(model_set, self.x_grid, self.y_grid, z_grid)
cov_2d_set_linlsq = [j.cov_matrix for j in fit_2d_set_linlsq.cov_matrix]
# make sure cov matrix from single model fit w/ levmar fitter matches
# the cov matrix of first model in the set
assert_allclose(cov_2d_set_linlsq[0], cov_model)
assert_allclose(
np.sqrt(np.diag(cov_2d_set_linlsq[0])), fit_2d_set_linlsq.stds[0].stds
)
def test_covariance_std_printing_indexing(self, capsys):
"""
Test printing methods and indexing.
"""
# test str representation for Covariance/stds
fitter = LinearLSQFitter(calc_uncertainties=True)
mod = models.Linear1D()
fit_mod = fitter(mod, self.x, mod(self.x) + self.rand)
print(fit_mod.cov_matrix)
captured = capsys.readouterr()
assert "slope | 0.001" in captured.out
assert "intercept| -0.005, 0.03" in captured.out
print(fit_mod.stds)
captured = capsys.readouterr()
assert "slope | 0.032" in captured.out
assert "intercept| 0.173" in captured.out
# test 'pprint' for Covariance/stds
print(fit_mod.cov_matrix.pprint(round_val=5, max_lines=1))
captured = capsys.readouterr()
assert "slope | 0.00105" in captured.out
assert "intercept" not in captured.out
print(fit_mod.stds.pprint(max_lines=1, round_val=5))
captured = capsys.readouterr()
assert "slope | 0.03241" in captured.out
assert "intercept" not in captured.out
# test indexing for Covariance class.
assert fit_mod.cov_matrix[0, 0] == fit_mod.cov_matrix["slope", "slope"]
# test indexing for stds class.
assert fit_mod.stds[1] == fit_mod.stds["intercept"]
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_non_finite_error(fitter):
"""Regression test error introduced to solve issues #3575 and #12809"""
x = np.array([1, 2, 3, 4, 5, np.nan, 7, np.inf])
y = np.array([9, np.nan, 11, np.nan, 13, np.nan, 15, 16])
m_init = models.Gaussian1D()
fit = fitter()
    # The fit raises an error due to the non-finite values in the inputs
with pytest.raises(
NonFiniteValueError, match=r"Objective function has encountered.*"
):
fit(m_init, x, y)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_non_finite_filter_1D(fitter):
"""Regression test filter introduced to remove non-finte values from data"""
x = np.array([1, 2, 3, 4, 5, 6, 7, 8])
y = np.array([9, np.nan, 11, np.nan, 13, np.nan, 15, np.inf])
m_init = models.Gaussian1D()
fit = fitter()
with pytest.warns(
AstropyUserWarning,
match=r"Non-Finite input data has been removed by the fitter",
):
fit(m_init, x, y, filter_non_finite=True)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_non_finite_filter_2D(fitter):
"""Regression test filter introduced to remove non-finte values from data"""
x, y = np.mgrid[0:10, 0:10]
m_true = models.Gaussian2D(amplitude=1, x_mean=5, y_mean=5, x_stddev=2, y_stddev=2)
with NumpyRNGContext(_RANDOM_SEED):
z = m_true(x, y) + np.random.rand(*x.shape)
z[0, 0] = np.nan
z[3, 3] = np.inf
z[7, 5] = -np.inf
m_init = models.Gaussian2D()
fit = fitter()
with pytest.warns(
AstropyUserWarning,
match=r"Non-Finite input data has been removed by the fitter",
):
fit(m_init, x, y, z, filter_non_finite=True)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.filterwarnings(r"ignore:Model is linear in parameters*")
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_non_linear_fit_zero_degree_polynomial_with_weights(fitter):
"""
Regression test for issue #13617
Issue:
    Non-linear weighted fits of 0-degree polynomials cause an error
    to be raised by scipy.
Fix:
There should be no error raised in this circumstance
"""
model = models.Polynomial1D(0, c0=0)
fitter = fitter()
x = np.arange(10, dtype=float)
y = np.ones((10,))
weights = np.ones((10,))
fit = fitter(model, x, y)
assert_almost_equal(fit.c0, 1.0)
fit = fitter(model, x, y, weights=weights)
assert_almost_equal(fit.c0, 1.0)
|
9b0a7bdeec2c075c15b5cdce4a0905c9a0cd5db2bbe7bdb59c45e90f1236cb71 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for model evaluation.
Compare the results of some models with other programs.
"""
import unittest.mock as mk
import numpy as np
# pylint: disable=invalid-name, no-member
import pytest
from numpy.testing import assert_allclose, assert_equal
import astropy.modeling.tabular as tabular_models
from astropy import units as u
from astropy.modeling import fitting, models
from astropy.modeling.bounding_box import ModelBoundingBox
from astropy.modeling.core import FittableModel, Model, _ModelMeta
from astropy.modeling.models import Gaussian2D
from astropy.modeling.parameters import InputParameterError, Parameter
from astropy.modeling.polynomial import PolynomialBase
from astropy.modeling.powerlaws import (
BrokenPowerLaw1D,
ExponentialCutoffPowerLaw1D,
LogParabola1D,
PowerLaw1D,
SmoothlyBrokenPowerLaw1D,
)
from astropy.modeling.separable import separability_matrix
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils import NumpyRNGContext, minversion
from astropy.utils.compat.optional_deps import HAS_SCIPY
from .example_models import models_1D, models_2D
fitters = [
fitting.LevMarLSQFitter,
fitting.TRFLSQFitter,
fitting.LMLSQFitter,
fitting.DogBoxLSQFitter,
]
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_custom_model(fitter, amplitude=4, frequency=1):
fitter = fitter()
def sine_model(x, amplitude=4, frequency=1):
"""
Model function
"""
return amplitude * np.sin(2 * np.pi * frequency * x)
def sine_deriv(x, amplitude=4, frequency=1):
"""
        Jacobian of the model function, i.e., the derivative of the function
        with respect to the *parameters*
"""
da = np.sin(2 * np.pi * frequency * x)
df = 2 * np.pi * x * amplitude * np.cos(2 * np.pi * frequency * x)
return np.vstack((da, df))
SineModel = models.custom_model(sine_model, fit_deriv=sine_deriv)
x = np.linspace(0, 4, 50)
sin_model = SineModel()
sin_model.evaluate(x, 5.0, 2.0)
sin_model.fit_deriv(x, 5.0, 2.0)
np.random.seed(0)
data = sin_model(x) + np.random.rand(len(x)) - 0.5
model = fitter(sin_model, x, data)
assert np.all(
(
np.array([model.amplitude.value, model.frequency.value])
- np.array([amplitude, frequency])
)
< 0.001
)
def test_custom_model_init():
@models.custom_model
def SineModel(x, amplitude=4, frequency=1):
"""Model function"""
return amplitude * np.sin(2 * np.pi * frequency * x)
sin_model = SineModel(amplitude=2.0, frequency=0.5)
assert sin_model.amplitude == 2.0
assert sin_model.frequency == 0.5
def test_custom_model_defaults():
@models.custom_model
def SineModel(x, amplitude=4, frequency=1):
"""Model function"""
return amplitude * np.sin(2 * np.pi * frequency * x)
sin_model = SineModel()
assert SineModel.amplitude.default == 4
assert SineModel.frequency.default == 1
assert sin_model.amplitude == 4
assert sin_model.frequency == 1
def test_inconsistent_input_shapes():
g = Gaussian2D()
x = np.arange(-1.0, 1, 0.2)
y = x.copy()
# check scalar input broadcasting works
assert np.abs(g(x, 0) - g(x, 0 * x)).sum() == 0
# but not array broadcasting
x.shape = (10, 1)
y.shape = (1, 10)
result = g(x, y)
assert result.shape == (10, 10)
def test_custom_model_bounding_box():
"""Test bounding box evaluation for a 3D model"""
def ellipsoid(x, y, z, x0=13, y0=10, z0=8, a=4, b=3, c=2, amp=1):
rsq = ((x - x0) / a) ** 2 + ((y - y0) / b) ** 2 + ((z - z0) / c) ** 2
val = (rsq < 1) * amp
return val
class Ellipsoid3D(models.custom_model(ellipsoid)):
@property
def bounding_box(self):
return (
(self.z0 - self.c, self.z0 + self.c),
(self.y0 - self.b, self.y0 + self.b),
(self.x0 - self.a, self.x0 + self.a),
)
model = Ellipsoid3D()
bbox = model.bounding_box
zlim, ylim, xlim = bbox.bounding_box()
dz, dy, dx = np.diff(bbox) / 2
z1, y1, x1 = np.mgrid[
slice(zlim[0], zlim[1] + 1),
slice(ylim[0], ylim[1] + 1),
slice(xlim[0], xlim[1] + 1),
]
z2, y2, x2 = np.mgrid[
slice(zlim[0] - dz, zlim[1] + dz + 1),
slice(ylim[0] - dy, ylim[1] + dy + 1),
slice(xlim[0] - dx, xlim[1] + dx + 1),
]
arr = model(x2, y2, z2, with_bounding_box=True)
sub_arr = model(x1, y1, z1, with_bounding_box=True)
# check for flux agreement
assert abs(np.nansum(arr) - np.nansum(sub_arr)) < np.nansum(arr) * 1e-7
class Fittable2DModelTester:
"""
Test class for all two dimensional parametric models.
Test values have to be defined in example_models.py. It currently tests the
model with different input types, evaluates the model at different
positions, and ensures that it gives the correct values there. It also
tests that the model works with non-linear fitters.
This can be used as a base class for user defined model testing.
"""
def setup_class(self):
self.N = 100
self.M = 100
self.eval_error = 0.0001
self.fit_error = 0.1
self.x = 5.3
self.y = 6.7
self.x1 = np.arange(1, 10, 0.1)
self.y1 = np.arange(1, 10, 0.1)
self.y2, self.x2 = np.mgrid[:10, :8]
def test_input2D(self, model_class, test_parameters):
"""Test model with different input types."""
model = create_model(model_class, test_parameters)
model(self.x, self.y)
model(self.x1, self.y1)
model(self.x2, self.y2)
def test_eval2D(self, model_class, test_parameters):
"""Test model values add certain given points"""
model = create_model(model_class, test_parameters)
x = test_parameters["x_values"]
y = test_parameters["y_values"]
z = test_parameters["z_values"]
assert np.all(np.abs(model(x, y) - z) < self.eval_error)
def test_bounding_box2D(self, model_class, test_parameters):
"""Test bounding box evaluation"""
model = create_model(model_class, test_parameters)
# testing setter
model.bounding_box = ((-5, 5), (-5, 5))
assert model.bounding_box == ((-5, 5), (-5, 5))
model.bounding_box = None
MESSAGE = r"No bounding box is defined for this model .*"
with pytest.raises(NotImplementedError, match=MESSAGE):
model.bounding_box
# test the exception if dimensions don't match
MESSAGE = r"An interval must be some sort of sequence of length 2"
with pytest.raises(ValueError, match=MESSAGE):
model.bounding_box = (-5, 5)
del model.bounding_box
try:
bbox = model.bounding_box
except NotImplementedError:
return
ddx = 0.01
ylim, xlim = bbox
x1 = np.arange(xlim[0], xlim[1], ddx)
y1 = np.arange(ylim[0], ylim[1], ddx)
x2 = np.concatenate(
(
[xlim[0] - idx * ddx for idx in range(10, 0, -1)],
x1,
[xlim[1] + idx * ddx for idx in range(1, 10)],
)
)
y2 = np.concatenate(
(
[ylim[0] - idx * ddx for idx in range(10, 0, -1)],
y1,
[ylim[1] + idx * ddx for idx in range(1, 10)],
)
)
inside_bbox = model(x1, y1)
outside_bbox = model(x2, y2, with_bounding_box=True)
outside_bbox = outside_bbox[~np.isnan(outside_bbox)]
assert np.all(inside_bbox == outside_bbox)
def test_bounding_box2D_peak(self, model_class, test_parameters):
if not test_parameters.pop("bbox_peak", False):
return
model = create_model(model_class, test_parameters)
bbox = model.bounding_box
ylim, xlim = bbox
dy, dx = np.diff(bbox) / 2
y1, x1 = np.mgrid[slice(ylim[0], ylim[1] + 1), slice(xlim[0], xlim[1] + 1)]
y2, x2 = np.mgrid[
slice(ylim[0] - dy, ylim[1] + dy + 1), slice(xlim[0] - dx, xlim[1] + dx + 1)
]
arr = model(x2, y2)
sub_arr = model(x1, y1)
# check for flux agreement
assert abs(arr.sum() - sub_arr.sum()) < arr.sum() * 1e-7
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_fitter2D(self, model_class, test_parameters, fitter):
"""Test if the parametric model works with the fitter."""
fitter = fitter()
x_lim = test_parameters["x_lim"]
y_lim = test_parameters["y_lim"]
parameters = test_parameters["parameters"]
model = create_model(model_class, test_parameters)
if isinstance(parameters, dict):
parameters = [parameters[name] for name in model.param_names]
if "log_fit" in test_parameters:
if test_parameters["log_fit"]:
x = np.logspace(x_lim[0], x_lim[1], self.N)
y = np.logspace(y_lim[0], y_lim[1], self.N)
else:
x = np.linspace(x_lim[0], x_lim[1], self.N)
y = np.linspace(y_lim[0], y_lim[1], self.N)
xv, yv = np.meshgrid(x, y)
np.random.seed(0)
# add 10% noise to the amplitude
noise = np.random.rand(self.N, self.N) - 0.5
data = model(xv, yv) + 0.1 * parameters[0] * noise
new_model = fitter(model, xv, yv, data)
params = [getattr(new_model, name) for name in new_model.param_names]
fixed = [param.fixed for param in params]
expected = np.array([val for val, fixed in zip(parameters, fixed) if not fixed])
fitted = np.array([param.value for param in params if not param.fixed])
assert_allclose(fitted, expected, atol=self.fit_error)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_deriv_2D(self, model_class, test_parameters, fitter):
"""
Test the derivative of a model by fitting it once with the analytical
derivative and once with an estimated derivative, then comparing the two.
"""
fitter = fitter()
x_lim = test_parameters["x_lim"]
y_lim = test_parameters["y_lim"]
if model_class.fit_deriv is None or issubclass(model_class, PolynomialBase):
return
if "log_fit" in test_parameters:
if test_parameters["log_fit"]:
x = np.logspace(x_lim[0], x_lim[1], self.N)
y = np.logspace(y_lim[0], y_lim[1], self.M)
x_test = np.logspace(x_lim[0], x_lim[1], self.N * 10)
y_test = np.logspace(y_lim[0], y_lim[1], self.M * 10)
else:
x = np.linspace(x_lim[0], x_lim[1], self.N)
y = np.linspace(y_lim[0], y_lim[1], self.M)
x_test = np.linspace(x_lim[0], x_lim[1], self.N * 10)
y_test = np.linspace(y_lim[0], y_lim[1], self.M * 10)
xv, yv = np.meshgrid(x, y)
xv_test, yv_test = np.meshgrid(x_test, y_test)
try:
model_with_deriv = create_model(
model_class,
test_parameters,
use_constraints=False,
parameter_key="deriv_initial",
)
model_no_deriv = create_model(
model_class,
test_parameters,
use_constraints=False,
parameter_key="deriv_initial",
)
model = create_model(
model_class,
test_parameters,
use_constraints=False,
parameter_key="deriv_initial",
)
except KeyError:
model_with_deriv = create_model(
model_class, test_parameters, use_constraints=False
)
model_no_deriv = create_model(
model_class, test_parameters, use_constraints=False
)
model = create_model(model_class, test_parameters, use_constraints=False)
# add 10% noise to the amplitude
rsn = np.random.default_rng(0)
amplitude = test_parameters["parameters"][0]
n = 0.1 * amplitude * (rsn.random((self.M, self.N)) - 0.5)
data = model(xv, yv) + n
fitter_with_deriv = fitter
new_model_with_deriv = fitter_with_deriv(model_with_deriv, xv, yv, data)
fitter_no_deriv = fitter
new_model_no_deriv = fitter_no_deriv(
model_no_deriv, xv, yv, data, estimate_jacobian=True
)
assert_allclose(
new_model_with_deriv(xv_test, yv_test),
new_model_no_deriv(xv_test, yv_test),
rtol=1e-2,
)
if model_class != Gaussian2D:
assert_allclose(
new_model_with_deriv.parameters, new_model_no_deriv.parameters, rtol=0.1
)
class Fittable1DModelTester:
"""
Test class for all one dimensional parametric models.
Test values have to be defined in example_models.py. It currently tests the
model with different input types, evaluates the model at different
positions, and ensures that it gives the correct values there. It also
tests that the model works with non-linear fitters.
This can be used as a base class for user defined model testing.
"""
# These models will fail the fitting test, because the built-in fitting
# data will produce non-finite values
_non_finite_models = [
BrokenPowerLaw1D,
ExponentialCutoffPowerLaw1D,
LogParabola1D,
PowerLaw1D,
SmoothlyBrokenPowerLaw1D,
]
def setup_class(self):
self.N = 100
self.M = 100
self.eval_error = 0.0001
self.fit_error = 0.11
self.x = 5.3
self.y = 6.7
self.x1 = np.arange(1, 10, 0.1)
self.y1 = np.arange(1, 10, 0.1)
self.y2, self.x2 = np.mgrid[:10, :8]
@pytest.mark.filterwarnings(r"ignore:.*:RuntimeWarning")
def test_input1D(self, model_class, test_parameters):
"""Test model with different input types."""
model = create_model(model_class, test_parameters)
model(self.x)
model(self.x1)
model(self.x2)
def test_eval1D(self, model_class, test_parameters):
"""
Test model values at certain given points
"""
model = create_model(model_class, test_parameters)
x = test_parameters["x_values"]
y = test_parameters["y_values"]
assert_allclose(model(x), y, atol=self.eval_error)
def test_bounding_box1D(self, model_class, test_parameters):
"""Test bounding box evaluation"""
model = create_model(model_class, test_parameters)
# testing setter
model.bounding_box = (-5, 5)
model.bounding_box = None
MESSAGE = r"No bounding box is defined for this model .*"
with pytest.raises(NotImplementedError, match=MESSAGE):
model.bounding_box
del model.bounding_box
# test exception if dimensions don't match
MESSAGE = r"An interval must be some sort of sequence of length 2"
with pytest.raises(ValueError, match=MESSAGE):
model.bounding_box = 5
try:
bbox = model.bounding_box.bounding_box()
except NotImplementedError:
return
ddx = 0.01
x1 = np.arange(bbox[0], bbox[1], ddx)
x2 = np.concatenate(
(
[bbox[0] - idx * ddx for idx in range(10, 0, -1)],
x1,
[bbox[1] + idx * ddx for idx in range(1, 10)],
)
)
inside_bbox = model(x1)
outside_bbox = model(x2, with_bounding_box=True)
outside_bbox = outside_bbox[~np.isnan(outside_bbox)]
assert np.all(inside_bbox == outside_bbox)
def test_bounding_box1D_peak(self, model_class, test_parameters):
if not test_parameters.pop("bbox_peak", False):
return
model = create_model(model_class, test_parameters)
bbox = model.bounding_box
if isinstance(model, (models.Lorentz1D, models.Drude1D)):
rtol = 0.01 # 1% agreement is enough due to very extended wings
ddx = 0.1 # Finer sampling to "integrate" flux for narrow peak
else:
rtol = 1e-7
ddx = 1
if isinstance(bbox, ModelBoundingBox):
bbox = bbox.bounding_box()
dx = np.diff(bbox) / 2
x1 = np.mgrid[slice(bbox[0], bbox[1] + 1, ddx)]
x2 = np.mgrid[slice(bbox[0] - dx, bbox[1] + dx + 1, ddx)]
arr = model(x2)
sub_arr = model(x1)
# check for flux agreement
assert abs(arr.sum() - sub_arr.sum()) < arr.sum() * rtol
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_fitter1D(self, model_class, test_parameters, fitter):
"""
Test if the parametric model works with the fitter.
"""
SCIPY_LT_1_6 = not minversion("scipy", "1.6")
if (
model_class == models.BrokenPowerLaw1D
and fitter == fitting.TRFLSQFitter
and SCIPY_LT_1_6
):
pytest.xfail(reason="TRF fitter fails for BrokenPowerLaw1D in scipy < 1.6")
fitter = fitter()
x_lim = test_parameters["x_lim"]
parameters = test_parameters["parameters"]
model = create_model(model_class, test_parameters)
if isinstance(parameters, dict):
parameters = [parameters[name] for name in model.param_names]
if "log_fit" in test_parameters:
if test_parameters["log_fit"]:
x = np.logspace(x_lim[0], x_lim[1], self.N)
else:
x = np.linspace(x_lim[0], x_lim[1], self.N)
np.random.seed(0)
# add 1% relative noise to the data
relative_noise_amplitude = 0.01
data = (1 + relative_noise_amplitude * np.random.randn(len(x))) * model(x)
new_model = fitter(model, x, data)
# Only check parameters that were free in the fit
params = [getattr(new_model, name) for name in new_model.param_names]
fixed = [param.fixed for param in params]
expected = np.array([val for val, fixed in zip(parameters, fixed) if not fixed])
fitted = np.array([param.value for param in params if not param.fixed])
assert_allclose(fitted, expected, atol=self.fit_error)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.filterwarnings(r"ignore:.*:RuntimeWarning")
@pytest.mark.parametrize("fitter", fitters)
def test_deriv_1D(self, model_class, test_parameters, fitter):
"""
Test the derivative of a model by comparing results with an estimated
derivative.
"""
fitter = fitter()
if model_class in self._non_finite_models:
return
x_lim = test_parameters["x_lim"]
if model_class.fit_deriv is None or issubclass(model_class, PolynomialBase):
return
if "log_fit" in test_parameters:
if test_parameters["log_fit"]:
x = np.logspace(x_lim[0], x_lim[1], self.N)
else:
x = np.linspace(x_lim[0], x_lim[1], self.N)
parameters = test_parameters["parameters"]
model_with_deriv = create_model(
model_class, test_parameters, use_constraints=False
)
model_no_deriv = create_model(
model_class, test_parameters, use_constraints=False
)
# NOTE: PR 10644 replaced deprecated usage of RandomState but could not
# find a new seed that did not cause test failure, resorted to hardcoding.
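# (Judging by the variable name below, these look like the first 100 draws
# from the legacy np.random.RandomState(1234567890).rand(); that provenance
# is an assumption inferred from the name, not something verified here.)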
# add 10% noise to the amplitude
# fmt: off
rsn_rand_1234567890 = np.array(
[
0.61879477, 0.59162363, 0.88868359, 0.89165480, 0.45756748,
0.77818808, 0.26706377, 0.99610621, 0.54009489, 0.53752161,
0.40099938, 0.70540579, 0.40518559, 0.94999075, 0.03075388,
0.13602495, 0.08297726, 0.42352224, 0.23449723, 0.74743526,
0.65177865, 0.68998682, 0.16413419, 0.87642114, 0.44733314,
0.57871104, 0.52377835, 0.62689056, 0.34869427, 0.26209748,
0.07498055, 0.17940570, 0.82999425, 0.98759822, 0.11326099,
0.63846415, 0.73056694, 0.88321124, 0.52721004, 0.66487673,
0.74209309, 0.94083846, 0.70123128, 0.29534353, 0.76134369,
0.77593881, 0.36985514, 0.89519067, 0.33082813, 0.86108824,
0.76897859, 0.61343376, 0.43870907, 0.91913538, 0.76958966,
0.51063556, 0.04443249, 0.57463611, 0.31382006, 0.41221713,
0.21531811, 0.03237521, 0.04166386, 0.73109303, 0.74556052,
0.64716325, 0.77575353, 0.64599254, 0.16885816, 0.48485480,
0.53844248, 0.99690349, 0.23657074, 0.04119088, 0.46501519,
0.35739006, 0.23002665, 0.53420791, 0.71639475, 0.81857486,
0.73994342, 0.07948837, 0.75688276, 0.13240193, 0.48465576,
0.20624753, 0.02298276, 0.54257873, 0.68123230, 0.35887468,
0.36296147, 0.67368397, 0.29505730, 0.66558885, 0.93652252,
0.36755130, 0.91787687, 0.75922703, 0.48668067, 0.45967890
]
)
# fmt: on
n = 0.1 * parameters[0] * (rsn_rand_1234567890 - 0.5)
data = model_with_deriv(x) + n
fitter_with_deriv = fitter
new_model_with_deriv = fitter_with_deriv(model_with_deriv, x, data)
fitter_no_deriv = fitter
new_model_no_deriv = fitter_no_deriv(
model_no_deriv, x, data, estimate_jacobian=True
)
assert_allclose(
new_model_with_deriv.parameters, new_model_no_deriv.parameters, atol=0.15
)
def create_model(
model_class, test_parameters, use_constraints=True, parameter_key="parameters"
):
"""Create instance of model class."""
constraints = {}
if issubclass(model_class, PolynomialBase):
return model_class(**test_parameters[parameter_key])
elif issubclass(model_class, FittableModel):
if "requires_scipy" in test_parameters and not HAS_SCIPY:
pytest.skip("SciPy not found")
if use_constraints:
if "constraints" in test_parameters:
constraints = test_parameters["constraints"]
return model_class(*test_parameters[parameter_key], **constraints)
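# A minimal usage sketch for create_model; the parameter values below are
# illustrative assumptions, not canonical entries from example_models.py:
#
#     gauss = create_model(models.Gaussian1D, {"parameters": [1.0, 0.0, 1.0]})
#     # -> Gaussian1D(amplitude=1.0, mean=0.0, stddev=1.0)
#
#     poly = create_model(models.Polynomial1D, {"parameters": {"degree": 2}})
#     # Polynomial classes receive the parameter dict as keyword arguments.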
@pytest.mark.filterwarnings(r"ignore:Model is linear in parameters.*")
@pytest.mark.filterwarnings(r"ignore:The fit may be unsuccessful.*")
@pytest.mark.parametrize(
("model_class", "test_parameters"),
sorted(models_1D.items(), key=lambda x: str(x[0])),
)
class TestFittable1DModels(Fittable1DModelTester):
pass
@pytest.mark.filterwarnings(r"ignore:Model is linear in parameters.*")
@pytest.mark.parametrize(
("model_class", "test_parameters"),
sorted(models_2D.items(), key=lambda x: str(x[0])),
)
class TestFittable2DModels(Fittable2DModelTester):
pass
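# As the tester docstrings note, these classes can also back user-defined
# model tests. A minimal sketch (the model choice and test values here are
# illustrative assumptions, mirroring the entries in example_models.py):
#
#     my_models_1D = {
#         models.Gaussian1D: {
#             "parameters": [1.0, 0.0, 1.0],
#             "x_values": [0.0],
#             "y_values": [1.0],
#             "x_lim": [-10, 10],
#         },
#     }
#
#     @pytest.mark.parametrize(
#         ("model_class", "test_parameters"),
#         sorted(my_models_1D.items(), key=lambda x: str(x[0])),
#     )
#     class TestMyFittable1DModels(Fittable1DModelTester):
#         pass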
def test_ShiftModel():
# Shift by a scalar
m = models.Shift(42)
assert m(0) == 42
assert_equal(m([1, 2]), [43, 44])
# Shift by a list
m = models.Shift([42, 43], n_models=2)
assert_equal(m(0), [42, 43])
assert_equal(m([1, 2], model_set_axis=False), [[43, 44], [44, 45]])
def test_ScaleModel():
# Scale by a scalar
m = models.Scale(42)
assert m(0) == 0
assert_equal(m([1, 2]), [42, 84])
# Scale by a list
m = models.Scale([42, 43], n_models=2)
assert_equal(m(0), [0, 0])
assert_equal(m([1, 2], model_set_axis=False), [[42, 84], [43, 86]])
def test_voigt_model():
"""
Currently just tests that the model peaks at its center, x_0.
Regression test for https://github.com/astropy/astropy/issues/3942
"""
m = models.Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9)
x = np.arange(0, 10, 0.01)
y = m(x)
assert y[500] == y.max() # y[500] is right at the center
def test_model_instance_repr():
m = models.Gaussian1D(1.5, 2.5, 3.5)
assert repr(m) == "<Gaussian1D(amplitude=1.5, mean=2.5, stddev=3.5)>"
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_interp_1d():
"""
Test Tabular1D model.
"""
points = np.arange(0, 5)
values = [1.0, 10, 2, 45, -3]
LookupTable = models.tabular_model(1)
model = LookupTable(points=points, lookup_table=values)
xnew = [0.0, 0.7, 1.4, 2.1, 3.9]
ans1 = [1.0, 7.3, 6.8, 6.3, 1.8]
assert_allclose(model(xnew), ans1)
# Test evaluate without passing `points`.
model = LookupTable(lookup_table=values)
assert_allclose(model(xnew), ans1)
# Test bounds error.
xextrap = [0.0, 0.7, 1.4, 2.1, 3.9, 4.1]
MESSAGE = r"One of the requested xi is out of bounds in dimension 0"
with pytest.raises(ValueError, match=MESSAGE):
model(xextrap)
# test extrapolation and fill value
model = LookupTable(lookup_table=values, bounds_error=False, fill_value=None)
assert_allclose(model(xextrap), [1.0, 7.3, 6.8, 6.3, 1.8, -7.8])
# Test unit support
xnew = xnew * u.nm
ans1 = ans1 * u.nJy
model = LookupTable(points=points * u.nm, lookup_table=values * u.nJy)
assert_quantity_allclose(model(xnew), ans1)
assert_quantity_allclose(model(xnew.to(u.nm)), ans1)
assert model.bounding_box == (0 * u.nm, 4 * u.nm)
# Test fill value unit conversion and unitless input on table with unit
model = LookupTable(
[1, 2, 3],
[10, 20, 30] * u.nJy,
bounds_error=False,
fill_value=1e-33 * (u.W / (u.m * u.m * u.Hz)),
)
assert_quantity_allclose(model(np.arange(5)), [100, 10, 20, 30, 100] * u.nJy)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_interp_2d():
table = np.array(
[
[-0.04614432, -0.02512547, -0.00619557, 0.0144165, 0.0297525],
[-0.04510594, -0.03183369, -0.01118008, 0.01201388, 0.02496205],
[-0.05464094, -0.02804499, -0.00960086, 0.01134333, 0.02284104],
[-0.04879338, -0.02539565, -0.00440462, 0.01795145, 0.02122417],
[-0.03637372, -0.01630025, -0.00157902, 0.01649774, 0.01952131],
]
)
points = np.arange(0, 5)
points = (points, points)
xnew = np.array([0.0, 0.7, 1.4, 2.1, 3.9])
LookupTable = models.tabular_model(2)
model = LookupTable(points, table)
znew = model(xnew, xnew)
result = np.array([-0.04614432, -0.03450009, -0.02241028, -0.0069727, 0.01938675])
assert_allclose(znew, result, atol=1e-7)
# test 2D arrays as input
a = np.arange(12).reshape((3, 4))
y, x = np.mgrid[:3, :4]
t = models.Tabular2D(lookup_table=a)
r = t(y, x)
assert_allclose(a, r)
MESSAGE = r"Only n_models=1 is supported"
with pytest.raises(NotImplementedError, match=MESSAGE):
model = LookupTable(n_models=2)
MESSAGE = r"Must provide a lookup table"
with pytest.raises(ValueError, match=MESSAGE):
model = LookupTable(points=([1.2, 2.3], [1.2, 6.7], [3, 4]))
MESSAGE = r"lookup_table should be an array with 2 dimensions"
with pytest.raises(ValueError, match=MESSAGE):
model = LookupTable(lookup_table=[1, 2, 3])
MESSAGE = r"lookup_table should be an array with 2 dimensions"
with pytest.raises(ValueError, match=MESSAGE):
model = LookupTable(([1, 2], [3, 4]), [5, 6])
MESSAGE = r"points must all have the same unit"
with pytest.raises(ValueError, match=MESSAGE):
model = LookupTable(([1, 2] * u.m, [3, 4]), [[5, 6], [7, 8]])
MESSAGE = r"fill value is in Jy but expected to be unitless"
with pytest.raises(ValueError, match=MESSAGE):
model = LookupTable(points, table, bounds_error=False, fill_value=1 * u.Jy)
# Test unit support
points = points[0] * u.nm
points = (points, points)
xnew = xnew * u.nm
model = LookupTable(points, table * u.nJy)
result = result * u.nJy
assert_quantity_allclose(model(xnew, xnew), result, atol=1e-7 * u.nJy)
xnew = xnew.to(u.m)
assert_quantity_allclose(model(xnew, xnew), result, atol=1e-7 * u.nJy)
bbox = (0 * u.nm, 4 * u.nm)
bbox = (bbox, bbox)
assert model.bounding_box == bbox
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_nd():
a = np.arange(24).reshape((2, 3, 4))
x, y, z = np.mgrid[:2, :3, :4]
tab = models.tabular_model(3)
t = tab(lookup_table=a)
result = t(x, y, z)
assert_allclose(a, result)
MESSAGE = r"Lookup table must have at least one dimension"
with pytest.raises(ValueError, match=MESSAGE):
models.tabular_model(0)
def test_with_bounding_box():
"""
Test the option to evaluate a model respecting
its bounding_box.
"""
p = models.Polynomial2D(2) & models.Polynomial2D(2)
m = models.Mapping((0, 1, 0, 1)) | p
with NumpyRNGContext(1234567):
m.parameters = np.random.rand(12)
m.bounding_box = ((3, 9), (1, 8))
x, y = np.mgrid[:10, :10]
a, b = m(x, y)
aw, bw = m(x, y, with_bounding_box=True)
ind = (~np.isnan(aw)).nonzero()
assert_allclose(a[ind], aw[ind])
assert_allclose(b[ind], bw[ind])
aw, bw = m(x, y, with_bounding_box=True, fill_value=1000)
ind = (aw != 1000).nonzero()
assert_allclose(a[ind], aw[ind])
assert_allclose(b[ind], bw[ind])
# test the order of bbox is not reversed for 1D models
p = models.Polynomial1D(1, c0=12, c1=2.3)
p.bounding_box = (0, 5)
assert p(1) == p(1, with_bounding_box=True)
t3 = models.Shift(10) & models.Scale(2) & models.Shift(-1)
t3.bounding_box = ((4.3, 6.9), (6, 15), (-1, 10))
assert_allclose(
t3([1, 1], [7, 7], [3, 5], with_bounding_box=True),
[[np.nan, 11], [np.nan, 14], [np.nan, 4]],
)
trans3 = models.Shift(10) & models.Scale(2) & models.Shift(-1)
trans3.bounding_box = ((4.3, 6.9), (6, 15), (-1, 10))
assert_allclose(trans3(1, 7, 5, with_bounding_box=True), [11, 14, 4])
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_with_bounding_box():
points = np.arange(5)
values = np.array([1.5, 3.4, 6.7, 7, 32])
t = models.Tabular1D(points, values)
result = t(1, with_bounding_box=True)
assert result == 3.4
assert t.inverse(result, with_bounding_box=True) == 1.0
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_bounding_box_with_units():
points = np.arange(5) * u.pix
lt = np.arange(5) * u.AA
t = models.Tabular1D(points, lt)
result = t(1 * u.pix, with_bounding_box=True)
assert result == 1.0 * u.AA
assert t.inverse(result, with_bounding_box=True) == 1 * u.pix
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular1d_inverse():
"""Test that the Tabular1D inverse is defined"""
points = np.arange(5)
values = np.array([1.5, 3.4, 6.7, 7, 32])
t = models.Tabular1D(points, values)
result = t.inverse((3.4, 6.7))
assert_allclose(result, np.array((1.0, 2.0)))
# Check that it works for descending values in lookup_table
t2 = models.Tabular1D(points, values[::-1])
assert_allclose(t2.inverse.points[0], t2.lookup_table[::-1])
result2 = t2.inverse((7, 6.7))
assert_allclose(result2, np.array((1.0, 2.0)))
# Check that it errors on double-valued lookup_table
points = np.arange(5)
values = np.array([1.5, 3.4, 3.4, 32, 25])
t = models.Tabular1D(points, values)
with pytest.raises(NotImplementedError, match=r""):
t.inverse((3.4, 7.0))
# Check that Tabular2D.inverse raises an error
table = np.arange(5 * 5).reshape(5, 5)
points = np.arange(0, 5)
points = (points, points)
t3 = models.Tabular2D(points=points, lookup_table=table)
with pytest.raises(NotImplementedError, match=r""):
t3.inverse((3, 3))
# Check that it uses the same kwargs as the original model
points = np.arange(5)
values = np.array([1.5, 3.4, 6.7, 7, 32])
t = models.Tabular1D(points, values)
MESSAGE = r"One of the requested xi is out of bounds in dimension 0"
with pytest.raises(ValueError, match=MESSAGE):
t.inverse(100)
t = models.Tabular1D(points, values, bounds_error=False, fill_value=None)
result = t.inverse(100)
assert_allclose(t(result), 100)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_grid_shape_mismatch_error():
points = np.arange(5)
lt = np.mgrid[0:5, 0:5][0]
MESSAGE = r"Expected grid points in 2 directions, got 5."
with pytest.raises(ValueError, match=MESSAGE):
models.Tabular2D(points, lt)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_repr():
points = np.arange(5)
lt = np.arange(5)
t = models.Tabular1D(points, lt)
assert (
repr(t)
== "<Tabular1D(points=(array([0, 1, 2, 3, 4]),), lookup_table=[0 1 2 3 4])>"
)
table = np.arange(5 * 5).reshape(5, 5)
points = np.arange(0, 5)
points = (points, points)
t = models.Tabular2D(points=points, lookup_table=table)
assert (
repr(t)
== "<Tabular2D(points=(array([0, 1, 2, 3, 4]), array([0, 1, 2, 3, 4])), "
"lookup_table=[[ 0 1 2 3 4]\n"
" [ 5 6 7 8 9]\n"
" [10 11 12 13 14]\n"
" [15 16 17 18 19]\n"
" [20 21 22 23 24]])>"
)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_str():
points = np.arange(5)
lt = np.arange(5)
t = models.Tabular1D(points, lt)
assert (
str(t) == "Model: Tabular1D\n"
"N_inputs: 1\n"
"N_outputs: 1\n"
"Parameters: \n"
" points: (array([0, 1, 2, 3, 4]),)\n"
" lookup_table: [0 1 2 3 4]\n"
" method: linear\n"
" fill_value: nan\n"
" bounds_error: True"
)
table = np.arange(5 * 5).reshape(5, 5)
points = np.arange(0, 5)
points = (points, points)
t = models.Tabular2D(points=points, lookup_table=table)
assert (
str(t) == "Model: Tabular2D\n"
"N_inputs: 2\n"
"N_outputs: 1\n"
"Parameters: \n"
" points: (array([0, 1, 2, 3, 4]), array([0, 1, 2, 3, 4]))\n"
" lookup_table: [[ 0 1 2 3 4]\n"
" [ 5 6 7 8 9]\n"
" [10 11 12 13 14]\n"
" [15 16 17 18 19]\n"
" [20 21 22 23 24]]\n"
" method: linear\n"
" fill_value: nan\n"
" bounds_error: True"
)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_evaluate():
points = np.arange(5)
lt = np.arange(5)[::-1]
t = models.Tabular1D(points, lt)
assert (t.evaluate([1, 2, 3]) == [3, 2, 1]).all()
assert (t.evaluate(np.array([1, 2, 3]) * u.m) == [3, 2, 1]).all()
t.n_outputs = 2
value = [np.array([3, 2, 1]), np.array([1, 2, 3])]
with mk.patch.object(
tabular_models, "interpn", autospec=True, return_value=value
) as mkInterpn:
outputs = t.evaluate([1, 2, 3])
for index, output in enumerate(outputs):
assert np.all(value[index] == output)
assert mkInterpn.call_count == 1
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_module_name():
"""
The module name must be set manually because
these classes are created dynamically.
"""
for model in [models.Tabular1D, models.Tabular2D]:
assert model.__module__ == "astropy.modeling.tabular"
class classmodel(FittableModel):
f = Parameter(default=1)
x = Parameter(default=0)
y = Parameter(default=2)
def __init__(self, f=f.default, x=x.default, y=y.default):
super().__init__(f, x, y)
def evaluate(self):
pass
class subclassmodel(classmodel):
f = Parameter(default=3, fixed=True)
x = Parameter(default=10)
y = Parameter(default=12)
h = Parameter(default=5)
def __init__(self, f=f.default, x=x.default, y=y.default, h=h.default):
super().__init__(f, x, y)
def evaluate(self):
pass
def test_parameter_inheritance():
b = subclassmodel()
assert b.param_names == ("f", "x", "y", "h")
assert b.h == 5
assert b.f == 3
assert b.f.fixed == True # noqa: E712
def test_parameter_description():
model = models.Gaussian1D(1.5, 2.5, 3.5)
assert model.amplitude._description == "Amplitude (peak value) of the Gaussian"
assert model.mean._description == "Position of peak (Gaussian)"
model = models.Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9)
assert model.amplitude_L._description == "The Lorentzian amplitude"
assert model.fwhm_L._description == "The Lorentzian full width at half maximum"
assert model.fwhm_G._description == "The Gaussian full width at half maximum"
def test_SmoothlyBrokenPowerLaw1D_validators():
MESSAGE = r"amplitude parameter must be > 0"
with pytest.raises(InputParameterError, match=MESSAGE):
SmoothlyBrokenPowerLaw1D(amplitude=-1)
MESSAGE = r"delta parameter must be >= 0.001"
with pytest.raises(InputParameterError, match=MESSAGE):
SmoothlyBrokenPowerLaw1D(delta=0)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.filterwarnings(r"ignore:.*:RuntimeWarning")
@pytest.mark.filterwarnings(r"ignore:The fit may be unsuccessful.*")
def test_SmoothlyBrokenPowerLaw1D_fit_deriv():
x_lim = [0.01, 100]
x = np.logspace(x_lim[0], x_lim[1], 100)
parameters = {
"parameters": [1, 10, -2, 2, 0.5],
"constraints": {"fixed": {"x_break": True, "delta": True}},
}
model_with_deriv = create_model(
SmoothlyBrokenPowerLaw1D, parameters, use_constraints=False
)
model_no_deriv = create_model(
SmoothlyBrokenPowerLaw1D, parameters, use_constraints=False
)
# NOTE: PR 10644 replaced deprecated usage of RandomState but could not
# find a new seed that did not cause test failure, resorted to hardcoding.
# add 10% noise to the amplitude
# fmt: off
rsn_rand_1234567890 = np.array(
[
0.61879477, 0.59162363, 0.88868359, 0.89165480, 0.45756748,
0.77818808, 0.26706377, 0.99610621, 0.54009489, 0.53752161,
0.40099938, 0.70540579, 0.40518559, 0.94999075, 0.03075388,
0.13602495, 0.08297726, 0.42352224, 0.23449723, 0.74743526,
0.65177865, 0.68998682, 0.16413419, 0.87642114, 0.44733314,
0.57871104, 0.52377835, 0.62689056, 0.34869427, 0.26209748,
0.07498055, 0.17940570, 0.82999425, 0.98759822, 0.11326099,
0.63846415, 0.73056694, 0.88321124, 0.52721004, 0.66487673,
0.74209309, 0.94083846, 0.70123128, 0.29534353, 0.76134369,
0.77593881, 0.36985514, 0.89519067, 0.33082813, 0.86108824,
0.76897859, 0.61343376, 0.43870907, 0.91913538, 0.76958966,
0.51063556, 0.04443249, 0.57463611, 0.31382006, 0.41221713,
0.21531811, 0.03237521, 0.04166386, 0.73109303, 0.74556052,
0.64716325, 0.77575353, 0.64599254, 0.16885816, 0.48485480,
0.53844248, 0.99690349, 0.23657074, 0.04119088, 0.46501519,
0.35739006, 0.23002665, 0.53420791, 0.71639475, 0.81857486,
0.73994342, 0.07948837, 0.75688276, 0.13240193, 0.48465576,
0.20624753, 0.02298276, 0.54257873, 0.68123230, 0.35887468,
0.36296147, 0.67368397, 0.29505730, 0.66558885, 0.93652252,
0.36755130, 0.91787687, 0.75922703, 0.48668067, 0.45967890
]
)
# fmt: on
n = 0.1 * parameters["parameters"][0] * (rsn_rand_1234567890 - 0.5)
data = model_with_deriv(x) + n
fitter_with_deriv = fitting.LevMarLSQFitter()
new_model_with_deriv = fitter_with_deriv(model_with_deriv, x, data)
fitter_no_deriv = fitting.LevMarLSQFitter()
new_model_no_deriv = fitter_no_deriv(
model_no_deriv, x, data, estimate_jacobian=True
)
assert_allclose(
new_model_with_deriv.parameters, new_model_no_deriv.parameters, atol=0.5
)
class _ExtendedModelMeta(_ModelMeta):
@classmethod
def __prepare__(mcls, name, bases, **kwds):
# this shows the parent class machinery still applies
namespace = super().__prepare__(name, bases, **kwds)
# the custom bit
namespace.update(kwds)
return namespace
def test_metaclass_kwargs():
"""Test can pass kwargs to Models"""
class ClassModel(FittableModel, flag="flag"):
def evaluate(self):
pass
# Nothing further to test; just creating the class is good enough.
def test_submetaclass_kwargs():
"""Test can pass kwargs to Model subclasses."""
class ClassModel(FittableModel, metaclass=_ExtendedModelMeta, flag="flag"):
def evaluate(self):
pass
assert ClassModel.flag == "flag"
class ModelDefault(Model):
slope = Parameter()
intercept = Parameter()
_separable = False
@staticmethod
def evaluate(x, slope, intercept):
return slope * x + intercept
class ModelCustom(ModelDefault):
def _calculate_separability_matrix(self):
return np.array([[0]])
def test_custom_separability_matrix():
original = separability_matrix(ModelDefault(slope=1, intercept=2))
assert original.all()
custom = separability_matrix(ModelCustom(slope=1, intercept=2))
assert not custom.any()
|
cd34904d3309991e4dcf6209687d5eb3d1a272893e29d14954e821f303472dfb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import unittest.mock as mk
import numpy as np
import pytest
import astropy.units as u
from astropy.coordinates import SpectralCoord
from astropy.modeling.bounding_box import (
CompoundBoundingBox,
ModelBoundingBox,
_BaseInterval,
_BaseSelectorArgument,
_BoundingDomain,
_ignored_interval,
_Interval,
_SelectorArgument,
_SelectorArguments,
)
from astropy.modeling.core import Model, fix_inputs
from astropy.modeling.models import (
Gaussian1D,
Gaussian2D,
Identity,
Polynomial2D,
Scale,
Shift,
)
class Test_Interval:
def test_create(self):
lower = mk.MagicMock()
upper = mk.MagicMock()
interval = _Interval(lower, upper)
assert isinstance(interval, _BaseInterval)
assert interval.lower == lower
assert interval.upper == upper
assert interval == (lower, upper)
assert interval.__repr__() == f"Interval(lower={lower}, upper={upper})"
def test_copy(self):
interval = _Interval(0.5, 1.5)
copy = interval.copy()
assert interval == copy
assert id(interval) != id(copy)
# Same float values will have the same id
assert interval.lower == copy.lower
assert id(interval.lower) == id(copy.lower)
# Same float values will have the same id
assert interval.upper == copy.upper
assert id(interval.upper) == id(copy.upper)
def test__validate_shape(self):
MESSAGE = r"An interval must be some sort of sequence of length 2"
lower = mk.MagicMock()
upper = mk.MagicMock()
interval = _Interval(lower, upper)
# Passes (2,)
interval._validate_shape((1, 2))
interval._validate_shape([1, 2])
interval._validate_shape((1 * u.m, 2 * u.m))
interval._validate_shape([1 * u.m, 2 * u.m])
# Passes (1, 2)
interval._validate_shape(((1, 2),))
interval._validate_shape(([1, 2],))
interval._validate_shape([(1, 2)])
interval._validate_shape([[1, 2]])
interval._validate_shape(((1 * u.m, 2 * u.m),))
interval._validate_shape(([1 * u.m, 2 * u.m],))
interval._validate_shape([(1 * u.m, 2 * u.m)])
interval._validate_shape([[1 * u.m, 2 * u.m]])
# Passes (2, 0)
interval._validate_shape((mk.MagicMock(), mk.MagicMock()))
interval._validate_shape([mk.MagicMock(), mk.MagicMock()])
# Passes with array inputs:
interval._validate_shape((np.array([-2.5, -3.5]), np.array([2.5, 3.5])))
interval._validate_shape(
(np.array([-2.5, -3.5, -4.5]), np.array([2.5, 3.5, 4.5]))
)
# Fails shape (no units)
with pytest.raises(ValueError, match=MESSAGE):
interval._validate_shape((1, 2, 3))
with pytest.raises(ValueError, match=MESSAGE):
interval._validate_shape([1, 2, 3])
with pytest.raises(ValueError, match=MESSAGE):
interval._validate_shape([[1, 2, 3], [4, 5, 6]])
with pytest.raises(ValueError, match=MESSAGE):
interval._validate_shape(1)
# Fails shape (units)
with pytest.raises(ValueError, match=MESSAGE):
interval._validate_shape((1 * u.m, 2 * u.m, 3 * u.m))
with pytest.raises(ValueError, match=MESSAGE):
interval._validate_shape([1 * u.m, 2 * u.m, 3 * u.m])
with pytest.raises(ValueError, match=MESSAGE):
interval._validate_shape(
[[1 * u.m, 2 * u.m, 3 * u.m], [4 * u.m, 5 * u.m, 6 * u.m]]
)
with pytest.raises(ValueError, match=MESSAGE):
interval._validate_shape(1 * u.m)
# Fails shape (arrays):
with pytest.raises(ValueError, match=MESSAGE):
interval._validate_shape(
(np.array([-2.5, -3.5]), np.array([2.5, 3.5]), np.array([3, 4]))
)
with pytest.raises(ValueError, match=MESSAGE):
interval._validate_shape((np.array([-2.5, -3.5]), [2.5, 3.5]))
def test__validate_bounds(self):
# Passes
assert _Interval._validate_bounds(1, 2) == (1, 2)
assert _Interval._validate_bounds(1 * u.m, 2 * u.m) == (1 * u.m, 2 * u.m)
interval = _Interval._validate_bounds(
np.array([-2.5, -3.5]), np.array([2.5, 3.5])
)
assert (interval.lower == np.array([-2.5, -3.5])).all()
assert (interval.upper == np.array([2.5, 3.5])).all()
# Fails
with pytest.warns(
RuntimeWarning,
match=r"Invalid interval: upper bound 1 is strictly "
r"less than lower bound 2\.",
):
_Interval._validate_bounds(2, 1)
with pytest.warns(
RuntimeWarning,
match=r"Invalid interval: upper bound 1\.0 m is strictly "
r"less than lower bound 2\.0 m\.",
):
_Interval._validate_bounds(2 * u.m, 1 * u.m)
def test_validate(self):
# Passes
assert _Interval.validate((1, 2)) == (1, 2)
assert _Interval.validate([1, 2]) == (1, 2)
assert _Interval.validate((1 * u.m, 2 * u.m)) == (1 * u.m, 2 * u.m)
assert _Interval.validate([1 * u.m, 2 * u.m]) == (1 * u.m, 2 * u.m)
assert _Interval.validate(((1, 2),)) == (1, 2)
assert _Interval.validate(([1, 2],)) == (1, 2)
assert _Interval.validate([(1, 2)]) == (1, 2)
assert _Interval.validate([[1, 2]]) == (1, 2)
assert _Interval.validate(((1 * u.m, 2 * u.m),)) == (1 * u.m, 2 * u.m)
assert _Interval.validate(([1 * u.m, 2 * u.m],)) == (1 * u.m, 2 * u.m)
assert _Interval.validate([(1 * u.m, 2 * u.m)]) == (1 * u.m, 2 * u.m)
assert _Interval.validate([[1 * u.m, 2 * u.m]]) == (1 * u.m, 2 * u.m)
interval = _Interval.validate((np.array([-2.5, -3.5]), np.array([2.5, 3.5])))
assert (interval.lower == np.array([-2.5, -3.5])).all()
assert (interval.upper == np.array([2.5, 3.5])).all()
interval = _Interval.validate(
(np.array([-2.5, -3.5, -4.5]), np.array([2.5, 3.5, 4.5]))
)
assert (interval.lower == np.array([-2.5, -3.5, -4.5])).all()
assert (interval.upper == np.array([2.5, 3.5, 4.5])).all()
# Fail shape
MESSAGE = r"An interval must be some sort of sequence of length 2"
with pytest.raises(ValueError, match=MESSAGE):
_Interval.validate((1, 2, 3))
# Fail bounds
with pytest.warns(RuntimeWarning):
_Interval.validate((2, 1))
def test_outside(self):
interval = _Interval.validate((0, 1))
# fmt: off
assert (
interval.outside(np.linspace(-1, 2, 13))
== [
True, True, True, True,
False, False, False, False, False,
True, True, True, True
]
).all()
# fmt: on
def test_domain(self):
interval = _Interval.validate((0, 1))
assert (interval.domain(0.25) == np.linspace(0, 1, 5)).all()
def test__ignored_interval(self):
assert _ignored_interval.lower == -np.inf
assert _ignored_interval.upper == np.inf
for num in [0, -1, -100, 3.14, 10**100, -(10**100)]:
assert not num < _ignored_interval[0]
assert num > _ignored_interval[0]
assert not num > _ignored_interval[1]
assert num < _ignored_interval[1]
assert not (_ignored_interval.outside(np.array([num]))).all()
def test_validate_with_SpectralCoord(self):
"""Regression test for issue #12439"""
lower = SpectralCoord(1, u.um)
upper = SpectralCoord(10, u.um)
interval = _Interval.validate((lower, upper))
assert interval.lower == lower
assert interval.upper == upper
class Test_BoundingDomain:
def setup_method(self):
class BoundingDomain(_BoundingDomain):
def fix_inputs(self, model, fix_inputs):
super().fix_inputs(model, fixed_inputs=fix_inputs)
def prepare_inputs(self, input_shape, inputs):
super().prepare_inputs(input_shape, inputs)
self.BoundingDomain = BoundingDomain
def test_create(self):
model = mk.MagicMock()
bounding_box = self.BoundingDomain(model)
assert bounding_box._model == model
assert bounding_box._ignored == []
assert bounding_box._order == "C"
bounding_box = self.BoundingDomain(model, order="F")
assert bounding_box._model == model
assert bounding_box._ignored == []
assert bounding_box._order == "F"
bounding_box = self.BoundingDomain(Gaussian2D(), ["x"])
assert bounding_box._ignored == [0]
assert bounding_box._order == "C"
# Error
MESSAGE = r"order must be either 'C' .* or 'F' .*, got: .*"
with pytest.raises(ValueError, match=MESSAGE):
self.BoundingDomain(model, order=mk.MagicMock())
def test_model(self):
model = mk.MagicMock()
bounding_box = self.BoundingDomain(model)
assert bounding_box._model == model
assert bounding_box.model == model
def test_order(self):
bounding_box = self.BoundingDomain(mk.MagicMock(), order="C")
assert bounding_box._order == "C"
assert bounding_box.order == "C"
bounding_box = self.BoundingDomain(mk.MagicMock(), order="F")
assert bounding_box._order == "F"
assert bounding_box.order == "F"
bounding_box._order = "test"
assert bounding_box.order == "test"
def test_ignored(self):
ignored = [0]
model = mk.MagicMock()
model.n_inputs = 1
model.inputs = ["x"]
bounding_box = self.BoundingDomain(model, ignored=ignored)
assert bounding_box._ignored == ignored
assert bounding_box.ignored == ignored
def test__get_order(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
# Success (default 'C')
assert bounding_box._order == "C"
assert bounding_box._get_order() == "C"
assert bounding_box._get_order("C") == "C"
assert bounding_box._get_order("F") == "F"
# Success (default 'F')
bounding_box._order = "F"
assert bounding_box._order == "F"
assert bounding_box._get_order() == "F"
assert bounding_box._get_order("C") == "C"
assert bounding_box._get_order("F") == "F"
# Error
MESSAGE = r"order must be either 'C' .* or 'F' .*, got: .*"
with pytest.raises(ValueError, match=MESSAGE):
bounding_box._get_order(mk.MagicMock())
def test__get_index(self):
bounding_box = self.BoundingDomain(Gaussian2D())
# Pass input name
assert bounding_box._get_index("x") == 0
assert bounding_box._get_index("y") == 1
# Pass invalid input name
MESSAGE = r"'z' is not one of the inputs: .*"
with pytest.raises(ValueError, match=MESSAGE):
bounding_box._get_index("z")
# Pass valid index
assert bounding_box._get_index(0) == 0
assert bounding_box._get_index(1) == 1
assert bounding_box._get_index(np.int32(0)) == 0
assert bounding_box._get_index(np.int32(1)) == 1
assert bounding_box._get_index(np.int64(0)) == 0
assert bounding_box._get_index(np.int64(1)) == 1
# Pass invalid index
MESSAGE = r"Integer key: .* must be non-negative and < 2"
with pytest.raises(IndexError, match=MESSAGE):
bounding_box._get_index(2)
with pytest.raises(IndexError, match=MESSAGE):
bounding_box._get_index(np.int32(2))
with pytest.raises(IndexError, match=MESSAGE):
bounding_box._get_index(np.int64(2))
with pytest.raises(IndexError, match=MESSAGE):
bounding_box._get_index(-1)
# Pass invalid key
MESSAGE = r"Key value: .* must be string or integer"
with pytest.raises(ValueError, match=MESSAGE):
bounding_box._get_index(mk.MagicMock())
def test__get_name(self):
model = mk.MagicMock()
model.n_inputs = 1
model.inputs = ["x"]
bounding_box = self.BoundingDomain(model)
index = mk.MagicMock()
name = mk.MagicMock()
model.inputs = mk.MagicMock()
model.inputs.__getitem__.return_value = name
assert bounding_box._get_name(index) == name
assert model.inputs.__getitem__.call_args_list == [mk.call(index)]
def test_ignored_inputs(self):
model = mk.MagicMock()
ignored = list(range(4, 8))
model.n_inputs = 8
model.inputs = [mk.MagicMock() for _ in range(8)]
bounding_box = self.BoundingDomain(model, ignored=ignored)
inputs = bounding_box.ignored_inputs
assert isinstance(inputs, list)
for index, _input in enumerate(inputs):
assert _input in model.inputs
assert model.inputs[index + 4] == _input
for index, _input in enumerate(model.inputs):
if _input in inputs:
assert inputs[index - 4] == _input
else:
assert index < 4
def test__validate_ignored(self):
bounding_box = self.BoundingDomain(Gaussian2D())
# Pass
assert bounding_box._validate_ignored(None) == []
assert bounding_box._validate_ignored(["x", "y"]) == [0, 1]
assert bounding_box._validate_ignored([0, 1]) == [0, 1]
assert bounding_box._validate_ignored([np.int32(0), np.int64(1)]) == [0, 1]
# Fail
with pytest.raises(
ValueError, match=r"Key value: .* must be string or integer"
):
bounding_box._validate_ignored([mk.MagicMock()])
with pytest.raises(ValueError, match=r"'.*' is not one of the inputs: .*"):
bounding_box._validate_ignored(["z"])
MESSAGE = r"Integer key: 3 must be non-negative and < 2"
with pytest.raises(IndexError, match=MESSAGE):
bounding_box._validate_ignored([3])
with pytest.raises(IndexError, match=MESSAGE):
bounding_box._validate_ignored([np.int32(3)])
with pytest.raises(IndexError, match=MESSAGE):
bounding_box._validate_ignored([np.int64(3)])
def test___call__(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
args = tuple(mk.MagicMock() for _ in range(3))
kwargs = {f"test{idx}": mk.MagicMock() for idx in range(3)}
MESSAGE = (
r"This bounding box is fixed by the model and does not have adjustable"
r" parameters"
)
with pytest.raises(RuntimeError, match=MESSAGE):
bounding_box(*args, **kwargs)
def test_fix_inputs(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
model = mk.MagicMock()
fixed_inputs = mk.MagicMock()
with pytest.raises(
NotImplementedError, match=r"This should be implemented by a child class"
):
bounding_box.fix_inputs(model, fixed_inputs)
def test__prepare_inputs(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
with pytest.raises(
NotImplementedError,
match=r"This has not been implemented for BoundingDomain",
):
bounding_box.prepare_inputs(mk.MagicMock(), mk.MagicMock())
def test__base_output(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
# Simple shape
input_shape = (13,)
output = bounding_box._base_output(input_shape, 0)
assert (output == 0).all()
assert output.shape == input_shape
output = bounding_box._base_output(input_shape, np.nan)
assert (np.isnan(output)).all()
assert output.shape == input_shape
output = bounding_box._base_output(input_shape, 14)
assert (output == 14).all()
assert output.shape == input_shape
# Complex shape
input_shape = (13, 7)
output = bounding_box._base_output(input_shape, 0)
assert (output == 0).all()
assert output.shape == input_shape
output = bounding_box._base_output(input_shape, np.nan)
assert (np.isnan(output)).all()
assert output.shape == input_shape
output = bounding_box._base_output(input_shape, 14)
assert (output == 14).all()
assert output.shape == input_shape
def test__all_out_output(self):
model = mk.MagicMock()
bounding_box = self.BoundingDomain(model)
# Simple shape
model.n_outputs = 1
input_shape = (13,)
output, output_unit = bounding_box._all_out_output(input_shape, 0)
assert (np.array(output) == 0).all()
assert np.array(output).shape == (1, 13)
assert output_unit is None
# Complex shape
model.n_outputs = 6
input_shape = (13, 7)
output, output_unit = bounding_box._all_out_output(input_shape, 0)
assert (np.array(output) == 0).all()
assert np.array(output).shape == (6, 13, 7)
assert output_unit is None
def test__modify_output(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
valid_index = mk.MagicMock()
input_shape = mk.MagicMock()
fill_value = mk.MagicMock()
# Simple shape
with mk.patch.object(
_BoundingDomain,
"_base_output",
autospec=True,
return_value=np.asanyarray(0),
) as mkBase:
assert (
np.array([1, 2, 3])
== bounding_box._modify_output(
[1, 2, 3], valid_index, input_shape, fill_value
)
).all()
assert mkBase.call_args_list == [mk.call(input_shape, fill_value)]
# Replacement
with mk.patch.object(
_BoundingDomain,
"_base_output",
autospec=True,
return_value=np.array([1, 2, 3, 4, 5, 6]),
) as mkBase:
assert (
np.array([7, 2, 8, 4, 9, 6])
== bounding_box._modify_output(
[7, 8, 9], np.array([[0, 2, 4]]), input_shape, fill_value
)
).all()
assert mkBase.call_args_list == [mk.call(input_shape, fill_value)]
def test__prepare_outputs(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
valid_index = mk.MagicMock()
input_shape = mk.MagicMock()
fill_value = mk.MagicMock()
valid_outputs = [mk.MagicMock() for _ in range(3)]
effects = [mk.MagicMock() for _ in range(3)]
with mk.patch.object(
_BoundingDomain, "_modify_output", autospec=True, side_effect=effects
) as mkModify:
assert effects == bounding_box._prepare_outputs(
valid_outputs, valid_index, input_shape, fill_value
)
assert mkModify.call_args_list == [
mk.call(
bounding_box,
valid_outputs[idx],
valid_index,
input_shape,
fill_value,
)
for idx in range(3)
]
def test_prepare_outputs(self):
model = mk.MagicMock()
bounding_box = self.BoundingDomain(model)
valid_outputs = mk.MagicMock()
valid_index = mk.MagicMock()
input_shape = mk.MagicMock()
fill_value = mk.MagicMock()
with mk.patch.object(
_BoundingDomain, "_prepare_outputs", autospec=True
) as mkPrepare:
# Reshape valid_outputs
model.n_outputs = 1
assert mkPrepare.return_value == bounding_box.prepare_outputs(
valid_outputs, valid_index, input_shape, fill_value
)
assert mkPrepare.call_args_list == [
mk.call(
bounding_box, [valid_outputs], valid_index, input_shape, fill_value
)
]
mkPrepare.reset_mock()
# No reshape valid_outputs
model.n_outputs = 2
assert mkPrepare.return_value == bounding_box.prepare_outputs(
valid_outputs, valid_index, input_shape, fill_value
)
assert mkPrepare.call_args_list == [
mk.call(
bounding_box, valid_outputs, valid_index, input_shape, fill_value
)
]
def test__get_valid_outputs_unit(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
# Don't get unit
assert bounding_box._get_valid_outputs_unit(mk.MagicMock(), False) is None
# Get unit from unitless
assert bounding_box._get_valid_outputs_unit(7, True) is None
# Get unit
assert bounding_box._get_valid_outputs_unit(25 * u.m, True) == u.m
def test__evaluate_model(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
evaluate = mk.MagicMock()
valid_inputs = mk.MagicMock()
input_shape = mk.MagicMock()
valid_index = mk.MagicMock()
fill_value = mk.MagicMock()
with_units = mk.MagicMock()
with mk.patch.object(
_BoundingDomain, "_get_valid_outputs_unit", autospec=True
) as mkGet:
with mk.patch.object(
_BoundingDomain, "prepare_outputs", autospec=True
) as mkPrepare:
assert bounding_box._evaluate_model(
evaluate,
valid_inputs,
valid_index,
input_shape,
fill_value,
with_units,
) == (mkPrepare.return_value, mkGet.return_value)
assert mkPrepare.call_args_list == [
mk.call(
bounding_box,
evaluate.return_value,
valid_index,
input_shape,
fill_value,
)
]
assert mkGet.call_args_list == [
mk.call(evaluate.return_value, with_units)
]
assert evaluate.call_args_list == [mk.call(valid_inputs)]
def test__evaluate(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
evaluate = mk.MagicMock()
inputs = mk.MagicMock()
input_shape = mk.MagicMock()
fill_value = mk.MagicMock()
with_units = mk.MagicMock()
valid_inputs = mk.MagicMock()
valid_index = mk.MagicMock()
effects = [
(valid_inputs, valid_index, True),
(valid_inputs, valid_index, False),
]
with mk.patch.object(
self.BoundingDomain, "prepare_inputs", autospec=True, side_effect=effects
) as mkPrepare:
with mk.patch.object(
_BoundingDomain, "_all_out_output", autospec=True
) as mkAll:
with mk.patch.object(
_BoundingDomain, "_evaluate_model", autospec=True
) as mkEvaluate:
# all_out
assert (
bounding_box._evaluate(
evaluate, inputs, input_shape, fill_value, with_units
)
== mkAll.return_value
)
assert mkAll.call_args_list == [
mk.call(bounding_box, input_shape, fill_value)
]
assert mkEvaluate.call_args_list == []
assert mkPrepare.call_args_list == [
mk.call(bounding_box, input_shape, inputs)
]
mkAll.reset_mock()
mkPrepare.reset_mock()
# not all_out
assert (
bounding_box._evaluate(
evaluate, inputs, input_shape, fill_value, with_units
)
== mkEvaluate.return_value
)
assert mkAll.call_args_list == []
assert mkEvaluate.call_args_list == [
mk.call(
bounding_box,
evaluate,
valid_inputs,
valid_index,
input_shape,
fill_value,
with_units,
)
]
assert mkPrepare.call_args_list == [
mk.call(bounding_box, input_shape, inputs)
]
def test__set_outputs_unit(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
# set no unit
assert bounding_box._set_outputs_unit(27, None) == 27
# set unit
assert bounding_box._set_outputs_unit(27, u.m) == 27 * u.m
def test_evaluate(self):
bounding_box = self.BoundingDomain(Gaussian2D())
evaluate = mk.MagicMock()
inputs = mk.MagicMock()
fill_value = mk.MagicMock()
outputs = mk.MagicMock()
valid_outputs_unit = mk.MagicMock()
value = (outputs, valid_outputs_unit)
with mk.patch.object(
_BoundingDomain, "_evaluate", autospec=True, return_value=value
) as mkEvaluate:
with mk.patch.object(
_BoundingDomain, "_set_outputs_unit", autospec=True
) as mkSet:
with mk.patch.object(Model, "input_shape", autospec=True) as mkShape:
with mk.patch.object(
Model, "bbox_with_units", new_callable=mk.PropertyMock
) as mkUnits:
assert tuple(mkSet.return_value) == bounding_box.evaluate(
evaluate, inputs, fill_value
)
assert mkSet.call_args_list == [
mk.call(outputs, valid_outputs_unit)
]
assert mkEvaluate.call_args_list == [
mk.call(
bounding_box,
evaluate,
inputs,
mkShape.return_value,
fill_value,
mkUnits.return_value,
)
]
assert mkShape.call_args_list == [
mk.call(bounding_box._model, inputs)
]
assert mkUnits.call_args_list == [mk.call()]
class TestModelBoundingBox:
def test_create(self):
intervals = ()
model = mk.MagicMock()
bounding_box = ModelBoundingBox(intervals, model)
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {}
assert bounding_box._model == model
assert bounding_box._ignored == []
assert bounding_box._order == "C"
# Set optional
intervals = {}
model = mk.MagicMock()
bounding_box = ModelBoundingBox(intervals, model, order="F")
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {}
assert bounding_box._model == model
assert bounding_box._ignored == []
assert bounding_box._order == "F"
# Set interval
intervals = (1, 2)
model = mk.MagicMock()
model.n_inputs = 1
model.inputs = ["x"]
bounding_box = ModelBoundingBox(intervals, model)
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {0: (1, 2)}
assert bounding_box._model == model
# Set ignored
intervals = (1, 2)
model = mk.MagicMock()
model.n_inputs = 2
model.inputs = ["x", "y"]
bounding_box = ModelBoundingBox(intervals, model, ignored=[1])
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {0: (1, 2)}
assert bounding_box._model == model
assert bounding_box._ignored == [1]
intervals = ((1, 2), (3, 4))
model = mk.MagicMock()
model.n_inputs = 3
model.inputs = ["x", "y", "z"]
bounding_box = ModelBoundingBox(intervals, model, ignored=[2], order="F")
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {0: (1, 2), 1: (3, 4)}
assert bounding_box._model == model
assert bounding_box._ignored == [2]
assert bounding_box._order == "F"
def test_copy(self):
bounding_box = ModelBoundingBox.validate(
Gaussian2D(), ((-4.5, 4.5), (-1.4, 1.4))
)
copy = bounding_box.copy()
assert bounding_box == copy
assert id(bounding_box) != id(copy)
assert bounding_box.ignored == copy.ignored
assert id(bounding_box.ignored) != id(copy.ignored)
# model is not copied to prevent infinite recursion
assert bounding_box._model == copy._model
assert id(bounding_box._model) == id(copy._model)
# Same string values will have the same id
assert bounding_box._order == copy._order
assert id(bounding_box._order) == id(copy._order)
# Check interval objects
for index, interval in bounding_box.intervals.items():
assert interval == copy.intervals[index]
assert id(interval) != id(copy.intervals[index])
# Same float values will have the same id
assert interval.lower == copy.intervals[index].lower
assert id(interval.lower) == id(copy.intervals[index].lower)
# Same float values will have the same id
assert interval.upper == copy.intervals[index].upper
assert id(interval.upper) == id(copy.intervals[index].upper)
assert len(bounding_box.intervals) == len(copy.intervals)
assert bounding_box.intervals.keys() == copy.intervals.keys()
def test_intervals(self):
intervals = {0: _Interval(1, 2)}
model = mk.MagicMock()
model.n_inputs = 1
model.inputs = ["x"]
bounding_box = ModelBoundingBox(intervals, model)
assert bounding_box._intervals == intervals
assert bounding_box.intervals == intervals
def test_named_intervals(self):
intervals = {idx: _Interval(idx, idx + 1) for idx in range(4)}
model = mk.MagicMock()
model.n_inputs = 4
model.inputs = [mk.MagicMock() for _ in range(4)]
bounding_box = ModelBoundingBox(intervals, model)
named = bounding_box.named_intervals
assert isinstance(named, dict)
for name, interval in named.items():
assert name in model.inputs
assert intervals[model.inputs.index(name)] == interval
for index, name in enumerate(model.inputs):
assert index in intervals
assert name in named
assert intervals[index] == named[name]
def test___repr__(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert (
bounding_box.__repr__() == "ModelBoundingBox(\n"
" intervals={\n"
" x: Interval(lower=-1, upper=1)\n"
" y: Interval(lower=-4, upper=4)\n"
" }\n"
" model=Gaussian2D(inputs=('x', 'y'))\n"
" order='C'\n"
")"
)
intervals = {0: _Interval(-1, 1)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals, ignored=["y"])
assert (
bounding_box.__repr__() == "ModelBoundingBox(\n"
" intervals={\n"
" x: Interval(lower=-1, upper=1)\n"
" }\n"
" ignored=['y']\n"
" model=Gaussian2D(inputs=('x', 'y'))\n"
" order='C'\n"
")"
)
def test___len__(self):
intervals = {0: _Interval(-1, 1)}
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert len(bounding_box) == 1 == len(bounding_box._intervals)
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert len(bounding_box) == 2 == len(bounding_box._intervals)
bounding_box._intervals = {}
assert len(bounding_box) == 0 == len(bounding_box._intervals)
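
    # ``in`` accepts both input names and integer indices; unrelated objects
    # are never contained.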
def test___contains__(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Contains with keys
assert "x" in bounding_box
assert "y" in bounding_box
assert "z" not in bounding_box
# Contains with index
assert 0 in bounding_box
assert 1 in bounding_box
assert 2 not in bounding_box
# General not in
assert mk.MagicMock() not in bounding_box
        # Ignored inputs still count as contained
        del bounding_box["y"]
        # Contains with keys
assert "x" in bounding_box
assert "y" in bounding_box
assert "z" not in bounding_box
# Contains with index
assert 0 in bounding_box
assert 1 in bounding_box
assert 2 not in bounding_box
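
    # Item access works by input name, Python int, or numpy integer; deleted
    # (ignored) inputs fall back to ``_ignored_interval``.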
def test___getitem__(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Get using input key
assert bounding_box["x"] == (-1, 1)
assert bounding_box["y"] == (-4, 4)
# Fail with input key
with pytest.raises(ValueError, match=r"'.*' is not one of the inputs: .*"):
bounding_box["z"]
# Get using index
assert bounding_box[0] == (-1, 1)
assert bounding_box[1] == (-4, 4)
assert bounding_box[np.int32(0)] == (-1, 1)
assert bounding_box[np.int32(1)] == (-4, 4)
assert bounding_box[np.int64(0)] == (-1, 1)
assert bounding_box[np.int64(1)] == (-4, 4)
# Fail with index
MESSAGE = r"Integer key: 2 must be non-negative and < 2"
with pytest.raises(IndexError, match=MESSAGE):
bounding_box[2]
with pytest.raises(IndexError, match=MESSAGE):
bounding_box[np.int32(2)]
with pytest.raises(IndexError, match=MESSAGE):
bounding_box[np.int64(2)]
# get ignored interval
del bounding_box[0]
assert bounding_box[0] == _ignored_interval
assert bounding_box[1] == (-4, 4)
del bounding_box[1]
assert bounding_box[0] == _ignored_interval
assert bounding_box[1] == _ignored_interval
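
    # bounding_box() returns plain tuples; for multi-input models the order
    # argument selects 'C' (reversed) or 'F' (input-order) layout.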
def test_bounding_box(self):
# 0D
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model, {}, ignored=["x"])
assert bounding_box.bounding_box() == (-np.inf, np.inf)
assert bounding_box.bounding_box("C") == (-np.inf, np.inf)
assert bounding_box.bounding_box("F") == (-np.inf, np.inf)
# 1D
intervals = {0: _Interval(-1, 1)}
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box.bounding_box() == (-1, 1)
assert bounding_box.bounding_box(mk.MagicMock()) == (-1, 1)
# > 1D
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box.bounding_box() == ((-4, 4), (-1, 1))
assert bounding_box.bounding_box("C") == ((-4, 4), (-1, 1))
assert bounding_box.bounding_box("F") == ((-1, 1), (-4, 4))
def test___eq__(self):
intervals = {0: _Interval(-1, 1)}
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model.copy(), intervals.copy())
assert bounding_box == bounding_box
assert bounding_box == ModelBoundingBox.validate(model.copy(), intervals.copy())
assert bounding_box == (-1, 1)
assert not (bounding_box == mk.MagicMock())
assert not (bounding_box == (-2, 2))
assert not (
bounding_box == ModelBoundingBox.validate(model, {0: _Interval(-2, 2)})
)
# Respect ordering
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box_1 = ModelBoundingBox.validate(model, intervals)
bounding_box_2 = ModelBoundingBox.validate(model, intervals, order="F")
assert bounding_box_1._order == "C"
assert bounding_box_1 == ((-4, 4), (-1, 1))
assert not (bounding_box_1 == ((-1, 1), (-4, 4)))
assert bounding_box_2._order == "F"
assert not (bounding_box_2 == ((-4, 4), (-1, 1)))
assert bounding_box_2 == ((-1, 1), (-4, 4))
assert bounding_box_1 == bounding_box_2
# Respect ignored
model = Gaussian2D()
bounding_box_1._ignored = [mk.MagicMock()]
bounding_box_2._ignored = [mk.MagicMock()]
assert bounding_box_1._ignored != bounding_box_2._ignored
assert not (bounding_box_1 == bounding_box_2)
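
    # Intervals can be set by name or index, from _Interval instances or
    # plain tuples; setting one un-ignores the input, and model sets accept
    # array-valued bounds.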
def test__setitem__(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, {}, ignored=[0, 1])
assert bounding_box._ignored == [0, 1]
# USING Intervals directly
# Set interval using key
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
bounding_box["x"] = _Interval(-1, 1)
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert isinstance(bounding_box["x"], _Interval)
assert bounding_box["x"] == (-1, 1)
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
bounding_box["y"] = _Interval(-4, 4)
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert isinstance(bounding_box["y"], _Interval)
assert bounding_box["y"] == (-4, 4)
del bounding_box["x"]
del bounding_box["y"]
# Set interval using index
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
bounding_box[0] = _Interval(-1, 1)
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert isinstance(bounding_box[0], _Interval)
assert bounding_box[0] == (-1, 1)
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
bounding_box[1] = _Interval(-4, 4)
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert isinstance(bounding_box[1], _Interval)
assert bounding_box[1] == (-4, 4)
del bounding_box[0]
del bounding_box[1]
# USING tuples
# Set interval using key
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
bounding_box["x"] = (-1, 1)
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert isinstance(bounding_box["x"], _Interval)
assert bounding_box["x"] == (-1, 1)
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
bounding_box["y"] = (-4, 4)
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert isinstance(bounding_box["y"], _Interval)
assert bounding_box["y"] == (-4, 4)
del bounding_box["x"]
del bounding_box["y"]
# Set interval using index
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
bounding_box[0] = (-1, 1)
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert isinstance(bounding_box[0], _Interval)
assert bounding_box[0] == (-1, 1)
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
bounding_box[1] = (-4, 4)
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert isinstance(bounding_box[1], _Interval)
assert bounding_box[1] == (-4, 4)
# Model set support
model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2)
bounding_box = ModelBoundingBox({}, model)
# USING Intervals directly
# Set interval using key
assert "x" not in bounding_box
bounding_box["x"] = _Interval(np.array([-1, -2]), np.array([1, 2]))
assert "x" in bounding_box
assert isinstance(bounding_box["x"], _Interval)
assert (bounding_box["x"].lower == np.array([-1, -2])).all()
assert (bounding_box["x"].upper == np.array([1, 2])).all()
# Set interval using index
bounding_box._intervals = {}
assert 0 not in bounding_box
bounding_box[0] = _Interval(np.array([-1, -2]), np.array([1, 2]))
assert 0 in bounding_box
assert isinstance(bounding_box[0], _Interval)
assert (bounding_box[0].lower == np.array([-1, -2])).all()
assert (bounding_box[0].upper == np.array([1, 2])).all()
# USING tuples
# Set interval using key
bounding_box._intervals = {}
assert "x" not in bounding_box
bounding_box["x"] = (np.array([-1, -2]), np.array([1, 2]))
assert "x" in bounding_box
assert isinstance(bounding_box["x"], _Interval)
assert (bounding_box["x"].lower == np.array([-1, -2])).all()
assert (bounding_box["x"].upper == np.array([1, 2])).all()
# Set interval using index
bounding_box._intervals = {}
assert 0 not in bounding_box
bounding_box[0] = (np.array([-1, -2]), np.array([1, 2]))
assert 0 in bounding_box
assert isinstance(bounding_box[0], _Interval)
assert (bounding_box[0].lower == np.array([-1, -2])).all()
assert (bounding_box[0].upper == np.array([1, 2])).all()
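
    # Deleting an interval moves the input onto the ignored list; deleting
    # an already-ignored input raises.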
def test___delitem__(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Using index
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert 0 in bounding_box
assert "x" in bounding_box
del bounding_box[0]
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
assert 0 in bounding_box
assert "x" in bounding_box
# Delete an ignored item
with pytest.raises(RuntimeError, match=r"Cannot delete ignored input: 0!"):
del bounding_box[0]
# Using key
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert 0 in bounding_box
assert "y" in bounding_box
del bounding_box["y"]
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
assert 0 in bounding_box
assert "y" in bounding_box
# Delete an ignored item
with pytest.raises(RuntimeError, match=r"Cannot delete ignored input: y!"):
del bounding_box["y"]
def test__validate_dict(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox({}, model)
# Input name keys
intervals = {"x": _Interval(-1, 1), "y": _Interval(-4, 4)}
assert "x" not in bounding_box
assert "y" not in bounding_box
bounding_box._validate_dict(intervals)
assert "x" in bounding_box
assert bounding_box["x"] == (-1, 1)
assert "y" in bounding_box
assert bounding_box["y"] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Input index
bounding_box._intervals = {}
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
assert 0 not in bounding_box
assert 1 not in bounding_box
bounding_box._validate_dict(intervals)
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Model set support
model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2)
bounding_box = ModelBoundingBox({}, model)
# name keys
intervals = {"x": _Interval(np.array([-1, -2]), np.array([1, 2]))}
assert "x" not in bounding_box
bounding_box._validate_dict(intervals)
assert "x" in bounding_box
assert (bounding_box["x"].lower == np.array([-1, -2])).all()
assert (bounding_box["x"].upper == np.array([1, 2])).all()
# input index
bounding_box._intervals = {}
intervals = {0: _Interval(np.array([-1, -2]), np.array([1, 2]))}
assert 0 not in bounding_box
bounding_box._validate_dict(intervals)
assert 0 in bounding_box
assert (bounding_box[0].lower == np.array([-1, -2])).all()
assert (bounding_box[0].upper == np.array([1, 2])).all()
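
    # Sequences are read according to the order argument ('C' by default);
    # any other order raises a ValueError.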
def test__validate_sequence(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox({}, model)
# Default order
assert "x" not in bounding_box
assert "y" not in bounding_box
bounding_box._validate_sequence(((-4, 4), (-1, 1)))
assert "x" in bounding_box
assert bounding_box["x"] == (-1, 1)
assert "y" in bounding_box
assert bounding_box["y"] == (-4, 4)
assert len(bounding_box.intervals) == 2
# C order
bounding_box._intervals = {}
assert "x" not in bounding_box
assert "y" not in bounding_box
bounding_box._validate_sequence(((-4, 4), (-1, 1)), order="C")
assert "x" in bounding_box
assert bounding_box["x"] == (-1, 1)
assert "y" in bounding_box
assert bounding_box["y"] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Fortran order
bounding_box._intervals = {}
assert "x" not in bounding_box
assert "y" not in bounding_box
bounding_box._validate_sequence(((-4, 4), (-1, 1)), order="F")
assert "x" in bounding_box
assert bounding_box["x"] == (-4, 4)
assert "y" in bounding_box
assert bounding_box["y"] == (-1, 1)
assert len(bounding_box.intervals) == 2
# Invalid order
bounding_box._intervals = {}
assert "x" not in bounding_box
assert "y" not in bounding_box
MESSAGE = r"order must be either 'C' .* or 'F' .*, got: .*"
with pytest.raises(ValueError, match=MESSAGE):
bounding_box._validate_sequence(((-4, 4), (-1, 1)), order=mk.MagicMock())
assert "x" not in bounding_box
assert "y" not in bounding_box
assert len(bounding_box.intervals) == 0
def test__n_inputs(self):
model = Gaussian2D()
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box._n_inputs == 2
intervals = {0: _Interval(-1, 1)}
bounding_box = ModelBoundingBox.validate(model, intervals, ignored=["y"])
assert bounding_box._n_inputs == 1
bounding_box = ModelBoundingBox.validate(model, {}, ignored=["x", "y"])
assert bounding_box._n_inputs == 0
bounding_box._ignored = ["x", "y", "z"]
assert bounding_box._n_inputs == 0
def test__validate_iterable(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox({}, model)
# Pass sequence Default order
assert "x" not in bounding_box
assert "y" not in bounding_box
bounding_box._validate_iterable(((-4, 4), (-1, 1)))
assert "x" in bounding_box
assert bounding_box["x"] == (-1, 1)
assert "y" in bounding_box
assert bounding_box["y"] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Pass sequence
bounding_box._intervals = {}
assert "x" not in bounding_box
assert "y" not in bounding_box
bounding_box._validate_iterable(((-4, 4), (-1, 1)), order="F")
assert "x" in bounding_box
assert bounding_box["x"] == (-4, 4)
assert "y" in bounding_box
assert bounding_box["y"] == (-1, 1)
assert len(bounding_box.intervals) == 2
# Pass Dict
bounding_box._intervals = {}
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
assert 0 not in bounding_box
assert 1 not in bounding_box
bounding_box._validate_iterable(intervals)
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Pass with ignored
bounding_box._intervals = {}
bounding_box._ignored = [1]
intervals = {0: _Interval(-1, 1)}
assert 0 not in bounding_box.intervals
bounding_box._validate_iterable(intervals)
assert 0 in bounding_box.intervals
assert bounding_box[0] == (-1, 1)
# Invalid iterable
MESSAGE = "Found {} intervals, but must have exactly {}"
bounding_box._intervals = {}
bounding_box._ignored = []
assert "x" not in bounding_box
assert "y" not in bounding_box
with pytest.raises(ValueError, match=MESSAGE.format(3, 2)):
bounding_box._validate_iterable(((-4, 4), (-1, 1), (-3, 3)))
assert len(bounding_box.intervals) == 0
assert "x" not in bounding_box
assert "y" not in bounding_box
bounding_box._ignored = [1]
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
with pytest.raises(ValueError, match=MESSAGE.format(2, 1)):
bounding_box._validate_iterable(intervals)
assert len(bounding_box.intervals) == 0
bounding_box._ignored = []
intervals = {0: _Interval(-1, 1)}
with pytest.raises(ValueError, match=MESSAGE.format(1, 2)):
bounding_box._validate_iterable(intervals)
assert "x" not in bounding_box
assert "y" not in bounding_box
assert len(bounding_box.intervals) == 0
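
    # _validate dispatches to the dict/sequence validators and supports the
    # bare single-interval shortcut for 1-input models.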
def test__validate(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox({}, model)
# Pass sequence Default order
assert "x" not in bounding_box
assert "y" not in bounding_box
bounding_box._validate(((-4, 4), (-1, 1)))
assert "x" in bounding_box
assert bounding_box["x"] == (-1, 1)
assert "y" in bounding_box
assert bounding_box["y"] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Pass sequence
bounding_box._intervals = {}
assert "x" not in bounding_box
assert "y" not in bounding_box
bounding_box._validate(((-4, 4), (-1, 1)), order="F")
assert "x" in bounding_box
assert bounding_box["x"] == (-4, 4)
assert "y" in bounding_box
assert bounding_box["y"] == (-1, 1)
assert len(bounding_box.intervals) == 2
# Pass Dict
bounding_box._intervals = {}
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
assert "x" not in bounding_box
assert "y" not in bounding_box
bounding_box._validate(intervals)
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Pass single with ignored
intervals = {0: _Interval(-1, 1)}
bounding_box = ModelBoundingBox({}, model, ignored=[1])
assert 0 not in bounding_box.intervals
assert 1 not in bounding_box.intervals
bounding_box._validate(intervals)
assert 0 in bounding_box.intervals
assert bounding_box[0] == (-1, 1)
assert 1 not in bounding_box.intervals
assert len(bounding_box.intervals) == 1
# Pass single
model = Gaussian1D()
bounding_box = ModelBoundingBox({}, model)
assert "x" not in bounding_box
bounding_box._validate((-1, 1))
assert "x" in bounding_box
assert bounding_box["x"] == (-1, 1)
assert len(bounding_box.intervals) == 1
# Model set support
model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2)
bounding_box = ModelBoundingBox({}, model)
sequence = (np.array([-1, -2]), np.array([1, 2]))
assert "x" not in bounding_box
bounding_box._validate(sequence)
assert "x" in bounding_box
assert (bounding_box["x"].lower == np.array([-1, -2])).all()
assert (bounding_box["x"].upper == np.array([1, 2])).all()
def test_validate(self):
model = Gaussian2D()
kwargs = {"test": mk.MagicMock()}
# Pass sequence Default order
bounding_box = ModelBoundingBox.validate(model, ((-4, 4), (-1, 1)), **kwargs)
assert (bounding_box._model.parameters == model.parameters).all()
assert "x" in bounding_box
assert bounding_box["x"] == (-1, 1)
assert "y" in bounding_box
assert bounding_box["y"] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Pass sequence
bounding_box = ModelBoundingBox.validate(
model, ((-4, 4), (-1, 1)), order="F", **kwargs
)
assert (bounding_box._model.parameters == model.parameters).all()
assert "x" in bounding_box
assert bounding_box["x"] == (-4, 4)
assert "y" in bounding_box
assert bounding_box["y"] == (-1, 1)
assert len(bounding_box.intervals) == 2
# Pass Dict
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
bounding_box = ModelBoundingBox.validate(model, intervals, order="F", **kwargs)
assert (bounding_box._model.parameters == model.parameters).all()
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
assert bounding_box.order == "F"
# Pass ModelBoundingBox
bbox = bounding_box
bounding_box = ModelBoundingBox.validate(model, bbox, **kwargs)
assert (bounding_box._model.parameters == model.parameters).all()
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
assert bounding_box.order == "F"
# Pass single ignored
intervals = {0: _Interval(-1, 1)}
bounding_box = ModelBoundingBox.validate(
model, intervals, ignored=["y"], **kwargs
)
assert (bounding_box._model.parameters == model.parameters).all()
assert "x" in bounding_box
assert bounding_box["x"] == (-1, 1)
assert "y" in bounding_box
assert bounding_box["y"] == _ignored_interval
assert len(bounding_box.intervals) == 1
# Pass single
bounding_box = ModelBoundingBox.validate(Gaussian1D(), (-1, 1), **kwargs)
assert (bounding_box._model.parameters == Gaussian1D().parameters).all()
assert "x" in bounding_box
assert bounding_box["x"] == (-1, 1)
assert len(bounding_box.intervals) == 1
# Model set support
model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2)
sequence = (np.array([-1, -2]), np.array([1, 2]))
bounding_box = ModelBoundingBox.validate(model, sequence, **kwargs)
assert "x" in bounding_box
assert (bounding_box["x"].lower == np.array([-1, -2])).all()
assert (bounding_box["x"].upper == np.array([1, 2])).all()
def test_fix_inputs(self):
bounding_box = ModelBoundingBox.validate(Gaussian2D(), ((-4, 4), (-1, 1)))
# keep_ignored = False (default)
new_bounding_box = bounding_box.fix_inputs(Gaussian1D(), {1: mk.MagicMock()})
assert not (bounding_box == new_bounding_box)
assert (new_bounding_box._model.parameters == Gaussian1D().parameters).all()
assert "x" in new_bounding_box
assert new_bounding_box["x"] == (-1, 1)
assert "y" not in new_bounding_box
assert len(new_bounding_box.intervals) == 1
assert new_bounding_box.ignored == []
# keep_ignored = True
new_bounding_box = bounding_box.fix_inputs(
Gaussian2D(), {1: mk.MagicMock()}, _keep_ignored=True
)
assert not (bounding_box == new_bounding_box)
assert (new_bounding_box._model.parameters == Gaussian2D().parameters).all()
assert "x" in new_bounding_box
assert new_bounding_box["x"] == (-1, 1)
assert "y" in new_bounding_box
assert "y" in new_bounding_box.ignored_inputs
assert len(new_bounding_box.intervals) == 1
assert new_bounding_box.ignored == [1]
def test_dimension(self):
intervals = {0: _Interval(-1, 1)}
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box.dimension == 1 == len(bounding_box._intervals)
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box.dimension == 2 == len(bounding_box._intervals)
bounding_box._intervals = {}
assert bounding_box.dimension == 0 == len(bounding_box._intervals)
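
    # domain() builds an np.linspace grid over each interval at the given
    # resolution, honoring the order argument.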
def test_domain(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# test defaults
assert (
np.array(bounding_box.domain(0.25))
== np.array([np.linspace(0, 2, 9), np.linspace(-1, 1, 9)])
).all()
# test C order
assert (
np.array(bounding_box.domain(0.25, "C"))
== np.array([np.linspace(0, 2, 9), np.linspace(-1, 1, 9)])
).all()
# test Fortran order
assert (
np.array(bounding_box.domain(0.25, "F"))
== np.array([np.linspace(-1, 1, 9), np.linspace(0, 2, 9)])
).all()
# test error order
MESSAGE = r"order must be either 'C' .* or 'F' .*, got: .*"
with pytest.raises(ValueError, match=MESSAGE):
bounding_box.domain(0.25, mk.MagicMock())
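
    # _outside returns a boolean mask of the out-of-bounds entries plus a
    # flag that is True only when every input point is outside.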
def test__outside(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Normal array input, all inside
x = np.linspace(-1, 1, 13)
y = np.linspace(0, 2, 13)
input_shape = x.shape
inputs = (x, y)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
assert (outside_index == [False for _ in range(13)]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, some inside and some outside
x = np.linspace(-2, 1, 13)
y = np.linspace(0, 3, 13)
input_shape = x.shape
inputs = (x, y)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
# fmt: off
assert (
outside_index
== [
True, True, True, True,
False, False, False, False, False,
True, True, True, True
]
).all()
# fmt: on
assert not all_out and isinstance(all_out, bool)
# Normal array input, all outside
x = np.linspace(2, 3, 13)
y = np.linspace(-2, -1, 13)
input_shape = x.shape
inputs = (x, y)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
assert (outside_index == [True for _ in range(13)]).all()
assert all_out and isinstance(all_out, bool)
# Scalar input inside bounding_box
inputs = (0.5, 0.5)
input_shape = (1,)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
assert (outside_index == [False]).all()
assert not all_out and isinstance(all_out, bool)
# Scalar input outside bounding_box
inputs = (2, -1)
input_shape = (1,)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
assert (outside_index == [True]).all()
assert all_out and isinstance(all_out, bool)
def test__valid_index(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Normal array input, all inside
x = np.linspace(-1, 1, 13)
y = np.linspace(0, 2, 13)
input_shape = x.shape
inputs = (x, y)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
        assert (valid_index[0] == list(range(13))).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, some inside and some outside
x = np.linspace(-2, 1, 13)
y = np.linspace(0, 3, 13)
input_shape = x.shape
inputs = (x, y)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
assert (valid_index[0] == [4, 5, 6, 7, 8]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, all outside
x = np.linspace(2, 3, 13)
y = np.linspace(-2, -1, 13)
input_shape = x.shape
inputs = (x, y)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
assert (valid_index[0] == []).all()
assert all_out and isinstance(all_out, bool)
# Scalar input inside bounding_box
inputs = (0.5, 0.5)
input_shape = (1,)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
assert (valid_index[0] == [0]).all()
assert not all_out and isinstance(all_out, bool)
# Scalar input outside bounding_box
inputs = (2, -1)
input_shape = (1,)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
assert (valid_index[0] == []).all()
assert all_out and isinstance(all_out, bool)
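
    # prepare_inputs filters the inputs down to their in-bounds entries and
    # reports the surviving indices alongside the all-outside flag.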
def test_prepare_inputs(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Normal array input, all inside
x = np.linspace(-1, 1, 13)
y = np.linspace(0, 2, 13)
input_shape = x.shape
inputs = (x, y)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(
input_shape, inputs
)
assert (np.array(new_inputs) == np.array(inputs)).all()
assert len(valid_index) == 1
        assert (valid_index[0] == list(range(13))).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, some inside and some outside
x = np.linspace(-2, 1, 13)
y = np.linspace(0, 3, 13)
input_shape = x.shape
inputs = (x, y)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(
input_shape, inputs
)
assert (
np.array(new_inputs)
== np.array(
[
[x[4], x[5], x[6], x[7], x[8]],
[y[4], y[5], y[6], y[7], y[8]],
]
)
).all()
assert len(valid_index) == 1
assert (valid_index[0] == [4, 5, 6, 7, 8]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, all outside
x = np.linspace(2, 3, 13)
y = np.linspace(-2, -1, 13)
input_shape = x.shape
inputs = (x, y)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(
input_shape, inputs
)
assert new_inputs == ()
assert len(valid_index) == 1
assert (valid_index[0] == []).all()
assert all_out and isinstance(all_out, bool)
# Scalar input inside bounding_box
inputs = (0.5, 0.5)
input_shape = (1,)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(
input_shape, inputs
)
assert (np.array(new_inputs) == np.array([[0.5], [0.5]])).all()
assert len(valid_index) == 1
assert (valid_index[0] == [0]).all()
assert not all_out and isinstance(all_out, bool)
# Scalar input outside bounding_box
inputs = (2, -1)
input_shape = (1,)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(
input_shape, inputs
)
assert new_inputs == ()
assert len(valid_index) == 1
assert (valid_index[0] == []).all()
assert all_out and isinstance(all_out, bool)
def test_bounding_box_ignore(self):
"""Regression test for #13028"""
bbox_x = ModelBoundingBox((9, 10), Polynomial2D(1), ignored=["x"])
assert bbox_x.ignored_inputs == ["x"]
bbox_y = ModelBoundingBox((11, 12), Polynomial2D(1), ignored=["y"])
assert bbox_y.ignored_inputs == ["y"]
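

# A _SelectorArgument is an (index, ignore) pair identifying which model
# input selects between the boxes of a CompoundBoundingBox.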
class Test_SelectorArgument:
def test_create(self):
index = mk.MagicMock()
ignore = mk.MagicMock()
argument = _SelectorArgument(index, ignore)
assert isinstance(argument, _BaseSelectorArgument)
assert argument.index == index
assert argument.ignore == ignore
assert argument == (index, ignore)
def test_validate(self):
model = Gaussian2D()
# default integer
assert _SelectorArgument.validate(model, 0) == (0, True)
assert _SelectorArgument.validate(model, 1) == (1, True)
# default string
assert _SelectorArgument.validate(model, "x") == (0, True)
assert _SelectorArgument.validate(model, "y") == (1, True)
ignore = mk.MagicMock()
# non-default integer
assert _SelectorArgument.validate(model, 0, ignore) == (0, ignore)
assert _SelectorArgument.validate(model, 1, ignore) == (1, ignore)
# non-default string
assert _SelectorArgument.validate(model, "x", ignore) == (0, ignore)
assert _SelectorArgument.validate(model, "y", ignore) == (1, ignore)
# Fail
with pytest.raises(ValueError, match=r"'.*' is not one of the inputs: .*"):
_SelectorArgument.validate(model, "z")
with pytest.raises(
ValueError, match=r"Key value: .* must be string or integer."
):
_SelectorArgument.validate(model, mk.MagicMock())
with pytest.raises(
IndexError, match=r"Integer key: .* must be non-negative and < .*"
):
_SelectorArgument.validate(model, 2)
def test_get_selector(self):
# single inputs
inputs = [idx + 17 for idx in range(3)]
for index in range(3):
assert (
_SelectorArgument(index, mk.MagicMock()).get_selector(*inputs)
== inputs[index]
)
# numpy array of single inputs
inputs = [np.array([idx + 11]) for idx in range(3)]
for index in range(3):
assert (
_SelectorArgument(index, mk.MagicMock()).get_selector(*inputs)
== inputs[index]
)
inputs = [np.asanyarray(idx + 13) for idx in range(3)]
for index in range(3):
assert (
_SelectorArgument(index, mk.MagicMock()).get_selector(*inputs)
== inputs[index]
)
# multi entry numpy array
inputs = [np.array([idx + 27, idx - 31]) for idx in range(3)]
for index in range(3):
assert _SelectorArgument(index, mk.MagicMock()).get_selector(
*inputs
) == tuple(inputs[index])
def test_name(self):
model = Gaussian2D()
for index in range(model.n_inputs):
assert (
_SelectorArgument(index, mk.MagicMock()).name(model)
== model.inputs[index]
)
def test_pretty_repr(self):
model = Gaussian2D()
assert (
_SelectorArgument(0, False).pretty_repr(model)
== "Argument(name='x', ignore=False)"
)
assert (
_SelectorArgument(0, True).pretty_repr(model)
== "Argument(name='x', ignore=True)"
)
assert (
_SelectorArgument(1, False).pretty_repr(model)
== "Argument(name='y', ignore=False)"
)
assert (
_SelectorArgument(1, True).pretty_repr(model)
== "Argument(name='y', ignore=True)"
)
def test_get_fixed_value(self):
model = Gaussian2D()
values = {0: 5, "y": 7}
# Get index value
assert _SelectorArgument(0, mk.MagicMock()).get_fixed_value(model, values) == 5
# Get name value
assert _SelectorArgument(1, mk.MagicMock()).get_fixed_value(model, values) == 7
# Fail
MESSAGE = r".* was not found in .*"
        with pytest.raises(RuntimeError, match=MESSAGE):
_SelectorArgument(1, True).get_fixed_value(model, {0: 5})
def test_is_argument(self):
model = Gaussian2D()
argument = _SelectorArgument.validate(model, 0)
# Is true
assert argument.is_argument(model, 0) is True
assert argument.is_argument(model, "x") is True
# Is false
assert argument.is_argument(model, 1) is False
assert argument.is_argument(model, "y") is False
# Fail
with pytest.raises(ValueError, match=r"'.*' is not one of the inputs: .*"):
argument.is_argument(model, "z")
with pytest.raises(
ValueError, match=r"Key value: .* must be string or integer"
):
argument.is_argument(model, mk.MagicMock())
with pytest.raises(
IndexError, match=r"Integer key: .* must be non-negative and < .*"
):
argument.is_argument(model, 2)
def test_named_tuple(self):
model = Gaussian2D()
for index in range(model.n_inputs):
ignore = mk.MagicMock()
assert _SelectorArgument(index, ignore).named_tuple(model) == (
model.inputs[index],
ignore,
)
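

# _SelectorArguments is the validated tuple of _SelectorArgument entries,
# plus a kept_ignore list that records inputs ignored by earlier reductions.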
class Test_SelectorArguments:
def test_create(self):
arguments = _SelectorArguments(
(_SelectorArgument(0, True), _SelectorArgument(1, False))
)
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, False))
assert arguments._kept_ignore == []
kept_ignore = mk.MagicMock()
arguments = _SelectorArguments(
(_SelectorArgument(0, True), _SelectorArgument(1, False)), kept_ignore
)
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, False))
assert arguments._kept_ignore == kept_ignore
def test_pretty_repr(self):
model = Gaussian2D()
arguments = _SelectorArguments(
(_SelectorArgument(0, True), _SelectorArgument(1, False))
)
assert (
arguments.pretty_repr(model) == "SelectorArguments(\n"
" Argument(name='x', ignore=True)\n"
" Argument(name='y', ignore=False)\n"
")"
)
def test_ignore(self):
assert _SelectorArguments(
(_SelectorArgument(0, True), _SelectorArgument(1, True))
).ignore == [0, 1]
assert _SelectorArguments(
(_SelectorArgument(0, True), _SelectorArgument(1, True)), [13, 4]
).ignore == [0, 1, 13, 4]
assert _SelectorArguments(
(_SelectorArgument(0, True), _SelectorArgument(1, False))
).ignore == [0]
assert _SelectorArguments(
(_SelectorArgument(0, False), _SelectorArgument(1, True))
).ignore == [1]
assert (
_SelectorArguments(
(_SelectorArgument(0, False), _SelectorArgument(1, False))
).ignore
== []
)
assert _SelectorArguments(
(_SelectorArgument(0, False), _SelectorArgument(1, False)), [17, 14]
).ignore == [17, 14]
def test_validate(self):
# Integer key and passed ignore
arguments = _SelectorArguments.validate(Gaussian2D(), ((0, True), (1, False)))
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, False))
assert arguments.kept_ignore == []
# Default ignore
arguments = _SelectorArguments.validate(Gaussian2D(), ((0,), (1,)))
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, True))
assert arguments.kept_ignore == []
# String key and passed ignore
arguments = _SelectorArguments.validate(
Gaussian2D(), (("x", True), ("y", False))
)
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, False))
assert arguments.kept_ignore == []
# Test kept_ignore option
new_arguments = _SelectorArguments.validate(Gaussian2D(), arguments, [11, 5, 8])
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((0, True), (1, False))
assert new_arguments.kept_ignore == [11, 5, 8]
arguments._kept_ignore = [13, 17, 14]
new_arguments = _SelectorArguments.validate(Gaussian2D(), arguments)
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((0, True), (1, False))
assert new_arguments.kept_ignore == [13, 17, 14]
# Invalid, bad argument
with pytest.raises(ValueError, match=r"'.*' is not one of the inputs: .*"):
_SelectorArguments.validate(Gaussian2D(), ((0, True), ("z", False)))
with pytest.raises(
ValueError, match=r"Key value: .* must be string or integer"
):
_SelectorArguments.validate(
Gaussian2D(), ((mk.MagicMock(), True), (1, False))
)
with pytest.raises(
IndexError, match=r"Integer key: .* must be non-negative and < .*"
):
_SelectorArguments.validate(Gaussian2D(), ((0, True), (2, False)))
# Invalid, repeated argument
with pytest.raises(ValueError, match=r"Input: 'x' has been repeated"):
_SelectorArguments.validate(Gaussian2D(), ((0, True), (0, False)))
# Invalid, no arguments
with pytest.raises(
ValueError, match=r"There must be at least one selector argument"
):
_SelectorArguments.validate(Gaussian2D(), ())
def test_get_selector(self):
inputs = [idx + 19 for idx in range(4)]
assert _SelectorArguments.validate(
Gaussian2D(), ((0, True), (1, False))
).get_selector(*inputs) == tuple(inputs[:2])
assert _SelectorArguments.validate(
Gaussian2D(), ((1, True), (0, False))
).get_selector(*inputs) == tuple(inputs[:2][::-1])
assert _SelectorArguments.validate(Gaussian2D(), ((1, False),)).get_selector(
*inputs
) == (inputs[1],)
assert _SelectorArguments.validate(Gaussian2D(), ((0, True),)).get_selector(
*inputs
) == (inputs[0],)
def test_is_selector(self):
# Is Selector
assert _SelectorArguments.validate(
Gaussian2D(), ((0, True), (1, False))
).is_selector((0.5, 2.5))
assert _SelectorArguments.validate(Gaussian2D(), ((0, True),)).is_selector(
(0.5,)
)
# Is not selector
assert not _SelectorArguments.validate(
Gaussian2D(), ((0, True), (1, False))
).is_selector((0.5, 2.5, 3.5))
assert not _SelectorArguments.validate(
Gaussian2D(), ((0, True), (1, False))
).is_selector((0.5,))
assert not _SelectorArguments.validate(
Gaussian2D(), ((0, True), (1, False))
).is_selector(0.5)
assert not _SelectorArguments.validate(Gaussian2D(), ((0, True),)).is_selector(
(0.5, 2.5)
)
assert not _SelectorArguments.validate(Gaussian2D(), ((0, True),)).is_selector(
2.5
)
def test_get_fixed_values(self):
model = Gaussian2D()
assert _SelectorArguments.validate(
model, ((0, True), (1, False))
).get_fixed_values(model, {0: 11, 1: 7}) == (11, 7)
assert _SelectorArguments.validate(
model, ((0, True), (1, False))
).get_fixed_values(model, {0: 5, "y": 47}) == (5, 47)
assert _SelectorArguments.validate(
model, ((0, True), (1, False))
).get_fixed_values(model, {"x": 2, "y": 9}) == (2, 9)
assert _SelectorArguments.validate(
model, ((0, True), (1, False))
).get_fixed_values(model, {"x": 12, 1: 19}) == (12, 19)
def test_is_argument(self):
model = Gaussian2D()
# Is true
arguments = _SelectorArguments.validate(model, ((0, True), (1, False)))
assert arguments.is_argument(model, 0) is True
assert arguments.is_argument(model, "x") is True
assert arguments.is_argument(model, 1) is True
assert arguments.is_argument(model, "y") is True
# Is true and false
arguments = _SelectorArguments.validate(model, ((0, True),))
assert arguments.is_argument(model, 0) is True
assert arguments.is_argument(model, "x") is True
assert arguments.is_argument(model, 1) is False
assert arguments.is_argument(model, "y") is False
arguments = _SelectorArguments.validate(model, ((1, False),))
assert arguments.is_argument(model, 0) is False
assert arguments.is_argument(model, "x") is False
assert arguments.is_argument(model, 1) is True
assert arguments.is_argument(model, "y") is True
def test_selector_index(self):
model = Gaussian2D()
arguments = _SelectorArguments.validate(model, ((0, True), (1, False)))
assert arguments.selector_index(model, 0) == 0
assert arguments.selector_index(model, "x") == 0
assert arguments.selector_index(model, 1) == 1
assert arguments.selector_index(model, "y") == 1
arguments = _SelectorArguments.validate(model, ((1, True), (0, False)))
assert arguments.selector_index(model, 0) == 1
assert arguments.selector_index(model, "x") == 1
assert arguments.selector_index(model, 1) == 0
assert arguments.selector_index(model, "y") == 0
# Error
arguments = _SelectorArguments.validate(model, ((0, True),))
with pytest.raises(
ValueError, match=r"y does not correspond to any selector argument"
):
arguments.selector_index(model, "y")
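
    # add_ignore returns a new _SelectorArguments with the input appended to
    # kept_ignore; ignoring an actual selector argument is an error.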
def test_add_ignore(self):
model = Gaussian2D()
arguments = _SelectorArguments.validate(model, ((0, True),))
assert arguments == ((0, True),)
assert arguments._kept_ignore == []
new_arguments0 = arguments.add_ignore(model, 1)
assert new_arguments0 == arguments
assert new_arguments0._kept_ignore == [1]
        assert arguments._kept_ignore == []
new_arguments1 = new_arguments0.add_ignore(model, "y")
assert new_arguments1 == arguments == new_arguments0
assert new_arguments0._kept_ignore == [1]
assert new_arguments1._kept_ignore == [1, 1]
assert arguments._kept_ignore == []
# Error
with pytest.raises(
ValueError, match=r"0: is a selector argument and cannot be ignored"
):
arguments.add_ignore(model, 0)
def test_reduce(self):
model = Gaussian2D()
arguments = _SelectorArguments.validate(model, ((0, True), (1, False)))
new_arguments = arguments.reduce(model, 0)
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((1, False),)
assert new_arguments._kept_ignore == [0]
assert arguments._kept_ignore == []
new_arguments = arguments.reduce(model, "x")
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((1, False),)
assert new_arguments._kept_ignore == [0]
assert arguments._kept_ignore == []
new_arguments = arguments.reduce(model, 1)
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((0, True),)
assert new_arguments._kept_ignore == [1]
assert arguments._kept_ignore == []
new_arguments = arguments.reduce(model, "y")
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((0, True),)
assert new_arguments._kept_ignore == [1]
assert arguments._kept_ignore == []
def test_named_tuple(self):
model = Gaussian2D()
arguments = _SelectorArguments.validate(model, ((0, True), (1, False)))
assert arguments.named_tuple(model) == (("x", True), ("y", False))
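

# CompoundBoundingBox maps tuples of selector values to individual
# ModelBoundingBox instances, optionally creating new entries on demand via
# a create_selector callback.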
class TestCompoundBoundingBox:
def test_create(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
create_selector = mk.MagicMock()
bounding_box = CompoundBoundingBox(
bounding_boxes, model, selector_args, create_selector, order="F"
)
assert (bounding_box._model.parameters == model.parameters).all()
assert bounding_box._selector_args == selector_args
for _selector, bbox in bounding_boxes.items():
assert _selector in bounding_box._bounding_boxes
assert bounding_box._bounding_boxes[_selector] == bbox
for _selector, bbox in bounding_box._bounding_boxes.items():
assert _selector in bounding_boxes
assert bounding_boxes[_selector] == bbox
assert isinstance(bbox, ModelBoundingBox)
assert bounding_box._bounding_boxes == bounding_boxes
assert bounding_box._create_selector == create_selector
assert bounding_box._order == "F"
def test_copy(self):
bounding_box = CompoundBoundingBox.validate(
Gaussian2D(),
{(1,): (-1.5, 1.3), (2,): (-2.7, 2.4)},
((0, True),),
mk.MagicMock(),
)
copy = bounding_box.copy()
assert bounding_box == copy
assert id(bounding_box) != id(copy)
# model is not copied to prevent infinite recursion
assert bounding_box._model == copy._model
assert id(bounding_box._model) == id(copy._model)
        # Same string values will have the same id
assert bounding_box._order == copy._order
assert id(bounding_box._order) == id(copy._order)
assert bounding_box._create_selector == copy._create_selector
assert id(bounding_box._create_selector) != id(copy._create_selector)
# Check selector_args
for index, argument in enumerate(bounding_box.selector_args):
assert argument == copy.selector_args[index]
assert id(argument) != id(copy.selector_args[index])
            # Same integer values will have the same id
assert argument.index == copy.selector_args[index].index
assert id(argument.index) == id(copy.selector_args[index].index)
            # Same boolean values will have the same id
assert argument.ignore == copy.selector_args[index].ignore
assert id(argument.ignore) == id(copy.selector_args[index].ignore)
assert len(bounding_box.selector_args) == len(copy.selector_args)
# Check bounding_boxes
for selector, bbox in bounding_box.bounding_boxes.items():
assert bbox == copy.bounding_boxes[selector]
assert id(bbox) != id(copy.bounding_boxes[selector])
assert bbox.ignored == copy.bounding_boxes[selector].ignored
assert id(bbox.ignored) != id(copy.bounding_boxes[selector].ignored)
# model is not copied to prevent infinite recursion
assert bbox._model == copy.bounding_boxes[selector]._model
assert id(bbox._model) == id(copy.bounding_boxes[selector]._model)
            # Same string values will have the same id
assert bbox._order == copy.bounding_boxes[selector]._order
assert id(bbox._order) == id(copy.bounding_boxes[selector]._order)
# Check interval objects
for index, interval in bbox.intervals.items():
assert interval == copy.bounding_boxes[selector].intervals[index]
assert id(interval) != id(
copy.bounding_boxes[selector].intervals[index]
)
                # Same float values will have the same id
assert (
interval.lower
== copy.bounding_boxes[selector].intervals[index].lower
)
assert id(interval.lower) == id(
copy.bounding_boxes[selector].intervals[index].lower
)
                # Same float values will have the same id
assert (
interval.upper
== copy.bounding_boxes[selector].intervals[index].upper
)
assert id(interval.upper) == id(
copy.bounding_boxes[selector].intervals[index].upper
)
assert len(bbox.intervals) == len(copy.bounding_boxes[selector].intervals)
assert (
bbox.intervals.keys() == copy.bounding_boxes[selector].intervals.keys()
)
assert len(bounding_box.bounding_boxes) == len(copy.bounding_boxes)
assert bounding_box.bounding_boxes.keys() == copy.bounding_boxes.keys()
def test___repr__(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
assert (
bounding_box.__repr__() == "CompoundBoundingBox(\n"
" bounding_boxes={\n"
" (1,) = ModelBoundingBox(\n"
" intervals={\n"
" y: Interval(lower=-1, upper=1)\n"
" }\n"
" ignored=['x']\n"
" model=Gaussian2D(inputs=('x', 'y'))\n"
" order='C'\n"
" )\n"
" (2,) = ModelBoundingBox(\n"
" intervals={\n"
" y: Interval(lower=-2, upper=2)\n"
" }\n"
" ignored=['x']\n"
" model=Gaussian2D(inputs=('x', 'y'))\n"
" order='C'\n"
" )\n"
" }\n"
" selector_args = SelectorArguments(\n"
" Argument(name='x', ignore=True)\n"
" )\n"
")"
)
def test_bounding_boxes(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
assert bounding_box._bounding_boxes == bounding_boxes
assert bounding_box.bounding_boxes == bounding_boxes
def test_selector_args(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_box = CompoundBoundingBox({}, model, selector_args)
# Get
assert bounding_box._selector_args == selector_args
assert bounding_box.selector_args == selector_args
# Set
selector_args = ((1, False),)
with pytest.warns(RuntimeWarning, match=r"Overriding selector_args.*"):
bounding_box.selector_args = selector_args
assert bounding_box._selector_args == selector_args
assert bounding_box.selector_args == selector_args
def test_create_selector(self):
model = Gaussian2D()
create_selector = mk.MagicMock()
bounding_box = CompoundBoundingBox({}, model, ((1,),), create_selector)
assert bounding_box._create_selector == create_selector
assert bounding_box.create_selector == create_selector
def test__get_selector_key(self):
bounding_box = CompoundBoundingBox({}, Gaussian2D(), ((1, True),))
assert len(bounding_box.bounding_boxes) == 0
# Singular
assert bounding_box._get_selector_key(5) == (5,)
assert bounding_box._get_selector_key((5,)) == (5,)
assert bounding_box._get_selector_key([5]) == (5,)
assert bounding_box._get_selector_key(np.asanyarray(5)) == (5,)
assert bounding_box._get_selector_key(np.array([5])) == (5,)
# multiple
assert bounding_box._get_selector_key((5, 19)) == (5, 19)
assert bounding_box._get_selector_key([5, 19]) == (5, 19)
assert bounding_box._get_selector_key(np.array([5, 19])) == (5, 19)
def test___setitem__(self):
model = Gaussian2D()
# Ignored argument
bounding_box = CompoundBoundingBox({}, model, ((1, True),), order="F")
assert len(bounding_box.bounding_boxes) == 0
# Valid
bounding_box[(15,)] = (-15, 15)
assert len(bounding_box.bounding_boxes) == 1
assert (15,) in bounding_box._bounding_boxes
assert isinstance(bounding_box._bounding_boxes[(15,)], ModelBoundingBox)
assert bounding_box._bounding_boxes[(15,)] == (-15, 15)
assert bounding_box._bounding_boxes[(15,)].order == "F"
# Invalid key
assert (7, 13) not in bounding_box._bounding_boxes
with pytest.raises(ValueError, match=".* is not a selector!"):
bounding_box[(7, 13)] = (-7, 7)
assert (7, 13) not in bounding_box._bounding_boxes
assert len(bounding_box.bounding_boxes) == 1
# Invalid bounding box
assert 13 not in bounding_box._bounding_boxes
with pytest.raises(
ValueError, match="An interval must be some sort of sequence of length 2"
):
bounding_box[(13,)] = ((-13, 13), (-3, 3))
assert 13 not in bounding_box._bounding_boxes
assert len(bounding_box.bounding_boxes) == 1
# No ignored argument
bounding_box = CompoundBoundingBox({}, model, ((1, False),), order="F")
assert len(bounding_box.bounding_boxes) == 0
# Valid
bounding_box[(15,)] = ((-15, 15), (-6, 6))
assert len(bounding_box.bounding_boxes) == 1
assert (15,) in bounding_box._bounding_boxes
assert isinstance(bounding_box._bounding_boxes[(15,)], ModelBoundingBox)
assert bounding_box._bounding_boxes[(15,)] == ((-15, 15), (-6, 6))
assert bounding_box._bounding_boxes[(15,)].order == "F"
# Invalid key
assert (14, 11) not in bounding_box._bounding_boxes
with pytest.raises(ValueError, match=".* is not a selector!"):
bounding_box[(14, 11)] = ((-7, 7), (-12, 12))
assert (14, 11) not in bounding_box._bounding_boxes
assert len(bounding_box.bounding_boxes) == 1
# Invalid bounding box
assert 13 not in bounding_box._bounding_boxes
with pytest.raises(
ValueError, match="An interval must be some sort of sequence of length 2"
):
bounding_box[(13,)] = (-13, 13)
assert 13 not in bounding_box._bounding_boxes
assert len(bounding_box.bounding_boxes) == 1
def test__validate(self):
model = Gaussian2D()
selector_args = ((0, True),)
# Tuple selector_args
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox({}, model, selector_args)
bounding_box._validate(bounding_boxes)
for _selector, bbox in bounding_boxes.items():
assert _selector in bounding_box._bounding_boxes
assert bounding_box._bounding_boxes[_selector] == bbox
for _selector, bbox in bounding_box._bounding_boxes.items():
assert _selector in bounding_boxes
assert bounding_boxes[_selector] == bbox
assert isinstance(bbox, ModelBoundingBox)
assert bounding_box._bounding_boxes == bounding_boxes
def test___eq__(self):
bounding_box_1 = CompoundBoundingBox(
{(1,): (-1, 1), (2,): (-2, 2)}, Gaussian2D(), ((0, True),)
)
bounding_box_2 = CompoundBoundingBox(
{(1,): (-1, 1), (2,): (-2, 2)}, Gaussian2D(), ((0, True),)
)
# Equal
assert bounding_box_1 == bounding_box_2
# Not equal to non-compound bounding_box
assert not bounding_box_1 == mk.MagicMock()
assert not bounding_box_2 == mk.MagicMock()
# Not equal bounding_boxes
bounding_box_2[(15,)] = (-15, 15)
assert not bounding_box_1 == bounding_box_2
del bounding_box_2._bounding_boxes[(15,)]
assert bounding_box_1 == bounding_box_2
# Not equal selector_args
bounding_box_2._selector_args = _SelectorArguments.validate(
Gaussian2D(), ((0, False),)
)
assert not bounding_box_1 == bounding_box_2
bounding_box_2._selector_args = _SelectorArguments.validate(
Gaussian2D(), ((0, True),)
)
assert bounding_box_1 == bounding_box_2
# Not equal create_selector
bounding_box_2._create_selector = mk.MagicMock()
assert not bounding_box_1 == bounding_box_2
def test_validate(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
create_selector = mk.MagicMock()
# Fail selector_args
MESSAGE = r"Selector arguments must be provided .*"
with pytest.raises(ValueError, match=MESSAGE):
CompoundBoundingBox.validate(model, bounding_boxes)
# Normal validate
bounding_box = CompoundBoundingBox.validate(
model, bounding_boxes, selector_args, create_selector, order="F"
)
assert (bounding_box._model.parameters == model.parameters).all()
assert bounding_box._selector_args == selector_args
assert bounding_box._bounding_boxes == bounding_boxes
assert bounding_box._create_selector == create_selector
assert bounding_box._order == "F"
# Re-validate
new_bounding_box = CompoundBoundingBox.validate(model, bounding_box)
assert bounding_box == new_bounding_box
assert new_bounding_box._order == "F"
# Default order
bounding_box = CompoundBoundingBox.validate(
model, bounding_boxes, selector_args, create_selector
)
assert (bounding_box._model.parameters == model.parameters).all()
assert bounding_box._selector_args == selector_args
assert bounding_box._bounding_boxes == bounding_boxes
assert bounding_box._create_selector == create_selector
assert bounding_box._order == "C"
def test___contains__(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
assert (1,) in bounding_box
assert (2,) in bounding_box
assert (3,) not in bounding_box
assert 1 not in bounding_box
assert 2 not in bounding_box
def test__create_bounding_box(self):
model = Gaussian2D()
create_selector = mk.MagicMock()
bounding_box = CompoundBoundingBox({}, model, ((1, False),), create_selector)
# Create is successful
create_selector.return_value = ((-15, 15), (-23, 23))
assert len(bounding_box._bounding_boxes) == 0
bbox = bounding_box._create_bounding_box((7,))
assert isinstance(bbox, ModelBoundingBox)
assert bbox == ((-15, 15), (-23, 23))
assert len(bounding_box._bounding_boxes) == 1
assert (7,) in bounding_box
assert isinstance(bounding_box[(7,)], ModelBoundingBox)
assert bounding_box[(7,)] == bbox
# Create is unsuccessful
create_selector.return_value = (-42, 42)
with pytest.raises(
ValueError, match="An interval must be some sort of sequence of length 2"
):
bounding_box._create_bounding_box((27,))
def test___getitem__(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
# already exists
assert isinstance(bounding_box[1], ModelBoundingBox)
assert bounding_box[1] == (-1, 1)
assert isinstance(bounding_box[(2,)], ModelBoundingBox)
assert bounding_box[2] == (-2, 2)
assert isinstance(bounding_box[(1,)], ModelBoundingBox)
assert bounding_box[(1,)] == (-1, 1)
assert isinstance(bounding_box[(2,)], ModelBoundingBox)
assert bounding_box[(2,)] == (-2, 2)
# no selector
with pytest.raises(
RuntimeError, match="No bounding box is defined for selector: .*"
):
bounding_box[(3,)]
# Create a selector
bounding_box._create_selector = mk.MagicMock()
with mk.patch.object(
CompoundBoundingBox, "_create_bounding_box", autospec=True
) as mkCreate:
assert bounding_box[(3,)] == mkCreate.return_value
assert mkCreate.call_args_list == [mk.call(bounding_box, (3,))]
def test__select_bounding_box(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
inputs = [mk.MagicMock() for _ in range(3)]
with mk.patch.object(
_SelectorArguments, "get_selector", autospec=True
) as mkSelector:
with mk.patch.object(
CompoundBoundingBox, "__getitem__", autospec=True
) as mkGet:
assert bounding_box._select_bounding_box(inputs) == mkGet.return_value
assert mkGet.call_args_list == [
mk.call(bounding_box, mkSelector.return_value)
]
assert mkSelector.call_args_list == [
mk.call(bounding_box.selector_args, *inputs)
]
def test_prepare_inputs(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
input_shape = mk.MagicMock()
with mk.patch.object(
ModelBoundingBox, "prepare_inputs", autospec=True
) as mkPrepare:
assert (
bounding_box.prepare_inputs(input_shape, [1, 2, 3])
== mkPrepare.return_value
)
assert mkPrepare.call_args_list == [
mk.call(bounding_box[(1,)], input_shape, [1, 2, 3])
]
mkPrepare.reset_mock()
assert (
bounding_box.prepare_inputs(input_shape, [2, 2, 3])
== mkPrepare.return_value
)
assert mkPrepare.call_args_list == [
mk.call(bounding_box[(2,)], input_shape, [2, 2, 3])
]
mkPrepare.reset_mock()
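
    # _matching_bounding_boxes collects, for one fixed selector value, the
    # remaining selector keys and their reduced bounding boxes.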
def test__matching_bounding_boxes(self):
# Single selector index
selector_args = ((0, False),)
bounding_boxes = {
(1,): ((-1, 1), (-2, 2)),
(2,): ((-2, 2), (-3, 3)),
(3,): ((-3, 3), (-4, 4)),
}
bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args)
for value in [1, 2, 3]:
matching = bounding_box._matching_bounding_boxes("x", value)
assert isinstance(matching, dict)
assert () in matching
bbox = matching[()]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert "x" in bbox
assert "x" in bbox.ignored_inputs
assert "y" in bbox
assert bbox["y"] == (-value, value)
assert len(bbox.intervals) == 1
assert bbox.ignored == [0]
# Multiple selector index
selector_args = ((0, False), (1, False))
bounding_boxes = {
(1, 3): ((-1, 1), (-2, 2)),
(2, 2): ((-2, 2), (-3, 3)),
(3, 1): ((-3, 3), (-4, 4)),
}
bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args)
for value in [1, 2, 3]:
matching = bounding_box._matching_bounding_boxes("x", value)
assert isinstance(matching, dict)
assert (4 - value,) in matching
bbox = matching[(4 - value,)]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert "x" in bbox
assert "x" in bbox.ignored_inputs
assert "y" in bbox
assert bbox["y"] == (-value, value)
assert len(bbox.intervals) == 1
assert bbox.ignored == [0]
matching = bounding_box._matching_bounding_boxes("y", value)
assert isinstance(matching, dict)
assert (4 - value,) in matching
bbox = matching[(4 - value,)]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert "y" in bbox
assert "y" in bbox.ignored_inputs
assert "x" in bbox
assert bbox["x"] == (-(5 - value), (5 - value))
assert len(bbox.intervals) == 1
assert bbox.ignored == [1]
# Real fix input of slicing input
model = Shift(1) & Scale(2) & Identity(1)
model.inputs = ("x", "y", "slit_id")
bounding_boxes = {
(0,): ((-0.5, 1047.5), (-0.5, 2047.5)),
(1,): ((-0.5, 3047.5), (-0.5, 4047.5)),
}
bounding_box = CompoundBoundingBox.validate(
model, bounding_boxes, selector_args=[("slit_id", True)], order="F"
)
matching = bounding_box._matching_bounding_boxes("slit_id", 0)
assert isinstance(matching, dict)
assert () in matching
bbox = matching[()]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.ignored_inputs == ["slit_id"]
assert bbox.named_intervals == {"x": (-0.5, 1047.5), "y": (-0.5, 2047.5)}
assert bbox.order == "F"
matching = bounding_box._matching_bounding_boxes("slit_id", 1)
assert isinstance(matching, dict)
assert () in matching
bbox = matching[()]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.ignored_inputs == ["slit_id"]
assert bbox.named_intervals == {"x": (-0.5, 3047.5), "y": (-0.5, 4047.5)}
assert bbox.order == "F"
# Errors
MESSAGE = (
r"Attempting to fix input .*, but there are no bounding boxes for argument"
r" value .*"
)
with pytest.raises(ValueError, match=MESSAGE):
bounding_box._matching_bounding_boxes("slit_id", 2)
def test__fix_input_selector_arg(self):
# Single selector index
selector_args = ((0, False),)
bounding_boxes = {
(1,): ((-1, 1), (-2, 2)),
(2,): ((-2, 2), (-3, 3)),
(3,): ((-3, 3), (-4, 4)),
}
bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args)
for value in [1, 2, 3]:
bbox = bounding_box._fix_input_selector_arg("x", value)
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert "x" in bbox
assert "x" in bbox.ignored_inputs
assert "y" in bbox
assert bbox["y"] == (-value, value)
assert len(bbox.intervals) == 1
assert bbox.ignored == [0]
# Multiple selector index
selector_args = ((0, False), (1, False))
bounding_boxes = {
(1, 3): ((-1, 1), (-2, 2)),
(2, 2): ((-2, 2), (-3, 3)),
(3, 1): ((-3, 3), (-4, 4)),
}
bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args)
for value in [1, 2, 3]:
bbox = bounding_box._fix_input_selector_arg("x", value)
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert bbox.selector_args == ((1, False),)
assert (4 - value,) in bbox
bbox_selector = bbox[(4 - value,)]
assert isinstance(bbox_selector, ModelBoundingBox)
assert (bbox_selector._model.parameters == Gaussian2D().parameters).all()
assert "x" in bbox_selector
assert "x" in bbox_selector.ignored_inputs
assert "y" in bbox_selector
assert bbox_selector["y"] == (-value, value)
assert len(bbox_selector.intervals) == 1
assert bbox_selector.ignored == [0]
bbox = bounding_box._fix_input_selector_arg("y", value)
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert bbox.selector_args == ((0, False),)
assert (4 - value,) in bbox
bbox_selector = bbox[(4 - value,)]
assert isinstance(bbox_selector, ModelBoundingBox)
assert (bbox_selector._model.parameters == Gaussian2D().parameters).all()
assert "y" in bbox_selector
assert "y" in bbox_selector.ignored_inputs
assert "x" in bbox_selector
assert bbox_selector["x"] == (-(5 - value), (5 - value))
assert len(bbox_selector.intervals) == 1
assert bbox_selector.ignored == [1]
        # Realistic case: fix the input used for slicing (the selector input)
model = Shift(1) & Scale(2) & Identity(1)
model.inputs = ("x", "y", "slit_id")
bounding_boxes = {
(0,): ((-0.5, 1047.5), (-0.5, 2047.5)),
(1,): ((-0.5, 3047.5), (-0.5, 4047.5)),
}
bounding_box = CompoundBoundingBox.validate(
model, bounding_boxes, selector_args=[("slit_id", True)], order="F"
)
bbox = bounding_box._fix_input_selector_arg("slit_id", 0)
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.ignored_inputs == ["slit_id"]
assert bbox.named_intervals == {"x": (-0.5, 1047.5), "y": (-0.5, 2047.5)}
assert bbox.order == "F"
bbox = bounding_box._fix_input_selector_arg("slit_id", 1)
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.ignored_inputs == ["slit_id"]
assert bbox.named_intervals == {"x": (-0.5, 3047.5), "y": (-0.5, 4047.5)}
assert bbox.order == "F"
def test__fix_input_bbox_arg(self):
model = Shift(1) & Scale(2) & Identity(1)
model.inputs = ("x", "y", "slit_id")
bounding_boxes = {
(0,): ((-0.5, 1047.5), (-0.5, 2047.5)),
(1,): ((-0.5, 3047.5), (-0.5, 4047.5)),
}
bounding_box = CompoundBoundingBox.validate(
model, bounding_boxes, selector_args=[("slit_id", True)], order="F"
)
bbox = bounding_box._fix_input_bbox_arg("x", 5)
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.selector_args == ((2, True),)
assert bbox.selector_args._kept_ignore == [0]
assert bbox._bounding_boxes[(0,)] == (-0.5, 2047.5)
assert bbox._bounding_boxes[(1,)] == (-0.5, 4047.5)
assert len(bbox._bounding_boxes) == 2
bbox = bounding_box._fix_input_bbox_arg("y", 5)
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.selector_args == ((2, True),)
assert bbox.selector_args._kept_ignore == [1]
assert bbox._bounding_boxes[(0,)] == (-0.5, 1047.5)
assert bbox._bounding_boxes[(1,)] == (-0.5, 3047.5)
assert len(bbox._bounding_boxes) == 2
def test_fix_inputs(self):
model = Shift(1) & Scale(2) & Identity(1)
model.inputs = ("x", "y", "slit_id")
bounding_boxes = {
(0,): ((-0.5, 1047.5), (-0.5, 2047.5)),
(1,): ((-0.5, 3047.5), (-0.5, 4047.5)),
}
bounding_box = CompoundBoundingBox.validate(
model, bounding_boxes, selector_args=[("slit_id", True)], order="F"
)
model.bounding_box = bounding_box
# Fix selector argument
new_model = fix_inputs(model, {"slit_id": 0})
bbox = new_model.bounding_box
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == new_model.parameters).all()
assert bbox.ignored_inputs == []
assert bbox.named_intervals == {"x": (-0.5, 1047.5), "y": (-0.5, 2047.5)}
assert bbox.order == "F"
# Fix a bounding_box field
new_model = fix_inputs(model, {"x": 5})
bbox = new_model.bounding_box
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.selector_args == ((1, True),)
assert bbox.selector_args._kept_ignore == []
assert bbox._bounding_boxes[(0,)] == (-0.5, 2047.5)
assert bbox._bounding_boxes[(0,)].order == "F"
assert bbox._bounding_boxes[(1,)] == (-0.5, 4047.5)
assert bbox._bounding_boxes[(1,)].order == "F"
assert len(bbox._bounding_boxes) == 2
new_model = fix_inputs(model, {"y": 5})
bbox = new_model.bounding_box
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.selector_args == ((1, True),)
assert bbox.selector_args._kept_ignore == []
assert bbox._bounding_boxes[(0,)] == (-0.5, 1047.5)
assert bbox._bounding_boxes[(0,)].order == "F"
assert bbox._bounding_boxes[(1,)] == (-0.5, 3047.5)
assert bbox._bounding_boxes[(1,)].order == "F"
assert len(bbox._bounding_boxes) == 2
# Fix selector argument and a bounding_box field
new_model = fix_inputs(model, {"slit_id": 0, "x": 5})
bbox = new_model.bounding_box
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == new_model.parameters).all()
assert bbox.ignored_inputs == []
assert bbox.named_intervals == {"y": (-0.5, 2047.5)}
assert bbox.order == "F"
new_model = fix_inputs(model, {"y": 5, "slit_id": 1})
bbox = new_model.bounding_box
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == new_model.parameters).all()
assert bbox.ignored_inputs == []
assert bbox.named_intervals == {"x": (-0.5, 3047.5)}
assert bbox.order == "F"
# Fix two bounding_box fields
new_model = fix_inputs(model, {"x": 5, "y": 7})
bbox = new_model.bounding_box
assert isinstance(bbox, CompoundBoundingBox)
assert bbox.selector_args == ((0, True),)
assert bbox.selector_args._kept_ignore == []
assert bbox._bounding_boxes[(0,)] == (-np.inf, np.inf)
assert bbox._bounding_boxes[(0,)].order == "F"
assert bbox._bounding_boxes[(1,)] == (-np.inf, np.inf)
assert bbox._bounding_boxes[(1,)].order == "F"
assert len(bbox._bounding_boxes) == 2
def test_complex_compound_bounding_box(self):
model = Identity(4)
bounding_boxes = {
(2.5, 1.3): ((-1, 1), (-3, 3)),
(2.5, 2.71): ((-3, 3), (-1, 1)),
}
selector_args = (("x0", True), ("x1", True))
bbox = CompoundBoundingBox.validate(model, bounding_boxes, selector_args)
assert bbox[(2.5, 1.3)] == ModelBoundingBox(
((-1, 1), (-3, 3)), model, ignored=["x0", "x1"]
)
assert bbox[(2.5, 2.71)] == ModelBoundingBox(
((-3, 3), (-1, 1)), model, ignored=["x0", "x1"]
)
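# Note: validate() records each tuple-selector input as ignored on the
# per-key ModelBoundingBox, which is why both boxes above compare equal to
# ModelBoundingBox instances constructed with ignored=["x0", "x1"].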
|
0dc5630efd50c1221d98235b9523b91bcd9ae7d6e18c2f9758bee9797c826bf6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
import os
import subprocess
import sys
import unittest.mock as mk
from inspect import signature
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_equal
import astropy
import astropy.modeling.core as core
import astropy.units as u
from astropy.convolution import convolve_models
from astropy.modeling import models
from astropy.modeling.bounding_box import CompoundBoundingBox, ModelBoundingBox
from astropy.modeling.core import (
SPECIAL_OPERATORS,
CompoundModel,
Model,
_add_special_operator,
bind_bounding_box,
bind_compound_bounding_box,
custom_model,
fix_inputs,
)
from astropy.modeling.parameters import Parameter
from astropy.modeling.separable import separability_matrix
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY
class NonFittableModel(Model):
"""An example class directly subclassing Model for testing."""
a = Parameter()
def __init__(self, a, model_set_axis=None):
super().__init__(a, model_set_axis=model_set_axis)
@staticmethod
def evaluate():
pass
def test_Model_instance_repr_and_str():
m = NonFittableModel(42.5)
assert repr(m) == "<NonFittableModel(a=42.5)>"
assert (
str(m) == "Model: NonFittableModel\n"
"Inputs: ()\n"
"Outputs: ()\n"
"Model set size: 1\n"
"Parameters:\n"
" a \n"
" ----\n"
" 42.5"
)
assert len(m) == 1
def test_Model_array_parameter():
model = models.Gaussian1D(4, 2, 1)
assert_allclose(model.param_sets, [[4], [2], [1]])
def test_inputless_model():
"""
Regression test for
https://github.com/astropy/astropy/pull/3772#issuecomment-101821641
"""
class TestModel(Model):
n_outputs = 1
a = Parameter()
@staticmethod
def evaluate(a):
return a
m = TestModel(1)
assert m.a == 1
assert m() == 1
# Test array-like output
m = TestModel([1, 2, 3], model_set_axis=False)
assert len(m) == 1
assert np.all(m() == [1, 2, 3])
# Test a model set
m = TestModel(a=[1, 2, 3], model_set_axis=0)
assert len(m) == 3
assert np.all(m() == [1, 2, 3])
# Test a model set
m = TestModel(a=[[1, 2, 3], [4, 5, 6]], model_set_axis=0)
assert len(m) == 2
assert np.all(m() == [[1, 2, 3], [4, 5, 6]])
# Test a model set
m = TestModel(a=[[1, 2, 3], [4, 5, 6]], model_set_axis=np.int64(0))
assert len(m) == 2
assert np.all(m() == [[1, 2, 3], [4, 5, 6]])
def test_ParametricModel():
MESSAGE = r"Gaussian1D.__init__.* got an unrecognized parameter 'wrong'"
with pytest.raises(TypeError, match=MESSAGE):
models.Gaussian1D(1, 2, 3, wrong=4)
def test_custom_model_signature():
"""
Tests that the signatures for the __init__ and __call__
methods of custom models are useful.
"""
@custom_model
def model_a(x):
return x
assert model_a.param_names == ()
assert model_a.n_inputs == 1
sig = signature(model_a.__init__)
assert list(sig.parameters.keys()) == ["self", "args", "meta", "name", "kwargs"]
sig = signature(model_a.__call__)
assert list(sig.parameters.keys()) == [
"self",
"inputs",
"model_set_axis",
"with_bounding_box",
"fill_value",
"equivalencies",
"inputs_map",
"new_inputs",
]
@custom_model
def model_b(x, a=1, b=2):
return x + a + b
assert model_b.param_names == ("a", "b")
assert model_b.n_inputs == 1
sig = signature(model_b.__init__)
assert list(sig.parameters.keys()) == ["self", "a", "b", "kwargs"]
assert [x.default for x in sig.parameters.values()] == [sig.empty, 1, 2, sig.empty]
sig = signature(model_b.__call__)
assert list(sig.parameters.keys()) == [
"self",
"inputs",
"model_set_axis",
"with_bounding_box",
"fill_value",
"equivalencies",
"inputs_map",
"new_inputs",
]
@custom_model
def model_c(x, y, a=1, b=2):
return x + y + a + b
assert model_c.param_names == ("a", "b")
assert model_c.n_inputs == 2
sig = signature(model_c.__init__)
assert list(sig.parameters.keys()) == ["self", "a", "b", "kwargs"]
assert [x.default for x in sig.parameters.values()] == [sig.empty, 1, 2, sig.empty]
sig = signature(model_c.__call__)
assert list(sig.parameters.keys()) == [
"self",
"inputs",
"model_set_axis",
"with_bounding_box",
"fill_value",
"equivalencies",
"inputs_map",
"new_inputs",
]
def test_custom_model_subclass():
"""Test that custom models can be subclassed."""
@custom_model
def model_a(x, a=1):
return x * a
class model_b(model_a):
# Override the evaluate from model_a
@classmethod
def evaluate(cls, x, a):
return -super().evaluate(x, a)
b = model_b()
assert b.param_names == ("a",)
assert b.a == 1
assert b(1) == -1
sig = signature(model_b.__init__)
assert list(sig.parameters.keys()) == ["self", "a", "kwargs"]
sig = signature(model_b.__call__)
assert list(sig.parameters.keys()) == [
"self",
"inputs",
"model_set_axis",
"with_bounding_box",
"fill_value",
"equivalencies",
"inputs_map",
"new_inputs",
]
def test_custom_model_parametrized_decorator():
"""Tests using custom_model as a decorator with parameters."""
def cosine(x, amplitude=1):
return [amplitude * np.cos(x)]
@custom_model(fit_deriv=cosine)
def sine(x, amplitude=1):
return amplitude * np.sin(x)
assert issubclass(sine, Model)
s = sine(2)
assert_allclose(s(np.pi / 2), 2)
assert_allclose(s.fit_deriv(0, 2), 2)
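# The decorated model above attaches the user-supplied ``fit_deriv`` function;
# the last assertion checks it is wired through: cosine(0, amplitude=2) == [2].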
def test_custom_model_n_outputs():
"""
    Test creating a custom_model that has more than one output, which
    requires special handling.
    Demonstrates that the ``n_outputs`` error from issue #11791 has been solved.
"""
@custom_model
def model(x, y, n_outputs=2):
return x + 1, y + 1
m = model()
assert not isinstance(m.n_outputs, Parameter)
assert isinstance(m.n_outputs, int)
assert m.n_outputs == 2
assert m.outputs == ("x0", "x1")
assert (
separability_matrix(m)
== [
[True, True],
[True, True],
]
).all()
@custom_model
def model(x, y, z, n_outputs=3):
return x + 1, y + 1, z + 1
m = model()
assert not isinstance(m.n_outputs, Parameter)
assert isinstance(m.n_outputs, int)
assert m.n_outputs == 3
assert m.outputs == ("x0", "x1", "x2")
assert (
separability_matrix(m)
== [
[True, True, True],
[True, True, True],
[True, True, True],
]
).all()
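# ``n_outputs`` in the signatures above is consumed as a model property (a
# plain int, not a Parameter), and the outputs are auto-named x0, x1, ...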
def test_custom_model_settable_parameters():
"""
Test creating a custom_model which specifically sets adjustable model
parameters.
    Demonstrates part of issue #11791's notes about which passed parameters
    should and shouldn't be allowed. In this case, settable parameters
    should be allowed to have defaults set.
"""
@custom_model
def model(x, y, n_outputs=2, bounding_box=((1, 2), (3, 4))):
return x + 1, y + 1
m = model()
assert m.n_outputs == 2
assert m.bounding_box == ((1, 2), (3, 4))
m.bounding_box = ((9, 10), (11, 12))
assert m.bounding_box == ((9, 10), (11, 12))
m = model(bounding_box=((5, 6), (7, 8)))
assert m.n_outputs == 2
assert m.bounding_box == ((5, 6), (7, 8))
m.bounding_box = ((9, 10), (11, 12))
assert m.bounding_box == ((9, 10), (11, 12))
@custom_model
def model(x, y, n_outputs=2, outputs=("z0", "z1")):
return x + 1, y + 1
m = model()
assert m.n_outputs == 2
assert m.outputs == ("z0", "z1")
m.outputs = ("a0", "a1")
assert m.outputs == ("a0", "a1")
m = model(outputs=("w0", "w1"))
assert m.n_outputs == 2
assert m.outputs == ("w0", "w1")
m.outputs = ("a0", "a1")
assert m.outputs == ("a0", "a1")
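# Takeaway from the test above: defaults such as ``bounding_box`` and
# ``outputs`` are consumed as model properties rather than fittable
# parameters, so they can be set as decorator defaults or per instance.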
def test_custom_model_rejected_parameters():
"""
Test creating a custom_model which attempts to override non-overridable
parameters.
    Demonstrates part of issue #11791's notes about which passed parameters
    should and shouldn't be allowed. In this case, overriding non-settable
    parameters should raise an error rather than risk unexpected behavior.
"""
with pytest.raises(
ValueError, match=r"Parameter 'n_inputs' cannot be a model property: *"
):
@custom_model
def model1(x, y, n_outputs=2, n_inputs=3):
return x + 1, y + 1
with pytest.raises(
ValueError, match=r"Parameter 'uses_quantity' cannot be a model property: *"
):
@custom_model
def model2(x, y, n_outputs=2, uses_quantity=True):
return x + 1, y + 1
def test_custom_inverse():
"""Test setting a custom inverse on a model."""
p = models.Polynomial1D(1, c0=-2, c1=3)
# A trivial inverse for a trivial polynomial
inv = models.Polynomial1D(1, c0=(2.0 / 3.0), c1=(1.0 / 3.0))
MESSAGE = (
r"No analytical or user-supplied inverse transform has been implemented for"
r" this model"
)
with pytest.raises(NotImplementedError, match=MESSAGE):
p.inverse
p.inverse = inv
x = np.arange(100)
assert_allclose(x, p(p.inverse(x)))
assert_allclose(x, p.inverse(p(x)))
p.inverse = None
with pytest.raises(NotImplementedError, match=MESSAGE):
p.inverse
def test_custom_inverse_reset():
"""Test resetting a custom inverse to the model's default inverse."""
class TestModel(Model):
n_inputs = 0
outputs = ("y",)
@property
def inverse(self):
return models.Shift()
@staticmethod
def evaluate():
return 0
# The above test model has no meaning, nor does its inverse--this just
# tests that setting an inverse and resetting to the default inverse works
m = TestModel()
assert isinstance(m.inverse, models.Shift)
m.inverse = models.Scale()
assert isinstance(m.inverse, models.Scale)
del m.inverse
assert isinstance(m.inverse, models.Shift)
def test_render_model_2d():
imshape = (71, 141)
image = np.zeros(imshape)
coords = y, x = np.indices(imshape)
model = models.Gaussian2D(x_stddev=6.1, y_stddev=3.9, theta=np.pi / 3)
# test points for edges
ye, xe = [0, 35, 70], [0, 70, 140]
# test points for floating point positions
yf, xf = [35.1, 35.5, 35.9], [70.1, 70.5, 70.9]
test_pts = [(a, b) for a in xe for b in ye]
test_pts += [(a, b) for a in xf for b in yf]
for x0, y0 in test_pts:
model.x_mean = x0
model.y_mean = y0
expected = model(x, y)
for xy in [coords, None]:
for im in [image.copy(), None]:
                if (im is None) and (xy is None):
# this case is tested in Fittable2DModelTester
continue
actual = model.render(out=im, coords=xy)
if im is None:
assert_allclose(actual, model.render(coords=xy))
# assert images match
assert_allclose(expected, actual, atol=3e-7)
# assert model fully captured
if (x0, y0) == (70, 35):
boxed = model.render()
flux = np.sum(expected)
assert ((flux - np.sum(boxed)) / flux) < 1e-7
    # test that an error is raised when the bounding box is larger than the
    # input array
    with pytest.raises(ValueError):
        model.render(out=np.zeros((1, 1)))
def test_render_model_1d():
npix = 101
image = np.zeros(npix)
coords = np.arange(npix)
model = models.Gaussian1D()
# test points
test_pts = [0, 49.1, 49.5, 49.9, 100]
# test widths
test_stdv = np.arange(5.5, 6.7, 0.2)
for x0, stdv in [(p, s) for p in test_pts for s in test_stdv]:
model.mean = x0
model.stddev = stdv
expected = model(coords)
for x in [coords, None]:
for im in [image.copy(), None]:
                if (im is None) and (x is None):
# this case is tested in Fittable1DModelTester
continue
actual = model.render(out=im, coords=x)
# assert images match
assert_allclose(expected, actual, atol=3e-7)
# assert model fully captured
if (x0, stdv) == (49.5, 5.5):
boxed = model.render()
flux = np.sum(expected)
assert ((flux - np.sum(boxed)) / flux) < 1e-7
@pytest.mark.filterwarnings("ignore:invalid value encountered in less")
def test_render_model_3d():
imshape = (17, 21, 27)
image = np.zeros(imshape)
coords = np.indices(imshape)
def ellipsoid(x, y, z, x0=13.0, y0=10.0, z0=8.0, a=4.0, b=3.0, c=2.0, amp=1.0):
rsq = ((x - x0) / a) ** 2 + ((y - y0) / b) ** 2 + ((z - z0) / c) ** 2
val = (rsq < 1) * amp
return val
class Ellipsoid3D(custom_model(ellipsoid)):
@property
def bounding_box(self):
return (
(self.z0 - self.c, self.z0 + self.c),
(self.y0 - self.b, self.y0 + self.b),
(self.x0 - self.a, self.x0 + self.a),
)
model = Ellipsoid3D()
# test points for edges
ze, ye, xe = [0, 8, 16], [0, 10, 20], [0, 13, 26]
# test points for floating point positions
zf, yf, xf = [8.1, 8.5, 8.9], [10.1, 10.5, 10.9], [13.1, 13.5, 13.9]
test_pts = [(x, y, z) for x in xe for y in ye for z in ze]
test_pts += [(x, y, z) for x in xf for y in yf for z in zf]
for x0, y0, z0 in test_pts:
model.x0 = x0
model.y0 = y0
model.z0 = z0
expected = model(*coords[::-1])
for c in [coords, None]:
for im in [image.copy(), None]:
                if (im is None) and (c is None):
continue
actual = model.render(out=im, coords=c)
boxed = model.render()
# assert images match
assert_allclose(expected, actual)
# assert model fully captured
if (z0, y0, x0) == (8, 10, 13):
boxed = model.render()
assert (np.sum(expected) - np.sum(boxed)) == 0
def test_render_model_out_dtype():
"""Test different out.dtype for model.render."""
MESSAGE = (
r"Cannot cast ufunc 'add' output from .* to .* with casting rule 'same_kind"
)
for model in [models.Gaussian2D(), models.Gaussian2D() + models.Planar2D()]:
for dtype in [np.float64, np.float32, np.complex64]:
im = np.zeros((40, 40), dtype=dtype)
imout = model.render(out=im)
assert imout is im
assert imout.sum() != 0
with pytest.raises(TypeError, match=MESSAGE):
im = np.zeros((40, 40), dtype=np.int32)
imout = model.render(out=im)
def test_custom_bounding_box_1d():
"""
Tests that the bounding_box setter works.
"""
# 1D models
g1 = models.Gaussian1D()
bb = g1.bounding_box
expected = g1.render()
# assign the same bounding_box, now through the bounding_box setter
g1.bounding_box = bb
assert_allclose(g1.render(), expected)
# 2D models
g2 = models.Gaussian2D()
bb = g2.bounding_box
expected = g2.render()
# assign the same bounding_box, now through the bounding_box setter
g2.bounding_box = bb
assert_allclose(g2.render(), expected)
def test_n_submodels_in_single_models():
assert models.Gaussian1D().n_submodels == 1
assert models.Gaussian2D().n_submodels == 1
def test_compound_deepcopy():
model = (models.Gaussian1D(10, 2, 3) | models.Shift(2)) & models.Rotation2D(21.3)
new_model = model.deepcopy()
assert id(model) != id(new_model)
assert id(model._leaflist) != id(new_model._leaflist)
assert id(model[0]) != id(new_model[0])
assert id(model[1]) != id(new_model[1])
assert id(model[2]) != id(new_model[2])
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_units_with_bounding_box():
points = np.arange(10, 20)
table = np.arange(10) * u.Angstrom
t = models.Tabular1D(points, lookup_table=table)
assert isinstance(t(10), u.Quantity)
assert isinstance(t(10, with_bounding_box=True), u.Quantity)
assert_quantity_allclose(t(10), t(10, with_bounding_box=True))
RENAMED_MODEL = models.Gaussian1D.rename("CustomGaussian")
MODEL_RENAME_CODE = """
from astropy.modeling.models import Gaussian1D
print(repr(Gaussian1D))
print(repr(Gaussian1D.rename('CustomGaussian')))
""".strip()
MODEL_RENAME_EXPECTED = b"""
<class 'astropy.modeling.functional_models.Gaussian1D'>
Name: Gaussian1D
N_inputs: 1
N_outputs: 1
Fittable parameters: ('amplitude', 'mean', 'stddev')
<class '__main__.CustomGaussian'>
Name: CustomGaussian (Gaussian1D)
N_inputs: 1
N_outputs: 1
Fittable parameters: ('amplitude', 'mean', 'stddev')
""".strip()
def test_rename_path(tmp_path):
# Regression test for a bug that caused the path to the class to be
# incorrect in a renamed model's __repr__.
assert (
repr(RENAMED_MODEL).splitlines()[0]
== "<class 'astropy.modeling.tests.test_core.CustomGaussian'>"
)
# Make sure that when called from a user script, the class name includes
# __main__.
env = os.environ.copy()
paths = [os.path.dirname(astropy.__path__[0])] + sys.path
env["PYTHONPATH"] = os.pathsep.join(paths)
script = tmp_path / "rename.py"
with open(script, "w") as f:
f.write(MODEL_RENAME_CODE)
output = subprocess.check_output([sys.executable, script], env=env)
assert output.splitlines() == MODEL_RENAME_EXPECTED.splitlines()
@pytest.mark.parametrize(
"model_class",
[models.Gaussian1D, models.Polynomial1D, models.Shift, models.Tabular1D],
)
def test_rename_1d(model_class):
new_model = model_class.rename(name="Test1D")
assert new_model.name == "Test1D"
@pytest.mark.parametrize(
"model_class", [models.Gaussian2D, models.Polynomial2D, models.Tabular2D]
)
def test_rename_2d(model_class):
new_model = model_class.rename(name="Test2D")
assert new_model.name == "Test2D"
def test_fix_inputs_integer():
"""
Tests that numpy integers can be passed as dictionary keys to fix_inputs
Issue #11358
"""
m = models.Identity(2)
mf = models.fix_inputs(m, {1: 22})
assert mf(1) == (1, 22)
mf_int32 = models.fix_inputs(m, {np.int32(1): 33})
assert mf_int32(1) == (1, 33)
mf_int64 = models.fix_inputs(m, {np.int64(1): 44})
assert mf_int64(1) == (1, 44)
def test_fix_inputs_empty_dict():
"""
    Tests that an empty dictionary can be passed to fix_inputs
Issue #11355
"""
m = models.Identity(2)
mf = models.fix_inputs(m, {})
assert mf(1, 2) == (1, 2)
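# A minimal sketch (not part of the original suite) combining the cases
# above: fixing one input of Identity(2) by name while leaving the other free.
def test_fix_inputs_name_key_sketch():
    m = models.Identity(2)
    # Identity(2) has inputs ("x0", "x1"); fix "x1" by name
    mf = models.fix_inputs(m, {"x1": 7})
    assert mf(3) == (3, 7)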
def test_rename_inputs_outputs():
g2 = models.Gaussian2D(10, 2, 3, 1, 2)
assert g2.inputs == ("x", "y")
assert g2.outputs == ("z",)
MESSAGE = r"Expected .* number of .*, got .*"
with pytest.raises(ValueError, match=MESSAGE):
g2.inputs = ("w",)
with pytest.raises(ValueError, match=MESSAGE):
g2.outputs = ("w", "e")
def test__prepare_output_single_model():
model = models.Gaussian1D()
# No broadcast
assert (
np.array([1, 2]) == model._prepare_output_single_model(np.array([1, 2]), None)
).all()
# Broadcast to scalar
assert model._prepare_output_single_model(np.array([1]), ()) == 1
assert model._prepare_output_single_model(np.asanyarray(2), ()) == 2
# Broadcast reshape
output = np.array([[1, 2, 3], [4, 5, 6]])
reshape = np.array([[1, 2], [3, 4], [5, 6]])
assert (output == model._prepare_output_single_model(output, (2, 3))).all()
assert (reshape == model._prepare_output_single_model(output, (3, 2))).all()
# Broadcast reshape scalar
assert model._prepare_output_single_model(np.array([1]), (1, 2)) == 1
assert model._prepare_output_single_model(np.asanyarray(2), (3, 4)) == 2
# Fail to broadcast
assert (output == model._prepare_output_single_model(output, (1, 2))).all()
assert (output == model._prepare_output_single_model(output, (3, 4))).all()
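# As the "Fail to broadcast" assertions show, _prepare_output_single_model
# returns the output unchanged when the requested broadcast shape is
# incompatible, rather than raising.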
def test_prepare_outputs_mixed_broadcast():
"""
    Tests that _prepare_outputs_single_model does not fail when a smaller
    array is passed as the first input but the output is broadcast to a
    larger array.
Issue #10170
"""
model = models.Gaussian2D(1, 2, 3, 4, 5)
output = model([1, 2], 3)
assert output.shape == (2,)
np.testing.assert_array_equal(output, [0.9692332344763441, 1.0])
output = model(4, [5, 6])
assert output.shape == (2,)
np.testing.assert_array_equal(output, [0.8146473164114145, 0.7371233743916278])
def test_prepare_outputs_complex_reshape():
x = np.array(
[
[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15],
]
)
y = np.array(
[
[16, 17, 18, 19, 20],
[21, 22, 23, 24, 25],
[26, 27, 28, 29, 30],
]
)
m = models.Identity(3) | models.Mapping((2, 1, 0))
m.bounding_box = ((0, 100), (0, 200), (0, 50))
mf = models.fix_inputs(m, {2: 22})
t = mf | models.Mapping((2, 1), n_inputs=3)
output = mf(1, 2)
assert output == (22, 2, 1)
output = t(1, 2)
assert output == (1, 2)
output = t(x, y)
assert len(output) == 2
np.testing.assert_array_equal(output[0], x)
np.testing.assert_array_equal(output[1], y)
m = models.Identity(3) | models.Mapping((0, 1, 2))
m.bounding_box = ((0, 100), (0, 200), (0, 50))
mf = models.fix_inputs(m, {2: 22})
t = mf | models.Mapping((0, 1), n_inputs=3)
output = mf(1, 2)
assert output == (1, 2, 22)
output = t(1, 2)
assert output == (1, 2)
output = t(x, y)
assert len(output) == 2
np.testing.assert_array_equal(output[0], x)
np.testing.assert_array_equal(output[1], y)
def test_prepare_outputs_single_entry_vector():
"""
jwst and gwcs both require that single entry vectors produce single
entry output vectors, not scalars. This tests for that behavior.
"""
model = models.Gaussian2D(1, 2, 3, 4, 5)
output = model(np.array([1]), np.array([2]))
assert output.shape == (1,)
np.testing.assert_allclose(output, [0.9500411305585278])
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.filterwarnings("ignore: Using a non-tuple")
def test_prepare_outputs_sparse_grid():
"""
Test to show that #11060 has been solved.
"""
shape = (3, 3)
    data = np.arange(np.prod(shape)).reshape(shape) * u.m / u.s
points_unit = u.pix
points = [np.arange(size) * points_unit for size in shape]
kwargs = {
"bounds_error": False,
"fill_value": np.nan,
"method": "nearest",
}
transform = models.Tabular2D(points, data, **kwargs)
truth = (
np.array(
[
[0.0, 1.0, 2.0],
[3.0, 4.0, 5.0],
[6.0, 7.0, 8.0],
]
)
* u.m
/ u.s
)
points = np.meshgrid(np.arange(3), np.arange(3), indexing="ij", sparse=True)
x = points[0] * u.pix
y = points[1] * u.pix
value = transform(x, y)
assert (value == truth).all()
points = (
np.meshgrid(np.arange(3), np.arange(3), indexing="ij", sparse=False) * u.pix
)
value = transform(*points)
assert (value == truth).all()
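# Both sparse and dense meshgrid inputs broadcast to the same (3, 3) result,
# confirming the fix for #11060.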
def test_coerce_units():
model = models.Polynomial1D(1, c0=1, c1=2)
MESSAGE = r"Can only apply 'add' function to dimensionless quantities when other .*"
with pytest.raises(u.UnitsError, match=MESSAGE):
model(u.Quantity(10, u.m))
with_input_units = model.coerce_units({"x": u.m})
result = with_input_units(u.Quantity(10, u.m))
assert np.isclose(result, 21.0)
with_input_units_tuple = model.coerce_units((u.m,))
result = with_input_units_tuple(u.Quantity(10, u.m))
assert np.isclose(result, 21.0)
with_return_units = model.coerce_units(return_units={"y": u.s})
result = with_return_units(10)
assert np.isclose(result.value, 21.0)
assert result.unit == u.s
with_return_units_tuple = model.coerce_units(return_units=(u.s,))
result = with_return_units_tuple(10)
assert np.isclose(result.value, 21.0)
assert result.unit == u.s
with_both = model.coerce_units({"x": u.m}, {"y": u.s})
result = with_both(u.Quantity(10, u.m))
assert np.isclose(result.value, 21.0)
assert result.unit == u.s
with pytest.raises(
ValueError, match=r"input_units keys.*do not match model inputs"
):
model.coerce_units({"q": u.m})
with pytest.raises(ValueError, match=r"input_units length does not match n_inputs"):
model.coerce_units((u.m, u.s))
model_with_existing_input_units = models.BlackBody()
with pytest.raises(
ValueError,
match=r"Cannot specify input_units for model with existing input units",
):
model_with_existing_input_units.coerce_units({"x": u.m})
with pytest.raises(
ValueError, match=r"return_units keys.*do not match model outputs"
):
model.coerce_units(return_units={"q": u.m})
with pytest.raises(
ValueError, match=r"return_units length does not match n_outputs"
):
model.coerce_units(return_units=(u.m, u.s))
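# As exercised above, coerce_units wraps a unitless model: Quantity inputs
# are converted to the given input_units before evaluation, and outputs are
# tagged with return_units.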
def test_bounding_box_general_inverse():
model = NonFittableModel(42.5)
MESSAGE = r"No bounding box is defined for this model"
with pytest.raises(NotImplementedError, match=MESSAGE):
model.bounding_box
model.bounding_box = ()
assert model.bounding_box.bounding_box() == ()
model.inverse = NonFittableModel(3.14)
inverse_model = model.inverse
with pytest.raises(NotImplementedError, match=MESSAGE):
inverse_model.bounding_box
def test__add_special_operator():
sop_name = "name"
sop = "value"
key = _add_special_operator(sop_name, "value")
assert key[0] == sop_name
assert key[1] == SPECIAL_OPERATORS._unique_id
assert key in SPECIAL_OPERATORS
assert SPECIAL_OPERATORS[key] == sop
def test_print_special_operator_CompoundModel(capsys):
"""
Test that issue #11310 has been fixed
"""
model = convolve_models(models.Sersic2D(), models.Gaussian2D())
with astropy.conf.set_temp("max_width", 80):
# fmt: off
assert str(model) == (
"Model: CompoundModel\n"
"Inputs: ('x', 'y')\n"
"Outputs: ('z',)\n"
"Model set size: 1\n"
"Expression: convolve_fft (([0]), ([1]))\n"
"Components: \n"
" [0]: <Sersic2D(amplitude=1., r_eff=1., n=4., "
"x_0=0., y_0=0., ellip=0., theta=0.)>\n"
"\n"
" [1]: <Gaussian2D(amplitude=1., x_mean=0., y_mean=0., "
"x_stddev=1., y_stddev=1., theta=0.)>\n"
"Parameters:\n"
" amplitude_0 r_eff_0 n_0 x_0_0 y_0_0 ... y_mean_1 x_stddev_1 y_stddev_1 theta_1\n"
" ----------- ------- --- ----- ----- ... -------- ---------- ---------- -------\n"
" 1.0 1.0 4.0 0.0 0.0 ... 0.0 1.0 1.0 0.0"
)
# fmt: on
def test__validate_input_shape():
model = models.Gaussian1D()
model._n_models = 2
_input = np.array(
[
[1, 2, 3],
[4, 5, 6],
]
)
# Successful validation
assert model._validate_input_shape(_input, 0, model.inputs, 1, False) == (2, 3)
# Fail number of axes
MESSAGE = r"For model_set_axis=2, all inputs must be at least 3-dimensional"
with pytest.raises(ValueError, match=MESSAGE):
model._validate_input_shape(_input, 0, model.inputs, 2, True)
# Fail number of models (has argname)
MESSAGE = r"Input argument '.*' does not have the correct dimensions in .*"
with pytest.raises(ValueError, match=MESSAGE):
model._validate_input_shape(_input, 0, model.inputs, 1, True)
# Fail number of models (no argname)
with pytest.raises(ValueError, match=MESSAGE):
model._validate_input_shape(_input, 0, [], 1, True)
def test__validate_input_shapes():
model = models.Gaussian1D()
model._n_models = 2
inputs = [mk.MagicMock() for _ in range(3)]
argnames = mk.MagicMock()
model_set_axis = mk.MagicMock()
all_shapes = [mk.MagicMock() for _ in inputs]
# Successful validation
with mk.patch.object(
Model, "_validate_input_shape", autospec=True, side_effect=all_shapes
) as mkValidate:
with mk.patch.object(core, "check_broadcast", autospec=True) as mkCheck:
assert mkCheck.return_value == model._validate_input_shapes(
inputs, argnames, model_set_axis
)
assert mkCheck.call_args_list == [mk.call(*all_shapes)]
assert mkValidate.call_args_list == [
mk.call(model, _input, idx, argnames, model_set_axis, True)
for idx, _input in enumerate(inputs)
]
# Fail check_broadcast
MESSAGE = r"All inputs must have identical shapes or must be scalars"
with mk.patch.object(
Model, "_validate_input_shape", autospec=True, side_effect=all_shapes
) as mkValidate:
with mk.patch.object(
core, "check_broadcast", autospec=True, return_value=None
) as mkCheck:
with pytest.raises(ValueError, match=MESSAGE):
model._validate_input_shapes(inputs, argnames, model_set_axis)
assert mkCheck.call_args_list == [mk.call(*all_shapes)]
assert mkValidate.call_args_list == [
mk.call(model, _input, idx, argnames, model_set_axis, True)
for idx, _input in enumerate(inputs)
]
def test__remove_axes_from_shape():
model = models.Gaussian1D()
# len(shape) == 0
assert model._remove_axes_from_shape((), mk.MagicMock()) == ()
# axis < 0
assert model._remove_axes_from_shape((1, 2, 3), -1) == (1, 2)
assert model._remove_axes_from_shape((1, 2, 3), -2) == (1, 3)
assert model._remove_axes_from_shape((1, 2, 3), -3) == (2, 3)
# axis >= len(shape)
assert model._remove_axes_from_shape((1, 2, 3), 3) == ()
assert model._remove_axes_from_shape((1, 2, 3), 4) == ()
# 0 <= axis < len(shape)
assert model._remove_axes_from_shape((1, 2, 3), 0) == (2, 3)
assert model._remove_axes_from_shape((1, 2, 3), 1) == (3,)
assert model._remove_axes_from_shape((1, 2, 3), 2) == ()
def test_get_bounding_box():
model = models.Const2D(2)
# No with_bbox
assert model.get_bounding_box(False) is None
# No bounding_box
MESSAGE = r"No bounding box is defined for this model"
with pytest.raises(NotImplementedError, match=MESSAGE):
model.bounding_box
assert model.get_bounding_box(True) is None
# Normal bounding_box
model.bounding_box = ((0, 1), (0, 1))
assert not isinstance(model.bounding_box, CompoundBoundingBox)
assert model.get_bounding_box(True) == ((0, 1), (0, 1))
# CompoundBoundingBox with no removal
bbox = CompoundBoundingBox.validate(
model,
{(1,): ((-1, 0), (-1, 0)), (2,): ((0, 1), (0, 1))},
selector_args=[("y", False)],
)
model.bounding_box = bbox
assert isinstance(model.bounding_box, CompoundBoundingBox)
# Get using argument not with_bbox
assert model.get_bounding_box(True) == bbox
# Get using with_bbox not argument
assert model.get_bounding_box((1,)) == ((-1, 0), (-1, 0))
assert model.get_bounding_box((2,)) == ((0, 1), (0, 1))
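# Summary of the get_bounding_box semantics exercised above: False (or no box
# set) returns None; True returns whatever box is set, compound or not; a
# non-boolean selector such as (1,) indexes into a CompoundBoundingBox.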
def test_compound_bounding_box():
model = models.Gaussian1D()
truth = models.Gaussian1D()
bbox1 = CompoundBoundingBox.validate(
model, {(1,): (-1, 0), (2,): (0, 1)}, selector_args=[("x", False)]
)
bbox2 = CompoundBoundingBox.validate(
model, {(-0.5,): (-1, 0), (0.5,): (0, 1)}, selector_args=[("x", False)]
)
# Using with_bounding_box to pass a selector
model.bounding_box = bbox1
assert model(-0.5) == truth(-0.5)
assert model(-0.5, with_bounding_box=(1,)) == truth(-0.5)
assert np.isnan(model(-0.5, with_bounding_box=(2,)))
assert model(0.5) == truth(0.5)
assert model(0.5, with_bounding_box=(2,)) == truth(0.5)
assert np.isnan(model(0.5, with_bounding_box=(1,)))
# Using argument value to pass bounding_box
model.bounding_box = bbox2
assert model(-0.5) == truth(-0.5)
assert model(-0.5, with_bounding_box=True) == truth(-0.5)
assert model(0.5) == truth(0.5)
assert model(0.5, with_bounding_box=True) == truth(0.5)
MESSAGE = r"No bounding box is defined for selector: .*"
with pytest.raises(RuntimeError, match=MESSAGE):
model(0, with_bounding_box=True)
model1 = models.Gaussian1D()
truth1 = models.Gaussian1D()
model2 = models.Const1D(2)
truth2 = models.Const1D(2)
model = model1 + model2
truth = truth1 + truth2
assert isinstance(model, CompoundModel)
model.bounding_box = bbox1
assert model(-0.5) == truth(-0.5)
assert model(-0.5, with_bounding_box=1) == truth(-0.5)
assert np.isnan(model(-0.5, with_bounding_box=(2,)))
assert model(0.5) == truth(0.5)
assert model(0.5, with_bounding_box=2) == truth(0.5)
assert np.isnan(model(0.5, with_bounding_box=(1,)))
model.bounding_box = bbox2
assert model(-0.5) == truth(-0.5)
assert model(-0.5, with_bounding_box=True) == truth(-0.5)
assert model(0.5) == truth(0.5)
assert model(0.5, with_bounding_box=True) == truth(0.5)
with pytest.raises(RuntimeError, match=MESSAGE):
model(0, with_bounding_box=True)
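# In bbox1 the keys are explicit selectors passed via with_bounding_box=(key,);
# in bbox2 the keys equal the input values themselves, so with_bounding_box=True
# can select implicitly from the evaluated input.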
def test_bind_bounding_box():
model = models.Polynomial2D(3)
bbox = ((-1, 1), (-2, 2))
bind_bounding_box(model, bbox)
assert model.get_bounding_box() is not None
assert model.bounding_box == bbox
assert model.bounding_box["x"] == (-2, 2)
assert model.bounding_box["y"] == (-1, 1)
bind_bounding_box(model, bbox, order="F")
assert model.get_bounding_box() is not None
assert model.bounding_box == bbox
assert model.bounding_box["x"] == (-1, 1)
assert model.bounding_box["y"] == (-2, 2)
def test_bind_compound_bounding_box_using_with_bounding_box_select():
"""
    This demonstrates how to bind multiple bounding_boxes that are
    selectable using ``with_bounding_box``; note that there must be a
    fall-back to the implicit selector.
"""
model = models.Gaussian1D()
truth = models.Gaussian1D()
bbox = (0, 1)
MESSAGE = r"'tuple' object has no attribute 'items"
with pytest.raises(AttributeError, match=MESSAGE):
bind_compound_bounding_box(model, bbox, "x")
bbox = {0: (-1, 0), 1: (0, 1)}
bind_compound_bounding_box(model, bbox, [("x", False)])
# No bounding box
assert model(-0.5) == truth(-0.5)
assert model(0.5) == truth(0.5)
assert model(0) == truth(0)
assert model(1) == truth(1)
    # `with_bounding_box` must select explicitly, as `-0.5` is not a key
assert model(-0.5, with_bounding_box=0) == truth(-0.5)
assert np.isnan(model(-0.5, with_bounding_box=1))
    # `with_bounding_box` must select explicitly, as `0.5` is not a key
assert model(0.5, with_bounding_box=1) == truth(0.5)
assert np.isnan(model(0.5, with_bounding_box=(0,)))
# Fall back onto implicit selector
assert model(0, with_bounding_box=True) == truth(0)
assert model(1, with_bounding_box=True) == truth(1)
# Attempt to fall-back on implicit selector, but no bounding_box
MESSAGE = r"No bounding box is defined for selector: .*"
with pytest.raises(RuntimeError, match=MESSAGE):
model(0.5, with_bounding_box=True)
# Override implicit selector
assert np.isnan(model(1, with_bounding_box=0))
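# Note that an explicit with_bounding_box=<selector> always overrides the
# implicit selection that would otherwise key on the evaluated input value.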
def test_fix_inputs_compound_bounding_box():
base_model = models.Gaussian2D(1, 2, 3, 4, 5)
bbox = {2.5: (-1, 1), 3.14: (-7, 3)}
model = fix_inputs(base_model, {"y": 2.5}, bounding_boxes=bbox)
assert model.bounding_box == (-1, 1)
model = fix_inputs(base_model, {"x": 2.5}, bounding_boxes=bbox)
assert model.bounding_box == (-1, 1)
model = fix_inputs(
base_model, {"y": 2.5}, bounding_boxes=bbox, selector_args=(("y", True),)
)
assert model.bounding_box == (-1, 1)
model = fix_inputs(
base_model, {"x": 2.5}, bounding_boxes=bbox, selector_args=(("x", True),)
)
assert model.bounding_box == (-1, 1)
model = fix_inputs(
base_model, {"x": 2.5}, bounding_boxes=bbox, selector_args=((0, True),)
)
assert model.bounding_box == (-1, 1)
base_model = models.Identity(4)
bbox = {(2.5, 1.3): ((-1, 1), (-3, 3)), (2.5, 2.71): ((-3, 3), (-1, 1))}
model = fix_inputs(base_model, {"x0": 2.5, "x1": 1.3}, bounding_boxes=bbox)
assert model.bounding_box == ((-1, 1), (-3, 3))
model = fix_inputs(
base_model,
{"x0": 2.5, "x1": 1.3},
bounding_boxes=bbox,
selector_args=(("x0", True), ("x1", True)),
)
assert model.bounding_box == ((-1, 1), (-3, 3))
model = fix_inputs(
base_model,
{"x0": 2.5, "x1": 1.3},
bounding_boxes=bbox,
selector_args=((0, True), (1, True)),
)
assert model.bounding_box == ((-1, 1), (-3, 3))
def test_model_copy_with_bounding_box():
model = models.Polynomial2D(2)
bbox = ModelBoundingBox.validate(model, ((-0.5, 1047.5), (-0.5, 2047.5)), order="F")
# No bbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert model_copy.get_bounding_box() is None
assert model.get_bounding_box() is None
# with bbox
model.bounding_box = bbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert id(model_copy.bounding_box) != id(model.bounding_box)
for index, interval in model.bounding_box.intervals.items():
interval_copy = model_copy.bounding_box.intervals[index]
assert interval == interval_copy
assert id(interval) != interval_copy
# add model to compound model
model1 = model | models.Identity(1)
model_copy = model1.copy()
assert id(model_copy) != id(model1)
assert model_copy.get_bounding_box() is None
assert model1.get_bounding_box() is None
def test_compound_model_copy_with_bounding_box():
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ("x", "y", "slit_id")
bbox = ModelBoundingBox.validate(
model, ((-0.5, 1047.5), (-0.5, 2047.5), (-np.inf, np.inf)), order="F"
)
# No bbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert model_copy.get_bounding_box() is None
assert model.get_bounding_box() is None
# with bbox
model.bounding_box = bbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert id(model_copy.bounding_box) != id(model.bounding_box)
for index, interval in model.bounding_box.intervals.items():
interval_copy = model_copy.bounding_box.intervals[index]
assert interval == interval_copy
assert id(interval) != interval_copy
# add model to compound model
model1 = model | models.Identity(3)
model_copy = model1.copy()
assert id(model_copy) != id(model1)
assert model_copy.get_bounding_box() is None
assert model1.get_bounding_box() is None
def test_model_copy_with_compound_bounding_box():
model = models.Polynomial2D(2)
bbox = {(0,): (-0.5, 1047.5), (1,): (-0.5, 3047.5)}
cbbox = CompoundBoundingBox.validate(
model, bbox, selector_args=[("x", True)], order="F"
)
# No cbbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert model_copy.get_bounding_box() is None
assert model.get_bounding_box() is None
# with cbbox
model.bounding_box = cbbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert id(model_copy.bounding_box) != id(model.bounding_box)
assert model_copy.bounding_box.selector_args == model.bounding_box.selector_args
assert id(model_copy.bounding_box.selector_args) != id(
model.bounding_box.selector_args
)
for selector, bbox in model.bounding_box.bounding_boxes.items():
for index, interval in bbox.intervals.items():
interval_copy = model_copy.bounding_box.bounding_boxes[selector].intervals[
index
]
assert interval == interval_copy
assert id(interval) != interval_copy
# add model to compound model
model1 = model | models.Identity(1)
model_copy = model1.copy()
assert id(model_copy) != id(model1)
assert model_copy.get_bounding_box() is None
assert model1.get_bounding_box() is None
def test_compound_model_copy_with_compound_bounding_box():
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ("x", "y", "slit_id")
bbox = {
(0,): ((-0.5, 1047.5), (-0.5, 2047.5)),
(1,): ((-0.5, 3047.5), (-0.5, 4047.5)),
}
cbbox = CompoundBoundingBox.validate(
model, bbox, selector_args=[("slit_id", True)], order="F"
)
# No cbbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert model_copy.get_bounding_box() is None
assert model.get_bounding_box() is None
# with cbbox
model.bounding_box = cbbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert id(model_copy.bounding_box) != id(model.bounding_box)
assert model_copy.bounding_box.selector_args == model.bounding_box.selector_args
assert id(model_copy.bounding_box.selector_args) != id(
model.bounding_box.selector_args
)
for selector, bbox in model.bounding_box.bounding_boxes.items():
for index, interval in bbox.intervals.items():
interval_copy = model_copy.bounding_box.bounding_boxes[selector].intervals[
index
]
assert interval == interval_copy
assert id(interval) != interval_copy
# add model to compound model
model1 = model | models.Identity(3)
model_copy = model1.copy()
assert id(model_copy) != id(model1)
assert model_copy.get_bounding_box() is None
assert model1.get_bounding_box() is None
def test_compound_model_copy_user_attribute():
"""Regression test for issue #12370"""
model = models.Gaussian2D(100, 25, 25, 5, 5) | models.Identity(1)
model.xname = "x_mean" # user-defined attribute
assert hasattr(model, "xname")
assert model.xname == "x_mean"
model_copy = model.copy()
model_copy.xname
assert hasattr(model_copy, "xname")
assert model_copy.xname == "x_mean"
def test_model_mixed_array_scalar_bounding_box():
"""Regression test for issue #12319"""
model = models.Gaussian2D()
bbox = ModelBoundingBox.validate(model, ((-1, 1), (-np.inf, np.inf)), order="F")
model.bounding_box = bbox
x = np.array([-0.5, 0.5])
y = 0
    # Everything works when it's all in the bounding box
assert (model(x, y) == (model(x, y, with_bounding_box=True))).all()
def test_compound_model_mixed_array_scalar_bounding_box():
"""Regression test for issue #12319"""
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ("x", "y", "slit_id")
bbox = ModelBoundingBox.validate(
model, ((-0.5, 1047.5), (-0.5, 2047.5), (-np.inf, np.inf)), order="F"
)
model.bounding_box = bbox
x = np.array([1000, 1001])
y = np.array([2000, 2001])
slit_id = 0
    # Everything works when it's all in the bounding box
value0 = model(x, y, slit_id)
value1 = model(x, y, slit_id, with_bounding_box=True)
assert_equal(value0, value1)
def test_model_with_bounding_box_true_and_single_output():
"""Regression test for issue #12373"""
model = models.Mapping((1,))
x = [1, 2]
y = [3, 4]
# Check baseline
assert_equal(model(x, y), [3, 4])
# Check with_bounding_box=True should be the same
assert_equal(model(x, y, with_bounding_box=True), [3, 4])
model.bounding_box = ((-np.inf, np.inf), (-np.inf, np.inf))
# Check baseline
assert_equal(model(x, y), [3, 4])
# Check with_bounding_box=True should be the same
assert_equal(model(x, y, with_bounding_box=True), [3, 4])
def test_compound_model_with_bounding_box_true_and_single_output():
"""Regression test for issue #12373"""
model = models.Mapping((1,)) | models.Shift(1)
x = [1, 2]
y = [3, 4]
# Check baseline
assert_equal(model(x, y), [4, 5])
# Check with_bounding_box=True should be the same
assert_equal(model(x, y, with_bounding_box=True), [4, 5])
model.bounding_box = ((-np.inf, np.inf), (-np.inf, np.inf))
# Check baseline
assert_equal(model(x, y), [4, 5])
# Check with_bounding_box=True should be the same
assert_equal(model(x, y, with_bounding_box=True), [4, 5])
def test_bounding_box_pass_with_ignored():
"""Test the possibility of setting ignored variables in bounding box"""
model = models.Polynomial2D(2)
bbox = ModelBoundingBox.validate(model, (-1, 1), ignored=["y"])
model.bounding_box = bbox
assert model.bounding_box.bounding_box() == (-1, 1)
assert model.bounding_box == bbox
model = models.Polynomial2D(2)
bind_bounding_box(model, (-1, 1), ignored=["y"])
assert model.bounding_box.bounding_box() == (-1, 1)
assert model.bounding_box == bbox
def test_compound_bounding_box_pass_with_ignored():
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ("x", "y", "slit_id")
bbox = {
(0,): (-0.5, 1047.5),
(1,): (-0.5, 2047.5),
}
cbbox = CompoundBoundingBox.validate(
model, bbox, selector_args=[("slit_id", True)], ignored=["y"], order="F"
)
model.bounding_box = cbbox
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ("x", "y", "slit_id")
bind_compound_bounding_box(
model, bbox, selector_args=[("slit_id", True)], ignored=["y"], order="F"
)
assert model.bounding_box == cbbox
@pytest.mark.parametrize("int_type", [int, np.int32, np.int64, np.uint32, np.uint64])
def test_model_integer_indexing(int_type):
"""Regression for PR 12561; verify that compound model components
can be accessed by integer index"""
gauss = models.Gaussian2D()
airy = models.AiryDisk2D()
compound = gauss + airy
assert compound[int_type(0)] == gauss
assert compound[int_type(1)] == airy
def test_model_string_indexing():
"""Regression for PR 12561; verify that compound model components
can be accessed by indexing with model name"""
gauss = models.Gaussian2D()
gauss.name = "Model1"
airy = models.AiryDisk2D()
airy.name = "Model2"
compound = gauss + airy
assert compound["Model1"] == gauss
assert compound["Model2"] == airy
|
64991a92b38c2541c26321ffa0ee7d8d16ce2a4611009cfe458906a93840243d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Tests for physical functions."""
# pylint: disable=no-member, invalid-name
import numpy as np
import pytest
from astropy import cosmology
from astropy import units as u
from astropy.modeling.fitting import (
DogBoxLSQFitter,
LevMarLSQFitter,
LMLSQFitter,
TRFLSQFitter,
)
from astropy.modeling.physical_models import NFW, BlackBody
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.exceptions import AstropyUserWarning
__doctest_skip__ = ["*"]
fitters = [LevMarLSQFitter, TRFLSQFitter, LMLSQFitter, DogBoxLSQFitter]
# BlackBody tests
@pytest.mark.parametrize("temperature", (3000 * u.K, 2726.85 * u.deg_C))
def test_blackbody_evaluate(temperature):
b = BlackBody(temperature=temperature, scale=1.0)
assert_quantity_allclose(b(1.4 * u.micron), 486787299458.15656 * u.MJy / u.sr)
assert_quantity_allclose(b(214.13747 * u.THz), 486787299458.15656 * u.MJy / u.sr)
def test_blackbody_wiens_law():
b = BlackBody(293.0 * u.K)
assert_quantity_allclose(b.lambda_max, 9.890006672986939 * u.micron)
assert_quantity_allclose(b.nu_max, 17.22525080856469 * u.THz)
def test_blackbody_stefan_boltzmann_law():
b = BlackBody(293.0 * u.K)
assert_quantity_allclose(b.bolometric_flux, 133.02471751812573 * u.W / (u.m * u.m))
def test_blackbody_input_units():
SLAM = u.erg / (u.cm**2 * u.s * u.AA * u.sr)
SNU = u.erg / (u.cm**2 * u.s * u.Hz * u.sr)
b_lam = BlackBody(3000 * u.K, scale=1 * SLAM)
assert b_lam.input_units["x"] == u.AA
b_nu = BlackBody(3000 * u.K, scale=1 * SNU)
assert b_nu.input_units["x"] == u.Hz
def test_blackbody_return_units():
# return of evaluate has no units when temperature has no units
b = BlackBody(1000.0 * u.K, scale=1.0)
assert not isinstance(b.evaluate(1.0 * u.micron, 1000.0, 1.0), u.Quantity)
# return has "standard" units when scale has no units
b = BlackBody(1000.0 * u.K, scale=1.0)
assert isinstance(b(1.0 * u.micron), u.Quantity)
assert b(1.0 * u.micron).unit == u.erg / (u.cm**2 * u.s * u.Hz * u.sr)
# return has scale units when scale has units
b = BlackBody(1000.0 * u.K, scale=1.0 * u.MJy / u.sr)
assert isinstance(b(1.0 * u.micron), u.Quantity)
assert b(1.0 * u.micron).unit == u.MJy / u.sr
    # scale has units, but the scale passed to evaluate has no units
assert_quantity_allclose(
b.evaluate(1.0 * u.micron, 1000.0 * u.K, 4.0), 89668184.86321202 * u.MJy / u.sr
)
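# Rule demonstrated above: a dimensionless ``scale`` yields surface brightness
# in erg / (cm**2 s Hz sr), while a Quantity ``scale`` sets the output unit.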
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_blackbody_fit(fitter):
fitter = fitter()
if isinstance(fitter, (TRFLSQFitter, DogBoxLSQFitter)):
rtol = 0.54
atol = 1e-15
else:
rtol = 1e-7
atol = 0
b = BlackBody(3000 * u.K, scale=5e-17 * u.Jy / u.sr)
wav = np.array([0.5, 5, 10]) * u.micron
fnu = np.array([1, 10, 5]) * u.Jy / u.sr
b_fit = fitter(b, wav, fnu, maxiter=1000)
assert_quantity_allclose(b_fit.temperature, 2840.7438355865065 * u.K, rtol=rtol)
assert_quantity_allclose(b_fit.scale, 5.803783292762381e-17, atol=atol)
def test_blackbody_overflow():
"""Test Planck function with overflow."""
photlam = u.photon / (u.cm**2 * u.s * u.AA)
wave = [0.0, 1000.0, 100000.0, 1e55] # Angstrom
temp = 10000.0 # Kelvin
bb = BlackBody(temperature=temp * u.K, scale=1.0)
with pytest.warns(
AstropyUserWarning,
match=r"Input contains invalid wavelength/frequency value\(s\)",
):
with np.errstate(all="ignore"):
bb_lam = bb(wave) * u.sr
flux = bb_lam.to(photlam, u.spectral_density(wave * u.AA)) / u.sr
# First element is NaN, last element is very small, others normal
assert np.isnan(flux[0])
with np.errstate(all="ignore"):
assert np.log10(flux[-1].value) < -134
np.testing.assert_allclose(
flux.value[1:-1], [0.00046368, 0.04636773], rtol=1e-3
) # 0.1% accuracy in PHOTLAM/sr
with np.errstate(all="ignore"):
flux = bb(1.0 * u.AA)
assert flux.value == 0
def test_blackbody_exceptions_and_warnings():
"""Test exceptions."""
# Negative temperature
with pytest.raises(
ValueError, match="Temperature should be positive: \\[-100.\\] K"
):
bb = BlackBody(-100 * u.K)
bb(1.0 * u.micron)
bb = BlackBody(5000 * u.K)
# Zero wavelength given for conversion to Hz
with pytest.warns(AstropyUserWarning, match="invalid") as w:
bb(0 * u.AA)
assert len(w) == 3 # 2 of these are RuntimeWarning from zero divide
# Negative wavelength given for conversion to Hz
with pytest.warns(AstropyUserWarning, match="invalid") as w:
bb(-1.0 * u.AA)
assert len(w) == 1
# Test that a non surface brightness convertible scale unit raises an error
with pytest.raises(
ValueError, match="scale units not dimensionless or in surface brightness: Jy"
):
bb = BlackBody(5000 * u.K, scale=1.0 * u.Jy)
def test_blackbody_array_temperature():
"""Regression test to make sure that the temperature can be an array."""
multibb = BlackBody([100, 200, 300] * u.K)
flux = multibb(1.2 * u.mm)
np.testing.assert_allclose(
flux.value, [1.804908e-12, 3.721328e-12, 5.638513e-12], rtol=1e-5
)
flux = multibb([2, 4, 6] * u.mm)
np.testing.assert_allclose(
flux.value, [6.657915e-13, 3.420677e-13, 2.291897e-13], rtol=1e-5
)
multibb = BlackBody(np.ones(4) * u.K)
flux = multibb(np.ones((3, 4)) * u.mm)
assert flux.shape == (3, 4)
def test_blackbody_dimensionless():
"""Test support for dimensionless (but not unscaled) units for scale"""
T = 3000 * u.K
r = 1e14 * u.cm
DL = 100 * u.Mpc
scale = np.pi * (r / DL) ** 2
bb1 = BlackBody(temperature=T, scale=scale)
    # even though we passed scale with units, we should be able to evaluate
    # with unitless values
bb1.evaluate(0.5, T.value, scale.to_value(u.dimensionless_unscaled))
bb2 = BlackBody(temperature=T, scale=scale.to_value(u.dimensionless_unscaled))
bb2.evaluate(0.5, T.value, scale.to_value(u.dimensionless_unscaled))
# bolometric flux for both cases should be equivalent
assert bb1.bolometric_flux == bb2.bolometric_flux
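# The dilution factor pi * (r / DL)**2 is dimensionless, so passing it as a
# Quantity or as a plain float yields equivalent models, including their
# bolometric fluxes.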
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_blackbody_dimensionless_fit():
T = 3000 * u.K
r = 1e14 * u.cm
DL = 100 * u.Mpc
scale = np.pi * (r / DL) ** 2
bb1 = BlackBody(temperature=T, scale=scale)
bb2 = BlackBody(temperature=T, scale=scale.to_value(u.dimensionless_unscaled))
fitter = LevMarLSQFitter()
wav = np.array([0.5, 5, 10]) * u.micron
fnu = np.array([1, 10, 5]) * u.Jy / u.sr
bb1_fit = fitter(bb1, wav, fnu, maxiter=1000)
bb2_fit = fitter(bb2, wav, fnu, maxiter=1000)
assert bb1_fit.temperature == bb2_fit.temperature
@pytest.mark.parametrize("mass", (2.0000000000000e15 * u.M_sun, 3.976819741e45 * u.kg))
def test_NFW_evaluate(mass):
"""Evaluation, density, and radii validation of NFW model."""
# Test parameters
concentration = 8.5
redshift = 0.63
cosmo = cosmology.Planck15
# Parsec tests
# 200c Overdensity
massfactor = ("critical", 200)
n200c = NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
assert_quantity_allclose(
n200c(3.0 * u.Mpc),
(
3.709693508e12 * (u.solMass / u.Mpc**3),
7.376391187e42 * (u.kg / u.Mpc**3),
),
)
assert_quantity_allclose(
n200c.rho_scale, (7800150779863018.0 * (u.solMass / u.Mpc**3))
)
assert_quantity_allclose(n200c.r_s, (0.24684627641195428 * u.Mpc))
assert_quantity_allclose(n200c.r_virial, (2.0981933495016114 * u.Mpc))
# 200m Overdensity
massfactor = ("mean", 200)
n200m = NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
assert_quantity_allclose(
n200m(3.0 * u.Mpc),
(
3.626093406e12 * (u.solMass / u.Mpc**3),
7.210159921e42 * (u.kg / u.Mpc**3),
),
)
assert_quantity_allclose(
n200m.rho_scale, (5118547639858115.0 * (u.solMass / u.Mpc**3))
)
assert_quantity_allclose(n200m.r_s, (0.2840612517326848 * u.Mpc))
assert_quantity_allclose(n200m.r_virial, (2.414520639727821 * u.Mpc))
# Virial mass
massfactor = "virial"
nvir = NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
assert_quantity_allclose(
nvir(3.0 * u.Mpc),
(
3.646475546e12 * (u.solMass / u.Mpc**3),
7.250687967e42 * (u.kg / u.Mpc**3),
),
)
assert_quantity_allclose(
nvir.rho_scale, (5649367524651067.0 * (u.solMass / u.Mpc**3))
)
assert_quantity_allclose(nvir.r_s, (0.2748701862303786 * u.Mpc))
assert_quantity_allclose(nvir.r_virial, (2.3363965829582183 * u.Mpc))
# kpc tests
# 200c Overdensity
massfactor = ("critical", 200)
n200c = NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
assert_quantity_allclose(
n200c(3141 * u.kpc),
(
3254.373619264334 * (u.solMass / u.kpc**3),
6.471028627484543e33 * (u.kg / u.kpc**3),
),
)
assert_quantity_allclose(
n200c.rho_scale, (7800150.779863021 * (u.solMass / u.kpc**3))
)
assert_quantity_allclose(n200c.r_s, (246.84627641195425 * u.kpc))
assert_quantity_allclose(n200c.r_virial, (2098.193349501611 * u.kpc))
# 200m Overdensity
massfactor = ("mean", 200)
n200m = NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
assert_quantity_allclose(
n200m(3141 * u.kpc),
(
3184.0370866188623 * (u.solMass / u.kpc**3),
6.33117077170161e33 * (u.kg / u.kpc**3),
),
)
assert_quantity_allclose(
n200m.rho_scale, (5118547.639858116 * (u.solMass / u.kpc**3))
)
assert_quantity_allclose(n200m.r_s, (284.0612517326848 * u.kpc))
assert_quantity_allclose(n200m.r_virial, (2414.5206397278207 * u.kpc))
# Virial mass
massfactor = "virial"
nvir = NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
assert_quantity_allclose(
nvir(3141 * u.kpc),
(
3201.1946851294997 * (u.solMass / u.kpc**3),
6.365287109937637e33 * (u.kg / u.kpc**3),
),
)
assert_quantity_allclose(
nvir.rho_scale, (5649367.5246510655 * (u.solMass / u.kpc**3))
)
assert_quantity_allclose(nvir.r_s, (274.87018623037864 * u.kpc))
assert_quantity_allclose(nvir.r_virial, (2336.3965829582185 * u.kpc))
# Meter tests
# 200c Overdensity
massfactor = ("critical", 200)
n200c = NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
assert_quantity_allclose(
n200c(4.2e23 * u.m),
(
1.527649658673012e-57 * (u.solMass / u.m**3),
3.0375936602739256e-27 * (u.kg / u.m**3),
),
)
assert_quantity_allclose(
n200c.rho_scale, (2.654919529637763e-52 * (u.solMass / u.m**3))
)
assert_quantity_allclose(n200c.r_s, (7.616880211930209e21 * u.m))
assert_quantity_allclose(n200c.r_virial, (6.474348180140678e22 * u.m))
# 200m Overdensity
massfactor = ("mean", 200)
n200m = NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
assert_quantity_allclose(
n200m(4.2e23 * u.m),
(
1.5194778058079436e-57 * (u.solMass / u.m**3),
3.0213446673751314e-27 * (u.kg / u.m**3),
),
)
assert_quantity_allclose(
n200m.rho_scale, (1.742188385322371e-52 * (u.solMass / u.m**3))
)
assert_quantity_allclose(n200m.r_s, (8.76521436235054e21 * u.m))
assert_quantity_allclose(n200m.r_virial, (7.450432207997959e22 * u.m))
# Virial mass
massfactor = "virial"
nvir = NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
assert_quantity_allclose(
nvir(4.2e23 * u.m),
(
1.5214899184117633e-57 * (u.solMass / u.m**3),
3.0253455719375224e-27 * (u.kg / u.m**3),
),
)
assert_quantity_allclose(
nvir.rho_scale, (1.922862338766335e-52 * (u.solMass / u.m**3))
)
assert_quantity_allclose(nvir.r_s, (8.481607714647913e21 * u.m))
assert_quantity_allclose(nvir.r_virial, (7.209366557450727e22 * u.m))
# Verify string input of overdensity type
# 200c Overdensity
massfactor = "200c"
n200c = NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
assert_quantity_allclose(
n200c(3.0 * u.Mpc),
(
3.709693508e12 * (u.solMass / u.Mpc**3),
7.376391187e42 * (u.kg / u.Mpc**3),
),
)
# 200m Overdensity
massfactor = "200m"
n200m = NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
assert_quantity_allclose(
n200m(3.0 * u.Mpc),
(
3.626093406e12 * (u.solMass / u.Mpc**3),
7.210159921e42 * (u.kg / u.Mpc**3),
),
)
# Virial mass
massfactor = "virial"
nvir = NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
assert_quantity_allclose(
nvir(3.0 * u.Mpc),
(
3.646475546e12 * (u.solMass / u.Mpc**3),
7.250687967e42 * (u.kg / u.Mpc**3),
),
)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_NFW_fit(fitter):
"""Test linear fitting of NFW model."""
fitter = fitter()
if isinstance(fitter, DogBoxLSQFitter):
pytest.xfail("dogbox method is poor fitting method for NFW model")
# Fixed parameters
redshift = 0.63
cosmo = cosmology.Planck15
# Radial set
# fmt: off
r = np.array(
[
1.00e+01, 1.00e+02, 2.00e+02, 2.50e+02, 3.00e+02, 4.00e+02, 5.00e+02,
7.50e+02, 1.00e+03, 1.50e+03, 2.50e+03, 6.50e+03, 1.15e+04
]
) * u.kpc
# fmt: on
# 200c Overdensity
massfactor = ("critical", 200)
# fmt: off
density_r = np.array(
[
1.77842761e+08, 9.75233623e+06, 2.93789626e+06, 1.90107238e+06,
1.30776878e+06, 7.01004140e+05, 4.20678479e+05, 1.57421880e+05,
7.54669701e+04, 2.56319769e+04, 6.21976562e+03, 3.96522424e+02,
7.39336808e+01
]
) * (u.solMass / u.kpc**3)
# fmt: on
n200c = NFW(
mass=1.8e15 * u.M_sun,
concentration=7.0,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
n200c.redshift.fixed = True
n_fit = fitter(n200c, r, density_r, maxiter=1000)
assert_quantity_allclose(n_fit.mass, 2.0000000000000e15 * u.M_sun)
assert_quantity_allclose(n_fit.concentration, 8.5)
# 200m Overdensity
massfactor = ("mean", 200)
# fmt: off
density_r = np.array(
[
1.35677282e+08, 7.95392979e+06, 2.50352599e+06, 1.64535870e+06,
1.14642248e+06, 6.26805453e+05, 3.81691731e+05, 1.46294819e+05,
7.11559560e+04, 2.45737796e+04, 6.05459585e+03, 3.92183991e+02,
7.34674416e+01
]
) * (u.solMass / u.kpc**3)
# fmt: on
n200m = NFW(
mass=1.8e15 * u.M_sun,
concentration=7.0,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
n200m.redshift.fixed = True
n_fit = fitter(n200m, r, density_r, maxiter=1000)
assert_quantity_allclose(n_fit.mass, 2.0000000000000e15 * u.M_sun)
assert_quantity_allclose(n_fit.concentration, 8.5)
# Virial mass
massfactor = ("virial", 200)
# fmt: off
density_r = np.array(
[
1.44573515e+08, 8.34873998e+06, 2.60137484e+06, 1.70348738e+06,
1.18337370e+06, 6.43994654e+05, 3.90800249e+05, 1.48930537e+05,
7.21856397e+04, 2.48289464e+04, 6.09477095e+03, 3.93248818e+02,
7.35821787e+01
]
) * (u.solMass / u.kpc**3)
# fmt: on
nvir = NFW(
mass=1.8e15 * u.M_sun,
concentration=7.0,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
nvir.redshift.fixed = True
n_fit = fitter(nvir, r, density_r, maxiter=1000)
assert_quantity_allclose(n_fit.mass, 2.0000000000000e15 * u.M_sun)
assert_quantity_allclose(n_fit.concentration, 8.5)
def test_NFW_circular_velocity():
"""Test circular velocity and radial validation of NFW model."""
# Test parameters
mass = 2.0000000000000e15 * u.M_sun
concentration = 8.5
redshift = 0.63
cosmo = cosmology.Planck15
r_r = (
np.array([0.01, 0.1, 0.2, 0.25, 0.3, 0.4, 0.5, 0.75, 1.0, 1.5, 2.5, 6.5, 11.5])
* u.Mpc
)
# 200c Overdensity tests
massfactor = ("critical", 200)
n200c = NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
# fmt: off
circ_v_200c = np.array(
[
702.45487454, 1812.4138346, 2150.50929296, 2231.5802568,
2283.96950242, 2338.45989696, 2355.78876772, 2332.41766543,
2276.89433811, 2154.53909153, 1950.07947819, 1512.37442943,
1260.94034541
]
) * (u.km / u.s)
# fmt: on
assert_quantity_allclose(n200c.circular_velocity(r_r), circ_v_200c)
assert_quantity_allclose(n200c.r_max, (0.5338248204429641 * u.Mpc))
assert_quantity_allclose(n200c.v_max, (2356.7204380904027 * (u.km / u.s)))
# 200m Overdensity tests
massfactor = ("mean", 200)
mass = 1.0e14 * u.M_sun
concentration = 12.3
redshift = 1.5
n200m = NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
# fmt: off
circ_v_200m = np.array(
[
670.18236647, 1088.9843324, 1046.82334367, 1016.88890732,
987.97273478, 936.00207134, 891.80115232, 806.63307977,
744.91002191, 659.33401039, 557.82823549, 395.9735786,
318.29863006
]
) * (u.km / u.s)
# fmt: on
assert_quantity_allclose(n200m.circular_velocity(r_r), circ_v_200m)
assert_quantity_allclose(n200m.r_max, (0.10196917920081808 * u.Mpc))
assert_quantity_allclose(n200m.v_max, (1089.0224395818727 * (u.km / u.s)))
# Virial Overdensity tests
massfactor = "virial"
mass = 1.2e45 * u.kg
concentration = 2.4
redshift = 0.34
# fmt: off
r_r = np.array(
[
3.08567758e+20, 3.08567758e+21, 6.17135516e+21, 7.71419395e+21,
9.25703274e+21, 1.23427103e+22, 1.54283879e+22, 2.31425819e+22,
3.08567758e+22, 4.62851637e+22, 7.71419395e+22, 2.00569043e+23,
3.54852922e+23
]
) * u.m
# fmt: on
nvir = NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
# fmt: off
circ_v_vir = np.array(
[
205.87461783, 604.65091823, 793.9190629, 857.52516521,
908.90280843, 986.53582718, 1041.69089845, 1124.19719446,
1164.58270747, 1191.33193561, 1174.02934755, 1023.69360527,
895.52206321
]
) * (u.km / u.s)
# fmt: on
assert_quantity_allclose(nvir.circular_velocity(r_r), circ_v_vir)
assert_quantity_allclose(nvir.r_max, (1.6484542328623448 * u.Mpc))
assert_quantity_allclose(nvir.v_max, (1192.3130989914962 * (u.km / u.s)))
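# Note: for an NFW profile the circular velocity peaks at r_max ~= 2.1626 * r_s,
# consistent with the r_s/r_max pairs asserted above and in test_NFW_evaluate
# (e.g. 2.1626 * 0.246846 Mpc ~= 0.53382 Mpc for the 200c case).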
def test_NFW_exceptions_and_warnings_and_misc():
"""Test NFW exceptions."""
# Arbitrary Test parameters
mass = 2.0000000000000e15 * u.M_sun
concentration = 8.5
redshift = 0.63
cosmo = cosmology.Planck15
massfactor = ("critical", 200)
# fmt: off
r_r = np.array(
[
1.00e+01, 1.00e+02, 2.00e+02, 2.50e+02, 3.00e+02, 4.00e+02, 5.00e+02,
7.50e+02, 1.00e+03, 1.50e+03, 2.50e+03, 6.50e+03, 1.15e+04
]
) * u.kpc
# fmt: on
# Massfactor exception tests
MESSAGE = r"Massfactor 'not' not one of 'critical', 'mean', or 'virial'"
with pytest.raises(ValueError, match=MESSAGE):
NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=("not", "virial"),
)
MESSAGE = r"Massfactor not virial string not of the form '#m', '#c', or 'virial'"
with pytest.raises(ValueError, match=MESSAGE):
NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor="not virial",
)
MESSAGE = r"Massfactor 200 not a tuple or string"
with pytest.raises(TypeError, match=MESSAGE):
NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=200,
)
# Verify unitless mass
# Density test
n200c = NFW(
mass=mass.value,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
assert_quantity_allclose(
n200c(3000.0),
(
3.709693508e12 * (u.solMass / u.Mpc**3),
7.376391187e42 * (u.kg / u.Mpc**3),
),
)
# Circular velocity test with unitless mass
# fmt: off
circ_v_200c = np.array(
[
702.45487454, 1812.4138346, 2150.50929296, 2231.5802568,
2283.96950242, 2338.45989696, 2355.78876772, 2332.41766543,
2276.89433811, 2154.53909153, 1950.07947819, 1512.37442943,
1260.94034541
]
) * (u.km / u.s)
# fmt: on
assert_quantity_allclose(n200c.circular_velocity(r_r), circ_v_200c)
    # Test with unitless input radii
assert_quantity_allclose(n200c.circular_velocity(r_r.value), circ_v_200c)
# Test Default Cosmology
ncos = NFW(mass=mass, concentration=concentration, redshift=redshift)
assert_quantity_allclose(ncos.A_NFW(concentration), 1.356554956501232)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
import platform
import types
import warnings
import numpy as np
import pytest
from numpy.random import default_rng
from numpy.testing import assert_allclose
from astropy.modeling import fitting, models
from astropy.modeling.core import Fittable1DModel
from astropy.modeling.parameters import Parameter
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.exceptions import AstropyUserWarning
fitters = [
fitting.LevMarLSQFitter,
fitting.TRFLSQFitter,
    fitting.LMLSQFitter,
fitting.DogBoxLSQFitter,
]
class TestNonLinearConstraints:
def setup_class(self):
self.g1 = models.Gaussian1D(10, 14.9, stddev=0.3)
self.g2 = models.Gaussian1D(10, 13, stddev=0.4)
self.x = np.arange(10, 20, 0.1)
self.y1 = self.g1(self.x)
self.y2 = self.g2(self.x)
rsn = default_rng(1234567890)
self.n = rsn.standard_normal(100)
self.ny1 = self.y1 + 2 * self.n
self.ny2 = self.y2 + 2 * self.n
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_fixed_par(self, fitter):
fitter = fitter()
g1 = models.Gaussian1D(10, mean=14.9, stddev=0.3, fixed={"amplitude": True})
model = fitter(g1, self.x, self.ny1)
assert model.amplitude.value == 10
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_tied_par(self, fitter):
fitter = fitter()
def tied(model):
mean = 50 * model.stddev
return mean
g1 = models.Gaussian1D(10, mean=14.9, stddev=0.3, tied={"mean": tied})
model = fitter(g1, self.x, self.ny1)
assert_allclose(model.mean.value, 50 * model.stddev, rtol=10 ** (-5))
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_joint_fitter(self):
from scipy import optimize
g1 = models.Gaussian1D(10, 14.9, stddev=0.3)
g2 = models.Gaussian1D(10, 13, stddev=0.4)
jf = fitting.JointFitter(
[g1, g2], {g1: ["amplitude"], g2: ["amplitude"]}, [9.8]
)
x = np.arange(10, 20, 0.1)
y1 = g1(x)
y2 = g2(x)
n = np.random.randn(100)
ny1 = y1 + 2 * n
ny2 = y2 + 2 * n
jf(x, ny1, x, ny2)
p1 = [14.9, 0.3]
p2 = [13, 0.4]
A = 9.8
p = np.r_[A, p1, p2]
def compmodel(A, p, x):
return A * np.exp(-0.5 / p[1] ** 2 * (x - p[0]) ** 2)
def errf(p, x1, y1, x2, y2):
return np.ravel(
np.r_[compmodel(p[0], p[1:3], x1) - y1, compmodel(p[0], p[3:], x2) - y2]
)
fitparams, _ = optimize.leastsq(errf, p, args=(x, ny1, x, ny2))
assert_allclose(jf.fitparams, fitparams, rtol=10 ** (-5))
assert_allclose(g1.amplitude.value, g2.amplitude.value)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_no_constraints(self, fitter):
from scipy import optimize
fitter = fitter()
g1 = models.Gaussian1D(9.9, 14.5, stddev=0.3)
def func(p, x):
return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)
def errf(p, x, y):
return func(p, x) - y
p0 = [9.9, 14.5, 0.3]
y = g1(self.x)
n = np.random.randn(100)
ny = y + n
fitpar, s = optimize.leastsq(errf, p0, args=(self.x, ny))
model = fitter(g1, self.x, ny)
assert_allclose(model.parameters, fitpar, rtol=5 * 10 ** (-3))
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
class TestBounds:
def setup_class(self):
A = -2.0
B = 0.5
self.x = np.linspace(-1.0, 1.0, 100)
self.y = A * self.x + B + np.random.normal(scale=0.1, size=100)
# fmt: off
data = np.array(
[
505.0, 556.0, 630.0, 595.0, 561.0, 553.0, 543.0, 496.0, 460.0, 469.0,
426.0, 518.0, 684.0, 798.0, 830.0, 794.0, 649.0, 706.0, 671.0, 545.0,
479.0, 454.0, 505.0, 700.0, 1058.0, 1231.0, 1325.0, 997.0, 1036.0, 884.0,
610.0, 487.0, 453.0, 527.0, 780.0, 1094.0, 1983.0, 1993.0, 1809.0, 1525.0,
1056.0, 895.0, 604.0, 466.0, 510.0, 678.0, 1130.0, 1986.0, 2670.0, 2535.0,
1878.0, 1450.0, 1200.0, 663.0, 511.0, 474.0, 569.0, 848.0, 1670.0, 2611.0,
3129.0, 2507.0, 1782.0, 1211.0, 723.0, 541.0, 511.0, 518.0, 597.0, 1137.0,
1993.0, 2925.0, 2438.0, 1910.0, 1230.0, 738.0, 506.0, 461.0, 486.0, 597.0,
733.0, 1262.0, 1896.0, 2342.0, 1792.0, 1180.0, 667.0, 482.0, 454.0, 482.0,
504.0, 566.0, 789.0, 1194.0, 1545.0, 1361.0, 933.0, 562.0, 418.0, 463.0,
435.0, 466.0, 528.0, 487.0, 664.0, 799.0, 746.0, 550.0, 478.0, 535.0,
443.0, 416.0, 439.0, 472.0, 472.0, 492.0, 523.0, 569.0, 487.0, 441.0,
428.0
]
)
# fmt: on
self.data = data.reshape(11, 11)
@pytest.mark.parametrize("fitter", fitters)
def test_bounds_lsq(self, fitter):
fitter = fitter()
guess_slope = 1.1
guess_intercept = 0.0
bounds = {"slope": (-1.5, 5.0), "intercept": (-1.0, 1.0)}
line_model = models.Linear1D(guess_slope, guess_intercept, bounds=bounds)
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
model = fitter(line_model, self.x, self.y)
slope = model.slope.value
intercept = model.intercept.value
assert slope + 10**-5 >= bounds["slope"][0]
assert slope - 10**-5 <= bounds["slope"][1]
assert intercept + 10**-5 >= bounds["intercept"][0]
assert intercept - 10**-5 <= bounds["intercept"][1]
def test_bounds_slsqp(self):
guess_slope = 1.1
guess_intercept = 0.0
bounds = {"slope": (-1.5, 5.0), "intercept": (-1.0, 1.0)}
line_model = models.Linear1D(guess_slope, guess_intercept, bounds=bounds)
fitter = fitting.SLSQPLSQFitter()
with pytest.warns(
AstropyUserWarning, match="consider using linear fitting methods"
):
model = fitter(line_model, self.x, self.y)
slope = model.slope.value
intercept = model.intercept.value
assert slope + 10**-5 >= bounds["slope"][0]
assert slope - 10**-5 <= bounds["slope"][1]
assert intercept + 10**-5 >= bounds["intercept"][0]
assert intercept - 10**-5 <= bounds["intercept"][1]
@pytest.mark.parametrize("fitter", fitters)
def test_bounds_gauss2d_lsq(self, fitter):
fitter = fitter()
X, Y = np.meshgrid(np.arange(11), np.arange(11))
bounds = {
"x_mean": [0.0, 11.0],
"y_mean": [0.0, 11.0],
"x_stddev": [1.0, 4],
"y_stddev": [1.0, 4],
}
gauss = models.Gaussian2D(
amplitude=10.0,
x_mean=5.0,
y_mean=5.0,
x_stddev=4.0,
y_stddev=4.0,
theta=0.5,
bounds=bounds,
)
if isinstance(fitter, (fitting.LevMarLSQFitter, fitting.DogBoxLSQFitter)):
with pytest.warns(AstropyUserWarning, match="The fit may be unsuccessful"):
model = fitter(gauss, X, Y, self.data)
else:
model = fitter(gauss, X, Y, self.data)
x_mean = model.x_mean.value
y_mean = model.y_mean.value
x_stddev = model.x_stddev.value
y_stddev = model.y_stddev.value
assert x_mean + 10**-5 >= bounds["x_mean"][0]
assert x_mean - 10**-5 <= bounds["x_mean"][1]
assert y_mean + 10**-5 >= bounds["y_mean"][0]
assert y_mean - 10**-5 <= bounds["y_mean"][1]
assert x_stddev + 10**-5 >= bounds["x_stddev"][0]
assert x_stddev - 10**-5 <= bounds["x_stddev"][1]
assert y_stddev + 10**-5 >= bounds["y_stddev"][0]
assert y_stddev - 10**-5 <= bounds["y_stddev"][1]
def test_bounds_gauss2d_slsqp(self):
X, Y = np.meshgrid(np.arange(11), np.arange(11))
bounds = {
"x_mean": [0.0, 11.0],
"y_mean": [0.0, 11.0],
"x_stddev": [1.0, 4],
"y_stddev": [1.0, 4],
}
gauss = models.Gaussian2D(
amplitude=10.0,
x_mean=5.0,
y_mean=5.0,
x_stddev=4.0,
y_stddev=4.0,
theta=0.5,
bounds=bounds,
)
gauss_fit = fitting.SLSQPLSQFitter()
# Warning does not appear in all the CI jobs.
# TODO: Rewrite the test for more consistent warning behavior.
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message=r".*The fit may be unsuccessful.*",
category=AstropyUserWarning,
)
model = gauss_fit(gauss, X, Y, self.data)
x_mean = model.x_mean.value
y_mean = model.y_mean.value
x_stddev = model.x_stddev.value
y_stddev = model.y_stddev.value
assert x_mean + 10**-5 >= bounds["x_mean"][0]
assert x_mean - 10**-5 <= bounds["x_mean"][1]
assert y_mean + 10**-5 >= bounds["y_mean"][0]
assert y_mean - 10**-5 <= bounds["y_mean"][1]
assert x_stddev + 10**-5 >= bounds["x_stddev"][0]
assert x_stddev - 10**-5 <= bounds["x_stddev"][1]
assert y_stddev + 10**-5 >= bounds["y_stddev"][0]
assert y_stddev - 10**-5 <= bounds["y_stddev"][1]
class TestLinearConstraints:
def setup_class(self):
self.p1 = models.Polynomial1D(4)
self.p1.c0 = 0
self.p1.c1 = 0
self.p1.window = [0.0, 9.0]
self.x = np.arange(10)
self.y = self.p1(self.x)
rsn = default_rng(1234567890)
self.n = rsn.standard_normal(10)
self.ny = self.y + self.n
def test(self):
self.p1.c0.fixed = True
self.p1.c1.fixed = True
pfit = fitting.LinearLSQFitter()
model = pfit(self.p1, self.x, self.y)
assert_allclose(self.y, model(self.x))
# Test constraints as parameter properties
def test_set_fixed_1():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1)
gauss.mean.fixed = True
assert gauss.fixed == {"amplitude": False, "mean": True, "stddev": False}
def test_set_fixed_2():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1, fixed={"mean": True})
assert gauss.mean.fixed is True
def test_set_tied_1():
def tie_amplitude(model):
return 50 * model.stddev
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1)
gauss.amplitude.tied = tie_amplitude
assert gauss.amplitude.tied is not False
assert isinstance(gauss.tied["amplitude"], types.FunctionType)
def test_set_tied_2():
def tie_amplitude(model):
return 50 * model.stddev
gauss = models.Gaussian1D(
amplitude=20, mean=2, stddev=1, tied={"amplitude": tie_amplitude}
)
assert gauss.amplitude.tied
def test_unset_fixed():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1, fixed={"mean": True})
gauss.mean.fixed = False
assert gauss.fixed == {"amplitude": False, "mean": False, "stddev": False}
def test_unset_tied():
def tie_amplitude(model):
return 50 * model.stddev
gauss = models.Gaussian1D(
amplitude=20, mean=2, stddev=1, tied={"amplitude": tie_amplitude}
)
gauss.amplitude.tied = False
assert gauss.tied == {"amplitude": False, "mean": False, "stddev": False}
def test_set_bounds_1():
gauss = models.Gaussian1D(
amplitude=20, mean=2, stddev=1, bounds={"stddev": (0, None)}
)
assert gauss.bounds == {
"amplitude": (None, None),
"mean": (None, None),
"stddev": (0.0, None),
}
def test_set_bounds_2():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1)
gauss.stddev.min = 0.0
assert gauss.bounds == {
"amplitude": (None, None),
"mean": (None, None),
"stddev": (0.0, None),
}
def test_unset_bounds():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1, bounds={"stddev": (0, 2)})
gauss.stddev.min = None
gauss.stddev.max = None
assert gauss.bounds == {
"amplitude": (None, None),
"mean": (None, None),
"stddev": (None, None),
}
def test_default_constraints():
"""Regression test for https://github.com/astropy/astropy/issues/2396
Ensure that default constraints defined on parameters are carried through
to instances of the models those parameters are defined for.
"""
class MyModel(Fittable1DModel):
a = Parameter(default=1)
b = Parameter(default=0, min=0, fixed=True)
@staticmethod
def evaluate(x, a, b):
return x * a + b
assert MyModel.a.default == 1
assert MyModel.b.default == 0
assert MyModel.b.min == 0
assert MyModel.b.bounds == (0, None)
assert MyModel.b.fixed is True
m = MyModel()
assert m.a.value == 1
assert m.b.value == 0
assert m.b.min == 0
assert m.b.bounds == (0, None)
assert m.b.fixed is True
assert m.bounds == {"a": (None, None), "b": (0, None)}
assert m.fixed == {"a": False, "b": True}
# Make a model instance that overrides the default constraints and values
m = MyModel(
3, 4, bounds={"a": (1, None), "b": (2, None)}, fixed={"a": True, "b": False}
)
assert m.a.value == 3
assert m.b.value == 4
assert m.a.min == 1
assert m.b.min == 2
assert m.a.bounds == (1, None)
assert m.b.bounds == (2, None)
assert m.a.fixed is True
assert m.b.fixed is False
assert m.bounds == {"a": (1, None), "b": (2, None)}
assert m.fixed == {"a": True, "b": False}
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.filterwarnings(r"ignore:divide by zero encountered.*")
@pytest.mark.parametrize("fitter", fitters)
def test_fit_with_fixed_and_bound_constraints(fitter):
"""
Regression test for https://github.com/astropy/astropy/issues/2235
Currently doesn't test that the fit is any *good*--just that parameters
stay within their given constraints.
"""
    # DogBoxLSQFitter causes failures on s390x, armel, and possibly others (not x86_64 or arm64)
if fitter == fitting.DogBoxLSQFitter and (
platform.machine() not in ("x86_64", "arm64")
):
pytest.xfail(
"DogBoxLSQFitter can to be unstable on non-standard platforms leading to "
"random test failures"
)
fitter = fitter()
m = models.Gaussian1D(
amplitude=3,
mean=4,
stddev=1,
bounds={"mean": (4, 5)},
fixed={"amplitude": True},
)
x = np.linspace(0, 10, 10)
y = np.exp(-(x**2) / 2)
fitted_1 = fitter(m, x, y)
assert fitted_1.mean >= 4
assert fitted_1.mean <= 5
assert fitted_1.amplitude == 3.0
m.amplitude.fixed = False
_ = fitter(m, x, y)
# It doesn't matter anymore what the amplitude ends up as so long as the
# bounds constraint was still obeyed
assert fitted_1.mean >= 4
assert fitted_1.mean <= 5
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_fit_with_bound_constraints_estimate_jacobian(fitter):
"""
Regression test for https://github.com/astropy/astropy/issues/2400
Checks that bounds constraints are obeyed on a custom model that does not
define fit_deriv (and thus its Jacobian must be estimated for non-linear
fitting).
"""
fitter = fitter()
class MyModel(Fittable1DModel):
a = Parameter(default=1)
b = Parameter(default=2)
@staticmethod
def evaluate(x, a, b):
return a * x + b
m_real = MyModel(a=1.5, b=-3)
x = np.arange(100)
y = m_real(x)
m = MyModel()
fitted_1 = fitter(m, x, y)
# This fit should be trivial so even without constraints on the bounds it
# should be right
assert np.allclose(fitted_1.a, 1.5)
assert np.allclose(fitted_1.b, -3)
m2 = MyModel()
m2.a.bounds = (-2, 2)
_ = fitter(m2, x, y)
assert np.allclose(fitted_1.a, 1.5)
assert np.allclose(fitted_1.b, -3)
    # Check that the estimated Jacobian was computed (it doesn't matter what
    # the values are so long as they're not all zero).
if fitter == fitting.LevMarLSQFitter:
assert np.any(fitter.fit_info["fjac"] != 0)
# https://github.com/astropy/astropy/issues/6014
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_gaussian2d_positive_stddev(fitter):
# This is 2D Gaussian with noise to be fitted, as provided by @ysBach
fitter = fitter()
# fmt: off
test = [
[-54.33, 13.81, -34.55, 8.95, -143.71, -0.81, 59.25, -14.78, -204.9,
-30.87, -124.39, 123.53, 70.81, -109.48, -106.77, 35.64, 18.29],
[-126.19, -89.13, 63.13, 50.74, 61.83, 19.06, 65.7, 77.94, 117.14,
139.37, 52.57, 236.04, 100.56, 242.28, -180.62, 154.02, -8.03],
[91.43, 96.45, -118.59, -174.58, -116.49, 80.11, -86.81, 14.62, 79.26,
7.56, 54.99, 260.13, -136.42, -20.77, -77.55, 174.52, 134.41],
[33.88, 7.63, 43.54, 70.99, 69.87, 33.97, 273.75, 176.66, 201.94,
336.34, 340.54, 163.77, -156.22, 21.49, -148.41, 94.88, 42.55],
[82.28, 177.67, 26.81, 17.66, 47.81, -31.18, 353.23, 589.11, 553.27,
242.35, 444.12, 186.02, 140.73, 75.2, -87.98, -18.23, 166.74],
[113.09, -37.01, 134.23, 71.89, 107.88, 198.69, 273.88, 626.63, 551.8,
547.61, 580.35, 337.8, 139.8, 157.64, -1.67, -26.99, 37.35],
[106.47, 31.97, 84.99, -125.79, 195.0, 493.65, 861.89, 908.31, 803.9,
781.01, 532.59, 404.67, 115.18, 111.11, 28.08, 122.05, -58.36],
[183.62, 45.22, 40.89, 111.58, 425.81, 321.53, 545.09, 866.02, 784.78,
731.35, 609.01, 405.41, -19.65, 71.2, -140.5, 144.07, 25.24],
[137.13, -86.95, 15.39, 180.14, 353.23, 699.01, 1033.8, 1014.49,
814.11, 647.68, 461.03, 249.76, 94.8, 41.17, -1.16, 183.76, 188.19],
[35.39, 26.92, 198.53, -37.78, 638.93, 624.41, 816.04, 867.28, 697.0,
491.56, 378.21, -18.46, -65.76, 98.1, 12.41, -102.18, 119.05],
[190.73, 125.82, 311.45, 369.34, 554.39, 454.37, 755.7, 736.61, 542.43,
188.24, 214.86, 217.91, 7.91, 27.46, -172.14, -82.36, -80.31],
[-55.39, 80.18, 267.19, 274.2, 169.53, 327.04, 488.15, 437.53, 225.38,
220.94, 4.01, -92.07, 39.68, 57.22, 144.66, 100.06, 34.96],
[130.47, -4.23, 46.3, 101.49, 115.01, 217.38, 249.83, 115.9, 87.36,
105.81, -47.86, -9.94, -82.28, 144.45, 83.44, 23.49, 183.9],
[-110.38, -115.98, 245.46, 103.51, 255.43, 163.47, 56.52, 33.82,
-33.26, -111.29, 88.08, 193.2, -100.68, 15.44, 86.32, -26.44, -194.1],
[109.36, 96.01, -124.89, -16.4, 84.37, 114.87, -65.65, -58.52, -23.22,
42.61, 144.91, -209.84, 110.29, 66.37, -117.85, -147.73, -122.51],
[10.94, 45.98, 118.12, -46.53, -72.14, -74.22, 21.22, 0.39, 86.03,
23.97, -45.42, 12.05, -168.61, 27.79, 61.81, 84.07, 28.79],
[46.61, -104.11, 56.71, -90.85, -16.51, -66.45, -141.34, 0.96, 58.08,
285.29, -61.41, -9.01, -323.38, 58.35, 80.14, -101.22, 145.65]
]
# fmt: on
g_init = models.Gaussian2D(x_mean=8, y_mean=8)
if isinstance(fitter, (fitting.TRFLSQFitter, fitting.DogBoxLSQFitter)):
pytest.xfail("TRFLSQFitter seems to be broken for this test.")
y, x = np.mgrid[:17, :17]
g_fit = fitter(g_init, x, y, test)
# Compare with @ysBach original result:
# - x_stddev was negative, so its abs value is used for comparison here.
# - theta is beyond (-90, 90) deg, which doesn't make sense, so ignored.
assert_allclose(
[g_fit.amplitude.value, g_fit.y_stddev.value],
[984.7694929790363, 3.1840618351417307],
rtol=1.5e-6,
)
assert_allclose(g_fit.x_mean.value, 7.198391516587464)
assert_allclose(g_fit.y_mean.value, 7.49720660088511, rtol=5e-7)
assert_allclose(g_fit.x_stddev.value, 1.9840185107597297, rtol=2e-6)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.filterwarnings(r"ignore:Model is linear in parameters.*")
@pytest.mark.parametrize("fitter", fitters)
def test_2d_model(fitter):
"""Issue #6403"""
from astropy.utils import NumpyRNGContext
fitter = fitter()
# 2D model with LevMarLSQFitter
gauss2d = models.Gaussian2D(10.2, 4.3, 5, 2, 1.2, 1.4)
X = np.linspace(-1, 7, 200)
Y = np.linspace(-1, 7, 200)
x, y = np.meshgrid(X, Y)
z = gauss2d(x, y)
w = np.ones(x.size)
w.shape = x.shape
with NumpyRNGContext(1234567890):
n = np.random.randn(x.size)
n.shape = x.shape
m = fitter(gauss2d, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
m = fitter(gauss2d, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
# 2D model with LevMarLSQFitter, fixed constraint
gauss2d.x_stddev.fixed = True
m = fitter(gauss2d, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
m = fitter(gauss2d, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
# Polynomial2D, col_fit_deriv=False
p2 = models.Polynomial2D(1, c0_0=1, c1_0=1.2, c0_1=3.2)
z = p2(x, y)
m = fitter(p2, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
m = fitter(p2, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
# Polynomial2D, col_fit_deriv=False, fixed constraint
p2.c1_0.fixed = True
m = fitter(p2, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
m = fitter(p2, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
def test_set_prior_posterior():
model = models.Polynomial1D(1)
model.c0.prior = models.Gaussian1D(2.3, 2, 0.1)
assert model.c0.prior(2) == 2.3
model.c0.posterior = models.Linear1D(1, 0.2)
assert model.c0.posterior(1) == 1.2
def test_set_constraints():
g = models.Gaussian1D()
p = models.Polynomial1D(1)
# Set bounds before model combination
g.stddev.bounds = (0, 3)
m = g + p
assert m.bounds == {
"amplitude_0": (None, None),
"mean_0": (None, None),
"stddev_0": (0.0, 3.0),
"c0_1": (None, None),
"c1_1": (None, None),
}
# Set bounds on the compound model
m.stddev_0.bounds = (1, 3)
assert m.bounds == {
"amplitude_0": (None, None),
"mean_0": (None, None),
"stddev_0": (1.0, 3.0),
"c0_1": (None, None),
"c1_1": (None, None),
}
# Set the bounds of a Parameter directly in the bounds dict
m.bounds["stddev_0"] = (4, 5)
assert m.bounds == {
"amplitude_0": (None, None),
"mean_0": (None, None),
"stddev_0": (4, 5),
"c0_1": (None, None),
"c1_1": (None, None),
}
# Set the bounds of a Parameter on the child model bounds dict
g.bounds["stddev"] = (1, 5)
m = g + p
assert m.bounds == {
"amplitude_0": (None, None),
"mean_0": (None, None),
"stddev_0": (1, 5),
"c0_1": (None, None),
"c1_1": (None, None),
}
import threading
import time
import xmlrpc.client as xmlrpc
from astropy.samp.client import SAMPClient
from astropy.samp.errors import SAMPClientError, SAMPHubError
from astropy.samp.hub import WebProfileDialog
from astropy.samp.hub_proxy import SAMPHubProxy
from astropy.samp.integrated_client import SAMPIntegratedClient
from astropy.samp.utils import ServerProxyPool
class AlwaysApproveWebProfileDialog(WebProfileDialog):
def __init__(self):
self.polling = True
WebProfileDialog.__init__(self)
def show_dialog(self, *args):
self.consent()
def poll(self):
while self.polling:
self.handle_queue()
time.sleep(0.1)
def stop(self):
self.polling = False
class SAMPWebHubProxy(SAMPHubProxy):
"""
Proxy class to simplify the client interaction with a SAMP hub (via the web
profile).
In practice web clients should run from the browser, so this is provided as
a means of testing a hub's support for the web profile from Python.
"""
def connect(self, pool_size=20, web_port=21012):
"""
Connect to the current SAMP Hub on localhost:web_port.
Parameters
----------
pool_size : int, optional
The number of socket connections opened to communicate with the
Hub.
"""
self._connected = False
try:
self.proxy = ServerProxyPool(
pool_size,
xmlrpc.ServerProxy,
f"http://127.0.0.1:{web_port}",
allow_none=1,
)
self.ping()
self._connected = True
except xmlrpc.ProtocolError as p:
raise SAMPHubError(f"Protocol Error {p.errcode}: {p.errmsg}")
@property
def _samp_hub(self):
"""
Property to abstract away the path to the hub, which allows this class
to be used for both the standard and the web profile.
"""
return self.proxy.samp.webhub
def set_xmlrpc_callback(self, private_key, xmlrpc_addr):
raise NotImplementedError(
"set_xmlrpc_callback is not defined for the web profile"
)
def register(self, identity_info):
"""
Proxy to ``register`` SAMP Hub method.
"""
return self._samp_hub.register(identity_info)
def allow_reverse_callbacks(self, private_key, allow):
"""
Proxy to ``allowReverseCallbacks`` SAMP Hub method.
"""
return self._samp_hub.allowReverseCallbacks(private_key, allow)
def pull_callbacks(self, private_key, timeout):
"""
Proxy to ``pullCallbacks`` SAMP Hub method.
"""
return self._samp_hub.pullCallbacks(private_key, timeout)
class SAMPWebClient(SAMPClient):
"""
Utility class which provides facilities to create and manage a SAMP
compliant XML-RPC server that acts as SAMP callable web client application.
In practice web clients should run from the browser, so this is provided as
a means of testing a hub's support for the web profile from Python.
Parameters
----------
hub : :class:`~astropy.samp.hub_proxy.SAMPWebHubProxy`
An instance of :class:`~astropy.samp.hub_proxy.SAMPWebHubProxy` to
be used for messaging with the SAMP Hub.
name : str, optional
Client name (corresponding to ``samp.name`` metadata keyword).
description : str, optional
Client description (corresponding to ``samp.description.text`` metadata
keyword).
metadata : dict, optional
Client application metadata in the standard SAMP format.
callable : bool, optional
Whether the client can receive calls and notifications. If set to
`False`, then the client can send notifications and calls, but can not
receive any.
"""
def __init__(self, hub, name=None, description=None, metadata=None, callable=True):
# GENERAL
self._is_running = False
self._is_registered = False
if metadata is None:
metadata = {}
if name is not None:
metadata["samp.name"] = name
if description is not None:
metadata["samp.description.text"] = description
self._metadata = metadata
self._callable = callable
# HUB INTERACTION
self.client = None
self._public_id = None
self._private_key = None
self._hub_id = None
self._notification_bindings = {}
self._call_bindings = {
"samp.app.ping": [self._ping, {}],
"client.env.get": [self._client_env_get, {}],
}
self._response_bindings = {}
self.hub = hub
self._registration_lock = threading.Lock()
self._registered_event = threading.Event()
if self._callable:
self._thread = threading.Thread(target=self._serve_forever)
self._thread.daemon = True
def _serve_forever(self):
while self.is_running:
# Wait until we are actually registered before trying to do
# anything, to avoid busy looping
# Watch for callbacks here
self._registered_event.wait()
with self._registration_lock:
if not self._is_registered:
return
results = self.hub.pull_callbacks(self.get_private_key(), 0)
for result in results:
if result["samp.methodName"] == "receiveNotification":
self.receive_notification(
self._private_key, *result["samp.params"]
)
elif result["samp.methodName"] == "receiveCall":
self.receive_call(self._private_key, *result["samp.params"])
elif result["samp.methodName"] == "receiveResponse":
self.receive_response(self._private_key, *result["samp.params"])
self.hub.disconnect()
def register(self):
"""
Register the client to the SAMP Hub.
"""
if self.hub.is_connected:
if self._private_key is not None:
raise SAMPClientError("Client already registered")
result = self.hub.register("Astropy SAMP Web Client")
if result["samp.self-id"] == "":
raise SAMPClientError(
"Registation failed - samp.self-id was not set by the hub."
)
if result["samp.private-key"] == "":
raise SAMPClientError(
"Registation failed - samp.private-key was not set by the hub."
)
self._public_id = result["samp.self-id"]
self._private_key = result["samp.private-key"]
self._hub_id = result["samp.hub-id"]
if self._callable:
self._declare_subscriptions()
self.hub.allow_reverse_callbacks(self._private_key, True)
if self._metadata != {}:
self.declare_metadata()
self._is_registered = True
# Let the client thread proceed
self._registered_event.set()
else:
raise SAMPClientError(
"Unable to register to the SAMP Hub. Hub proxy not connected."
)
def unregister(self):
# We have to hold the registration lock if the client is callable
# to avoid a race condition where the client queries the hub for
# pushCallbacks after it has already been unregistered from the hub
with self._registration_lock:
super().unregister()
class SAMPIntegratedWebClient(SAMPIntegratedClient):
"""
    A simple SAMP web client.
In practice web clients should run from the browser, so this is provided as
a means of testing a hub's support for the web profile from Python.
This class is meant to simplify the client usage providing a proxy class
that merges the :class:`~astropy.samp.client.SAMPWebClient` and
:class:`~astropy.samp.hub_proxy.SAMPWebHubProxy` functionalities in a
simplified API.
Parameters
----------
name : str, optional
Client name (corresponding to ``samp.name`` metadata keyword).
description : str, optional
Client description (corresponding to ``samp.description.text`` metadata
keyword).
metadata : dict, optional
Client application metadata in the standard SAMP format.
callable : bool, optional
Whether the client can receive calls and notifications. If set to
`False`, then the client can send notifications and calls, but can not
receive any.
"""
def __init__(self, name=None, description=None, metadata=None, callable=True):
self.hub = SAMPWebHubProxy()
self.client = SAMPWebClient(self.hub, name, description, metadata, callable)
def connect(self, pool_size=20, web_port=21012):
"""
Connect with the current or specified SAMP Hub, start and register the
client.
Parameters
----------
pool_size : int, optional
The number of socket connections opened to communicate with the
Hub.
"""
self.hub.connect(pool_size, web_port=web_port)
self.client.start()
self.client.register()
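# Illustrative usage sketch (assumes a running SAMP hub with the web profile
# enabled on the standard port 21012; the client name and message contents are
# hypothetical):
#
#     client = SAMPIntegratedWebClient(name="test_client")
#     client.connect(web_port=21012)
#     client.notify_all({"samp.mtype": "samp.app.ping"})
#     client.disconnect()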
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The astropy.utils.iers package provides access to the tables provided by
the International Earth Rotation and Reference Systems Service, in
particular allowing interpolation of published UT1-UTC values for given
times. These are used in `astropy.time` to provide UT1 values. The polar
motions are also used for determining earth orientation for
celestial-to-terrestrial coordinate transformations
(in `astropy.coordinates`).
"""
import re
from datetime import datetime
from urllib.parse import urlparse
from warnings import warn
import erfa
import numpy as np
from astropy import config as _config
from astropy import units as u
from astropy import utils
from astropy.table import MaskedColumn, QTable
from astropy.time import Time, TimeDelta
from astropy.utils.data import (
clear_download_cache,
get_pkg_data_filename,
get_readable_fileobj,
is_url_in_cache,
)
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.state import ScienceState
__all__ = [
"Conf",
"conf",
"earth_orientation_table",
"IERS",
"IERS_B",
"IERS_A",
"IERS_Auto",
"FROM_IERS_B",
"FROM_IERS_A",
"FROM_IERS_A_PREDICTION",
"TIME_BEFORE_IERS_RANGE",
"TIME_BEYOND_IERS_RANGE",
"IERS_A_FILE",
"IERS_A_URL",
"IERS_A_URL_MIRROR",
"IERS_A_README",
"IERS_B_FILE",
"IERS_B_URL",
"IERS_B_README",
"IERSRangeError",
"IERSStaleWarning",
"IERSWarning",
"IERSDegradedAccuracyWarning",
"LeapSeconds",
"IERS_LEAP_SECOND_FILE",
"IERS_LEAP_SECOND_URL",
"IETF_LEAP_SECOND_URL",
]
# IERS-A default file name, URL, and ReadMe with content description
IERS_A_FILE = "finals2000A.all"
IERS_A_URL = "https://datacenter.iers.org/data/9/finals2000A.all"
IERS_A_URL_MIRROR = "https://maia.usno.navy.mil/ser7/finals2000A.all"
IERS_A_README = get_pkg_data_filename("data/ReadMe.finals2000A")
# IERS-B default file name, URL, and ReadMe with content description
IERS_B_FILE = get_pkg_data_filename("data/eopc04_IAU2000.62-now")
IERS_B_URL = "http://hpiers.obspm.fr/iers/eop/eopc04/eopc04_IAU2000.62-now"
IERS_B_README = get_pkg_data_filename("data/ReadMe.eopc04_IAU2000")
# LEAP SECONDS default file name, URL, and alternative format/URL
IERS_LEAP_SECOND_FILE = get_pkg_data_filename("data/Leap_Second.dat")
IERS_LEAP_SECOND_URL = "https://hpiers.obspm.fr/iers/bul/bulc/Leap_Second.dat"
IETF_LEAP_SECOND_URL = "https://www.ietf.org/timezones/data/leap-seconds.list"
# Status/source values returned by IERS.ut1_utc
FROM_IERS_B = 0
FROM_IERS_A = 1
FROM_IERS_A_PREDICTION = 2
TIME_BEFORE_IERS_RANGE = -1
TIME_BEYOND_IERS_RANGE = -2
MJD_ZERO = 2400000.5
INTERPOLATE_ERROR = """\
interpolating from IERS_Auto using predictive values that are more
than {0} days old.
Normally you should not see this error because this class
automatically downloads the latest IERS-A table. Perhaps you are
offline? If you understand what you are doing then this error can be
suppressed by setting the auto_max_age configuration variable to
``None``:
from astropy.utils.iers import conf
conf.auto_max_age = None
"""
MONTH_ABBR = [
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
]
class IERSWarning(AstropyWarning):
"""
Generic warning class for IERS.
"""
class IERSDegradedAccuracyWarning(AstropyWarning):
"""
IERS time conversion has degraded accuracy normally due to setting
``conf.auto_download = False`` and ``conf.iers_degraded_accuracy = 'warn'``.
"""
class IERSStaleWarning(IERSWarning):
"""
Downloaded IERS table may be stale.
"""
def download_file(*args, **kwargs):
"""
Overload astropy.utils.data.download_file within iers module to use a
custom (longer) wait time. This just passes through ``*args`` and
``**kwargs`` after temporarily setting the download_file remote timeout to
the local ``iers.conf.remote_timeout`` value.
"""
kwargs.setdefault(
"http_headers",
{
"User-Agent": "astropy/iers",
"Accept": "*/*",
},
)
with utils.data.conf.set_temp("remote_timeout", conf.remote_timeout):
return utils.data.download_file(*args, **kwargs)
def _none_to_float(value):
"""
Convert None to a valid floating point value. Especially
for auto_max_age = None.
"""
return value if value is not None else np.finfo(float).max
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.utils.iers`.
"""
auto_download = _config.ConfigItem(
True,
"Enable auto-downloading of the latest IERS data. If set to False "
"then the local IERS-B file will be used by default (even if the "
"full IERS file with predictions was already downloaded and cached). "
"This parameter also controls whether internet resources will be "
"queried to update the leap second table if the installed version is "
"out of date. Default is True.",
)
auto_max_age = _config.ConfigItem(
30.0,
"Maximum age (days) of predictive data before auto-downloading. "
'See "Auto refresh behavior" in astropy.utils.iers documentation for details. '
"Default is 30.",
)
iers_auto_url = _config.ConfigItem(
IERS_A_URL, "URL for auto-downloading IERS file data."
)
iers_auto_url_mirror = _config.ConfigItem(
IERS_A_URL_MIRROR, "Mirror URL for auto-downloading IERS file data."
)
remote_timeout = _config.ConfigItem(
10.0, "Remote timeout downloading IERS file data (seconds)."
)
iers_degraded_accuracy = _config.ConfigItem(
["error", "warn", "ignore"],
"IERS behavior if the range of available IERS data does not "
"cover the times when converting time scales, potentially leading "
"to degraded accuracy.",
)
system_leap_second_file = _config.ConfigItem("", "System file with leap seconds.")
iers_leap_second_auto_url = _config.ConfigItem(
IERS_LEAP_SECOND_URL, "URL for auto-downloading leap seconds."
)
ietf_leap_second_auto_url = _config.ConfigItem(
IETF_LEAP_SECOND_URL, "Alternate URL for auto-downloading leap seconds."
)
conf = Conf()
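# Illustrative configuration sketch (``set_temp`` is provided by
# ConfigNamespace, as also used with ``utils.data.conf`` in this module):
#
#     from astropy.utils.iers import conf
#     with conf.set_temp("auto_download", False):
#         ...  # conversions then rely only on the locally available table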
class IERSRangeError(IndexError):
"""
Any error for when dates are outside of the valid range for IERS.
"""
class IERS(QTable):
"""Generic IERS table class, defining interpolation functions.
Sub-classed from `astropy.table.QTable`. The table should hold columns
'MJD', 'UT1_UTC', 'dX_2000A'/'dY_2000A', and 'PM_x'/'PM_y'.
"""
iers_table = None
"""Cached table, returned if ``open`` is called without arguments."""
@classmethod
def open(cls, file=None, cache=False, **kwargs):
"""Open an IERS table, reading it from a file if not loaded before.
Parameters
----------
file : str or None
full local or network path to the ascii file holding IERS data,
for passing on to the ``read`` class methods (further optional
arguments that are available for some IERS subclasses can be added).
If None, use the default location from the ``read`` class method.
cache : bool
Whether to use cache. Defaults to False, since IERS files
are regularly updated.
Returns
-------
IERS
An IERS table class instance
Notes
-----
On the first call in a session, the table will be memoized (in the
``iers_table`` class attribute), and further calls to ``open`` will
return this stored table if ``file=None`` (the default).
If a table needs to be re-read from disk, pass on an explicit file
location or use the (sub-class) close method and re-open.
If the location is a network location it is first downloaded via
download_file.
For the IERS class itself, an IERS_B sub-class instance is opened.
"""
if file is not None or cls.iers_table is None:
if file is not None:
if urlparse(file).netloc:
kwargs.update(file=download_file(file, cache=cache))
else:
kwargs.update(file=file)
# TODO: the below is really ugly and probably a bad idea. Instead,
# there should probably be an IERSBase class, which provides
# useful methods but cannot really be used on its own, and then
# *perhaps* an IERS class which provides best defaults. But for
# backwards compatibility, we use the IERS_B reader for IERS here.
if cls is IERS:
cls.iers_table = IERS_B.read(**kwargs)
else:
cls.iers_table = cls.read(**kwargs)
return cls.iers_table
@classmethod
def close(cls):
"""Remove the IERS table from the class.
This allows the table to be re-read from disk during one's session
(e.g., if one finds it is out of date and has updated the file).
"""
cls.iers_table = None
def mjd_utc(self, jd1, jd2=0.0):
"""Turn a time to MJD, returning integer and fractional parts.
Parameters
----------
jd1 : float, array, or `~astropy.time.Time`
first part of two-part JD, or Time object
jd2 : float or array, optional
second part of two-part JD.
Default is 0., ignored if jd1 is `~astropy.time.Time`.
Returns
-------
mjd : float or array
integer part of MJD
utc : float or array
fractional part of MJD
"""
try: # see if this is a Time object
jd1, jd2 = jd1.utc.jd1, jd1.utc.jd2
except Exception:
pass
mjd = np.floor(jd1 - MJD_ZERO + jd2)
utc = jd1 - (MJD_ZERO + mjd) + jd2
return mjd, utc
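    # Worked example (illustrative): for jd1=2450000.5 and jd2=0.25,
    # mjd = floor(2450000.5 - 2400000.5 + 0.25) = 50000.0 and
    # utc = 2450000.5 - (2400000.5 + 50000.0) + 0.25 = 0.25.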
def ut1_utc(self, jd1, jd2=0.0, return_status=False):
"""Interpolate UT1-UTC corrections in IERS Table for given dates.
Parameters
----------
jd1 : float, array of float, or `~astropy.time.Time` object
first part of two-part JD, or Time object
jd2 : float or float array, optional
second part of two-part JD.
Default is 0., ignored if jd1 is `~astropy.time.Time`.
return_status : bool
Whether to return status values. If False (default),
raise ``IERSRangeError`` if any time is out of the range covered
by the IERS table.
Returns
-------
ut1_utc : float or float array
UT1-UTC, interpolated in IERS Table
status : int or int array
Status values (if ``return_status``=``True``)::
``iers.FROM_IERS_B``
``iers.FROM_IERS_A``
``iers.FROM_IERS_A_PREDICTION``
``iers.TIME_BEFORE_IERS_RANGE``
``iers.TIME_BEYOND_IERS_RANGE``
"""
return self._interpolate(
jd1, jd2, ["UT1_UTC"], self.ut1_utc_source if return_status else None
)
def dcip_xy(self, jd1, jd2=0.0, return_status=False):
"""Interpolate CIP corrections in IERS Table for given dates.
Parameters
----------
jd1 : float, array of float, or `~astropy.time.Time` object
first part of two-part JD, or Time object
jd2 : float or float array, optional
second part of two-part JD (default 0., ignored if jd1 is Time)
return_status : bool
Whether to return status values. If False (default),
raise ``IERSRangeError`` if any time is out of the range covered
by the IERS table.
Returns
-------
D_x : `~astropy.units.Quantity` ['angle']
x component of CIP correction for the requested times.
D_y : `~astropy.units.Quantity` ['angle']
y component of CIP correction for the requested times
status : int or int array
Status values (if ``return_status``=``True``)::
``iers.FROM_IERS_B``
``iers.FROM_IERS_A``
``iers.FROM_IERS_A_PREDICTION``
``iers.TIME_BEFORE_IERS_RANGE``
``iers.TIME_BEYOND_IERS_RANGE``
"""
return self._interpolate(
jd1,
jd2,
["dX_2000A", "dY_2000A"],
self.dcip_source if return_status else None,
)
def pm_xy(self, jd1, jd2=0.0, return_status=False):
"""Interpolate polar motions from IERS Table for given dates.
Parameters
----------
jd1 : float, array of float, or `~astropy.time.Time` object
first part of two-part JD, or Time object
jd2 : float or float array, optional
second part of two-part JD.
Default is 0., ignored if jd1 is `~astropy.time.Time`.
return_status : bool
Whether to return status values. If False (default),
raise ``IERSRangeError`` if any time is out of the range covered
by the IERS table.
Returns
-------
PM_x : `~astropy.units.Quantity` ['angle']
x component of polar motion for the requested times.
PM_y : `~astropy.units.Quantity` ['angle']
y component of polar motion for the requested times.
status : int or int array
Status values (if ``return_status``=``True``)::
``iers.FROM_IERS_B``
``iers.FROM_IERS_A``
``iers.FROM_IERS_A_PREDICTION``
``iers.TIME_BEFORE_IERS_RANGE``
``iers.TIME_BEYOND_IERS_RANGE``
"""
return self._interpolate(
jd1, jd2, ["PM_x", "PM_y"], self.pm_source if return_status else None
)
def _check_interpolate_indices(self, indices_orig, indices_clipped, max_input_mjd):
"""
Check that the indices from interpolation match those after clipping
to the valid table range. This method gets overridden in the IERS_Auto
class because it has different requirements.
"""
if np.any(indices_orig != indices_clipped):
if conf.iers_degraded_accuracy == "error":
msg = (
"(some) times are outside of range covered by IERS table. Cannot"
" convert with full accuracy. To allow conversion with degraded"
" accuracy set astropy.utils.iers.conf.iers_degraded_accuracy to"
' "warn" or "silent". For more information about setting this'
" configuration parameter or controlling its value globally, see"
" the Astropy configuration system documentation"
" https://docs.astropy.org/en/stable/config/index.html."
)
raise IERSRangeError(msg)
elif conf.iers_degraded_accuracy == "warn":
# No IERS data covering the time(s) and user requested a warning.
msg = (
"(some) times are outside of range covered by IERS table, "
"accuracy is degraded."
)
warn(msg, IERSDegradedAccuracyWarning)
# No IERS data covering the time(s) and user is OK with no warning.
def _interpolate(self, jd1, jd2, columns, source=None):
mjd, utc = self.mjd_utc(jd1, jd2)
# enforce array
is_scalar = not hasattr(mjd, "__array__") or mjd.ndim == 0
if is_scalar:
mjd = np.array([mjd])
utc = np.array([utc])
elif mjd.size == 0:
# Short-cut empty input.
return np.array([])
self._refresh_table_as_needed(mjd)
        # For the typical format an exact match is always found (since the MJD
        # values are integers), so it matters which side we take;
        # side="right" ensures self['MJD'][i-1] <= mjd < self['MJD'][i]
i = np.searchsorted(self["MJD"].value, mjd, side="right")
# Get index to MJD at or just below given mjd, clipping to ensure we
# stay in range of table (status will be set below for those outside)
i1 = np.clip(i, 1, len(self) - 1)
i0 = i1 - 1
mjd_0, mjd_1 = self["MJD"][i0].value, self["MJD"][i1].value
results = []
for column in columns:
val_0, val_1 = self[column][i0], self[column][i1]
d_val = val_1 - val_0
if column == "UT1_UTC":
# Check & correct for possible leap second (correcting diff.,
# not 1st point, since jump can only happen right at 2nd point)
d_val -= d_val.round()
# Linearly interpolate (which is what TEMPO does for UT1-UTC, but
# may want to follow IERS gazette #13 for more precise
# interpolation and correction for tidal effects;
# https://maia.usno.navy.mil/iers-gaz13)
val = val_0 + (mjd - mjd_0 + utc) / (mjd_1 - mjd_0) * d_val
# Do not extrapolate outside range, instead just propagate last values.
val[i == 0] = self[column][0]
val[i == len(self)] = self[column][-1]
if is_scalar:
val = val[0]
results.append(val)
if source:
# Set status to source, using the routine passed in.
status = source(i1)
# Check for out of range
status[i == 0] = TIME_BEFORE_IERS_RANGE
status[i == len(self)] = TIME_BEYOND_IERS_RANGE
if is_scalar:
status = status[0]
results.append(status)
return results
else:
self._check_interpolate_indices(i1, i, np.max(mjd))
return results[0] if len(results) == 1 else results
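    # Minimal standalone sketch of the indexing/interpolation pattern used
    # above (illustrative, plain numpy):
    #
    #     import numpy as np
    #     tab_mjd = np.array([55000.0, 55001.0, 55002.0])
    #     tab_val = np.array([0.10, 0.12, 0.11])
    #     mjd, utc = 55001.0, 0.25
    #     i1 = np.clip(np.searchsorted(tab_mjd, mjd, side="right"),
    #                  1, len(tab_mjd) - 1)
    #     i0 = i1 - 1
    #     val = tab_val[i0] + ((mjd - tab_mjd[i0] + utc)
    #                          / (tab_mjd[i1] - tab_mjd[i0])
    #                          * (tab_val[i1] - tab_val[i0]))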
def _refresh_table_as_needed(self, mjd):
"""
Potentially update the IERS table in place depending on the requested
        time values in ``mjd`` and the time span of the table. The base behavior
is not to update the table. ``IERS_Auto`` overrides this method.
"""
pass
def ut1_utc_source(self, i):
"""Source for UT1-UTC. To be overridden by subclass."""
return np.zeros_like(i)
def dcip_source(self, i):
"""Source for CIP correction. To be overridden by subclass."""
return np.zeros_like(i)
def pm_source(self, i):
"""Source for polar motion. To be overridden by subclass."""
return np.zeros_like(i)
@property
def time_now(self):
"""
Property to provide the current time, but also allow for explicitly setting
the _time_now attribute for testing purposes.
"""
try:
return self._time_now
except Exception:
return Time.now()
def _convert_col_for_table(self, col):
# Fill masked columns with units to avoid dropped-mask warnings
# when converting to Quantity.
# TODO: Once we support masked quantities, we can drop this and
# in the code below replace b_bad with table['UT1_UTC_B'].mask, etc.
if getattr(col, "unit", None) is not None and isinstance(col, MaskedColumn):
col = col.filled(np.nan)
return super()._convert_col_for_table(col)
class IERS_A(IERS):
"""IERS Table class targeted to IERS A, provided by USNO.
These include rapid turnaround and predicted times.
See https://datacenter.iers.org/eop.php
Notes
-----
The IERS A file is not part of astropy. It can be downloaded from
``iers.IERS_A_URL`` or ``iers.IERS_A_URL_MIRROR``. See ``iers.__doc__``
for instructions on use in ``Time``, etc.
"""
iers_table = None
@classmethod
def _combine_a_b_columns(cls, iers_a):
"""
Return a new table with appropriate combination of IERS_A and B columns.
"""
# IERS A has some rows at the end that hold nothing but dates & MJD
# presumably to be filled later. Exclude those a priori -- there
# should at least be a predicted UT1-UTC and PM!
table = iers_a[np.isfinite(iers_a["UT1_UTC_A"]) & (iers_a["PolPMFlag_A"] != "")]
# This does nothing for IERS_A, but allows IERS_Auto to ensure the
# IERS B values in the table are consistent with the true ones.
table = cls._substitute_iers_b(table)
# Combine A and B columns, using B where possible.
b_bad = np.isnan(table["UT1_UTC_B"])
table["UT1_UTC"] = np.where(b_bad, table["UT1_UTC_A"], table["UT1_UTC_B"])
table["UT1Flag"] = np.where(b_bad, table["UT1Flag_A"], "B")
# Repeat for polar motions.
b_bad = np.isnan(table["PM_X_B"]) | np.isnan(table["PM_Y_B"])
table["PM_x"] = np.where(b_bad, table["PM_x_A"], table["PM_X_B"])
table["PM_y"] = np.where(b_bad, table["PM_y_A"], table["PM_Y_B"])
table["PolPMFlag"] = np.where(b_bad, table["PolPMFlag_A"], "B")
b_bad = np.isnan(table["dX_2000A_B"]) | np.isnan(table["dY_2000A_B"])
table["dX_2000A"] = np.where(b_bad, table["dX_2000A_A"], table["dX_2000A_B"])
table["dY_2000A"] = np.where(b_bad, table["dY_2000A_A"], table["dY_2000A_B"])
table["NutFlag"] = np.where(b_bad, table["NutFlag_A"], "B")
# Get the table index for the first row that has predictive values
# PolPMFlag_A IERS (I) or Prediction (P) flag for
# Bull. A polar motion values
# UT1Flag_A IERS (I) or Prediction (P) flag for
# Bull. A UT1-UTC values
# Since only 'P' and 'I' are possible and 'P' is guaranteed to come
# after 'I', we can use searchsorted for roughly a 100-fold speed-up
# over finding the first index where the flag equals 'P'.
p_index = min(
np.searchsorted(table["UT1Flag_A"], "P"),
np.searchsorted(table["PolPMFlag_A"], "P"),
)
table.meta["predictive_index"] = p_index
table.meta["predictive_mjd"] = table["MJD"][p_index].value
return table
@classmethod
def _substitute_iers_b(cls, table):
# See documentation in IERS_Auto.
return table
@classmethod
def read(cls, file=None, readme=None):
"""Read IERS-A table from a finals2000a.* file provided by USNO.
Parameters
----------
file : str
full path to ascii file holding IERS-A data.
Defaults to ``iers.IERS_A_FILE``.
readme : str
full path to ascii file holding CDS-style readme.
Defaults to package version, ``iers.IERS_A_README``.
Returns
-------
``IERS_A`` class instance
"""
if file is None:
file = IERS_A_FILE
if readme is None:
readme = IERS_A_README
iers_a = super().read(file, format="cds", readme=readme)
# Combine the A and B data for UT1-UTC and PM columns
table = cls._combine_a_b_columns(iers_a)
table.meta["data_path"] = file
table.meta["readme_path"] = readme
return table
def ut1_utc_source(self, i):
"""Set UT1-UTC source flag for entries in IERS table."""
ut1flag = self["UT1Flag"][i]
source = np.ones_like(i) * FROM_IERS_B
source[ut1flag == "I"] = FROM_IERS_A
source[ut1flag == "P"] = FROM_IERS_A_PREDICTION
return source
def dcip_source(self, i):
"""Set CIP correction source flag for entries in IERS table."""
nutflag = self["NutFlag"][i]
source = np.ones_like(i) * FROM_IERS_B
source[nutflag == "I"] = FROM_IERS_A
source[nutflag == "P"] = FROM_IERS_A_PREDICTION
return source
def pm_source(self, i):
"""Set polar motion source flag for entries in IERS table."""
pmflag = self["PolPMFlag"][i]
source = np.ones_like(i) * FROM_IERS_B
source[pmflag == "I"] = FROM_IERS_A
source[pmflag == "P"] = FROM_IERS_A_PREDICTION
return source
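# Hedged usage sketch for IERS_A (the local file name is an assumption;
# see the class docstring for download locations):
#
#     from astropy.utils import iers
#
#     a = iers.IERS_A.open("finals2000A.all")     # hypothetical local file
#     print(a.meta["predictive_mjd"])             # first predictive MJD
#     print((a["UT1Flag"] == "P").sum(), "rows with predicted UT1-UTC")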
class IERS_B(IERS):
"""IERS Table class targeted to IERS B, provided by IERS itself.
These are final values; see https://www.iers.org/IERS/EN/Home/home_node.html
Notes
-----
If the packaged IERS B file (``iers.IERS_B_FILE``) is out of date, a new
version can be downloaded from ``iers.IERS_B_URL``.
"""
iers_table = None
@classmethod
def read(cls, file=None, readme=None, data_start=14):
"""Read IERS-B table from a eopc04_iau2000.* file provided by IERS.
Parameters
----------
file : str
full path to ascii file holding IERS-B data.
Defaults to package version, ``iers.IERS_B_FILE``.
readme : str
full path to ascii file holding CDS-style readme.
Defaults to package version, ``iers.IERS_B_README``.
data_start : int
starting row. Default is 14, appropriate for standard IERS files.
Returns
-------
``IERS_B`` class instance
"""
if file is None:
file = IERS_B_FILE
if readme is None:
readme = IERS_B_README
table = super().read(file, format="cds", readme=readme, data_start=data_start)
table.meta["data_path"] = file
table.meta["readme_path"] = readme
return table
def ut1_utc_source(self, i):
"""Set UT1-UTC source flag for entries in IERS table."""
return np.ones_like(i) * FROM_IERS_B
def dcip_source(self, i):
"""Set CIP correction source flag for entries in IERS table."""
return np.ones_like(i) * FROM_IERS_B
def pm_source(self, i):
"""Set PM source flag for entries in IERS table."""
return np.ones_like(i) * FROM_IERS_B
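# Hedged usage sketch for IERS_B, using the table bundled with astropy:
#
#     from astropy.utils import iers
#
#     b = iers.IERS_B.open()          # reads iers.IERS_B_FILE by default
#     print(b["MJD"][-1])             # last (definitive) entry in the table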
class IERS_Auto(IERS_A):
"""
Provide most-recent IERS data and automatically handle downloading
of updated values as necessary.
"""
iers_table = None
@classmethod
def open(cls):
"""If the configuration setting ``astropy.utils.iers.conf.auto_download``
is set to True (default), then open a recent version of the IERS-A
table with predictions for UT1-UTC and polar motion out to
approximately one year from now. If the available version of this file
is older than ``astropy.utils.iers.conf.auto_max_age`` days (or does not
exist), then it will be downloaded over the network and cached.
If the configuration setting ``astropy.utils.iers.conf.auto_download``
is set to False then ``astropy.utils.iers.IERS()`` is returned. This
is normally the IERS-B table that is supplied with astropy.
On the first call in a session, the table will be memoized (in the
``iers_table`` class attribute), and further calls to ``open`` will
return this stored table.
Returns
-------
`~astropy.table.QTable` instance
With IERS (Earth rotation) data columns
"""
if not conf.auto_download:
cls.iers_table = IERS_B.open()
return cls.iers_table
all_urls = (conf.iers_auto_url, conf.iers_auto_url_mirror)
if cls.iers_table is not None:
# If the URL has changed, we need to redownload the file, so we
# should ignore the internally cached version.
if cls.iers_table.meta.get("data_url") in all_urls:
return cls.iers_table
for url in all_urls:
try:
filename = download_file(url, cache=True)
except Exception as err:
warn(f"failed to download {url}: {err}", IERSWarning)
continue
try:
cls.iers_table = cls.read(file=filename)
except Exception as err:
warn(f"malformed IERS table from {url}: {err}", IERSWarning)
continue
cls.iers_table.meta["data_url"] = url
break
else:
# Issue a warning here, perhaps user is offline. An exception
# will be raised downstream if actually trying to interpolate
# predictive values.
warn("unable to download valid IERS file, using local IERS-B", IERSWarning)
cls.iers_table = IERS_B.open()
return cls.iers_table
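# Hedged usage sketch for ``open`` above (network access is assumed to be
# available when auto_download is enabled):
#
#     from astropy.utils import iers
#
#     iers.conf.auto_download = True       # the default
#     table = iers.IERS_Auto.open()        # download/cache IERS-A if stale
#     again = iers.IERS_Auto.open()        # returns the memoized table
#     assert table is again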
def _check_interpolate_indices(self, indices_orig, indices_clipped, max_input_mjd):
"""Check that the indices from interpolation match those after clipping to the
valid table range. The IERS_Auto class is exempted as long as it has
sufficiently recent available data so the clipped interpolation is
always within the confidence bounds of current Earth rotation
knowledge.
"""
predictive_mjd = self.meta["predictive_mjd"]
# See explanation in _refresh_table_as_needed for these conditions
auto_max_age = _none_to_float(conf.auto_max_age)
if (
max_input_mjd > predictive_mjd
and self.time_now.mjd - predictive_mjd > auto_max_age
):
raise ValueError(INTERPOLATE_ERROR.format(auto_max_age))
def _refresh_table_as_needed(self, mjd):
"""Potentially update the IERS table in place depending on the requested
time values in ``mjd`` and the time span of the table.
For IERS_Auto the behavior is that the table is refreshed from the IERS
server if both the following apply:
- Any of the requested IERS values are predictive. The IERS-A table
contains predictive data out for a year after the available
definitive values.
- The first predictive values are at least ``conf.auto_max_age`` days old.
In other words the IERS-A table was created by IERS long enough
ago that it can be considered stale for predictions.
"""
max_input_mjd = np.max(mjd)
now_mjd = self.time_now.mjd
# IERS-A table contains predictive data out for a year after
# the available definitive values.
fpi = self.meta["predictive_index"]
predictive_mjd = self.meta["predictive_mjd"]
# Update table in place if necessary
auto_max_age = _none_to_float(conf.auto_max_age)
# If auto_max_age is smaller than the IERS update interval, then repeated
# downloads may occur without getting updated values (giving an
# IERSStaleWarning).
if auto_max_age < 10:
raise ValueError(
"IERS auto_max_age configuration value must be larger than 10 days"
)
if max_input_mjd > predictive_mjd and (now_mjd - predictive_mjd) > auto_max_age:
all_urls = (conf.iers_auto_url, conf.iers_auto_url_mirror)
# Get the latest version
try:
filename = download_file(all_urls[0], sources=all_urls, cache="update")
except Exception as err:
# Issue a warning here, perhaps user is offline. An exception
# will be raised downstream when actually trying to interpolate
# predictive values.
warn(
AstropyWarning(
f'failed to download {" and ".join(all_urls)}: {err}.\nA'
" coordinate or time-related calculation might be compromised"
" or fail because the dates are not covered by the available"
' IERS file. See the "IERS data access" section of the'
" astropy documentation for additional information on working"
" offline."
)
)
return
new_table = self.__class__.read(file=filename)
new_table.meta["data_url"] = str(all_urls[0])
# New table has new values?
if new_table["MJD"][-1] > self["MJD"][-1]:
# Replace current values from the first predictive index through
# the end of the current table. This replacement is much faster than
# deleting all rows and then using add_row for the whole duration.
new_fpi = np.searchsorted(
new_table["MJD"].value, predictive_mjd, side="right"
)
n_replace = len(self) - fpi
self[fpi:] = new_table[new_fpi : new_fpi + n_replace]
# Sanity check for continuity
if new_table["MJD"][new_fpi + n_replace] - self["MJD"][-1] != 1.0 * u.d:
raise ValueError("unexpected gap in MJD when refreshing IERS table")
# Now add new rows in place
for row in new_table[new_fpi + n_replace :]:
self.add_row(row)
self.meta.update(new_table.meta)
else:
warn(
IERSStaleWarning(
"IERS_Auto predictive values are older than"
f" {conf.auto_max_age} days but downloading the latest table"
" did not find newer values"
)
)
@classmethod
def _substitute_iers_b(cls, table):
"""Substitute IERS B values with those from a real IERS B table.
IERS-A has IERS-B values included, but for reasons unknown these
do not match the latest IERS-B values (see comments in #4436).
Here, we use the bundled astropy IERS-B table to overwrite the values
in the downloaded IERS-A table.
"""
iers_b = IERS_B.open()
# Substitute IERS-B values for existing B values in IERS-A table
mjd_b = table["MJD"][np.isfinite(table["UT1_UTC_B"])]
i0 = np.searchsorted(iers_b["MJD"], mjd_b[0], side="left")
i1 = np.searchsorted(iers_b["MJD"], mjd_b[-1], side="right")
iers_b = iers_b[i0:i1]
n_iers_b = len(iers_b)
# If there is overlap then replace IERS-A values from available IERS-B
if n_iers_b > 0:
# Sanity check that we are overwriting the correct values
if not u.allclose(table["MJD"][:n_iers_b], iers_b["MJD"]):
raise ValueError(
"unexpected mismatch when copying IERS-B values into IERS-A table."
)
# Finally do the overwrite
table["UT1_UTC_B"][:n_iers_b] = iers_b["UT1_UTC"]
table["PM_X_B"][:n_iers_b] = iers_b["PM_x"]
table["PM_Y_B"][:n_iers_b] = iers_b["PM_y"]
table["dX_2000A_B"][:n_iers_b] = iers_b["dX_2000A"]
table["dY_2000A_B"][:n_iers_b] = iers_b["dY_2000A"]
return table
class earth_orientation_table(ScienceState):
"""Default IERS table for Earth rotation and reference systems service.
These tables are used to calculate the offsets between ``UT1`` and ``UTC``
and for conversion to Earth-based coordinate systems.
The state itself is an IERS table, as an instance of one of the
`~astropy.utils.iers.IERS` classes. The default, the auto-updating
`~astropy.utils.iers.IERS_Auto` class, should suffice for most
purposes.
Examples
--------
To temporarily use the IERS-B file packaged with astropy::
>>> from astropy.utils import iers
>>> from astropy.time import Time
>>> iers_b = iers.IERS_B.open(iers.IERS_B_FILE)
>>> with iers.earth_orientation_table.set(iers_b):
... print(Time('2000-01-01').ut1.isot)
2000-01-01T00:00:00.355
To use the most recent IERS-A file for the whole session::
>>> iers_a = iers.IERS_A.open(iers.IERS_A_URL) # doctest: +SKIP
>>> iers.earth_orientation_table.set(iers_a) # doctest: +SKIP
<ScienceState earth_orientation_table: <IERS_A length=17463>...>
To go back to the default (of `~astropy.utils.iers.IERS_Auto`)::
>>> iers.earth_orientation_table.set(None) # doctest: +SKIP
<ScienceState earth_orientation_table: <IERS_Auto length=17428>...>
"""
_value = None
@classmethod
def validate(cls, value):
if value is None:
value = IERS_Auto.open()
if not isinstance(value, IERS):
raise ValueError("earth_orientation_table requires an IERS Table.")
return value
class LeapSeconds(QTable):
"""Leap seconds class, holding TAI-UTC differences.
The table should hold columns 'year', 'month', 'tai_utc'.
Methods are provided to initialize the table from IERS ``Leap_Second.dat``,
IETF/ntp ``leap-seconds.list``, or built-in ERFA/SOFA, and to update the
list used by ERFA.
Notes
-----
Astropy has a built-in ``iers.IERS_LEAP_SECONDS_FILE``. Up to date versions
can be downloaded from ``iers.IERS_LEAP_SECONDS_URL`` or
``iers.LEAP_SECONDS_LIST_URL``. Many systems also store a version
of ``leap-seconds.list`` for use with ``ntp`` (e.g., on Debian/Ubuntu
systems, ``/usr/share/zoneinfo/leap-seconds.list``).
To prevent querying internet resources if the available local leap second
file(s) are out of date, set ``iers.conf.auto_download = False``. This
must be done prior to performing any ``Time`` scale transformations related
to UTC (e.g. converting from UTC to TAI).
"""
# Note: Time instances in this class should use scale='tai' to avoid
# needing leap seconds in their creation or interpretation.
_re_expires = re.compile(r"^#.*File expires on[:\s]+(\d+\s\w+\s\d+)\s*$")
_expires = None
_auto_open_files = [
"erfa",
IERS_LEAP_SECOND_FILE,
"system_leap_second_file",
"iers_leap_second_auto_url",
"ietf_leap_second_auto_url",
]
"""Files or conf attributes to try in auto_open."""
@classmethod
def open(cls, file=None, cache=False):
"""Open a leap-second list.
Parameters
----------
file : path-like or None
Full local or network path to the file holding leap-second data,
for passing on to the various ``from_`` class methods.
If 'erfa', return the data used by the ERFA library.
If `None`, use default locations from file and configuration to
find a table that is not expired.
cache : bool
Whether to use cache. Defaults to False, since leap-second files
are regularly updated.
Returns
-------
leap_seconds : `~astropy.utils.iers.LeapSeconds`
Table with 'year', 'month', and 'tai_utc' columns, plus possibly
others.
Notes
-----
Bulletin C is released about 10 days after a possible leap second is
introduced, i.e., mid-January or mid-July. Expiration days are thus
generally at least 150 days after the present. For auto-loading, a
list comprising the table shipped with astropy and the files and URLs
in `~astropy.utils.iers.Conf` is tried, returning the first that is
sufficiently new, or else the newest among them all.
"""
if file is None:
return cls.auto_open()
if file.lower() == "erfa":
return cls.from_erfa()
if urlparse(file).netloc:
file = download_file(file, cache=cache)
# Just try both reading methods.
try:
return cls.from_iers_leap_seconds(file)
except Exception:
return cls.from_leap_seconds_list(file)
@staticmethod
def _today():
# Get current day in scale='tai' without going through a scale change
# (so we do not need leap seconds).
s = "{0.year:04d}-{0.month:02d}-{0.day:02d}".format(datetime.utcnow())
return Time(s, scale="tai", format="iso", out_subfmt="date")
@classmethod
def auto_open(cls, files=None):
"""Attempt to get an up-to-date leap-second list.
The routine will try the files in sequence until it finds one
whose expiration date is "good enough" (see below). If none
are good enough, it returns the one with the most recent expiration
date, warning if that file is expired.
For remote files that are cached already, the cached file is tried
first before attempting to retrieve it again.
Parameters
----------
files : list of path-like, optional
List of files/URLs to attempt to open. By default, uses
``cls._auto_open_files``.
Returns
-------
leap_seconds : `~astropy.utils.iers.LeapSeconds`
Up to date leap-second table
Notes
-----
Bulletin C is released about 10 days after a possible leap second is
introduced, i.e., mid-January or mid-July. Expiration days are thus
generally at least 150 days after the present. We look for a file
that expires more than 180 - `~astropy.utils.iers.Conf.auto_max_age`
after the present.
"""
offset = 180 - (30 if conf.auto_max_age is None else conf.auto_max_age)
good_enough = cls._today() + TimeDelta(offset, format="jd")
if files is None:
# Basic files to go over (entries in _auto_open_files can be
# configuration items, which we want to be sure are up to date).
files = [getattr(conf, f, f) for f in cls._auto_open_files]
# Remove empty entries.
files = [f for f in files if f]
# Our trials start with normal files and remote ones that are
# already in cache. The bools here indicate that the cache
# should be used.
trials = [
(f, True) for f in files if not urlparse(f).netloc or is_url_in_cache(f)
]
# If we are allowed to download, we try downloading new versions
# if none of the above worked.
if conf.auto_download:
trials += [(f, False) for f in files if urlparse(f).netloc]
self = None
err_list = []
# Go through all entries, and return the first one that
# is not expired, or the most up to date one.
for f, allow_cache in trials:
if not allow_cache:
clear_download_cache(f)
try:
trial = cls.open(f, cache=True)
except Exception as exc:
err_list.append(exc)
continue
if self is None or trial.expires > self.expires:
self = trial
self.meta["data_url"] = str(f)
if self.expires > good_enough:
break
if self is None:
raise ValueError(
"none of the files could be read. The "
f"following errors were raised:\n {err_list}"
)
if self.expires < self._today() and conf.auto_max_age is not None:
warn("leap-second file is expired.", IERSStaleWarning)
return self
@property
def expires(self):
"""The limit of validity of the table."""
return self._expires
@classmethod
def _read_leap_seconds(cls, file, **kwargs):
"""Read a file, identifying expiration by matching 'File expires'."""
expires = None
# Find expiration date.
with get_readable_fileobj(file) as fh:
lines = fh.readlines()
for line in lines:
match = cls._re_expires.match(line)
if match:
day, month, year = match.groups()[0].split()
month_nb = MONTH_ABBR.index(month[:3]) + 1
expires = Time(
f"{year}-{month_nb:02d}-{day}", scale="tai", out_subfmt="date"
)
break
else:
raise ValueError(f"did not find expiration date in {file}")
self = cls.read(lines, format="ascii.no_header", **kwargs)
self._expires = expires
return self
@classmethod
def from_iers_leap_seconds(cls, file=IERS_LEAP_SECOND_FILE):
"""Create a table from a file like the IERS ``Leap_Second.dat``.
Parameters
----------
file : path-like, optional
Full local or network path to the file holding leap-second data
in a format consistent with that used by IERS. By default, uses
``iers.IERS_LEAP_SECOND_FILE``.
Notes
-----
The file *must* contain the expiration date in a comment line, like
'# File expires on 28 June 2020'
"""
return cls._read_leap_seconds(
file, names=["mjd", "day", "month", "year", "tai_utc"]
)
@classmethod
def from_leap_seconds_list(cls, file):
"""Create a table from a file like the IETF ``leap-seconds.list``.
Parameters
----------
file : path-like, optional
Full local or network path to the file holding leap-second data
in a format consistent with that used by IETF. Up to date versions
can be retrieved from ``iers.IETF_LEAP_SECOND_URL``.
Notes
-----
The file *must* contain the expiration date in a comment line, like
'# File expires on: 28 June 2020'
"""
from astropy.io.ascii import convert_numpy # Here to avoid circular import
names = ["ntp_seconds", "tai_utc", "comment", "day", "month", "year"]
# Note: ntp_seconds does not fit in 32 bit, so causes problems on
# 32-bit systems without the np.int64 converter.
self = cls._read_leap_seconds(
file,
names=names,
include_names=names[:2],
converters={"ntp_seconds": [convert_numpy(np.int64)]},
)
self["mjd"] = (self["ntp_seconds"] / 86400 + 15020).round()
# Note: cannot use Time.ymdhms, since that might require leap seconds.
isot = Time(self["mjd"], format="mjd", scale="tai").isot
ymd = np.array(
[[int(part) for part in t.partition("T")[0].split("-")] for t in isot]
)
self["year"], self["month"], self["day"] = ymd.T
return self
@classmethod
def from_erfa(cls, built_in=False):
"""Create table from the leap-second list in ERFA.
Parameters
----------
built_in : bool
If `False` (default), retrieve the list currently used by ERFA,
which may have been updated. If `True`, retrieve the list shipped
with erfa.
"""
current = cls(erfa.leap_seconds.get())
current._expires = Time(
"{0.year:04d}-{0.month:02d}-{0.day:02d}".format(erfa.leap_seconds.expires),
scale="tai",
)
if not built_in:
return current
try:
erfa.leap_seconds.set(None) # reset to defaults
return cls.from_erfa(built_in=False)
finally:
erfa.leap_seconds.set(current)
def update_erfa_leap_seconds(self, initialize_erfa=False):
"""Add any leap seconds not already present to the ERFA table.
This method matches leap seconds with those present in the ERFA table,
and extends the latter as necessary.
Parameters
----------
initialize_erfa : bool, or 'only', or 'empty'
Initialize the ERFA leap second table to its built-in value before
trying to expand it. This is generally not needed but can help
in case it somehow got corrupted. If equal to 'only', the ERFA
table is reinitialized and no attempt is made to update it.
If 'empty', the leap second table is emptied before updating, i.e.,
it is overwritten altogether (note that this may break things in
surprising ways, as most leap second tables do not include pre-1970
pseudo leap-seconds; you were warned).
Returns
-------
n_update : int
Number of items updated.
Raises
------
ValueError
If the leap seconds in the table are not on the 1st of January or July,
or if the matches are inconsistent. This would normally suggest
a corrupted leap second table, but might also indicate that the
ERFA table was corrupted. If needed, the ERFA table can be reset
by calling this method with an appropriate value for
``initialize_erfa``.
"""
if initialize_erfa == "empty":
# Initialize to empty and update is the same as overwrite.
erfa.leap_seconds.set(self)
return len(self)
if initialize_erfa:
erfa.leap_seconds.set()
if initialize_erfa == "only":
return 0
return erfa.leap_seconds.update(self)
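# Hedged sketch of the leap-second workflow provided by this class:
#
#     from astropy.utils import iers
#
#     ls = iers.LeapSeconds.auto_open()           # newest acceptable table
#     print(ls.expires)                           # limit of validity
#     n_updated = ls.update_erfa_leap_seconds()   # extend ERFA's list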
|
8576015686639c17f824a12bcd32fa30561388f95b8d7cbb12a3945dcb293ac0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Functions to do XML schema and DTD validation. At the moment, this
makes a subprocess call to xmllint. This could use a Python-based
library at some point in the future, if something appropriate could be
found.
"""
import os
import subprocess
def validate_schema(filename, schema_file):
"""
Validates an XML file against a schema or DTD.
Parameters
----------
filename : str
The path to the XML file to validate
schema_file : str
The path to the XML schema or DTD
Returns
-------
returncode, stdout, stderr : int, str, str
Returns the returncode from xmllint and the stdout and stderr
as strings
"""
base, ext = os.path.splitext(schema_file)
if ext == ".xsd":
schema_part = "--schema"
elif ext == ".dtd":
schema_part = "--dtdvalid"
else:
raise TypeError("schema_file must be a path to an XML Schema or DTD")
p = subprocess.Popen(
["xmllint", "--noout", "--nonet", schema_part, schema_file, filename],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = p.communicate()
if p.returncode == 127:
raise OSError("xmllint not found, so can not validate schema")
elif p.returncode < 0:
from astropy.utils.misc import signal_number_to_name
raise OSError(
f"xmllint was terminated by signal '{signal_number_to_name(-p.returncode)}'"
)
return p.returncode, stdout, stderr
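# Hedged usage sketch (the file names are assumptions; requires the
# xmllint executable to be available on the PATH):
#
#     rc, stdout, stderr = validate_schema("votable.xml", "VOTable-1.4.xsd")
#     if rc != 0:
#         print(stderr.decode())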
|
1859ef89a6762fb0944aa4bb1b9dc77c606c3fb2275e5a4705e2dc09264ddcc8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Helpers for letting numpy functions interact with Masked arrays.
The module supplies helper routines for numpy functions that propagate
masks appropriately., for use in the ``__array_function__``
implementation of `~astropy.utils.masked.MaskedNDArray`. They are not
very useful on their own, but the ones with docstrings are included in
the documentation so that there is a place to find out how the mask is
interpreted.
"""
import numpy as np
from astropy.units.quantity_helper.function_helpers import FunctionAssigner
from astropy.utils.compat import NUMPY_LT_1_23, NUMPY_LT_1_24
# This module should not really be imported, but we define __all__
# such that sphinx can typeset the functions with docstrings.
# The latter are added to __all__ at the end.
__all__ = [
"MASKED_SAFE_FUNCTIONS",
"APPLY_TO_BOTH_FUNCTIONS",
"DISPATCHED_FUNCTIONS",
"UNSUPPORTED_FUNCTIONS",
]
MASKED_SAFE_FUNCTIONS = set()
"""Set of functions that work fine on Masked classes already.
Most of these internally use `numpy.ufunc` or other functions that
are already covered.
"""
APPLY_TO_BOTH_FUNCTIONS = {}
"""Dict of functions that should apply to both data and mask.
The `dict` is keyed by the numpy function and the values are functions
that take the input arguments of the numpy function and organize these
for passing the data and mask to the numpy function.
Returns
-------
data_args : tuple
Arguments to pass on to the numpy function for the unmasked data.
mask_args : tuple
Arguments to pass on to the numpy function for the masked data.
kwargs : dict
Keyword arguments to pass on for both unmasked data and mask.
out : `~astropy.utils.masked.Masked` instance or None
Optional instance in which to store the output.
Raises
------
NotImplementedError
When an argument is masked when it should not be, or vice versa.
"""
DISPATCHED_FUNCTIONS = {}
"""Dict of functions that provide the numpy function's functionality.
These are for more complicated versions where the numpy function itself
cannot easily be used. It should return either the result of the
function, or a tuple consisting of the unmasked result, the mask for the
result and a possible output instance.
It should raise `NotImplementedError` if one of the arguments is masked
when it should not be or vice versa.
"""
UNSUPPORTED_FUNCTIONS = set()
"""Set of numpy functions that are not supported for masked arrays.
For most, masked input simply makes no sense, but for others it may have
been lack of time. Issues or PRs for support for functions are welcome.
"""
# Almost all from np.core.fromnumeric defer to methods so are OK.
MASKED_SAFE_FUNCTIONS |= {
getattr(np, name)
for name in np.core.fromnumeric.__all__
if name not in {"choose", "put", "resize", "searchsorted", "where", "alen"}
}
MASKED_SAFE_FUNCTIONS |= {
# built-in from multiarray
np.may_share_memory, np.can_cast, np.min_scalar_type, np.result_type,
np.shares_memory,
# np.core.arrayprint
np.array_repr,
# np.core.function_base
np.linspace, np.logspace, np.geomspace,
# np.core.numeric
np.isclose, np.allclose, np.flatnonzero, np.argwhere,
# np.core.shape_base
np.atleast_1d, np.atleast_2d, np.atleast_3d, np.stack, np.hstack, np.vstack,
# np.lib.function_base
np.average, np.diff, np.extract, np.meshgrid, np.trapz, np.gradient,
# np.lib.index_tricks
np.diag_indices_from, np.triu_indices_from, np.tril_indices_from,
np.fill_diagonal,
# np.lib.shape_base
np.column_stack, np.row_stack, np.dstack,
np.array_split, np.split, np.hsplit, np.vsplit, np.dsplit,
np.expand_dims, np.apply_along_axis, np.kron, np.tile,
np.take_along_axis, np.put_along_axis,
# np.lib.type_check (all but asfarray, nan_to_num)
np.iscomplexobj, np.isrealobj, np.imag, np.isreal, np.real,
np.real_if_close, np.common_type,
# np.lib.ufunclike
np.fix, np.isneginf, np.isposinf,
# np.lib.function_base
np.angle, np.i0,
} # fmt: skip
IGNORED_FUNCTIONS = {
# I/O - useless for Masked, since no way to store the mask.
np.save, np.savez, np.savetxt, np.savez_compressed,
# Polynomials
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander,
} # fmt: skip
IGNORED_FUNCTIONS |= {
np.pad, np.searchsorted, np.digitize,
np.is_busday, np.busday_count, np.busday_offset,
# numpy.lib.function_base
np.cov, np.corrcoef, np.trim_zeros,
# numpy.core.numeric
np.correlate, np.convolve,
# numpy.lib.histograms
np.histogram, np.histogram2d, np.histogramdd, np.histogram_bin_edges,
# TODO!!
np.dot, np.vdot, np.inner, np.tensordot, np.cross,
np.einsum, np.einsum_path,
} # fmt: skip
# Really should do these...
IGNORED_FUNCTIONS |= {
getattr(np, setopsname) for setopsname in np.lib.arraysetops.__all__
}
if NUMPY_LT_1_23:
IGNORED_FUNCTIONS |= {
# Deprecated, removed in numpy 1.23
np.asscalar,
np.alen,
}
# Explicitly unsupported functions
UNSUPPORTED_FUNCTIONS |= {
np.unravel_index,
np.ravel_multi_index,
np.ix_,
}
# No support for the functions also not supported by Quantity
# (io, polynomial, etc.).
UNSUPPORTED_FUNCTIONS |= IGNORED_FUNCTIONS
apply_to_both = FunctionAssigner(APPLY_TO_BOTH_FUNCTIONS)
dispatched_function = FunctionAssigner(DISPATCHED_FUNCTIONS)
def _get_data_and_masks(*args):
"""Separate out arguments into tuples of data and masks.
An all-False mask is created if an argument does not have a mask.
"""
from .core import Masked
data, masks = Masked._get_data_and_masks(*args)
masks = tuple(
m if m is not None else np.zeros(np.shape(d), bool) for d, m in zip(data, masks)
)
return data, masks
# Following are simple ufunc-like functions which should just copy the mask.
@dispatched_function
def datetime_as_string(arr, *args, **kwargs):
return (np.datetime_as_string(arr.unmasked, *args, **kwargs), arr.mask.copy(), None)
@dispatched_function
def sinc(x):
return np.sinc(x.unmasked), x.mask.copy(), None
@dispatched_function
def iscomplex(x):
return np.iscomplex(x.unmasked), x.mask.copy(), None
@dispatched_function
def unwrap(p, *args, **kwargs):
return np.unwrap(p.unmasked, *args, **kwargs), p.mask.copy(), None
@dispatched_function
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
data = np.nan_to_num(x.unmasked, copy=copy, nan=nan, posinf=posinf, neginf=neginf)
return (data, x.mask.copy(), None) if copy else x
# Following are simple functions related to shapes, where the same function
# should be applied to the data and the mask. They cannot all share the
# same helper, because the first arguments have different names.
@apply_to_both(
helps={np.copy, np.asfarray, np.resize, np.moveaxis, np.rollaxis, np.roll}
)
def masked_a_helper(a, *args, **kwargs):
data, mask = _get_data_and_masks(a)
return data + args, mask + args, kwargs, None
@apply_to_both(helps={np.flip, np.flipud, np.fliplr, np.rot90, np.triu, np.tril})
def masked_m_helper(m, *args, **kwargs):
data, mask = _get_data_and_masks(m)
return data + args, mask + args, kwargs, None
@apply_to_both(helps={np.diag, np.diagflat})
def masked_v_helper(v, *args, **kwargs):
data, mask = _get_data_and_masks(v)
return data + args, mask + args, kwargs, None
@apply_to_both(helps={np.delete})
def masked_arr_helper(array, *args, **kwargs):
data, mask = _get_data_and_masks(array)
return data + args, mask + args, kwargs, None
@apply_to_both
def broadcast_to(array, shape, subok=False):
"""Broadcast array to the given shape.
Like `numpy.broadcast_to`, and applied to both unmasked data and mask.
Note that ``subok`` is taken to mean whether or not subclasses of
the unmasked data and mask are allowed, i.e., for ``subok=False``,
a `~astropy.utils.masked.MaskedNDArray` will be returned.
"""
data, mask = _get_data_and_masks(array)
return data, mask, dict(shape=shape, subok=subok), None
@dispatched_function
def outer(a, b, out=None):
return np.multiply.outer(np.ravel(a), np.ravel(b), out=out)
@dispatched_function
def empty_like(prototype, dtype=None, order="K", subok=True, shape=None):
"""Return a new array with the same shape and type as a given array.
Like `numpy.empty_like`, but will add an empty mask.
"""
unmasked = np.empty_like(
prototype.unmasked, dtype=dtype, order=order, subok=subok, shape=shape
)
if dtype is not None:
dtype = (
np.ma.make_mask_descr(unmasked.dtype)
if unmasked.dtype.names
else np.dtype("?")
)
mask = np.empty_like(
prototype.mask, dtype=dtype, order=order, subok=subok, shape=shape
)
return unmasked, mask, None
@dispatched_function
def zeros_like(a, dtype=None, order="K", subok=True, shape=None):
"""Return an array of zeros with the same shape and type as a given array.
Like `numpy.zeros_like`, but will add an all-false mask.
"""
unmasked = np.zeros_like(
a.unmasked, dtype=dtype, order=order, subok=subok, shape=shape
)
return unmasked, False, None
@dispatched_function
def ones_like(a, dtype=None, order="K", subok=True, shape=None):
"""Return an array of ones with the same shape and type as a given array.
Like `numpy.ones_like`, but will add an all-false mask.
"""
unmasked = np.ones_like(
a.unmasked, dtype=dtype, order=order, subok=subok, shape=shape
)
return unmasked, False, None
@dispatched_function
def full_like(a, fill_value, dtype=None, order="K", subok=True, shape=None):
"""Return a full array with the same shape and type as a given array.
Like `numpy.full_like`, but with a mask that is also set.
If ``fill_value`` is `numpy.ma.masked`, the data will be left unset
(i.e., as created by `numpy.empty_like`).
"""
result = np.empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
result[...] = fill_value
return result
@dispatched_function
def put(a, ind, v, mode="raise"):
"""Replaces specified elements of an array with given values.
Like `numpy.put`, but for masked array ``a`` and possibly masked
value ``v``. Masked indices ``ind`` are not supported.
"""
from astropy.utils.masked import Masked
if isinstance(ind, Masked) or not isinstance(a, Masked):
raise NotImplementedError
v_data, v_mask = a._get_data_and_mask(v)
if v_data is not None:
np.put(a.unmasked, ind, v_data, mode=mode)
# v_mask of None will be correctly interpreted as False.
np.put(a.mask, ind, v_mask, mode=mode)
return None
@dispatched_function
def putmask(a, mask, values):
"""Changes elements of an array based on conditional and input values.
Like `numpy.putmask`, but for masked array ``a`` and possibly masked
``values``. Masked ``mask`` is not supported.
"""
from astropy.utils.masked import Masked
if isinstance(mask, Masked) or not isinstance(a, Masked):
raise NotImplementedError
values_data, values_mask = a._get_data_and_mask(values)
if values_data is not None:
np.putmask(a.unmasked, mask, values_data)
np.putmask(a.mask, mask, values_mask)
return None
@dispatched_function
def place(arr, mask, vals):
"""Change elements of an array based on conditional and input values.
Like `numpy.place`, but for masked array ``arr`` and possibly masked
``vals``. Masked ``mask`` is not supported.
"""
from astropy.utils.masked import Masked
if isinstance(mask, Masked) or not isinstance(arr, Masked):
raise NotImplementedError
vals_data, vals_mask = arr._get_data_and_mask(vals)
if vals_data is not None:
np.place(arr.unmasked, mask, vals_data)
np.place(arr.mask, mask, vals_mask)
return None
@dispatched_function
def copyto(dst, src, casting="same_kind", where=True):
"""Copies values from one array to another, broadcasting as necessary.
Like `numpy.copyto`, but for masked destination ``dst`` and possibly
masked source ``src``.
"""
from astropy.utils.masked import Masked
if not isinstance(dst, Masked) or isinstance(where, Masked):
raise NotImplementedError
src_data, src_mask = dst._get_data_and_mask(src)
if src_data is not None:
np.copyto(dst.unmasked, src_data, casting=casting, where=where)
if src_mask is not None:
np.copyto(dst.mask, src_mask, where=where)
return None
@dispatched_function
def packbits(a, *args, **kwargs):
result = np.packbits(a.unmasked, *args, **kwargs)
mask = np.packbits(a.mask, *args, **kwargs).astype(bool)
return result, mask, None
@dispatched_function
def unpackbits(a, *args, **kwargs):
result = np.unpackbits(a.unmasked, *args, **kwargs)
mask = np.zeros(a.shape, dtype="u1")
mask[a.mask] = 255
mask = np.unpackbits(mask, *args, **kwargs).astype(bool)
return result, mask, None
@dispatched_function
def bincount(x, weights=None, minlength=0):
"""Count number of occurrences of each value in array of non-negative ints.
Like `numpy.bincount`, but masked entries in ``x`` will be skipped.
Any masked entries in ``weights`` will lead the corresponding bin to
be masked.
"""
from astropy.utils.masked import Masked
if weights is not None:
weights = np.asanyarray(weights)
if isinstance(x, Masked) and x.ndim <= 1:
# let other dimensions lead to errors.
if weights is not None and weights.ndim == x.ndim:
weights = weights[~x.mask]
x = x.unmasked[~x.mask]
mask = None
if weights is not None:
weights, w_mask = Masked._get_data_and_mask(weights)
if w_mask is not None:
mask = np.bincount(x, w_mask.astype(int), minlength=minlength).astype(bool)
result = np.bincount(x, weights, minlength=minlength)
return result, mask, None
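# Hedged example of the behavior documented above: the masked entry in
# ``x`` is skipped entirely.
#
#     import numpy as np
#     from astropy.utils.masked import Masked
#
#     x = Masked(np.array([0, 1, 1, 2]), mask=[False, False, True, False])
#     print(np.bincount(x))     # counts are [1, 1, 1]; the masked 1 is skipped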
@dispatched_function
def msort(a):
result = a.copy()
result.sort(axis=0)
return result
@dispatched_function
def sort_complex(a):
# Just a copy of function_base.sort_complex, to avoid the asarray.
b = a.copy()
b.sort()
if not issubclass(b.dtype.type, np.complexfloating): # pragma: no cover
if b.dtype.char in "bhBH":
return b.astype("F")
elif b.dtype.char == "g":
return b.astype("G")
else:
return b.astype("D")
else:
return b
@dispatched_function
def concatenate(arrays, axis=0, out=None, dtype=None, casting="same_kind"):
data, masks = _get_data_and_masks(*arrays)
if out is None:
return (
np.concatenate(data, axis=axis, dtype=dtype, casting=casting),
np.concatenate(masks, axis=axis),
None,
)
else:
from astropy.utils.masked import Masked
if not isinstance(out, Masked):
raise NotImplementedError
np.concatenate(masks, out=out.mask, axis=axis)
np.concatenate(data, out=out.unmasked, axis=axis, dtype=dtype, casting=casting)
return out
@apply_to_both
def append(arr, values, axis=None):
data, masks = _get_data_and_masks(arr, values)
return data, masks, dict(axis=axis), None
@dispatched_function
def block(arrays):
# We need to override block since the numpy implementation can take two
# different paths, one for concatenation, one for creating a large empty
# result array in which parts are set. Each assumes array input and
# cannot be used directly. Since it would be very costly to inspect all
# arrays and then turn them back into a nested list, we just copy here the
# second implementation, np.core.shape_base._block_slicing, since it is
# shortest and easiest.
from astropy.utils.masked import Masked
arrays, list_ndim, result_ndim, final_size = np.core.shape_base._block_setup(arrays)
shape, slices, arrays = np.core.shape_base._block_info_recursion(
arrays, list_ndim, result_ndim
)
dtype = np.result_type(*[arr.dtype for arr in arrays])
F_order = all(arr.flags["F_CONTIGUOUS"] for arr in arrays)
C_order = all(arr.flags["C_CONTIGUOUS"] for arr in arrays)
order = "F" if F_order and not C_order else "C"
result = Masked(np.empty(shape=shape, dtype=dtype, order=order))
for the_slice, arr in zip(slices, arrays):
result[(Ellipsis,) + the_slice] = arr
return result
@dispatched_function
def broadcast_arrays(*args, subok=True):
"""Broadcast arrays to a common shape.
Like `numpy.broadcast_arrays`, applied to both unmasked data and masks.
Note that ``subok`` is taken to mean whether or not subclasses of
the unmasked data and masks are allowed, i.e., for ``subok=False``,
`~astropy.utils.masked.MaskedNDArray` instances will be returned.
"""
from .core import Masked
are_masked = [isinstance(arg, Masked) for arg in args]
data = [
(arg.unmasked if is_masked else arg) for arg, is_masked in zip(args, are_masked)
]
results = np.broadcast_arrays(*data, subok=subok)
shape = results[0].shape if isinstance(results, list) else results.shape
masks = [
(np.broadcast_to(arg.mask, shape, subok=subok) if is_masked else None)
for arg, is_masked in zip(args, are_masked)
]
results = [
(Masked(result, mask) if mask is not None else result)
for (result, mask) in zip(results, masks)
]
return results if len(results) > 1 else results[0]
@apply_to_both
def insert(arr, obj, values, axis=None):
"""Insert values along the given axis before the given indices.
Like `numpy.insert` but for possibly masked ``arr`` and ``values``.
Masked ``obj`` is not supported.
"""
from astropy.utils.masked import Masked
if isinstance(obj, Masked) or not isinstance(arr, Masked):
raise NotImplementedError
(arr_data, val_data), (arr_mask, val_mask) = _get_data_and_masks(arr, values)
return ((arr_data, obj, val_data, axis), (arr_mask, obj, val_mask, axis), {}, None)
@dispatched_function
def count_nonzero(a, axis=None, *, keepdims=False):
"""Counts the number of non-zero values in the array ``a``.
Like `numpy.count_nonzero`, with masked values counted as 0 or `False`.
"""
filled = a.filled(np.zeros((), a.dtype))
return np.count_nonzero(filled, axis, keepdims=keepdims)
def _masked_median_1d(a, overwrite_input):
# TODO: need an in-place mask-sorting option.
unmasked = a.unmasked[~a.mask]
if unmasked.size:
return a.from_unmasked(np.median(unmasked, overwrite_input=overwrite_input))
else:
return a.from_unmasked(np.zeros_like(a.unmasked, shape=(1,))[0], mask=True)
def _masked_median(a, axis=None, out=None, overwrite_input=False):
# As for np.nanmedian, but without a fast option as yet.
if axis is None or a.ndim == 1:
part = a.ravel()
result = _masked_median_1d(part, overwrite_input)
else:
result = np.apply_along_axis(_masked_median_1d, axis, a, overwrite_input)
if out is not None:
out[...] = result
return result
@dispatched_function
def median(a, axis=None, out=None, **kwargs):
from astropy.utils.masked import Masked
if out is not None and not isinstance(out, Masked):
raise NotImplementedError
a = Masked(a)
if NUMPY_LT_1_24:
keepdims = kwargs.pop("keepdims", False)
r, k = np.lib.function_base._ureduce(
a, func=_masked_median, axis=axis, out=out, **kwargs
)
return (r.reshape(k) if keepdims else r) if out is None else out
else:
return np.lib.function_base._ureduce(
a, func=_masked_median, axis=axis, out=out, **kwargs
)
def _masked_quantile_1d(a, q, **kwargs):
"""
Private function for rank 1 arrays. Compute quantile ignoring NaNs.
See nanpercentile for parameter usage.
"""
unmasked = a.unmasked[~a.mask]
if unmasked.size:
result = np.lib.function_base._quantile_unchecked(unmasked, q, **kwargs)
return a.from_unmasked(result)
else:
return a.from_unmasked(np.zeros_like(a.unmasked, shape=q.shape), True)
def _masked_quantile(a, q, axis=None, out=None, **kwargs):
# As for np.nanmedian, but without a fast option as yet.
if axis is None or a.ndim == 1:
part = a.ravel()
result = _masked_quantile_1d(part, q, **kwargs)
else:
result = np.apply_along_axis(_masked_quantile_1d, axis, a, q, **kwargs)
# apply_along_axis fills in collapsed axis with results.
# Move that axis to the beginning to match percentile's
# convention.
if q.ndim != 0:
result = np.moveaxis(result, axis, 0)
if out is not None:
out[...] = result
return result
@dispatched_function
def quantile(a, q, axis=None, out=None, **kwargs):
from astropy.utils.masked import Masked
if isinstance(q, Masked) or out is not None and not isinstance(out, Masked):
raise NotImplementedError
a = Masked(a)
q = np.asanyarray(q)
if not np.lib.function_base._quantile_is_valid(q):
raise ValueError("Quantiles must be in the range [0, 1]")
if NUMPY_LT_1_24:
keepdims = kwargs.pop("keepdims", False)
r, k = np.lib.function_base._ureduce(
a, func=_masked_quantile, q=q, axis=axis, out=out, **kwargs
)
return (r.reshape(q.shape + k) if keepdims else r) if out is None else out
else:
return np.lib.function_base._ureduce(
a, func=_masked_quantile, q=q, axis=axis, out=out, **kwargs
)
@dispatched_function
def percentile(a, q, *args, **kwargs):
q = np.true_divide(q, 100)
return quantile(a, q, *args, **kwargs)
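# Hedged example for the median/quantile helpers above: masked values are
# ignored in the reduction.
#
#     import numpy as np
#     from astropy.utils.masked import Masked
#
#     a = Masked(np.array([1.0, 2.0, 100.0]), mask=[False, False, True])
#     print(np.median(a))       # 1.5; the masked 100.0 is ignored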
@dispatched_function
def array_equal(a1, a2, equal_nan=False):
(a1d, a2d), (a1m, a2m) = _get_data_and_masks(a1, a2)
if a1d.shape != a2d.shape:
return False
equal = a1d == a2d
if equal_nan:
equal |= np.isnan(a1d) & np.isnan(a2d)
return bool((equal | a1m | a2m).all())
@dispatched_function
def array_equiv(a1, a2):
return bool((a1 == a2).all())
@dispatched_function
def where(condition, *args):
from astropy.utils.masked import Masked
if not args:
return condition.nonzero(), None, None
condition, c_mask = Masked._get_data_and_mask(condition)
data, masks = _get_data_and_masks(*args)
unmasked = np.where(condition, *data)
mask = np.where(condition, *masks)
if c_mask is not None:
mask |= c_mask
return Masked(unmasked, mask=mask)
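# Hedged example for ``where`` above: the output is masked wherever the
# condition or the chosen value is masked.
#
#     import numpy as np
#     from astropy.utils.masked import Masked
#
#     a = Masked(np.arange(3.0), mask=[False, True, False])
#     print(np.where(a > 0.5, a, -1.0))   # second element masked in output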
@dispatched_function
def choose(a, choices, out=None, mode="raise"):
"""Construct an array from an index array and a set of arrays to choose from.
Like `numpy.choose`. Masked indices in ``a`` will lead to masked output
values and underlying data values are ignored if out of bounds (for
``mode='raise'``). Any values masked in ``choices`` will be propagated
if chosen.
"""
from astropy.utils.masked import Masked
a_data, a_mask = Masked._get_data_and_mask(a)
if a_mask is not None and mode == "raise":
# Avoid raising on masked indices.
a_data = a.filled(fill_value=0)
kwargs = {"mode": mode}
if out is not None:
if not isinstance(out, Masked):
raise NotImplementedError
kwargs["out"] = out.unmasked
data, masks = _get_data_and_masks(*choices)
data_chosen = np.choose(a_data, data, **kwargs)
if out is not None:
kwargs["out"] = out.mask
mask_chosen = np.choose(a_data, masks, **kwargs)
if a_mask is not None:
mask_chosen |= a_mask
return Masked(data_chosen, mask_chosen) if out is None else out
@apply_to_both
def select(condlist, choicelist, default=0):
"""Return an array drawn from elements in choicelist, depending on conditions.
Like `numpy.select`, with masks in ``choicelist`` propagated.
Any masks in ``condlist`` are ignored.
"""
from astropy.utils.masked import Masked
condlist = [c.unmasked if isinstance(c, Masked) else c for c in condlist]
data_list, mask_list = _get_data_and_masks(*choicelist)
default = Masked(default) if default is not np.ma.masked else Masked(0, mask=True)
return (
(condlist, data_list, default.unmasked),
(condlist, mask_list, default.mask),
{},
None,
)
@dispatched_function
def piecewise(x, condlist, funclist, *args, **kw):
"""Evaluate a piecewise-defined function.
Like `numpy.piecewise` but for masked input array ``x``.
Any masks in ``condlist`` are ignored.
"""
# Copied implementation from numpy.lib.function_base.piecewise,
# just to ensure output is Masked.
n2 = len(funclist)
# undocumented: single condition is promoted to a list of one condition
if np.isscalar(condlist) or (
not isinstance(condlist[0], (list, np.ndarray)) and x.ndim != 0
): # pragma: no cover
condlist = [condlist]
condlist = np.array(condlist, dtype=bool)
n = len(condlist)
if n == n2 - 1: # compute the "otherwise" condition.
condelse = ~np.any(condlist, axis=0, keepdims=True)
condlist = np.concatenate([condlist, condelse], axis=0)
n += 1
elif n != n2:
raise ValueError(
f"with {n} condition(s), either {n} or {n + 1} functions are expected"
)
# The one real change...
y = np.zeros_like(x)
where = []
what = []
for k in range(n):
item = funclist[k]
if not callable(item):
where.append(condlist[k])
what.append(item)
else:
vals = x[condlist[k]]
if vals.size > 0:
where.append(condlist[k])
what.append(item(vals, *args, **kw))
for item, value in zip(where, what):
y[item] = value
return y
@dispatched_function
def interp(x, xp, fp, *args, **kwargs):
"""One-dimensional linear interpolation.
Like `numpy.interp`, but any masked points in ``xp`` and ``fp``
are ignored. Any masked values in ``x`` will still be evaluated,
but masked on output.
"""
from astropy.utils.masked import Masked
xd, xm = Masked._get_data_and_mask(x)
if isinstance(xp, Masked) or isinstance(fp, Masked):
(xp, fp), (xpm, fpm) = _get_data_and_masks(xp, fp)
if xp.ndim == fp.ndim == 1:
# Only filter out masked points when both are 1-D; other shapes
# will just raise in np.interp below.
m = xpm | fpm
xp = xp[~m]
fp = fp[~m]
result = np.interp(xd, xp, fp, *args, **kwargs)
return result if xm is None else Masked(result, xm.copy())
@dispatched_function
def lexsort(keys, axis=-1):
"""Perform an indirect stable sort using a sequence of keys.
Like `numpy.lexsort` but for possibly masked ``keys``. Masked
values are sorted towards the end for each key.
"""
# Sort masks to the end.
from .core import Masked
new_keys = []
for key in keys:
if isinstance(key, Masked):
# If there are other keys below, want to be sure that
# for masked values, those other keys set the order.
new_key = key.unmasked
if new_keys and key.mask.any():
new_key = new_key.copy()
new_key[key.mask] = new_key.flat[0]
new_keys.extend([new_key, key.mask])
else:
new_keys.append(key)
return np.lexsort(new_keys, axis=axis)
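# Hedged example for ``lexsort`` above: masked entries sort to the end.
#
#     import numpy as np
#     from astropy.utils.masked import Masked
#
#     key = Masked(np.array([2, 1, 3]), mask=[False, False, True])
#     print(np.lexsort((key,)))    # [1, 0, 2]; the masked 3 sorts last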
@dispatched_function
def apply_over_axes(func, a, axes):
# Copied straight from numpy/lib/shape_base, just to omit its
# val = asarray(a); if only it had been asanyarray, or just not there
# since a is assumed to be an array in the next line...
# Which is what we do here - we can only get here if it is Masked.
val = a
N = a.ndim
if np.array(axes).ndim == 0:
axes = (axes,)
for axis in axes:
if axis < 0:
axis = N + axis
args = (val, axis)
res = func(*args)
if res.ndim == val.ndim:
val = res
else:
res = np.expand_dims(res, axis)
if res.ndim == val.ndim:
val = res
else:
raise ValueError(
"function is not returning an array of the correct shape"
)
return val
class MaskedFormat:
"""Formatter for masked array scalars.
For use in `numpy.array2string`, wrapping the regular formatters such
that if a value is masked, its formatted string is replaced.
Typically initialized using the ``from_data`` class method.
"""
def __init__(self, format_function):
self.format_function = format_function
# Special case for structured void and subarray: we need to make all the
# format functions for the items masked as well.
# TODO: maybe a separate class is more logical?
ffs = getattr(format_function, "format_functions", None)
if ffs:
# StructuredVoidFormat: multiple format functions to be changed.
self.format_function.format_functions = [MaskedFormat(ff) for ff in ffs]
ff = getattr(format_function, "format_function", None)
if ff:
# SubarrayFormat: change format function for the elements.
self.format_function.format_function = MaskedFormat(ff)
def __call__(self, x):
if x.dtype.names:
# The replacement of x with a list is needed because the function
# inside StructuredVoidFormat iterates over x, which works for an
# np.void but not an array scalar.
return self.format_function([x[field] for field in x.dtype.names])
if x.shape:
# For a subarray pass on the data directly, since the
# items will be iterated on inside the function.
return self.format_function(x)
# Single element: first just typeset it normally, replace with masked
# string if needed.
string = self.format_function(x.unmasked[()])
if x.mask:
# Strikethrough would be neat, but terminal needs a different
# formatting than, say, jupyter notebook.
# return "\x1B[9m"+string+"\x1B[29m"
# return ''.join(s+'\u0336' for s in string)
n = min(3, max(1, len(string)))
return " " * (len(string) - n) + "\u2014" * n
else:
return string
@classmethod
def from_data(cls, data, **options):
from numpy.core.arrayprint import _get_format_function
return cls(_get_format_function(data, **options))
def _array2string(a, options, separator=" ", prefix=""):
# Mostly copied from numpy.core.arrayprint, except:
# - The format function is wrapped in a mask-aware class;
# - Array scalars are not cast as arrays.
from numpy.core.arrayprint import _formatArray, _leading_trailing
data = np.asarray(a)
if a.size > options["threshold"]:
summary_insert = "..."
data = _leading_trailing(data, options["edgeitems"])
else:
summary_insert = ""
# find the right formatting function for the array
format_function = MaskedFormat.from_data(data, **options)
# skip over "["
next_line_prefix = " "
# skip over array(
next_line_prefix += " " * len(prefix)
lst = _formatArray(
a,
format_function,
options["linewidth"],
next_line_prefix,
separator,
options["edgeitems"],
summary_insert,
options["legacy"],
)
return lst
@dispatched_function
def array2string(
a,
max_line_width=None,
precision=None,
suppress_small=None,
separator=" ",
prefix="",
style=np._NoValue,
formatter=None,
threshold=None,
edgeitems=None,
sign=None,
floatmode=None,
suffix="",
):
# Copied from numpy.core.arrayprint, but using _array2string above.
from numpy.core.arrayprint import _format_options, _make_options_dict
overrides = _make_options_dict(
precision,
threshold,
edgeitems,
max_line_width,
suppress_small,
None,
None,
sign,
formatter,
floatmode,
)
options = _format_options.copy()
options.update(overrides)
options["linewidth"] -= len(suffix)
# treat as a null array if any of shape elements == 0
if a.size == 0:
return "[]"
return _array2string(a, options, separator, prefix)
@dispatched_function
def array_str(a, max_line_width=None, precision=None, suppress_small=None):
# Override to avoid special treatment of array scalars.
return array2string(a, max_line_width, precision, suppress_small, " ", "")
# For the nanfunctions, we just treat any nan as an additional mask.
_nanfunc_fill_values = {"nansum": 0, "nancumsum": 0, "nanprod": 1, "nancumprod": 1}
def masked_nanfunc(nanfuncname):
np_func = getattr(np, nanfuncname[3:])
fill_value = _nanfunc_fill_values.get(nanfuncname, None)
def nanfunc(a, *args, **kwargs):
from astropy.utils.masked import Masked
a, mask = Masked._get_data_and_mask(a)
if issubclass(a.dtype.type, np.inexact):
nans = np.isnan(a)
mask = nans if mask is None else (nans | mask)
if mask is not None:
a = Masked(a, mask)
if fill_value is not None:
a = a.filled(fill_value)
return np_func(a, *args, **kwargs)
doc = f"Like `numpy.{nanfuncname}`, skipping masked values as well.\n\n"
if fill_value is not None:
# sum, cumsum, prod, cumprod
doc += (
f"Masked/NaN values are replaced with {fill_value}. "
"The output is not masked."
)
elif "arg" in nanfuncname:
doc += (
"No exceptions are raised for fully masked/NaN slices.\n"
"Instead, these give index 0."
)
else:
doc += (
"No warnings are given for fully masked/NaN slices.\n"
"Instead, they are masked in the output."
)
nanfunc.__doc__ = doc
nanfunc.__name__ = nanfuncname
return nanfunc
for nanfuncname in np.lib.nanfunctions.__all__:
globals()[nanfuncname] = dispatched_function(
masked_nanfunc(nanfuncname), helps=getattr(np, nanfuncname)
)
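# Hedged example for the nan-function wrappers generated above: NaNs and
# masked entries are both skipped.
#
#     import numpy as np
#     from astropy.utils.masked import Masked
#
#     a = Masked(np.array([1.0, np.nan, 3.0]), mask=[False, False, True])
#     print(np.nansum(a))     # 1.0; the NaN and the masked 3.0 are skipped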
# Add any dispatched or helper function that has a docstring to
# __all__, so they will be typeset by sphinx. The logic is that for
# those presumably the use of the mask is not entirely obvious.
__all__ += sorted(
helper.__name__
for helper in (
set(APPLY_TO_BOTH_FUNCTIONS.values()) | set(DISPATCHED_FUNCTIONS.values())
)
if helper.__doc__
)
|
07488a9199bb4df3b99d6341c679050b70100b7f98bf9ad47234f9875a1673c6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is a collection of monkey patches and workarounds for bugs in
earlier versions of Numpy.
"""
import numpy as np
from astropy.utils import minversion
__all__ = [
"NUMPY_LT_1_21_1",
"NUMPY_LT_1_22",
"NUMPY_LT_1_22_1",
"NUMPY_LT_1_23",
"NUMPY_LT_1_24",
"NUMPY_LT_1_25",
]
# TODO: It might also be nice to have aliases to these named for specific
# features/bugs we're checking for (ex:
# astropy.table.table._BROKEN_UNICODE_TABLE_SORT)
NUMPY_LT_1_21_1 = not minversion(np, "1.21.1")
NUMPY_LT_1_22 = not minversion(np, "1.22")
NUMPY_LT_1_22_1 = not minversion(np, "1.22.1")
NUMPY_LT_1_23 = not minversion(np, "1.23")
NUMPY_LT_1_24 = not minversion(np, "1.24")
NUMPY_LT_1_25 = not minversion(np, "1.25.0.dev0+151")
|
da4bc7380cc2853f9bc0516edae4cf2c58b9b4d96c28f1ad9df710b58637cea8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Helpers functions for different kinds of WCSAxes instances.
"""
import numpy as np
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredEllipse, AnchoredSizeBar
import astropy.units as u
from astropy.wcs.utils import proj_plane_pixel_scales
__all__ = ["add_beam", "add_scalebar"]
CORNERS = {
"top right": 1,
"top left": 2,
"bottom left": 3,
"bottom right": 4,
"right": 5,
"left": 6,
"bottom": 8,
"top": 9,
}
def add_beam(
ax,
header=None,
major=None,
minor=None,
angle=None,
corner="bottom left",
frame=False,
borderpad=0.4,
pad=0.5,
**kwargs,
):
"""
Display the beam shape and size.
Parameters
----------
ax : :class:`~astropy.visualization.wcsaxes.WCSAxes`
WCSAxes instance in which the beam shape and size is displayed. The WCS
must be celestial.
header : :class:`~astropy.io.fits.Header`, optional
Header containing the beam parameters. If specified, the ``BMAJ``,
``BMIN``, and ``BPA`` keywords will be searched in the FITS header
to set the major and minor axes and the position angle on the sky.
major : float or :class:`~astropy.units.Quantity`, optional
Major axis of the beam in degrees or an angular quantity.
minor : float or :class:`~astropy.units.Quantity`, optional
Minor axis of the beam in degrees or an angular quantity.
angle : float or :class:`~astropy.units.Quantity`, optional
Position angle of the beam on the sky in degrees or an angular
quantity in the anticlockwise direction.
corner : str, optional
The beam location. Acceptable values are ``'left'``, ``'right'``,
``'top'``, ``'bottom'``, ``'top left'``, ``'top right'``, ``'bottom left'``
(default), and ``'bottom right'``.
frame : bool, optional
Whether to display a frame behind the beam (default is ``False``).
borderpad : float, optional
Border padding, in fraction of the font size. Default is 0.4.
pad : float, optional
Padding around the beam, in fraction of the font size. Default is 0.5.
kwargs
Additional arguments are passed to :class:`matplotlib.patches.Ellipse`.
Notes
-----
This function may be inaccurate when:
- The pixel scales at the reference pixel are different from the pixel scales
within the image extent (e.g., when the reference pixel is well outside of
the image extent and the projection is non-linear)
- The pixel scales in the two directions are very different from each other
(e.g., rectangular pixels)
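
    Examples
    --------
    A minimal sketch, assuming ``ax`` is a WCSAxes instance with a celestial
    WCS and ``header`` provides the ``BMAJ``/``BMIN``/``BPA`` keywords::

        from astropy.visualization.wcsaxes import add_beam

        add_beam(ax, header=header, frame=True, color="white")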
"""
if header and major:
raise ValueError(
"Either header or major/minor/angle must be specified, not both."
)
if header:
major = header["BMAJ"]
minor = header["BMIN"]
angle = header["BPA"]
if isinstance(major, u.Quantity):
major = major.to(u.degree).value
if isinstance(minor, u.Quantity):
minor = minor.to(u.degree).value
if isinstance(angle, u.Quantity):
angle = angle.to(u.degree).value
if ax.wcs.is_celestial:
pix_scale = proj_plane_pixel_scales(ax.wcs)
sx = pix_scale[0]
sy = pix_scale[1]
degrees_per_pixel = np.sqrt(sx * sy)
else:
raise ValueError("Cannot show beam when WCS is not celestial")
minor /= degrees_per_pixel
major /= degrees_per_pixel
corner = CORNERS[corner]
beam = AnchoredEllipse(
ax.transData,
width=minor,
height=major,
angle=angle,
loc=corner,
pad=pad,
borderpad=borderpad,
frameon=frame,
)
beam.ellipse.set(**kwargs)
ax.add_artist(beam)
def add_scalebar(
ax,
length,
label=None,
corner="bottom right",
frame=False,
borderpad=0.4,
pad=0.5,
**kwargs,
):
"""Add a scale bar.
Parameters
----------
ax : :class:`~astropy.visualization.wcsaxes.WCSAxes`
WCSAxes instance in which the scale bar is displayed. The WCS must be
celestial.
length : float or :class:`~astropy.units.Quantity`
        The length of the scalebar in degrees or an angular quantity.
    label : str, optional
        Label to place below the scale bar.
corner : str, optional
        Where to place the scale bar. Acceptable values are ``'left'``,
``'right'``, ``'top'``, ``'bottom'``, ``'top left'``, ``'top right'``,
``'bottom left'`` and ``'bottom right'`` (default)
frame : bool, optional
Whether to display a frame behind the scale bar (default is ``False``)
borderpad : float, optional
Border padding, in fraction of the font size. Default is 0.4.
pad : float, optional
Padding around the scale bar, in fraction of the font size. Default is 0.5.
kwargs
Additional arguments are passed to
:class:`mpl_toolkits.axes_grid1.anchored_artists.AnchoredSizeBar`.
Notes
-----
This function may be inaccurate when:
- The pixel scales at the reference pixel are different from the pixel scales
within the image extent (e.g., when the reference pixel is well outside of
the image extent and the projection is non-linear)
- The pixel scales in the two directions are very different from each other
(e.g., rectangular pixels)
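
    Examples
    --------
    A minimal sketch, assuming ``ax`` is a WCSAxes instance with a celestial
    WCS::

        import astropy.units as u
        from astropy.visualization.wcsaxes import add_scalebar

        add_scalebar(ax, 1 * u.arcmin, label="1'", color="white")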
"""
if isinstance(length, u.Quantity):
length = length.to(u.degree).value
if ax.wcs.is_celestial:
pix_scale = proj_plane_pixel_scales(ax.wcs)
sx = pix_scale[0]
sy = pix_scale[1]
degrees_per_pixel = np.sqrt(sx * sy)
else:
raise ValueError("Cannot show scalebar when WCS is not celestial")
length = length / degrees_per_pixel
corner = CORNERS[corner]
scalebar = AnchoredSizeBar(
ax.transData,
length,
label,
corner,
pad=pad,
borderpad=borderpad,
sep=5,
frameon=frame,
**kwargs,
)
ax.add_artist(scalebar)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Note: This file includes code derived from pywcsgrid2
#
# This file contains Matplotlib transformation objects (e.g. from pixel to world
# coordinates, but also world-to-world).
import abc
import numpy as np
from matplotlib.path import Path
from matplotlib.transforms import Transform
from astropy import units as u
from astropy.coordinates import (
BaseCoordinateFrame,
SkyCoord,
UnitSphericalRepresentation,
frame_transform_graph,
)
__all__ = [
"CurvedTransform",
"CoordinateTransform",
"World2PixelTransform",
"Pixel2WorldTransform",
]
class CurvedTransform(Transform, metaclass=abc.ABCMeta):
"""
Abstract base class for non-affine curved transforms.
"""
input_dims = 2
output_dims = 2
is_separable = False
def transform_path(self, path):
"""
Transform a Matplotlib Path.
Parameters
----------
path : :class:`~matplotlib.path.Path`
The path to transform
Returns
-------
path : :class:`~matplotlib.path.Path`
The resulting path
"""
return Path(self.transform(path.vertices), path.codes)
transform_path_non_affine = transform_path
def transform(self, input):
raise NotImplementedError("")
def inverted(self):
raise NotImplementedError("")
class CoordinateTransform(CurvedTransform):
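    """
    A transform between two world coordinate systems, specified either by
    frame name or as coordinate frame instances (e.g. FK5 to Galactic).
    """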
has_inverse = True
def __init__(self, input_system, output_system):
super().__init__()
self._input_system_name = input_system
self._output_system_name = output_system
if isinstance(self._input_system_name, str):
frame_cls = frame_transform_graph.lookup_name(self._input_system_name)
if frame_cls is None:
raise ValueError(f"Frame {self._input_system_name} not found")
else:
self.input_system = frame_cls()
elif isinstance(self._input_system_name, BaseCoordinateFrame):
self.input_system = self._input_system_name
else:
            raise TypeError(
                "input_system should be a string or a coordinate frame instance"
            )
if isinstance(self._output_system_name, str):
frame_cls = frame_transform_graph.lookup_name(self._output_system_name)
if frame_cls is None:
raise ValueError(f"Frame {self._output_system_name} not found")
else:
self.output_system = frame_cls()
elif isinstance(self._output_system_name, BaseCoordinateFrame):
self.output_system = self._output_system_name
else:
            raise TypeError(
                "output_system should be a string or a coordinate frame instance"
            )
        self.same_frames = self.input_system == self.output_system
@property
def same_frames(self):
return self._same_frames
@same_frames.setter
def same_frames(self, same_frames):
self._same_frames = same_frames
def transform(self, input_coords):
"""
Transform one set of coordinates to another.
"""
if self.same_frames:
return input_coords
input_coords = input_coords * u.deg
x_in, y_in = input_coords[:, 0], input_coords[:, 1]
c_in = SkyCoord(
UnitSphericalRepresentation(x_in, y_in), frame=self.input_system
)
# We often need to transform arrays that contain NaN values, and filtering
# out the NaN values would have a performance hit, so instead we just pass
# on all values and just ignore Numpy warnings
with np.errstate(all="ignore"):
c_out = c_in.transform_to(self.output_system)
lon = c_out.spherical.lon.deg
lat = c_out.spherical.lat.deg
return np.concatenate((lon[:, np.newaxis], lat[:, np.newaxis]), axis=1)
transform_non_affine = transform
def inverted(self):
"""
Return the inverse of the transform.
"""
return CoordinateTransform(self._output_system_name, self._input_system_name)
class World2PixelTransform(CurvedTransform, metaclass=abc.ABCMeta):
"""
Base transformation from world to pixel coordinates.
"""
has_inverse = True
frame_in = None
@property
@abc.abstractmethod
def input_dims(self):
"""
The number of input world dimensions.
"""
@abc.abstractmethod
def transform(self, world):
"""
        Transform world to pixel coordinates. You should pass in an NxM array
        where N is the number of points to transform, and M is the number of
        dimensions. This then returns the (x, y) pixel coordinates
        as an Nx2 array.
"""
@abc.abstractmethod
def inverted(self):
"""
Return the inverse of the transform.
"""
class Pixel2WorldTransform(CurvedTransform, metaclass=abc.ABCMeta):
"""
Base transformation from pixel to world coordinates.
"""
has_inverse = True
frame_out = None
@property
@abc.abstractmethod
def output_dims(self):
"""
The number of output world dimensions.
"""
@abc.abstractmethod
def transform(self, pixel):
"""
Transform pixel to world coordinates. You should pass in a Nx2 array
of (x, y) pixel coordinates to transform to world coordinates. This
will then return an NxM array where M is the number of dimensions.
"""
@abc.abstractmethod
def inverted(self):
"""
Return the inverse of the transform.
"""
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This file defines the classes used to represent a 'coordinate', which includes
axes, ticks, tick labels, and grid lines.
"""
import warnings
import numpy as np
from matplotlib import rcParams
from matplotlib.patches import PathPatch
from matplotlib.path import Path
from matplotlib.ticker import Formatter
from matplotlib.transforms import Affine2D, ScaledTranslation
from astropy import units as u
from astropy.utils.exceptions import AstropyDeprecationWarning
from .axislabels import AxisLabels
from .formatter_locator import AngleFormatterLocator, ScalarFormatterLocator
from .frame import EllipticalFrame, RectangularFrame1D
from .grid_paths import get_gridline_path, get_lon_lat_path
from .ticklabels import TickLabels
from .ticks import Ticks
__all__ = ["CoordinateHelper"]
# Matplotlib's gridlines use Line2D, but ours use PathPatch.
# Patches take a slightly different format of linestyle argument.
LINES_TO_PATCHES_LINESTYLE = {
"-": "solid",
"--": "dashed",
"-.": "dashdot",
":": "dotted",
"none": "none",
"None": "none",
" ": "none",
"": "none",
}
def wrap_angle_at(values, coord_wrap):
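    # Wrap the angles into the half-open interval [coord_wrap - 360, coord_wrap)
    # degrees; e.g. wrap_angle_at(np.array([359.0, 1.0]), 180.0) returns
    # array([-1., 1.]).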
# On ARM processors, np.mod emits warnings if there are NaN values in the
# array, although this doesn't seem to happen on other processors.
with np.errstate(invalid="ignore"):
return np.mod(values - coord_wrap, 360.0) - (360.0 - coord_wrap)
class CoordinateHelper:
"""
Helper class to control one of the coordinates in the
:class:`~astropy.visualization.wcsaxes.WCSAxes`.
Parameters
----------
parent_axes : :class:`~astropy.visualization.wcsaxes.WCSAxes`
The axes the coordinate helper belongs to.
parent_map : :class:`~astropy.visualization.wcsaxes.CoordinatesMap`
The :class:`~astropy.visualization.wcsaxes.CoordinatesMap` object this
coordinate belongs to.
transform : `~matplotlib.transforms.Transform`
The transform corresponding to this coordinate system.
coord_index : int
The index of this coordinate in the
:class:`~astropy.visualization.wcsaxes.CoordinatesMap`.
coord_type : {'longitude', 'latitude', 'scalar'}
The type of this coordinate, which is used to determine the wrapping and
boundary behavior of coordinates. Longitudes wrap at ``coord_wrap``,
latitudes have to be in the range -90 to 90, and scalars are unbounded
and do not wrap.
coord_unit : `~astropy.units.Unit`
The unit that this coordinate is in given the output of transform.
format_unit : `~astropy.units.Unit`, optional
The unit to use to display the coordinates.
    coord_wrap : `~astropy.units.Quantity`, optional
The angle at which the longitude wraps (defaults to 360 degrees).
frame : `~astropy.visualization.wcsaxes.frame.BaseFrame`
The frame of the :class:`~astropy.visualization.wcsaxes.WCSAxes`.
"""
def __init__(
self,
parent_axes=None,
parent_map=None,
transform=None,
coord_index=None,
coord_type="scalar",
coord_unit=None,
coord_wrap=None,
frame=None,
format_unit=None,
default_label=None,
):
# Keep a reference to the parent axes and the transform
self.parent_axes = parent_axes
self.parent_map = parent_map
self.transform = transform
self.coord_index = coord_index
self.coord_unit = coord_unit
self._format_unit = format_unit
self.frame = frame
self.default_label = default_label or ""
self._auto_axislabel = True
# Disable auto label for elliptical frames as it puts labels in
# annoying places.
if issubclass(self.parent_axes.frame_class, EllipticalFrame):
self._auto_axislabel = False
self.set_coord_type(coord_type, coord_wrap)
# Initialize ticks
self.dpi_transform = Affine2D()
self.offset_transform = ScaledTranslation(0, 0, self.dpi_transform)
self.ticks = Ticks(transform=parent_axes.transData + self.offset_transform)
# Initialize tick labels
self.ticklabels = TickLabels(
self.frame,
transform=None, # display coordinates
figure=parent_axes.get_figure(),
)
self.ticks.display_minor_ticks(rcParams["xtick.minor.visible"])
self.minor_frequency = 5
# Initialize axis labels
self.axislabels = AxisLabels(
self.frame,
transform=None, # display coordinates
figure=parent_axes.get_figure(),
)
# Initialize container for the grid lines
self.grid_lines = []
# Initialize grid style. Take defaults from matplotlib.rcParams.
# Based on matplotlib.axis.YTick._get_gridline.
self.grid_lines_kwargs = {
"visible": False,
"facecolor": "none",
"edgecolor": rcParams["grid.color"],
"linestyle": LINES_TO_PATCHES_LINESTYLE[rcParams["grid.linestyle"]],
"linewidth": rcParams["grid.linewidth"],
"alpha": rcParams["grid.alpha"],
"transform": self.parent_axes.transData,
}
def grid(self, draw_grid=True, grid_type=None, **kwargs):
"""
Plot grid lines for this coordinate.
Standard matplotlib appearance options (color, alpha, etc.) can be
passed as keyword arguments.
Parameters
----------
draw_grid : bool
Whether to show the gridlines
grid_type : {'lines', 'contours'}
Whether to plot the contours by determining the grid lines in
world coordinates and then plotting them in world coordinates
(``'lines'``) or by determining the world coordinates at many
positions in the image and then drawing contours
(``'contours'``). The first is recommended for 2-d images, while
for 3-d (or higher dimensional) cubes, the ``'contours'`` option
is recommended. By default, 'lines' is used if the transform has
an inverse, otherwise 'contours' is used.
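
        Examples
        --------
        A minimal sketch, assuming ``ax`` is a WCSAxes instance::

            lon, lat = ax.coords
            lon.grid(color="white", alpha=0.5, linestyle="solid")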
"""
if grid_type == "lines" and not self.transform.has_inverse:
raise ValueError(
"The specified transform has no inverse, so the "
"grid cannot be drawn using grid_type='lines'"
)
if grid_type is None:
grid_type = "lines" if self.transform.has_inverse else "contours"
if grid_type in ("lines", "contours"):
self._grid_type = grid_type
else:
raise ValueError("grid_type should be 'lines' or 'contours'")
if "color" in kwargs:
kwargs["edgecolor"] = kwargs.pop("color")
self.grid_lines_kwargs.update(kwargs)
if self.grid_lines_kwargs["visible"]:
if not draw_grid:
self.grid_lines_kwargs["visible"] = False
else:
self.grid_lines_kwargs["visible"] = True
def set_coord_type(self, coord_type, coord_wrap=None):
"""
Set the coordinate type for the axis.
Parameters
----------
coord_type : str
One of 'longitude', 'latitude' or 'scalar'
coord_wrap : `~astropy.units.Quantity`, optional
The value to wrap at for angular coordinates.
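
        Examples
        --------
        A minimal sketch, assuming ``lon`` is the longitude coordinate of a
        WCSAxes instance::

            from astropy import units as u

            lon.set_coord_type("longitude", coord_wrap=180 * u.deg)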
"""
self.coord_type = coord_type
if coord_wrap is not None and not isinstance(coord_wrap, u.Quantity):
warnings.warn(
"Passing 'coord_wrap' as a number is deprecated. Use a Quantity with units convertible to angular degrees instead.",
AstropyDeprecationWarning,
)
coord_wrap = coord_wrap * u.deg
if coord_type == "longitude" and coord_wrap is None:
self.coord_wrap = 360 * u.deg
elif coord_type != "longitude" and coord_wrap is not None:
raise NotImplementedError(
"coord_wrap is not yet supported for non-longitude coordinates"
)
else:
self.coord_wrap = coord_wrap
# Initialize tick formatter/locator
if coord_type == "scalar":
self._coord_scale_to_deg = None
self._formatter_locator = ScalarFormatterLocator(unit=self.coord_unit)
elif coord_type in ["longitude", "latitude"]:
if self.coord_unit is u.deg:
self._coord_scale_to_deg = None
else:
self._coord_scale_to_deg = self.coord_unit.to(u.deg)
self._formatter_locator = AngleFormatterLocator(
unit=self.coord_unit, format_unit=self._format_unit
)
else:
raise ValueError(
"coord_type should be one of 'scalar', 'longitude', or 'latitude'"
)
def set_major_formatter(self, formatter):
"""
Set the formatter to use for the major tick labels.
Parameters
----------
formatter : str or `~matplotlib.ticker.Formatter`
The format or formatter to use.
"""
if isinstance(formatter, Formatter):
raise NotImplementedError() # figure out how to swap out formatter
elif isinstance(formatter, str):
self._formatter_locator.format = formatter
else:
raise TypeError("formatter should be a string or a Formatter instance")
def format_coord(self, value, format="auto"):
"""
Given the value of a coordinate, will format it according to the
format of the formatter_locator.
Parameters
----------
value : float
The value to format
format : {'auto', 'ascii', 'latex'}, optional
            The format to use - by default the formatting will be adjusted
            depending on whether Matplotlib is using LaTeX or MathText. To
            get plain ASCII strings, use ``format='ascii'``.
"""
if not hasattr(self, "_fl_spacing"):
return "" # _update_ticks has not been called yet
fl = self._formatter_locator
if isinstance(fl, AngleFormatterLocator):
# Convert to degrees if needed
if self._coord_scale_to_deg is not None:
value *= self._coord_scale_to_deg
if self.coord_type == "longitude":
value = wrap_angle_at(value, self.coord_wrap.to_value(u.deg))
value = value * u.degree
value = value.to_value(fl._unit)
spacing = self._fl_spacing
string = fl.formatter(values=[value] * fl._unit, spacing=spacing, format=format)
return string[0]
def set_separator(self, separator):
"""
Set the separator to use for the angle major tick labels.
Parameters
----------
separator : str or tuple or None
The separator between numbers in sexagesimal representation. Can be
either a string or a tuple (or `None` for default).
"""
        if self._formatter_locator.__class__ is not AngleFormatterLocator:
raise TypeError("Separator can only be specified for angle coordinates")
if isinstance(separator, (str, tuple)) or separator is None:
self._formatter_locator.sep = separator
else:
raise TypeError("separator should be a string, a tuple, or None")
def set_format_unit(self, unit, decimal=None, show_decimal_unit=True):
"""
Set the unit for the major tick labels.
Parameters
----------
        unit : :class:`~astropy.units.Unit`
            The unit to which the tick labels should be converted.
decimal : bool, optional
Whether to use decimal formatting. By default this is `False`
for degrees or hours (which therefore use sexagesimal formatting)
and `True` for all other units.
show_decimal_unit : bool, optional
Whether to include units when in decimal mode.
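
        Examples
        --------
        A minimal sketch, assuming ``lon`` is a longitude coordinate of a
        WCSAxes instance::

            from astropy import units as u

            lon.set_format_unit(u.hourangle, decimal=False)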
"""
self._formatter_locator.format_unit = u.Unit(unit)
self._formatter_locator.decimal = decimal
self._formatter_locator.show_decimal_unit = show_decimal_unit
def get_format_unit(self):
"""
Get the unit for the major tick labels.
"""
return self._formatter_locator.format_unit
def set_ticks(
self,
values=None,
spacing=None,
number=None,
size=None,
width=None,
color=None,
alpha=None,
direction=None,
exclude_overlapping=None,
):
"""
Set the location and properties of the ticks.
At most one of the options from ``values``, ``spacing``, or
``number`` can be specified.
Parameters
----------
values : iterable, optional
The coordinate values at which to show the ticks.
spacing : float, optional
The spacing between ticks.
number : float, optional
The approximate number of ticks shown.
        size : float, optional
            The length of the ticks in points.
        width : float, optional
            The width of the ticks in points.
        color : str or tuple, optional
            A valid Matplotlib color for the ticks.
alpha : float, optional
The alpha value (transparency) for the ticks.
direction : {'in','out'}, optional
Whether the ticks should point inwards or outwards.
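
        Examples
        --------
        A minimal sketch, assuming ``lon`` is a longitude coordinate of a
        WCSAxes instance::

            from astropy import units as u

            lon.set_ticks(spacing=30 * u.arcmin, color="white")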
"""
if sum([values is None, spacing is None, number is None]) < 2:
raise ValueError(
"At most one of values, spacing, or number should be specified"
)
if values is not None:
self._formatter_locator.values = values
elif spacing is not None:
self._formatter_locator.spacing = spacing
elif number is not None:
self._formatter_locator.number = number
if size is not None:
self.ticks.set_ticksize(size)
if width is not None:
self.ticks.set_linewidth(width)
if color is not None:
self.ticks.set_color(color)
if alpha is not None:
self.ticks.set_alpha(alpha)
if direction is not None:
if direction in ("in", "out"):
self.ticks.set_tick_out(direction == "out")
else:
raise ValueError("direction should be 'in' or 'out'")
if exclude_overlapping is not None:
warnings.warn(
"exclude_overlapping= should be passed to "
"set_ticklabel instead of set_ticks",
AstropyDeprecationWarning,
)
self.ticklabels.set_exclude_overlapping(exclude_overlapping)
def set_ticks_position(self, position):
"""
Set where ticks should appear.
Parameters
----------
position : str
The axes on which the ticks for this coordinate should appear.
Should be a string containing zero or more of ``'b'``, ``'t'``,
``'l'``, ``'r'``. For example, ``'lb'`` will lead the ticks to be
shown on the left and bottom axis.
"""
self.ticks.set_visible_axes(position)
def set_ticks_visible(self, visible):
"""
Set whether ticks are visible or not.
Parameters
----------
visible : bool
The visibility of ticks. Setting as ``False`` will hide ticks
along this coordinate.
"""
self.ticks.set_visible(visible)
def set_ticklabel(
self, color=None, size=None, pad=None, exclude_overlapping=None, **kwargs
):
"""
Set the visual properties for the tick labels.
Parameters
----------
size : float, optional
            The size of the tick labels in points.
        color : str or tuple, optional
            A valid Matplotlib color for the tick labels.
pad : float, optional
Distance in points between tick and label.
exclude_overlapping : bool, optional
Whether to exclude tick labels that overlap over each other.
**kwargs
Other keyword arguments are passed to :class:`matplotlib.text.Text`.
"""
if size is not None:
self.ticklabels.set_size(size)
if color is not None:
self.ticklabels.set_color(color)
if pad is not None:
self.ticklabels.set_pad(pad)
if exclude_overlapping is not None:
self.ticklabels.set_exclude_overlapping(exclude_overlapping)
self.ticklabels.set(**kwargs)
def set_ticklabel_position(self, position):
"""
Set where tick labels should appear.
Parameters
----------
position : str
The axes on which the tick labels for this coordinate should
appear. Should be a string containing zero or more of ``'b'``,
``'t'``, ``'l'``, ``'r'``. For example, ``'lb'`` will lead the
tick labels to be shown on the left and bottom axis.
"""
self.ticklabels.set_visible_axes(position)
def set_ticklabel_visible(self, visible):
"""
Set whether the tick labels are visible or not.
Parameters
----------
visible : bool
            The visibility of tick labels. Setting as ``False`` will hide this
coordinate's tick labels.
"""
self.ticklabels.set_visible(visible)
def set_axislabel(self, text, minpad=1, **kwargs):
"""
Set the text and optionally visual properties for the axis label.
Parameters
----------
text : str
The axis label text.
minpad : float, optional
The padding for the label in terms of axis label font size.
**kwargs
Keywords are passed to :class:`matplotlib.text.Text`. These
can include keywords to set the ``color``, ``size``, ``weight``, and
other text properties.
"""
fontdict = kwargs.pop("fontdict", None)
# NOTE: When using plt.xlabel/plt.ylabel, minpad can get set explicitly
# to None so we need to make sure that in that case we change to a
# default numerical value.
if minpad is None:
minpad = 1
self.axislabels.set_text(text)
self.axislabels.set_minpad(minpad)
self.axislabels.set(**kwargs)
if fontdict is not None:
self.axislabels.update(fontdict)
def get_axislabel(self):
"""
Get the text for the axis label.
Returns
-------
label : str
The axis label
"""
return self.axislabels.get_text()
def set_auto_axislabel(self, auto_label):
"""
        Set whether a default axis label is rendered if no explicit label is provided.
Parameters
----------
auto_label : `bool`
`True` if default labels will be rendered.
"""
self._auto_axislabel = bool(auto_label)
def get_auto_axislabel(self):
"""
        Get whether a default axis label is rendered if no explicit label is provided.
Returns
-------
auto_axislabel : `bool`
`True` if default labels will be rendered.
"""
return self._auto_axislabel
def _get_default_axislabel(self):
unit = self.get_format_unit() or self.coord_unit
if not unit or unit is u.one or self.coord_type in ("longitude", "latitude"):
return f"{self.default_label}"
else:
return f"{self.default_label} [{unit:latex}]"
def set_axislabel_position(self, position):
"""
Set where axis labels should appear.
Parameters
----------
position : str
The axes on which the axis label for this coordinate should
appear. Should be a string containing zero or more of ``'b'``,
``'t'``, ``'l'``, ``'r'``. For example, ``'lb'`` will lead the
axis label to be shown on the left and bottom axis.
"""
self.axislabels.set_visible_axes(position)
def set_axislabel_visibility_rule(self, rule):
"""
Set the rule used to determine when the axis label is drawn.
Parameters
----------
rule : str
If the rule is 'always' axis labels will always be drawn on the
axis. If the rule is 'ticks' the label will only be drawn if ticks
were drawn on that axis. If the rule is 'labels' the axis label
will only be drawn if tick labels were drawn on that axis.
"""
self.axislabels.set_visibility_rule(rule)
    def get_axislabel_visibility_rule(self):
"""
Get the rule used to determine when the axis label is drawn.
"""
return self.axislabels.get_visibility_rule()
@property
def locator(self):
return self._formatter_locator.locator
@property
def formatter(self):
return self._formatter_locator.formatter
def _draw_grid(self, renderer):
renderer.open_group("grid lines")
self._update_ticks()
if self.grid_lines_kwargs["visible"]:
if isinstance(self.frame, RectangularFrame1D):
self._update_grid_lines_1d()
else:
if self._grid_type == "lines":
self._update_grid_lines()
else:
self._update_grid_contour()
if self._grid_type == "lines":
frame_patch = self.frame.patch
for path in self.grid_lines:
p = PathPatch(path, **self.grid_lines_kwargs)
p.set_clip_path(frame_patch)
p.draw(renderer)
elif self._grid is not None:
for line in self._grid.collections:
line.set(**self.grid_lines_kwargs)
line.draw(renderer)
renderer.close_group("grid lines")
def _draw_ticks(self, renderer, bboxes, ticklabels_bbox):
"""
Draw all ticks and ticklabels.
"""
renderer.open_group("ticks")
self.ticks.draw(renderer)
self.ticklabels.draw(
renderer,
bboxes=bboxes,
ticklabels_bbox=ticklabels_bbox,
tick_out_size=self.ticks.out_size,
)
renderer.close_group("ticks")
def _draw_axislabels(self, renderer, bboxes, ticklabels_bbox, visible_ticks):
# Render the default axis label if no axis label is set.
if self._auto_axislabel and not self.get_axislabel():
self.set_axislabel(self._get_default_axislabel())
renderer.open_group("axis labels")
self.axislabels.draw(
renderer,
bboxes=bboxes,
ticklabels_bbox=ticklabels_bbox,
coord_ticklabels_bbox=ticklabels_bbox[self],
ticks_locs=self.ticks.ticks_locs,
visible_ticks=visible_ticks,
)
renderer.close_group("axis labels")
def _update_ticks(self):
if self.coord_index is None:
return
# TODO: this method should be optimized for speed
# Here we determine the location and rotation of all the ticks. For
# each axis, we can check the intersections for the specific
# coordinate and once we have the tick positions, we can use the WCS
# to determine the rotations.
# Find the range of coordinates in all directions
coord_range = self.parent_map.get_coord_range()
# First find the ticks we want to show
tick_world_coordinates, self._fl_spacing = self.locator(
*coord_range[self.coord_index]
)
if self.ticks.get_display_minor_ticks():
minor_ticks_w_coordinates = self._formatter_locator.minor_locator(
self._fl_spacing,
self.get_minor_frequency(),
*coord_range[self.coord_index],
)
# We want to allow non-standard rectangular frames, so we just rely on
# the parent axes to tell us what the bounding frame is.
from . import conf
frame = self.frame.sample(conf.frame_boundary_samples)
self.ticks.clear()
self.ticklabels.clear()
self.lblinfo = []
self.lbl_world = []
# Look up parent axes' transform from data to figure coordinates.
#
# See:
# https://matplotlib.org/stable/tutorials/advanced/transforms_tutorial.html#the-transformation-pipeline
transData = self.parent_axes.transData
invertedTransLimits = transData.inverted()
for axis, spine in frame.items():
if spine.data.size == 0:
continue
if not isinstance(self.frame, RectangularFrame1D):
# Determine tick rotation in display coordinates and compare to
# the normal angle in display coordinates.
pixel0 = spine.data
world0 = spine.world[:, self.coord_index]
if np.isnan(world0).all():
continue
axes0 = transData.transform(pixel0)
# Advance 2 pixels in figure coordinates
pixel1 = axes0.copy()
pixel1[:, 0] += 2.0
pixel1 = invertedTransLimits.transform(pixel1)
with np.errstate(invalid="ignore"):
world1 = self.transform.transform(pixel1)[:, self.coord_index]
# Advance 2 pixels in figure coordinates
pixel2 = axes0.copy()
pixel2[:, 1] += 2.0 if self.frame.origin == "lower" else -2.0
pixel2 = invertedTransLimits.transform(pixel2)
with np.errstate(invalid="ignore"):
world2 = self.transform.transform(pixel2)[:, self.coord_index]
dx = world1 - world0
dy = world2 - world0
# Rotate by 90 degrees
dx, dy = -dy, dx
if self.coord_type == "longitude":
if self._coord_scale_to_deg is not None:
dx *= self._coord_scale_to_deg
dy *= self._coord_scale_to_deg
# Here we wrap at 180 not self.coord_wrap since we want to
# always ensure abs(dx) < 180 and abs(dy) < 180
dx = wrap_angle_at(dx, 180.0)
dy = wrap_angle_at(dy, 180.0)
tick_angle = np.degrees(np.arctan2(dy, dx))
normal_angle_full = np.hstack(
[spine.normal_angle, spine.normal_angle[-1]]
)
with np.errstate(invalid="ignore"):
reset = ((normal_angle_full - tick_angle) % 360 > 90.0) & (
(tick_angle - normal_angle_full) % 360 > 90.0
)
tick_angle[reset] -= 180.0
else:
rotation = 90 if axis == "b" else -90
tick_angle = np.zeros((conf.frame_boundary_samples,)) + rotation
# We find for each interval the starting and ending coordinate,
# ensuring that we take wrapping into account correctly for
# longitudes.
w1 = spine.world[:-1, self.coord_index]
w2 = spine.world[1:, self.coord_index]
if self.coord_type == "longitude":
if self._coord_scale_to_deg is not None:
w1 = w1 * self._coord_scale_to_deg
w2 = w2 * self._coord_scale_to_deg
w1 = wrap_angle_at(w1, self.coord_wrap.to_value(u.deg))
w2 = wrap_angle_at(w2, self.coord_wrap.to_value(u.deg))
with np.errstate(invalid="ignore"):
w1[w2 - w1 > 180.0] += 360
w2[w1 - w2 > 180.0] += 360
if self._coord_scale_to_deg is not None:
w1 = w1 / self._coord_scale_to_deg
w2 = w2 / self._coord_scale_to_deg
# For longitudes, we need to check ticks as well as ticks + 360,
# since the above can produce pairs such as 359 to 361 or 0.5 to
# 1.5, both of which would match a tick at 0.75. Otherwise we just
# check the ticks determined above.
self._compute_ticks(tick_world_coordinates, spine, axis, w1, w2, tick_angle)
if self.ticks.get_display_minor_ticks():
self._compute_ticks(
minor_ticks_w_coordinates,
spine,
axis,
w1,
w2,
tick_angle,
ticks="minor",
)
# format tick labels, add to scene
text = self.formatter(
self.lbl_world * tick_world_coordinates.unit, spacing=self._fl_spacing
)
for kwargs, txt in zip(self.lblinfo, text):
self.ticklabels.add(text=txt, **kwargs)
def _compute_ticks(
self, tick_world_coordinates, spine, axis, w1, w2, tick_angle, ticks="major"
):
if self.coord_type == "longitude":
tick_world_coordinates_values = tick_world_coordinates.to_value(u.deg)
tick_world_coordinates_values = np.hstack(
[tick_world_coordinates_values, tick_world_coordinates_values + 360]
)
tick_world_coordinates_values *= u.deg.to(self.coord_unit)
else:
tick_world_coordinates_values = tick_world_coordinates.to_value(
self.coord_unit
)
for t in tick_world_coordinates_values:
# Find steps where a tick is present. We have to check
# separately for the case where the tick falls exactly on the
# frame points, otherwise we'll get two matches, one for w1 and
# one for w2.
with np.errstate(invalid="ignore"):
intersections = np.hstack(
[
np.nonzero((t - w1) == 0)[0],
np.nonzero(((t - w1) * (t - w2)) < 0)[0],
]
)
# But we also need to check for intersection with the last w2
if t - w2[-1] == 0:
intersections = np.append(intersections, len(w2) - 1)
# Loop over ticks, and find exact pixel coordinates by linear
# interpolation
for imin in intersections:
imax = imin + 1
if np.allclose(w1[imin], w2[imin], rtol=1.0e-13, atol=1.0e-13):
continue # tick is exactly aligned with frame
else:
frac = (t - w1[imin]) / (w2[imin] - w1[imin])
x_data_i = spine.data[imin, 0] + frac * (
spine.data[imax, 0] - spine.data[imin, 0]
)
y_data_i = spine.data[imin, 1] + frac * (
spine.data[imax, 1] - spine.data[imin, 1]
)
delta_angle = tick_angle[imax] - tick_angle[imin]
if delta_angle > 180.0:
delta_angle -= 360.0
elif delta_angle < -180.0:
delta_angle += 360.0
angle_i = tick_angle[imin] + frac * delta_angle
if self.coord_type == "longitude":
if self._coord_scale_to_deg is not None:
t *= self._coord_scale_to_deg
world = wrap_angle_at(t, self.coord_wrap.to_value(u.deg))
if self._coord_scale_to_deg is not None:
world /= self._coord_scale_to_deg
else:
world = t
if ticks == "major":
self.ticks.add(
axis=axis,
pixel=(x_data_i, y_data_i),
world=world,
angle=angle_i,
axis_displacement=imin + frac,
)
# store information to pass to ticklabels.add
# it's faster to format many ticklabels at once outside
# of the loop
self.lblinfo.append(
dict(
axis=axis,
data=(x_data_i, y_data_i),
world=world,
angle=spine.normal_angle[imin],
axis_displacement=imin + frac,
)
)
self.lbl_world.append(world)
else:
self.ticks.add_minor(
minor_axis=axis,
minor_pixel=(x_data_i, y_data_i),
minor_world=world,
minor_angle=angle_i,
minor_axis_displacement=imin + frac,
)
def display_minor_ticks(self, display_minor_ticks):
"""
Display minor ticks for this coordinate.
Parameters
----------
display_minor_ticks : bool
Whether or not to display minor ticks.
"""
self.ticks.display_minor_ticks(display_minor_ticks)
def get_minor_frequency(self):
return self.minor_frequency
def set_minor_frequency(self, frequency):
"""
Set the frequency of minor ticks per major ticks.
Parameters
----------
frequency : int
The number of minor ticks per major ticks.
"""
self.minor_frequency = frequency
def _update_grid_lines_1d(self):
if self.coord_index is None:
return
x_ticks_pos = [a[0] for a in self.ticks.pixel["b"]]
ymin, ymax = self.parent_axes.get_ylim()
self.grid_lines = []
for x_coord in x_ticks_pos:
pixel = [[x_coord, ymin], [x_coord, ymax]]
self.grid_lines.append(Path(pixel))
def _update_grid_lines(self):
# For 3-d WCS with a correlated third axis, the *proper* way of
# drawing a grid should be to find the world coordinates of all pixels
# and drawing contours. What we are doing here assumes that we can
# define the grid lines with just two of the coordinates (and
# therefore assumes that the other coordinates are fixed and set to
# the value in the slice). Here we basically assume that if the WCS
# had a third axis, it has been abstracted away in the transformation.
if self.coord_index is None:
return
coord_range = self.parent_map.get_coord_range()
tick_world_coordinates, spacing = self.locator(*coord_range[self.coord_index])
tick_world_coordinates_values = tick_world_coordinates.to_value(self.coord_unit)
n_coord = len(tick_world_coordinates_values)
from . import conf
n_samples = conf.grid_samples
xy_world = np.zeros((n_samples * n_coord, 2))
self.grid_lines = []
for iw, w in enumerate(tick_world_coordinates_values):
subset = slice(iw * n_samples, (iw + 1) * n_samples)
if self.coord_index == 0:
xy_world[subset, 0] = np.repeat(w, n_samples)
xy_world[subset, 1] = np.linspace(
coord_range[1][0], coord_range[1][1], n_samples
)
else:
xy_world[subset, 0] = np.linspace(
coord_range[0][0], coord_range[0][1], n_samples
)
xy_world[subset, 1] = np.repeat(w, n_samples)
# We now convert all the world coordinates to pixel coordinates in a
# single go rather than doing this in the gridline to path conversion
# to fully benefit from vectorized coordinate transformations.
# Transform line to pixel coordinates
pixel = self.transform.inverted().transform(xy_world)
# Create round-tripped values for checking
xy_world_round = self.transform.transform(pixel)
for iw in range(n_coord):
subset = slice(iw * n_samples, (iw + 1) * n_samples)
self.grid_lines.append(
self._get_gridline(
xy_world[subset], pixel[subset], xy_world_round[subset]
)
)
def add_tickable_gridline(self, name, constant):
"""
Define a gridline that can be used for ticks and labels.
This gridline is not itself drawn, but instead can be specified in calls to
methods such as
:meth:`~astropy.visualization.wcsaxes.coordinate_helpers.CoordinateHelper.set_ticklabel_position`
for drawing ticks and labels. Since the gridline has a constant value in this
coordinate, and thus would not have any ticks or labels for the same coordinate,
the call to
:meth:`~astropy.visualization.wcsaxes.coordinate_helpers.CoordinateHelper.set_ticklabel_position`
would typically be made on the complementary coordinate.
Parameters
----------
name : str
The name for the gridline, usually a single character, but can be longer
constant : `~astropy.units.Quantity`
The constant coordinate value of the gridline
Notes
-----
A limitation is that the tickable part of the gridline must be contiguous. If
the gridline consists of more than one disconnected segment within the plot
extent, only one of those segments will be made tickable.
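
        Examples
        --------
        A minimal sketch, assuming ``lat`` is the latitude coordinate of a
        WCSAxes instance (the gridline name ``"const-lat"`` is arbitrary)::

            from astropy import units as u

            lat.add_tickable_gridline("const-lat", 0 * u.deg)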
"""
if self.coord_index is None:
return
if name in self.frame:
raise ValueError(f"The frame already has a spine with the name '{name}'")
coord_range = self.parent_map.get_coord_range()
constant = constant.to_value(self.coord_unit)
from . import conf
n_samples = conf.grid_samples
# See comment in _update_grid_lines() about a WCS with more than 2 axes
xy_world = np.zeros((n_samples, 2))
xy_world[:, self.coord_index] = np.repeat(constant, n_samples)
# If the complementary coordinate is longitude, we attempt to close the gridline
# If such closure is a discontinuity, it will be filtered out later
if self.parent_map[1 - self.coord_index].coord_type == "longitude":
xy_world[:-1, 1 - self.coord_index] = np.linspace(
coord_range[1 - self.coord_index][0],
coord_range[1 - self.coord_index][1],
n_samples - 1,
)
xy_world[-1, 1 - self.coord_index] = coord_range[1 - self.coord_index][0]
else:
xy_world[:, 1 - self.coord_index] = np.linspace(
coord_range[1 - self.coord_index][0],
coord_range[1 - self.coord_index][1],
n_samples,
)
# Transform line to pixel coordinates
pixel = self.transform.inverted().transform(xy_world)
# Create round-tripped values for checking
xy_world_round = self.transform.transform(pixel)
# Get the path of the gridline, which masks hidden parts
gridline = self._get_gridline(xy_world, pixel, xy_world_round)
def data_for_spine(spine):
vertices = gridline.vertices.copy()
codes = gridline.codes.copy()
# Retain the parts of the gridline within the rectangular plot bounds.
# We ought to use the potentially non-rectangular plot frame, but
# calculating that patch requires updating all spines first, which is a
# catch-22.
xmin, xmax = spine.parent_axes.get_xlim()
ymin, ymax = spine.parent_axes.get_ylim()
keep = (
(vertices[:, 0] >= xmin)
& (vertices[:, 0] <= xmax)
& (vertices[:, 1] >= ymin)
& (vertices[:, 1] <= ymax)
)
codes[~keep] = Path.MOVETO
codes[1:][~keep[:-1]] = Path.MOVETO
# We isolate the last segment (the last run of LINETOs), which must be preceded
# by at least one MOVETO and may be succeeded by MOVETOs.
# We have to account for longitude wrapping as well.
# Bail out if there is no visible segment
lineto = np.flatnonzero(codes == Path.LINETO)
if np.size(lineto) == 0:
return np.zeros((0, 2))
# Find the start of the last segment (the last MOVETO before the LINETOs)
last_segment = np.flatnonzero(codes[: lineto[-1]] == Path.MOVETO)[-1]
# Double the gridline if it is closed (i.e., spans all longitudes)
if vertices[0, 0] == vertices[-1, 0] and vertices[0, 1] == vertices[-1, 1]:
codes = np.concatenate([codes, codes[1:]])
vertices = np.vstack([vertices, vertices[1:, :]])
# Stop the last segment before any trailing MOVETOs
moveto = np.flatnonzero(codes[last_segment + 1 :] == Path.MOVETO)
if np.size(moveto) > 0:
return vertices[last_segment : last_segment + moveto[0] + 1, :]
else:
return vertices[last_segment:n_samples, :]
self.frame[name] = self.frame.spine_class(
self.frame.parent_axes, self.frame.transform, data_func=data_for_spine
)
def _get_gridline(self, xy_world, pixel, xy_world_round):
if self.coord_type == "scalar":
return get_gridline_path(xy_world, pixel)
else:
return get_lon_lat_path(xy_world, pixel, xy_world_round)
def _clear_grid_contour(self):
if hasattr(self, "_grid") and self._grid:
for line in self._grid.collections:
line.remove()
def _update_grid_contour(self):
if self.coord_index is None:
return
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
from . import conf
res = conf.contour_grid_samples
x, y = np.meshgrid(np.linspace(xmin, xmax, res), np.linspace(ymin, ymax, res))
pixel = np.array([x.ravel(), y.ravel()]).T
world = self.transform.transform(pixel)
field = world[:, self.coord_index].reshape(res, res).T
coord_range = self.parent_map.get_coord_range()
tick_world_coordinates, spacing = self.locator(*coord_range[self.coord_index])
        # tick_world_coordinates is a Quantity array and we only need its values
tick_world_coordinates_values = tick_world_coordinates.value
if self.coord_type == "longitude":
# Find biggest gap in tick_world_coordinates and wrap in middle
# For now just assume spacing is equal, so any mid-point will do
mid = 0.5 * (
tick_world_coordinates_values[0] + tick_world_coordinates_values[1]
)
field = wrap_angle_at(field, mid)
tick_world_coordinates_values = wrap_angle_at(
tick_world_coordinates_values, mid
)
# Replace wraps by NaN
with np.errstate(invalid="ignore"):
reset = (np.abs(np.diff(field[:, :-1], axis=0)) > 180) | (
np.abs(np.diff(field[:-1, :], axis=1)) > 180
)
field[:-1, :-1][reset] = np.nan
field[1:, :-1][reset] = np.nan
field[:-1, 1:][reset] = np.nan
field[1:, 1:][reset] = np.nan
if len(tick_world_coordinates_values) > 0:
with np.errstate(invalid="ignore"):
self._grid = self.parent_axes.contour(
x,
y,
field.transpose(),
levels=np.sort(tick_world_coordinates_values),
)
else:
self._grid = None
def tick_params(self, which="both", **kwargs):
"""
Method to set the tick and tick label parameters in the same way as the
:meth:`~matplotlib.axes.Axes.tick_params` method in Matplotlib.
This is provided for convenience, but the recommended API is to use
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks_position`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel_position`,
and :meth:`~astropy.visualization.wcsaxes.CoordinateHelper.grid`.
Parameters
----------
which : {'both', 'major', 'minor'}, optional
            Which ticks to apply the settings to. By default, settings are
applied to both major and minor ticks. Note that if ``'minor'`` is
specified, only the length of the ticks can be set currently.
direction : {'in', 'out'}, optional
Puts ticks inside the axes, or outside the axes.
length : float, optional
Tick length in points.
width : float, optional
Tick width in points.
color : color, optional
Tick color (accepts any valid Matplotlib color)
pad : float, optional
Distance in points between tick and label.
labelsize : float or str, optional
Tick label font size in points or as a string (e.g., 'large').
labelcolor : color, optional
Tick label color (accepts any valid Matplotlib color)
colors : color, optional
Changes the tick color and the label color to the same value
(accepts any valid Matplotlib color).
bottom, top, left, right : bool, optional
Where to draw the ticks. Note that this will not work correctly if
the frame is not rectangular.
labelbottom, labeltop, labelleft, labelright : bool, optional
Where to draw the tick labels. Note that this will not work
correctly if the frame is not rectangular.
grid_color : color, optional
The color of the grid lines (accepts any valid Matplotlib color).
grid_alpha : float, optional
Transparency of grid lines: 0 (transparent) to 1 (opaque).
grid_linewidth : float, optional
Width of grid lines in points.
grid_linestyle : str, optional
The style of the grid lines (accepts any valid Matplotlib line
style).
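
        Examples
        --------
        A minimal sketch, assuming ``ax`` is a WCSAxes instance::

            lon = ax.coords[0]
            lon.tick_params(direction="in", colors="white", grid_color="gray")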
"""
# First do some sanity checking on the keyword arguments
# colors= is a fallback default for color and labelcolor
if "colors" in kwargs:
if "color" not in kwargs:
kwargs["color"] = kwargs["colors"]
if "labelcolor" not in kwargs:
kwargs["labelcolor"] = kwargs["colors"]
# The only property that can be set *specifically* for minor ticks is
# the length. In future we could consider having a separate Ticks instance
# for minor ticks so that e.g. the color can be set separately.
if which == "minor":
if len(set(kwargs) - {"length"}) > 0:
raise ValueError(
"When setting which='minor', the only "
"property that can be set at the moment is "
"'length' (the minor tick length)"
)
else:
if "length" in kwargs:
self.ticks.set_minor_ticksize(kwargs["length"])
return
# At this point, we can now ignore the 'which' argument.
# Set the tick arguments
self.set_ticks(
size=kwargs.get("length"),
width=kwargs.get("width"),
color=kwargs.get("color"),
direction=kwargs.get("direction"),
)
# Set the tick position
position = None
for arg in ("bottom", "left", "top", "right"):
if arg in kwargs and position is None:
position = ""
if kwargs.get(arg):
position += arg[0]
if position is not None:
self.set_ticks_position(position)
# Set the tick label arguments.
self.set_ticklabel(
color=kwargs.get("labelcolor"),
size=kwargs.get("labelsize"),
pad=kwargs.get("pad"),
)
# Set the tick label position
position = None
for arg in ("bottom", "left", "top", "right"):
if "label" + arg in kwargs and position is None:
position = ""
if kwargs.get("label" + arg):
position += arg[0]
if position is not None:
self.set_ticklabel_position(position)
# And the grid settings
if "grid_color" in kwargs:
self.grid_lines_kwargs["edgecolor"] = kwargs["grid_color"]
if "grid_alpha" in kwargs:
self.grid_lines_kwargs["alpha"] = kwargs["grid_alpha"]
if "grid_linewidth" in kwargs:
self.grid_lines_kwargs["linewidth"] = kwargs["grid_linewidth"]
if "grid_linestyle" in kwargs:
if kwargs["grid_linestyle"] in LINES_TO_PATCHES_LINESTYLE:
self.grid_lines_kwargs["linestyle"] = LINES_TO_PATCHES_LINESTYLE[
kwargs["grid_linestyle"]
]
else:
self.grid_lines_kwargs["linestyle"] = kwargs["grid_linestyle"]
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file defines the AngleFormatterLocator class which is a class that
# provides both a method for a formatter and one for a locator, for a given
# label spacing. The advantage of keeping the two connected is that we need to
# make sure that the formatter can correctly represent the spacing requested and
# vice versa. For example, a format of dd:mm cannot work with a tick spacing
# that is not a multiple of one arcminute.
import re
import warnings
import numpy as np
from matplotlib import rcParams
from astropy import units as u
from astropy.coordinates import Angle
from astropy.units import UnitsError
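# Regular expressions matching the supported format strings, e.g.
# "dd:mm:ss.ss" and "hh:mm" for sexagesimal angles, "d.dd"/"m.mm"/"s.ss" for
# decimal degrees/arcminutes/arcseconds, and "x.xx" for scalar values.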
DMS_RE = re.compile(r"^dd(:mm(:ss(\.(s)+)?)?)?$")
HMS_RE = re.compile(r"^hh(:mm(:ss(\.(s)+)?)?)?$")
DDEC_RE = re.compile(r"^d(\.(d)+)?$")
DMIN_RE = re.compile(r"^m(\.(m)+)?$")
DSEC_RE = re.compile(r"^s(\.(s)+)?$")
SCAL_RE = re.compile(r"^x(\.(x)+)?$")
# Units with custom representations - see the note where it is used inside
# AngleFormatterLocator.formatter for more details.
CUSTOM_UNITS = {
u.degree: u.def_unit(
"custom_degree",
represents=u.degree,
format={"generic": "\xb0", "latex": r"^\circ", "unicode": "°"},
),
u.arcmin: u.def_unit(
"custom_arcmin",
represents=u.arcmin,
format={"generic": "'", "latex": r"^\prime", "unicode": "′"},
),
u.arcsec: u.def_unit(
"custom_arcsec",
represents=u.arcsec,
format={"generic": '"', "latex": r"^{\prime\prime}", "unicode": "″"},
),
u.hourangle: u.def_unit(
"custom_hourangle",
represents=u.hourangle,
format={
"generic": "h",
"latex": r"^{\mathrm{h}}",
"unicode": r"$\mathregular{^h}$",
},
),
}
class BaseFormatterLocator:
"""
    Base class for joint formatter/locator classes.
"""
def __init__(
self,
values=None,
number=None,
spacing=None,
format=None,
unit=None,
format_unit=None,
):
if len([x for x in (values, number, spacing) if x is None]) < 2:
raise ValueError("At most one of values/number/spacing can be specified")
self._unit = unit
self._format_unit = format_unit or unit
if values is not None:
self.values = values
elif number is not None:
self.number = number
elif spacing is not None:
self.spacing = spacing
else:
self.number = 5
self.format = format
@property
def values(self):
return self._values
@values.setter
def values(self, values):
        if not isinstance(values, u.Quantity) or values.ndim != 1:
raise TypeError("values should be an astropy.units.Quantity array")
if not values.unit.is_equivalent(self._unit):
raise UnitsError(
"value should be in units compatible with "
"coordinate units ({}) but found {}".format(self._unit, values.unit)
)
self._number = None
self._spacing = None
self._values = values
@property
def number(self):
return self._number
@number.setter
def number(self, number):
self._number = number
self._spacing = None
self._values = None
@property
def spacing(self):
return self._spacing
@spacing.setter
def spacing(self, spacing):
self._number = None
self._spacing = spacing
self._values = None
def minor_locator(self, spacing, frequency, value_min, value_max):
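        # Subdivide the major tick spacing by `frequency` and drop the
        # positions that coincide with major ticks, leaving only the
        # intermediate minor tick positions.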
if self.values is not None:
return [] * self._unit
minor_spacing = spacing.value / frequency
values = self._locate_values(value_min, value_max, minor_spacing)
index = np.where((values % frequency) == 0)
index = index[0][0]
values = np.delete(values, np.s_[index::frequency])
return values * minor_spacing * self._unit
@property
def format_unit(self):
return self._format_unit
@format_unit.setter
def format_unit(self, unit):
self._format_unit = u.Unit(unit)
@staticmethod
def _locate_values(value_min, value_max, spacing):
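        # Return the integer multiples of `spacing` that fall within
        # [value_min, value_max]; e.g. (0.3, 2.7, 0.5) -> [1, 2, 3, 4, 5],
        # corresponding to ticks at 0.5, 1.0, 1.5, 2.0 and 2.5.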
imin = np.ceil(value_min / spacing)
imax = np.floor(value_max / spacing)
values = np.arange(imin, imax + 1, dtype=int)
return values
class AngleFormatterLocator(BaseFormatterLocator):
"""
    A joint formatter/locator for angular coordinates.
"""
def __init__(
self,
values=None,
number=None,
spacing=None,
format=None,
unit=None,
decimal=None,
format_unit=None,
show_decimal_unit=True,
):
if unit is None:
unit = u.degree
if format_unit is None:
format_unit = unit
if format_unit not in (u.degree, u.hourangle, u.hour):
if decimal is False:
raise UnitsError(
"Units should be degrees or hours when using non-decimal"
" (sexagesimal) mode"
)
self._decimal = decimal
self._sep = None
self.show_decimal_unit = show_decimal_unit
super().__init__(
values=values,
number=number,
spacing=spacing,
format=format,
unit=unit,
format_unit=format_unit,
)
@property
def decimal(self):
decimal = self._decimal
if self.format_unit not in (u.degree, u.hourangle, u.hour):
if self._decimal is None:
decimal = True
elif self._decimal is False:
raise UnitsError(
"Units should be degrees or hours when using non-decimal"
" (sexagesimal) mode"
)
elif self._decimal is None:
decimal = False
return decimal
@decimal.setter
def decimal(self, value):
self._decimal = value
@property
def spacing(self):
return self._spacing
@spacing.setter
def spacing(self, spacing):
if spacing is not None and (
not isinstance(spacing, u.Quantity) or spacing.unit.physical_type != "angle"
):
raise TypeError(
"spacing should be an astropy.units.Quantity "
"instance with units of angle"
)
self._number = None
self._spacing = spacing
self._values = None
@property
def sep(self):
return self._sep
@sep.setter
def sep(self, separator):
self._sep = separator
@property
def format(self):
return self._format
@format.setter
def format(self, value):
self._format = value
if value is None:
return
if DMS_RE.match(value) is not None:
self._decimal = False
self._format_unit = u.degree
if "." in value:
self._precision = len(value) - value.index(".") - 1
self._fields = 3
else:
self._precision = 0
self._fields = value.count(":") + 1
elif HMS_RE.match(value) is not None:
self._decimal = False
self._format_unit = u.hourangle
if "." in value:
self._precision = len(value) - value.index(".") - 1
self._fields = 3
else:
self._precision = 0
self._fields = value.count(":") + 1
elif DDEC_RE.match(value) is not None:
self._decimal = True
self._format_unit = u.degree
self._fields = 1
if "." in value:
self._precision = len(value) - value.index(".") - 1
else:
self._precision = 0
elif DMIN_RE.match(value) is not None:
self._decimal = True
self._format_unit = u.arcmin
self._fields = 1
if "." in value:
self._precision = len(value) - value.index(".") - 1
else:
self._precision = 0
elif DSEC_RE.match(value) is not None:
self._decimal = True
self._format_unit = u.arcsec
self._fields = 1
if "." in value:
self._precision = len(value) - value.index(".") - 1
else:
self._precision = 0
else:
raise ValueError(f"Invalid format: {value}")
if self.spacing is not None and self.spacing < self.base_spacing:
warnings.warn("Spacing is too small - resetting spacing to match format")
self.spacing = self.base_spacing
if self.spacing is not None:
ratio = (self.spacing / self.base_spacing).decompose().value
remainder = ratio - np.round(ratio)
if abs(remainder) > 1.0e-10:
warnings.warn(
"Spacing is not a multiple of base spacing - resetting spacing to"
" match format"
)
self.spacing = self.base_spacing * max(1, round(ratio))
@property
def base_spacing(self):
if self.decimal:
spacing = self._format_unit / (10.0**self._precision)
else:
if self._fields == 1:
spacing = 1.0 * u.degree
elif self._fields == 2:
spacing = 1.0 * u.arcmin
elif self._fields == 3:
if self._precision == 0:
spacing = 1.0 * u.arcsec
else:
spacing = u.arcsec / (10.0**self._precision)
if self._format_unit is u.hourangle:
spacing *= 15
return spacing
def locator(self, value_min, value_max):
if self.values is not None:
# values were manually specified
return self.values, 1.1 * u.arcsec
else:
# In the special case where value_min is the same as value_max, we
# don't locate any ticks. This can occur for example when taking a
# slice for a cube (along the dimension sliced). We return a
# non-zero spacing in case the caller needs to format a single
            # coordinate, e.g. for mouseover.
if value_min == value_max:
return [] * self._unit, 1 * u.arcsec
if self.spacing is not None:
# spacing was manually specified
spacing_value = self.spacing.to_value(self._unit)
elif self.number is not None:
# number of ticks was specified, work out optimal spacing
# first compute the exact spacing
dv = abs(float(value_max - value_min)) / self.number * self._unit
if self.format is not None and dv < self.base_spacing:
# if the spacing is less than the minimum spacing allowed by the format, simply
# use the format precision instead.
spacing_value = self.base_spacing.to_value(self._unit)
else:
# otherwise we clip to the nearest 'sensible' spacing
if self.decimal:
from .utils import select_step_scalar
spacing_value = select_step_scalar(
dv.to_value(self._format_unit)
) * self._format_unit.to(self._unit)
else:
if self._format_unit is u.degree:
from .utils import select_step_degree
spacing_value = select_step_degree(dv).to_value(self._unit)
else:
from .utils import select_step_hour
spacing_value = select_step_hour(dv).to_value(self._unit)
# We now find the interval values as multiples of the spacing and
# generate the tick positions from this.
values = self._locate_values(value_min, value_max, spacing_value)
return values * spacing_value * self._unit, spacing_value * self._unit
def formatter(self, values, spacing, format="auto"):
if not isinstance(values, u.Quantity) and values is not None:
raise TypeError("values should be a Quantities array")
if len(values) > 0:
decimal = self.decimal
unit = self._format_unit
if unit is u.hour:
unit = u.hourangle
if self.format is None:
if decimal:
# Here we assume the spacing can be arbitrary, so for example
# 1.000223 degrees, in which case we don't want to have a
# format that rounds to degrees. So we find the number of
# decimal places we get from representing the spacing as a
# string in the desired units. The easiest way to find
# the smallest number of decimal places required is to
# format the number as a decimal float and strip any zeros
# from the end. We do this rather than just trusting e.g.
# str() because str(15.) == 15.0. We format using 10 decimal
# places by default before stripping the zeros since this
                    # corresponds to a resolution of less than a microarcsecond,
# which should be sufficient.
spacing = spacing.to_value(unit)
fields = 0
precision = len(
f"{spacing:.10f}".replace("0", " ").strip().split(".", 1)[1]
)
else:
spacing = spacing.to_value(unit / 3600)
if spacing >= 3600:
fields = 1
precision = 0
elif spacing >= 60:
fields = 2
precision = 0
elif spacing >= 1:
fields = 3
precision = 0
else:
fields = 3
precision = -int(np.floor(np.log10(spacing)))
else:
fields = self._fields
precision = self._precision
is_latex = format == "latex" or (
format == "auto" and rcParams["text.usetex"]
)
if decimal:
if self.show_decimal_unit:
sep = "fromunit"
if is_latex:
fmt = "latex"
else:
if unit is u.hourangle:
fmt = "unicode"
else:
fmt = "generic"
unit = CUSTOM_UNITS.get(unit, unit)
else:
sep = "fromunit"
fmt = None
elif self.sep is not None:
sep = self.sep
fmt = None
else:
sep = "fromunit"
if unit == u.degree:
if is_latex:
fmt = "latex"
else:
sep = ("\xb0", "'", '"')
fmt = None
else:
if format == "ascii":
fmt = None
elif is_latex:
fmt = "latex"
else:
# Here we still use LaTeX but this is for Matplotlib's
# LaTeX engine - we can't use fmt='latex' as this
# doesn't produce LaTeX output that respects the fonts.
sep = (
r"$\mathregular{^h}$",
r"$\mathregular{^m}$",
r"$\mathregular{^s}$",
)
fmt = None
angles = Angle(values)
string = angles.to_string(
unit=unit,
precision=precision,
decimal=decimal,
fields=fields,
sep=sep,
format=fmt,
).tolist()
return string
else:
return []
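# Example (added for illustration, not part of the original module): the
# enclosing angle formatter/locator class is assumed to be the
# AngleFormatterLocator defined earlier in this file.
#
#     from astropy import units as u
#     fl = AngleFormatterLocator(number=4, format_unit=u.degree)
#     ticks, spacing = fl.locator(0, 10)   # limits in the coordinate unit
#     labels = fl.formatter(ticks, spacing)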
class ScalarFormatterLocator(BaseFormatterLocator):
"""
A joint formatter/locator.
"""
def __init__(
self,
values=None,
number=None,
spacing=None,
format=None,
unit=None,
format_unit=None,
):
        if unit is not None:
            format_unit = format_unit or unit
elif spacing is not None:
unit = spacing.unit
format_unit = format_unit or spacing.unit
elif values is not None:
unit = values.unit
format_unit = format_unit or values.unit
super().__init__(
values=values,
number=number,
spacing=spacing,
format=format,
unit=unit,
format_unit=format_unit,
)
@property
def spacing(self):
return self._spacing
@spacing.setter
def spacing(self, spacing):
if spacing is not None and not isinstance(spacing, u.Quantity):
raise TypeError("spacing should be an astropy.units.Quantity instance")
self._number = None
self._spacing = spacing
self._values = None
@property
def format(self):
return self._format
@format.setter
def format(self, value):
self._format = value
if value is None:
return
if SCAL_RE.match(value) is not None:
if "." in value:
self._precision = len(value) - value.index(".") - 1
else:
self._precision = 0
if self.spacing is not None and self.spacing < self.base_spacing:
warnings.warn(
"Spacing is too small - resetting spacing to match format"
)
self.spacing = self.base_spacing
if self.spacing is not None:
ratio = (self.spacing / self.base_spacing).decompose().value
remainder = ratio - np.round(ratio)
if abs(remainder) > 1.0e-10:
warnings.warn(
"Spacing is not a multiple of base spacing - resetting spacing"
" to match format"
)
self.spacing = self.base_spacing * max(1, round(ratio))
elif not value.startswith("%"):
raise ValueError(f"Invalid format: {value}")
@property
def base_spacing(self):
return self._format_unit / (10.0**self._precision)
def locator(self, value_min, value_max):
if self.values is not None:
# values were manually specified
return self.values, 1.1 * self._unit
else:
# In the special case where value_min is the same as value_max, we
# don't locate any ticks. This can occur for example when taking a
# slice for a cube (along the dimension sliced).
if value_min == value_max:
return [] * self._unit, 0 * self._unit
if self.spacing is not None:
# spacing was manually specified
spacing = self.spacing.to_value(self._unit)
elif self.number is not None:
# number of ticks was specified, work out optimal spacing
# first compute the exact spacing
dv = abs(float(value_max - value_min)) / self.number * self._unit
if (
self.format is not None
and (not self.format.startswith("%"))
and dv < self.base_spacing
):
# if the spacing is less than the minimum spacing allowed by the format, simply
# use the format precision instead.
spacing = self.base_spacing.to_value(self._unit)
else:
from .utils import select_step_scalar
spacing = select_step_scalar(
dv.to_value(self._format_unit)
) * self._format_unit.to(self._unit)
# We now find the interval values as multiples of the spacing and
# generate the tick positions from this
values = self._locate_values(value_min, value_max, spacing)
return values * spacing * self._unit, spacing * self._unit
def formatter(self, values, spacing, format="auto"):
if len(values) > 0:
if self.format is None:
if spacing.value < 1.0:
precision = -int(np.floor(np.log10(spacing.value)))
else:
precision = 0
elif self.format.startswith("%"):
return [(self.format % x.value) for x in values]
else:
precision = self._precision
return [
("{0:." + str(precision) + "f}").format(x.to_value(self._format_unit))
for x in values
]
else:
return []
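# Example (added for illustration): ScalarFormatterLocator driving the ticks
# for a non-angle axis, e.g. a spectral coordinate in metres.
#
#     from astropy import units as u
#     fl = ScalarFormatterLocator(number=5, unit=u.m)
#     ticks, spacing = fl.locator(0.0, 1.0)
#     labels = fl.formatter(ticks, spacing)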
|
84fc2e27aea5403bafc3c0b717103d2b8aaa5b6274e28dd3d4835fe0d6a48fd1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from collections import defaultdict
from functools import partial
import numpy as np
from matplotlib import rcParams
from matplotlib.artist import Artist
from matplotlib.axes import Axes, subplot_class_factory
from matplotlib.transforms import Affine2D, Bbox, Transform
import astropy.units as u
from astropy.coordinates import BaseCoordinateFrame, SkyCoord
from astropy.utils import minversion
from astropy.utils.compat.optional_deps import HAS_PIL
from astropy.wcs import WCS
from astropy.wcs.wcsapi import BaseHighLevelWCS, BaseLowLevelWCS
from .coordinates_map import CoordinatesMap
from .frame import RectangularFrame, RectangularFrame1D
from .transforms import CoordinateTransform
from .utils import get_coord_meta, transform_contour_set_inplace
from .wcsapi import IDENTITY, transform_coord_meta_from_wcs
__all__ = ["WCSAxes", "WCSAxesSubplot"]
VISUAL_PROPERTIES = ["facecolor", "edgecolor", "linewidth", "alpha", "linestyle"]
class _WCSAxesArtist(Artist):
"""This is a dummy artist to enforce the correct z-order of axis ticks,
tick labels, and gridlines.
FIXME: This is a bit of a hack. ``Axes.draw`` sorts the artists by zorder
and then renders them in sequence. For normal Matplotlib axes, the ticks,
tick labels, and gridlines are included in this list of artists and hence
are automatically drawn in the correct order. However, ``WCSAxes`` disables
the native ticks, labels, and gridlines. Instead, ``WCSAxes.draw`` renders
ersatz ticks, labels, and gridlines by explicitly calling the functions
``CoordinateHelper._draw_ticks``, ``CoordinateHelper._draw_grid``, etc.
This hack would not be necessary if ``WCSAxes`` drew ticks, tick labels,
    and gridlines in the standard way.
"""
def draw(self, renderer, *args, **kwargs):
self.axes.draw_wcsaxes(renderer)
class WCSAxes(Axes):
"""
The main axes class that can be used to show world coordinates from a WCS.
Parameters
----------
fig : `~matplotlib.figure.Figure`
The figure to add the axes to
*args
``*args`` can be a single ``(left, bottom, width, height)``
rectangle or a single `matplotlib.transforms.Bbox`. This specifies
the rectangle (in figure coordinates) where the Axes is positioned.
``*args`` can also consist of three numbers or a single three-digit
number; in the latter case, the digits are considered as
independent numbers. The numbers are interpreted as ``(nrows,
ncols, index)``: ``(nrows, ncols)`` specifies the size of an array
of subplots, and ``index`` is the 1-based index of the subplot
being created. Finally, ``*args`` can also directly be a
`matplotlib.gridspec.SubplotSpec` instance.
wcs : :class:`~astropy.wcs.WCS`, optional
The WCS for the data. If this is specified, ``transform`` cannot be
specified.
transform : `~matplotlib.transforms.Transform`, optional
The transform for the data. If this is specified, ``wcs`` cannot be
specified.
coord_meta : dict, optional
A dictionary providing additional metadata when ``transform`` is
specified. This should include the keys ``type``, ``wrap``, and
``unit``. Each of these should be a list with as many items as the
dimension of the WCS. The ``type`` entries should be one of
``longitude``, ``latitude``, or ``scalar``, the ``wrap`` entries should
give, for the longitude, the angle at which the coordinate wraps (and
`None` otherwise), and the ``unit`` should give the unit of the
coordinates as :class:`~astropy.units.Unit` instances. This can
optionally also include a ``format_unit`` entry giving the units to use
for the tick labels (if not specified, this defaults to ``unit``).
transData : `~matplotlib.transforms.Transform`, optional
Can be used to override the default data -> pixel mapping.
slices : tuple, optional
For WCS transformations with more than two dimensions, we need to
choose which dimensions are being shown in the 2D image. The slice
should contain one ``x`` entry, one ``y`` entry, and the rest of the
values should be integers indicating the slice through the data. The
order of the items in the slice should be the same as the order of the
dimensions in the :class:`~astropy.wcs.WCS`, and the opposite of the
order of the dimensions in Numpy. For example, ``(50, 'x', 'y')`` means
that the first WCS dimension (last Numpy dimension) will be sliced at
an index of 50, the second WCS and Numpy dimension will be shown on the
x axis, and the final WCS dimension (first Numpy dimension) will be
shown on the y-axis (and therefore the data will be plotted using
``data[:, :, 50].transpose()``)
frame_class : type, optional
The class for the frame, which should be a subclass of
:class:`~astropy.visualization.wcsaxes.frame.BaseFrame`. The default is to use a
:class:`~astropy.visualization.wcsaxes.frame.RectangularFrame`
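    Examples
    --------
    A minimal sketch (added for illustration; ``my_wcs`` is assumed to be a
    two-dimensional `~astropy.wcs.WCS`). Rather than being instantiated
    directly, this class is usually requested through the ``projection``
    keyword:

    >>> import matplotlib.pyplot as plt  # doctest: +SKIP
    >>> ax = plt.subplot(projection=my_wcs)  # doctest: +SKIP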
"""
def __init__(
self,
fig,
*args,
wcs=None,
transform=None,
coord_meta=None,
transData=None,
slices=None,
frame_class=None,
**kwargs,
):
""" """
super().__init__(fig, *args, **kwargs)
self._bboxes = []
if frame_class is not None:
self.frame_class = frame_class
elif wcs is not None and (
wcs.pixel_n_dim == 1 or (slices is not None and "y" not in slices)
):
self.frame_class = RectangularFrame1D
else:
self.frame_class = RectangularFrame
if transData is not None:
# User wants to override the transform for the final
# data->pixel mapping
self.transData = transData
self.reset_wcs(
wcs=wcs, slices=slices, transform=transform, coord_meta=coord_meta
)
self._hide_parent_artists()
self.format_coord = self._display_world_coords
self._display_coords_index = 0
fig.canvas.mpl_connect("key_press_event", self._set_cursor_prefs)
self.patch = self.coords.frame.patch
self._wcsaxesartist = _WCSAxesArtist()
self.add_artist(self._wcsaxesartist)
self._drawn = False
def _display_world_coords(self, x, y):
if not self._drawn:
return ""
if self._display_coords_index == -1:
return f"{x} {y} (pixel)"
pixel = np.array([x, y])
coords = self._all_coords[self._display_coords_index]
world = coords._transform.transform(np.array([pixel]))[0]
coord_strings = []
for idx, coord in enumerate(coords):
if coord.coord_index is not None:
coord_strings.append(
coord.format_coord(world[coord.coord_index], format="ascii")
)
coord_string = " ".join(coord_strings)
if self._display_coords_index == 0:
system = "world"
else:
system = f"world, overlay {self._display_coords_index}"
coord_string = f"{coord_string} ({system})"
return coord_string
def _set_cursor_prefs(self, event, **kwargs):
if event.key == "w":
self._display_coords_index += 1
if self._display_coords_index + 1 > len(self._all_coords):
self._display_coords_index = -1
def _hide_parent_artists(self):
# Turn off spines and current axes
for s in self.spines.values():
s.set_visible(False)
self.xaxis.set_visible(False)
if self.frame_class is not RectangularFrame1D:
self.yaxis.set_visible(False)
# We now overload ``imshow`` because we need to make sure that origin is
# set to ``lower`` for all images, which means that we need to flip RGB
# images.
def imshow(self, X, *args, **kwargs):
"""
Wrapper to Matplotlib's :meth:`~matplotlib.axes.Axes.imshow`.
If an RGB image is passed as a PIL object, it will be flipped
vertically and ``origin`` will be set to ``lower``, since WCS
transformations - like FITS files - assume that the origin is the lower
left pixel of the image (whereas RGB images have the origin in the top
left).
All arguments are passed to :meth:`~matplotlib.axes.Axes.imshow`.
"""
origin = kwargs.pop("origin", "lower")
# plt.imshow passes origin as None, which we should default to lower.
if origin is None:
origin = "lower"
elif origin == "upper":
raise ValueError("Cannot use images with origin='upper' in WCSAxes.")
if HAS_PIL:
from PIL.Image import Image
if minversion("PIL", "9.1"):
from PIL.Image import Transpose
FLIP_TOP_BOTTOM = Transpose.FLIP_TOP_BOTTOM
else:
from PIL.Image import FLIP_TOP_BOTTOM
if isinstance(X, Image) or hasattr(X, "getpixel"):
X = X.transpose(FLIP_TOP_BOTTOM)
return super().imshow(X, *args, origin=origin, **kwargs)
def contour(self, *args, **kwargs):
"""
Plot contours.
This is a custom implementation of :meth:`~matplotlib.axes.Axes.contour`
which applies the transform (if specified) to all contours in one go for
performance rather than to each contour line individually. All
positional and keyword arguments are the same as for
:meth:`~matplotlib.axes.Axes.contour`.
"""
# In Matplotlib, when calling contour() with a transform, each
# individual path in the contour map is transformed separately. However,
# this is much too slow for us since each call to the transforms results
# in an Astropy coordinate transformation, which has a non-negligible
# overhead - therefore a better approach is to override contour(), call
# the Matplotlib one with no transform, then apply the transform in one
# go to all the segments that make up the contour map.
transform = kwargs.pop("transform", None)
cset = super().contour(*args, **kwargs)
if transform is not None:
# The transform passed to self.contour will normally include
# a transData component at the end, but we can remove that since
# we are already working in data space.
transform = transform - self.transData
transform_contour_set_inplace(cset, transform)
return cset
def contourf(self, *args, **kwargs):
"""
Plot filled contours.
This is a custom implementation of :meth:`~matplotlib.axes.Axes.contourf`
which applies the transform (if specified) to all contours in one go for
performance rather than to each contour line individually. All
positional and keyword arguments are the same as for
:meth:`~matplotlib.axes.Axes.contourf`.
"""
# See notes for contour above.
transform = kwargs.pop("transform", None)
cset = super().contourf(*args, **kwargs)
if transform is not None:
# The transform passed to self.contour will normally include
# a transData component at the end, but we can remove that since
# we are already working in data space.
transform = transform - self.transData
transform_contour_set_inplace(cset, transform)
return cset
def _transform_plot_args(self, *args, **kwargs):
"""
Apply transformations to arguments to ``plot_coord`` and
``scatter_coord``.
"""
if isinstance(args[0], (SkyCoord, BaseCoordinateFrame)):
# Extract the frame from the first argument.
frame0 = args[0]
if isinstance(frame0, SkyCoord):
frame0 = frame0.frame
native_frame = self._transform_pixel2world.frame_out
# Transform to the native frame of the plot
frame0 = frame0.transform_to(native_frame)
plot_data = []
for coord in self.coords:
if coord.coord_type == "longitude":
plot_data.append(frame0.spherical.lon.to_value(u.deg))
elif coord.coord_type == "latitude":
plot_data.append(frame0.spherical.lat.to_value(u.deg))
else:
raise NotImplementedError(
"Coordinates cannot be plotted with this "
"method because the WCS does not represent longitude/latitude."
)
if "transform" in kwargs.keys():
raise TypeError(
"The 'transform' keyword argument is not allowed,"
" as it is automatically determined by the input coordinate frame."
)
transform = self.get_transform(native_frame)
kwargs.update({"transform": transform})
args = tuple(plot_data) + args[1:]
return args, kwargs
def plot_coord(self, *args, **kwargs):
"""
Plot `~astropy.coordinates.SkyCoord` or
`~astropy.coordinates.BaseCoordinateFrame` objects onto the axes.
The first argument to
:meth:`~astropy.visualization.wcsaxes.WCSAxes.plot_coord` should be a
coordinate, which will then be converted to the first two parameters to
`matplotlib.axes.Axes.plot`. All other arguments are the same as
`matplotlib.axes.Axes.plot`. If not specified a ``transform`` keyword
argument will be created based on the coordinate.
Parameters
----------
coordinate : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate object to plot on the axes. This is converted to the
first two arguments to `matplotlib.axes.Axes.plot`.
See Also
--------
matplotlib.axes.Axes.plot :
This method is called from this function with all arguments passed to it.
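        Examples
        --------
        A minimal sketch (added for illustration; ``ax`` is assumed to be a
        ``WCSAxes`` instance with a celestial WCS):

        >>> from astropy.coordinates import SkyCoord  # doctest: +SKIP
        >>> c = SkyCoord(10.68, 41.27, unit="deg", frame="icrs")  # doctest: +SKIP
        >>> ax.plot_coord(c, "o")  # doctest: +SKIP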
"""
args, kwargs = self._transform_plot_args(*args, **kwargs)
return super().plot(*args, **kwargs)
def scatter_coord(self, *args, **kwargs):
"""
Scatter `~astropy.coordinates.SkyCoord` or
`~astropy.coordinates.BaseCoordinateFrame` objects onto the axes.
The first argument to
:meth:`~astropy.visualization.wcsaxes.WCSAxes.scatter_coord` should be a
coordinate, which will then be converted to the first two parameters to
`matplotlib.axes.Axes.scatter`. All other arguments are the same as
`matplotlib.axes.Axes.scatter`. If not specified a ``transform``
keyword argument will be created based on the coordinate.
Parameters
----------
coordinate : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate object to scatter on the axes. This is converted to
the first two arguments to `matplotlib.axes.Axes.scatter`.
See Also
--------
matplotlib.axes.Axes.scatter : This method is called from this function with all arguments passed to it.
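        Examples
        --------
        A minimal sketch (added for illustration; ``ax`` is assumed to be a
        ``WCSAxes`` instance with a celestial WCS):

        >>> from astropy.coordinates import SkyCoord  # doctest: +SKIP
        >>> c = SkyCoord([10.0, 11.0], [41.0, 42.0], unit="deg")  # doctest: +SKIP
        >>> ax.scatter_coord(c)  # doctest: +SKIP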
"""
args, kwargs = self._transform_plot_args(*args, **kwargs)
return super().scatter(*args, **kwargs)
def reset_wcs(self, wcs=None, slices=None, transform=None, coord_meta=None):
"""
Reset the current Axes, to use a new WCS object.
"""
# Here determine all the coordinate axes that should be shown.
if wcs is None and transform is None:
self.wcs = IDENTITY
else:
            # We now force a call to 'set', which ensures the WCS object is
            # consistent; this only matters if the WCS has been set by hand.
            # For example if the user sets a celestial WCS by hand and
            # forgets to set the units, WCS.wcs.set() will do this.
if wcs is not None:
# Check if the WCS object is an instance of `astropy.wcs.WCS`
# This check is necessary as only `astropy.wcs.WCS` supports
# wcs.set() method
if isinstance(wcs, WCS):
wcs.wcs.set()
if isinstance(wcs, BaseHighLevelWCS):
wcs = wcs.low_level_wcs
self.wcs = wcs
# If we are making a new WCS, we need to preserve the path object since
# it may already be used by objects that have been plotted, and we need
# to continue updating it. CoordinatesMap will create a new frame
# instance, but we can tell that instance to keep using the old path.
if hasattr(self, "coords"):
previous_frame = {
"path": self.coords.frame._path,
"color": self.coords.frame.get_color(),
"linewidth": self.coords.frame.get_linewidth(),
}
else:
previous_frame = {"path": None}
if self.wcs is not None:
transform, coord_meta = transform_coord_meta_from_wcs(
self.wcs, self.frame_class, slices=slices
)
self.coords = CoordinatesMap(
self,
transform=transform,
coord_meta=coord_meta,
frame_class=self.frame_class,
previous_frame_path=previous_frame["path"],
)
self._transform_pixel2world = transform
if previous_frame["path"] is not None:
self.coords.frame.set_color(previous_frame["color"])
self.coords.frame.set_linewidth(previous_frame["linewidth"])
self._all_coords = [self.coords]
# Common default settings for Rectangular Frame
for ind, pos in enumerate(
coord_meta.get("default_axislabel_position", ["b", "l"])
):
self.coords[ind].set_axislabel_position(pos)
for ind, pos in enumerate(
coord_meta.get("default_ticklabel_position", ["b", "l"])
):
self.coords[ind].set_ticklabel_position(pos)
for ind, pos in enumerate(
coord_meta.get("default_ticks_position", ["bltr", "bltr"])
):
self.coords[ind].set_ticks_position(pos)
if rcParams["axes.grid"]:
self.grid()
def draw_wcsaxes(self, renderer):
if not self.axison:
return
        # Here we need to find out the range of all coordinates, and update
        # the range for each coordinate axis. For now, just assume it covers
        # the whole sky.
self._bboxes = []
# This generates a structure like [coords][axis] = [...]
ticklabels_bbox = defaultdict(partial(defaultdict, list))
visible_ticks = []
for coords in self._all_coords:
# Draw grids
coords.frame.update()
for coord in coords:
coord._draw_grid(renderer)
for coords in self._all_coords:
# Draw tick labels
for coord in coords:
coord._draw_ticks(
renderer,
bboxes=self._bboxes,
ticklabels_bbox=ticklabels_bbox[coord],
)
visible_ticks.extend(coord.ticklabels.get_visible_axes())
for coords in self._all_coords:
# Draw axis labels
for coord in coords:
coord._draw_axislabels(
renderer,
bboxes=self._bboxes,
ticklabels_bbox=ticklabels_bbox,
visible_ticks=visible_ticks,
)
self.coords.frame.draw(renderer)
def draw(self, renderer, **kwargs):
"""Draw the axes."""
# Before we do any drawing, we need to remove any existing grid lines
# drawn with contours, otherwise if we try and remove the contours
# part way through drawing, we end up with the issue mentioned in
# https://github.com/astropy/astropy/issues/12446
for coords in self._all_coords:
for coord in coords:
coord._clear_grid_contour()
        # In Axes.draw, the following code can result in the xlim and ylim
        # values changing, so we need to call it here first to make sure that
        # the limits are correct before we update the patch.
locator = self.get_axes_locator()
if locator:
pos = locator(self, renderer)
self.apply_aspect(pos)
else:
self.apply_aspect()
if self._axisbelow is True:
self._wcsaxesartist.set_zorder(0.5)
elif self._axisbelow is False:
self._wcsaxesartist.set_zorder(2.5)
else:
# 'line': above patches, below lines
self._wcsaxesartist.set_zorder(1.5)
        # We need to make sure that the frame path is up to date
self.coords.frame._update_patch_path()
super().draw(renderer, **kwargs)
self._drawn = True
# Matplotlib internally sometimes calls set_xlabel(label=...).
def set_xlabel(self, xlabel=None, labelpad=1, loc=None, **kwargs):
"""Set x-label."""
if xlabel is None:
xlabel = kwargs.pop("label", None)
if xlabel is None:
raise TypeError(
"set_xlabel() missing 1 required positional argument: 'xlabel'"
)
for coord in self.coords:
if (
"b" in coord.axislabels.get_visible_axes()
or "h" in coord.axislabels.get_visible_axes()
):
coord.set_axislabel(xlabel, minpad=labelpad, **kwargs)
break
def set_ylabel(self, ylabel=None, labelpad=1, loc=None, **kwargs):
"""Set y-label."""
if ylabel is None:
ylabel = kwargs.pop("label", None)
if ylabel is None:
raise TypeError(
"set_ylabel() missing 1 required positional argument: 'ylabel'"
)
if self.frame_class is RectangularFrame1D:
return super().set_ylabel(ylabel, labelpad=labelpad, **kwargs)
for coord in self.coords:
if (
"l" in coord.axislabels.get_visible_axes()
or "c" in coord.axislabels.get_visible_axes()
):
coord.set_axislabel(ylabel, minpad=labelpad, **kwargs)
break
def get_xlabel(self):
for coord in self.coords:
if (
"b" in coord.axislabels.get_visible_axes()
or "h" in coord.axislabels.get_visible_axes()
):
return coord.get_axislabel()
def get_ylabel(self):
if self.frame_class is RectangularFrame1D:
return super().get_ylabel()
for coord in self.coords:
if (
"l" in coord.axislabels.get_visible_axes()
or "c" in coord.axislabels.get_visible_axes()
):
return coord.get_axislabel()
def get_coords_overlay(self, frame, coord_meta=None):
"""Get coordinates overlay on given frame.
Parameters
----------
frame : str, `~astropy.coordinates.BaseCoordinateFrame`
Frame to get overlay for. If a string must correspond to
one of the coordinate frames registered in the astropy
frame transform graph.
coord_meta : dict
Metadata for the coordinates overlay.
Returns
-------
overlay : `~astropy.visualization.wcsaxes.CoordinatesMap`
Coordinates overlay.
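        Examples
        --------
        A minimal sketch (added for illustration; ``ax`` is assumed to be a
        ``WCSAxes`` instance in an equatorial frame):

        >>> overlay = ax.get_coords_overlay("galactic")  # doctest: +SKIP
        >>> overlay.grid(color="white", linestyle="dotted")  # doctest: +SKIP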
"""
# Here we can't use get_transform because that deals with
# pixel-to-pixel transformations when passing a WCS object.
if isinstance(frame, WCS):
transform, coord_meta = transform_coord_meta_from_wcs(
frame, self.frame_class
)
else:
transform = self._get_transform_no_transdata(frame)
if coord_meta is None:
coord_meta = get_coord_meta(frame)
coords = CoordinatesMap(
self,
transform=transform,
coord_meta=coord_meta,
frame_class=self.frame_class,
)
self._all_coords.append(coords)
# Common settings for overlay
coords[0].set_axislabel_position("t")
coords[1].set_axislabel_position("r")
coords[0].set_ticklabel_position("t")
coords[1].set_ticklabel_position("r")
self.overlay_coords = coords
return coords
def get_transform(self, frame):
"""
Return a transform from the specified frame to display coordinates.
This does not include the transData transformation
Parameters
----------
frame : :class:`~astropy.wcs.WCS` or :class:`~matplotlib.transforms.Transform` or str
The ``frame`` parameter can have several possible types:
* :class:`~astropy.wcs.WCS` instance: assumed to be a
transformation from pixel to world coordinates, where the
world coordinates are the same as those in the WCS
transformation used for this ``WCSAxes`` instance. This is
used for example to show contours, since this involves
plotting an array in pixel coordinates that are not the
final data coordinate and have to be transformed to the
common world coordinate system first.
* :class:`~matplotlib.transforms.Transform` instance: it is
assumed to be a transform to the world coordinates that are
part of the WCS used to instantiate this ``WCSAxes``
instance.
* ``'pixel'`` or ``'world'``: return a transformation that
allows users to plot in pixel/data coordinates (essentially
an identity transform) and ``world`` (the default
world-to-pixel transformation used to instantiate the
``WCSAxes`` instance).
* ``'fk5'`` or ``'galactic'``: return a transformation from
the specified frame to the pixel/data coordinates.
* :class:`~astropy.coordinates.BaseCoordinateFrame` instance.
"""
        """
        return self._get_transform_no_transdata(frame).inverted() + self.transData
def _get_transform_no_transdata(self, frame):
"""
Return a transform from data to the specified frame.
"""
if isinstance(frame, (BaseLowLevelWCS, BaseHighLevelWCS)):
if isinstance(frame, BaseHighLevelWCS):
frame = frame.low_level_wcs
transform, coord_meta = transform_coord_meta_from_wcs(
frame, self.frame_class
)
transform_world2pixel = transform.inverted()
if self._transform_pixel2world.frame_out == transform_world2pixel.frame_in:
return self._transform_pixel2world + transform_world2pixel
else:
return (
self._transform_pixel2world
+ CoordinateTransform(
self._transform_pixel2world.frame_out,
transform_world2pixel.frame_in,
)
+ transform_world2pixel
)
elif isinstance(frame, str) and frame == "pixel":
return Affine2D()
elif isinstance(frame, Transform):
return self._transform_pixel2world + frame
else:
if isinstance(frame, str) and frame == "world":
return self._transform_pixel2world
else:
coordinate_transform = CoordinateTransform(
self._transform_pixel2world.frame_out, frame
)
if coordinate_transform.same_frames:
return self._transform_pixel2world
else:
return self._transform_pixel2world + coordinate_transform
def get_tightbbox(self, renderer, *args, **kwargs):
# FIXME: we should determine what to do with the extra arguments here.
# Note that the expected signature of this method is different in
# Matplotlib 3.x compared to 2.x, but we only support 3.x now.
if not self.get_visible():
return
# Do a draw to populate the self._bboxes list
self.draw_wcsaxes(renderer)
bb = [b for b in self._bboxes if b and (b.width != 0 or b.height != 0)]
bb.append(super().get_tightbbox(renderer, *args, **kwargs))
if bb:
_bbox = Bbox.union(bb)
return _bbox
else:
return self.get_window_extent(renderer)
def grid(self, b=None, axis="both", *, which="major", **kwargs):
"""
Plot gridlines for both coordinates.
Standard matplotlib appearance options (color, alpha, etc.) can be
passed as keyword arguments. This behaves like `matplotlib.axes.Axes`
except that if no arguments are specified, the grid is shown rather
than toggled.
Parameters
----------
b : bool
Whether to show the gridlines.
axis : 'both', 'x', 'y'
Which axis to turn the gridlines on/off for.
which : str
Currently only ``'major'`` is supported.
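        Examples
        --------
        A minimal sketch (added for illustration; ``ax`` is assumed to be a
        ``WCSAxes`` instance):

        >>> ax.grid(color="white", linestyle="dotted", alpha=0.5)  # doctest: +SKIP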
"""
if not hasattr(self, "coords"):
return
if which != "major":
raise NotImplementedError(
"Plotting the grid for the minor ticks is not supported."
)
if axis == "both":
self.coords.grid(draw_grid=b, **kwargs)
elif axis == "x":
self.coords[0].grid(draw_grid=b, **kwargs)
elif axis == "y":
self.coords[1].grid(draw_grid=b, **kwargs)
else:
raise ValueError("axis should be one of x/y/both")
def tick_params(self, axis="both", **kwargs):
"""
Method to set the tick and tick label parameters in the same way as the
:meth:`~matplotlib.axes.Axes.tick_params` method in Matplotlib.
This is provided for convenience, but the recommended API is to use
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks_position`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel_position`,
and :meth:`~astropy.visualization.wcsaxes.CoordinateHelper.grid`.
Parameters
----------
axis : int or str, optional
Which axis to apply the parameters to. This defaults to 'both'
but this can also be set to an `int` or `str` that refers to the
axis to apply it to, following the valid values that can index
            ``ax.coords``. Note that ``'x'`` and ``'y'`` are also accepted in
the case of rectangular axes.
which : {'both', 'major', 'minor'}, optional
            Which ticks to apply the settings to. By default, settings are
applied to both major and minor ticks. Note that if ``'minor'`` is
specified, only the length of the ticks can be set currently.
direction : {'in', 'out'}, optional
Puts ticks inside the axes, or outside the axes.
length : float, optional
Tick length in points.
width : float, optional
Tick width in points.
color : color, optional
Tick color (accepts any valid Matplotlib color)
pad : float, optional
Distance in points between tick and label.
labelsize : float or str, optional
Tick label font size in points or as a string (e.g., 'large').
labelcolor : color, optional
Tick label color (accepts any valid Matplotlib color)
colors : color, optional
Changes the tick color and the label color to the same value
(accepts any valid Matplotlib color).
bottom, top, left, right : bool, optional
Where to draw the ticks. Note that this can only be given if a
specific coordinate is specified via the ``axis`` argument, and it
will not work correctly if the frame is not rectangular.
labelbottom, labeltop, labelleft, labelright : bool, optional
Where to draw the tick labels. Note that this can only be given if a
specific coordinate is specified via the ``axis`` argument, and it
will not work correctly if the frame is not rectangular.
grid_color : color, optional
The color of the grid lines (accepts any valid Matplotlib color).
grid_alpha : float, optional
Transparency of grid lines: 0 (transparent) to 1 (opaque).
grid_linewidth : float, optional
Width of grid lines in points.
grid_linestyle : str, optional
The style of the grid lines (accepts any valid Matplotlib line
style).
"""
if not hasattr(self, "coords"):
# Axes haven't been fully initialized yet, so just ignore, as
# Axes.__init__ calls this method
return
if axis == "both":
for pos in ("bottom", "left", "top", "right"):
if pos in kwargs:
raise ValueError(f"Cannot specify {pos}= when axis='both'")
if "label" + pos in kwargs:
raise ValueError(f"Cannot specify label{pos}= when axis='both'")
for coord in self.coords:
coord.tick_params(**kwargs)
elif axis in self.coords:
self.coords[axis].tick_params(**kwargs)
elif axis in ("x", "y") and self.frame_class is RectangularFrame:
spine = "b" if axis == "x" else "l"
for coord in self.coords:
if spine in coord.axislabels.get_visible_axes():
coord.tick_params(**kwargs)
# In the following, we put the generated subplot class in a temporary class
# and then inherit from it - if we don't do this, the generated class appears
# to belong in matplotlib, not in WCSAxes, from the API's point of view.
class WCSAxesSubplot(subplot_class_factory(WCSAxes)):
"""
    A subplot class for WCSAxes.
"""
pass
|
049e6d8f54e8ab05a45754d93c00c1a86228896356079647cfaae3b8eb128d59 | # Functions/classes for WCSAxes related to APE14 WCSes
import numpy as np
from astropy import units as u
from astropy.coordinates import ICRS, BaseCoordinateFrame, SkyCoord
from astropy.wcs import WCS
from astropy.wcs.utils import local_partial_pixel_derivatives
from astropy.wcs.wcsapi import SlicedLowLevelWCS
from .frame import EllipticalFrame, RectangularFrame, RectangularFrame1D
from .transforms import CurvedTransform
__all__ = [
"transform_coord_meta_from_wcs",
"WCSWorld2PixelTransform",
"WCSPixel2WorldTransform",
]
IDENTITY = WCS(naxis=2)
IDENTITY.wcs.ctype = ["X", "Y"]
IDENTITY.wcs.crval = [0.0, 0.0]
IDENTITY.wcs.crpix = [1.0, 1.0]
IDENTITY.wcs.cdelt = [1.0, 1.0]
def transform_coord_meta_from_wcs(wcs, frame_class, slices=None):
if slices is not None:
slices = tuple(slices)
if wcs.pixel_n_dim > 2:
if slices is None:
raise ValueError(
"WCS has more than 2 pixel dimensions, so 'slices' should be set"
)
elif len(slices) != wcs.pixel_n_dim:
raise ValueError(
"'slices' should have as many elements as WCS "
"has pixel dimensions (should be {})".format(wcs.pixel_n_dim)
)
is_fits_wcs = isinstance(wcs, WCS) or (
isinstance(wcs, SlicedLowLevelWCS) and isinstance(wcs._wcs, WCS)
)
coord_meta = {}
coord_meta["name"] = []
coord_meta["type"] = []
coord_meta["wrap"] = []
coord_meta["unit"] = []
coord_meta["visible"] = []
coord_meta["format_unit"] = []
for idx in range(wcs.world_n_dim):
axis_type = wcs.world_axis_physical_types[idx]
axis_unit = u.Unit(wcs.world_axis_units[idx])
coord_wrap = None
format_unit = axis_unit
coord_type = "scalar"
if axis_type is not None:
axis_type_split = axis_type.split(".")
if "pos.helioprojective.lon" in axis_type:
coord_wrap = 180.0 * u.deg
format_unit = u.arcsec
coord_type = "longitude"
elif "pos.helioprojective.lat" in axis_type:
format_unit = u.arcsec
coord_type = "latitude"
elif "pos.heliographic.stonyhurst.lon" in axis_type:
coord_wrap = 180.0 * u.deg
format_unit = u.deg
coord_type = "longitude"
elif "pos.heliographic.stonyhurst.lat" in axis_type:
format_unit = u.deg
coord_type = "latitude"
elif "pos.heliographic.carrington.lon" in axis_type:
coord_wrap = 360.0 * u.deg
format_unit = u.deg
coord_type = "longitude"
elif "pos.heliographic.carrington.lat" in axis_type:
format_unit = u.deg
coord_type = "latitude"
elif "pos" in axis_type_split:
if "lon" in axis_type_split:
coord_type = "longitude"
elif "lat" in axis_type_split:
coord_type = "latitude"
elif "ra" in axis_type_split:
coord_type = "longitude"
format_unit = u.hourangle
elif "dec" in axis_type_split:
coord_type = "latitude"
elif "alt" in axis_type_split:
coord_type = "longitude"
elif "az" in axis_type_split:
coord_type = "latitude"
elif "long" in axis_type_split:
coord_type = "longitude"
coord_meta["type"].append(coord_type)
coord_meta["wrap"].append(coord_wrap)
coord_meta["format_unit"].append(format_unit)
coord_meta["unit"].append(axis_unit)
# For FITS-WCS, for backward-compatibility, we need to make sure that we
# provide aliases based on CTYPE for the name.
if is_fits_wcs:
name = []
if isinstance(wcs, WCS):
name.append(wcs.wcs.ctype[idx].lower())
name.append(wcs.wcs.ctype[idx][:4].replace("-", "").lower())
elif isinstance(wcs, SlicedLowLevelWCS):
name.append(wcs._wcs.wcs.ctype[wcs._world_keep[idx]].lower())
name.append(
wcs._wcs.wcs.ctype[wcs._world_keep[idx]][:4]
.replace("-", "")
.lower()
)
if name[0] == name[1]:
name = name[0:1]
if axis_type:
if axis_type not in name:
name.insert(0, axis_type)
if wcs.world_axis_names and wcs.world_axis_names[idx]:
if wcs.world_axis_names[idx] not in name:
name.append(wcs.world_axis_names[idx])
name = tuple(name) if len(name) > 1 else name[0]
else:
name = axis_type or ""
if wcs.world_axis_names:
name = (
(name, wcs.world_axis_names[idx])
if wcs.world_axis_names[idx]
else name
)
coord_meta["name"].append(name)
coord_meta["default_axislabel_position"] = [""] * wcs.world_n_dim
coord_meta["default_ticklabel_position"] = [""] * wcs.world_n_dim
coord_meta["default_ticks_position"] = [""] * wcs.world_n_dim
# If the world axis has a name use it, else display the world axis physical type.
fallback_labels = [
name[0] if isinstance(name, (list, tuple)) else name
for name in coord_meta["name"]
]
coord_meta["default_axis_label"] = [
wcs.world_axis_names[i] or fallback_label
for i, fallback_label in enumerate(fallback_labels)
]
transform_wcs, invert_xy, world_map = apply_slices(wcs, slices)
transform = WCSPixel2WorldTransform(transform_wcs, invert_xy=invert_xy)
for i in range(len(coord_meta["type"])):
coord_meta["visible"].append(i in world_map)
inv_all_corr = [False] * wcs.world_n_dim
m = transform_wcs.axis_correlation_matrix.copy()
if invert_xy:
inv_all_corr = np.all(m, axis=1)
m = m[:, ::-1]
if frame_class is RectangularFrame:
for i, spine_name in enumerate("bltr"):
pos = np.nonzero(m[:, i % 2])[0]
# If all the axes we have are correlated with each other and we
# have inverted the axes, then we need to reverse the index so we
# put the 'y' on the left.
if inv_all_corr[i % 2]:
pos = pos[::-1]
if len(pos) > 0:
index = world_map[pos[0]]
coord_meta["default_axislabel_position"][index] = spine_name
coord_meta["default_ticklabel_position"][index] = spine_name
coord_meta["default_ticks_position"][index] = spine_name
m[pos[0], :] = 0
# In the special and common case where the frame is rectangular and
# we are dealing with 2-d WCS (after slicing), we show all ticks on
# all axes for backward-compatibility.
if len(world_map) == 2:
for index in world_map:
coord_meta["default_ticks_position"][index] = "bltr"
elif frame_class is RectangularFrame1D:
derivs = np.abs(
local_partial_pixel_derivatives(
transform_wcs,
*[0] * transform_wcs.pixel_n_dim,
normalize_by_world=False,
)
)[:, 0]
for i, spine_name in enumerate("bt"):
# Here we are iterating over the correlated axes in world axis order.
# We want to sort the correlated axes by their partial derivatives,
# so we put the most rapidly changing world axis on the bottom.
pos = np.nonzero(m[:, 0])[0]
order = np.argsort(derivs[pos])[::-1] # Sort largest to smallest
pos = pos[order]
if len(pos) > 0:
index = world_map[pos[0]]
coord_meta["default_axislabel_position"][index] = spine_name
coord_meta["default_ticklabel_position"][index] = spine_name
coord_meta["default_ticks_position"][index] = spine_name
m[pos[0], :] = 0
# In the special and common case where the frame is rectangular and
# we are dealing with 2-d WCS (after slicing), we show all ticks on
# all axes for backward-compatibility.
if len(world_map) == 1:
for index in world_map:
coord_meta["default_ticks_position"][index] = "bt"
elif frame_class is EllipticalFrame:
if "longitude" in coord_meta["type"]:
lon_idx = coord_meta["type"].index("longitude")
coord_meta["default_axislabel_position"][lon_idx] = "h"
coord_meta["default_ticklabel_position"][lon_idx] = "h"
coord_meta["default_ticks_position"][lon_idx] = "h"
if "latitude" in coord_meta["type"]:
lat_idx = coord_meta["type"].index("latitude")
coord_meta["default_axislabel_position"][lat_idx] = "c"
coord_meta["default_ticklabel_position"][lat_idx] = "c"
coord_meta["default_ticks_position"][lat_idx] = "c"
else:
for index in range(len(coord_meta["type"])):
if index in world_map:
coord_meta["default_axislabel_position"][
index
] = frame_class.spine_names
coord_meta["default_ticklabel_position"][
index
] = frame_class.spine_names
coord_meta["default_ticks_position"][index] = frame_class.spine_names
return transform, coord_meta
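# Example (added for illustration; ``my_wcs`` is assumed to be a celestial
# two-dimensional WCS):
#
#     from astropy.visualization.wcsaxes.frame import RectangularFrame
#     transform, coord_meta = transform_coord_meta_from_wcs(my_wcs, RectangularFrame)
#     coord_meta["type"]   # e.g. ['longitude', 'latitude']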
def apply_slices(wcs, slices):
"""
Take the input WCS and slices and return a sliced WCS for the transform and
a mapping of world axes in the sliced WCS to the input WCS.
"""
if isinstance(wcs, SlicedLowLevelWCS):
world_keep = list(wcs._world_keep)
else:
world_keep = list(range(wcs.world_n_dim))
# world_map is the index of the world axis in the input WCS for a given
# axis in the transform_wcs
world_map = list(range(wcs.world_n_dim))
transform_wcs = wcs
invert_xy = False
if slices is not None:
wcs_slice = list(slices)
wcs_slice[wcs_slice.index("x")] = slice(None)
if "y" in slices:
wcs_slice[wcs_slice.index("y")] = slice(None)
invert_xy = slices.index("x") > slices.index("y")
transform_wcs = SlicedLowLevelWCS(wcs, wcs_slice[::-1])
world_map = tuple(world_keep.index(i) for i in transform_wcs._world_keep)
return transform_wcs, invert_xy, world_map
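# Example (added for illustration; ``wcs_3d`` is assumed to be a 3-d WCS):
# slicing the first WCS dimension at pixel index 50 and showing the other two.
#
#     transform_wcs, invert_xy, world_map = apply_slices(wcs_3d, (50, "x", "y"))
#
# ``world_map`` then maps each world axis of ``transform_wcs`` back to the
# corresponding world axis of the input WCS.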
def wcsapi_to_celestial_frame(wcs):
for cls, _, kwargs, *_ in wcs.world_axis_object_classes.values():
if issubclass(cls, SkyCoord):
return kwargs.get("frame", ICRS())
elif issubclass(cls, BaseCoordinateFrame):
return cls(**kwargs)
class WCSWorld2PixelTransform(CurvedTransform):
"""
WCS transformation from world to pixel coordinates.
"""
has_inverse = True
frame_in = None
def __init__(self, wcs, invert_xy=False):
super().__init__()
if wcs.pixel_n_dim > 2:
raise ValueError("Only pixel_n_dim =< 2 is supported")
self.wcs = wcs
self.invert_xy = invert_xy
self.frame_in = wcsapi_to_celestial_frame(wcs)
def __eq__(self, other):
return (
isinstance(other, type(self))
and self.wcs is other.wcs
and self.invert_xy == other.invert_xy
)
@property
def input_dims(self):
return self.wcs.world_n_dim
def transform(self, world):
# Convert to a list of arrays
world = list(world.T)
if len(world) != self.wcs.world_n_dim:
raise ValueError(
f"Expected {self.wcs.world_n_dim} world coordinates, got {len(world)} "
)
if len(world[0]) == 0:
pixel = np.zeros((0, 2))
else:
pixel = self.wcs.world_to_pixel_values(*world)
if self.invert_xy:
pixel = pixel[::-1]
pixel = np.array(pixel).T
return pixel
transform_non_affine = transform
def inverted(self):
"""
Return the inverse of the transform.
"""
return WCSPixel2WorldTransform(self.wcs, invert_xy=self.invert_xy)
class WCSPixel2WorldTransform(CurvedTransform):
"""
WCS transformation from pixel to world coordinates.
"""
has_inverse = True
def __init__(self, wcs, invert_xy=False):
super().__init__()
if wcs.pixel_n_dim > 2:
raise ValueError("Only pixel_n_dim =< 2 is supported")
self.wcs = wcs
self.invert_xy = invert_xy
self.frame_out = wcsapi_to_celestial_frame(wcs)
def __eq__(self, other):
return (
isinstance(other, type(self))
and self.wcs is other.wcs
and self.invert_xy == other.invert_xy
)
@property
def output_dims(self):
return self.wcs.world_n_dim
def transform(self, pixel):
# Convert to a list of arrays
pixel = list(pixel.T)
if len(pixel) != self.wcs.pixel_n_dim:
raise ValueError(
f"Expected {self.wcs.pixel_n_dim} world coordinates, got {len(pixel)} "
)
if self.invert_xy:
pixel = pixel[::-1]
if len(pixel[0]) == 0:
world = np.zeros((0, self.wcs.world_n_dim))
else:
world = self.wcs.pixel_to_world_values(*pixel)
if self.wcs.world_n_dim == 1:
world = [world]
world = np.array(world).T
return world
transform_non_affine = transform
def inverted(self):
"""
Return the inverse of the transform.
"""
return WCSWorld2PixelTransform(self.wcs, invert_xy=self.invert_xy)
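# Example (added for illustration; ``my_wcs`` is assumed to be a 2-d WCS):
# a pixel -> world -> pixel round trip using the two transforms above.
#
#     import numpy as np
#     pix2world = WCSPixel2WorldTransform(my_wcs)
#     world = pix2world.transform(np.array([[0.0, 0.0]]))
#     pixel = pix2world.inverted().transform(world)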
|
cfc31042b07f463c8316f905a9014c699ac846d9c4c26a6eb9ef0eaff6f316d8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import numpy as np
from matplotlib.patches import Polygon
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.coordinates.matrix_utilities import rotation_matrix
from astropy.coordinates.representation import (
SphericalRepresentation,
UnitSphericalRepresentation,
)
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ["Quadrangle", "SphericalCircle"]
# Monkey-patch the docs to fix CapStyle and JoinStyle subs.
# TODO! delete when upstream fix matplotlib/matplotlib#19839
Polygon.__init__.__doc__ = Polygon.__init__.__doc__.replace(
"`.CapStyle`", "``matplotlib._enums.CapStyle``"
)
Polygon.__init__.__doc__ = Polygon.__init__.__doc__.replace(
"`.JoinStyle`", "``matplotlib._enums.JoinStyle``"
)
Polygon.set_capstyle.__doc__ = Polygon.set_capstyle.__doc__.replace(
"`.CapStyle`", "``matplotlib._enums.CapStyle``"
)
Polygon.set_joinstyle.__doc__ = Polygon.set_joinstyle.__doc__.replace(
"`.JoinStyle`", "``matplotlib._enums.JoinStyle``"
)
def _rotate_polygon(lon, lat, lon0, lat0):
"""
Given a polygon with vertices defined by (lon, lat), rotate the polygon
such that the North pole of the spherical coordinates is now at (lon0,
lat0). Therefore, to end up with a polygon centered on (lon0, lat0), the
polygon should initially be drawn around the North pole.
"""
# Create a representation object
polygon = UnitSphericalRepresentation(lon=lon, lat=lat)
# Determine rotation matrix to make it so that the circle is centered
# on the correct longitude/latitude.
transform_matrix = rotation_matrix(-lon0, axis="z") @ rotation_matrix(
-(0.5 * np.pi * u.radian - lat0), axis="y"
)
# Apply 3D rotation
polygon = polygon.to_cartesian()
polygon = polygon.transform(transform_matrix)
polygon = UnitSphericalRepresentation.from_cartesian(polygon)
return polygon.lon, polygon.lat
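# Example (added for illustration): rotating a small circle of vertices drawn
# around the North pole so that it ends up centred on (30 deg, 45 deg).
#
#     import numpy as np
#     from astropy import units as u
#     lon = np.linspace(0, 2 * np.pi, 16) * u.radian
#     lat = np.repeat(0.5 * np.pi - 0.1, 16) * u.radian
#     lon_new, lat_new = _rotate_polygon(lon, lat, 30 * u.deg, 45 * u.deg)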
class SphericalCircle(Polygon):
"""
Create a patch representing a spherical circle - that is, a circle that is
formed of all the points that are within a certain angle of the central
coordinates on a sphere. Here we assume that latitude goes from -90 to +90.
This class is needed in cases where the user wants to add a circular patch
to a celestial image, since otherwise the circle will be distorted, because
a fixed interval in longitude corresponds to a different angle on the sky
depending on the latitude.
Parameters
----------
center : tuple or `~astropy.units.Quantity` ['angle']
This can be either a tuple of two `~astropy.units.Quantity` objects, or
a single `~astropy.units.Quantity` array with two elements
or a `~astropy.coordinates.SkyCoord` object.
radius : `~astropy.units.Quantity` ['angle']
The radius of the circle
resolution : int, optional
The number of points that make up the circle - increase this to get a
smoother circle.
vertex_unit : `~astropy.units.Unit`
The units in which the resulting polygon should be defined - this
should match the unit that the transformation (e.g. the WCS
transformation) expects as input.
Notes
-----
Additional keyword arguments are passed to `~matplotlib.patches.Polygon`
"""
def __init__(self, center, radius, resolution=100, vertex_unit=u.degree, **kwargs):
# Extract longitude/latitude, either from a SkyCoord object, or
# from a tuple of two quantities or a single 2-element Quantity.
# The SkyCoord is converted to SphericalRepresentation, if not already.
if isinstance(center, SkyCoord):
rep_type = center.representation_type
if not issubclass(
rep_type, (SphericalRepresentation, UnitSphericalRepresentation)
):
warnings.warn(
f"Received `center` of representation type {rep_type} "
"will be converted to SphericalRepresentation ",
AstropyUserWarning,
)
longitude, latitude = center.spherical.lon, center.spherical.lat
else:
longitude, latitude = center
# Start off by generating the circle around the North pole
lon = np.linspace(0.0, 2 * np.pi, resolution + 1)[:-1] * u.radian
lat = np.repeat(0.5 * np.pi - radius.to_value(u.radian), resolution) * u.radian
lon, lat = _rotate_polygon(lon, lat, longitude, latitude)
# Extract new longitude/latitude in the requested units
lon = lon.to_value(vertex_unit)
lat = lat.to_value(vertex_unit)
# Create polygon vertices
vertices = np.array([lon, lat]).transpose()
super().__init__(vertices, **kwargs)
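# Example (added for illustration; ``ax`` is assumed to be a WCSAxes instance):
#
#     from astropy import units as u
#     circle = SphericalCircle((10 * u.deg, 20 * u.deg), 1 * u.deg,
#                              edgecolor="white", facecolor="none",
#                              transform=ax.get_transform("icrs"))
#     ax.add_patch(circle)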
class Quadrangle(Polygon):
"""
Create a patch representing a latitude-longitude quadrangle.
The edges of the quadrangle lie on two lines of constant longitude and two
lines of constant latitude (or the equivalent component names in the
coordinate frame of interest, such as right ascension and declination).
Note that lines of constant latitude are not great circles.
Unlike `matplotlib.patches.Rectangle`, the edges of this patch will render
as curved lines if appropriate for the WCS transformation.
Parameters
----------
anchor : tuple or `~astropy.units.Quantity` ['angle']
This can be either a tuple of two `~astropy.units.Quantity` objects, or
a single `~astropy.units.Quantity` array with two elements.
width : `~astropy.units.Quantity` ['angle']
The width of the quadrangle in longitude (or, e.g., right ascension)
height : `~astropy.units.Quantity` ['angle']
The height of the quadrangle in latitude (or, e.g., declination)
resolution : int, optional
The number of points that make up each side of the quadrangle -
increase this to get a smoother quadrangle.
vertex_unit : `~astropy.units.Unit` ['angle']
The units in which the resulting polygon should be defined - this
should match the unit that the transformation (e.g. the WCS
transformation) expects as input.
Notes
-----
Additional keyword arguments are passed to `~matplotlib.patches.Polygon`
"""
def __init__(
self, anchor, width, height, resolution=100, vertex_unit=u.degree, **kwargs
):
# Extract longitude/latitude, either from a tuple of two quantities, or
# a single 2-element Quantity.
longitude, latitude = u.Quantity(anchor).to_value(vertex_unit)
# Convert the quadrangle dimensions to the appropriate units
width = width.to_value(vertex_unit)
height = height.to_value(vertex_unit)
# Create progressions in longitude and latitude
lon_seq = longitude + np.linspace(0, width, resolution + 1)
lat_seq = latitude + np.linspace(0, height, resolution + 1)
# Trace the path of the quadrangle
lon = np.concatenate(
[
lon_seq[:-1],
np.repeat(lon_seq[-1], resolution),
np.flip(lon_seq[1:]),
np.repeat(lon_seq[0], resolution),
]
)
lat = np.concatenate(
[
np.repeat(lat_seq[0], resolution),
lat_seq[:-1],
np.repeat(lat_seq[-1], resolution),
np.flip(lat_seq[1:]),
]
)
# Create polygon vertices
vertices = np.array([lon, lat]).transpose()
super().__init__(vertices, **kwargs)
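# Example (added for illustration; ``ax`` is assumed to be a WCSAxes instance):
#
#     from astropy import units as u
#     quad = Quadrangle((10 * u.deg, 20 * u.deg), 3 * u.deg, 2 * u.deg,
#                       edgecolor="white", facecolor="none",
#                       transform=ax.get_transform("icrs"))
#     ax.add_patch(quad)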
|