# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements classes (called Fitters) which combine optimization
algorithms (typically from `scipy.optimize`) with statistic functions to perform
fitting. Fitters are implemented as callable classes. In addition to the data
to fit, the ``__call__`` method takes an instance of
`~astropy.modeling.core.FittableModel` as input, and returns a copy of the
model with its parameters determined by the optimizer.
Optimization algorithms, called "optimizers", are implemented in
`~astropy.modeling.optimizers` and statistic functions are in
`~astropy.modeling.statistic`. The goal is to provide an easy-to-extend
framework and allow users to easily create new fitters by combining statistics
with optimizers.
There are two exceptions to the above scheme.
`~astropy.modeling.fitting.LinearLSQFitter` uses Numpy's `~numpy.linalg.lstsq`
function. `~astropy.modeling.fitting.LevMarLSQFitter` uses
`~scipy.optimize.leastsq` which combines optimization and statistic in one
implementation.
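
Examples
--------
As a minimal sketch of the intended usage (requires `scipy` for the
non-linear fitters), a 1D Gaussian can be fit like::

    >>> import numpy as np
    >>> from astropy.modeling import models, fitting
    >>> x = np.linspace(-5., 5., 200)
    >>> y = 3 * np.exp(-0.5 * (x - 1.3) ** 2 / 0.8 ** 2)
    >>> fitter = fitting.TRFLSQFitter()
    >>> fitted = fitter(models.Gaussian1D(amplitude=2., mean=1., stddev=1.), x, y)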
"""
# pylint: disable=invalid-name
import abc
import inspect
import operator
import warnings
from functools import reduce, wraps
from importlib.metadata import entry_points
import numpy as np
from astropy.units import Quantity
from astropy.utils.decorators import deprecated
from astropy.utils.exceptions import AstropyUserWarning
from .optimizers import DEFAULT_ACC, DEFAULT_EPS, DEFAULT_MAXITER, SLSQP, Simplex
from .spline import (
SplineExactKnotsFitter,
SplineInterpolateFitter,
SplineSmoothingFitter,
SplineSplrepFitter,
)
from .statistic import leastsquare
from .utils import _combine_equivalency_dict, poly_map_domain
__all__ = [
"LinearLSQFitter",
"LevMarLSQFitter",
"TRFLSQFitter",
"DogBoxLSQFitter",
"LMLSQFitter",
"FittingWithOutlierRemoval",
"SLSQPLSQFitter",
"SimplexLSQFitter",
"JointFitter",
"Fitter",
"ModelLinearityError",
"ModelsError",
"SplineExactKnotsFitter",
"SplineInterpolateFitter",
"SplineSmoothingFitter",
"SplineSplrepFitter",
]
# Statistic functions implemented in `astropy.modeling.statistic.py`
STATISTICS = [leastsquare]
# Optimizers implemented in `astropy.modeling.optimizers.py`
OPTIMIZERS = [Simplex, SLSQP]
class NonFiniteValueError(RuntimeError):
"""
    Error raised when a non-finite value is encountered during fitting.
"""
class Covariance:
"""Class for covariance matrix calculated by fitter."""
def __init__(self, cov_matrix, param_names):
self.cov_matrix = cov_matrix
self.param_names = param_names
def pprint(self, max_lines, round_val):
# Print and label lower triangle of covariance matrix
# Print rows for params up to `max_lines`, round floats to 'round_val'
longest_name = max(len(x) for x in self.param_names)
ret_str = "parameter variances / covariances \n"
fstring = f'{"": <{longest_name}}| {{0}}\n'
for i, row in enumerate(self.cov_matrix):
if i <= max_lines - 1:
param = self.param_names[i]
ret_str += fstring.replace(" " * len(param), param, 1).format(
repr(np.round(row[: i + 1], round_val))[7:-2]
)
else:
ret_str += "..."
return ret_str.rstrip()
def __repr__(self):
return self.pprint(max_lines=10, round_val=3)
def __getitem__(self, params):
# index covariance matrix by parameter names or indices
if len(params) != 2:
raise ValueError("Covariance must be indexed by two values.")
if all(isinstance(item, str) for item in params):
i1, i2 = self.param_names.index(params[0]), self.param_names.index(
params[1]
)
elif all(isinstance(item, int) for item in params):
i1, i2 = params
else:
raise TypeError(
"Covariance can be indexed by two parameter names or integer indices."
)
return self.cov_matrix[i1][i2]
class StandardDeviations:
"""Class for fitting uncertainties."""
def __init__(self, cov_matrix, param_names):
self.param_names = param_names
self.stds = self._calc_stds(cov_matrix)
def _calc_stds(self, cov_matrix):
        # sometimes scipy lstsq returns nonsensical negative values in the
        # diagonal of the cov_x it computes.
stds = [np.sqrt(x) if x > 0 else None for x in np.diag(cov_matrix)]
return stds
def pprint(self, max_lines, round_val):
longest_name = max(len(x) for x in self.param_names)
ret_str = "standard deviations\n"
for i, std in enumerate(self.stds):
if i <= max_lines - 1:
param = self.param_names[i]
ret_str += (
f"{param}{' ' * (longest_name - len(param))}| "
f"{np.round(std, round_val)}\n"
)
else:
ret_str += "..."
return ret_str.rstrip()
def __repr__(self):
return self.pprint(max_lines=10, round_val=3)
def __getitem__(self, param):
if isinstance(param, str):
i = self.param_names.index(param)
elif isinstance(param, int):
i = param
else:
raise TypeError(
"Standard deviation can be indexed by parameter name or integer."
)
return self.stds[i]
class ModelsError(Exception):
"""Base class for model exceptions."""
class ModelLinearityError(ModelsError):
"""Raised when a non-linear model is passed to a linear fitter."""
class UnsupportedConstraintError(ModelsError, ValueError):
"""
Raised when a fitter does not support a type of constraint.
"""
class _FitterMeta(abc.ABCMeta):
"""
Currently just provides a registry for all Fitter classes.
"""
registry = set()
def __new__(mcls, name, bases, members):
cls = super().__new__(mcls, name, bases, members)
if not inspect.isabstract(cls) and not name.startswith("_"):
mcls.registry.add(cls)
return cls
def fitter_unit_support(func):
"""
This is a decorator that can be used to add support for dealing with
quantities to any __call__ method on a fitter which may not support
quantities itself. This is done by temporarily removing units from all
parameters then adding them back once the fitting has completed.
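
    For example, as is done for the fitters in this module, a fitter's
    ``__call__`` method can simply be wrapped with this decorator::

        @fitter_unit_support
        def __call__(self, model, x, y, z=None, **kwargs):
            ...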
"""
@wraps(func)
def wrapper(self, model, x, y, z=None, **kwargs):
equivalencies = kwargs.pop("equivalencies", None)
data_has_units = (
isinstance(x, Quantity)
or isinstance(y, Quantity)
or isinstance(z, Quantity)
)
model_has_units = model._has_units
if data_has_units or model_has_units:
if model._supports_unit_fitting:
# We now combine any instance-level input equivalencies with user
# specified ones at call-time.
input_units_equivalencies = _combine_equivalency_dict(
model.inputs, equivalencies, model.input_units_equivalencies
)
# If input_units is defined, we transform the input data into those
                # expected by the model. We hard-code the input names 'x' and 'y'
# here since FittableModel instances have input names ('x',) or
# ('x', 'y')
if model.input_units is not None:
if isinstance(x, Quantity):
x = x.to(
model.input_units[model.inputs[0]],
equivalencies=input_units_equivalencies[model.inputs[0]],
)
if isinstance(y, Quantity) and z is not None:
y = y.to(
model.input_units[model.inputs[1]],
equivalencies=input_units_equivalencies[model.inputs[1]],
)
# Create a dictionary mapping the real model inputs and outputs
# names to the data. This remapping of names must be done here, after
# the input data is converted to the correct units.
rename_data = {model.inputs[0]: x}
if z is not None:
rename_data[model.outputs[0]] = z
rename_data[model.inputs[1]] = y
else:
rename_data[model.outputs[0]] = y
rename_data["z"] = None
# We now strip away the units from the parameters, taking care to
# first convert any parameters to the units that correspond to the
                # input units (to make sure that initial guesses on the
                # parameters are in the right unit system)
model = model.without_units_for_data(**rename_data)
if isinstance(model, tuple):
rename_data["_left_kwargs"] = model[1]
rename_data["_right_kwargs"] = model[2]
model = model[0]
# We strip away the units from the input itself
add_back_units = False
if isinstance(x, Quantity):
add_back_units = True
xdata = x.value
else:
xdata = np.asarray(x)
if isinstance(y, Quantity):
add_back_units = True
ydata = y.value
else:
ydata = np.asarray(y)
if z is not None:
if isinstance(z, Quantity):
add_back_units = True
zdata = z.value
else:
zdata = np.asarray(z)
# We run the fitting
if z is None:
model_new = func(self, model, xdata, ydata, **kwargs)
else:
model_new = func(self, model, xdata, ydata, zdata, **kwargs)
# And finally we add back units to the parameters
if add_back_units:
model_new = model_new.with_units_from_data(**rename_data)
return model_new
else:
raise NotImplementedError(
"This model does not support being fit to data with units."
)
else:
return func(self, model, x, y, z=z, **kwargs)
return wrapper
class Fitter(metaclass=_FitterMeta):
"""
Base class for all fitters.
Parameters
----------
optimizer : callable
A callable implementing an optimization algorithm
statistic : callable
Statistic function
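
    Examples
    --------
    A new fitter is created by pairing an optimizer with a statistic, as
    `SLSQPLSQFitter` in this module does::

        class SLSQPLSQFitter(Fitter):
            supported_constraints = SLSQP.supported_constraints

            def __init__(self):
                super().__init__(optimizer=SLSQP, statistic=leastsquare)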
"""
supported_constraints = []
def __init__(self, optimizer, statistic):
if optimizer is None:
raise ValueError("Expected an optimizer.")
if statistic is None:
raise ValueError("Expected a statistic function.")
if isinstance(optimizer, type):
# a callable class
self._opt_method = optimizer()
elif inspect.isfunction(optimizer):
self._opt_method = optimizer
else:
raise ValueError("Expected optimizer to be a callable class or a function.")
if isinstance(statistic, type):
self._stat_method = statistic()
else:
self._stat_method = statistic
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
parameters returned by the fitter
args : list
[model, [other_args], [input coordinates]]
            other_args may include weights or any other quantities specific to
a statistic
Notes
-----
The list of arguments (args) is set in the `__call__` method.
Fitters may overwrite this method, e.g. when statistic functions
require other arguments.
"""
model = args[0]
meas = args[-1]
fitter_to_model_params(model, fps)
res = self._stat_method(meas, model, *args[1:-1])
return res
@staticmethod
def _add_fitting_uncertainties(*args):
"""
        When available, calculates and sets the parameter covariance matrix
        (``model.cov_matrix``) and standard deviations (``model.stds``).
"""
return None
@abc.abstractmethod
def __call__(self):
"""
This method performs the actual fitting and modifies the parameter list
of a model.
Fitter subclasses should implement this method.
"""
raise NotImplementedError("Subclasses should implement this method.")
# TODO: I have ongoing branch elsewhere that's refactoring this module so that
# all the fitter classes in here are Fitter subclasses. In the meantime we
# need to specify that _FitterMeta is its metaclass.
class LinearLSQFitter(metaclass=_FitterMeta):
"""
A class performing a linear least square fitting.
Uses `numpy.linalg.lstsq` to do the fitting.
Given a model and data, fits the model to the data and changes the
model's parameters. Keeps a dictionary of auxiliary fitting information.
Notes
-----
    Note that currently LinearLSQFitter does not support compound models.
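
    Examples
    --------
    A minimal sketch fitting a first-degree polynomial to synthetic data::

        >>> import numpy as np
        >>> from astropy.modeling import models, fitting
        >>> x = np.linspace(0., 10., 50)
        >>> y = 2. * x + 1.
        >>> fitter = fitting.LinearLSQFitter()
        >>> fitted_line = fitter(models.Polynomial1D(degree=1), x, y)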
"""
supported_constraints = ["fixed"]
supports_masked_input = True
def __init__(self, calc_uncertainties=False):
self.fit_info = {
"residuals": None,
"rank": None,
"singular_values": None,
"params": None,
}
self._calc_uncertainties = calc_uncertainties
@staticmethod
def _is_invertible(m):
"""Check if inverse of matrix can be obtained."""
if m.shape[0] != m.shape[1]:
return False
if np.linalg.matrix_rank(m) < m.shape[0]:
return False
return True
def _add_fitting_uncertainties(self, model, a, n_coeff, x, y, z=None, resids=None):
"""
        Calculate the parameter covariance matrix and standard deviations,
        and set the `cov_matrix` and `stds` attributes on the model.
"""
x_dot_x_prime = np.dot(a.T, a)
        masked = hasattr(y, "mask")
# check if invertible. if not, can't calc covariance.
if not self._is_invertible(x_dot_x_prime):
return model
inv_x_dot_x_prime = np.linalg.inv(x_dot_x_prime)
if z is None: # 1D models
if len(model) == 1: # single model
mask = None
if masked:
mask = y.mask
xx = np.ma.array(x, mask=mask)
RSS = [(1 / (xx.count() - n_coeff)) * resids]
if len(model) > 1: # model sets
RSS = [] # collect sum residuals squared for each model in set
for j in range(len(model)):
mask = None
if masked:
mask = y.mask[..., j].flatten()
xx = np.ma.array(x, mask=mask)
eval_y = model(xx, model_set_axis=False)
eval_y = np.rollaxis(eval_y, model.model_set_axis)[j]
RSS.append(
(1 / (xx.count() - n_coeff)) * np.sum((y[..., j] - eval_y) ** 2)
)
else: # 2D model
if len(model) == 1:
mask = None
if masked:
warnings.warn(
"Calculation of fitting uncertainties "
"for 2D models with masked values not "
"currently supported.\n",
AstropyUserWarning,
)
return
xx, _ = np.ma.array(x, mask=mask), np.ma.array(y, mask=mask)
# len(xx) instead of xx.count. this will break if values are masked?
RSS = [(1 / (len(xx) - n_coeff)) * resids]
else:
RSS = []
for j in range(len(model)):
eval_z = model(x, y, model_set_axis=False)
mask = None # need to figure out how to deal w/ masking here.
if model.model_set_axis == 1:
# model_set_axis passed when evaluating only refers to input shapes
# so output must be reshaped for model_set_axis=1.
eval_z = np.rollaxis(eval_z, 1)
eval_z = eval_z[j]
RSS.append(
[(1 / (len(x) - n_coeff)) * np.sum((z[j] - eval_z) ** 2)]
)
covs = [inv_x_dot_x_prime * r for r in RSS]
free_param_names = [
x
for x in model.fixed
if (model.fixed[x] is False) and (model.tied[x] is False)
]
if len(covs) == 1:
model.cov_matrix = Covariance(covs[0], model.param_names)
model.stds = StandardDeviations(covs[0], free_param_names)
else:
model.cov_matrix = [Covariance(cov, model.param_names) for cov in covs]
model.stds = [StandardDeviations(cov, free_param_names) for cov in covs]
@staticmethod
def _deriv_with_constraints(model, param_indices, x=None, y=None):
if y is None:
d = np.array(model.fit_deriv(x, *model.parameters))
else:
d = np.array(model.fit_deriv(x, y, *model.parameters))
if model.col_fit_deriv:
return d[param_indices]
else:
return d[..., param_indices]
def _map_domain_window(self, model, x, y=None):
"""
Maps domain into window for a polynomial model which has these
attributes.
"""
if y is None:
if hasattr(model, "domain") and model.domain is None:
model.domain = [x.min(), x.max()]
if hasattr(model, "window") and model.window is None:
model.window = [-1, 1]
return poly_map_domain(x, model.domain, model.window)
else:
if hasattr(model, "x_domain") and model.x_domain is None:
model.x_domain = [x.min(), x.max()]
if hasattr(model, "y_domain") and model.y_domain is None:
model.y_domain = [y.min(), y.max()]
if hasattr(model, "x_window") and model.x_window is None:
model.x_window = [-1.0, 1.0]
if hasattr(model, "y_window") and model.y_window is None:
model.y_window = [-1.0, 1.0]
xnew = poly_map_domain(x, model.x_domain, model.x_window)
ynew = poly_map_domain(y, model.y_domain, model.y_window)
return xnew, ynew
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, rcond=None):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
Input coordinates
y : array-like
Input coordinates
z : array-like, optional
Input coordinates.
If the dependent (``y`` or ``z``) coordinate values are provided
as a `numpy.ma.MaskedArray`, any masked points are ignored when
fitting. Note that model set fitting is significantly slower when
there are masked points (not just an empty mask), as the matrix
equation has to be solved for each model separately when their
coordinate grids differ.
weights : array, optional
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
rcond : float, optional
Cut-off ratio for small singular values of ``a``.
Singular values are set to zero if they are smaller than ``rcond``
times the largest singular value of ``a``.
equivalencies : list or None, optional, keyword-only
            List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
if not model.fittable:
raise ValueError("Model must be a subclass of FittableModel")
if not model.linear:
raise ModelLinearityError(
"Model is not linear in parameters, "
"linear fit methods should not be used."
)
if hasattr(model, "submodel_names"):
raise ValueError("Model must be simple, not compound")
_validate_constraints(self.supported_constraints, model)
model_copy = model.copy()
model_copy.sync_constraints = False
_, fitparam_indices, _ = model_to_fit_params(model_copy)
if model_copy.n_inputs == 2 and z is None:
raise ValueError("Expected x, y and z for a 2 dimensional model.")
farg = _convert_input(
x, y, z, n_models=len(model_copy), model_set_axis=model_copy.model_set_axis
)
n_fixed = sum(model_copy.fixed.values())
# This is also done by _convert_inputs, but we need it here to allow
# checking the array dimensionality before that gets called:
if weights is not None:
weights = np.asarray(weights, dtype=float)
if n_fixed:
# The list of fixed params is the complement of those being fitted:
fixparam_indices = [
idx
for idx in range(len(model_copy.param_names))
if idx not in fitparam_indices
]
# Construct matrix of user-fixed parameters that can be dotted with
# the corresponding fit_deriv() terms, to evaluate corrections to
# the dependent variable in order to fit only the remaining terms:
fixparams = np.asarray(
[
getattr(model_copy, model_copy.param_names[idx]).value
for idx in fixparam_indices
]
)
if len(farg) == 2:
x, y = farg
if weights is not None:
# If we have separate weights for each model, apply the same
# conversion as for the data, otherwise check common weights
# as if for a single model:
_, weights = _convert_input(
x,
weights,
n_models=len(model_copy) if weights.ndim == y.ndim else 1,
model_set_axis=model_copy.model_set_axis,
)
# map domain into window
if hasattr(model_copy, "domain"):
x = self._map_domain_window(model_copy, x)
if n_fixed:
lhs = np.asarray(
self._deriv_with_constraints(model_copy, fitparam_indices, x=x)
)
fixderivs = self._deriv_with_constraints(
model_copy, fixparam_indices, x=x
)
else:
lhs = np.asarray(model_copy.fit_deriv(x, *model_copy.parameters))
sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x)
rhs = y
else:
x, y, z = farg
if weights is not None:
# If we have separate weights for each model, apply the same
# conversion as for the data, otherwise check common weights
# as if for a single model:
_, _, weights = _convert_input(
x,
y,
weights,
n_models=len(model_copy) if weights.ndim == z.ndim else 1,
model_set_axis=model_copy.model_set_axis,
)
# map domain into window
if hasattr(model_copy, "x_domain"):
x, y = self._map_domain_window(model_copy, x, y)
if n_fixed:
lhs = np.asarray(
self._deriv_with_constraints(model_copy, fitparam_indices, x=x, y=y)
)
fixderivs = self._deriv_with_constraints(
model_copy, fixparam_indices, x=x, y=y
)
else:
lhs = np.asanyarray(model_copy.fit_deriv(x, y, *model_copy.parameters))
sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x, y)
if len(model_copy) > 1:
# Just to be explicit (rather than baking in False == 0):
model_axis = model_copy.model_set_axis or 0
if z.ndim > 2:
# For higher-dimensional z, flatten all the axes except the
# dimension along which models are stacked and transpose so
# the model axis is *last* (I think this resolves Erik's
# pending generalization from 80a6f25a):
rhs = np.rollaxis(z, model_axis, z.ndim)
rhs = rhs.reshape(-1, rhs.shape[-1])
else:
# This "else" seems to handle the corner case where the
# user has already flattened x/y before attempting a 2D fit
# but z has a second axis for the model set. NB. This is
# ~5-10x faster than using rollaxis.
rhs = z.T if model_axis == 0 else z
if weights is not None:
# Same for weights
if weights.ndim > 2:
# Separate 2D weights for each model:
weights = np.rollaxis(weights, model_axis, weights.ndim)
weights = weights.reshape(-1, weights.shape[-1])
elif weights.ndim == z.ndim:
# Separate, flattened weights for each model:
weights = weights.T if model_axis == 0 else weights
else:
# Common weights for all the models:
weights = weights.flatten()
else:
rhs = z.flatten()
if weights is not None:
weights = weights.flatten()
# If the derivative is defined along rows (as with non-linear models)
if model_copy.col_fit_deriv:
lhs = np.asarray(lhs).T
# Some models (eg. Polynomial1D) don't flatten multi-dimensional inputs
# when constructing their Vandermonde matrix, which can lead to obscure
# failures below. Ultimately, np.linalg.lstsq can't handle >2D matrices,
# so just raise a slightly more informative error when this happens:
if np.asanyarray(lhs).ndim > 2:
raise ValueError(
f"{type(model_copy).__name__} gives unsupported >2D "
"derivative matrix for this x/y"
)
# Subtract any terms fixed by the user from (a copy of) the RHS, in
# order to fit the remaining terms correctly:
if n_fixed:
if model_copy.col_fit_deriv:
fixderivs = np.asarray(fixderivs).T # as for lhs above
rhs = rhs - fixderivs.dot(fixparams) # evaluate user-fixed terms
# Subtract any terms implicit in the model from the RHS, which, like
# user-fixed terms, affect the dependent variable but are not fitted:
if sum_of_implicit_terms is not None:
# If we have a model set, the extra axis must be added to
# sum_of_implicit_terms as its innermost dimension, to match the
# dimensionality of rhs after _convert_input "rolls" it as needed
# by np.linalg.lstsq. The vector then gets broadcast to the right
# number of sets (columns). This assumes all the models share the
# same input coordinates, as is currently the case.
if len(model_copy) > 1:
sum_of_implicit_terms = sum_of_implicit_terms[..., np.newaxis]
rhs = rhs - sum_of_implicit_terms
if weights is not None:
if rhs.ndim == 2:
if weights.shape == rhs.shape:
# separate weights for multiple models case: broadcast
# lhs to have more dimension (for each model)
lhs = lhs[..., np.newaxis] * weights[:, np.newaxis]
rhs = rhs * weights
else:
lhs *= weights[:, np.newaxis]
# Don't modify in-place in case rhs was the original
# dependent variable array
rhs = rhs * weights[:, np.newaxis]
else:
lhs *= weights[:, np.newaxis]
rhs = rhs * weights
scl = (lhs * lhs).sum(0)
lhs /= scl
masked = np.any(np.ma.getmask(rhs))
if weights is not None and not masked and np.any(np.isnan(lhs)):
raise ValueError(
"Found NaNs in the coefficient matrix, which "
"should not happen and would crash the lapack "
"routine. Maybe check that weights are not null."
)
        a = None  # needed for calculating covariance
if (masked and len(model_copy) > 1) or (
weights is not None and weights.ndim > 1
):
# Separate masks or weights for multiple models case: Numpy's
# lstsq supports multiple dimensions only for rhs, so we need to
# loop manually on the models. This may be fixed in the future
# with https://github.com/numpy/numpy/pull/15777.
# Initialize empty array of coefficients and populate it one model
# at a time. The shape matches the number of coefficients from the
# Vandermonde matrix and the number of models from the RHS:
lacoef = np.zeros(lhs.shape[1:2] + rhs.shape[-1:], dtype=rhs.dtype)
# Arrange the lhs as a stack of 2D matrices that we can iterate
# over to get the correctly-orientated lhs for each model:
if lhs.ndim > 2:
lhs_stack = np.rollaxis(lhs, -1, 0)
else:
lhs_stack = np.broadcast_to(lhs, rhs.shape[-1:] + lhs.shape)
# Loop over the models and solve for each one. By this point, the
# model set axis is the second of two. Transpose rather than using,
# say, np.moveaxis(array, -1, 0), since it's slightly faster and
# lstsq can't handle >2D arrays anyway. This could perhaps be
# optimized by collecting together models with identical masks
# (eg. those with no rejected points) into one operation, though it
# will still be relatively slow when calling lstsq repeatedly.
for model_lhs, model_rhs, model_lacoef in zip(lhs_stack, rhs.T, lacoef.T):
# Cull masked points on both sides of the matrix equation:
good = ~model_rhs.mask if masked else slice(None)
model_lhs = model_lhs[good]
model_rhs = model_rhs[good][..., np.newaxis]
a = model_lhs
# Solve for this model:
t_coef, resids, rank, sval = np.linalg.lstsq(
model_lhs, model_rhs, rcond
)
model_lacoef[:] = t_coef.T
else:
# If we're fitting one or more models over a common set of points,
# we only have to solve a single matrix equation, which is an order
# of magnitude faster than calling lstsq() once per model below:
good = ~rhs.mask if masked else slice(None) # latter is a no-op
a = lhs[good]
# Solve for one or more models:
lacoef, resids, rank, sval = np.linalg.lstsq(lhs[good], rhs[good], rcond)
self.fit_info["residuals"] = resids
self.fit_info["rank"] = rank
self.fit_info["singular_values"] = sval
lacoef /= scl[:, np.newaxis] if scl.ndim < rhs.ndim else scl
self.fit_info["params"] = lacoef
fitter_to_model_params(model_copy, lacoef.flatten())
# TODO: Only Polynomial models currently have an _order attribute;
# maybe change this to read isinstance(model, PolynomialBase)
if (
hasattr(model_copy, "_order")
and len(model_copy) == 1
and rank < (model_copy._order - n_fixed)
):
warnings.warn("The fit may be poorly conditioned\n", AstropyUserWarning)
# calculate and set covariance matrix and standard devs. on model
if self._calc_uncertainties:
if len(y) > len(lacoef):
self._add_fitting_uncertainties(
model_copy, a * scl, len(lacoef), x, y, z, resids
)
model_copy.sync_constraints = True
return model_copy
class FittingWithOutlierRemoval:
"""
This class combines an outlier removal technique with a fitting procedure.
Basically, given a maximum number of iterations ``niter``, outliers are
removed and fitting is performed for each iteration, until no new outliers
are found or ``niter`` is reached.
Parameters
----------
fitter : `Fitter`
An instance of any Astropy fitter, i.e., LinearLSQFitter,
LevMarLSQFitter, SLSQPLSQFitter, SimplexLSQFitter, JointFitter. For
model set fitting, this must understand masked input data (as
indicated by the fitter class attribute ``supports_masked_input``).
outlier_func : callable
A function for outlier removal.
If this accepts an ``axis`` parameter like the `numpy` functions, the
appropriate value will be supplied automatically when fitting model
sets (unless overridden in ``outlier_kwargs``), to find outliers for
each model separately; otherwise, the same filtering must be performed
in a loop over models, which is almost an order of magnitude slower.
niter : int, optional
Maximum number of iterations.
outlier_kwargs : dict, optional
Keyword arguments for outlier_func.
Attributes
----------
fit_info : dict
The ``fit_info`` (if any) from the last iteration of the wrapped
``fitter`` during the most recent fit. An entry is also added with the
keyword ``niter`` that records the actual number of fitting iterations
performed (as opposed to the user-specified maximum).
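
    Examples
    --------
    A minimal sketch pairing a linear fitter with sigma clipping (the data
    arrays ``x`` and ``y`` are assumed to be defined elsewhere)::

        >>> from astropy.stats import sigma_clip
        >>> from astropy.modeling import models, fitting
        >>> fit = fitting.FittingWithOutlierRemoval(
        ...     fitting.LinearLSQFitter(), sigma_clip, niter=3, sigma=3.0
        ... )
        >>> # fitted_model, outlier_mask = fit(models.Polynomial1D(2), x, y)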
"""
def __init__(self, fitter, outlier_func, niter=3, **outlier_kwargs):
self.fitter = fitter
self.outlier_func = outlier_func
self.niter = niter
self.outlier_kwargs = outlier_kwargs
self.fit_info = {"niter": None}
def __str__(self):
return (
f"Fitter: {self.fitter.__class__.__name__}\n"
f"Outlier function: {self.outlier_func.__name__}\n"
f"Num. of iterations: {self.niter}\n"
f"Outlier func. args.: {self.outlier_kwargs}"
)
def __repr__(self):
return (
f"{self.__class__.__name__}(fitter: {self.fitter.__class__.__name__}, "
f"outlier_func: {self.outlier_func.__name__},"
f" niter: {self.niter}, outlier_kwargs: {self.outlier_kwargs})"
)
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Parameters
----------
model : `~astropy.modeling.FittableModel`
An analytic model which will be fit to the provided data.
This also contains the initial guess for an optimization
algorithm.
x : array-like
Input coordinates.
y : array-like
Data measurements (1D case) or input coordinates (2D case).
z : array-like, optional
Data measurements (2D case).
weights : array-like, optional
Weights to be passed to the fitter.
kwargs : dict, optional
Keyword arguments to be passed to the fitter.
Returns
-------
fitted_model : `~astropy.modeling.FittableModel`
Fitted model after outlier removal.
mask : `numpy.ndarray`
Boolean mask array, identifying which points were used in the final
fitting iteration (False) and which were found to be outliers or
were masked in the input (True).
"""
# For single models, the data get filtered here at each iteration and
# then passed to the fitter, which is the historical behavior and
# works even for fitters that don't understand masked arrays. For model
# sets, the fitter must be able to filter masked data internally,
# because fitters require a single set of x/y coordinates whereas the
# eliminated points can vary between models. To avoid this limitation,
# we could fall back to looping over individual model fits, but it
# would likely be fiddly and involve even more overhead (and the
# non-linear fitters don't work with model sets anyway, as of writing).
if len(model) == 1:
model_set_axis = None
else:
if (
not hasattr(self.fitter, "supports_masked_input")
or self.fitter.supports_masked_input is not True
):
raise ValueError(
f"{type(self.fitter).__name__} cannot fit model sets with masked "
"values"
)
# Fitters use their input model's model_set_axis to determine how
# their input data are stacked:
model_set_axis = model.model_set_axis
# Construct input coordinate tuples for fitters & models that are
# appropriate for the dimensionality being fitted:
if z is None:
coords = (x,)
data = y
else:
coords = x, y
data = z
# For model sets, construct a numpy-standard "axis" tuple for the
# outlier function, to treat each model separately (if supported):
if model_set_axis is not None:
if model_set_axis < 0:
model_set_axis += data.ndim
if "axis" not in self.outlier_kwargs: # allow user override
# This also works for False (like model instantiation):
self.outlier_kwargs["axis"] = tuple(
n for n in range(data.ndim) if n != model_set_axis
)
loop = False
# Starting fit, prior to any iteration and masking:
fitted_model = self.fitter(model, x, y, z, weights=weights, **kwargs)
filtered_data = np.ma.masked_array(data)
if filtered_data.mask is np.ma.nomask:
filtered_data.mask = False
filtered_weights = weights
last_n_masked = filtered_data.mask.sum()
n = 0 # (allow recording no. of iterations when 0)
# Perform the iterative fitting:
for n in range(1, self.niter + 1):
# (Re-)evaluate the last model:
model_vals = fitted_model(*coords, model_set_axis=False)
# Determine the outliers:
if not loop:
# Pass axis parameter if outlier_func accepts it, otherwise
# prepare for looping over models:
try:
filtered_data = self.outlier_func(
filtered_data - model_vals, **self.outlier_kwargs
)
# If this happens to catch an error with a parameter other
# than axis, the next attempt will fail accordingly:
except TypeError:
if model_set_axis is None:
raise
else:
self.outlier_kwargs.pop("axis", None)
loop = True
# Construct MaskedArray to hold filtered values:
filtered_data = np.ma.masked_array(
filtered_data,
dtype=np.result_type(filtered_data, model_vals),
copy=True,
)
# Make sure the mask is an array, not just nomask:
if filtered_data.mask is np.ma.nomask:
filtered_data.mask = False
# Get views transposed appropriately for iteration
# over the set (handling data & mask separately due to
# NumPy issue #8506):
data_T = np.rollaxis(filtered_data, model_set_axis, 0)
mask_T = np.rollaxis(filtered_data.mask, model_set_axis, 0)
if loop:
model_vals_T = np.rollaxis(model_vals, model_set_axis, 0)
for row_data, row_mask, row_mod_vals in zip(
data_T, mask_T, model_vals_T
):
masked_residuals = self.outlier_func(
row_data - row_mod_vals, **self.outlier_kwargs
)
row_data.data[:] = masked_residuals.data
row_mask[:] = masked_residuals.mask
# Issue speed warning after the fact, so it only shows up when
# the TypeError is genuinely due to the axis argument.
warnings.warn(
"outlier_func did not accept axis argument; "
"reverted to slow loop over models.",
AstropyUserWarning,
)
# Recombine newly-masked residuals with model to get masked values:
filtered_data += model_vals
# Re-fit the data after filtering, passing masked/unmasked values
# for single models / sets, respectively:
if model_set_axis is None:
good = ~filtered_data.mask
if weights is not None:
filtered_weights = weights[good]
fitted_model = self.fitter(
fitted_model,
*(c[good] for c in coords),
filtered_data.data[good],
weights=filtered_weights,
**kwargs,
)
else:
fitted_model = self.fitter(
fitted_model,
*coords,
filtered_data,
weights=filtered_weights,
**kwargs,
)
# Stop iteration if the masked points are no longer changing (with
# cumulative rejection we only need to compare how many there are):
this_n_masked = filtered_data.mask.sum() # (minimal overhead)
if this_n_masked == last_n_masked:
break
last_n_masked = this_n_masked
self.fit_info = {"niter": n}
self.fit_info.update(getattr(self.fitter, "fit_info", {}))
return fitted_model, filtered_data.mask
class _NonLinearLSQFitter(metaclass=_FitterMeta):
"""
Base class for Non-Linear least-squares fitters.
Parameters
----------
calc_uncertainties : bool
        Whether the covariance matrix should be computed and set in the fit_info.
        Default: False
    use_min_max_bounds : bool
        Whether parameter bounds set on the model are enforced on each
        parameter during fitting, via a simple min/max clipping condition.
        Default: True
"""
supported_constraints = ["fixed", "tied", "bounds"]
"""
The constraint types supported by this fitter type.
"""
def __init__(self, calc_uncertainties=False, use_min_max_bounds=True):
self.fit_info = None
self._calc_uncertainties = calc_uncertainties
self._use_min_max_bounds = use_min_max_bounds
super().__init__()
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
parameters returned by the fitter
args : list
[model, [weights], [input coordinates]]
"""
model = args[0]
weights = args[1]
fitter_to_model_params(model, fps, self._use_min_max_bounds)
meas = args[-1]
if weights is None:
value = np.ravel(model(*args[2:-1]) - meas)
else:
value = np.ravel(weights * (model(*args[2:-1]) - meas))
if not np.all(np.isfinite(value)):
raise NonFiniteValueError(
"Objective function has encountered a non-finite value, "
"this will cause the fit to fail!\n"
"Please remove non-finite values from your input data before "
"fitting to avoid this error."
)
return value
@staticmethod
def _add_fitting_uncertainties(model, cov_matrix):
"""
Set ``cov_matrix`` and ``stds`` attributes on model with parameter
covariance matrix returned by ``optimize.leastsq``.
"""
free_param_names = [
x
for x in model.fixed
if (model.fixed[x] is False) and (model.tied[x] is False)
]
model.cov_matrix = Covariance(cov_matrix, free_param_names)
model.stds = StandardDeviations(cov_matrix, free_param_names)
@staticmethod
def _wrap_deriv(params, model, weights, x, y, z=None):
"""
Wraps the method calculating the Jacobian of the function to account
for model constraints.
        `scipy.optimize.leastsq` expects the derivative function to have this
        signature: ``(parlist, (argtuple))``. In order to accommodate model
        constraints, instead of using ``params`` directly, we set the parameter
        list in this function.
"""
if weights is None:
weights = 1.0
if any(model.fixed.values()) or any(model.tied.values()):
# update the parameters with the current values from the fitter
fitter_to_model_params(model, params)
if z is None:
full = np.array(model.fit_deriv(x, *model.parameters))
if not model.col_fit_deriv:
full_deriv = np.ravel(weights) * full.T
else:
full_deriv = np.ravel(weights) * full
else:
full = np.array(
[np.ravel(_) for _ in model.fit_deriv(x, y, *model.parameters)]
)
if not model.col_fit_deriv:
full_deriv = np.ravel(weights) * full.T
else:
full_deriv = np.ravel(weights) * full
pars = [getattr(model, name) for name in model.param_names]
fixed = [par.fixed for par in pars]
tied = [par.tied for par in pars]
tied = list(np.where([par.tied is not False for par in pars], True, tied))
fix_and_tie = np.logical_or(fixed, tied)
ind = np.logical_not(fix_and_tie)
if not model.col_fit_deriv:
residues = np.asarray(full_deriv[np.nonzero(ind)]).T
else:
residues = full_deriv[np.nonzero(ind)]
return [np.ravel(_) for _ in residues]
else:
if z is None:
fit_deriv = np.array(model.fit_deriv(x, *params))
try:
output = np.array(
[np.ravel(_) for _ in np.array(weights) * fit_deriv]
)
if output.shape != fit_deriv.shape:
output = np.array(
[np.ravel(_) for _ in np.atleast_2d(weights).T * fit_deriv]
)
return output
except ValueError:
return np.array(
[
np.ravel(_)
for _ in np.array(weights) * np.moveaxis(fit_deriv, -1, 0)
]
).transpose()
else:
if not model.col_fit_deriv:
return [
np.ravel(_)
for _ in (
np.ravel(weights)
* np.array(model.fit_deriv(x, y, *params)).T
).T
]
return [
np.ravel(_)
for _ in weights * np.array(model.fit_deriv(x, y, *params))
]
def _compute_param_cov(
self, model, y, init_values, cov_x, fitparams, farg, weights=None
):
# now try to compute the true covariance matrix
if (len(y) > len(init_values)) and cov_x is not None:
self.fit_info["param_cov"] = cov_x
if weights is None:
# if there are no measurement uncertainties given in `weights`,
# fall back on the default behavior in scipy.optimize.curve_fit
# when `absolute_sigma == False`. If there are uncertainties,
# assume they are "absolute" and not "relative".
# For details, see curve_fit:
# https://github.com/scipy/scipy/blob/
# c1ed5ece8ffbf05356a22a8106affcd11bd3aee0/scipy/
# optimize/_minpack_py.py#L591-L602
sum_sqrs = np.sum(self.objective_function(fitparams, *farg) ** 2)
dof = len(y) - len(init_values)
self.fit_info["param_cov"] *= sum_sqrs / dof
else:
self.fit_info["param_cov"] = None
if self._calc_uncertainties is True:
if self.fit_info["param_cov"] is not None:
self._add_fitting_uncertainties(model, self.fit_info["param_cov"])
def _run_fitter(self, model, farg, maxiter, acc, epsilon, estimate_jacobian):
return None, None, None
def _filter_non_finite(self, x, y, z=None, weights=None):
"""
Filter out non-finite values in x, y, z.
Returns
-------
        x, y, z, weights : ndarrays
            x, y, z, and weights with the non-finite values filtered out.
"""
MESSAGE = "Non-Finite input data has been removed by the fitter."
if z is None:
mask = np.isfinite(y)
if not np.all(mask):
warnings.warn(MESSAGE, AstropyUserWarning)
z_out = None
else:
mask = np.isfinite(z)
if not np.all(mask):
warnings.warn(MESSAGE, AstropyUserWarning)
z_out = z[mask]
return x[mask], y[mask], z_out, None if weights is None else weights[mask]
@fitter_unit_support
def __call__(
self,
model,
x,
y,
z=None,
weights=None,
maxiter=DEFAULT_MAXITER,
acc=DEFAULT_ACC,
epsilon=DEFAULT_EPS,
estimate_jacobian=False,
filter_non_finite=False,
):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array, optional
input coordinates
weights : array, optional
Weights for fitting. For data with Gaussian uncertainties, the weights
should be 1/sigma.
.. versionchanged:: 5.3
Calculate parameter covariances while accounting for ``weights``
as "absolute" inverse uncertainties. To recover the old behavior,
choose ``weights=None``.
maxiter : int
maximum number of iterations
acc : float
Relative error desired in the approximate solution
epsilon : float
A suitable step length for the forward-difference
approximation of the Jacobian (if model.fjac=None). If
epsfcn is less than the machine precision, it is
assumed that the relative errors in the functions are
of the order of the machine precision.
estimate_jacobian : bool
If False (default) and if the model has a fit_deriv method,
it will be used. Otherwise the Jacobian will be estimated.
If True, the Jacobian will be estimated in any case.
equivalencies : list or None, optional, keyword-only
            List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
filter_non_finite : bool, optional
Whether or not to filter data with non-finite values. Default is False
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
model_copy = _validate_model(model, self.supported_constraints)
model_copy.sync_constraints = False
if filter_non_finite:
x, y, z, weights = self._filter_non_finite(x, y, z, weights)
farg = (
model_copy,
weights,
) + _convert_input(x, y, z)
init_values, fitparams, cov_x = self._run_fitter(
model_copy, farg, maxiter, acc, epsilon, estimate_jacobian
)
self._compute_param_cov(
model_copy, y, init_values, cov_x, fitparams, farg, weights
)
        model_copy.sync_constraints = True
return model_copy
class LevMarLSQFitter(_NonLinearLSQFitter):
"""
Levenberg-Marquardt algorithm and least squares statistic.
Parameters
----------
calc_uncertainties : bool
        Whether the covariance matrix should be computed and set in the fit_info.
Default: False
Attributes
----------
fit_info : dict
The `scipy.optimize.leastsq` result for the most recent fit (see
notes).
Notes
-----
The ``fit_info`` dictionary contains the values returned by
`scipy.optimize.leastsq` for the most recent fit, including the values from
the ``infodict`` dictionary it returns. See the `scipy.optimize.leastsq`
documentation for details on the meaning of these values. Note that the
``x`` return value is *not* included (as it is instead the parameter values
of the returned model).
    Additionally, one element of ``fit_info`` is computed whenever a
model is fit, with the key 'param_cov'. The corresponding value is the
covariance matrix of the parameters as a 2D numpy array. The order of the
matrix elements matches the order of the parameters in the fitted model
(i.e., the same order as ``model.param_names``).
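
    Examples
    --------
    A minimal sketch (requires `scipy`); with ``calc_uncertainties=True`` the
    fitted model also gains ``cov_matrix`` and ``stds`` attributes::

        >>> import numpy as np
        >>> from astropy.modeling import models, fitting
        >>> x = np.linspace(-3., 3., 100)
        >>> y = 2.5 * np.exp(-0.5 * x ** 2)
        >>> fitter = fitting.LevMarLSQFitter(calc_uncertainties=True)
        >>> fitted = fitter(models.Gaussian1D(amplitude=1., mean=0., stddev=1.), x, y)
        >>> cov = fitter.fit_info['param_cov']  # parameter covariance matrix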
"""
def __init__(self, calc_uncertainties=False):
super().__init__(calc_uncertainties)
self.fit_info = {
"nfev": None,
"fvec": None,
"fjac": None,
"ipvt": None,
"qtf": None,
"message": None,
"ierr": None,
"param_jac": None,
"param_cov": None,
}
def _run_fitter(self, model, farg, maxiter, acc, epsilon, estimate_jacobian):
from scipy import optimize
if model.fit_deriv is None or estimate_jacobian:
dfunc = None
else:
dfunc = self._wrap_deriv
init_values, _, _ = model_to_fit_params(model)
fitparams, cov_x, dinfo, mess, ierr = optimize.leastsq(
self.objective_function,
init_values,
args=farg,
Dfun=dfunc,
col_deriv=model.col_fit_deriv,
maxfev=maxiter,
epsfcn=epsilon,
xtol=acc,
full_output=True,
)
fitter_to_model_params(model, fitparams)
self.fit_info.update(dinfo)
self.fit_info["cov_x"] = cov_x
self.fit_info["message"] = mess
self.fit_info["ierr"] = ierr
if ierr not in [1, 2, 3, 4]:
warnings.warn(
"The fit may be unsuccessful; check "
"fit_info['message'] for more information.",
AstropyUserWarning,
)
return init_values, fitparams, cov_x
class _NLLSQFitter(_NonLinearLSQFitter):
"""
Wrapper class for `scipy.optimize.least_squares` method, which provides:
- Trust Region Reflective
- dogbox
        - Levenberg-Marquardt
algorithms using the least squares statistic.
Parameters
----------
method : str
‘trf’ : Trust Region Reflective algorithm, particularly suitable
for large sparse problems with bounds. Generally robust method.
‘dogbox’ : dogleg algorithm with rectangular trust regions, typical
use case is small problems with bounds. Not recommended for
problems with rank-deficient Jacobian.
‘lm’ : Levenberg-Marquardt algorithm as implemented in MINPACK.
Doesn’t handle bounds and sparse Jacobians. Usually the most
efficient method for small unconstrained problems.
calc_uncertainties : bool
        Whether the covariance matrix should be computed and set in the fit_info.
        Default: False
    use_min_max_bounds : bool
        Whether parameter bounds set on the model are enforced on each
        parameter during fitting, via a simple min/max clipping condition.
        A True setting replicates how LevMarLSQFitter enforces bounds.
        Default: False
Attributes
----------
fit_info :
        A `scipy.optimize.OptimizeResult` instance which contains all of
the most recent fit information
"""
def __init__(self, method, calc_uncertainties=False, use_min_max_bounds=False):
super().__init__(calc_uncertainties, use_min_max_bounds)
self._method = method
def _run_fitter(self, model, farg, maxiter, acc, epsilon, estimate_jacobian):
from scipy import optimize
from scipy.linalg import svd
if model.fit_deriv is None or estimate_jacobian:
dfunc = "2-point"
else:
def _dfunc(params, model, weights, x, y, z=None):
if model.col_fit_deriv:
return np.transpose(
self._wrap_deriv(params, model, weights, x, y, z)
)
else:
return self._wrap_deriv(params, model, weights, x, y, z)
dfunc = _dfunc
init_values, _, bounds = model_to_fit_params(model)
        # Note: if use_min_max_bounds is True, we default to enforcing bounds
        # using the old method employed by LevMarLSQFitter. This is different
        # from the method that optimize.least_squares uses to enforce bounds,
        # so we override the bounds being passed to optimize.least_squares so
        # that it will not enforce any bounding.
if self._use_min_max_bounds:
bounds = (-np.inf, np.inf)
self.fit_info = optimize.least_squares(
self.objective_function,
init_values,
args=farg,
jac=dfunc,
max_nfev=maxiter,
diff_step=np.sqrt(epsilon),
xtol=acc,
method=self._method,
bounds=bounds,
)
# Adapted from ~scipy.optimize.minpack, see:
# https://github.com/scipy/scipy/blob/47bb6febaa10658c72962b9615d5d5aa2513fa3a/scipy/optimize/minpack.py#L795-L816
# Do Moore-Penrose inverse discarding zero singular values.
_, s, VT = svd(self.fit_info.jac, full_matrices=False)
threshold = np.finfo(float).eps * max(self.fit_info.jac.shape) * s[0]
s = s[s > threshold]
VT = VT[: s.size]
cov_x = np.dot(VT.T / s**2, VT)
fitter_to_model_params(model, self.fit_info.x, False)
if not self.fit_info.success:
warnings.warn(
f"The fit may be unsuccessful; check: \n {self.fit_info.message}",
AstropyUserWarning,
)
return init_values, self.fit_info.x, cov_x
class TRFLSQFitter(_NLLSQFitter):
"""
Trust Region Reflective algorithm and least squares statistic.
Parameters
----------
calc_uncertainties : bool
        Whether the covariance matrix should be computed and set in the fit_info.
        Default: False
    use_min_max_bounds : bool
        Whether parameter bounds set on the model are enforced on each
        parameter during fitting, via a simple min/max clipping condition.
        A True setting replicates how LevMarLSQFitter enforces bounds.
        Default: False
Attributes
----------
fit_info :
        A `scipy.optimize.OptimizeResult` instance which contains all of
the most recent fit information
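
    Examples
    --------
    A minimal sketch (requires `scipy`); with the default
    ``use_min_max_bounds=False``, bounds declared on the model are passed
    through to `scipy.optimize.least_squares`::

        >>> import numpy as np
        >>> from astropy.modeling import models, fitting
        >>> x = np.linspace(-3., 3., 100)
        >>> y = 2.5 * np.exp(-0.5 * x ** 2)
        >>> g_init = models.Gaussian1D(amplitude=1., mean=0., stddev=1.,
        ...                            bounds={'stddev': (0.1, 5.0)})
        >>> fitted = fitting.TRFLSQFitter()(g_init, x, y)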
"""
def __init__(self, calc_uncertainties=False, use_min_max_bounds=False):
super().__init__("trf", calc_uncertainties, use_min_max_bounds)
class DogBoxLSQFitter(_NLLSQFitter):
"""
DogBox algorithm and least squares statistic.
Parameters
----------
calc_uncertainties : bool
        Whether the covariance matrix should be computed and set in the fit_info.
        Default: False
    use_min_max_bounds : bool
        Whether parameter bounds set on the model are enforced on each
        parameter during fitting, via a simple min/max clipping condition.
        A True setting replicates how LevMarLSQFitter enforces bounds.
        Default: False
Attributes
----------
fit_info :
        A `scipy.optimize.OptimizeResult` instance which contains all of
the most recent fit information
"""
def __init__(self, calc_uncertainties=False, use_min_max_bounds=False):
super().__init__("dogbox", calc_uncertainties, use_min_max_bounds)
class LMLSQFitter(_NLLSQFitter):
"""
`scipy.optimize.least_squares` Levenberg-Marquardt algorithm and least squares statistic.
Parameters
----------
calc_uncertainties : bool
        Whether the covariance matrix should be computed and set in the fit_info.
Default: False
Attributes
----------
fit_info :
        A `scipy.optimize.OptimizeResult` instance which contains all of
the most recent fit information
"""
def __init__(self, calc_uncertainties=False):
super().__init__("lm", calc_uncertainties, True)
class SLSQPLSQFitter(Fitter):
"""
Sequential Least Squares Programming (SLSQP) optimization algorithm and
least squares statistic.
Raises
------
ModelLinearityError
A linear model is passed to a nonlinear fitter
Notes
-----
See also the `~astropy.modeling.optimizers.SLSQP` optimizer.
"""
supported_constraints = SLSQP.supported_constraints
def __init__(self):
super().__init__(optimizer=SLSQP, statistic=leastsquare)
self.fit_info = {}
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array, optional
input coordinates
weights : array, optional
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
kwargs : dict
optional keyword arguments to be passed to the optimizer or the statistic
verblevel : int
0-silent
1-print summary upon completion,
2-print summary after each iteration
maxiter : int
maximum number of iterations
epsilon : float
the step size for finite-difference derivative estimates
acc : float
Requested accuracy
equivalencies : list or None, optional, keyword-only
            List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
model_copy = _validate_model(model, self._opt_method.supported_constraints)
model_copy.sync_constraints = False
farg = _convert_input(x, y, z)
farg = (
model_copy,
weights,
) + farg
init_values, _, _ = model_to_fit_params(model_copy)
fitparams, self.fit_info = self._opt_method(
self.objective_function, init_values, farg, **kwargs
)
fitter_to_model_params(model_copy, fitparams)
model_copy.sync_constraints = True
return model_copy
class SimplexLSQFitter(Fitter):
"""
Simplex algorithm and least squares statistic.
Raises
------
`ModelLinearityError`
A linear model is passed to a nonlinear fitter
"""
supported_constraints = Simplex.supported_constraints
def __init__(self):
super().__init__(optimizer=Simplex, statistic=leastsquare)
self.fit_info = {}
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array, optional
input coordinates
weights : array, optional
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
kwargs : dict
optional keyword arguments to be passed to the optimizer or the statistic
maxiter : int
maximum number of iterations
acc : float
Relative error in approximate solution
equivalencies : list or None, optional, keyword-only
            List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
model_copy = _validate_model(model, self._opt_method.supported_constraints)
model_copy.sync_constraints = False
farg = _convert_input(x, y, z)
farg = (
model_copy,
weights,
) + farg
init_values, _, _ = model_to_fit_params(model_copy)
fitparams, self.fit_info = self._opt_method(
self.objective_function, init_values, farg, **kwargs
)
fitter_to_model_params(model_copy, fitparams)
model_copy.sync_constraints = True
return model_copy
class JointFitter(metaclass=_FitterMeta):
"""
Fit models which share a parameter.
For example, fit two gaussians to two data sets but keep
the FWHM the same.
Parameters
----------
models : list
a list of model instances
jointparameters : list
a list of joint parameters
initvals : list
a list of initial values
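
    Examples
    --------
    A minimal sketch tying the width of two Gaussians (the data arrays
    ``x1, y1, x2, y2`` are assumed; fitting updates the models in place)::

        >>> from astropy.modeling import models, fitting
        >>> g1 = models.Gaussian1D(1.0, 0.0, 0.5)
        >>> g2 = models.Gaussian1D(2.5, 0.3, 0.5)
        >>> jf = fitting.JointFitter([g1, g2],
        ...                          {g1: ['stddev'], g2: ['stddev']}, [0.5])
        >>> # jf(x1, y1, x2, y2)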
"""
def __init__(self, models, jointparameters, initvals):
self.models = list(models)
self.initvals = list(initvals)
self.jointparams = jointparameters
self._verify_input()
self.fitparams = self.model_to_fit_params()
# a list of model.n_inputs
self.modeldims = [m.n_inputs for m in self.models]
# sum all model dimensions
self.ndim = np.sum(self.modeldims)
def model_to_fit_params(self):
fparams = []
fparams.extend(self.initvals)
for model in self.models:
params = model.parameters.tolist()
joint_params = self.jointparams[model]
param_metrics = model._param_metrics
for param_name in joint_params:
slice_ = param_metrics[param_name]["slice"]
del params[slice_]
fparams.extend(params)
return fparams
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
            the fitted parameters - result of one iteration of the
fitting algorithm
args : dict
tuple of measured and input coordinates
args is always passed as a tuple from optimize.leastsq
"""
lstsqargs = list(args)
fitted = []
fitparams = list(fps)
numjp = len(self.initvals)
# make a separate list of the joint fitted parameters
jointfitparams = fitparams[:numjp]
del fitparams[:numjp]
for model in self.models:
joint_params = self.jointparams[model]
margs = lstsqargs[: model.n_inputs + 1]
del lstsqargs[: model.n_inputs + 1]
# separate each model separately fitted parameters
numfp = len(model._parameters) - len(joint_params)
mfparams = fitparams[:numfp]
del fitparams[:numfp]
# recreate the model parameters
mparams = []
param_metrics = model._param_metrics
for param_name in model.param_names:
if param_name in joint_params:
index = joint_params.index(param_name)
# should do this with slices in case the
# parameter is not a number
mparams.extend([jointfitparams[index]])
else:
slice_ = param_metrics[param_name]["slice"]
plen = slice_.stop - slice_.start
mparams.extend(mfparams[:plen])
del mfparams[:plen]
modelfit = model.evaluate(margs[:-1], *mparams)
fitted.extend(modelfit - margs[-1])
return np.ravel(fitted)
def _verify_input(self):
if len(self.models) <= 1:
raise TypeError(f"Expected >1 models, {len(self.models)} is given")
if len(self.jointparams.keys()) < 2:
raise TypeError(
"At least two parameters are expected, "
f"{len(self.jointparams.keys())} is given"
)
for j in self.jointparams.keys():
if len(self.jointparams[j]) != len(self.initvals):
raise TypeError(
f"{len(self.jointparams[j])} parameter(s) "
f"provided but {len(self.initvals)} expected"
)
def __call__(self, *args):
"""
        Fit data to these models, keeping some of the parameters common
        to all of them.
"""
from scipy import optimize
if len(args) != reduce(lambda x, y: x + 1 + y + 1, self.modeldims):
raise ValueError(
f"Expected {reduce(lambda x, y: x + 1 + y + 1, self.modeldims)} "
f"coordinates in args but {len(args)} provided"
)
self.fitparams[:], _ = optimize.leastsq(
self.objective_function, self.fitparams, args=args
)
fparams = self.fitparams[:]
numjp = len(self.initvals)
# make a separate list of the joint fitted parameters
jointfitparams = fparams[:numjp]
del fparams[:numjp]
for model in self.models:
# extract each model's fitted parameters
joint_params = self.jointparams[model]
numfp = len(model._parameters) - len(joint_params)
mfparams = fparams[:numfp]
del fparams[:numfp]
# recreate the model parameters
mparams = []
param_metrics = model._param_metrics
for param_name in model.param_names:
if param_name in joint_params:
index = joint_params.index(param_name)
# should do this with slices in case the parameter
# is not a number
mparams.extend([jointfitparams[index]])
else:
slice_ = param_metrics[param_name]["slice"]
plen = slice_.stop - slice_.start
mparams.extend(mfparams[:plen])
del mfparams[:plen]
model.parameters = np.array(mparams)
def _convert_input(x, y, z=None, n_models=1, model_set_axis=0):
"""Convert inputs to float arrays."""
x = np.asanyarray(x, dtype=float)
y = np.asanyarray(y, dtype=float)
if z is not None:
z = np.asanyarray(z, dtype=float)
data_ndim, data_shape = z.ndim, z.shape
else:
data_ndim, data_shape = y.ndim, y.shape
# For compatibility with how the linear fitter code currently expects to
# work, shift the dependent variable's axes to the expected locations
if n_models > 1 or data_ndim > x.ndim:
if (model_set_axis or 0) >= data_ndim:
raise ValueError("model_set_axis out of range")
if data_shape[model_set_axis] != n_models:
raise ValueError(
"Number of data sets (y or z array) is expected to equal "
"the number of parameter sets"
)
if z is None:
# For a 1-D model the y coordinate's model-set-axis is expected to
# be last, so that its first dimension is the same length as the x
# coordinates. This is in line with the expectations of
# numpy.linalg.lstsq:
# https://numpy.org/doc/stable/reference/generated/numpy.linalg.lstsq.html
# That is, each model should be represented by a column. TODO:
# Obviously this is a detail of np.linalg.lstsq and should be
# handled specifically by any fitters that use it...
y = np.rollaxis(y, model_set_axis, y.ndim)
data_shape = y.shape[:-1]
else:
# Shape of z excluding model_set_axis
data_shape = z.shape[:model_set_axis] + z.shape[model_set_axis + 1 :]
if z is None:
if data_shape != x.shape:
raise ValueError("x and y should have the same shape")
farg = (x, y)
else:
if not (x.shape == y.shape == data_shape):
raise ValueError("x, y and z should have the same shape")
farg = (x, y, z)
return farg
# TODO: These utility functions are really particular to handling
# bounds/tied/fixed constraints for scipy.optimize optimizers that do not
# support them inherently; this needs to be reworked to be clear about this
# distinction (and the fact that these are not necessarily applicable to any
# arbitrary fitter--as evidenced for example by the fact that JointFitter has
# its own versions of these)
# TODO: Most of this code should be entirely rewritten; it should not be as
# inefficient as it is.
def fitter_to_model_params(model, fps, use_min_max_bounds=True):
"""
Constructs the full list of model parameters from the fitted and
constrained parameters.
Parameters
----------
    model :
        The model being fit.
    fps :
        The fitted parameter values to be assigned.
    use_min_max_bounds : bool, optional
        If `True` (the default), any bounds set on the model's parameters are
        enforced by clipping each fitted value to its parameter's bounds.
"""
_, fit_param_indices, _ = model_to_fit_params(model)
has_tied = any(model.tied.values())
has_fixed = any(model.fixed.values())
has_bound = any(b != (None, None) for b in model.bounds.values())
parameters = model.parameters
if not (has_tied or has_fixed or has_bound):
# We can just assign directly
model.parameters = fps
return
fit_param_indices = set(fit_param_indices)
offset = 0
param_metrics = model._param_metrics
for idx, name in enumerate(model.param_names):
if idx not in fit_param_indices:
continue
slice_ = param_metrics[name]["slice"]
shape = param_metrics[name]["shape"]
# This is determining which range of fps (the fitted parameters) maps
# to parameters of the model
size = reduce(operator.mul, shape, 1)
values = fps[offset : offset + size]
# Check bounds constraints
if model.bounds[name] != (None, None) and use_min_max_bounds:
_min, _max = model.bounds[name]
if _min is not None:
values = np.fmax(values, _min)
if _max is not None:
values = np.fmin(values, _max)
parameters[slice_] = values
offset += size
# Update model parameters before calling ``tied`` constraints.
model._array_to_parameters()
# This has to be done in a separate loop due to how tied parameters are
# currently evaluated (the fitted parameters need to actually be *set* on
# the model first, for use in evaluating the "tied" expression--it might be
    # better to change this at some point).
if has_tied:
for idx, name in enumerate(model.param_names):
if model.tied[name]:
value = model.tied[name](model)
slice_ = param_metrics[name]["slice"]
# To handle multiple tied constraints, model parameters
# need to be updated after each iteration.
parameters[slice_] = value
model._array_to_parameters()
@deprecated(
since="5.1",
message="private method: _fitter_to_model_params has been made public now",
)
def _fitter_to_model_params(model, fps):
return fitter_to_model_params(model, fps)
def model_to_fit_params(model):
"""
Convert a model instance's parameter array to an array that can be used
with a fitter that doesn't natively support fixed or tied parameters.
In particular, it removes fixed/tied parameters from the parameter
array.
These may be a subset of the model parameters, if some of them are held
constant or tied.
"""
fitparam_indices = list(range(len(model.param_names)))
model_params = model.parameters
model_bounds = list(model.bounds.values())
if any(model.fixed.values()) or any(model.tied.values()):
params = list(model_params)
param_metrics = model._param_metrics
for idx, name in list(enumerate(model.param_names))[::-1]:
if model.fixed[name] or model.tied[name]:
slice_ = param_metrics[name]["slice"]
del params[slice_]
del model_bounds[slice_]
del fitparam_indices[idx]
model_params = np.array(params)
for idx, bound in enumerate(model_bounds):
if bound[0] is None:
lower = -np.inf
else:
lower = bound[0]
if bound[1] is None:
upper = np.inf
else:
upper = bound[1]
model_bounds[idx] = (lower, upper)
model_bounds = tuple(zip(*model_bounds))
return model_params, fitparam_indices, model_bounds
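# A minimal sketch (parameter values are illustrative) of the round trip
# between a constrained model and the flat parameter array seen by the
# optimizer:
#
#     from astropy.modeling import models
#     g = models.Gaussian1D(amplitude=1, mean=0, stddev=1, fixed={"mean": True})
#     fitparams, indices, bounds = model_to_fit_params(g)
#     # fitparams -> array([1., 1.])  (amplitude and stddev; "mean" removed)
#     # indices   -> [0, 2]           (positions of the free parameters)
#     # bounds    -> lower/upper tuples with None replaced by -inf/+inf
#     fitter_to_model_params(g, fitparams * 2)
#     # amplitude and stddev updated (clipped to any bounds); "mean" untouched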
@deprecated(
since="5.1",
message="private method: _model_to_fit_params has been made public now",
)
def _model_to_fit_params(model):
return model_to_fit_params(model)
def _validate_constraints(supported_constraints, model):
"""Make sure model constraints are supported by the current fitter."""
message = "Optimizer cannot handle {0} constraints."
if any(model.fixed.values()) and "fixed" not in supported_constraints:
raise UnsupportedConstraintError(message.format("fixed parameter"))
if any(model.tied.values()) and "tied" not in supported_constraints:
raise UnsupportedConstraintError(message.format("tied parameter"))
if (
any(tuple(b) != (None, None) for b in model.bounds.values())
and "bounds" not in supported_constraints
):
raise UnsupportedConstraintError(message.format("bound parameter"))
if model.eqcons and "eqcons" not in supported_constraints:
raise UnsupportedConstraintError(message.format("equality"))
if model.ineqcons and "ineqcons" not in supported_constraints:
raise UnsupportedConstraintError(message.format("inequality"))
def _validate_model(model, supported_constraints):
"""
Check that model and fitter are compatible and return a copy of the model.
"""
if not model.fittable:
raise ValueError("Model does not appear to be fittable.")
if model.linear:
warnings.warn(
"Model is linear in parameters; consider using linear fitting methods.",
AstropyUserWarning,
)
elif len(model) != 1:
        # for now only single data sets can be fitted
raise ValueError("Non-linear fitters can only fit one data set at a time.")
_validate_constraints(supported_constraints, model)
model_copy = model.copy()
return model_copy
def populate_entry_points(entry_points):
"""
This injects entry points into the `astropy.modeling.fitting` namespace.
    This provides a means of inserting a fitting routine without requiring it
    to be merged into astropy's core.
Parameters
----------
entry_points : list of `~importlib.metadata.EntryPoint`
        Entry points are objects that encapsulate importable objects and are
        defined when a package is installed.
Notes
-----
An explanation of entry points can be found `here
<http://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins>`_
"""
for entry_point in entry_points:
name = entry_point.name
try:
entry_point = entry_point.load()
except Exception as e:
# This stops the fitting from choking if an entry_point produces an error.
warnings.warn(
AstropyUserWarning(
f"{type(e).__name__} error occurred in entry point {name}."
)
)
else:
if not isinstance(entry_point, type):
warnings.warn(
AstropyUserWarning(
f"Modeling entry point {name} expected to be a Class."
)
)
else:
if issubclass(entry_point, Fitter):
name = entry_point.__name__
globals()[name] = entry_point
__all__.append(name)
else:
warnings.warn(
AstropyUserWarning(
f"Modeling entry point {name} expected to extend "
"astropy.modeling.Fitter"
)
)
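# A third-party package can hook into this mechanism by declaring an entry
# point in the "astropy.modeling" group; for example, with setuptools
# (package and class names below are illustrative only):
#
#     [options.entry_points]
#     astropy.modeling =
#         MyFitter = my_package.fitters:MyFitter
#
# On import of this module, _populate_ep() below loads the object and injects
# it into this namespace, provided it is a class that subclasses Fitter.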
def _populate_ep():
# TODO: Exclusively use select when Python minversion is 3.10
ep = entry_points()
if hasattr(ep, "select"):
populate_entry_points(ep.select(group="astropy.modeling"))
else:
populate_entry_points(ep.get("astropy.modeling", []))
_populate_ep()
|
7720767677710091c7ef4cb6d3e478decb1afc6db12a7bd60be9948bf7aa8b9d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
"""
This module defines classes that deal with parameters.
It is unlikely users will need to work with these classes directly,
unless they define their own models.
"""
import functools
import numbers
import operator
import numpy as np
from astropy.units import MagUnit, Quantity
from astropy.utils import isiterable
from .utils import array_repr_oneline, get_inputs_and_params
__all__ = ["Parameter", "InputParameterError", "ParameterError"]
class ParameterError(Exception):
"""Generic exception class for all exceptions pertaining to Parameters."""
class InputParameterError(ValueError, ParameterError):
"""Used for incorrect input parameter values and definitions."""
class ParameterDefinitionError(ParameterError):
"""Exception in declaration of class-level Parameters."""
def _tofloat(value):
"""Convert a parameter to float or float array."""
if isiterable(value):
try:
value = np.asanyarray(value, dtype=float)
except (TypeError, ValueError):
# catch arrays with strings or user errors like different
# types of parameters in a parameter set
raise InputParameterError(
f"Parameter of {type(value)} could not be converted to float"
)
elif isinstance(value, Quantity):
# Quantities are fine as is
pass
elif isinstance(value, np.ndarray):
# A scalar/dimensionless array
value = float(value.item())
elif isinstance(value, (numbers.Number, np.number)) and not isinstance(value, bool):
value = float(value)
elif isinstance(value, bool):
raise InputParameterError(
"Expected parameter to be of numerical type, not boolean"
)
else:
raise InputParameterError(
f"Don't know how to convert parameter of {type(value)} to float"
)
return value
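# Illustrative conversions performed by _tofloat:
#
#     _tofloat(3)                    # -> 3.0
#     _tofloat([1, 2])               # -> array([1., 2.])
#     _tofloat(Quantity(1.0, "m"))   # -> returned unchanged as a Quantity
#     _tofloat(True)                 # -> raises InputParameterError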
# Helpers for implementing operator overloading on Parameter
def _binary_arithmetic_operation(op, reflected=False):
@functools.wraps(op)
def wrapper(self, val):
if self.unit is not None:
self_value = Quantity(self.value, self.unit)
else:
self_value = self.value
if reflected:
return op(val, self_value)
else:
return op(self_value, val)
return wrapper
def _binary_comparison_operation(op):
@functools.wraps(op)
def wrapper(self, val):
if self.unit is not None:
self_value = Quantity(self.value, self.unit)
else:
self_value = self.value
return op(self_value, val)
return wrapper
def _unary_arithmetic_operation(op):
@functools.wraps(op)
def wrapper(self):
if self.unit is not None:
self_value = Quantity(self.value, self.unit)
else:
self_value = self.value
return op(self_value)
return wrapper
class Parameter:
"""
Wraps individual parameters.
Since 4.0 Parameters are no longer descriptors and are based on a new
implementation of the Parameter class. Parameters now (as of 4.0) store
    their values locally (rather than, as previously, in the associated model).
This class represents a model's parameter (in a somewhat broad sense). It
serves a number of purposes:
1) A type to be recognized by models and treated specially at class
initialization (i.e., if it is found that there is a class definition
of a Parameter, the model initializer makes a copy at the instance level).
2) Managing the handling of allowable parameter values and once defined,
ensuring updates are consistent with the Parameter definition. This
includes the optional use of units and quantities as well as transforming
values to an internally consistent representation (e.g., from degrees to
radians through the use of getters and setters).
3) Holding attributes of parameters relevant to fitting, such as whether
the parameter may be varied in fitting, or whether there are constraints
that must be satisfied.
See :ref:`astropy:modeling-parameters` for more details.
Parameters
----------
name : str
parameter name
.. warning::
The fact that `Parameter` accepts ``name`` as an argument is an
implementation detail, and should not be used directly. When
defining a new `Model` class, parameter names are always
automatically defined by the class attribute they're assigned to.
description : str
parameter description
default : float or array
default value to use for this parameter
unit : `~astropy.units.Unit`
if specified, the parameter will be in these units, and when the
        parameter is updated in the future, it should be set to a
:class:`~astropy.units.Quantity` that has equivalent units.
getter : callable or `None`, optional
A function that wraps the raw (internal) value of the parameter
when returning the value through the parameter proxy (e.g., a
parameter may be stored internally as radians but returned to
the user as degrees). The internal value is what is used for
computations while the proxy value is what users will interact
with (passing and viewing). If ``getter`` is not `None`, then a
        ``setter`` must also be provided.
setter : callable or `None`, optional
A function that wraps any values assigned to this parameter; should
be the inverse of ``getter``. If ``setter`` is not `None`, then a
        ``getter`` must also be provided.
fixed : bool
if True the parameter is not varied during fitting
tied : callable or False
if callable is supplied it provides a way to link the value of this
parameter to another parameter (or some other arbitrary function)
min : float
the lower bound of a parameter
max : float
the upper bound of a parameter
bounds : tuple
specify min and max as a single tuple--bounds may not be specified
simultaneously with min or max
mag : bool
Specify if the unit of the parameter can be a Magnitude unit or not
"""
constraints = ("fixed", "tied", "bounds")
"""
Types of constraints a parameter can have. Excludes 'min' and 'max'
which are just aliases for the first and second elements of the 'bounds'
constraint (which is represented as a 2-tuple). 'prior' and 'posterior'
are available for use by user fitters but are not used by any built-in
fitters as of this writing.
"""
def __init__(
self,
name="",
description="",
default=None,
unit=None,
getter=None,
setter=None,
fixed=False,
tied=False,
min=None,
max=None,
bounds=None,
prior=None,
posterior=None,
mag=False,
):
super().__init__()
self._model = None
self._model_required = False
if (setter is not None and getter is None) or (
getter is not None and setter is None
):
raise ValueError("setter and getter must both be input")
self._setter = self._create_value_wrapper(setter, None)
self._getter = self._create_value_wrapper(getter, None)
self._name = name
self.__doc__ = self._description = description.strip()
# We only need to perform this check on unbound parameters
if isinstance(default, Quantity):
if unit is not None and not unit.is_equivalent(default.unit):
raise ParameterDefinitionError(
f"parameter default {default} does not have units equivalent to "
f"the required unit {unit}"
)
unit = default.unit
default = default.value
self._default = default
self._mag = mag
self._set_unit(unit, force=True)
# Internal units correspond to raw_units held by the model in the
# previous implementation. The private _getter and _setter methods
# use this to convert to and from the public unit defined for the
# parameter.
self._internal_unit = None
if not self._model_required:
if self._default is not None:
self.value = self._default
else:
self._value = None
# NOTE: These are *default* constraints--on model instances constraints
# are taken from the model if set, otherwise the defaults set here are
# used
if bounds is not None:
if min is not None or max is not None:
raise ValueError(
"bounds may not be specified simultaneously with min or "
f"max when instantiating Parameter {name}"
)
else:
bounds = (min, max)
self._fixed = fixed
self._tied = tied
self._bounds = bounds
self._order = None
self._validator = None
self._prior = prior
self._posterior = posterior
self._std = None
def __set_name__(self, owner, name):
self._name = name
def __len__(self):
val = self.value
if val.shape == ():
return 1
else:
return val.shape[0]
def __getitem__(self, key):
value = self.value
if len(value.shape) == 0:
# Wrap the value in a list so that getitem can work for sensible
# indices like [0] and [-1]
value = [value]
return value[key]
def __setitem__(self, key, value):
# Get the existing value and check whether it even makes sense to
# apply this index
oldvalue = self.value
if isinstance(key, slice):
if len(oldvalue[key]) == 0:
raise InputParameterError(
"Slice assignment outside the parameter dimensions for "
f"'{self.name}'"
)
for idx, val in zip(range(*key.indices(len(self))), value):
self.__setitem__(idx, val)
else:
try:
oldvalue[key] = value
except IndexError:
raise InputParameterError(
f"Input dimension {key} invalid for {self.name!r} parameter with "
f"dimension {value.shape[0]}"
) # likely wrong
def __repr__(self):
args = f"'{self._name}'"
args += f", value={self.value}"
if self.unit is not None:
args += f", unit={self.unit}"
for cons in self.constraints:
val = getattr(self, cons)
if val not in (None, False, (None, None)):
# Maybe non-obvious, but False is the default for the fixed and
# tied constraints
args += f", {cons}={val}"
return f"{self.__class__.__name__}({args})"
@property
def name(self):
"""Parameter name."""
return self._name
@property
def default(self):
"""Parameter default value."""
return self._default
@property
def value(self):
"""The unadorned value proxied by this parameter."""
if self._getter is None and self._setter is None:
value = self._value
else:
            # This new implementation uses the name internal_unit in place of
            # the raw_unit used previously. Internal values and units are what
            # the parameter actually uses for computation, as opposed to the
            # public units that the parameter advertises to users.
if self.internal_unit:
value = self._getter(
self._internal_value, self.internal_unit, self.unit
).value
else:
value = self._getter(self._internal_value)
if value.size == 1:
# return scalar number as np.float64 object
return np.float64(value.item())
return np.float64(value)
@value.setter
def value(self, value):
if isinstance(value, Quantity):
raise TypeError(
"The .value property on parameters should be set"
" to unitless values, not Quantity objects. To set"
"a parameter to a quantity simply set the "
"parameter directly without using .value"
)
if self._setter is None:
self._value = np.array(value, dtype=np.float64)
else:
self._internal_value = np.array(self._setter(value), dtype=np.float64)
@property
def unit(self):
"""
The unit attached to this parameter, if any.
On unbound parameters (i.e. parameters accessed through the
model class, rather than a model instance) this is the required/
default unit for the parameter.
"""
return self._unit
@unit.setter
def unit(self, unit):
if self.unit is None:
raise ValueError(
"Cannot attach units to parameters that were "
"not initially specified with units"
)
else:
raise ValueError(
"Cannot change the unit attribute directly, "
"instead change the parameter to a new quantity"
)
def _set_unit(self, unit, force=False):
if force:
if isinstance(unit, MagUnit) and not self._mag:
raise ValueError(
"This parameter does not support the magnitude units such as"
f" {unit}"
)
self._unit = unit
else:
self.unit = unit
@property
def internal_unit(self):
"""
Return the internal unit the parameter uses for the internal value stored.
"""
return self._internal_unit
@internal_unit.setter
def internal_unit(self, internal_unit):
"""
        Set the unit the parameter uses to convert a supplied value to the
        representation used internally.
"""
self._internal_unit = internal_unit
@property
def input_unit(self):
"""Unit for the input value."""
if self.internal_unit is not None:
return self.internal_unit
elif self.unit is not None:
return self.unit
else:
return None
@property
def quantity(self):
"""
This parameter, as a :class:`~astropy.units.Quantity` instance.
"""
if self.unit is None:
return None
return self.value * self.unit
@quantity.setter
def quantity(self, quantity):
if not isinstance(quantity, Quantity):
raise TypeError(
"The .quantity attribute should be set to a Quantity object"
)
self.value = quantity.value
self._set_unit(quantity.unit, force=True)
@property
def shape(self):
"""The shape of this parameter's value array."""
if self._setter is None:
return self._value.shape
return self._internal_value.shape
@shape.setter
def shape(self, value):
if isinstance(self.value, np.generic):
if value not in ((), (1,)):
raise ValueError("Cannot assign this shape to a scalar quantity")
else:
self.value.shape = value
@property
def size(self):
"""The size of this parameter's value array."""
return np.size(self.value)
@property
def std(self):
"""Standard deviation, if available from fit."""
return self._std
@std.setter
def std(self, value):
self._std = value
@property
def prior(self):
return self._prior
@prior.setter
def prior(self, val):
self._prior = val
@property
def posterior(self):
return self._posterior
@posterior.setter
def posterior(self, val):
self._posterior = val
@property
def fixed(self):
"""
Boolean indicating if the parameter is kept fixed during fitting.
"""
return self._fixed
@fixed.setter
def fixed(self, value):
"""Fix a parameter."""
if not isinstance(value, bool):
raise ValueError("Value must be boolean")
self._fixed = value
@property
def tied(self):
"""
Indicates that this parameter is linked to another one.
A callable which provides the relationship of the two parameters.
"""
return self._tied
@tied.setter
def tied(self, value):
"""Tie a parameter."""
if not callable(value) and value not in (False, None):
raise TypeError("Tied must be a callable or set to False or None")
self._tied = value
@property
def bounds(self):
"""The minimum and maximum values of a parameter as a tuple."""
return self._bounds
@bounds.setter
def bounds(self, value):
"""Set the minimum and maximum values of a parameter from a tuple."""
_min, _max = value
if _min is not None:
if not isinstance(_min, (numbers.Number, Quantity)):
raise TypeError("Min value must be a number or a Quantity")
if isinstance(_min, Quantity):
_min = float(_min.value)
else:
_min = float(_min)
if _max is not None:
if not isinstance(_max, (numbers.Number, Quantity)):
raise TypeError("Max value must be a number or a Quantity")
if isinstance(_max, Quantity):
_max = float(_max.value)
else:
_max = float(_max)
self._bounds = (_min, _max)
@property
def min(self):
"""A value used as a lower bound when fitting a parameter."""
return self.bounds[0]
@min.setter
def min(self, value):
"""Set a minimum value of a parameter."""
self.bounds = (value, self.max)
@property
def max(self):
"""A value used as an upper bound when fitting a parameter."""
return self.bounds[1]
@max.setter
def max(self, value):
"""Set a maximum value of a parameter."""
self.bounds = (self.min, value)
@property
def validator(self):
"""
Used as a decorator to set the validator method for a `Parameter`.
The validator method validates any value set for that parameter.
It takes two arguments--``self``, which refers to the `Model`
instance (remember, this is a method defined on a `Model`), and
the value being set for this parameter. The validator method's
return value is ignored, but it may raise an exception if the value
set on the parameter is invalid (typically an `InputParameterError`
should be raised, though this is not currently a requirement).
Note: Using this method as a decorator will cause problems with
pickling the model. An alternative is to assign the actual validator
function to ``Parameter._validator`` (see examples in modeling).
"""
def validator(func, self=self):
if callable(func):
self._validator = func
return self
else:
raise ValueError(
"This decorator method expects a callable.\n"
"The use of this method as a direct validator is\n"
"deprecated; use the new validate method instead\n"
)
return validator
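    # Typical usage sketch (names are illustrative): within a Model subclass,
    #
    #     class MyModel(Fittable1DModel):
    #         stddev = Parameter(default=1)
    #
    #         @stddev.validator
    #         def stddev(self, value):
    #             if np.any(np.asanyarray(value) <= 0):
    #                 raise InputParameterError("stddev must be positive")
    #
    # The stored validator is invoked (via ``validate`` below) whenever the
    # parameter is assigned on a model instance.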
def validate(self, value):
"""Run the validator on this parameter."""
if self._validator is not None and self._model is not None:
self._validator(self._model, value)
def copy(
self,
name=None,
description=None,
default=None,
unit=None,
getter=None,
setter=None,
fixed=False,
tied=False,
min=None,
max=None,
bounds=None,
prior=None,
posterior=None,
):
"""
Make a copy of this `Parameter`, overriding any of its core attributes
in the process (or an exact copy).
The arguments to this method are the same as those for the `Parameter`
initializer. This simply returns a new `Parameter` instance with any
or all of the attributes overridden, and so returns the equivalent of:
.. code:: python
Parameter(self.name, self.description, ...)
"""
kwargs = locals().copy()
del kwargs["self"]
for key, value in kwargs.items():
if value is None:
                # Annoying special cases for min/max, which are just aliases for
# the components of bounds
if key in ("min", "max"):
continue
else:
if hasattr(self, key):
value = getattr(self, key)
elif hasattr(self, "_" + key):
value = getattr(self, "_" + key)
kwargs[key] = value
return self.__class__(**kwargs)
@property
def model(self):
"""Return the model this parameter is associated with."""
return self._model
@model.setter
def model(self, value):
self._model = value
self._setter = self._create_value_wrapper(self._setter, value)
self._getter = self._create_value_wrapper(self._getter, value)
if self._model_required:
if self._default is not None:
self.value = self._default
else:
self._value = None
@property
def _raw_value(self):
"""
Currently for internal use only.
Like Parameter.value but does not pass the result through
Parameter.getter. By design this should only be used from bound
parameters.
        This will probably be removed or reworked at some point in the
process of rethinking how parameter values are stored/updated.
"""
if self._setter:
return self._internal_value
return self.value
def _create_value_wrapper(self, wrapper, model):
"""Wraps a getter/setter function to support optionally passing in
a reference to the model object as the second argument.
If a model is tied to this parameter and its getter/setter supports
a second argument then this creates a partial function using the model
instance as the second argument.
"""
if isinstance(wrapper, np.ufunc):
if wrapper.nin != 1:
raise TypeError(
"A numpy.ufunc used for Parameter "
"getter/setter may only take one input "
"argument"
)
return _wrap_ufunc(wrapper)
elif wrapper is None:
# Just allow non-wrappers to fall through silently, for convenience
return None
else:
inputs, _ = get_inputs_and_params(wrapper)
nargs = len(inputs)
if nargs == 1:
pass
elif nargs == 2:
self._model_required = True
if model is not None:
# Don't make a partial function unless we're tied to a
# specific model instance
model_arg = inputs[1].name
wrapper = functools.partial(wrapper, **{model_arg: model})
else:
raise TypeError(
"Parameter getter/setter must be a function "
"of either one or two arguments"
)
return wrapper
def __array__(self, dtype=None):
# Make np.asarray(self) work a little more straightforwardly
arr = np.asarray(self.value, dtype=dtype)
if self.unit is not None:
arr = Quantity(arr, self.unit, copy=False, subok=True)
return arr
def __bool__(self):
return bool(np.all(self.value))
__add__ = _binary_arithmetic_operation(operator.add)
__radd__ = _binary_arithmetic_operation(operator.add, reflected=True)
__sub__ = _binary_arithmetic_operation(operator.sub)
__rsub__ = _binary_arithmetic_operation(operator.sub, reflected=True)
__mul__ = _binary_arithmetic_operation(operator.mul)
__rmul__ = _binary_arithmetic_operation(operator.mul, reflected=True)
__pow__ = _binary_arithmetic_operation(operator.pow)
__rpow__ = _binary_arithmetic_operation(operator.pow, reflected=True)
__truediv__ = _binary_arithmetic_operation(operator.truediv)
__rtruediv__ = _binary_arithmetic_operation(operator.truediv, reflected=True)
__eq__ = _binary_comparison_operation(operator.eq)
__ne__ = _binary_comparison_operation(operator.ne)
__lt__ = _binary_comparison_operation(operator.lt)
__gt__ = _binary_comparison_operation(operator.gt)
__le__ = _binary_comparison_operation(operator.le)
__ge__ = _binary_comparison_operation(operator.ge)
__neg__ = _unary_arithmetic_operation(operator.neg)
__abs__ = _unary_arithmetic_operation(operator.abs)
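# A brief sketch of the getter/setter mechanism described in the Parameter
# docstring (assumed usage, not part of this module): a parameter stored
# internally in radians but exposed in degrees could be declared as
#
#     angle = Parameter(default=0.0, getter=np.rad2deg, setter=np.deg2rad)
#
# Both callables are single-argument numpy ufuncs, which
# _create_value_wrapper accepts; assigning ``model.angle = 90`` then stores
# pi/2 internally while ``model.angle.value`` returns 90.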
def param_repr_oneline(param):
"""
Like array_repr_oneline but works on `Parameter` objects and supports
rendering parameters with units like quantities.
"""
out = array_repr_oneline(param.value)
if param.unit is not None:
out = f"{out} {param.unit!s}"
return out
def _wrap_ufunc(ufunc):
def _wrapper(value, raw_unit=None, orig_unit=None):
"""
        Wrap ufuncs to support passing in units.
        ``raw_unit`` is the unit of the input value; ``orig_unit`` is the unit
        of the value after the ufunc has been applied. It is assumed that
        ufunc(raw_unit) == orig_unit.
"""
if orig_unit is not None:
return ufunc(value) * orig_unit
elif raw_unit is not None:
return ufunc(value * raw_unit)
return ufunc(value)
return _wrapper
|
807b9b662c1656310d3702a65944004fb3080e063328fa3e185b80cbfd9adf2d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines base classes for all models. The base class of all
models is `~astropy.modeling.Model`. `~astropy.modeling.FittableModel` is
the base class for all fittable models. Fittable models can be linear or
nonlinear in a regression analysis sense.
All models provide a `__call__` method which performs the transformation in
a purely mathematical way, i.e. the models are unitless. Model instances can
represent either a single model, or a "model set" representing multiple copies
of the same type of model, but with potentially different values of the
parameters in each model making up the set.
"""
# pylint: disable=invalid-name, protected-access, redefined-outer-name
import abc
import copy
import functools
import inspect
import itertools
import operator
import types
from collections import defaultdict, deque
from inspect import signature
from itertools import chain
import numpy as np
from astropy.nddata.utils import add_array, extract_array
from astropy.table import Table
from astropy.units import Quantity, UnitsError, dimensionless_unscaled
from astropy.units.utils import quantity_asanyarray
from astropy.utils import (
IncompatibleShapeError,
check_broadcast,
find_current_module,
indent,
isiterable,
metadata,
sharedmethod,
)
from astropy.utils.codegen import make_function_with_signature
from .bounding_box import CompoundBoundingBox, ModelBoundingBox
from .parameters import InputParameterError, Parameter, _tofloat, param_repr_oneline
from .utils import (
_combine_equivalency_dict,
_ConstraintsDict,
_SpecialOperatorsDict,
combine_labels,
get_inputs_and_params,
make_binary_operator_eval,
)
__all__ = [
"Model",
"FittableModel",
"Fittable1DModel",
"Fittable2DModel",
"CompoundModel",
"fix_inputs",
"custom_model",
"ModelDefinitionError",
"bind_bounding_box",
"bind_compound_bounding_box",
]
def _model_oper(oper, **kwargs):
"""
Returns a function that evaluates a given Python arithmetic operator
between two models. The operator should be given as a string, like ``'+'``
or ``'**'``.
"""
return lambda left, right: CompoundModel(oper, left, right, **kwargs)
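# The operator methods installed on Model subclasses by the metaclass below
# all funnel through _model_oper; a rough sketch of the resulting behavior:
#
#     from astropy.modeling import models
#     g = models.Gaussian1D(amplitude=1, mean=0, stddev=1)
#     c = models.Const1D(amplitude=2)
#     summed = g + c                 # CompoundModel with op '+'
#     piped = g | models.Scale(3)    # '|' feeds outputs into the next model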
class ModelDefinitionError(TypeError):
"""Used for incorrect models definitions."""
class _ModelMeta(abc.ABCMeta):
"""
Metaclass for Model.
Currently just handles auto-generating the param_names list based on
Parameter descriptors declared at the class-level of Model subclasses.
"""
_is_dynamic = False
"""
This flag signifies whether this class was created in the "normal" way,
with a class statement in the body of a module, as opposed to a call to
`type` or some other metaclass constructor, such that the resulting class
does not belong to a specific module. This is important for pickling of
dynamic classes.
This flag is always forced to False for new classes, so code that creates
dynamic classes should manually set it to True on those classes when
creating them.
"""
# Default empty dict for _parameters_, which will be empty on model
# classes that don't have any Parameters
def __new__(mcls, name, bases, members, **kwds):
# See the docstring for _is_dynamic above
if "_is_dynamic" not in members:
members["_is_dynamic"] = mcls._is_dynamic
opermethods = [
("__add__", _model_oper("+")),
("__sub__", _model_oper("-")),
("__mul__", _model_oper("*")),
("__truediv__", _model_oper("/")),
("__pow__", _model_oper("**")),
("__or__", _model_oper("|")),
("__and__", _model_oper("&")),
("_fix_inputs", _model_oper("fix_inputs")),
]
members["_parameters_"] = {
k: v for k, v in members.items() if isinstance(v, Parameter)
}
for opermethod, opercall in opermethods:
members[opermethod] = opercall
cls = super().__new__(mcls, name, bases, members, **kwds)
param_names = list(members["_parameters_"])
# Need to walk each base MRO to collect all parameter names
for base in bases:
for tbase in base.__mro__:
if issubclass(tbase, Model):
# Preserve order of definitions
param_names = list(tbase._parameters_) + param_names
# Remove duplicates (arising from redefinition in subclass).
param_names = list(dict.fromkeys(param_names))
if cls._parameters_:
if hasattr(cls, "_param_names"):
# Slight kludge to support compound models, where
# cls.param_names is a property; could be improved with a
# little refactoring but fine for now
cls._param_names = tuple(param_names)
else:
cls.param_names = tuple(param_names)
return cls
def __init__(cls, name, bases, members, **kwds):
super().__init__(name, bases, members, **kwds)
cls._create_inverse_property(members)
cls._create_bounding_box_property(members)
pdict = {}
for base in bases:
for tbase in base.__mro__:
if issubclass(tbase, Model):
for parname, val in cls._parameters_.items():
pdict[parname] = val
cls._handle_special_methods(members, pdict)
def __repr__(cls):
"""
Custom repr for Model subclasses.
"""
return cls._format_cls_repr()
def _repr_pretty_(cls, p, cycle):
"""
Repr for IPython's pretty printer.
By default IPython "pretty prints" classes, so we need to implement
this so that IPython displays the custom repr for Models.
"""
p.text(repr(cls))
def __reduce__(cls):
if not cls._is_dynamic:
# Just return a string specifying where the class can be imported
# from
return cls.__name__
members = dict(cls.__dict__)
# Delete any ABC-related attributes--these will be restored when
# the class is reconstructed:
for key in list(members):
if key.startswith("_abc_"):
del members[key]
# Delete custom __init__ and __call__ if they exist:
for key in ("__init__", "__call__"):
if key in members:
del members[key]
return (type(cls), (cls.__name__, cls.__bases__, members))
@property
def name(cls):
"""
The name of this model class--equivalent to ``cls.__name__``.
This attribute is provided for symmetry with the `Model.name` attribute
of model instances.
"""
return cls.__name__
@property
def _is_concrete(cls):
"""
A class-level property that determines whether the class is a concrete
implementation of a Model--i.e. it is not some abstract base class or
internal implementation detail (i.e. begins with '_').
"""
return not (cls.__name__.startswith("_") or inspect.isabstract(cls))
def rename(cls, name=None, inputs=None, outputs=None):
"""
Creates a copy of this model class with a new name, inputs or outputs.
The new class is technically a subclass of the original class, so that
instance and type checks will still work. For example::
>>> from astropy.modeling.models import Rotation2D
>>> SkyRotation = Rotation2D.rename('SkyRotation')
>>> SkyRotation
<class 'astropy.modeling.core.SkyRotation'>
Name: SkyRotation (Rotation2D)
N_inputs: 2
N_outputs: 2
Fittable parameters: ('angle',)
>>> issubclass(SkyRotation, Rotation2D)
True
>>> r = SkyRotation(90)
>>> isinstance(r, Rotation2D)
True
"""
mod = find_current_module(2)
if mod:
modname = mod.__name__
else:
modname = "__main__"
if name is None:
name = cls.name
if inputs is None:
inputs = cls.inputs
else:
if not isinstance(inputs, tuple):
raise TypeError("Expected 'inputs' to be a tuple of strings.")
elif len(inputs) != len(cls.inputs):
raise ValueError(f"{cls.name} expects {len(cls.inputs)} inputs")
if outputs is None:
outputs = cls.outputs
else:
if not isinstance(outputs, tuple):
raise TypeError("Expected 'outputs' to be a tuple of strings.")
elif len(outputs) != len(cls.outputs):
raise ValueError(f"{cls.name} expects {len(cls.outputs)} outputs")
new_cls = type(name, (cls,), {"inputs": inputs, "outputs": outputs})
new_cls.__module__ = modname
new_cls.__qualname__ = name
return new_cls
def _create_inverse_property(cls, members):
inverse = members.get("inverse")
if inverse is None or cls.__bases__[0] is object:
            # The latter clause is to prevent the below code from running on
# the Model base class, which implements the default getter and
# setter for .inverse
return
if isinstance(inverse, property):
# We allow the @property decorator to be omitted entirely from
# the class definition, though its use should be encouraged for
# clarity
inverse = inverse.fget
# Store the inverse getter internally, then delete the given .inverse
# attribute so that cls.inverse resolves to Model.inverse instead
cls._inverse = inverse
del cls.inverse
def _create_bounding_box_property(cls, members):
"""
Takes any bounding_box defined on a concrete Model subclass (either
as a fixed tuple or a property or method) and wraps it in the generic
getter/setter interface for the bounding_box attribute.
"""
# TODO: Much of this is verbatim from _create_inverse_property--I feel
# like there could be a way to generify properties that work this way,
# but for the time being that would probably only confuse things more.
bounding_box = members.get("bounding_box")
if bounding_box is None or cls.__bases__[0] is object:
return
if isinstance(bounding_box, property):
bounding_box = bounding_box.fget
if not callable(bounding_box):
# See if it's a hard-coded bounding_box (as a sequence) and
# normalize it
try:
bounding_box = ModelBoundingBox.validate(
cls, bounding_box, _preserve_ignore=True
)
except ValueError as exc:
raise ModelDefinitionError(exc.args[0])
else:
sig = signature(bounding_box)
# May be a method that only takes 'self' as an argument (like a
# property, but the @property decorator was forgotten)
#
# However, if the method takes additional arguments then this is a
# parameterized bounding box and should be callable
if len(sig.parameters) > 1:
bounding_box = cls._create_bounding_box_subclass(bounding_box, sig)
# See the Model.bounding_box getter definition for how this attribute
# is used
cls._bounding_box = bounding_box
del cls.bounding_box
def _create_bounding_box_subclass(cls, func, sig):
"""
For Models that take optional arguments for defining their bounding
box, we create a subclass of ModelBoundingBox with a ``__call__`` method
that supports those additional arguments.
Takes the function's Signature as an argument since that is already
computed in _create_bounding_box_property, so no need to duplicate that
effort.
"""
# TODO: Might be convenient if calling the bounding box also
# automatically sets the _user_bounding_box. So that
#
# >>> model.bounding_box(arg=1)
#
# in addition to returning the computed bbox, also sets it, so that
# it's a shortcut for
#
# >>> model.bounding_box = model.bounding_box(arg=1)
#
# Not sure if that would be non-obvious / confusing though...
def __call__(self, **kwargs):
return func(self._model, **kwargs)
kwargs = []
for idx, param in enumerate(sig.parameters.values()):
if idx == 0:
# Presumed to be a 'self' argument
continue
if param.default is param.empty:
raise ModelDefinitionError(
f"The bounding_box method for {cls.name} is not correctly "
"defined: If defined as a method all arguments to that "
"method (besides self) must be keyword arguments with "
"default values that can be used to compute a default "
"bounding box."
)
kwargs.append((param.name, param.default))
__call__.__signature__ = sig
return type(
f"{cls.name}ModelBoundingBox", (ModelBoundingBox,), {"__call__": __call__}
)
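    # Sketch of the pattern handled above (illustrative): a model may define
    #
    #     def bounding_box(self, factor=50):
    #         return (self.mean - factor * self.stddev,
    #                 self.mean + factor * self.stddev)
    #
    # Because the method takes keyword arguments (with defaults) besides
    # ``self``, it is wrapped in a ModelBoundingBox subclass whose __call__
    # forwards those keywords, so a call like ``model.bounding_box(factor=2)``
    # can supply them.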
def _handle_special_methods(cls, members, pdict):
# Handle init creation from inputs
def update_wrapper(wrapper, cls):
# Set up the new __call__'s metadata attributes as though it were
# manually defined in the class definition
# A bit like functools.update_wrapper but uses the class instead of
# the wrapped function
wrapper.__module__ = cls.__module__
wrapper.__doc__ = getattr(cls, wrapper.__name__).__doc__
if hasattr(cls, "__qualname__"):
wrapper.__qualname__ = f"{cls.__qualname__}.{wrapper.__name__}"
if (
"__call__" not in members
and "n_inputs" in members
and isinstance(members["n_inputs"], int)
and members["n_inputs"] > 0
):
# Don't create a custom __call__ for classes that already have one
# explicitly defined (this includes the Model base class, and any
            # other classes that manually override __call__).
def __call__(self, *inputs, **kwargs):
"""Evaluate this model on the supplied inputs."""
return super(cls, self).__call__(*inputs, **kwargs)
# When called, models can take two optional keyword arguments:
#
# * model_set_axis, which indicates (for multi-dimensional input)
# which axis is used to indicate different models
#
# * equivalencies, a dictionary of equivalencies to be applied to
# the input values, where each key should correspond to one of
# the inputs.
#
# The following code creates the __call__ function with these
# two keyword arguments.
args = ("self",)
kwargs = {
"model_set_axis": None,
"with_bounding_box": False,
"fill_value": np.nan,
"equivalencies": None,
"inputs_map": None,
}
new_call = make_function_with_signature(
__call__, args, kwargs, varargs="inputs", varkwargs="new_inputs"
)
# The following makes it look like __call__
# was defined in the class
update_wrapper(new_call, cls)
cls.__call__ = new_call
if (
"__init__" not in members
and not inspect.isabstract(cls)
and cls._parameters_
):
# Build list of all parameters including inherited ones
# If *all* the parameters have default values we can make them
# keyword arguments; otherwise they must all be positional
# arguments
if all(p.default is not None for p in pdict.values()):
args = ("self",)
kwargs = []
for param_name, param_val in pdict.items():
default = param_val.default
unit = param_val.unit
# If the unit was specified in the parameter but the
# default is not a Quantity, attach the unit to the
# default.
if unit is not None:
default = Quantity(default, unit, copy=False, subok=True)
kwargs.append((param_name, default))
else:
args = ("self",) + tuple(pdict.keys())
kwargs = {}
def __init__(self, *params, **kwargs):
return super(cls, self).__init__(*params, **kwargs)
new_init = make_function_with_signature(
__init__, args, kwargs, varkwargs="kwargs"
)
update_wrapper(new_init, cls)
cls.__init__ = new_init
# *** Arithmetic operators for creating compound models ***
__add__ = _model_oper("+")
__sub__ = _model_oper("-")
__mul__ = _model_oper("*")
__truediv__ = _model_oper("/")
__pow__ = _model_oper("**")
__or__ = _model_oper("|")
__and__ = _model_oper("&")
_fix_inputs = _model_oper("fix_inputs")
# *** Other utilities ***
def _format_cls_repr(cls, keywords=[]):
"""
Internal implementation of ``__repr__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__repr__`` while keeping the same basic
formatting.
"""
# For the sake of familiarity start the output with the standard class
# __repr__
parts = [super().__repr__()]
if not cls._is_concrete:
return parts[0]
def format_inheritance(cls):
bases = []
for base in cls.mro()[1:]:
if not issubclass(base, Model):
continue
elif inspect.isabstract(base) or base.__name__.startswith("_"):
break
bases.append(base.name)
if bases:
return f"{cls.name} ({' -> '.join(bases)})"
return cls.name
try:
default_keywords = [
("Name", format_inheritance(cls)),
("N_inputs", cls.n_inputs),
("N_outputs", cls.n_outputs),
]
if cls.param_names:
default_keywords.append(("Fittable parameters", cls.param_names))
for keyword, value in default_keywords + keywords:
if value is not None:
parts.append(f"{keyword}: {value}")
return "\n".join(parts)
except Exception:
# If any of the above formatting fails fall back on the basic repr
# (this is particularly useful in debugging)
return parts[0]
class Model(metaclass=_ModelMeta):
"""
Base class for all models.
This is an abstract class and should not be instantiated directly.
The following initialization arguments apply to the majority of Model
subclasses by default (exceptions include specialized utility models
like `~astropy.modeling.mappings.Mapping`). Parametric models take all
their parameters as arguments, followed by any of the following optional
keyword arguments:
Parameters
----------
name : str, optional
A human-friendly name associated with this model instance
(particularly useful for identifying the individual components of a
compound model).
meta : dict, optional
An optional dict of user-defined metadata to attach to this model.
How this is used and interpreted is up to the user or individual use
case.
n_models : int, optional
If given an integer greater than 1, a *model set* is instantiated
instead of a single model. This affects how the parameter arguments
are interpreted. In this case each parameter must be given as a list
or array--elements of this array are taken along the first axis (or
``model_set_axis`` if specified), such that the Nth element is the
value of that parameter for the Nth model in the set.
See the section on model sets in the documentation for more details.
model_set_axis : int, optional
This argument only applies when creating a model set (i.e. ``n_models >
1``). It changes how parameter values are interpreted. Normally the
first axis of each input parameter array (properly the 0th axis) is
taken as the axis corresponding to the model sets. However, any axis
of an input array may be taken as this "model set axis". This accepts
negative integers as well--for example use ``model_set_axis=-1`` if the
last (most rapidly changing) axis should be associated with the model
        sets. Also, ``model_set_axis=False`` can be used to indicate that a given
input should be used to evaluate all the models in the model set.
fixed : dict, optional
Dictionary ``{parameter_name: bool}`` setting the fixed constraint
for one or more parameters. `True` means the parameter is held fixed
during fitting and is prevented from updates once an instance of the
model has been created.
Alternatively the `~astropy.modeling.Parameter.fixed` property of a
parameter may be used to lock or unlock individual parameters.
tied : dict, optional
Dictionary ``{parameter_name: callable}`` of parameters which are
linked to some other parameter. The dictionary values are callables
providing the linking relationship.
Alternatively the `~astropy.modeling.Parameter.tied` property of a
parameter may be used to set the ``tied`` constraint on individual
parameters.
bounds : dict, optional
A dictionary ``{parameter_name: value}`` of lower and upper bounds of
parameters. Keys are parameter names. Values are a list or a tuple
of length 2 giving the desired range for the parameter.
Alternatively the `~astropy.modeling.Parameter.min` and
`~astropy.modeling.Parameter.max` or
        `~astropy.modeling.Parameter.bounds` properties of a parameter may be
used to set bounds on individual parameters.
eqcons : list, optional
List of functions of length n such that ``eqcons[j](x0, *args) == 0.0``
in a successfully optimized problem.
ineqcons : list, optional
        List of functions of length n such that ``ineqcons[j](x0, *args) >=
        0.0`` in a successfully optimized problem.
Examples
--------
>>> from astropy.modeling import models
>>> def tie_center(model):
... mean = 50 * model.stddev
... return mean
>>> tied_parameters = {'mean': tie_center}
Specify that ``'mean'`` is a tied parameter in one of two ways:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... tied=tied_parameters)
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.mean.tied
False
>>> g1.mean.tied = tie_center
>>> g1.mean.tied
<function tie_center at 0x...>
Fixed parameters:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... fixed={'stddev': True})
>>> g1.stddev.fixed
True
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.stddev.fixed
False
>>> g1.stddev.fixed = True
>>> g1.stddev.fixed
True
"""
parameter_constraints = Parameter.constraints
"""
Primarily for informational purposes, these are the types of constraints
that can be set on a model's parameters.
"""
model_constraints = ("eqcons", "ineqcons")
"""
Primarily for informational purposes, these are the types of constraints
that constrain model evaluation.
"""
param_names = ()
"""
Names of the parameters that describe models of this type.
The parameters in this tuple are in the same order they should be passed in
when initializing a model of a specific type. Some types of models, such
as polynomial models, have a different number of parameters depending on
some other property of the model, such as the degree.
When defining a custom model class the value of this attribute is
automatically set by the `~astropy.modeling.Parameter` attributes defined
in the class body.
"""
n_inputs = 0
"""The number of inputs."""
n_outputs = 0
""" The number of outputs."""
standard_broadcasting = True
fittable = False
linear = True
_separable = None
""" A boolean flag to indicate whether a model is separable."""
meta = metadata.MetaData()
"""A dict-like object to store optional information."""
# By default models either use their own inverse property or have no
# inverse at all, but users may also assign a custom inverse to a model,
# optionally; in that case it is of course up to the user to determine
# whether their inverse is *actually* an inverse to the model they assign
# it to.
_inverse = None
_user_inverse = None
_bounding_box = None
_user_bounding_box = None
_has_inverse_bounding_box = False
# Default n_models attribute, so that __len__ is still defined even when a
# model hasn't completed initialization yet
_n_models = 1
# New classes can set this as a boolean value.
# It is converted to a dictionary mapping input name to a boolean value.
_input_units_strict = False
# Allow dimensionless input (and corresponding output). If this is True,
# input values to evaluate will gain the units specified in input_units. If
# this is a dictionary then it should map input name to a bool to allow
# dimensionless numbers for that input.
# Only has an effect if input_units is defined.
_input_units_allow_dimensionless = False
# Default equivalencies to apply to input values. If set, this should be a
# dictionary where each key is a string that corresponds to one of the
# model inputs. Only has an effect if input_units is defined.
input_units_equivalencies = None
# Covariance matrix can be set by fitter if available.
# If cov_matrix is available, then std will set as well
_cov_matrix = None
_stds = None
def __init_subclass__(cls, **kwargs):
super().__init_subclass__()
def __init__(self, *args, meta=None, name=None, **kwargs):
super().__init__()
self._default_inputs_outputs()
if meta is not None:
self.meta = meta
self._name = name
# add parameters to instance level by walking MRO list
mro = self.__class__.__mro__
for cls in mro:
if issubclass(cls, Model):
for parname, val in cls._parameters_.items():
newpar = copy.deepcopy(val)
newpar.model = self
if parname not in self.__dict__:
self.__dict__[parname] = newpar
self._initialize_constraints(kwargs)
kwargs = self._initialize_setters(kwargs)
# Remaining keyword args are either parameter values or invalid
# Parameter values must be passed in as keyword arguments in order to
# distinguish them
self._initialize_parameters(args, kwargs)
self._initialize_slices()
self._initialize_unit_support()
def _default_inputs_outputs(self):
if self.n_inputs == 1 and self.n_outputs == 1:
self._inputs = ("x",)
self._outputs = ("y",)
elif self.n_inputs == 2 and self.n_outputs == 1:
self._inputs = ("x", "y")
self._outputs = ("z",)
else:
try:
self._inputs = tuple("x" + str(idx) for idx in range(self.n_inputs))
self._outputs = tuple("x" + str(idx) for idx in range(self.n_outputs))
except TypeError:
# self.n_inputs and self.n_outputs are properties
# This is the case when subclasses of Model do not define
# ``n_inputs``, ``n_outputs``, ``inputs`` or ``outputs``.
self._inputs = ()
self._outputs = ()
def _initialize_setters(self, kwargs):
"""
This exists to inject defaults for settable properties for models
originating from `custom_model`.
"""
if hasattr(self, "_settable_properties"):
setters = {
name: kwargs.pop(name, default)
for name, default in self._settable_properties.items()
}
for name, value in setters.items():
setattr(self, name, value)
return kwargs
@property
def inputs(self):
return self._inputs
@inputs.setter
def inputs(self, val):
if len(val) != self.n_inputs:
raise ValueError(
f"Expected {self.n_inputs} number of inputs, got {len(val)}."
)
self._inputs = val
self._initialize_unit_support()
@property
def outputs(self):
return self._outputs
@outputs.setter
def outputs(self, val):
if len(val) != self.n_outputs:
raise ValueError(
f"Expected {self.n_outputs} number of outputs, got {len(val)}."
)
self._outputs = val
@property
def n_inputs(self):
# TODO: remove the code in the ``if`` block when support
# for models with ``inputs`` as class variables is removed.
if hasattr(self.__class__, "n_inputs") and isinstance(
self.__class__.n_inputs, property
):
try:
return len(self.__class__.inputs)
except TypeError:
try:
return len(self.inputs)
except AttributeError:
return 0
return self.__class__.n_inputs
@property
def n_outputs(self):
# TODO: remove the code in the ``if`` block when support
# for models with ``outputs`` as class variables is removed.
if hasattr(self.__class__, "n_outputs") and isinstance(
self.__class__.n_outputs, property
):
try:
return len(self.__class__.outputs)
except TypeError:
try:
return len(self.outputs)
except AttributeError:
return 0
return self.__class__.n_outputs
def _calculate_separability_matrix(self):
"""
This is a hook which customises the behavior of modeling.separable.
This allows complex subclasses to customise the separability matrix.
If it returns `NotImplemented` the default behavior is used.
"""
return NotImplemented
def _initialize_unit_support(self):
"""
Convert self._input_units_strict and
self.input_units_allow_dimensionless to dictionaries
mapping input name to a boolean value.
"""
if isinstance(self._input_units_strict, bool):
self._input_units_strict = {
key: self._input_units_strict for key in self.inputs
}
if isinstance(self._input_units_allow_dimensionless, bool):
self._input_units_allow_dimensionless = {
key: self._input_units_allow_dimensionless for key in self.inputs
}
@property
def input_units_strict(self):
"""
Enforce strict units on inputs to evaluate. If this is set to True,
input values to evaluate will be in the exact units specified by
input_units. If the input quantities are convertible to input_units,
they are converted. If this is a dictionary then it should map input
name to a bool to set strict input units for that parameter.
"""
val = self._input_units_strict
if isinstance(val, bool):
return {key: val for key in self.inputs}
return dict(zip(self.inputs, val.values()))
@property
def input_units_allow_dimensionless(self):
"""
Allow dimensionless input (and corresponding output). If this is True,
input values to evaluate will gain the units specified in input_units. If
this is a dictionary then it should map input name to a bool to allow
dimensionless numbers for that input.
Only has an effect if input_units is defined.
"""
val = self._input_units_allow_dimensionless
if isinstance(val, bool):
return {key: val for key in self.inputs}
return dict(zip(self.inputs, val.values()))
@property
def uses_quantity(self):
"""
True if this model has been created with `~astropy.units.Quantity`
objects or if there are no parameters.
This can be used to determine if this model should be evaluated with
`~astropy.units.Quantity` or regular floats.
"""
pisq = [isinstance(p, Quantity) for p in self._param_sets(units=True)]
return (len(pisq) == 0) or any(pisq)
def __repr__(self):
return self._format_repr()
def __str__(self):
return self._format_str()
def __len__(self):
return self._n_models
@staticmethod
def _strip_ones(intup):
return tuple(item for item in intup if item != 1)
def __setattr__(self, attr, value):
if isinstance(self, CompoundModel):
param_names = self._param_names
param_names = self.param_names
if param_names is not None and attr in self.param_names:
param = self.__dict__[attr]
value = _tofloat(value)
if param._validator is not None:
param._validator(self, value)
# check consistency with previous shape and size
eshape = self._param_metrics[attr]["shape"]
if eshape == ():
eshape = (1,)
vshape = np.array(value).shape
if vshape == ():
vshape = (1,)
esize = self._param_metrics[attr]["size"]
if np.size(value) != esize or self._strip_ones(vshape) != self._strip_ones(
eshape
):
raise InputParameterError(
f"Value for parameter {attr} does not match shape or size\nexpected"
f" by model ({vshape}, {np.size(value)}) vs ({eshape}, {esize})"
)
if param.unit is None:
if isinstance(value, Quantity):
param._unit = value.unit
param.value = value.value
else:
param.value = value
else:
if not isinstance(value, Quantity):
raise UnitsError(
f"The '{param.name}' parameter should be given as a"
" Quantity because it was originally "
"initialized as a Quantity"
)
param._unit = value.unit
param.value = value.value
else:
if attr in ["fittable", "linear"]:
self.__dict__[attr] = value
else:
super().__setattr__(attr, value)
def _pre_evaluate(self, *args, **kwargs):
"""
Model specific input setup that needs to occur prior to model evaluation.
"""
# Broadcast inputs into common size
inputs, broadcasted_shapes = self.prepare_inputs(*args, **kwargs)
# Setup actual model evaluation method
parameters = self._param_sets(raw=True, units=True)
def evaluate(_inputs):
return self.evaluate(*chain(_inputs, parameters))
return evaluate, inputs, broadcasted_shapes, kwargs
def get_bounding_box(self, with_bbox=True):
"""
Return the ``bounding_box`` of a model if it exists or ``None``
otherwise.
Parameters
----------
with_bbox :
The value of the ``with_bounding_box`` keyword argument
when calling the model. Default is `True` for usage when
looking up the model's ``bounding_box`` without risk of error.
"""
bbox = None
if not isinstance(with_bbox, bool) or with_bbox:
try:
bbox = self.bounding_box
except NotImplementedError:
pass
if isinstance(bbox, CompoundBoundingBox) and not isinstance(
with_bbox, bool
):
bbox = bbox[with_bbox]
return bbox
@property
def _argnames(self):
"""The inputs used to determine input_shape for bounding_box evaluation."""
return self.inputs
def _validate_input_shape(
self, _input, idx, argnames, model_set_axis, check_model_set_axis
):
"""Perform basic validation of a single model input's shape.
        The shape must have at least the minimum number of dimensions required
        by the given model_set_axis.
Returns the shape of the input if validation succeeds.
"""
input_shape = np.shape(_input)
# Ensure that the input's model_set_axis matches the model's
# n_models
if input_shape and check_model_set_axis:
# Note: Scalar inputs *only* get a pass on this
if len(input_shape) < model_set_axis + 1:
raise ValueError(
f"For model_set_axis={model_set_axis}, all inputs must be at "
f"least {model_set_axis + 1}-dimensional."
)
if input_shape[model_set_axis] != self._n_models:
try:
argname = argnames[idx]
except IndexError:
# the case of model.inputs = ()
argname = str(idx)
raise ValueError(
f"Input argument '{argname}' does not have the correct dimensions"
f" in model_set_axis={model_set_axis} for a model set with"
f" n_models={self._n_models}."
)
return input_shape
def _validate_input_shapes(self, inputs, argnames, model_set_axis):
"""
Perform basic validation of model inputs
--that they are mutually broadcastable and that they have
the minimum dimensions for the given model_set_axis.
If validation succeeds, returns the total shape that will result from
broadcasting the input arrays with each other.
"""
check_model_set_axis = self._n_models > 1 and model_set_axis is not False
all_shapes = []
for idx, _input in enumerate(inputs):
all_shapes.append(
self._validate_input_shape(
_input, idx, argnames, model_set_axis, check_model_set_axis
)
)
input_shape = check_broadcast(*all_shapes)
if input_shape is None:
raise ValueError(
"All inputs must have identical shapes or must be scalars."
)
return input_shape
def input_shape(self, inputs):
"""Get input shape for bounding_box evaluation."""
return self._validate_input_shapes(inputs, self._argnames, self.model_set_axis)
def _generic_evaluate(self, evaluate, _inputs, fill_value, with_bbox):
"""Generic model evaluation routine.
Selects and evaluates model with or without bounding_box enforcement.
"""
# Evaluate the model using the prepared evaluation method either
# enforcing the bounding_box or not.
bbox = self.get_bounding_box(with_bbox)
if (not isinstance(with_bbox, bool) or with_bbox) and bbox is not None:
outputs = bbox.evaluate(evaluate, _inputs, fill_value)
else:
outputs = evaluate(_inputs)
return outputs
def _post_evaluate(self, inputs, outputs, broadcasted_shapes, with_bbox, **kwargs):
"""
Model specific post evaluation processing of outputs.
"""
if self.get_bounding_box(with_bbox) is None and self.n_outputs == 1:
outputs = (outputs,)
outputs = self.prepare_outputs(broadcasted_shapes, *outputs, **kwargs)
outputs = self._process_output_units(inputs, outputs)
if self.n_outputs == 1:
return outputs[0]
return outputs
@property
def bbox_with_units(self):
return not isinstance(self, CompoundModel)
def __call__(self, *args, **kwargs):
"""
Evaluate this model using the given input(s) and the parameter values
that were specified when the model was instantiated.
"""
# Turn any keyword arguments into positional arguments.
args, kwargs = self._get_renamed_inputs_as_positional(*args, **kwargs)
# Read model evaluation related parameters
with_bbox = kwargs.pop("with_bounding_box", False)
fill_value = kwargs.pop("fill_value", np.nan)
# prepare for model evaluation (overridden in CompoundModel)
evaluate, inputs, broadcasted_shapes, kwargs = self._pre_evaluate(
*args, **kwargs
)
outputs = self._generic_evaluate(evaluate, inputs, fill_value, with_bbox)
# post-process evaluation results (overridden in CompoundModel)
return self._post_evaluate(
inputs, outputs, broadcasted_shapes, with_bbox, **kwargs
)
def _get_renamed_inputs_as_positional(self, *args, **kwargs):
def _keyword2positional(kwargs):
# Inputs were passed as keyword (not positional) arguments.
# Because the signature of the ``__call__`` is defined at
# the class level, the name of the inputs cannot be changed at
# the instance level and the old names are always present in the
# signature of the method. In order to use the new names of the
# inputs, the old names are taken out of ``kwargs``, the input
# values are sorted in the order of self.inputs and passed as
# positional arguments to ``__call__``.
# These are the keys that are always present as keyword arguments.
keys = [
"model_set_axis",
"with_bounding_box",
"fill_value",
"equivalencies",
"inputs_map",
]
new_inputs = {}
# kwargs contain the names of the new inputs + ``keys``
allkeys = list(kwargs.keys())
# Remove the names of the new inputs from kwargs and save them
# to a dict ``new_inputs``.
for key in allkeys:
if key not in keys:
new_inputs[key] = kwargs[key]
del kwargs[key]
return new_inputs, kwargs
n_args = len(args)
new_inputs, kwargs = _keyword2positional(kwargs)
n_all_args = n_args + len(new_inputs)
if n_all_args < self.n_inputs:
raise ValueError(
f"Missing input arguments - expected {self.n_inputs}, got {n_all_args}"
)
elif n_all_args > self.n_inputs:
raise ValueError(
f"Too many input arguments - expected {self.n_inputs}, got {n_all_args}"
)
if n_args == 0:
# Create positional arguments from the keyword arguments in ``new_inputs``.
new_args = []
for k in self.inputs:
new_args.append(new_inputs[k])
elif n_args != self.n_inputs:
# Some inputs are passed as positional, others as keyword arguments.
args = list(args)
# Create positional arguments from the keyword arguments in ``new_inputs``.
new_args = []
for k in self.inputs:
if k in new_inputs:
new_args.append(new_inputs[k])
else:
new_args.append(args[0])
del args[0]
else:
new_args = args
return new_args, kwargs
# *** Properties ***
@property
def name(self):
"""User-provided name for this model instance."""
return self._name
@name.setter
def name(self, val):
"""Assign a (new) name to this model."""
self._name = val
@property
def model_set_axis(self):
"""
        The index of the model set axis--that is, the axis of a parameter array
        that indicates to which model in a model set each parameter value
        pertains--as specified when the model was initialized.
See the documentation on :ref:`astropy:modeling-model-sets`
for more details.
"""
return self._model_set_axis
@property
def param_sets(self):
"""
Return parameters as a pset.
        This is a list with one item per parameter, where each item is an array of
that parameter's values across all parameter sets, with the last axis
associated with the parameter set.
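        Examples
        --------
        An illustrative sketch for a single model with scalar parameters:
        >>> from astropy.modeling.models import Gaussian1D
        >>> g = Gaussian1D(amplitude=1.0, mean=0.5, stddev=0.1)
        >>> g.param_sets.tolist()
        [[1.0], [0.5], [0.1]]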
"""
return self._param_sets()
@property
def parameters(self):
"""
A flattened array of all parameter values in all parameter sets.
Fittable parameters maintain this list and fitters modify it.
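        Examples
        --------
        An illustrative sketch (the parameter values are arbitrary):
        >>> from astropy.modeling.models import Gaussian1D
        >>> g = Gaussian1D(amplitude=2.0, mean=1.0, stddev=0.5)
        >>> g.parameters.tolist()
        [2.0, 1.0, 0.5]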
"""
# Currently the sequence of a model's parameters must be contiguous
# within the _parameters array (which may be a view of a larger array,
# for example when taking a sub-expression of a compound model), so
# the assumption here is reliable:
if not self.param_names:
# Trivial, but not unheard of
return self._parameters
self._parameters_to_array()
start = self._param_metrics[self.param_names[0]]["slice"].start
stop = self._param_metrics[self.param_names[-1]]["slice"].stop
return self._parameters[start:stop]
@parameters.setter
def parameters(self, value):
"""
Assigning to this attribute updates the parameters array rather than
replacing it.
"""
if not self.param_names:
return
start = self._param_metrics[self.param_names[0]]["slice"].start
stop = self._param_metrics[self.param_names[-1]]["slice"].stop
try:
value = np.array(value).flatten()
self._parameters[start:stop] = value
except ValueError as e:
raise InputParameterError(
"Input parameter values not compatible with the model "
f"parameters array: {e!r}"
)
self._array_to_parameters()
@property
def sync_constraints(self):
"""
This is a boolean property that indicates whether or not accessing constraints
        automatically checks the constituent models' current values. It defaults to True
on creation of a model, but for fitting purposes it should be set to False
for performance reasons.
"""
if not hasattr(self, "_sync_constraints"):
self._sync_constraints = True
return self._sync_constraints
@sync_constraints.setter
def sync_constraints(self, value):
if not isinstance(value, bool):
raise ValueError("sync_constraints only accepts True or False as values")
self._sync_constraints = value
@property
def fixed(self):
"""
A ``dict`` mapping parameter names to their fixed constraint.
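        Examples
        --------
        An illustrative sketch (the choice of model and constraint is arbitrary):
        >>> from astropy.modeling.models import Gaussian1D
        >>> g = Gaussian1D(fixed={"mean": True})
        >>> g.fixed["mean"]
        True
        >>> g.fixed["amplitude"]
        False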
"""
if not hasattr(self, "_fixed") or self.sync_constraints:
self._fixed = _ConstraintsDict(self, "fixed")
return self._fixed
@property
def bounds(self):
"""
A ``dict`` mapping parameter names to their upper and lower bounds as
``(min, max)`` tuples or ``[min, max]`` lists.
"""
if not hasattr(self, "_bounds") or self.sync_constraints:
self._bounds = _ConstraintsDict(self, "bounds")
return self._bounds
@property
def tied(self):
"""
A ``dict`` mapping parameter names to their tied constraint.
"""
if not hasattr(self, "_tied") or self.sync_constraints:
self._tied = _ConstraintsDict(self, "tied")
return self._tied
@property
def eqcons(self):
"""List of parameter equality constraints."""
return self._mconstraints["eqcons"]
@property
def ineqcons(self):
"""List of parameter inequality constraints."""
return self._mconstraints["ineqcons"]
def has_inverse(self):
"""
Returns True if the model has an analytic or user
inverse defined.
"""
try:
self.inverse # noqa: B018
except NotImplementedError:
return False
return True
@property
def inverse(self):
"""
Returns a new `~astropy.modeling.Model` instance which performs the
inverse transform, if an analytic inverse is defined for this model.
Even on models that don't have an inverse defined, this property can be
set with a manually-defined inverse, such a pre-computed or
experimentally determined inverse (often given as a
`~astropy.modeling.polynomial.PolynomialModel`, but not by
requirement).
A custom inverse can be deleted with ``del model.inverse``. In this
case the model's inverse is reset to its default, if a default exists
(otherwise the default is to raise `NotImplementedError`).
Note to authors of `~astropy.modeling.Model` subclasses: To define an
inverse for a model simply override this property to return the
appropriate model representing the inverse. The machinery that will
make the inverse manually-overridable is added automatically by the
base class.
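        Examples
        --------
        An illustrative sketch using `~astropy.modeling.models.Shift`, which
        defines an analytic inverse:
        >>> from astropy.modeling.models import Shift
        >>> s = Shift(2.0)
        >>> s.inverse(s(1.0))  # doctest: +FLOAT_CMP
        1.0
        >>> s.inverse = Shift(-2.0)  # manually override the inverse
        >>> s.has_user_inverse
        True
        >>> del s.inverse  # restore the default analytic inverse
        >>> s.has_user_inverse
        False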
"""
if self._user_inverse is not None:
return self._user_inverse
elif self._inverse is not None:
result = self._inverse()
if result is not NotImplemented:
if not self._has_inverse_bounding_box:
result.bounding_box = None
return result
raise NotImplementedError(
"No analytical or user-supplied inverse transform "
"has been implemented for this model."
)
@inverse.setter
def inverse(self, value):
if not isinstance(value, (Model, type(None))):
raise ValueError(
"The ``inverse`` attribute may be assigned a `Model` "
"instance or `None` (where `None` explicitly forces the "
"model to have no inverse."
)
self._user_inverse = value
@inverse.deleter
def inverse(self):
"""
Resets the model's inverse to its default (if one exists, otherwise
the model will have no inverse).
"""
try:
del self._user_inverse
except AttributeError:
pass
@property
def has_user_inverse(self):
"""
A flag indicating whether or not a custom inverse model has been
assigned to this model by a user, via assignment to ``model.inverse``.
"""
return self._user_inverse is not None
@property
def bounding_box(self):
r"""
A `tuple` of length `n_inputs` defining the bounding box limits, or
        raises `NotImplementedError` if no bounding_box is defined.
The default limits are given by a ``bounding_box`` property or method
defined in the class body of a specific model. If not defined then
this property just raises `NotImplementedError` by default (but may be
assigned a custom value by a user). ``bounding_box`` can be set
manually to an array-like object of shape ``(model.n_inputs, 2)``. For
further usage, see :ref:`astropy:bounding-boxes`
The limits are ordered according to the `numpy` ``'C'`` indexing
convention, and are the reverse of the model input order,
e.g. for inputs ``('x', 'y', 'z')``, ``bounding_box`` is defined:
* for 1D: ``(x_low, x_high)``
* for 2D: ``((y_low, y_high), (x_low, x_high))``
* for 3D: ``((z_low, z_high), (y_low, y_high), (x_low, x_high))``
Examples
--------
Setting the ``bounding_box`` limits for a 1D and 2D model:
>>> from astropy.modeling.models import Gaussian1D, Gaussian2D
>>> model_1d = Gaussian1D()
>>> model_2d = Gaussian2D(x_stddev=1, y_stddev=1)
>>> model_1d.bounding_box = (-5, 5)
>>> model_2d.bounding_box = ((-6, 6), (-5, 5))
Setting the bounding_box limits for a user-defined 3D `custom_model`:
>>> from astropy.modeling.models import custom_model
>>> def const3d(x, y, z, amp=1):
... return amp
...
>>> Const3D = custom_model(const3d)
>>> model_3d = Const3D()
>>> model_3d.bounding_box = ((-6, 6), (-5, 5), (-4, 4))
To reset ``bounding_box`` to its default limits just delete the
user-defined value--this will reset it back to the default defined
on the class:
>>> del model_1d.bounding_box
To disable the bounding box entirely (including the default),
set ``bounding_box`` to `None`:
>>> model_1d.bounding_box = None
>>> model_1d.bounding_box # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
NotImplementedError: No bounding box is defined for this model
(note: the bounding box was explicitly disabled for this model;
use `del model.bounding_box` to restore the default bounding box,
if one is defined for this model).
"""
if self._user_bounding_box is not None:
if self._user_bounding_box is NotImplemented:
raise NotImplementedError(
"No bounding box is defined for this model (note: the "
"bounding box was explicitly disabled for this model; "
"use `del model.bounding_box` to restore the default "
"bounding box, if one is defined for this model)."
)
return self._user_bounding_box
elif self._bounding_box is None:
raise NotImplementedError("No bounding box is defined for this model.")
elif isinstance(self._bounding_box, ModelBoundingBox):
# This typically implies a hard-coded bounding box. This will
# probably be rare, but it is an option
return self._bounding_box
elif isinstance(self._bounding_box, types.MethodType):
return ModelBoundingBox.validate(self, self._bounding_box())
else:
# The only other allowed possibility is that it's a ModelBoundingBox
# subclass, so we call it with its default arguments and return an
# instance of it (that can be called to recompute the bounding box
# with any optional parameters)
# (In other words, in this case self._bounding_box is a *class*)
bounding_box = self._bounding_box((), model=self)()
return self._bounding_box(bounding_box, model=self)
@bounding_box.setter
def bounding_box(self, bounding_box):
"""
Assigns the bounding box limits.
"""
if bounding_box is None:
cls = None
# We use this to explicitly set an unimplemented bounding box (as
# opposed to no user bounding box defined)
bounding_box = NotImplemented
elif isinstance(bounding_box, (CompoundBoundingBox, dict)):
cls = CompoundBoundingBox
elif isinstance(self._bounding_box, type) and issubclass(
self._bounding_box, ModelBoundingBox
):
cls = self._bounding_box
else:
cls = ModelBoundingBox
if cls is not None:
try:
bounding_box = cls.validate(self, bounding_box, _preserve_ignore=True)
except ValueError as exc:
raise ValueError(exc.args[0])
self._user_bounding_box = bounding_box
def set_slice_args(self, *args):
if isinstance(self._user_bounding_box, CompoundBoundingBox):
self._user_bounding_box.slice_args = args
else:
raise RuntimeError("The bounding_box for this model is not compound")
@bounding_box.deleter
def bounding_box(self):
self._user_bounding_box = None
@property
def has_user_bounding_box(self):
"""
A flag indicating whether or not a custom bounding_box has been
assigned to this model by a user, via assignment to
``model.bounding_box``.
"""
return self._user_bounding_box is not None
@property
def cov_matrix(self):
"""
Fitter should set covariance matrix, if available.
"""
return self._cov_matrix
@cov_matrix.setter
def cov_matrix(self, cov):
self._cov_matrix = cov
unfix_untied_params = [
p
for p in self.param_names
if (self.fixed[p] is False) and (self.tied[p] is False)
]
        if isinstance(cov, list):  # model set
param_stds = []
for c in cov:
param_stds.append(
[np.sqrt(x) if x > 0 else None for x in np.diag(c.cov_matrix)]
)
for p, param_name in enumerate(unfix_untied_params):
par = getattr(self, param_name)
par.std = [item[p] for item in param_stds]
setattr(self, param_name, par)
else:
param_stds = [
np.sqrt(x) if x > 0 else None for x in np.diag(cov.cov_matrix)
]
for param_name in unfix_untied_params:
par = getattr(self, param_name)
par.std = param_stds.pop(0)
setattr(self, param_name, par)
@property
def stds(self):
"""
Standard deviation of parameters, if covariance matrix is available.
"""
return self._stds
@stds.setter
def stds(self, stds):
self._stds = stds
@property
def separable(self):
"""A flag indicating whether a model is separable."""
if self._separable is not None:
return self._separable
raise NotImplementedError(
'The "separable" property is not defined for '
f"model {self.__class__.__name__}"
)
# *** Public methods ***
def without_units_for_data(self, **kwargs):
"""
Return an instance of the model for which the parameter values have
been converted to the right units for the data, then the units have
been stripped away.
The input and output Quantity objects should be given as keyword
arguments.
Notes
-----
This method is needed in order to be able to fit models with units in
the parameters, since we need to temporarily strip away the units from
the model during the fitting (which might be done by e.g. scipy
functions).
The units that the parameters should be converted to are not
necessarily the units of the input data, but are derived from them.
Model subclasses that want fitting to work in the presence of
quantities need to define a ``_parameter_units_for_data_units`` method
that takes the input and output units (as two dictionaries) and
returns a dictionary giving the target units for each parameter.
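        Examples
        --------
        An illustrative sketch of the stripping step used during fitting (the
        model and data quantities are arbitrary):
        >>> import astropy.units as u
        >>> from astropy.modeling.models import Gaussian1D
        >>> g = Gaussian1D(amplitude=1 * u.Jy, mean=5 * u.GHz, stddev=1 * u.GHz)
        >>> g_unitless = g.without_units_for_data(x=[4, 5, 6] * u.GHz,
        ...                                       y=[0, 1, 0] * u.Jy)
        >>> g_unitless.mean.unit is None
        True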
"""
model = self.copy()
inputs_unit = {
inp: getattr(kwargs[inp], "unit", dimensionless_unscaled)
for inp in self.inputs
if kwargs[inp] is not None
}
outputs_unit = {
out: getattr(kwargs[out], "unit", dimensionless_unscaled)
for out in self.outputs
if kwargs[out] is not None
}
parameter_units = self._parameter_units_for_data_units(
inputs_unit, outputs_unit
)
for name, unit in parameter_units.items():
parameter = getattr(model, name)
if parameter.unit is not None:
parameter.value = parameter.quantity.to(unit).value
parameter._set_unit(None, force=True)
if isinstance(model, CompoundModel):
model.strip_units_from_tree()
return model
def output_units(self, **kwargs):
"""
Return a dictionary of output units for this model given a dictionary
of fitting inputs and outputs.
The input and output Quantity objects should be given as keyword
arguments.
Notes
-----
This method is needed in order to be able to fit models with units in
the parameters, since we need to temporarily strip away the units from
the model during the fitting (which might be done by e.g. scipy
functions).
        This method will force extra model evaluations, which may be computationally
expensive. To avoid this, one can add a return_units property to the model,
see :ref:`astropy:models_return_units`.
"""
units = self.return_units
if units is None or units == {}:
inputs = {inp: kwargs[inp] for inp in self.inputs}
values = self(**inputs)
if self.n_outputs == 1:
values = (values,)
units = {
out: getattr(values[index], "unit", dimensionless_unscaled)
for index, out in enumerate(self.outputs)
}
return units
def strip_units_from_tree(self):
for item in self._leaflist:
for parname in item.param_names:
par = getattr(item, parname)
par._set_unit(None, force=True)
def with_units_from_data(self, **kwargs):
"""
Return an instance of the model which has units for which the parameter
values are compatible with the data units specified.
The input and output Quantity objects should be given as keyword
arguments.
Notes
-----
This method is needed in order to be able to fit models with units in
the parameters, since we need to temporarily strip away the units from
the model during the fitting (which might be done by e.g. scipy
functions).
The units that the parameters will gain are not necessarily the units
of the input data, but are derived from them. Model subclasses that
want fitting to work in the presence of quantities need to define a
``_parameter_units_for_data_units`` method that takes the input and output
units (as two dictionaries) and returns a dictionary giving the target
units for each parameter.
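        Examples
        --------
        An illustrative sketch, reattaching units after the stripping step shown
        in `without_units_for_data` (the model and data quantities are arbitrary):
        >>> import astropy.units as u
        >>> from astropy.modeling.models import Gaussian1D
        >>> g = Gaussian1D(amplitude=1 * u.Jy, mean=5 * u.GHz, stddev=1 * u.GHz)
        >>> g_unitless = g.without_units_for_data(x=[4, 5, 6] * u.GHz,
        ...                                       y=[0, 1, 0] * u.Jy)
        >>> g_refit = g_unitless.with_units_from_data(x=[4, 5, 6] * u.GHz,
        ...                                           y=[0, 1, 0] * u.Jy)
        >>> g_refit.mean.unit
        Unit("GHz")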
"""
model = self.copy()
inputs_unit = {
inp: getattr(kwargs[inp], "unit", dimensionless_unscaled)
for inp in self.inputs
if kwargs[inp] is not None
}
outputs_unit = {
out: getattr(kwargs[out], "unit", dimensionless_unscaled)
for out in self.outputs
if kwargs[out] is not None
}
parameter_units = self._parameter_units_for_data_units(
inputs_unit, outputs_unit
)
# We are adding units to parameters that already have a value, but we
# don't want to convert the parameter, just add the unit directly,
# hence the call to ``_set_unit``.
for name, unit in parameter_units.items():
parameter = getattr(model, name)
parameter._set_unit(unit, force=True)
return model
@property
def _has_units(self):
# Returns True if any of the parameters have units
return any(getattr(self, param).unit is not None for param in self.param_names)
@property
def _supports_unit_fitting(self):
# If the model has a ``_parameter_units_for_data_units`` method, this
# indicates that we have enough information to strip the units away
# and add them back after fitting, when fitting quantities
return hasattr(self, "_parameter_units_for_data_units")
@abc.abstractmethod
def evaluate(self, *args, **kwargs):
"""Evaluate the model on some input variables."""
def sum_of_implicit_terms(self, *args, **kwargs):
"""
Evaluate the sum of any implicit model terms on some input variables.
This includes any fixed terms used in evaluating a linear model that
do not have corresponding parameters exposed to the user. The
prototypical case is `astropy.modeling.functional_models.Shift`, which
corresponds to a function y = a + bx, where b=1 is intrinsically fixed
by the type of model, such that sum_of_implicit_terms(x) == x. This
method is needed by linear fitters to correct the dependent variable
for the implicit term(s) when solving for the remaining terms
        (i.e. a = y - bx).
"""
def render(self, out=None, coords=None):
"""
Evaluate a model at fixed positions, respecting the ``bounding_box``.
The key difference relative to evaluating the model directly is that
this method is limited to a bounding box if the `Model.bounding_box`
attribute is set.
Parameters
----------
out : `numpy.ndarray`, optional
An array that the evaluated model will be added to. If this is not
given (or given as ``None``), a new array will be created.
coords : array-like, optional
An array to be used to translate from the model's input coordinates
to the ``out`` array. It should have the property that
``self(coords)`` yields the same shape as ``out``. If ``out`` is
not specified, ``coords`` will be used to determine the shape of
the returned array. If this is not provided (or None), the model
will be evaluated on a grid determined by `Model.bounding_box`.
Returns
-------
out : `numpy.ndarray`
The model added to ``out`` if ``out`` is not ``None``, or else a
new array from evaluating the model over ``coords``.
If ``out`` and ``coords`` are both `None`, the returned array is
limited to the `Model.bounding_box` limits. If
            `Model.bounding_box` is `None`, ``out`` or ``coords`` must be
passed.
Raises
------
ValueError
            If ``coords`` are not given and the `Model.bounding_box` of
this model is not set.
Examples
--------
:ref:`astropy:bounding-boxes`
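        An illustrative sketch (the model, bounding box, and output shape are
        arbitrary):
        >>> import numpy as np
        >>> from astropy.modeling.models import Gaussian2D
        >>> m = Gaussian2D(x_mean=5, y_mean=5, x_stddev=1, y_stddev=1)
        >>> m.bounding_box = ((3, 7), (3, 7))
        >>> m.render(out=np.zeros((10, 10))).shape
        (10, 10)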
"""
try:
bbox = self.bounding_box
except NotImplementedError:
bbox = None
if isinstance(bbox, ModelBoundingBox):
bbox = bbox.bounding_box()
ndim = self.n_inputs
if (coords is None) and (out is None) and (bbox is None):
raise ValueError("If no bounding_box is set, coords or out must be input.")
# for consistent indexing
if ndim == 1:
if coords is not None:
coords = [coords]
if bbox is not None:
bbox = [bbox]
if coords is not None:
coords = np.asanyarray(coords, dtype=float)
# Check dimensions match out and model
assert len(coords) == ndim
if out is not None:
if coords[0].shape != out.shape:
raise ValueError("inconsistent shape of the output.")
else:
out = np.zeros(coords[0].shape)
if out is not None:
out = np.asanyarray(out)
if out.ndim != ndim:
raise ValueError(
"the array and model must have the same number of dimensions."
)
if bbox is not None:
# Assures position is at center pixel,
# important when using add_array.
pd = (
np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2)) for bb in bbox])
.astype(int)
.T
)
pos, delta = pd
if coords is not None:
sub_shape = tuple(delta * 2 + 1)
sub_coords = np.array(
[extract_array(c, sub_shape, pos) for c in coords]
)
else:
limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
sub_coords = np.mgrid[limits]
sub_coords = sub_coords[::-1]
if out is None:
out = self(*sub_coords)
else:
try:
out = add_array(out, self(*sub_coords), pos)
except ValueError:
raise ValueError(
"The `bounding_box` is larger than the input out in "
"one or more dimensions. Set "
"`model.bounding_box = None`."
)
else:
if coords is None:
im_shape = out.shape
limits = [slice(i) for i in im_shape]
coords = np.mgrid[limits]
coords = coords[::-1]
out += self(*coords)
return out
@property
def input_units(self):
"""
This property is used to indicate what units or sets of units the
evaluate method expects, and returns a dictionary mapping inputs to
units (or `None` if any units are accepted).
Model sub-classes can also use function annotations in evaluate to
indicate valid input units, in which case this property should
not be overridden since it will return the input units based on the
annotations.
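        Examples
        --------
        An illustrative sketch; `~astropy.modeling.models.Gaussian1D` derives its
        input units from the unit of its ``mean`` parameter:
        >>> import astropy.units as u
        >>> from astropy.modeling.models import Gaussian1D
        >>> g = Gaussian1D(amplitude=1 * u.Jy, mean=1 * u.m, stddev=0.1 * u.m)
        >>> g.input_units
        {'x': Unit("m")}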
"""
if hasattr(self, "_input_units"):
return self._input_units
elif hasattr(self.evaluate, "__annotations__"):
annotations = self.evaluate.__annotations__.copy()
annotations.pop("return", None)
if annotations:
                # If annotations are not provided for every input, this will
                # raise a KeyError.
return {name: annotations[name] for name in self.inputs}
else:
# None means any unit is accepted
return None
@property
def return_units(self):
"""
This property is used to indicate what units or sets of units the
output of evaluate should be in, and returns a dictionary mapping
outputs to units (or `None` if any units are accepted).
Model sub-classes can also use function annotations in evaluate to
indicate valid output units, in which case this property should not be
overridden since it will return the return units based on the
annotations.
"""
if hasattr(self, "_return_units"):
return self._return_units
elif hasattr(self.evaluate, "__annotations__"):
return self.evaluate.__annotations__.get("return", None)
else:
# None means any unit is accepted
return None
def _prepare_inputs_single_model(self, params, inputs, **kwargs):
broadcasts = []
for idx, _input in enumerate(inputs):
input_shape = _input.shape
            # Ensure that array scalars are always upgraded to 1-D arrays for the
# sake of consistency with how parameters work. They will be cast back
# to scalars at the end
if not input_shape:
inputs[idx] = _input.reshape((1,))
if not params:
max_broadcast = input_shape
else:
max_broadcast = ()
for param in params:
try:
if self.standard_broadcasting:
broadcast = check_broadcast(input_shape, param.shape)
else:
broadcast = input_shape
except IncompatibleShapeError:
raise ValueError(
f"self input argument {self.inputs[idx]!r} of shape"
f" {input_shape!r} cannot be broadcast with parameter"
f" {param.name!r} of shape {param.shape!r}."
)
if len(broadcast) > len(max_broadcast):
max_broadcast = broadcast
elif len(broadcast) == len(max_broadcast):
max_broadcast = max(max_broadcast, broadcast)
broadcasts.append(max_broadcast)
if self.n_outputs > self.n_inputs:
extra_outputs = self.n_outputs - self.n_inputs
if not broadcasts:
# If there were no inputs then the broadcasts list is empty
# just add a None since there is no broadcasting of outputs and
                # inputs necessary (see _prepare_outputs_single_model)
broadcasts.append(None)
broadcasts.extend([broadcasts[0]] * extra_outputs)
return inputs, (broadcasts,)
@staticmethod
def _remove_axes_from_shape(shape, axis):
"""
Given a shape tuple as the first input, construct a new one by removing
that particular axis from the shape and all preceding axes. Negative axis
        numbers are permitted, where the axis is relative to the last axis.
"""
if len(shape) == 0:
return shape
if axis < 0:
axis = len(shape) + axis
return shape[:axis] + shape[axis + 1 :]
if axis >= len(shape):
axis = len(shape) - 1
shape = shape[axis + 1 :]
return shape
def _prepare_inputs_model_set(self, params, inputs, model_set_axis_input, **kwargs):
reshaped = []
pivots = []
model_set_axis_param = self.model_set_axis # needed to reshape param
for idx, _input in enumerate(inputs):
max_param_shape = ()
if self._n_models > 1 and model_set_axis_input is not False:
# Use the shape of the input *excluding* the model axis
input_shape = (
_input.shape[:model_set_axis_input]
+ _input.shape[model_set_axis_input + 1 :]
)
else:
input_shape = _input.shape
for param in params:
try:
check_broadcast(
input_shape,
self._remove_axes_from_shape(param.shape, model_set_axis_param),
)
except IncompatibleShapeError:
raise ValueError(
f"Model input argument {self.inputs[idx]!r} of shape"
f" {input_shape!r} "
f"cannot be broadcast with parameter {param.name!r} of shape "
f"{self._remove_axes_from_shape(param.shape, model_set_axis_param)!r}."
)
if len(param.shape) - 1 > len(max_param_shape):
max_param_shape = self._remove_axes_from_shape(
param.shape, model_set_axis_param
)
# We've now determined that, excluding the model_set_axis, the
# input can broadcast with all the parameters
input_ndim = len(input_shape)
if model_set_axis_input is False:
if len(max_param_shape) > input_ndim:
# Just needs to prepend new axes to the input
n_new_axes = 1 + len(max_param_shape) - input_ndim
new_axes = (1,) * n_new_axes
new_shape = new_axes + _input.shape
pivot = model_set_axis_param
else:
pivot = input_ndim - len(max_param_shape)
new_shape = _input.shape[:pivot] + (1,) + _input.shape[pivot:]
new_input = _input.reshape(new_shape)
else:
if len(max_param_shape) >= input_ndim:
n_new_axes = len(max_param_shape) - input_ndim
pivot = self.model_set_axis
new_axes = (1,) * n_new_axes
new_shape = (
_input.shape[: pivot + 1] + new_axes + _input.shape[pivot + 1 :]
)
new_input = _input.reshape(new_shape)
else:
pivot = _input.ndim - len(max_param_shape) - 1
new_input = np.rollaxis(_input, model_set_axis_input, pivot + 1)
pivots.append(pivot)
reshaped.append(new_input)
if self.n_inputs < self.n_outputs:
pivots.extend([model_set_axis_input] * (self.n_outputs - self.n_inputs))
return reshaped, (pivots,)
def prepare_inputs(
self, *inputs, model_set_axis=None, equivalencies=None, **kwargs
):
"""
This method is used in `~astropy.modeling.Model.__call__` to ensure
that all the inputs to the model can be broadcast into compatible
shapes (if one or both of them are input as arrays), particularly if
there are more than one parameter sets. This also makes sure that (if
applicable) the units of the input will be compatible with the evaluate
method.
"""
# When we instantiate the model class, we make sure that __call__ can
# take the following two keyword arguments: model_set_axis and
# equivalencies.
if model_set_axis is None:
# By default the model_set_axis for the input is assumed to be the
# same as that for the parameters the model was defined with
# TODO: Ensure that negative model_set_axis arguments are respected
model_set_axis = self.model_set_axis
params = [getattr(self, name) for name in self.param_names]
inputs = [np.asanyarray(_input, dtype=float) for _input in inputs]
self._validate_input_shapes(inputs, self.inputs, model_set_axis)
inputs_map = kwargs.get("inputs_map", None)
inputs = self._validate_input_units(inputs, equivalencies, inputs_map)
# The input formatting required for single models versus a multiple
# model set are different enough that they've been split into separate
# subroutines
if self._n_models == 1:
return self._prepare_inputs_single_model(params, inputs, **kwargs)
else:
return self._prepare_inputs_model_set(
params, inputs, model_set_axis, **kwargs
)
def _validate_input_units(self, inputs, equivalencies=None, inputs_map=None):
inputs = list(inputs)
name = self.name or self.__class__.__name__
# Check that the units are correct, if applicable
if self.input_units is not None:
# If a leaflist is provided that means this is in the context of
# a compound model and it is necessary to create the appropriate
# alias for the input coordinate name for the equivalencies dict
if inputs_map:
edict = {}
for mod, mapping in inputs_map:
if self is mod:
edict[mapping[0]] = equivalencies[mapping[1]]
else:
edict = equivalencies
# We combine any instance-level input equivalencies with user
# specified ones at call-time.
input_units_equivalencies = _combine_equivalency_dict(
self.inputs, edict, self.input_units_equivalencies
)
# We now iterate over the different inputs and make sure that their
# units are consistent with those specified in input_units.
for i in range(len(inputs)):
input_name = self.inputs[i]
input_unit = self.input_units.get(input_name, None)
if input_unit is None:
continue
if isinstance(inputs[i], Quantity):
# We check for consistency of the units with input_units,
# taking into account any equivalencies
if inputs[i].unit.is_equivalent(
input_unit, equivalencies=input_units_equivalencies[input_name]
):
# If equivalencies have been specified, we need to
# convert the input to the input units - this is
# because some equivalencies are non-linear, and
# we need to be sure that we evaluate the model in
# its own frame of reference. If input_units_strict
# is set, we also need to convert to the input units.
if (
len(input_units_equivalencies) > 0
or self.input_units_strict[input_name]
):
inputs[i] = inputs[i].to(
input_unit,
equivalencies=input_units_equivalencies[input_name],
)
else:
# We consider the following two cases separately so as
# to be able to raise more appropriate/nicer exceptions
if input_unit is dimensionless_unscaled:
raise UnitsError(
f"{name}: Units of input '{self.inputs[i]}', "
f"{inputs[i].unit} ({inputs[i].unit.physical_type}),"
"could not be converted to "
"required dimensionless "
"input"
)
else:
raise UnitsError(
f"{name}: Units of input '{self.inputs[i]}', "
f"{inputs[i].unit} ({inputs[i].unit.physical_type}),"
" could not be "
"converted to required input"
f" units of {input_unit} ({input_unit.physical_type})"
)
else:
# If we allow dimensionless input, we add the units to the
# input values without conversion, otherwise we raise an
# exception.
if (
not self.input_units_allow_dimensionless[input_name]
and input_unit is not dimensionless_unscaled
and input_unit is not None
):
if np.any(inputs[i] != 0):
raise UnitsError(
f"{name}: Units of input '{self.inputs[i]}',"
" (dimensionless), could not be converted to required "
f"input units of {input_unit} "
f"({input_unit.physical_type})"
)
return inputs
def _process_output_units(self, inputs, outputs):
inputs_are_quantity = any(isinstance(i, Quantity) for i in inputs)
if self.return_units and inputs_are_quantity:
# We allow a non-iterable unit only if there is one output
if self.n_outputs == 1 and not isiterable(self.return_units):
return_units = {self.outputs[0]: self.return_units}
else:
return_units = self.return_units
outputs = tuple(
Quantity(out, return_units.get(out_name, None), subok=True)
for out, out_name in zip(outputs, self.outputs)
)
return outputs
@staticmethod
def _prepare_output_single_model(output, broadcast_shape):
if broadcast_shape is not None:
if not broadcast_shape:
return output.item()
else:
try:
return output.reshape(broadcast_shape)
except ValueError:
try:
return output.item()
except ValueError:
return output
return output
def _prepare_outputs_single_model(self, outputs, broadcasted_shapes):
outputs = list(outputs)
for idx, output in enumerate(outputs):
try:
broadcast_shape = check_broadcast(*broadcasted_shapes[0])
except (IndexError, TypeError):
broadcast_shape = broadcasted_shapes[0][idx]
outputs[idx] = self._prepare_output_single_model(output, broadcast_shape)
return tuple(outputs)
def _prepare_outputs_model_set(self, outputs, broadcasted_shapes, model_set_axis):
pivots = broadcasted_shapes[0]
# If model_set_axis = False was passed then use
# self._model_set_axis to format the output.
if model_set_axis is None or model_set_axis is False:
model_set_axis = self.model_set_axis
outputs = list(outputs)
for idx, output in enumerate(outputs):
pivot = pivots[idx]
if pivot < output.ndim and pivot != model_set_axis:
outputs[idx] = np.rollaxis(output, pivot, model_set_axis)
return tuple(outputs)
def prepare_outputs(self, broadcasted_shapes, *outputs, **kwargs):
model_set_axis = kwargs.get("model_set_axis", None)
if len(self) == 1:
return self._prepare_outputs_single_model(outputs, broadcasted_shapes)
else:
return self._prepare_outputs_model_set(
outputs, broadcasted_shapes, model_set_axis
)
def copy(self):
"""
Return a copy of this model.
Uses a deep copy so that all model attributes, including parameter
values, are copied as well.
"""
return copy.deepcopy(self)
def deepcopy(self):
"""
Return a deep copy of this model.
"""
return self.copy()
@sharedmethod
def rename(self, name):
"""
Return a copy of this model with a new name.
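        Examples
        --------
        An illustrative sketch:
        >>> from astropy.modeling.models import Gaussian1D
        >>> Gaussian1D(name="g1").rename("g2").name
        'g2'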
"""
new_model = self.copy()
new_model._name = name
return new_model
def coerce_units(
self,
input_units=None,
return_units=None,
input_units_equivalencies=None,
input_units_allow_dimensionless=False,
):
"""
Attach units to this (unitless) model.
Parameters
----------
input_units : dict or tuple, optional
Input units to attach. If dict, each key is the name of a model input,
and the value is the unit to attach. If tuple, the elements are units
to attach in order corresponding to `Model.inputs`.
return_units : dict or tuple, optional
Output units to attach. If dict, each key is the name of a model output,
and the value is the unit to attach. If tuple, the elements are units
to attach in order corresponding to `Model.outputs`.
input_units_equivalencies : dict, optional
Default equivalencies to apply to input values. If set, this should be a
dictionary where each key is a string that corresponds to one of the
model inputs.
input_units_allow_dimensionless : bool or dict, optional
Allow dimensionless input. If this is True, input values to evaluate will
gain the units specified in input_units. If this is a dictionary then it
should map input name to a bool to allow dimensionless numbers for that
input.
Returns
-------
`CompoundModel`
A `CompoundModel` composed of the current model plus
`~astropy.modeling.mappings.UnitsMapping` model(s) that attach the units.
Raises
------
ValueError
If the current model already has units.
Examples
--------
Wrapping a unitless model to require and convert units:
>>> from astropy.modeling.models import Polynomial1D
>>> from astropy import units as u
>>> poly = Polynomial1D(1, c0=1, c1=2)
>>> model = poly.coerce_units((u.m,), (u.s,))
>>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP
<Quantity 21. s>
>>> model(u.Quantity(1000, u.cm)) # doctest: +FLOAT_CMP
<Quantity 21. s>
>>> model(u.Quantity(10, u.cm)) # doctest: +FLOAT_CMP
<Quantity 1.2 s>
Wrapping a unitless model but still permitting unitless input:
>>> from astropy.modeling.models import Polynomial1D
>>> from astropy import units as u
>>> poly = Polynomial1D(1, c0=1, c1=2)
>>> model = poly.coerce_units((u.m,), (u.s,), input_units_allow_dimensionless=True)
>>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP
<Quantity 21. s>
>>> model(10) # doctest: +FLOAT_CMP
<Quantity 21. s>
"""
from .mappings import UnitsMapping
result = self
if input_units is not None:
if self.input_units is not None:
model_units = self.input_units
else:
model_units = {}
for unit in [model_units.get(i) for i in self.inputs]:
if unit is not None and unit != dimensionless_unscaled:
raise ValueError(
"Cannot specify input_units for model with existing input units"
)
if isinstance(input_units, dict):
if input_units.keys() != set(self.inputs):
message = (
f"""input_units keys ({", ".join(input_units.keys())}) """
f"""do not match model inputs ({", ".join(self.inputs)})"""
)
raise ValueError(message)
input_units = [input_units[i] for i in self.inputs]
if len(input_units) != self.n_inputs:
message = (
"input_units length does not match n_inputs: "
f"expected {self.n_inputs}, received {len(input_units)}"
)
raise ValueError(message)
mapping = tuple(
(unit, model_units.get(i)) for i, unit in zip(self.inputs, input_units)
)
input_mapping = UnitsMapping(
mapping,
input_units_equivalencies=input_units_equivalencies,
input_units_allow_dimensionless=input_units_allow_dimensionless,
)
input_mapping.inputs = self.inputs
input_mapping.outputs = self.inputs
result = input_mapping | result
if return_units is not None:
if self.return_units is not None:
model_units = self.return_units
else:
model_units = {}
for unit in [model_units.get(i) for i in self.outputs]:
if unit is not None and unit != dimensionless_unscaled:
raise ValueError(
"Cannot specify return_units for model "
"with existing output units"
)
if isinstance(return_units, dict):
if return_units.keys() != set(self.outputs):
message = (
f"""return_units keys ({", ".join(return_units.keys())}) """
f"""do not match model outputs ({", ".join(self.outputs)})"""
)
raise ValueError(message)
return_units = [return_units[i] for i in self.outputs]
if len(return_units) != self.n_outputs:
message = (
"return_units length does not match n_outputs: "
f"expected {self.n_outputs}, received {len(return_units)}"
)
raise ValueError(message)
mapping = tuple(
(model_units.get(i), unit)
for i, unit in zip(self.outputs, return_units)
)
return_mapping = UnitsMapping(mapping)
return_mapping.inputs = self.outputs
return_mapping.outputs = self.outputs
result = result | return_mapping
return result
@property
def n_submodels(self):
"""
Return the number of components in a single model, which is
obviously 1.
"""
return 1
def _initialize_constraints(self, kwargs):
"""
Pop parameter constraint values off the keyword arguments passed to
`Model.__init__` and store them in private instance attributes.
"""
# Pop any constraints off the keyword arguments
for constraint in self.parameter_constraints:
values = kwargs.pop(constraint, {})
for ckey, cvalue in values.items():
param = getattr(self, ckey)
setattr(param, constraint, cvalue)
self._mconstraints = {}
for constraint in self.model_constraints:
values = kwargs.pop(constraint, [])
self._mconstraints[constraint] = values
def _initialize_parameters(self, args, kwargs):
"""
Initialize the _parameters array that stores raw parameter values for
all parameter sets for use with vectorized fitting algorithms; on
FittableModels the _param_name attributes actually just reference
slices of this array.
"""
n_models = kwargs.pop("n_models", None)
if not (
n_models is None
or (isinstance(n_models, (int, np.integer)) and n_models >= 1)
):
raise ValueError(
"n_models must be either None (in which case it is "
"determined from the model_set_axis of the parameter initial "
"values) or it must be a positive integer "
f"(got {n_models!r})"
)
model_set_axis = kwargs.pop("model_set_axis", None)
if model_set_axis is None:
if n_models is not None and n_models > 1:
# Default to zero
model_set_axis = 0
else:
# Otherwise disable
model_set_axis = False
else:
if not (
model_set_axis is False
or np.issubdtype(type(model_set_axis), np.integer)
):
raise ValueError(
"model_set_axis must be either False or an integer "
"specifying the parameter array axis to map to each "
f"model in a set of models (got {model_set_axis!r})."
)
# Process positional arguments by matching them up with the
# corresponding parameters in self.param_names--if any also appear as
# keyword arguments this presents a conflict
params = set()
if len(args) > len(self.param_names):
raise TypeError(
f"{self.__class__.__name__}.__init__() takes at most "
f"{len(self.param_names)} positional arguments ({len(args)} given)"
)
self._model_set_axis = model_set_axis
self._param_metrics = defaultdict(dict)
for idx, arg in enumerate(args):
if arg is None:
# A value of None implies using the default value, if exists
continue
# We use quantity_asanyarray here instead of np.asanyarray because
# if any of the arguments are quantities, we need to return a
# Quantity object not a plain Numpy array.
param_name = self.param_names[idx]
params.add(param_name)
if not isinstance(arg, Parameter):
value = quantity_asanyarray(arg, dtype=float)
else:
value = arg
self._initialize_parameter_value(param_name, value)
# At this point the only remaining keyword arguments should be
# parameter names; any others are in error.
for param_name in self.param_names:
if param_name in kwargs:
if param_name in params:
raise TypeError(
f"{self.__class__.__name__}.__init__() got multiple values for"
f" parameter {param_name!r}"
)
value = kwargs.pop(param_name)
if value is None:
continue
# We use quantity_asanyarray here instead of np.asanyarray
# because if any of the arguments are quantities, we need
# to return a Quantity object not a plain Numpy array.
value = quantity_asanyarray(value, dtype=float)
params.add(param_name)
self._initialize_parameter_value(param_name, value)
# Now deal with case where param_name is not supplied by args or kwargs
for param_name in self.param_names:
if param_name not in params:
self._initialize_parameter_value(param_name, None)
if kwargs:
# If any keyword arguments were left over at this point they are
# invalid--the base class should only be passed the parameter
# values, constraints, and param_dim
for kwarg in kwargs:
# Just raise an error on the first unrecognized argument
raise TypeError(
f"{self.__class__.__name__}.__init__() got an unrecognized"
f" parameter {kwarg!r}"
)
# Determine the number of model sets: If the model_set_axis is
# None then there is just one parameter set; otherwise it is determined
# by the size of that axis on the first parameter--if the other
# parameters don't have the right number of axes or the sizes of their
# model_set_axis don't match an error is raised
if model_set_axis is not False and n_models != 1 and params:
max_ndim = 0
if model_set_axis < 0:
min_ndim = abs(model_set_axis)
else:
min_ndim = model_set_axis + 1
for name in self.param_names:
value = getattr(self, name)
param_ndim = np.ndim(value)
if param_ndim < min_ndim:
raise InputParameterError(
"All parameter values must be arrays of dimension at least"
f" {min_ndim} for model_set_axis={model_set_axis} (the value"
f" given for {name!r} is only {param_ndim}-dimensional)"
)
max_ndim = max(max_ndim, param_ndim)
if n_models is None:
# Use the dimensions of the first parameter to determine
# the number of model sets
n_models = value.shape[model_set_axis]
elif value.shape[model_set_axis] != n_models:
raise InputParameterError(
f"Inconsistent dimensions for parameter {name!r} for"
f" {n_models} model sets. The length of axis"
f" {model_set_axis} must be the same for all input parameter"
" values"
)
self._check_param_broadcast(max_ndim)
else:
if n_models is None:
n_models = 1
self._check_param_broadcast(None)
self._n_models = n_models
# now validate parameters
for name in params:
param = getattr(self, name)
if param._validator is not None:
param._validator(self, param.value)
def _initialize_parameter_value(self, param_name, value):
"""Mostly deals with consistency checks and determining unit issues."""
if isinstance(value, Parameter):
self.__dict__[param_name] = value
return
param = getattr(self, param_name)
# Use default if value is not provided
if value is None:
default = param.default
if default is None:
# No value was supplied for the parameter and the
# parameter does not have a default, therefore the model
# is underspecified
raise TypeError(
f"{self.__class__.__name__}.__init__() requires a value for "
f"parameter {param_name!r}"
)
value = default
unit = param.unit
else:
if isinstance(value, Quantity):
unit = value.unit
value = value.value
else:
unit = None
if unit is None and param.unit is not None:
raise InputParameterError(
f"{self.__class__.__name__}.__init__() requires a Quantity for"
f" parameter {param_name!r}"
)
param._unit = unit
param._set_unit(unit, force=True)
param.internal_unit = None
if param._setter is not None:
if unit is not None:
_val = param._setter(value * unit)
else:
_val = param._setter(value)
if isinstance(_val, Quantity):
param.internal_unit = _val.unit
param._internal_value = np.array(_val.value)
else:
param.internal_unit = None
param._internal_value = np.array(_val)
else:
param._value = np.array(value)
def _initialize_slices(self):
param_metrics = self._param_metrics
total_size = 0
for name in self.param_names:
param = getattr(self, name)
value = param.value
param_size = np.size(value)
param_shape = np.shape(value)
param_slice = slice(total_size, total_size + param_size)
param_metrics[name]["slice"] = param_slice
param_metrics[name]["shape"] = param_shape
param_metrics[name]["size"] = param_size
total_size += param_size
self._parameters = np.empty(total_size, dtype=np.float64)
def _parameters_to_array(self):
# Now set the parameter values (this will also fill
# self._parameters)
param_metrics = self._param_metrics
for name in self.param_names:
param = getattr(self, name)
value = param.value
if not isinstance(value, np.ndarray):
value = np.array([value])
self._parameters[param_metrics[name]["slice"]] = value.ravel()
# Finally validate all the parameters; we do this last so that
# validators that depend on one of the other parameters' values will
# work
def _array_to_parameters(self):
param_metrics = self._param_metrics
for name in self.param_names:
param = getattr(self, name)
value = self._parameters[param_metrics[name]["slice"]]
value.shape = param_metrics[name]["shape"]
param.value = value
def _check_param_broadcast(self, max_ndim):
"""
This subroutine checks that all parameter arrays can be broadcast
against each other, and determines the shapes parameters must have in
order to broadcast correctly.
If model_set_axis is None this merely checks that the parameters
broadcast and returns an empty dict if so. This mode is only used for
single model sets.
"""
all_shapes = []
model_set_axis = self._model_set_axis
for name in self.param_names:
param = getattr(self, name)
value = param.value
param_shape = np.shape(value)
param_ndim = len(param_shape)
if max_ndim is not None and param_ndim < max_ndim:
# All arrays have the same number of dimensions up to the
# model_set_axis dimension, but after that they may have a
# different number of trailing axes. The number of trailing
# axes must be extended for mutual compatibility. For example
# if max_ndim = 3 and model_set_axis = 0, an array with the
# shape (2, 2) must be extended to (2, 1, 2). However, an
# array with shape (2,) is extended to (2, 1).
new_axes = (1,) * (max_ndim - param_ndim)
if model_set_axis < 0:
# Just need to prepend axes to make up the difference
broadcast_shape = new_axes + param_shape
else:
broadcast_shape = (
param_shape[: model_set_axis + 1]
+ new_axes
+ param_shape[model_set_axis + 1 :]
)
self._param_metrics[name]["broadcast_shape"] = broadcast_shape
all_shapes.append(broadcast_shape)
else:
all_shapes.append(param_shape)
# Now check mutual broadcastability of all shapes
try:
check_broadcast(*all_shapes)
except IncompatibleShapeError as exc:
shape_a, shape_a_idx, shape_b, shape_b_idx = exc.args
param_a = self.param_names[shape_a_idx]
param_b = self.param_names[shape_b_idx]
raise InputParameterError(
f"Parameter {param_a!r} of shape {shape_a!r} cannot be broadcast with "
f"parameter {param_b!r} of shape {shape_b!r}. All parameter arrays "
"must have shapes that are mutually compatible according "
"to the broadcasting rules."
)
def _param_sets(self, raw=False, units=False):
"""
Implementation of the Model.param_sets property.
This internal implementation has a ``raw`` argument which controls
whether or not to return the raw parameter values (i.e. the values that
are actually stored in the ._parameters array, as opposed to the values
        displayed to users). In most cases these are one and the same, but there
are currently a few exceptions.
Note: This is notably an overcomplicated device and may be removed
entirely in the near future.
"""
values = []
shapes = []
for name in self.param_names:
param = getattr(self, name)
if raw and param._setter:
value = param._internal_value
else:
value = param.value
broadcast_shape = self._param_metrics[name].get("broadcast_shape")
if broadcast_shape is not None:
value = value.reshape(broadcast_shape)
shapes.append(np.shape(value))
if len(self) == 1:
# Add a single param set axis to the parameter's value (thus
# converting scalars to shape (1,) array values) for
# consistency
value = np.array([value])
if units:
if raw and param.internal_unit is not None:
unit = param.internal_unit
else:
unit = param.unit
if unit is not None:
value = Quantity(value, unit, subok=True)
values.append(value)
if len(set(shapes)) != 1 or units:
# If the parameters are not all the same shape, converting to an
# array is going to produce an object array
# However the way Numpy creates object arrays is tricky in that it
# will recurse into array objects in the list and break them up
# into separate objects. Doing things this way ensures a 1-D
# object array the elements of which are the individual parameter
# arrays. There's not much reason to do this over returning a list
# except for consistency
psets = np.empty(len(values), dtype=object)
psets[:] = values
return psets
return np.array(values)
def _format_repr(self, args=[], kwargs={}, defaults={}):
"""
Internal implementation of ``__repr__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__repr__`` while keeping the same basic
formatting.
"""
parts = [repr(a) for a in args]
parts.extend(
f"{name}={param_repr_oneline(getattr(self, name))}"
for name in self.param_names
)
if self.name is not None:
parts.append(f"name={self.name!r}")
for kwarg, value in kwargs.items():
if kwarg in defaults and defaults[kwarg] == value:
continue
parts.append(f"{kwarg}={value!r}")
if len(self) > 1:
parts.append(f"n_models={len(self)}")
return f"<{self.__class__.__name__}({', '.join(parts)})>"
def _format_str(self, keywords=[], defaults={}):
"""
Internal implementation of ``__str__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__str__`` while keeping the same basic
formatting.
"""
default_keywords = [
("Model", self.__class__.__name__),
("Name", self.name),
("Inputs", self.inputs),
("Outputs", self.outputs),
("Model set size", len(self)),
]
parts = [
f"{keyword}: {value}"
for keyword, value in default_keywords
if value is not None
]
for keyword, value in keywords:
if keyword.lower() in defaults and defaults[keyword.lower()] == value:
continue
parts.append(f"{keyword}: {value}")
parts.append("Parameters:")
if len(self) == 1:
columns = [[getattr(self, name).value] for name in self.param_names]
else:
columns = [getattr(self, name).value for name in self.param_names]
if columns:
param_table = Table(columns, names=self.param_names)
# Set units on the columns
for name in self.param_names:
param_table[name].unit = getattr(self, name).unit
parts.append(indent(str(param_table), width=4))
return "\n".join(parts)
class FittableModel(Model):
"""
Base class for models that can be fitted using the built-in fitting
algorithms.
"""
linear = False
# derivative with respect to parameters
fit_deriv = None
"""
Function (similar to the model's `~Model.evaluate`) to compute the
derivatives of the model with respect to its parameters, for use by fitting
algorithms. In other words, this computes the Jacobian matrix with respect
to the model's parameters.
"""
# Flag that indicates if the model derivatives with respect to parameters
# are given in columns or rows
col_fit_deriv = True
fittable = True
class Fittable1DModel(FittableModel):
"""
Base class for one-dimensional fittable models.
This class provides an easier interface to defining new models.
Examples can be found in `astropy.modeling.functional_models`.
"""
n_inputs = 1
n_outputs = 1
_separable = True
class Fittable2DModel(FittableModel):
"""
Base class for two-dimensional fittable models.
This class provides an easier interface to defining new models.
Examples can be found in `astropy.modeling.functional_models`.
"""
n_inputs = 2
n_outputs = 1
def _make_arithmetic_operator(oper):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
def op(f, g):
return (make_binary_operator_eval(oper, f[0], g[0]), f[1], f[2])
return op
def _composition_operator(f, g):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
return (lambda inputs, params: g[0](f[0](inputs, params), params), f[1], g[2])
def _join_operator(f, g):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
return (
lambda inputs, params: (
f[0](inputs[: f[1]], params) + g[0](inputs[f[1] :], params)
),
f[1] + g[1],
f[2] + g[2],
)
BINARY_OPERATORS = {
"+": _make_arithmetic_operator(operator.add),
"-": _make_arithmetic_operator(operator.sub),
"*": _make_arithmetic_operator(operator.mul),
"/": _make_arithmetic_operator(operator.truediv),
"**": _make_arithmetic_operator(operator.pow),
"|": _composition_operator,
"&": _join_operator,
}
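# Editor's note (illustrative, not part of the public API): each value in
# BINARY_OPERATORS combines two ``(evaluate, n_inputs, n_outputs)`` tuples into
# a new one. The arithmetic entries reuse the left operand's input/output
# counts, "|" chains the left evaluate callable into the right one (taking the
# left operand's inputs and the right operand's outputs), and "&" concatenates
# the inputs and outputs of both operands.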
SPECIAL_OPERATORS = _SpecialOperatorsDict()
def _add_special_operator(sop_name, sop):
return SPECIAL_OPERATORS.add(sop_name, sop)
class CompoundModel(Model):
"""
Base class for compound models.
While it can be used directly, the recommended way
to combine models is through the model operators.
"""
def __init__(self, op, left, right, name=None):
self.__dict__["_param_names"] = None
self._n_submodels = None
self.op = op
self.left = left
self.right = right
self._bounding_box = None
self._user_bounding_box = None
self._leaflist = None
self._tdict = None
self._parameters = None
self._parameters_ = None
self._param_metrics = None
if op != "fix_inputs" and len(left) != len(right):
raise ValueError("Both operands must have equal values for n_models")
self._n_models = len(left)
if op != "fix_inputs" and (
(left.model_set_axis != right.model_set_axis) or left.model_set_axis
): # not False and not 0
raise ValueError(
"model_set_axis must be False or 0 and consistent for operands"
)
self._model_set_axis = left.model_set_axis
if op in ["+", "-", "*", "/", "**"] or op in SPECIAL_OPERATORS:
if left.n_inputs != right.n_inputs or left.n_outputs != right.n_outputs:
raise ModelDefinitionError(
"Both operands must match numbers of inputs and outputs"
)
self.n_inputs = left.n_inputs
self.n_outputs = left.n_outputs
self.inputs = left.inputs
self.outputs = left.outputs
elif op == "&":
self.n_inputs = left.n_inputs + right.n_inputs
self.n_outputs = left.n_outputs + right.n_outputs
self.inputs = combine_labels(left.inputs, right.inputs)
self.outputs = combine_labels(left.outputs, right.outputs)
elif op == "|":
if left.n_outputs != right.n_inputs:
raise ModelDefinitionError(
"Unsupported operands for |:"
f" {left.name} (n_inputs={left.n_inputs},"
f" n_outputs={left.n_outputs}) and"
f" {right.name} (n_inputs={right.n_inputs},"
f" n_outputs={right.n_outputs}); n_outputs for the left-hand model"
" must match n_inputs for the right-hand model."
)
self.n_inputs = left.n_inputs
self.n_outputs = right.n_outputs
self.inputs = left.inputs
self.outputs = right.outputs
elif op == "fix_inputs":
if not isinstance(left, Model):
raise ValueError(
'First argument to "fix_inputs" must be an instance of '
"an astropy Model."
)
if not isinstance(right, dict):
raise ValueError(
'Expected a dictionary for second argument of "fix_inputs".'
)
# Dict keys must match either possible indices
# for model on left side, or names for inputs.
self.n_inputs = left.n_inputs - len(right)
# Assign directly to the private attribute (instead of using the setter)
# to avoid asserting the new number of outputs matches the old one.
self._outputs = left.outputs
self.n_outputs = left.n_outputs
newinputs = list(left.inputs)
keys = right.keys()
input_ind = []
for key in keys:
if np.issubdtype(type(key), np.integer):
if key >= left.n_inputs or key < 0:
raise ValueError(
"Substitution key integer value "
"not among possible input choices."
)
if key in input_ind:
raise ValueError(
"Duplicate specification of same input (index/name)."
)
input_ind.append(key)
elif isinstance(key, str):
if key not in left.inputs:
raise ValueError(
"Substitution key string not among possible input choices."
)
# Check to see it doesn't match positional
# specification.
ind = left.inputs.index(key)
if ind in input_ind:
raise ValueError(
"Duplicate specification of same input (index/name)."
)
input_ind.append(ind)
# Remove substituted inputs
input_ind.sort()
input_ind.reverse()
for ind in input_ind:
del newinputs[ind]
self.inputs = tuple(newinputs)
# Now check to see if the input model has bounding_box defined.
# If so, remove the appropriate dimensions and set it for this
# instance.
try:
self.bounding_box = self.left.bounding_box.fix_inputs(self, right)
except NotImplementedError:
pass
else:
raise ModelDefinitionError("Illegal operator: ", self.op)
self.name = name
self._fittable = None
self.fit_deriv = None
self.col_fit_deriv = None
if op in ("|", "+", "-"):
self.linear = left.linear and right.linear
else:
self.linear = False
self.eqcons = []
self.ineqcons = []
self.n_left_params = len(self.left.parameters)
self._map_parameters()
def _get_left_inputs_from_args(self, args):
return args[: self.left.n_inputs]
def _get_right_inputs_from_args(self, args):
op = self.op
if op == "&":
# Args expected to look like (*left inputs, *right inputs, *left params, *right params)
return args[self.left.n_inputs : self.left.n_inputs + self.right.n_inputs]
elif op == "|" or op == "fix_inputs":
return None
else:
return args[: self.left.n_inputs]
def _get_left_params_from_args(self, args):
op = self.op
if op == "&":
# Args expected to look like (*left inputs, *right inputs, *left params, *right params)
n_inputs = self.left.n_inputs + self.right.n_inputs
return args[n_inputs : n_inputs + self.n_left_params]
else:
return args[self.left.n_inputs : self.left.n_inputs + self.n_left_params]
def _get_right_params_from_args(self, args):
op = self.op
if op == "fix_inputs":
return None
if op == "&":
# Args expected to look like (*left inputs, *right inputs, *left params, *right params)
return args[self.left.n_inputs + self.right.n_inputs + self.n_left_params :]
else:
return args[self.left.n_inputs + self.n_left_params :]
def _get_kwarg_model_parameters_as_positional(self, args, kwargs):
        # could do it with inserts, but rebuilding seems like the simplest way
# TODO: Check if any param names are in kwargs maybe as an intersection of sets?
if self.op == "&":
new_args = list(args[: self.left.n_inputs + self.right.n_inputs])
args_pos = self.left.n_inputs + self.right.n_inputs
else:
new_args = list(args[: self.left.n_inputs])
args_pos = self.left.n_inputs
for param_name in self.param_names:
kw_value = kwargs.pop(param_name, None)
if kw_value is not None:
value = kw_value
else:
try:
value = args[args_pos]
except IndexError:
raise IndexError("Missing parameter or input")
args_pos += 1
new_args.append(value)
return new_args, kwargs
def _apply_operators_to_value_lists(self, leftval, rightval, **kw):
op = self.op
if op == "+":
return binary_operation(operator.add, leftval, rightval)
elif op == "-":
return binary_operation(operator.sub, leftval, rightval)
elif op == "*":
return binary_operation(operator.mul, leftval, rightval)
elif op == "/":
return binary_operation(operator.truediv, leftval, rightval)
elif op == "**":
return binary_operation(operator.pow, leftval, rightval)
elif op == "&":
if not isinstance(leftval, tuple):
leftval = (leftval,)
if not isinstance(rightval, tuple):
rightval = (rightval,)
return leftval + rightval
elif op in SPECIAL_OPERATORS:
return binary_operation(SPECIAL_OPERATORS[op], leftval, rightval)
else:
raise ModelDefinitionError("Unrecognized operator {op}")
def evaluate(self, *args, **kw):
op = self.op
args, kw = self._get_kwarg_model_parameters_as_positional(args, kw)
left_inputs = self._get_left_inputs_from_args(args)
left_params = self._get_left_params_from_args(args)
if op == "fix_inputs":
pos_index = dict(zip(self.left.inputs, range(self.left.n_inputs)))
fixed_inputs = {
key if np.issubdtype(type(key), np.integer) else pos_index[key]: value
for key, value in self.right.items()
}
left_inputs = [
fixed_inputs[ind] if ind in fixed_inputs.keys() else inp
for ind, inp in enumerate(left_inputs)
]
leftval = self.left.evaluate(*itertools.chain(left_inputs, left_params))
if op == "fix_inputs":
return leftval
right_inputs = self._get_right_inputs_from_args(args)
right_params = self._get_right_params_from_args(args)
if op == "|":
if isinstance(leftval, tuple):
return self.right.evaluate(*itertools.chain(leftval, right_params))
else:
return self.right.evaluate(leftval, *right_params)
else:
rightval = self.right.evaluate(*itertools.chain(right_inputs, right_params))
return self._apply_operators_to_value_lists(leftval, rightval, **kw)
@property
def n_submodels(self):
if self._leaflist is None:
self._make_leaflist()
return len(self._leaflist)
@property
def submodel_names(self):
"""Return the names of submodels in a ``CompoundModel``."""
if self._leaflist is None:
self._make_leaflist()
names = [item.name for item in self._leaflist]
nonecount = 0
newnames = []
for item in names:
if item is None:
newnames.append(f"None_{nonecount}")
nonecount += 1
else:
newnames.append(item)
return tuple(newnames)
def both_inverses_exist(self):
"""
        Return True if both members of this compound model have inverses.
"""
import warnings
from astropy.utils.exceptions import AstropyDeprecationWarning
warnings.warn(
"CompoundModel.both_inverses_exist is deprecated. Use has_inverse instead.",
AstropyDeprecationWarning,
)
try:
self.left.inverse # noqa: B018
self.right.inverse # noqa: B018
except NotImplementedError:
return False
return True
def _pre_evaluate(self, *args, **kwargs):
"""
CompoundModel specific input setup that needs to occur prior to
model evaluation.
Note
----
All of the _pre_evaluate for each component model will be
performed at the time that the individual model is evaluated.
"""
# If equivalencies are provided, necessary to map parameters and pass
# the leaflist as a keyword input for use by model evaluation so that
# the compound model input names can be matched to the model input
# names.
if "equivalencies" in kwargs:
# Restructure to be useful for the individual model lookup
kwargs["inputs_map"] = [
(value[0], (value[1], key)) for key, value in self.inputs_map().items()
]
# Setup actual model evaluation method
def evaluate(_inputs):
return self._evaluate(*_inputs, **kwargs)
return evaluate, args, None, kwargs
@property
def _argnames(self):
"""
No inputs should be used to determine input_shape when handling compound models.
"""
return ()
def _post_evaluate(self, inputs, outputs, broadcasted_shapes, with_bbox, **kwargs):
"""
CompoundModel specific post evaluation processing of outputs.
Note
----
All of the _post_evaluate for each component model will be
performed at the time that the individual model is evaluated.
"""
if self.get_bounding_box(with_bbox) is not None and self.n_outputs == 1:
return outputs[0]
return outputs
def _evaluate(self, *args, **kw):
op = self.op
if op != "fix_inputs":
if op != "&":
leftval = self.left(*args, **kw)
if op != "|":
rightval = self.right(*args, **kw)
else:
rightval = None
else:
leftval = self.left(*(args[: self.left.n_inputs]), **kw)
rightval = self.right(*(args[self.left.n_inputs :]), **kw)
if op != "|":
return self._apply_operators_to_value_lists(leftval, rightval, **kw)
elif op == "|":
if isinstance(leftval, tuple):
return self.right(*leftval, **kw)
else:
return self.right(leftval, **kw)
else:
subs = self.right
newargs = list(args)
subinds = []
subvals = []
for key in subs.keys():
if np.issubdtype(type(key), np.integer):
subinds.append(key)
elif isinstance(key, str):
ind = self.left.inputs.index(key)
subinds.append(ind)
subvals.append(subs[key])
# Turn inputs specified in kw into positional indices.
# Names for compound inputs do not propagate to sub models.
kwind = []
kwval = []
for kwkey in list(kw.keys()):
if kwkey in self.inputs:
ind = self.inputs.index(kwkey)
if ind < len(args):
raise ValueError(
"Keyword argument duplicates positional value supplied."
)
kwind.append(ind)
kwval.append(kw[kwkey])
del kw[kwkey]
# Build new argument list
# Append keyword specified args first
if kwind:
kwargs = list(zip(kwind, kwval))
kwargs.sort()
kwindsorted, kwvalsorted = list(zip(*kwargs))
newargs = newargs + list(kwvalsorted)
if subinds:
subargs = list(zip(subinds, subvals))
subargs.sort()
# subindsorted, subvalsorted = list(zip(*subargs))
# The substitutions must be inserted in order
for ind, val in subargs:
newargs.insert(ind, val)
return self.left(*newargs, **kw)
@property
def param_names(self):
"""An ordered list of parameter names."""
return self._param_names
def _make_leaflist(self):
tdict = {}
leaflist = []
make_subtree_dict(self, "", tdict, leaflist)
self._leaflist = leaflist
self._tdict = tdict
def __getattr__(self, name):
"""
If someone accesses an attribute not already defined, map the
parameters, and then see if the requested attribute is one of
the parameters.
"""
# The following test is needed to avoid infinite recursion
# caused by deepcopy. There may be other such cases discovered.
if name == "__setstate__":
raise AttributeError
if name in self._param_names:
return self.__dict__[name]
else:
raise AttributeError(f'Attribute "{name}" not found')
def __getitem__(self, index):
if self._leaflist is None:
self._make_leaflist()
leaflist = self._leaflist
tdict = self._tdict
if isinstance(index, slice):
if index.step:
raise ValueError("Steps in slices not supported for compound models")
if index.start is not None:
if isinstance(index.start, str):
start = self._str_index_to_int(index.start)
else:
start = index.start
else:
start = 0
if index.stop is not None:
if isinstance(index.stop, str):
stop = self._str_index_to_int(index.stop)
else:
stop = index.stop - 1
else:
stop = len(leaflist) - 1
if index.stop == 0:
raise ValueError("Slice endpoint cannot be 0")
if start < 0:
start = len(leaflist) + start
if stop < 0:
stop = len(leaflist) + stop
# now search for matching node:
if stop == start: # only single value, get leaf instead in code below
index = start
else:
for key in tdict:
node, leftind, rightind = tdict[key]
if leftind == start and rightind == stop:
return node
raise IndexError("No appropriate subtree matches slice")
if np.issubdtype(type(index), np.integer):
return leaflist[index]
elif isinstance(index, str):
return leaflist[self._str_index_to_int(index)]
else:
raise TypeError("index must be integer, slice, or model name string")
def _str_index_to_int(self, str_index):
# Search through leaflist for item with that name
found = []
for nleaf, leaf in enumerate(self._leaflist):
if getattr(leaf, "name", None) == str_index:
found.append(nleaf)
if len(found) == 0:
raise IndexError(f"No component with name '{str_index}' found")
if len(found) > 1:
raise IndexError(
f"Multiple components found using '{str_index}' as name\n"
f"at indices {found}"
)
return found[0]
@property
def n_inputs(self):
"""The number of inputs of a model."""
return self._n_inputs
@n_inputs.setter
def n_inputs(self, value):
self._n_inputs = value
@property
def n_outputs(self):
"""The number of outputs of a model."""
return self._n_outputs
@n_outputs.setter
def n_outputs(self, value):
self._n_outputs = value
@property
def eqcons(self):
return self._eqcons
@eqcons.setter
def eqcons(self, value):
self._eqcons = value
@property
def ineqcons(self):
        return self._ineqcons
@ineqcons.setter
def ineqcons(self, value):
        self._ineqcons = value
def traverse_postorder(self, include_operator=False):
"""Postorder traversal of the CompoundModel tree."""
res = []
if isinstance(self.left, CompoundModel):
res = res + self.left.traverse_postorder(include_operator)
else:
res = res + [self.left]
if isinstance(self.right, CompoundModel):
res = res + self.right.traverse_postorder(include_operator)
else:
res = res + [self.right]
if include_operator:
res.append(self.op)
else:
res.append(self)
return res
def _format_expression(self, format_leaf=None):
leaf_idx = 0
operands = deque()
if format_leaf is None:
format_leaf = lambda i, l: f"[{i}]"
for node in self.traverse_postorder():
if not isinstance(node, CompoundModel):
operands.append(format_leaf(leaf_idx, node))
leaf_idx += 1
continue
right = operands.pop()
left = operands.pop()
if node.op in OPERATOR_PRECEDENCE:
oper_order = OPERATOR_PRECEDENCE[node.op]
if isinstance(node, CompoundModel):
if (
isinstance(node.left, CompoundModel)
and OPERATOR_PRECEDENCE[node.left.op] < oper_order
):
left = f"({left})"
if (
isinstance(node.right, CompoundModel)
and OPERATOR_PRECEDENCE[node.right.op] < oper_order
):
right = f"({right})"
operands.append(f"{left} {node.op} {right}")
else:
left = f"(({left}),"
right = f"({right}))"
operands.append(" ".join((node.op[0], left, right)))
return "".join(operands)
def _format_components(self):
if self._parameters_ is None:
self._map_parameters()
return "\n\n".join(f"[{idx}]: {m!r}" for idx, m in enumerate(self._leaflist))
def __str__(self):
expression = self._format_expression()
components = self._format_components()
keywords = [
("Expression", expression),
("Components", "\n" + indent(components)),
]
return super()._format_str(keywords=keywords)
def rename(self, name):
self.name = name
return self
@property
def isleaf(self):
return False
@property
def inverse(self):
if self.op == "|":
return self.right.inverse | self.left.inverse
elif self.op == "&":
return self.left.inverse & self.right.inverse
else:
return NotImplemented
@property
def fittable(self):
"""Set the fittable attribute on a compound model."""
if self._fittable is None:
if self._leaflist is None:
self._map_parameters()
self._fittable = all(m.fittable for m in self._leaflist)
return self._fittable
__add__ = _model_oper("+")
__sub__ = _model_oper("-")
__mul__ = _model_oper("*")
__truediv__ = _model_oper("/")
__pow__ = _model_oper("**")
__or__ = _model_oper("|")
__and__ = _model_oper("&")
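    # Editor's note: the assignments above overload the arithmetic, composition
    # ("|") and join ("&") operators on CompoundModel itself, so compound models
    # can be combined further, e.g. ``(m1 + m2) | m3``.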
def _map_parameters(self):
"""
Map all the constituent model parameters to the compound object,
renaming as necessary by appending a suffix number.
This can be an expensive operation, particularly for a complex
expression tree.
All the corresponding parameter attributes are created that one
expects for the Model class.
        The parameter objects that the attributes point to are the same
        objects as in the constituent models. Changes made to parameter
        values in either are seen by both.
Prior to calling this, none of the associated attributes will
exist. This method must be called to make the model usable by
fitting engines.
If oldnames=True, then parameters are named as in the original
implementation of compound models.
"""
if self._parameters is not None:
# do nothing
return
if self._leaflist is None:
self._make_leaflist()
self._parameters_ = {}
param_map = {}
self._param_names = []
for lindex, leaf in enumerate(self._leaflist):
if not isinstance(leaf, dict):
for param_name in leaf.param_names:
param = getattr(leaf, param_name)
new_param_name = f"{param_name}_{lindex}"
self.__dict__[new_param_name] = param
self._parameters_[new_param_name] = param
self._param_names.append(new_param_name)
param_map[new_param_name] = (lindex, param_name)
self._param_metrics = {}
self._param_map = param_map
self._param_map_inverse = {v: k for k, v in param_map.items()}
self._initialize_slices()
self._param_names = tuple(self._param_names)
def _initialize_slices(self):
param_metrics = self._param_metrics
total_size = 0
for name in self.param_names:
param = getattr(self, name)
value = param.value
param_size = np.size(value)
param_shape = np.shape(value)
param_slice = slice(total_size, total_size + param_size)
param_metrics[name] = {}
param_metrics[name]["slice"] = param_slice
param_metrics[name]["shape"] = param_shape
param_metrics[name]["size"] = param_size
total_size += param_size
self._parameters = np.empty(total_size, dtype=np.float64)
@staticmethod
def _recursive_lookup(branch, adict, key):
if isinstance(branch, CompoundModel):
return adict[key]
return branch, key
def inputs_map(self):
"""
        Map the names of the inputs of this ExpressionTree to the inputs of the leaf models.
"""
inputs_map = {}
if not isinstance(
self.op, str
): # If we don't have an operator the mapping is trivial
return {inp: (self, inp) for inp in self.inputs}
elif self.op == "|":
if isinstance(self.left, CompoundModel):
l_inputs_map = self.left.inputs_map()
for inp in self.inputs:
if isinstance(self.left, CompoundModel):
inputs_map[inp] = l_inputs_map[inp]
else:
inputs_map[inp] = self.left, inp
elif self.op == "&":
if isinstance(self.left, CompoundModel):
l_inputs_map = self.left.inputs_map()
if isinstance(self.right, CompoundModel):
r_inputs_map = self.right.inputs_map()
for i, inp in enumerate(self.inputs):
if i < len(self.left.inputs): # Get from left
if isinstance(self.left, CompoundModel):
inputs_map[inp] = l_inputs_map[self.left.inputs[i]]
else:
inputs_map[inp] = self.left, self.left.inputs[i]
else: # Get from right
if isinstance(self.right, CompoundModel):
inputs_map[inp] = r_inputs_map[
self.right.inputs[i - len(self.left.inputs)]
]
else:
inputs_map[inp] = (
self.right,
self.right.inputs[i - len(self.left.inputs)],
)
elif self.op == "fix_inputs":
fixed_ind = list(self.right.keys())
ind = [
list(self.left.inputs).index(i) if isinstance(i, str) else i
for i in fixed_ind
]
inp_ind = list(range(self.left.n_inputs))
for i in ind:
inp_ind.remove(i)
for i in inp_ind:
inputs_map[self.left.inputs[i]] = self.left, self.left.inputs[i]
else:
if isinstance(self.left, CompoundModel):
l_inputs_map = self.left.inputs_map()
for inp in self.left.inputs:
if isinstance(self.left, CompoundModel):
inputs_map[inp] = l_inputs_map[inp]
else:
inputs_map[inp] = self.left, inp
return inputs_map
def _parameter_units_for_data_units(self, input_units, output_units):
if self._leaflist is None:
self._map_parameters()
units_for_data = {}
for imodel, model in enumerate(self._leaflist):
units_for_data_leaf = model._parameter_units_for_data_units(
input_units, output_units
)
for param_leaf in units_for_data_leaf:
param = self._param_map_inverse[(imodel, param_leaf)]
units_for_data[param] = units_for_data_leaf[param_leaf]
return units_for_data
@property
def input_units(self):
inputs_map = self.inputs_map()
input_units_dict = {
key: inputs_map[key][0].input_units[orig_key]
for key, (mod, orig_key) in inputs_map.items()
if inputs_map[key][0].input_units is not None
}
if input_units_dict:
return input_units_dict
return None
@property
def input_units_equivalencies(self):
inputs_map = self.inputs_map()
input_units_equivalencies_dict = {
key: inputs_map[key][0].input_units_equivalencies[orig_key]
for key, (mod, orig_key) in inputs_map.items()
if inputs_map[key][0].input_units_equivalencies is not None
}
if not input_units_equivalencies_dict:
return None
return input_units_equivalencies_dict
@property
def input_units_allow_dimensionless(self):
inputs_map = self.inputs_map()
return {
key: inputs_map[key][0].input_units_allow_dimensionless[orig_key]
for key, (mod, orig_key) in inputs_map.items()
}
@property
def input_units_strict(self):
inputs_map = self.inputs_map()
return {
key: inputs_map[key][0].input_units_strict[orig_key]
for key, (mod, orig_key) in inputs_map.items()
}
@property
def return_units(self):
outputs_map = self.outputs_map()
return {
key: outputs_map[key][0].return_units[orig_key]
for key, (mod, orig_key) in outputs_map.items()
if outputs_map[key][0].return_units is not None
}
def outputs_map(self):
"""
        Map the names of the outputs of this ExpressionTree to the outputs of the leaf models.
"""
outputs_map = {}
if not isinstance(
self.op, str
): # If we don't have an operator the mapping is trivial
return {out: (self, out) for out in self.outputs}
elif self.op == "|":
if isinstance(self.right, CompoundModel):
r_outputs_map = self.right.outputs_map()
for out in self.outputs:
if isinstance(self.right, CompoundModel):
outputs_map[out] = r_outputs_map[out]
else:
outputs_map[out] = self.right, out
elif self.op == "&":
if isinstance(self.left, CompoundModel):
l_outputs_map = self.left.outputs_map()
if isinstance(self.right, CompoundModel):
r_outputs_map = self.right.outputs_map()
for i, out in enumerate(self.outputs):
if i < len(self.left.outputs): # Get from left
if isinstance(self.left, CompoundModel):
outputs_map[out] = l_outputs_map[self.left.outputs[i]]
else:
outputs_map[out] = self.left, self.left.outputs[i]
else: # Get from right
if isinstance(self.right, CompoundModel):
outputs_map[out] = r_outputs_map[
self.right.outputs[i - len(self.left.outputs)]
]
else:
outputs_map[out] = (
self.right,
self.right.outputs[i - len(self.left.outputs)],
)
elif self.op == "fix_inputs":
return self.left.outputs_map()
else:
if isinstance(self.left, CompoundModel):
l_outputs_map = self.left.outputs_map()
for out in self.left.outputs:
if isinstance(self.left, CompoundModel):
                    outputs_map[out] = l_outputs_map[out]
else:
outputs_map[out] = self.left, out
return outputs_map
@property
def has_user_bounding_box(self):
"""
A flag indicating whether or not a custom bounding_box has been
assigned to this model by a user, via assignment to
``model.bounding_box``.
"""
return self._user_bounding_box is not None
def render(self, out=None, coords=None):
"""
Evaluate a model at fixed positions, respecting the ``bounding_box``.
The key difference relative to evaluating the model directly is that
this method is limited to a bounding box if the `Model.bounding_box`
attribute is set.
Parameters
----------
out : `numpy.ndarray`, optional
An array that the evaluated model will be added to. If this is not
given (or given as ``None``), a new array will be created.
coords : array-like, optional
An array to be used to translate from the model's input coordinates
to the ``out`` array. It should have the property that
``self(coords)`` yields the same shape as ``out``. If ``out`` is
not specified, ``coords`` will be used to determine the shape of
the returned array. If this is not provided (or None), the model
will be evaluated on a grid determined by `Model.bounding_box`.
Returns
-------
out : `numpy.ndarray`
The model added to ``out`` if ``out`` is not ``None``, or else a
new array from evaluating the model over ``coords``.
If ``out`` and ``coords`` are both `None`, the returned array is
limited to the `Model.bounding_box` limits. If
`Model.bounding_box` is `None`, ``arr`` or ``coords`` must be
passed.
Raises
------
ValueError
            If ``coords`` are not given and the `Model.bounding_box` of
this model is not set.
Examples
--------
:ref:`astropy:bounding-boxes`
"""
bbox = self.get_bounding_box()
ndim = self.n_inputs
if (coords is None) and (out is None) and (bbox is None):
raise ValueError("If no bounding_box is set, coords or out must be input.")
# for consistent indexing
if ndim == 1:
if coords is not None:
coords = [coords]
if bbox is not None:
bbox = [bbox]
if coords is not None:
coords = np.asanyarray(coords, dtype=float)
# Check dimensions match out and model
assert len(coords) == ndim
if out is not None:
if coords[0].shape != out.shape:
raise ValueError("inconsistent shape of the output.")
else:
out = np.zeros(coords[0].shape)
if out is not None:
out = np.asanyarray(out)
if out.ndim != ndim:
raise ValueError(
"the array and model must have the same number of dimensions."
)
if bbox is not None:
# Assures position is at center pixel, important when using
# add_array.
pd = (
np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2)) for bb in bbox])
.astype(int)
.T
)
pos, delta = pd
if coords is not None:
sub_shape = tuple(delta * 2 + 1)
sub_coords = np.array(
[extract_array(c, sub_shape, pos) for c in coords]
)
else:
limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
sub_coords = np.mgrid[limits]
sub_coords = sub_coords[::-1]
if out is None:
out = self(*sub_coords)
else:
try:
out = add_array(out, self(*sub_coords), pos)
except ValueError:
raise ValueError(
"The `bounding_box` is larger than the input out in "
"one or more dimensions. Set "
"`model.bounding_box = None`."
)
else:
if coords is None:
im_shape = out.shape
limits = [slice(i) for i in im_shape]
coords = np.mgrid[limits]
coords = coords[::-1]
out += self(*coords)
return out
def replace_submodel(self, name, model):
"""
Construct a new `~astropy.modeling.CompoundModel` instance from an
existing CompoundModel, replacing the named submodel with a new model.
In order to ensure that inverses and names are kept/reconstructed, it's
necessary to rebuild the CompoundModel from the replaced node all the
way back to the base. The original CompoundModel is left untouched.
Parameters
----------
name : str
name of submodel to be replaced
model : `~astropy.modeling.Model`
replacement model
"""
submodels = [
m for m in self.traverse_postorder() if getattr(m, "name", None) == name
]
if submodels:
if len(submodels) > 1:
raise ValueError(f"More than one submodel named {name}")
old_model = submodels.pop()
if len(old_model) != len(model):
raise ValueError(
"New and old models must have equal values for n_models"
)
# Do this check first in order to raise a more helpful Exception,
# although it would fail trying to construct the new CompoundModel
if (
old_model.n_inputs != model.n_inputs
or old_model.n_outputs != model.n_outputs
):
raise ValueError(
"New model must match numbers of inputs and "
"outputs of existing model"
)
tree = _get_submodel_path(self, name)
while tree:
branch = self.copy()
for node in tree[:-1]:
branch = getattr(branch, node)
setattr(branch, tree[-1], model)
model = CompoundModel(
branch.op, branch.left, branch.right, name=branch.name
)
tree = tree[:-1]
return model
else:
raise ValueError(f"No submodels found named {name}")
def _set_sub_models_and_parameter_units(self, left, right):
"""
Provides a work-around to properly set the sub models and respective
        parameters' units/values when using the ``without_units_for_data``
        or ``with_units_from_data`` methods.
"""
model = CompoundModel(self.op, left, right)
self.left = left
self.right = right
for name in model.param_names:
model_parameter = getattr(model, name)
parameter = getattr(self, name)
parameter.value = model_parameter.value
parameter._set_unit(model_parameter.unit, force=True)
def without_units_for_data(self, **kwargs):
"""
See `~astropy.modeling.Model.without_units_for_data` for overview
of this method.
Notes
-----
This modifies the behavior of the base method to account for the
case where the sub-models of a compound model have different output
        units. This is only valid for compound models combining submodels with
        the * and / operators, as in that case it is reasonable to mix the
        output units. It does this
by modifying the output units of each sub model by using the output
units of the other sub model so that we can apply the original function
and get the desired result.
Additional data has to be output in the mixed output unit case
so that the units can be properly rebuilt by
`~astropy.modeling.CompoundModel.with_units_from_data`.
Outside the mixed output units, this method is identical to the
base method.
"""
if self.op in ["*", "/"]:
model = self.copy()
inputs = {inp: kwargs[inp] for inp in self.inputs}
left_units = self.left.output_units(**kwargs)
right_units = self.right.output_units(**kwargs)
if self.op == "*":
left_kwargs = {
out: kwargs[out] / right_units[out]
for out in self.left.outputs
if kwargs[out] is not None
}
right_kwargs = {
out: kwargs[out] / left_units[out]
for out in self.right.outputs
if kwargs[out] is not None
}
else:
left_kwargs = {
out: kwargs[out] * right_units[out]
for out in self.left.outputs
if kwargs[out] is not None
}
right_kwargs = {
out: 1 / kwargs[out] * left_units[out]
for out in self.right.outputs
if kwargs[out] is not None
}
left_kwargs.update(inputs.copy())
right_kwargs.update(inputs.copy())
left = self.left.without_units_for_data(**left_kwargs)
if isinstance(left, tuple):
left_kwargs["_left_kwargs"] = left[1]
left_kwargs["_right_kwargs"] = left[2]
left = left[0]
right = self.right.without_units_for_data(**right_kwargs)
if isinstance(right, tuple):
right_kwargs["_left_kwargs"] = right[1]
right_kwargs["_right_kwargs"] = right[2]
right = right[0]
model._set_sub_models_and_parameter_units(left, right)
return model, left_kwargs, right_kwargs
else:
return super().without_units_for_data(**kwargs)
def with_units_from_data(self, **kwargs):
"""
See `~astropy.modeling.Model.with_units_from_data` for overview
of this method.
Notes
-----
This modifies the behavior of the base method to account for the
case where the sub-models of a compound model have different output
        units. This is only valid for compound models combining submodels with
        the * and / operators, as in that case it is reasonable to mix the
        output units. In order to
do this it requires some additional information output by
`~astropy.modeling.CompoundModel.without_units_for_data` passed as
keyword arguments under the keywords ``_left_kwargs`` and ``_right_kwargs``.
Outside the mixed output units, this method is identical to the
base method.
"""
if self.op in ["*", "/"]:
left_kwargs = kwargs.pop("_left_kwargs")
right_kwargs = kwargs.pop("_right_kwargs")
left = self.left.with_units_from_data(**left_kwargs)
right = self.right.with_units_from_data(**right_kwargs)
model = self.copy()
model._set_sub_models_and_parameter_units(left, right)
return model
else:
return super().with_units_from_data(**kwargs)
def _get_submodel_path(model, name):
"""Find the route down a CompoundModel's tree to the model with the
specified name (whether it's a leaf or not).
"""
if getattr(model, "name", None) == name:
return []
try:
return ["left"] + _get_submodel_path(model.left, name)
except (AttributeError, TypeError):
pass
try:
return ["right"] + _get_submodel_path(model.right, name)
except (AttributeError, TypeError):
pass
def binary_operation(binoperator, left, right):
"""
Perform binary operation. Operands may be matching tuples of operands.
"""
if isinstance(left, tuple) and isinstance(right, tuple):
return tuple(binoperator(item[0], item[1]) for item in zip(left, right))
return binoperator(left, right)
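# Editor's note (illustrative): for tuple operands the operator is applied
# element-wise, e.g. ``binary_operation(operator.add, (1, 2), (10, 20))``
# returns ``(11, 22)``; scalar operands are combined directly.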
def get_ops(tree, opset):
"""
Recursive function to collect operators used.
"""
if isinstance(tree, CompoundModel):
opset.add(tree.op)
get_ops(tree.left, opset)
get_ops(tree.right, opset)
else:
return
def make_subtree_dict(tree, nodepath, tdict, leaflist):
"""Traverse a tree noting each node by a key.
The key indicates all the left/right choices necessary to reach that node.
Each key will reference a tuple that contains:
- reference to the compound model for that node.
- left most index contained within that subtree
(relative to all indices for the whole tree)
- right most index contained within that subtree
"""
# if this is a leaf, just append it to the leaflist
if not hasattr(tree, "isleaf"):
leaflist.append(tree)
else:
leftmostind = len(leaflist)
make_subtree_dict(tree.left, nodepath + "l", tdict, leaflist)
make_subtree_dict(tree.right, nodepath + "r", tdict, leaflist)
rightmostind = len(leaflist) - 1
tdict[nodepath] = (tree, leftmostind, rightmostind)
_ORDER_OF_OPERATORS = [("fix_inputs",), ("|",), ("&",), ("+", "-"), ("*", "/"), ("**",)]
OPERATOR_PRECEDENCE = {}
for idx, ops in enumerate(_ORDER_OF_OPERATORS):
for op in ops:
OPERATOR_PRECEDENCE[op] = idx
del idx, op, ops
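# Editor's note (illustrative): ``_format_expression`` only parenthesizes a
# child whose operator has lower precedence than its parent, so with the table
# above ``(m1 + m2) | m3`` renders as "[0] + [1] | [2]" while
# ``m1 + (m2 | m3)`` renders as "[0] + ([1] | [2])".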
def fix_inputs(modelinstance, values, bounding_boxes=None, selector_args=None):
"""
This function creates a compound model with one or more of the input
values of the input model assigned fixed values (scalar or array).
Parameters
----------
modelinstance : `~astropy.modeling.Model` instance
This is the model that one or more of the
model input values will be fixed to some constant value.
values : dict
A dictionary where the key identifies which input to fix
and its value is the value to fix it at. The key may either be the
name of the input or a number reflecting its order in the inputs.
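    bounding_boxes : dict, optional
        A dictionary of bounding box tuples keyed by selector value, see
        :ref:`astropy:bounding-boxes` for details. If given, a compound
        bounding box is validated against the input model and attached to
        the returned model.
    selector_args : list, optional
        List of selector argument tuples defining the selection for the
        compound bounding box, see :ref:`astropy:bounding-boxes` for details.
        By default, every fixed input is used as a selector.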
Examples
--------
>>> from astropy.modeling.models import Gaussian2D
>>> g = Gaussian2D(1, 2, 3, 4, 5)
>>> gv = fix_inputs(g, {0: 2.5})
Results in a 1D function equivalent to Gaussian2D(1, 2, 3, 4, 5)(x=2.5, y)
"""
model = CompoundModel("fix_inputs", modelinstance, values)
if bounding_boxes is not None:
if selector_args is None:
selector_args = tuple((key, True) for key in values.keys())
bbox = CompoundBoundingBox.validate(
modelinstance, bounding_boxes, selector_args
)
_selector = bbox.selector_args.get_fixed_values(modelinstance, values)
new_bbox = bbox[_selector]
new_bbox = new_bbox.__class__.validate(model, new_bbox)
model.bounding_box = new_bbox
return model
def bind_bounding_box(modelinstance, bounding_box, ignored=None, order="C"):
"""
Set a validated bounding box to a model instance.
Parameters
----------
modelinstance : `~astropy.modeling.Model` instance
This is the model that the validated bounding box will be set on.
bounding_box : tuple
A bounding box tuple, see :ref:`astropy:bounding-boxes` for details
ignored : list
List of the inputs to be ignored by the bounding box.
order : str, optional
The ordering of the bounding box tuple, can be either ``'C'`` or
``'F'``.
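    Examples
    --------
    A minimal sketch (editor's illustration; any model with one input works
    the same way):
    >>> from astropy.modeling.models import Gaussian1D
    >>> model = Gaussian1D()
    >>> bind_bounding_box(model, (-1, 1))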
"""
modelinstance.bounding_box = ModelBoundingBox.validate(
modelinstance, bounding_box, ignored=ignored, order=order
)
def bind_compound_bounding_box(
modelinstance,
bounding_boxes,
selector_args,
create_selector=None,
ignored=None,
order="C",
):
"""
Add a validated compound bounding box to a model instance.
Parameters
----------
modelinstance : `~astropy.modeling.Model` instance
This is the model that the validated compound bounding box will be set on.
bounding_boxes : dict
A dictionary of bounding box tuples, see :ref:`astropy:bounding-boxes`
for details.
selector_args : list
List of selector argument tuples to define selection for compound
bounding box, see :ref:`astropy:bounding-boxes` for details.
create_selector : callable, optional
An optional callable with interface (selector_value, model) which
can generate a bounding box based on a selector value and model if
there is no bounding box in the compound bounding box listed under
that selector value. Default is ``None``, meaning new bounding
box entries will not be automatically generated.
ignored : list
List of the inputs to be ignored by the bounding box.
order : str, optional
The ordering of the bounding box tuple, can be either ``'C'`` or
``'F'``.
"""
modelinstance.bounding_box = CompoundBoundingBox.validate(
modelinstance,
bounding_boxes,
selector_args,
create_selector=create_selector,
ignored=ignored,
order=order,
)
def custom_model(*args, fit_deriv=None):
"""
Create a model from a user defined function. The inputs and parameters of
the model will be inferred from the arguments of the function.
This can be used either as a function or as a decorator. See below for
examples of both usages.
The model is separable only if there is a single input.
.. note::
All model parameters have to be defined as keyword arguments with
default values in the model function. Use `None` as a default argument
value if you do not want to have a default value for that parameter.
The standard settable model properties can be configured by default
using keyword arguments matching the name of the property; however,
these values are not set as model "parameters". Moreover, users
cannot use keyword arguments matching non-settable model properties,
with the exception of ``n_outputs`` which should be set to the number of
outputs of your function.
Parameters
----------
func : function
Function which defines the model. It should take N positional
        arguments where ``N`` is the number of dimensions of the model (the number
        of independent variables in the model), and any number of keyword arguments
(the parameters). It must return the value of the model (typically as
an array, but can also be a scalar for scalar inputs). This
corresponds to the `~astropy.modeling.Model.evaluate` method.
fit_deriv : function, optional
Function which defines the Jacobian derivative of the model. I.e., the
derivative with respect to the *parameters* of the model. It should
have the same argument signature as ``func``, but should return a
sequence where each element of the sequence is the derivative
with respect to the corresponding argument. This corresponds to the
:meth:`~astropy.modeling.FittableModel.fit_deriv` method.
Examples
--------
Define a sinusoidal model function as a custom 1D model::
>>> from astropy.modeling.models import custom_model
>>> import numpy as np
>>> def sine_model(x, amplitude=1., frequency=1.):
... return amplitude * np.sin(2 * np.pi * frequency * x)
>>> def sine_deriv(x, amplitude=1., frequency=1.):
... return 2 * np.pi * amplitude * np.cos(2 * np.pi * frequency * x)
>>> SineModel = custom_model(sine_model, fit_deriv=sine_deriv)
Create an instance of the custom model and evaluate it::
>>> model = SineModel()
>>> model(0.25) # doctest: +FLOAT_CMP
1.0
This model instance can now be used like a usual astropy model.
The next example demonstrates a 2D Moffat function model, and also
demonstrates the support for docstrings (this example could also include
a derivative, but it has been omitted for simplicity)::
>>> @custom_model
... def Moffat2D(x, y, amplitude=1.0, x_0=0.0, y_0=0.0, gamma=1.0,
... alpha=1.0):
... \"\"\"Two dimensional Moffat function.\"\"\"
... rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2
... return amplitude * (1 + rr_gg) ** (-alpha)
...
>>> print(Moffat2D.__doc__)
Two dimensional Moffat function.
>>> model = Moffat2D()
>>> model(1, 1) # doctest: +FLOAT_CMP
0.3333333333333333
"""
if len(args) == 1 and callable(args[0]):
return _custom_model_wrapper(args[0], fit_deriv=fit_deriv)
elif not args:
return functools.partial(_custom_model_wrapper, fit_deriv=fit_deriv)
else:
raise TypeError(
f"{__name__} takes at most one positional argument (the callable/"
"function to be turned into a model. When used as a decorator "
"it should be passed keyword arguments only (if "
"any)."
)
def _custom_model_inputs(func):
"""
Processes the inputs to the `custom_model`'s function into the appropriate
categories.
Parameters
----------
func : callable
Returns
-------
inputs : list
list of evaluation inputs
special_params : dict
dictionary of model properties which require special treatment
settable_params : dict
dictionary of defaults for settable model properties
params : dict
dictionary of model parameters set by `custom_model`'s function
"""
inputs, parameters = get_inputs_and_params(func)
special = ["n_outputs"]
settable = [
attr
for attr, value in vars(Model).items()
if isinstance(value, property) and value.fset is not None
]
properties = [
attr
for attr, value in vars(Model).items()
if isinstance(value, property) and value.fset is None and attr not in special
]
special_params = {}
settable_params = {}
params = {}
for param in parameters:
if param.name in special:
special_params[param.name] = param.default
elif param.name in settable:
settable_params[param.name] = param.default
elif param.name in properties:
raise ValueError(
f"Parameter '{param.name}' cannot be a model property: {properties}."
)
else:
params[param.name] = param.default
return inputs, special_params, settable_params, params
def _custom_model_wrapper(func, fit_deriv=None):
"""
    Internal implementation of `custom_model`.
When `custom_model` is called as a function its arguments are passed to
this function, and the result of this function is returned.
When `custom_model` is used as a decorator a partial evaluation of this
function is returned by `custom_model`.
"""
if not callable(func):
raise ModelDefinitionError(
"func is not callable; it must be a function or other callable object"
)
if fit_deriv is not None and not callable(fit_deriv):
raise ModelDefinitionError(
"fit_deriv not callable; it must be a function or other callable object"
)
model_name = func.__name__
inputs, special_params, settable_params, params = _custom_model_inputs(func)
if fit_deriv is not None and len(fit_deriv.__defaults__) != len(params):
raise ModelDefinitionError(
"derivative function should accept same number of parameters as func."
)
params = {
param: Parameter(param, default=default) for param, default in params.items()
}
mod = find_current_module(2)
if mod:
modname = mod.__name__
else:
modname = "__main__"
members = {
"__module__": str(modname),
"__doc__": func.__doc__,
"n_inputs": len(inputs),
"n_outputs": special_params.pop("n_outputs", 1),
"evaluate": staticmethod(func),
"_settable_properties": settable_params,
}
if fit_deriv is not None:
members["fit_deriv"] = staticmethod(fit_deriv)
members.update(params)
cls = type(model_name, (FittableModel,), members)
cls._separable = len(inputs) == 1
return cls
def render_model(model, arr=None, coords=None):
"""
Evaluates a model on an input array. Evaluation is limited to
a bounding box if the `Model.bounding_box` attribute is set.
Parameters
----------
model : `Model`
Model to be evaluated.
arr : `numpy.ndarray`, optional
Array on which the model is evaluated.
coords : array-like, optional
Coordinate arrays mapping to ``arr``, such that
``arr[coords] == arr``.
Returns
-------
array : `numpy.ndarray`
The model evaluated on the input ``arr`` or a new array from
``coords``.
If ``arr`` and ``coords`` are both `None`, the returned array is
limited to the `Model.bounding_box` limits. If
`Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed.
Examples
--------
:ref:`astropy:bounding-boxes`
"""
bbox = model.bounding_box
if (coords is None) & (arr is None) & (bbox is None):
raise ValueError("If no bounding_box is set, coords or arr must be input.")
# for consistent indexing
if model.n_inputs == 1:
if coords is not None:
coords = [coords]
if bbox is not None:
bbox = [bbox]
if arr is not None:
arr = arr.copy()
# Check dimensions match model
if arr.ndim != model.n_inputs:
raise ValueError(
"number of array dimensions inconsistent with number of model inputs."
)
if coords is not None:
# Check dimensions match arr and model
coords = np.array(coords)
if len(coords) != model.n_inputs:
raise ValueError(
"coordinate length inconsistent with the number of model inputs."
)
if arr is not None:
if coords[0].shape != arr.shape:
raise ValueError("coordinate shape inconsistent with the array shape.")
else:
arr = np.zeros(coords[0].shape)
if bbox is not None:
# assures position is at center pixel, important when using add_array
pd = pos, delta = (
np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2)) for bb in bbox])
.astype(int)
.T
)
if coords is not None:
sub_shape = tuple(delta * 2 + 1)
sub_coords = np.array([extract_array(c, sub_shape, pos) for c in coords])
else:
limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
sub_coords = np.mgrid[limits]
sub_coords = sub_coords[::-1]
if arr is None:
arr = model(*sub_coords)
else:
try:
arr = add_array(arr, model(*sub_coords), pos)
except ValueError:
raise ValueError(
"The `bounding_box` is larger than the input"
" arr in one or more dimensions. Set "
"`model.bounding_box = None`."
)
else:
if coords is None:
im_shape = arr.shape
limits = [slice(i) for i in im_shape]
coords = np.mgrid[limits]
arr += model(*coords[::-1])
return arr
def hide_inverse(model):
"""
This is a convenience function intended to disable automatic generation
of the inverse in compound models by disabling one of the constituent
model's inverse. This is to handle cases where user provided inverse
functions are not compatible within an expression.
For example::
compound_model.inverse = hide_inverse(m1) + m2 + m3
    This will ensure that the defined inverse itself won't attempt to
    build its own inverse, which would otherwise fail in this example
    (e.g., m = m1 + m2 + m3 happens to raise an exception for this
    reason).
Note that this permanently disables it. To prevent that either copy
the model or restore the inverse later.
"""
del model.inverse
return model
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Spline models and fitters."""
# pylint: disable=line-too-long, too-many-lines, too-many-arguments, invalid-name
import abc
import functools
import warnings
import numpy as np
from astropy.utils import isiterable
from astropy.utils.exceptions import AstropyUserWarning
from .core import FittableModel, ModelDefinitionError
from .parameters import Parameter
__all__ = [
"Spline1D",
"SplineInterpolateFitter",
"SplineSmoothingFitter",
"SplineExactKnotsFitter",
"SplineSplrepFitter",
]
__doctest_requires__ = {"Spline1D": ["scipy"]}
class _Spline(FittableModel):
"""Base class for spline models."""
_knot_names = ()
_coeff_names = ()
optional_inputs = {}
def __init__(
self,
knots=None,
coeffs=None,
degree=None,
bounds=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
):
super().__init__(
n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta
)
self._user_knots = False
self._init_tck(degree)
# Hack to allow an optional model argument
self._create_optional_inputs()
if knots is not None:
self._init_spline(knots, coeffs, bounds)
elif coeffs is not None:
raise ValueError(
"If one passes a coeffs vector one needs to also pass knots!"
)
@property
def param_names(self):
"""
        Parameter names, generated from the spline's knot and coefficient
        arrays.
"""
return tuple(list(self._knot_names) + list(self._coeff_names))
@staticmethod
def _optional_arg(arg):
return f"_{arg}"
def _create_optional_inputs(self):
for arg in self.optional_inputs:
attribute = self._optional_arg(arg)
if hasattr(self, attribute):
raise ValueError(
f"Optional argument {arg} already exists in this class!"
)
else:
setattr(self, attribute, None)
def _intercept_optional_inputs(self, **kwargs):
new_kwargs = kwargs
for arg in self.optional_inputs:
if arg in kwargs:
attribute = self._optional_arg(arg)
if getattr(self, attribute) is None:
setattr(self, attribute, kwargs[arg])
del new_kwargs[arg]
else:
raise RuntimeError(
f"{arg} has already been set, something has gone wrong!"
)
return new_kwargs
def evaluate(self, *args, **kwargs):
"""Extract the optional kwargs passed to call."""
optional_inputs = kwargs
for arg in self.optional_inputs:
attribute = self._optional_arg(arg)
if arg in kwargs:
# Options passed in
optional_inputs[arg] = kwargs[arg]
elif getattr(self, attribute) is not None:
# No options passed in and Options set
optional_inputs[arg] = getattr(self, attribute)
setattr(self, attribute, None)
else:
# No options passed in and No options set
optional_inputs[arg] = self.optional_inputs[arg]
return optional_inputs
def __call__(self, *args, **kwargs):
"""
        Make the model callable for evaluation.
"""
# Hack to allow an optional model argument
kwargs = self._intercept_optional_inputs(**kwargs)
return super().__call__(*args, **kwargs)
def _create_parameter(self, name: str, index: int, attr: str, fixed=False):
"""
Create a spline parameter linked to an attribute array.
Parameters
----------
name : str
Name for the parameter
index : int
The index of the parameter in the array
attr : str
The name for the attribute array
fixed : optional, bool
If the parameter should be fixed or not
"""
# Hack to allow parameters and attribute array to freely exchange values
# _getter forces reading value from attribute array
# _setter forces setting value to attribute array
def _getter(value, model: "_Spline", index: int, attr: str):
return getattr(model, attr)[index]
def _setter(value, model: "_Spline", index: int, attr: str):
getattr(model, attr)[index] = value
return value
getter = functools.partial(_getter, index=index, attr=attr)
setter = functools.partial(_setter, index=index, attr=attr)
default = getattr(self, attr)
param = Parameter(
name=name, default=default[index], fixed=fixed, getter=getter, setter=setter
)
# setter/getter wrapper for parameters in this case require the
# parameter to have a reference back to its parent model
param.model = self
param.value = default[index]
# Add parameter to model
self.__dict__[name] = param
def _create_parameters(self, base_name: str, attr: str, fixed=False):
"""
        Create spline parameters linked to an attribute array, one for each
        element in that array.
Parameters
----------
base_name : str
Base name for the parameters
attr : str
The name for the attribute array
fixed : optional, bool
If the parameters should be fixed or not
"""
names = []
for index in range(len(getattr(self, attr))):
name = f"{base_name}{index}"
names.append(name)
self._create_parameter(name, index, attr, fixed)
return tuple(names)
@abc.abstractmethod
def _init_parameters(self):
raise NotImplementedError("This needs to be implemented")
@abc.abstractmethod
def _init_data(self, knots, coeffs, bounds=None):
raise NotImplementedError("This needs to be implemented")
def _init_spline(self, knots, coeffs, bounds=None):
self._init_data(knots, coeffs, bounds)
self._init_parameters()
# fill _parameters and related attributes
self._initialize_parameters((), {})
self._initialize_slices()
# Calling this will properly fill the _parameter vector, which is
# used directly sometimes without being properly filled.
_ = self.parameters
def _init_tck(self, degree):
self._c = None
self._t = None
self._degree = degree
def __getstate__(self):
return {
"t": self._t,
"c": self._c,
"k": self._degree,
}
def __setstate__(self, state):
return self.__init__(knots=state["t"], coeffs=state["c"], degree=state["k"])
class Spline1D(_Spline):
"""
One dimensional Spline Model.
Parameters
----------
knots : optional
Define the knots for the spline. Can be 1) the number of interior
knots for the spline, 2) the array of all knots for the spline, or
3) If both bounds are defined, the interior knots for the spline
coeffs : optional
The array of knot coefficients for the spline
degree : optional
The degree of the spline. It must be 1 <= degree <= 5, default is 3.
bounds : optional
The upper and lower bounds of the spline.
Notes
-----
Much of the functionality of this model is provided by
`scipy.interpolate.BSpline` which can be directly accessed via the
bspline property.
Fitting for this model is provided by wrappers for:
`scipy.interpolate.UnivariateSpline`,
`scipy.interpolate.InterpolatedUnivariateSpline`,
and `scipy.interpolate.LSQUnivariateSpline`.
If one fails to define any knots/coefficients, no parameters will
be added to this model until a fitter is called. This is because
some of the fitters for splines vary the number of parameters and so
we cannot define the parameter set until after fitting in these cases.
Since parameters are not necessarily known at model initialization,
setting model parameters directly via the model interface has been
disabled.
Direct constructors are provided for this model which incorporate the
fitting to data directly into model construction.
Knot parameters are declared as "fixed" parameters by default to
enable the use of other `astropy.modeling` fitters to be used to
fit this model.
Examples
--------
>>> import numpy as np
>>> from astropy.modeling.models import Spline1D
>>> from astropy.modeling import fitting
>>> np.random.seed(42)
>>> x = np.linspace(-3, 3, 50)
>>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)
>>> xs = np.linspace(-3, 3, 1000)
A 1D interpolating spline can be fit to data:
>>> fitter = fitting.SplineInterpolateFitter()
>>> spl = fitter(Spline1D(), x, y)
Similarly, a smoothing spline can be fit to data:
>>> fitter = fitting.SplineSmoothingFitter()
>>> spl = fitter(Spline1D(), x, y, s=0.5)
Similarly, a spline can be fit to data using an exact set of interior knots:
>>> t = [-1, 0, 1]
>>> fitter = fitting.SplineExactKnotsFitter()
>>> spl = fitter(Spline1D(), x, y, t=t)
"""
n_inputs = 1
n_outputs = 1
_separable = True
optional_inputs = {"nu": 0}
def __init__(
self,
knots=None,
coeffs=None,
degree=3,
bounds=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
):
super().__init__(
knots=knots,
coeffs=coeffs,
degree=degree,
bounds=bounds,
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
)
@property
def t(self):
"""
The knots vector.
"""
if self._t is None:
return np.concatenate(
(np.zeros(self._degree + 1), np.ones(self._degree + 1))
)
else:
return self._t
@t.setter
def t(self, value):
if self._t is None:
raise ValueError(
"The model parameters must be initialized before setting knots."
)
elif len(value) == len(self._t):
self._t = value
else:
raise ValueError(
"There must be exactly as many knots as previously defined."
)
@property
def t_interior(self):
"""
The interior knots.
"""
return self.t[self.degree + 1 : -(self.degree + 1)]
@property
def c(self):
"""
The coefficients vector.
"""
if self._c is None:
return np.zeros(len(self.t))
else:
return self._c
@c.setter
def c(self, value):
if self._c is None:
raise ValueError(
"The model parameters must be initialized before setting coeffs."
)
elif len(value) == len(self._c):
self._c = value
else:
raise ValueError(
"There must be exactly as many coeffs as previously defined."
)
@property
def degree(self):
"""
The degree of the spline polynomials.
"""
return self._degree
@property
def _initialized(self):
return self._t is not None and self._c is not None
@property
def tck(self):
"""
Scipy 'tck' tuple representation.
"""
return (self.t, self.c, self.degree)
@tck.setter
def tck(self, value):
if self._initialized:
if value[2] != self.degree:
raise ValueError("tck has incompatible degree!")
self.t = value[0]
self.c = value[1]
else:
self._init_spline(value[0], value[1])
# Calling this will properly fill the _parameter vector, which is
# used directly sometimes without being properly filled.
_ = self.parameters
@property
def bspline(self):
"""
Scipy bspline object representation.
"""
from scipy.interpolate import BSpline
return BSpline(*self.tck)
@bspline.setter
def bspline(self, value):
from scipy.interpolate import BSpline
if isinstance(value, BSpline):
self.tck = value.tck
else:
self.tck = value
@property
def knots(self):
"""
Dictionary of knot parameters.
"""
return [getattr(self, knot) for knot in self._knot_names]
@property
def user_knots(self):
"""If the knots have been supplied by the user."""
return self._user_knots
@user_knots.setter
def user_knots(self, value):
self._user_knots = value
@property
def coeffs(self):
"""
Dictionary of coefficient parameters.
"""
return [getattr(self, coeff) for coeff in self._coeff_names]
def _init_parameters(self):
self._knot_names = self._create_parameters("knot", "t", fixed=True)
self._coeff_names = self._create_parameters("coeff", "c")
def _init_bounds(self, bounds=None):
if bounds is None:
bounds = [None, None]
if bounds[0] is None:
lower = np.zeros(self._degree + 1)
else:
lower = np.array([bounds[0]] * (self._degree + 1))
if bounds[1] is None:
upper = np.ones(self._degree + 1)
else:
upper = np.array([bounds[1]] * (self._degree + 1))
if bounds[0] is not None and bounds[1] is not None:
self.bounding_box = bounds
has_bounds = True
else:
has_bounds = False
return has_bounds, lower, upper
def _init_knots(self, knots, has_bounds, lower, upper):
if np.issubdtype(type(knots), np.integer):
self._t = np.concatenate((lower, np.zeros(knots), upper))
elif isiterable(knots):
self._user_knots = True
if has_bounds:
self._t = np.concatenate((lower, np.array(knots), upper))
else:
if len(knots) < 2 * (self._degree + 1):
raise ValueError(
f"Must have at least {2*(self._degree + 1)} knots."
)
self._t = np.array(knots)
else:
raise ValueError(f"Knots: {knots} must be iterable or value")
# check that knots form a viable spline
self.bspline # noqa: B018
def _init_coeffs(self, coeffs=None):
if coeffs is None:
self._c = np.zeros(len(self._t))
else:
self._c = np.array(coeffs)
# check that coeffs form a viable spline
self.bspline # noqa: B018
def _init_data(self, knots, coeffs, bounds=None):
self._init_knots(knots, *self._init_bounds(bounds))
self._init_coeffs(coeffs)
def evaluate(self, *args, **kwargs):
"""
Evaluate the spline.
Parameters
----------
x :
(positional) The points at which to evaluate the spline.
nu : optional
(kwarg) The derivative of the spline for evaluation, 0 <= nu <= degree + 1.
Default: 0.
"""
kwargs = super().evaluate(*args, **kwargs)
x = args[0]
if "nu" in kwargs:
if kwargs["nu"] > self.degree + 1:
raise RuntimeError(
"Cannot evaluate a derivative of "
f"order higher than {self.degree + 1}"
)
return self.bspline(x, **kwargs)
def derivative(self, nu=1):
"""
Create a spline that is the derivative of this one.
Parameters
----------
nu : int, optional
Derivative order, default is 1.
"""
if nu <= self.degree:
bspline = self.bspline.derivative(nu=nu)
derivative = Spline1D(degree=bspline.k)
derivative.bspline = bspline
return derivative
else:
raise ValueError(f"Must have nu <= {self.degree}")
def antiderivative(self, nu=1):
"""
Create a spline that is an antiderivative of this one.
Parameters
----------
nu : int, optional
Antiderivative order, default is 1.
Notes
-----
Assumes constant of integration is 0
"""
if (nu + self.degree) <= 5:
bspline = self.bspline.antiderivative(nu=nu)
antiderivative = Spline1D(degree=bspline.k)
antiderivative.bspline = bspline
return antiderivative
else:
raise ValueError(
"Supported splines can have max degree 5, "
f"antiderivative degree will be {nu + self.degree}"
)
class _SplineFitter(abc.ABC):
"""
Base Spline Fitter.
"""
def __init__(self):
self.fit_info = {"resid": None, "spline": None}
def _set_fit_info(self, spline):
self.fit_info["resid"] = spline.get_residual()
self.fit_info["spline"] = spline
@abc.abstractmethod
def _fit_method(self, model, x, y, **kwargs):
raise NotImplementedError("This has not been implemented for _SplineFitter.")
def __call__(self, model, x, y, z=None, **kwargs):
model_copy = model.copy()
if isinstance(model_copy, Spline1D):
if z is not None:
raise ValueError("1D model can only have 2 data points.")
spline = self._fit_method(model_copy, x, y, **kwargs)
else:
raise ModelDefinitionError(
"Only spline models are compatible with this fitter."
)
self._set_fit_info(spline)
return model_copy
class SplineInterpolateFitter(_SplineFitter):
"""
Fit an interpolating spline.
"""
def _fit_method(self, model, x, y, **kwargs):
weights = kwargs.pop("weights", None)
bbox = kwargs.pop("bbox", [None, None])
if model.user_knots:
warnings.warn(
"The current user specified knots maybe ignored for interpolating data",
AstropyUserWarning,
)
model.user_knots = False
if bbox != [None, None]:
model.bounding_box = bbox
from scipy.interpolate import InterpolatedUnivariateSpline
spline = InterpolatedUnivariateSpline(
x, y, w=weights, bbox=bbox, k=model.degree
)
model.tck = spline._eval_args
return spline
class SplineSmoothingFitter(_SplineFitter):
"""
Fit a smoothing spline.
"""
def _fit_method(self, model, x, y, **kwargs):
s = kwargs.pop("s", None)
weights = kwargs.pop("weights", None)
bbox = kwargs.pop("bbox", [None, None])
if model.user_knots:
warnings.warn(
"The current user specified knots maybe ignored for smoothing data",
AstropyUserWarning,
)
model.user_knots = False
if bbox != [None, None]:
model.bounding_box = bbox
from scipy.interpolate import UnivariateSpline
spline = UnivariateSpline(x, y, w=weights, bbox=bbox, k=model.degree, s=s)
model.tck = spline._eval_args
return spline
class SplineExactKnotsFitter(_SplineFitter):
"""
Fit a spline using least-squares regression.
"""
def _fit_method(self, model, x, y, **kwargs):
t = kwargs.pop("t", None)
weights = kwargs.pop("weights", None)
bbox = kwargs.pop("bbox", [None, None])
if t is not None:
if model.user_knots:
warnings.warn(
"The current user specified knots will be "
"overwritten for by knots passed into this function",
AstropyUserWarning,
)
else:
if model.user_knots:
t = model.t_interior
else:
raise RuntimeError("No knots have been provided")
if bbox != [None, None]:
model.bounding_box = bbox
from scipy.interpolate import LSQUnivariateSpline
spline = LSQUnivariateSpline(x, y, t, w=weights, bbox=bbox, k=model.degree)
model.tck = spline._eval_args
return spline
class SplineSplrepFitter(_SplineFitter):
"""
Fit a spline using the `scipy.interpolate.splrep` function interface.
"""
def __init__(self):
super().__init__()
self.fit_info = {"fp": None, "ier": None, "msg": None}
def _fit_method(self, model, x, y, **kwargs):
t = kwargs.pop("t", None)
s = kwargs.pop("s", None)
task = kwargs.pop("task", 0)
weights = kwargs.pop("weights", None)
bbox = kwargs.pop("bbox", [None, None])
if t is not None:
if model.user_knots:
warnings.warn(
"The current user specified knots will be "
"overwritten for by knots passed into this function",
AstropyUserWarning,
)
else:
if model.user_knots:
t = model.t_interior
if bbox != [None, None]:
model.bounding_box = bbox
from scipy.interpolate import splrep
tck, fp, ier, msg = splrep(
x,
y,
w=weights,
xb=bbox[0],
xe=bbox[1],
k=model.degree,
s=s,
t=t,
task=task,
full_output=1,
)
model.tck = tck
return fp, ier, msg
def _set_fit_info(self, spline):
self.fit_info["fp"] = spline[0]
self.fit_info["ier"] = spline[1]
self.fit_info["msg"] = spline[2]
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Implements rotations, including spherical rotations as defined in WCS Paper II [1]_.
`RotateNative2Celestial` and `RotateCelestial2Native` follow the convention in
WCS Paper II to rotate to/from a native sphere and the celestial sphere.
The implementation uses `EulerAngleRotation`. The model parameters are
three angles: the longitude (``lon``) and latitude (``lat``) of the fiducial point
in the celestial system (``CRVAL`` keywords in FITS), and the longitude of the celestial
pole in the native system (``lon_pole``). The Euler angles are ``lon+90``, ``90-lat``
and ``-(lon_pole-90)``.
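As an illustrative sketch (the angle values below are arbitrary), the sky
rotation and its inverse can be used to round-trip a coordinate:
>>> from astropy.modeling.models import RotateNative2Celestial
>>> n2c = RotateNative2Celestial(lon=5.6, lat=-72.0, lon_pole=180.0)
>>> alpha, delta = n2c(30.0, 60.0)          # doctest: +SKIP
>>> phi, theta = n2c.inverse(alpha, delta)  # doctest: +SKIP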
References
----------
.. [1] Calabretta, M.R., Greisen, E.W., 2002, A&A, 395, 1077 (Paper II)
"""
# pylint: disable=invalid-name, too-many-arguments, no-member
import math
from functools import reduce
import numpy as np
from astropy import units as u
from astropy.coordinates.matrix_utilities import rotation_matrix
from .core import Model
from .parameters import Parameter
from .utils import _to_orig_unit, _to_radian
__all__ = [
"RotateCelestial2Native",
"RotateNative2Celestial",
"Rotation2D",
"EulerAngleRotation",
"RotationSequence3D",
"SphericalRotationSequence",
]
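# A sketch of the intent of ``_create_matrix`` below (comment only, not
# executed): the per-axis matrices are combined in reverse order, so the
# first (angle, axis) pair in ``axes_order`` is the first rotation applied
# to a column vector.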
def _create_matrix(angles, axes_order):
matrices = []
for angle, axis in zip(angles, axes_order):
if isinstance(angle, u.Quantity):
angle = angle.value
angle = angle.item()
matrices.append(rotation_matrix(angle, axis, unit=u.rad))
return reduce(np.matmul, matrices[::-1])
def spherical2cartesian(alpha, delta):
alpha = np.deg2rad(alpha)
delta = np.deg2rad(delta)
x = np.cos(alpha) * np.cos(delta)
y = np.cos(delta) * np.sin(alpha)
z = np.sin(delta)
return np.array([x, y, z])
def cartesian2spherical(x, y, z):
h = np.hypot(x, y)
alpha = np.rad2deg(np.arctan2(y, x))
delta = np.rad2deg(np.arctan2(z, h))
return alpha, delta
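# Worked example for the two helpers above (illustrative, not executed):
# spherical2cartesian(0, 0) gives [1, 0, 0], and cartesian2spherical(0, 0, 1)
# gives (0.0, 90.0), i.e. (longitude, latitude) in degrees.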
class RotationSequence3D(Model):
"""
Perform a series of rotations about different axes in 3D space.
Positive angles represent a counter-clockwise rotation.
Parameters
----------
angles : array-like
Angles of rotation in deg in the order of axes_order.
axes_order : str
A sequence of 'x', 'y', 'z' corresponding to axis of rotation.
Examples
--------
>>> model = RotationSequence3D([1.1, 2.1, 3.1, 4.1], axes_order='xyzx')
"""
standard_broadcasting = False
_separable = False
n_inputs = 3
n_outputs = 3
angles = Parameter(
default=[],
getter=_to_orig_unit,
setter=_to_radian,
description="Angles of rotation in deg in the order of axes_order",
)
def __init__(self, angles, axes_order, name=None):
self.axes = ["x", "y", "z"]
unrecognized = set(axes_order).difference(self.axes)
if unrecognized:
raise ValueError(
f"Unrecognized axis label {unrecognized}; should be one of {self.axes} "
)
self.axes_order = axes_order
if len(angles) != len(axes_order):
raise ValueError(
f"The number of angles {len(angles)} should match "
f"the number of axes {len(axes_order)}."
)
super().__init__(angles, name=name)
self._inputs = ("x", "y", "z")
self._outputs = ("x", "y", "z")
@property
def inverse(self):
"""Inverse rotation."""
angles = self.angles.value[::-1] * -1
return self.__class__(angles, axes_order=self.axes_order[::-1])
def evaluate(self, x, y, z, angles):
"""
Apply the rotation to a set of 3D Cartesian coordinates.
"""
if x.shape != y.shape or x.shape != z.shape:
raise ValueError("Expected input arrays to have the same shape")
# Note: If the original shape was () (an array scalar) convert to a
# 1-element 1-D array on output for consistency with most other models
orig_shape = x.shape or (1,)
inarr = np.array([x.flatten(), y.flatten(), z.flatten()])
result = np.dot(_create_matrix(angles[0], self.axes_order), inarr)
x, y, z = result[0], result[1], result[2]
x.shape = y.shape = z.shape = orig_shape
return x, y, z
class SphericalRotationSequence(RotationSequence3D):
"""
Perform a sequence of rotations about arbitrary number of axes
in spherical coordinates.
Parameters
----------
angles : list
A sequence of angles (in deg).
axes_order : str
A sequence of characters ('x', 'y', or 'z') corresponding to the
axis of rotation and matching the order in ``angles``.
"""
def __init__(self, angles, axes_order, name=None, **kwargs):
self._n_inputs = 2
self._n_outputs = 2
super().__init__(angles, axes_order=axes_order, name=name, **kwargs)
self._inputs = ("lon", "lat")
self._outputs = ("lon", "lat")
@property
def n_inputs(self):
return self._n_inputs
@property
def n_outputs(self):
return self._n_outputs
def evaluate(self, lon, lat, angles):
x, y, z = spherical2cartesian(lon, lat)
x1, y1, z1 = super().evaluate(x, y, z, angles)
lon, lat = cartesian2spherical(x1, y1, z1)
return lon, lat
class _EulerRotation:
"""
Base class which does the actual computation.
"""
_separable = False
def evaluate(self, alpha, delta, phi, theta, psi, axes_order):
shape = None
if isinstance(alpha, np.ndarray):
alpha = alpha.flatten()
delta = delta.flatten()
shape = alpha.shape
inp = spherical2cartesian(alpha, delta)
matrix = _create_matrix([phi, theta, psi], axes_order)
result = np.dot(matrix, inp)
a, b = cartesian2spherical(*result)
if shape is not None:
a.shape = shape
b.shape = shape
return a, b
_input_units_strict = True
_input_units_allow_dimensionless = True
@property
def input_units(self):
"""Input units."""
return {self.inputs[0]: u.deg, self.inputs[1]: u.deg}
@property
def return_units(self):
"""Output units."""
return {self.outputs[0]: u.deg, self.outputs[1]: u.deg}
class EulerAngleRotation(_EulerRotation, Model):
"""
Implements Euler angle intrinsic rotations.
Rotates one coordinate system into another (fixed) coordinate system.
All coordinate systems are right-handed. The sign of the angles is
determined by the right-hand rule.
Parameters
----------
phi, theta, psi : float or `~astropy.units.Quantity` ['angle']
"proper" Euler angles in deg.
If floats, they should be in deg.
axes_order : str
A 3 character string, a combination of 'x', 'y' and 'z',
where each character denotes an axis in 3D space.
"""
n_inputs = 2
n_outputs = 2
phi = Parameter(
default=0,
getter=_to_orig_unit,
setter=_to_radian,
description="1st Euler angle (Quantity or value in deg)",
)
theta = Parameter(
default=0,
getter=_to_orig_unit,
setter=_to_radian,
description="2nd Euler angle (Quantity or value in deg)",
)
psi = Parameter(
default=0,
getter=_to_orig_unit,
setter=_to_radian,
description="3rd Euler angle (Quantity or value in deg)",
)
def __init__(self, phi, theta, psi, axes_order, **kwargs):
self.axes = ["x", "y", "z"]
if len(axes_order) != 3:
raise TypeError(
"Expected axes_order to be a character sequence of length 3, "
f"got {axes_order}"
)
unrecognized = set(axes_order).difference(self.axes)
if unrecognized:
raise ValueError(
f"Unrecognized axis label {unrecognized}; should be one of {self.axes}"
)
self.axes_order = axes_order
qs = [isinstance(par, u.Quantity) for par in [phi, theta, psi]]
if any(qs) and not all(qs):
raise TypeError(
"All parameters should be of the same type - float or Quantity."
)
super().__init__(phi=phi, theta=theta, psi=psi, **kwargs)
self._inputs = ("alpha", "delta")
self._outputs = ("alpha", "delta")
@property
def inverse(self):
return self.__class__(
phi=-self.psi,
theta=-self.theta,
psi=-self.phi,
axes_order=self.axes_order[::-1],
)
def evaluate(self, alpha, delta, phi, theta, psi):
a, b = super().evaluate(alpha, delta, phi, theta, psi, self.axes_order)
return a, b
class _SkyRotation(_EulerRotation, Model):
"""
Base class for RotateNative2Celestial and RotateCelestial2Native.
"""
lon = Parameter(
default=0, getter=_to_orig_unit, setter=_to_radian, description="Longitude"
)
lat = Parameter(
default=0, getter=_to_orig_unit, setter=_to_radian, description="Latitude"
)
lon_pole = Parameter(
default=0,
getter=_to_orig_unit,
setter=_to_radian,
description="Longitude of a pole",
)
def __init__(self, lon, lat, lon_pole, **kwargs):
qs = [isinstance(par, u.Quantity) for par in [lon, lat, lon_pole]]
if any(qs) and not all(qs):
raise TypeError(
"All parameters should be of the same type - float or Quantity."
)
super().__init__(lon, lat, lon_pole, **kwargs)
self.axes_order = "zxz"
def _evaluate(self, phi, theta, lon, lat, lon_pole):
alpha, delta = super().evaluate(phi, theta, lon, lat, lon_pole, self.axes_order)
mask = alpha < 0
if isinstance(mask, np.ndarray):
alpha[mask] += 360
else:
alpha += 360
return alpha, delta
class RotateNative2Celestial(_SkyRotation):
"""
Transform from Native to Celestial Spherical Coordinates.
Parameters
----------
lon : float or `~astropy.units.Quantity` ['angle']
Celestial longitude of the fiducial point.
lat : float or `~astropy.units.Quantity` ['angle']
Celestial latitude of the fiducial point.
lon_pole : float or `~astropy.units.Quantity` ['angle']
Longitude of the celestial pole in the native system.
Notes
-----
If ``lon``, ``lat`` and ``lon_pole`` are numerical values they
should be in units of deg. Inputs are angles on the native sphere.
Outputs are angles on the celestial sphere.
"""
n_inputs = 2
n_outputs = 2
@property
def input_units(self):
"""Input units."""
return {self.inputs[0]: u.deg, self.inputs[1]: u.deg}
@property
def return_units(self):
"""Output units."""
return {self.outputs[0]: u.deg, self.outputs[1]: u.deg}
def __init__(self, lon, lat, lon_pole, **kwargs):
super().__init__(lon, lat, lon_pole, **kwargs)
self.inputs = ("phi_N", "theta_N")
self.outputs = ("alpha_C", "delta_C")
def evaluate(self, phi_N, theta_N, lon, lat, lon_pole):
"""
Parameters
----------
phi_N, theta_N : float or `~astropy.units.Quantity` ['angle']
Angles in the Native coordinate system.
If float, assumed in degrees.
lon, lat, lon_pole : float or `~astropy.units.Quantity` ['angle']
Parameter values when the model was initialized.
If float, assumed in degrees.
Returns
-------
alpha_C, delta_C : float or `~astropy.units.Quantity` ['angle']
Angles on the Celestial sphere.
If float, in degrees.
"""
# The values are in radians since they have already been through the setter.
if isinstance(lon, u.Quantity):
lon = lon.value
lat = lat.value
lon_pole = lon_pole.value
# Convert to Euler angles
phi = lon_pole - np.pi / 2
theta = -(np.pi / 2 - lat)
psi = -(np.pi / 2 + lon)
alpha_C, delta_C = super()._evaluate(phi_N, theta_N, phi, theta, psi)
return alpha_C, delta_C
@property
def inverse(self):
# convert to angles on the celestial sphere
return RotateCelestial2Native(self.lon, self.lat, self.lon_pole)
class RotateCelestial2Native(_SkyRotation):
"""
Transform from Celestial to Native Spherical Coordinates.
Parameters
----------
lon : float or `~astropy.units.Quantity` ['angle']
Celestial longitude of the fiducial point.
lat : float or `~astropy.units.Quantity` ['angle']
Celestial latitude of the fiducial point.
lon_pole : float or `~astropy.units.Quantity` ['angle']
Longitude of the celestial pole in the native system.
Notes
-----
If ``lon``, ``lat`` and ``lon_pole`` are numerical values they should be
in units of deg. Inputs are angles on the celestial sphere.
Outputs are angles on the native sphere.
"""
n_inputs = 2
n_outputs = 2
@property
def input_units(self):
"""Input units."""
return {self.inputs[0]: u.deg, self.inputs[1]: u.deg}
@property
def return_units(self):
"""Output units."""
return {self.outputs[0]: u.deg, self.outputs[1]: u.deg}
def __init__(self, lon, lat, lon_pole, **kwargs):
super().__init__(lon, lat, lon_pole, **kwargs)
# Inputs are angles on the celestial sphere
self.inputs = ("alpha_C", "delta_C")
# Outputs are angles on the native sphere
self.outputs = ("phi_N", "theta_N")
def evaluate(self, alpha_C, delta_C, lon, lat, lon_pole):
"""
Parameters
----------
alpha_C, delta_C : float or `~astropy.units.Quantity` ['angle']
Angles in the Celestial coordinate frame.
If float, assumed in degrees.
lon, lat, lon_pole : float or `~astropy.units.Quantity` ['angle']
Parameter values when the model was initialized.
If float, assumed in degrees.
Returns
-------
phi_N, theta_N : float or `~astropy.units.Quantity` ['angle']
Angles on the Native sphere.
If float, in degrees.
"""
if isinstance(lon, u.Quantity):
lon = lon.value
lat = lat.value
lon_pole = lon_pole.value
# Convert to Euler angles
phi = np.pi / 2 + lon
theta = np.pi / 2 - lat
psi = -(lon_pole - np.pi / 2)
phi_N, theta_N = super()._evaluate(alpha_C, delta_C, phi, theta, psi)
return phi_N, theta_N
@property
def inverse(self):
return RotateNative2Celestial(self.lon, self.lat, self.lon_pole)
class Rotation2D(Model):
"""
Perform a 2D rotation given an angle.
Positive angles represent a counter-clockwise rotation; negative angles, a clockwise rotation.
Parameters
----------
angle : float or `~astropy.units.Quantity` ['angle']
Angle of rotation (if float it should be in deg).
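Examples
--------
A minimal sketch rotating the point (1, 0) by 90 degrees; the result is
approximately (0, 1):
>>> import numpy as np
>>> from astropy.modeling.models import Rotation2D
>>> rot = Rotation2D(angle=90.0)
>>> x, y = rot(np.array([1.0]), np.array([0.0]))  # doctest: +SKIP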
"""
n_inputs = 2
n_outputs = 2
_separable = False
angle = Parameter(
default=0.0,
getter=_to_orig_unit,
setter=_to_radian,
description="Angle of rotation (Quantity or value in deg)",
)
def __init__(self, angle=angle, **kwargs):
super().__init__(angle=angle, **kwargs)
self._inputs = ("x", "y")
self._outputs = ("x", "y")
@property
def inverse(self):
"""Inverse rotation."""
return self.__class__(angle=-self.angle)
@classmethod
def evaluate(cls, x, y, angle):
"""
Rotate (x, y) about ``angle``.
Parameters
----------
x, y : array-like
Input quantities
angle : float or `~astropy.units.Quantity` ['angle']
Angle of rotation.
If float, assumed in degrees.
"""
if x.shape != y.shape:
raise ValueError("Expected input arrays to have the same shape")
# If one argument has units, enforce they both have units and they are compatible.
x_unit = getattr(x, "unit", None)
y_unit = getattr(y, "unit", None)
has_units = x_unit is not None and y_unit is not None
if x_unit != y_unit:
if has_units and y_unit.is_equivalent(x_unit):
y = y.to(x_unit)
y_unit = x_unit
else:
raise u.UnitsError("x and y must have compatible units")
# Note: If the original shape was () (an array scalar) convert to a
# 1-element 1-D array on output for consistency with most other models
orig_shape = x.shape or (1,)
inarr = np.array([x.flatten(), y.flatten()])
if isinstance(angle, u.Quantity):
angle = angle.to_value(u.rad)
result = np.dot(cls._compute_matrix(angle), inarr)
x, y = result[0], result[1]
x.shape = y.shape = orig_shape
if has_units:
return u.Quantity(x, unit=x_unit, subok=True), u.Quantity(
y, unit=y_unit, subok=True
)
return x, y
@staticmethod
def _compute_matrix(angle):
if not np.isscalar(angle):
angle = angle[0]
return np.array(
[[math.cos(angle), -math.sin(angle)], [math.sin(angle), math.cos(angle)]],
dtype=np.float64,
)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides utility functions for the models package.
"""
import warnings
# pylint: disable=invalid-name
from collections import UserDict
from inspect import signature
import numpy as np
from astropy import units as u
from astropy.utils.decorators import deprecated
__all__ = ["poly_map_domain", "comb", "ellipse_extent"]
def make_binary_operator_eval(oper, f, g):
"""
Given a binary operator (as a callable of two arguments) ``oper`` and
two callables ``f`` and ``g`` which accept the same arguments,
returns a *new* function that takes the same arguments as ``f`` and ``g``,
but passes the outputs of ``f`` and ``g`` through the given ``oper``.
``f`` and ``g`` are assumed to return tuples (which may be 1-tuples); the
given operator is applied element-wise to the tuple outputs.
Example
-------
>>> from operator import add
>>> def prod(x, y):
... return (x * y,)
...
>>> sum_of_prod = make_binary_operator_eval(add, prod, prod)
>>> sum_of_prod(3, 5)
(30,)
"""
return lambda inputs, params: tuple(
oper(x, y) for x, y in zip(f(inputs, params), g(inputs, params))
)
def poly_map_domain(oldx, domain, window):
"""
Map domain into window by shifting and scaling.
Parameters
----------
oldx : array
original coordinates
domain : list or tuple of length 2
function domain
window : list or tuple of length 2
range into which to map the domain
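Examples
--------
A simple linear remapping of three points (illustrative of the
shift-and-scale implemented below):
>>> poly_map_domain([1., 2., 3.], domain=[1., 3.], window=[-1., 1.])
array([-1.,  0.,  1.])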
"""
domain = np.array(domain, dtype=np.float64)
window = np.array(window, dtype=np.float64)
if domain.shape != (2,) or window.shape != (2,):
raise ValueError('Expected "domain" and "window" to be a tuple of size 2.')
scl = (window[1] - window[0]) / (domain[1] - domain[0])
off = (window[0] * domain[1] - window[1] * domain[0]) / (domain[1] - domain[0])
return off + scl * oldx
def _validate_domain_window(value):
if value is not None:
if np.asanyarray(value).shape != (2,):
raise ValueError("domain and window should be tuples of size 2.")
return tuple(value)
return value
@deprecated("5.3", alternative="math.comb")
def comb(N, k):
"""
The number of combinations of N things taken k at a time.
Parameters
----------
N : int, array
Number of things.
k : int, array
Number of elements taken.
"""
if (k > N) or (N < 0) or (k < 0):
return 0
val = 1
for j in range(min(k, N - k)):
val = (val * (N - j)) / (j + 1)
return val
def array_repr_oneline(array):
"""
Represents a multi-dimensional Numpy array flattened onto a single line.
"""
r = np.array2string(array, separator=", ", suppress_small=True)
return " ".join(line.strip() for line in r.splitlines())
def combine_labels(left, right):
"""
For use with the join operator &: Combine left input/output labels with
right input/output labels.
If none of the labels conflict then this just returns a sum of tuples.
However if *any* of the labels conflict, this appends '0' to the left-hand
labels and '1' to the right-hand labels (so there is no ambiguity).
"""
if set(left).intersection(right):
left = tuple(label + "0" for label in left)
right = tuple(label + "1" for label in right)
return left + right
def ellipse_extent(a, b, theta):
"""
Calculates the half size of a box encapsulating a rotated 2D
ellipse.
Parameters
----------
a : float or `~astropy.units.Quantity`
The ellipse semimajor axis.
b : float or `~astropy.units.Quantity`
The ellipse semiminor axis.
theta : float or `~astropy.units.Quantity` ['angle']
The rotation angle as an angular quantity
(`~astropy.units.Quantity` or `~astropy.coordinates.Angle`) or
a value in radians (as a float). The rotation angle increases
counterclockwise.
Returns
-------
offsets : tuple
The absolute value of the offset distances from the ellipse center that
define its bounding box region, ``(dx, dy)``.
Examples
--------
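For an unrotated ellipse the offsets reduce to the semi-axes (a minimal
numerical check):
>>> from astropy.modeling.utils import ellipse_extent
>>> ellipse_extent(3, 1, 0.)  # doctest: +FLOAT_CMP
array([3., 1.])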
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Ellipse2D
from astropy.modeling.utils import ellipse_extent, render_model
amplitude = 1
x0 = 50
y0 = 50
a = 30
b = 10
theta = np.pi / 4
model = Ellipse2D(amplitude, x0, y0, a, b, theta)
dx, dy = ellipse_extent(a, b, theta)
limits = [x0 - dx, x0 + dx, y0 - dy, y0 + dy]
model.bounding_box = limits
image = render_model(model)
plt.imshow(image, cmap='binary', interpolation='nearest', alpha=.5,
extent = limits)
plt.show()
"""
from .parameters import Parameter # prevent circular import
if isinstance(theta, Parameter):
if theta.quantity is None:
theta = theta.value
else:
theta = theta.quantity
t = np.arctan2(-b * np.tan(theta), a)
dx = a * np.cos(t) * np.cos(theta) - b * np.sin(t) * np.sin(theta)
t = np.arctan2(b, a * np.tan(theta))
dy = b * np.sin(t) * np.cos(theta) + a * np.cos(t) * np.sin(theta)
if isinstance(dx, u.Quantity) or isinstance(dy, u.Quantity):
return np.abs(u.Quantity([dx, dy], subok=True))
return np.abs([dx, dy])
def get_inputs_and_params(func):
"""
Given a callable, determine the input variables and the
parameters.
Parameters
----------
func : callable
Returns
-------
inputs, params : tuple
Each entry is a list of inspect.Parameter objects
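Examples
--------
A small illustration: arguments without defaults become inputs and the
rest become parameters.
>>> from astropy.modeling.utils import get_inputs_and_params
>>> def line(x, slope=1, intercept=0):
...     return slope * x + intercept
>>> inputs, params = get_inputs_and_params(line)
>>> [p.name for p in inputs], [p.name for p in params]
(['x'], ['slope', 'intercept'])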
"""
sig = signature(func)
inputs = []
params = []
for param in sig.parameters.values():
if param.kind in (param.VAR_POSITIONAL, param.VAR_KEYWORD):
raise ValueError("Signature must not have *args or **kwargs")
if param.default == param.empty:
inputs.append(param)
else:
params.append(param)
return inputs, params
def _combine_equivalency_dict(keys, eq1=None, eq2=None):
# Given two dictionaries that give equivalencies for a set of keys, for
# example input value names, return a dictionary that includes all the
# equivalencies
eq = {}
for key in keys:
eq[key] = []
if eq1 is not None and key in eq1:
eq[key].extend(eq1[key])
if eq2 is not None and key in eq2:
eq[key].extend(eq2[key])
return eq
def _to_radian(value):
"""Convert ``value`` to radian."""
if isinstance(value, u.Quantity):
return value.to(u.rad)
return np.deg2rad(value)
def _to_orig_unit(value, raw_unit=None, orig_unit=None):
"""Convert value with ``raw_unit`` to ``orig_unit``."""
if raw_unit is not None:
return (value * raw_unit).to(orig_unit)
return np.rad2deg(value)
class _ConstraintsDict(UserDict):
"""
Wrapper around UserDict to allow updating the constraints
on a Parameter when the dictionary is updated.
"""
def __init__(self, model, constraint_type):
self._model = model
self.constraint_type = constraint_type
c = {}
for name in model.param_names:
param = getattr(model, name)
c[name] = getattr(param, constraint_type)
super().__init__(c)
def __setitem__(self, key, val):
super().__setitem__(key, val)
param = getattr(self._model, key)
setattr(param, self.constraint_type, val)
class _SpecialOperatorsDict(UserDict):
"""
Wrapper around UserDict to allow for better tracking of the Special
Operators for CompoundModels. This dictionary is structured so that
one cannot inadvertently overwrite an existing special operator.
Parameters
----------
unique_id: int
the last used unique_id for a SPECIAL OPERATOR
special_operators: dict
a dictionary containing the special_operators
Notes
-----
Direct setting of operators (`dict[key] = value`) into the
dictionary has been deprecated in favor of the `.add(name, value)`
method, so that unique dictionary keys can be generated and tracked
consistently.
"""
def __init__(self, unique_id=0, special_operators={}):
super().__init__(special_operators)
self._unique_id = unique_id
def _set_value(self, key, val):
if key in self:
raise ValueError(f'Special operator "{key}" already exists')
else:
super().__setitem__(key, val)
def __setitem__(self, key, val):
self._set_value(key, val)
warnings.warn(
DeprecationWarning(
"""
Special operator dictionary assignment has been deprecated.
Please use `.add` instead, so that you can capture a unique
key for your operator.
"""
)
)
def _get_unique_id(self):
self._unique_id += 1
return self._unique_id
def add(self, operator_name, operator):
"""
Adds a special operator to the dictionary, and then returns the
unique key that the operator is stored under for later reference.
Parameters
----------
operator_name: str
the name for the operator
operator: function
the actual operator function which will be used
Returns
-------
the unique operator key for the dictionary
`(operator_name, unique_id)`
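Examples
--------
A minimal sketch; the returned key embeds an auto-incremented id:
>>> import operator
>>> from astropy.modeling.utils import _SpecialOperatorsDict
>>> ops = _SpecialOperatorsDict()
>>> ops.add('my_add', operator.add)
('my_add', 1)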
"""
key = (operator_name, self._get_unique_id())
self._set_value(key, operator)
return key
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Mathematical models."""
# pylint: disable=line-too-long, too-many-lines, too-many-arguments, invalid-name
import warnings
import numpy as np
from astropy import units as u
from astropy.units import Quantity, UnitsError
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.exceptions import AstropyDeprecationWarning
from .core import Fittable1DModel, Fittable2DModel
from .parameters import InputParameterError, Parameter
from .utils import ellipse_extent
__all__ = [
"AiryDisk2D",
"Moffat1D",
"Moffat2D",
"Box1D",
"Box2D",
"Const1D",
"Const2D",
"Ellipse2D",
"Disk2D",
"Gaussian1D",
"Gaussian2D",
"Linear1D",
"Lorentz1D",
"RickerWavelet1D",
"RickerWavelet2D",
"RedshiftScaleFactor",
"Multiply",
"Planar2D",
"Scale",
"Sersic1D",
"Sersic2D",
"Shift",
"Sine1D",
"Cosine1D",
"Tangent1D",
"ArcSine1D",
"ArcCosine1D",
"ArcTangent1D",
"Trapezoid1D",
"TrapezoidDisk2D",
"Ring2D",
"Voigt1D",
"KingProjectedAnalytic1D",
"Exponential1D",
"Logarithmic1D",
]
TWOPI = 2 * np.pi
FLOAT_EPSILON = float(np.finfo(np.float32).tiny)
# Note that we define this here rather than using the value defined in
# astropy.stats to avoid importing astropy.stats every time astropy.modeling
# is loaded.
GAUSSIAN_SIGMA_TO_FWHM = 2.0 * np.sqrt(2.0 * np.log(2.0))
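# For reference (illustrative): a Gaussian with stddev = 1 therefore has
# FWHM = GAUSSIAN_SIGMA_TO_FWHM ~ 2.3548.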
class Gaussian1D(Fittable1DModel):
"""
One dimensional Gaussian model.
Parameters
----------
amplitude : float or `~astropy.units.Quantity`.
Amplitude (peak value) of the Gaussian - for a normalized profile
(integrating to 1), set amplitude = 1 / (stddev * np.sqrt(2 * np.pi))
mean : float or `~astropy.units.Quantity`.
Mean of the Gaussian.
stddev : float or `~astropy.units.Quantity`.
Standard deviation of the Gaussian with FWHM = 2 * stddev * np.sqrt(2 * np.log(2)).
Notes
-----
Either all or none of input ``x``, ``mean`` and ``stddev`` must be provided
consistently with compatible units or as unitless numbers.
Model formula:
.. math:: f(x) = A e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}
Examples
--------
>>> from astropy.modeling import models
>>> def tie_center(model):
... mean = 50 * model.stddev
... return mean
>>> tied_parameters = {'mean': tie_center}
Specify that 'mean' is a tied parameter in one of two ways:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... tied=tied_parameters)
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.mean.tied
False
>>> g1.mean.tied = tie_center
>>> g1.mean.tied
<function tie_center at 0x...>
Fixed parameters:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... fixed={'stddev': True})
>>> g1.stddev.fixed
True
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.stddev.fixed
False
>>> g1.stddev.fixed = True
>>> g1.stddev.fixed
True
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Gaussian1D
plt.figure()
s1 = Gaussian1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
See Also
--------
Gaussian2D, Box1D, Moffat1D, Lorentz1D
"""
amplitude = Parameter(
default=1, description="Amplitude (peak value) of the Gaussian"
)
mean = Parameter(default=0, description="Position of peak (Gaussian)")
# Ensure stddev makes sense if its bounds are not explicitly set.
# stddev must be non-zero and positive.
stddev = Parameter(
default=1,
bounds=(FLOAT_EPSILON, None),
description="Standard deviation of the Gaussian",
)
def bounding_box(self, factor=5.5):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of `stddev` used to define the limits.
The default is 5.5, corresponding to a relative error < 1e-7.
Examples
--------
>>> from astropy.modeling.models import Gaussian1D
>>> model = Gaussian1D(mean=0, stddev=2)
>>> model.bounding_box
ModelBoundingBox(
intervals={
x: Interval(lower=-11.0, upper=11.0)
}
model=Gaussian1D(inputs=('x',))
order='C'
)
This range can be set directly (see: `Model.bounding_box
<astropy.modeling.Model.bounding_box>`) or by using a different factor,
like:
>>> model.bounding_box = model.bounding_box(factor=2)
>>> model.bounding_box
ModelBoundingBox(
intervals={
x: Interval(lower=-4.0, upper=4.0)
}
model=Gaussian1D(inputs=('x',))
order='C'
)
"""
x0 = self.mean
dx = factor * self.stddev
return (x0 - dx, x0 + dx)
@property
def fwhm(self):
"""Gaussian full width at half maximum."""
return self.stddev * GAUSSIAN_SIGMA_TO_FWHM
@staticmethod
def evaluate(x, amplitude, mean, stddev):
"""
Gaussian1D model function.
"""
return amplitude * np.exp(-0.5 * (x - mean) ** 2 / stddev**2)
@staticmethod
def fit_deriv(x, amplitude, mean, stddev):
"""
Gaussian1D model function derivatives.
"""
d_amplitude = np.exp(-0.5 / stddev**2 * (x - mean) ** 2)
d_mean = amplitude * d_amplitude * (x - mean) / stddev**2
d_stddev = amplitude * d_amplitude * (x - mean) ** 2 / stddev**3
return [d_amplitude, d_mean, d_stddev]
@property
def input_units(self):
if self.mean.input_unit is None:
return None
return {self.inputs[0]: self.mean.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"mean": inputs_unit[self.inputs[0]],
"stddev": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Gaussian2D(Fittable2DModel):
r"""
Two dimensional Gaussian model.
Parameters
----------
amplitude : float or `~astropy.units.Quantity`.
Amplitude (peak value) of the Gaussian.
x_mean : float or `~astropy.units.Quantity`.
Mean of the Gaussian in x.
y_mean : float or `~astropy.units.Quantity`.
Mean of the Gaussian in y.
x_stddev : float or `~astropy.units.Quantity` or None.
Standard deviation of the Gaussian in x before rotating by theta. Must
be None if a covariance matrix (``cov_matrix``) is provided. If no
``cov_matrix`` is given, ``None`` means the default value (1).
y_stddev : float or `~astropy.units.Quantity` or None.
Standard deviation of the Gaussian in y before rotating by theta. Must
be None if a covariance matrix (``cov_matrix``) is provided. If no
``cov_matrix`` is given, ``None`` means the default value (1).
theta : float or `~astropy.units.Quantity`, optional.
The rotation angle as an angular quantity
(`~astropy.units.Quantity` or `~astropy.coordinates.Angle`)
or a value in radians (as a float). The rotation angle
increases counterclockwise. Must be `None` if a covariance matrix
(``cov_matrix``) is provided. If no ``cov_matrix`` is given,
`None` means the default value (0).
cov_matrix : ndarray, optional
A 2x2 covariance matrix. If specified, overrides the ``x_stddev``,
``y_stddev``, and ``theta`` defaults.
Notes
-----
Either all or none of input ``x, y``, ``[x,y]_mean`` and ``[x,y]_stddev``
must be provided consistently with compatible units or as unitless numbers.
Model formula:
.. math::
f(x, y) = A e^{-a\left(x - x_{0}\right)^{2} -b\left(x - x_{0}\right)
\left(y - y_{0}\right) -c\left(y - y_{0}\right)^{2}}
Using the following definitions:
.. math::
a = \left(\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} +
\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right)
b = \left(\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{x}^{2}} -
\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{y}^{2}}\right)
c = \left(\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} +
\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right)
If using a ``cov_matrix``, the model is of the form:
.. math::
f(x, y) = A e^{-0.5 \left(
\vec{x} - \vec{x}_{0}\right)^{T} \Sigma^{-1} \left(\vec{x} - \vec{x}_{0}
\right)}
where :math:`\vec{x} = [x, y]`, :math:`\vec{x}_{0} = [x_{0}, y_{0}]`,
and :math:`\Sigma` is the covariance matrix:
.. math::
\Sigma = \left(\begin{array}{ccc}
\sigma_x^2 & \rho \sigma_x \sigma_y \\
\rho \sigma_x \sigma_y & \sigma_y^2
\end{array}\right)
:math:`\rho` is the correlation between ``x`` and ``y``, which should
be between -1 and +1. Positive correlation corresponds to a
``theta`` in the range 0 to 90 degrees. Negative correlation
corresponds to a ``theta`` in the range of 0 to -90 degrees.
See [1]_ for more details about the 2D Gaussian function.
See Also
--------
Gaussian1D, Box2D, Moffat2D
References
----------
.. [1] https://en.wikipedia.org/wiki/Gaussian_function
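Examples
--------
A sketch of constructing the model from a covariance matrix instead of
``x_stddev``/``y_stddev``/``theta`` (values are illustrative):
>>> import numpy as np
>>> from astropy.modeling.models import Gaussian2D
>>> cov = np.array([[2.0, 0.5], [0.5, 1.0]])
>>> g = Gaussian2D(amplitude=1, x_mean=0, y_mean=0, cov_matrix=cov)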
"""
amplitude = Parameter(default=1, description="Amplitude of the Gaussian")
x_mean = Parameter(
default=0, description="Peak position (along x axis) of Gaussian"
)
y_mean = Parameter(
default=0, description="Peak position (along y axis) of Gaussian"
)
x_stddev = Parameter(
default=1, description="Standard deviation of the Gaussian (along x axis)"
)
y_stddev = Parameter(
default=1, description="Standard deviation of the Gaussian (along y axis)"
)
theta = Parameter(
default=0.0,
description=(
"Rotation angle either as a "
"float (in radians) or a "
"|Quantity| angle (optional)"
),
)
def __init__(
self,
amplitude=amplitude.default,
x_mean=x_mean.default,
y_mean=y_mean.default,
x_stddev=None,
y_stddev=None,
theta=None,
cov_matrix=None,
**kwargs,
):
if cov_matrix is None:
if x_stddev is None:
x_stddev = self.__class__.x_stddev.default
if y_stddev is None:
y_stddev = self.__class__.y_stddev.default
if theta is None:
theta = self.__class__.theta.default
else:
if x_stddev is not None or y_stddev is not None or theta is not None:
raise InputParameterError(
"Cannot specify both cov_matrix and x/y_stddev/theta"
)
# Compute principal coordinate system transformation
cov_matrix = np.array(cov_matrix)
if cov_matrix.shape != (2, 2):
raise ValueError("Covariance matrix must be 2x2")
eig_vals, eig_vecs = np.linalg.eig(cov_matrix)
x_stddev, y_stddev = np.sqrt(eig_vals)
y_vec = eig_vecs[:, 0]
theta = np.arctan2(y_vec[1], y_vec[0])
# Ensure stddev makes sense if its bounds are not explicitly set.
# stddev must be non-zero and positive.
# TODO: Investigate why setting this in Parameter above causes
# convolution tests to hang.
kwargs.setdefault("bounds", {})
kwargs["bounds"].setdefault("x_stddev", (FLOAT_EPSILON, None))
kwargs["bounds"].setdefault("y_stddev", (FLOAT_EPSILON, None))
super().__init__(
amplitude=amplitude,
x_mean=x_mean,
y_mean=y_mean,
x_stddev=x_stddev,
y_stddev=y_stddev,
theta=theta,
**kwargs,
)
@property
def x_fwhm(self):
"""Gaussian full width at half maximum in X."""
return self.x_stddev * GAUSSIAN_SIGMA_TO_FWHM
@property
def y_fwhm(self):
"""Gaussian full width at half maximum in Y."""
return self.y_stddev * GAUSSIAN_SIGMA_TO_FWHM
def bounding_box(self, factor=5.5):
"""
Tuple defining the default ``bounding_box`` limits in each dimension,
``((y_low, y_high), (x_low, x_high))``.
The default offset from the mean is 5.5-sigma, corresponding
to a relative error < 1e-7. The limits are adjusted for rotation.
Parameters
----------
factor : float, optional
The multiple of `x_stddev` and `y_stddev` used to define the limits.
The default is 5.5.
Examples
--------
>>> from astropy.modeling.models import Gaussian2D
>>> model = Gaussian2D(x_mean=0, y_mean=0, x_stddev=1, y_stddev=2)
>>> model.bounding_box
ModelBoundingBox(
intervals={
x: Interval(lower=-5.5, upper=5.5)
y: Interval(lower=-11.0, upper=11.0)
}
model=Gaussian2D(inputs=('x', 'y'))
order='C'
)
This range can be set directly (see: `Model.bounding_box
<astropy.modeling.Model.bounding_box>`) or by using a different factor
like:
>>> model.bounding_box = model.bounding_box(factor=2)
>>> model.bounding_box
ModelBoundingBox(
intervals={
x: Interval(lower=-2.0, upper=2.0)
y: Interval(lower=-4.0, upper=4.0)
}
model=Gaussian2D(inputs=('x', 'y'))
order='C'
)
"""
a = factor * self.x_stddev
b = factor * self.y_stddev
dx, dy = ellipse_extent(a, b, self.theta)
return (
(self.y_mean - dy, self.y_mean + dy),
(self.x_mean - dx, self.x_mean + dx),
)
@staticmethod
def evaluate(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta):
"""Two dimensional Gaussian function."""
cost2 = np.cos(theta) ** 2
sint2 = np.sin(theta) ** 2
sin2t = np.sin(2.0 * theta)
xstd2 = x_stddev**2
ystd2 = y_stddev**2
xdiff = x - x_mean
ydiff = y - y_mean
a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2))
b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2))
c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2))
return amplitude * np.exp(
-((a * xdiff**2) + (b * xdiff * ydiff) + (c * ydiff**2))
)
@staticmethod
def fit_deriv(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta):
"""Two dimensional Gaussian function derivative with respect to parameters."""
cost = np.cos(theta)
sint = np.sin(theta)
cost2 = np.cos(theta) ** 2
sint2 = np.sin(theta) ** 2
cos2t = np.cos(2.0 * theta)
sin2t = np.sin(2.0 * theta)
xstd2 = x_stddev**2
ystd2 = y_stddev**2
xstd3 = x_stddev**3
ystd3 = y_stddev**3
xdiff = x - x_mean
ydiff = y - y_mean
xdiff2 = xdiff**2
ydiff2 = ydiff**2
a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2))
b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2))
c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2))
g = amplitude * np.exp(-((a * xdiff2) + (b * xdiff * ydiff) + (c * ydiff2)))
da_dtheta = sint * cost * ((1.0 / ystd2) - (1.0 / xstd2))
da_dx_stddev = -cost2 / xstd3
da_dy_stddev = -sint2 / ystd3
db_dtheta = (cos2t / xstd2) - (cos2t / ystd2)
db_dx_stddev = -sin2t / xstd3
db_dy_stddev = sin2t / ystd3
dc_dtheta = -da_dtheta
dc_dx_stddev = -sint2 / xstd3
dc_dy_stddev = -cost2 / ystd3
dg_dA = g / amplitude
dg_dx_mean = g * ((2.0 * a * xdiff) + (b * ydiff))
dg_dy_mean = g * ((b * xdiff) + (2.0 * c * ydiff))
dg_dx_stddev = g * (
-(
da_dx_stddev * xdiff2
+ db_dx_stddev * xdiff * ydiff
+ dc_dx_stddev * ydiff2
)
)
dg_dy_stddev = g * (
-(
da_dy_stddev * xdiff2
+ db_dy_stddev * xdiff * ydiff
+ dc_dy_stddev * ydiff2
)
)
dg_dtheta = g * (
-(da_dtheta * xdiff2 + db_dtheta * xdiff * ydiff + dc_dtheta * ydiff2)
)
return [dg_dA, dg_dx_mean, dg_dy_mean, dg_dx_stddev, dg_dy_stddev, dg_dtheta]
@property
def input_units(self):
x_unit = self.x_mean.input_unit
y_unit = self.y_mean.input_unit
if x_unit is None and y_unit is None:
return None
return {self.inputs[0]: x_unit, self.inputs[1]: y_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_mean": inputs_unit[self.inputs[0]],
"y_mean": inputs_unit[self.inputs[0]],
"x_stddev": inputs_unit[self.inputs[0]],
"y_stddev": inputs_unit[self.inputs[0]],
"theta": u.rad,
"amplitude": outputs_unit[self.outputs[0]],
}
class Shift(Fittable1DModel):
"""
Shift a coordinate.
Parameters
----------
offset : float
Offset to add to a coordinate.
"""
offset = Parameter(default=0, description="Offset to add to a model")
linear = True
_has_inverse_bounding_box = True
@property
def input_units(self):
if self.offset.input_unit is None:
return None
return {self.inputs[0]: self.offset.input_unit}
@property
def inverse(self):
"""One dimensional inverse Shift model function."""
inv = self.copy()
inv.offset *= -1
try:
self.bounding_box # noqa: B018
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(
self.evaluate(x, self.offset) for x in self.bounding_box
)
return inv
@staticmethod
def evaluate(x, offset):
"""One dimensional Shift model function."""
return x + offset
@staticmethod
def sum_of_implicit_terms(x):
"""Evaluate the implicit term (x) of one dimensional Shift model."""
return x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Shift model derivative with respect to parameter."""
d_offset = np.ones_like(x)
return [d_offset]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"offset": outputs_unit[self.outputs[0]]}
class Scale(Fittable1DModel):
"""
Multiply a model by a dimensionless factor.
Parameters
----------
factor : float
Factor by which to scale a coordinate.
Notes
-----
If ``factor`` is a `~astropy.units.Quantity` then the units will be
stripped before the scaling operation.
"""
factor = Parameter(default=1, description="Factor by which to scale a model")
linear = True
fittable = True
_input_units_strict = True
_input_units_allow_dimensionless = True
_has_inverse_bounding_box = True
@property
def input_units(self):
if self.factor.input_unit is None:
return None
return {self.inputs[0]: self.factor.input_unit}
@property
def inverse(self):
"""One dimensional inverse Scale model function."""
inv = self.copy()
inv.factor = 1 / self.factor
try:
self.bounding_box # noqa: B018
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(
self.evaluate(x, self.factor) for x in self.bounding_box.bounding_box()
)
return inv
@staticmethod
def evaluate(x, factor):
"""One dimensional Scale model function."""
if isinstance(factor, u.Quantity):
factor = factor.value
return factor * x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Scale model derivative with respect to parameter."""
d_factor = x
return [d_factor]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"factor": outputs_unit[self.outputs[0]]}
class Multiply(Fittable1DModel):
"""
Multiply a model by a quantity or number.
Parameters
----------
factor : float
Factor by which to multiply a coordinate.
"""
factor = Parameter(default=1, description="Factor by which to multiply a model")
linear = True
fittable = True
_has_inverse_bounding_box = True
@property
def inverse(self):
"""One dimensional inverse multiply model function."""
inv = self.copy()
inv.factor = 1 / self.factor
try:
self.bounding_box # noqa: B018
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(
self.evaluate(x, self.factor) for x in self.bounding_box.bounding_box()
)
return inv
@staticmethod
def evaluate(x, factor):
"""One dimensional multiply model function."""
return factor * x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional multiply model derivative with respect to parameter."""
d_factor = x
return [d_factor]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"factor": outputs_unit[self.outputs[0]]}
class RedshiftScaleFactor(Fittable1DModel):
"""
One dimensional redshift scale factor model.
Parameters
----------
z : float
Redshift value.
Notes
-----
Model formula:
.. math:: f(x) = x (1 + z)
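Examples
--------
A minimal sketch; with ``z = 0.5`` an input of 1000 maps to 1500:
>>> from astropy.modeling.models import RedshiftScaleFactor
>>> m = RedshiftScaleFactor(z=0.5)
>>> observed = m(1000.0)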
"""
z = Parameter(description="Redshift", default=0)
_has_inverse_bounding_box = True
@staticmethod
def evaluate(x, z):
"""One dimensional RedshiftScaleFactor model function."""
return (1 + z) * x
@staticmethod
def fit_deriv(x, z):
"""One dimensional RedshiftScaleFactor model derivative."""
d_z = x
return [d_z]
@property
def inverse(self):
"""Inverse RedshiftScaleFactor model."""
inv = self.copy()
inv.z = 1.0 / (1.0 + self.z) - 1.0
try:
self.bounding_box # noqa: B018
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(
self.evaluate(x, self.z) for x in self.bounding_box.bounding_box()
)
return inv
class Sersic1D(Fittable1DModel):
r"""
One dimensional Sersic surface brightness profile.
Parameters
----------
amplitude : float
Surface brightness at r_eff.
r_eff : float
Effective (half-light) radius
n : float
Sersic Index.
See Also
--------
Gaussian1D, Moffat1D, Lorentz1D
Notes
-----
Model formula:
.. math::
I(r)=I_e\exp\left\{-b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right]\right\}
The constant :math:`b_n` is defined such that :math:`r_e` contains half the total
luminosity, and can be solved for numerically.
.. math::
\Gamma(2n) = 2\gamma (b_n,2n)
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Sersic1D
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(111, xscale='log', yscale='log')
s1 = Sersic1D(amplitude=1, r_eff=5)
r=np.arange(0, 100, .01)
for n in range(1, 10):
s1.n = n
plt.plot(r, s1(r), color=str(float(n) / 15))
plt.axis([1e-1, 30, 1e-2, 1e3])
plt.xlabel('log Radius')
plt.ylabel('log Surface Brightness')
plt.text(.25, 1.5, 'n=1')
plt.text(.25, 300, 'n=10')
plt.xticks([])
plt.yticks([])
plt.show()
References
----------
.. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html
"""
amplitude = Parameter(default=1, description="Surface brightness at r_eff")
r_eff = Parameter(default=1, description="Effective (half-light) radius")
n = Parameter(default=4, description="Sersic Index")
_gammaincinv = None
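# Note (illustrative): scipy.special.gammaincinv(2 * n, 0.5) solves the
# regularized incomplete-gamma relation for b_n quoted in the class Notes.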
@classmethod
def evaluate(cls, r, amplitude, r_eff, n):
"""One dimensional Sersic profile function."""
if cls._gammaincinv is None:
from scipy.special import gammaincinv
cls._gammaincinv = gammaincinv
return amplitude * np.exp(
-cls._gammaincinv(2 * n, 0.5) * ((r / r_eff) ** (1 / n) - 1)
)
@property
def input_units(self):
if self.r_eff.input_unit is None:
return None
return {self.inputs[0]: self.r_eff.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"r_eff": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class _Trigonometric1D(Fittable1DModel):
"""
Base class for one dimensional trigonometric and inverse trigonometric models.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
"""
amplitude = Parameter(default=1, description="Oscillation amplitude")
frequency = Parameter(default=1, description="Oscillation frequency")
phase = Parameter(default=0, description="Oscillation phase")
@property
def input_units(self):
if self.frequency.input_unit is None:
return None
return {self.inputs[0]: 1.0 / self.frequency.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"frequency": inputs_unit[self.inputs[0]] ** -1,
"amplitude": outputs_unit[self.outputs[0]],
}
class Sine1D(_Trigonometric1D):
"""
One dimensional Sine model.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
See Also
--------
ArcSine1D, Cosine1D, Tangent1D, Const1D, Linear1D
Notes
-----
Model formula:
.. math:: f(x) = A \\sin(2 \\pi f x + 2 \\pi p)
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Sine1D
plt.figure()
s1 = Sine1D(amplitude=1, frequency=.25)
r=np.arange(0, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([0, 10, -5, 5])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional Sine model function."""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = TWOPI * (frequency * x + phase)
if isinstance(argument, Quantity):
argument = argument.value
return amplitude * np.sin(argument)
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional Sine model derivative."""
d_amplitude = np.sin(TWOPI * frequency * x + TWOPI * phase)
d_frequency = (
TWOPI * x * amplitude * np.cos(TWOPI * frequency * x + TWOPI * phase)
)
d_phase = TWOPI * amplitude * np.cos(TWOPI * frequency * x + TWOPI * phase)
return [d_amplitude, d_frequency, d_phase]
@property
def inverse(self):
"""One dimensional inverse of Sine."""
return ArcSine1D(
amplitude=self.amplitude, frequency=self.frequency, phase=self.phase
)
class Cosine1D(_Trigonometric1D):
"""
One dimensional Cosine model.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
See Also
--------
ArcCosine1D, Sine1D, Tangent1D, Const1D, Linear1D
Notes
-----
Model formula:
.. math:: f(x) = A \\cos(2 \\pi f x + 2 \\pi p)
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Cosine1D
plt.figure()
s1 = Cosine1D(amplitude=1, frequency=.25)
r=np.arange(0, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([0, 10, -5, 5])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional Cosine model function."""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = TWOPI * (frequency * x + phase)
if isinstance(argument, Quantity):
argument = argument.value
return amplitude * np.cos(argument)
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional Cosine model derivative."""
d_amplitude = np.cos(TWOPI * frequency * x + TWOPI * phase)
d_frequency = -(
TWOPI * x * amplitude * np.sin(TWOPI * frequency * x + TWOPI * phase)
)
d_phase = -(TWOPI * amplitude * np.sin(TWOPI * frequency * x + TWOPI * phase))
return [d_amplitude, d_frequency, d_phase]
@property
def inverse(self):
"""One dimensional inverse of Cosine."""
return ArcCosine1D(
amplitude=self.amplitude, frequency=self.frequency, phase=self.phase
)
class Tangent1D(_Trigonometric1D):
"""
One dimensional Tangent model.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
See Also
--------
Sine1D, Cosine1D, Const1D, Linear1D
Notes
-----
Model formula:
.. math:: f(x) = A \\tan(2 \\pi f x + 2 \\pi p)
Note that the tangent function is undefined for inputs of the form
pi/2 + n*pi for all integers n. Thus the default bounding box
has been restricted to:
.. math:: [(-1/4 - p)/f, (1/4 - p)/f]
which is the largest interval, centered on the function's zero crossing,
on which the tangent function is continuous.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Tangent1D
plt.figure()
s1 = Tangent1D(amplitude=1, frequency=.25)
r=np.arange(0, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([0, 10, -5, 5])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional Tangent model function."""
        # Note: If frequency and x are quantities, they should normally have
        # inverse units, so that argument ends up being dimensionless. However,
        # np.tan of a dimensionless quantity will crash, so we remove the
        # quantity-ness from argument in this case (another option would be to
        # multiply by * u.rad but this would be slower overall).
argument = TWOPI * (frequency * x + phase)
if isinstance(argument, Quantity):
argument = argument.value
return amplitude * np.tan(argument)
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional Tangent model derivative."""
sec = 1 / (np.cos(TWOPI * frequency * x + TWOPI * phase)) ** 2
d_amplitude = np.tan(TWOPI * frequency * x + TWOPI * phase)
d_frequency = TWOPI * x * amplitude * sec
d_phase = TWOPI * amplitude * sec
return [d_amplitude, d_frequency, d_phase]
@property
def inverse(self):
"""One dimensional inverse of Tangent."""
return ArcTangent1D(
amplitude=self.amplitude, frequency=self.frequency, phase=self.phase
)
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
"""
bbox = [
(-1 / 4 - self.phase) / self.frequency,
(1 / 4 - self.phase) / self.frequency,
]
if self.frequency.unit is not None:
bbox = bbox / self.frequency.unit
return bbox
class _InverseTrigonometric1D(_Trigonometric1D):
"""
Base class for one dimensional inverse trigonometric models.
"""
@property
def input_units(self):
if self.amplitude.input_unit is None:
return None
return {self.inputs[0]: self.amplitude.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"frequency": outputs_unit[self.outputs[0]] ** -1,
"amplitude": inputs_unit[self.inputs[0]],
}
class ArcSine1D(_InverseTrigonometric1D):
"""
One dimensional ArcSine model returning values between -pi/2 and pi/2
only.
Parameters
----------
amplitude : float
Oscillation amplitude for corresponding Sine
frequency : float
Oscillation frequency for corresponding Sine
phase : float
Oscillation phase for corresponding Sine
See Also
--------
Sine1D, ArcCosine1D, ArcTangent1D
Notes
-----
Model formula:
        .. math:: f(x) = ((\\arcsin(x / A) / (2 \\pi)) - p) / f

    The arcsin function used by this model only accepts inputs in [-A, A];
    outside this interval a runtime warning is emitted and the result is NaN.
    The default bounding_box is set to this interval, so it is recommended
    that this model always be evaluated with the ``with_bounding_box=True``
    option.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import ArcSine1D
plt.figure()
s1 = ArcSine1D(amplitude=1, frequency=.25)
r=np.arange(-1, 1, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([-1, 1, -np.pi/2, np.pi/2])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional ArcSine model function."""
        # Note: If x and amplitude are quantities, they should normally have
        # matching units, so that the ratio x / amplitude ends up being
        # dimensionless. As in the forward trigonometric models, we strip the
        # quantity-ness from the argument before calling np.arcsin.
argument = x / amplitude
if isinstance(argument, Quantity):
argument = argument.value
arc_sine = np.arcsin(argument) / TWOPI
return (arc_sine - phase) / frequency
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional ArcSine model derivative."""
d_amplitude = -x / (
TWOPI * frequency * amplitude**2 * np.sqrt(1 - (x / amplitude) ** 2)
)
d_frequency = (phase - (np.arcsin(x / amplitude) / TWOPI)) / frequency**2
d_phase = -1 / frequency * np.ones(x.shape)
return [d_amplitude, d_frequency, d_phase]
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
"""
return -1 * self.amplitude, 1 * self.amplitude
@property
def inverse(self):
"""One dimensional inverse of ArcSine."""
return Sine1D(
amplitude=self.amplitude, frequency=self.frequency, phase=self.phase
)
class ArcCosine1D(_InverseTrigonometric1D):
"""
One dimensional ArcCosine returning values between 0 and pi only.
Parameters
----------
amplitude : float
Oscillation amplitude for corresponding Cosine
frequency : float
Oscillation frequency for corresponding Cosine
phase : float
Oscillation phase for corresponding Cosine
See Also
--------
Cosine1D, ArcSine1D, ArcTangent1D
Notes
-----
Model formula:
        .. math:: f(x) = ((\\arccos(x / A) / (2 \\pi)) - p) / f

    The arccos function used by this model only accepts inputs in [-A, A];
    outside this interval a runtime warning is emitted and the result is NaN.
    The default bounding_box is set to this interval, so it is recommended
    that this model always be evaluated with the ``with_bounding_box=True``
    option.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import ArcCosine1D
plt.figure()
s1 = ArcCosine1D(amplitude=1, frequency=.25)
r=np.arange(-1, 1, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([-1, 1, 0, np.pi])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional ArcCosine model function."""
        # Note: If x and amplitude are quantities, they should normally have
        # matching units, so that the ratio x / amplitude ends up being
        # dimensionless. As in the forward trigonometric models, we strip the
        # quantity-ness from the argument before calling np.arccos.
argument = x / amplitude
if isinstance(argument, Quantity):
argument = argument.value
arc_cos = np.arccos(argument) / TWOPI
return (arc_cos - phase) / frequency
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional ArcCosine model derivative."""
d_amplitude = x / (
TWOPI * frequency * amplitude**2 * np.sqrt(1 - (x / amplitude) ** 2)
)
d_frequency = (phase - (np.arccos(x / amplitude) / TWOPI)) / frequency**2
d_phase = -1 / frequency * np.ones(x.shape)
return [d_amplitude, d_frequency, d_phase]
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
"""
return -1 * self.amplitude, 1 * self.amplitude
@property
def inverse(self):
"""One dimensional inverse of ArcCosine."""
return Cosine1D(
amplitude=self.amplitude, frequency=self.frequency, phase=self.phase
)
class ArcTangent1D(_InverseTrigonometric1D):
"""
One dimensional ArcTangent model returning values between -pi/2 and
pi/2 only.
Parameters
----------
amplitude : float
Oscillation amplitude for corresponding Tangent
frequency : float
Oscillation frequency for corresponding Tangent
phase : float
Oscillation phase for corresponding Tangent
See Also
--------
Tangent1D, ArcSine1D, ArcCosine1D
Notes
-----
Model formula:
        .. math:: f(x) = ((\\arctan(x / A) / (2 \\pi)) - p) / f
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import ArcTangent1D
plt.figure()
s1 = ArcTangent1D(amplitude=1, frequency=.25)
r=np.arange(-10, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([-10, 10, -np.pi/2, np.pi/2])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional ArcTangent model function."""
        # Note: If x and amplitude are quantities, they should normally have
        # matching units, so that the ratio x / amplitude ends up being
        # dimensionless. As in the forward trigonometric models, we strip the
        # quantity-ness from the argument before calling np.arctan.
argument = x / amplitude
if isinstance(argument, Quantity):
argument = argument.value
        arc_tan = np.arctan(argument) / TWOPI
        return (arc_tan - phase) / frequency
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional ArcTangent model derivative."""
d_amplitude = -x / (
TWOPI * frequency * amplitude**2 * (1 + (x / amplitude) ** 2)
)
d_frequency = (phase - (np.arctan(x / amplitude) / TWOPI)) / frequency**2
d_phase = -1 / frequency * np.ones(x.shape)
return [d_amplitude, d_frequency, d_phase]
@property
def inverse(self):
"""One dimensional inverse of ArcTangent."""
return Tangent1D(
amplitude=self.amplitude, frequency=self.frequency, phase=self.phase
)
class Linear1D(Fittable1DModel):
"""
One dimensional Line model.
Parameters
----------
slope : float
Slope of the straight line
intercept : float
Intercept of the straight line
See Also
--------
Const1D
Notes
-----
Model formula:
.. math:: f(x) = a x + b
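
    Examples
    --------
    A minimal usage sketch; the slope and intercept below are arbitrary
    illustrative values:

    .. plot::
        :include-source:

        import numpy as np
        import matplotlib.pyplot as plt
        from astropy.modeling.models import Linear1D

        line = Linear1D(slope=2., intercept=-1.)
        x = np.linspace(-5, 5, 100)
        plt.figure()
        plt.plot(x, line(x), lw=2)
        plt.xlabel('x')
        plt.ylabel('f(x)')
        plt.show()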
"""
slope = Parameter(default=1, description="Slope of the straight line")
intercept = Parameter(default=0, description="Intercept of the straight line")
linear = True
@staticmethod
def evaluate(x, slope, intercept):
"""One dimensional Line model function."""
return slope * x + intercept
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Line model derivative with respect to parameters."""
d_slope = x
d_intercept = np.ones_like(x)
return [d_slope, d_intercept]
@property
def inverse(self):
new_slope = self.slope**-1
new_intercept = -self.intercept / self.slope
return self.__class__(slope=new_slope, intercept=new_intercept)
@property
def input_units(self):
if self.intercept.input_unit is None and self.slope.input_unit is None:
return None
return {self.inputs[0]: self.intercept.input_unit / self.slope.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"intercept": outputs_unit[self.outputs[0]],
"slope": outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]],
}
class Planar2D(Fittable2DModel):
"""
Two dimensional Plane model.
Parameters
----------
slope_x : float
Slope of the plane in X
slope_y : float
Slope of the plane in Y
intercept : float
Z-intercept of the plane
Notes
-----
Model formula:
.. math:: f(x, y) = a x + b y + c
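
    Examples
    --------
    A minimal usage sketch evaluating the plane on a coordinate grid; the
    slopes and intercept below are arbitrary illustrative values:

    .. plot::
        :include-source:

        import numpy as np
        import matplotlib.pyplot as plt
        from astropy.modeling.models import Planar2D

        y, x = np.mgrid[0:50, 0:50]
        plane = Planar2D(slope_x=0.5, slope_y=1.0, intercept=10.)
        plt.figure()
        plt.imshow(plane(x, y), origin='lower', interpolation='nearest')
        plt.colorbar()
        plt.show()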
"""
slope_x = Parameter(default=1, description="Slope of the plane in X")
slope_y = Parameter(default=1, description="Slope of the plane in Y")
intercept = Parameter(default=0, description="Z-intercept of the plane")
linear = True
@staticmethod
def evaluate(x, y, slope_x, slope_y, intercept):
"""Two dimensional Plane model function."""
return slope_x * x + slope_y * y + intercept
@staticmethod
def fit_deriv(x, y, *params):
"""Two dimensional Plane model derivative with respect to parameters."""
d_slope_x = x
d_slope_y = y
d_intercept = np.ones_like(x)
return [d_slope_x, d_slope_y, d_intercept]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"intercept": outputs_unit["z"],
"slope_x": outputs_unit["z"] / inputs_unit["x"],
"slope_y": outputs_unit["z"] / inputs_unit["y"],
}
class Lorentz1D(Fittable1DModel):
"""
One dimensional Lorentzian model.
Parameters
----------
amplitude : float or `~astropy.units.Quantity`.
Peak value - for a normalized profile (integrating to 1),
set amplitude = 2 / (np.pi * fwhm)
x_0 : float or `~astropy.units.Quantity`.
Position of the peak
fwhm : float or `~astropy.units.Quantity`.
Full width at half maximum (FWHM)
See Also
--------
Gaussian1D, Box1D, RickerWavelet1D
Notes
-----
    Input ``x``, position ``x_0`` and ``fwhm`` must either all be provided with
    compatible units or all be given as unitless numbers.
Model formula:
.. math::
f(x) = \\frac{A \\gamma^{2}}{\\gamma^{2} + \\left(x - x_{0}\\right)^{2}}
where :math:`\\gamma` is half of given FWHM.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Lorentz1D
plt.figure()
s1 = Lorentz1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Peak value")
x_0 = Parameter(default=0, description="Position of the peak")
fwhm = Parameter(default=1, description="Full width at half maximum")
@staticmethod
def evaluate(x, amplitude, x_0, fwhm):
"""One dimensional Lorentzian model function."""
return amplitude * ((fwhm / 2.0) ** 2) / ((x - x_0) ** 2 + (fwhm / 2.0) ** 2)
@staticmethod
def fit_deriv(x, amplitude, x_0, fwhm):
"""One dimensional Lorentzian model derivative with respect to parameters."""
d_amplitude = fwhm**2 / (fwhm**2 + (x - x_0) ** 2)
d_x_0 = (
amplitude * d_amplitude * (2 * x - 2 * x_0) / (fwhm**2 + (x - x_0) ** 2)
)
d_fwhm = 2 * amplitude * d_amplitude / fwhm * (1 - d_amplitude)
return [d_amplitude, d_x_0, d_fwhm]
def bounding_box(self, factor=25):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of FWHM used to define the limits.
Default is chosen to include most (99%) of the
area under the curve, while still showing the
central feature of interest.
"""
x0 = self.x_0
dx = factor * self.fwhm
return (x0 - dx, x0 + dx)
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {self.inputs[0]: self.x_0.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"fwhm": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Voigt1D(Fittable1DModel):
"""
One dimensional model for the Voigt profile.
Parameters
----------
x_0 : float or `~astropy.units.Quantity`
Position of the peak
amplitude_L : float or `~astropy.units.Quantity`.
The Lorentzian amplitude (peak of the associated Lorentz function)
- for a normalized profile (integrating to 1), set
amplitude_L = 2 / (np.pi * fwhm_L)
fwhm_L : float or `~astropy.units.Quantity`
The Lorentzian full width at half maximum
fwhm_G : float or `~astropy.units.Quantity`.
The Gaussian full width at half maximum
    method : str, optional
        Algorithm for computing the complex error function; one of
        'Humlicek2' (fast; relative accuracy better than about 3e-5; the
        default when ``scipy`` is not installed) or 'Scipy', alternatively
        'wofz' (requires ``scipy``; almost as fast and the reference in
        accuracy; the default when ``scipy`` is installed).
See Also
--------
Gaussian1D, Lorentz1D
Notes
-----
    Input ``x``, position ``x_0`` and the ``fwhm_*`` widths must either all be
    provided with compatible units or all be given as unitless numbers.

    The Voigt function is calculated as the real part of the complex error
    function computed from either Humlicek's rational approximations
    (JQSRT 21:309, 1979; 27:437, 1982) following Schreier 2018 (MNRAS 479,
    3068; and ``hum2zpf16m`` from his cpfX.py module); or
    `~scipy.special.wofz` (implementing 'Faddeeva.cc').
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Voigt1D
import matplotlib.pyplot as plt
plt.figure()
x = np.arange(0, 10, 0.01)
v1 = Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9)
plt.plot(x, v1(x))
plt.show()
"""
x_0 = Parameter(default=0, description="Position of the peak")
amplitude_L = Parameter(default=1, description="The Lorentzian amplitude")
fwhm_L = Parameter(
default=2 / np.pi, description="The Lorentzian full width at half maximum"
)
fwhm_G = Parameter(
default=np.log(2), description="The Gaussian full width at half maximum"
)
sqrt_pi = np.sqrt(np.pi)
sqrt_ln2 = np.sqrt(np.log(2))
sqrt_ln2pi = np.sqrt(np.log(2) * np.pi)
_last_z = np.zeros(1, dtype=complex)
_last_w = np.zeros(1, dtype=float)
_faddeeva = None
def __init__(
self,
x_0=x_0.default,
amplitude_L=amplitude_L.default,
fwhm_L=fwhm_L.default,
fwhm_G=fwhm_G.default,
method=None,
**kwargs,
):
if str(method).lower() == "humlicek2" and HAS_SCIPY:
warnings.warn(
f"{method} has been deprecated since Astropy 5.3 and will be removed in a future version.\n"
"It is recommended to always use the `~scipy.special.wofz` implementation "
"when `scipy` is installed.",
AstropyDeprecationWarning,
)
if method is None:
if HAS_SCIPY:
method = "wofz"
else:
method = "humlicek2"
if str(method).lower() in ("wofz", "scipy"):
from scipy.special import wofz
self._faddeeva = wofz
elif str(method).lower() == "humlicek2":
self._faddeeva = self._hum2zpf16c
else:
raise ValueError(
f"Not a valid method for Voigt1D Faddeeva function: {method}."
)
self.method = self._faddeeva.__name__
super().__init__(
x_0=x_0, amplitude_L=amplitude_L, fwhm_L=fwhm_L, fwhm_G=fwhm_G, **kwargs
)
def _wrap_wofz(self, z):
"""Call complex error (Faddeeva) function w(z) implemented by algorithm `method`;
cache results for consecutive calls from `evaluate`, `fit_deriv`.
"""
if z.shape == self._last_z.shape and np.allclose(
z, self._last_z, rtol=1.0e-14, atol=1.0e-15
):
return self._last_w
self._last_z = (
z.to_value(u.dimensionless_unscaled) if isinstance(z, u.Quantity) else z
)
self._last_w = self._faddeeva(self._last_z)
return self._last_w
def evaluate(self, x, x_0, amplitude_L, fwhm_L, fwhm_G):
"""One dimensional Voigt function scaled to Lorentz peak amplitude."""
z = np.atleast_1d(2 * (x - x_0) + 1j * fwhm_L) * self.sqrt_ln2 / fwhm_G
# The normalised Voigt profile is w.real * self.sqrt_ln2 / (self.sqrt_pi * fwhm_G) * 2 ;
# for the legacy definition we multiply with np.pi * fwhm_L / 2 * amplitude_L
return self._wrap_wofz(z).real * self.sqrt_ln2pi / fwhm_G * fwhm_L * amplitude_L
def fit_deriv(self, x, x_0, amplitude_L, fwhm_L, fwhm_G):
"""
Derivative of the one dimensional Voigt function with respect to parameters.
"""
s = self.sqrt_ln2 / fwhm_G
z = np.atleast_1d(2 * (x - x_0) + 1j * fwhm_L) * s
# V * constant from McLean implementation (== their Voigt function)
w = self._wrap_wofz(z) * s * fwhm_L * amplitude_L * self.sqrt_pi
# Schreier (2018) Eq. 6 == (dvdx + 1j * dvdy) / (sqrt(pi) * fwhm_L * amplitude_L)
dwdz = -2 * z * w + 2j * s * fwhm_L * amplitude_L
return [
-dwdz.real * 2 * s,
w.real / amplitude_L,
w.real / fwhm_L - dwdz.imag * s,
(-w.real - s * (2 * (x - x_0) * dwdz.real - fwhm_L * dwdz.imag)) / fwhm_G,
]
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {self.inputs[0]: self.x_0.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"fwhm_L": inputs_unit[self.inputs[0]],
"fwhm_G": inputs_unit[self.inputs[0]],
"amplitude_L": outputs_unit[self.outputs[0]],
}
@staticmethod
def _hum2zpf16c(z, s=10.0):
"""Complex error function w(z = x + iy) combining Humlicek's rational approximations.
|x| + y > 10: Humlicek (JQSRT, 1982) rational approximation for region II;
else: Humlicek (JQSRT, 1979) rational approximation with n=16 and delta=y0=1.35
Version using a mask and np.place;
single complex argument version of Franz Schreier's cpfX.hum2zpf16m.
Originally licensed under a 3-clause BSD style license - see
https://atmos.eoc.dlr.de/tools/lbl4IR/cpfX.py
"""
# Optimized (single fraction) Humlicek region I rational approximation for n=16, delta=1.35
# fmt: off
AA = np.array(
[
+46236.3358828121, -147726.58393079657j,
-206562.80451354137, 281369.1590631087j,
+183092.74968253175, -184787.96830696272j,
-66155.39578477248, 57778.05827983565j,
+11682.770904216826, -9442.402767960672j,
-1052.8438624933142, 814.0996198624186j,
+45.94499030751872, -34.59751573708725j,
-0.7616559377907136, 0.5641895835476449j,
]
) # 1j/sqrt(pi) to the 12. digit
bb = np.array(
[
+7918.06640624997,
-126689.0625,
+295607.8125,
-236486.25,
+84459.375,
-15015.0,
+1365.0,
-60.0,
+1.0,
]
)
# fmt: on
sqrt_piinv = 1.0 / np.sqrt(np.pi)
zz = z * z
w = 1j * (z * (zz * sqrt_piinv - 1.410474)) / (0.75 + zz * (zz - 3.0))
if np.any(z.imag < s):
mask = abs(z.real) + z.imag < s # returns true for interior points
# returns small complex array covering only the interior region
Z = z[np.where(mask)] + 1.35j
ZZ = Z * Z
# fmt: off
# Recursive algorithms for the polynomials in Z with coefficients AA, bb
# numer = 0.0
# for A in AA[::-1]:
# numer = numer * Z + A
# Explicitly unrolled above loop for speed
numer = (((((((((((((((AA[15]*Z + AA[14])*Z + AA[13])*Z + AA[12])*Z + AA[11])*Z +
AA[10])*Z + AA[9])*Z + AA[8])*Z + AA[7])*Z + AA[6])*Z +
AA[5])*Z + AA[4])*Z+AA[3])*Z + AA[2])*Z + AA[1])*Z + AA[0])
# denom = 0.0
# for b in bb[::-1]:
# denom = denom * ZZ + b
# Explicitly unrolled above loop for speed
denom = (((((((ZZ + bb[7])*ZZ + bb[6])*ZZ + bb[5])*ZZ+bb[4])*ZZ + bb[3])*ZZ +
bb[2])*ZZ + bb[1])*ZZ + bb[0]
# fmt: on
np.place(w, mask, numer / denom)
return w
class Const1D(Fittable1DModel):
"""
One dimensional Constant model.
Parameters
----------
amplitude : float
Value of the constant function
See Also
--------
Const2D
Notes
-----
Model formula:
.. math:: f(x) = A
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Const1D
plt.figure()
s1 = Const1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(
default=1, description="Value of the constant function", mag=True
)
linear = True
@staticmethod
def evaluate(x, amplitude):
"""One dimensional Constant model function."""
if amplitude.size == 1:
# This is slightly faster than using ones_like and multiplying
x = np.empty_like(amplitude, shape=x.shape, dtype=x.dtype)
x.fill(amplitude.item())
else:
# This case is less likely but could occur if the amplitude
# parameter is given an array-like value
x = amplitude * np.ones_like(x, subok=False)
if isinstance(amplitude, Quantity):
return Quantity(x, unit=amplitude.unit, copy=False, subok=True)
return x
@staticmethod
def fit_deriv(x, amplitude):
"""One dimensional Constant model derivative with respect to parameters."""
d_amplitude = np.ones_like(x)
return [d_amplitude]
@property
def input_units(self):
return None
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"amplitude": outputs_unit[self.outputs[0]]}
class Const2D(Fittable2DModel):
"""
Two dimensional Constant model.
Parameters
----------
amplitude : float
Value of the constant function
See Also
--------
Const1D
Notes
-----
Model formula:
.. math:: f(x, y) = A
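
    Examples
    --------
    A minimal usage sketch; the amplitude below is an arbitrary illustrative
    value:

    .. plot::
        :include-source:

        import numpy as np
        import matplotlib.pyplot as plt
        from astropy.modeling.models import Const2D

        y, x = np.mgrid[0:10, 0:10]
        const = Const2D(amplitude=3.)
        plt.figure()
        plt.imshow(const(x, y), origin='lower', vmin=0, vmax=5)
        plt.colorbar()
        plt.show()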
"""
amplitude = Parameter(
default=1, description="Value of the constant function", mag=True
)
linear = True
@staticmethod
def evaluate(x, y, amplitude):
"""Two dimensional Constant model function."""
if amplitude.size == 1:
# This is slightly faster than using ones_like and multiplying
x = np.empty_like(amplitude, shape=x.shape, dtype=x.dtype)
x.fill(amplitude.item())
else:
# This case is less likely but could occur if the amplitude
# parameter is given an array-like value
x = amplitude * np.ones_like(x, subok=False)
if isinstance(amplitude, Quantity):
return Quantity(x, unit=amplitude.unit, copy=False, subok=True)
return x
@property
def input_units(self):
return None
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"amplitude": outputs_unit[self.outputs[0]]}
class Ellipse2D(Fittable2DModel):
"""
A 2D Ellipse model.
Parameters
----------
amplitude : float
Value of the ellipse.
x_0 : float
x position of the center of the disk.
y_0 : float
y position of the center of the disk.
a : float
The length of the semimajor axis.
b : float
The length of the semiminor axis.
theta : float or `~astropy.units.Quantity`, optional
The rotation angle as an angular quantity
(`~astropy.units.Quantity` or `~astropy.coordinates.Angle`)
or a value in radians (as a float). The rotation angle
increases counterclockwise from the positive x axis.
See Also
--------
Disk2D, Box2D
Notes
-----
Model formula:
.. math::
f(x, y) = \\left \\{
\\begin{array}{ll}
\\mathrm{amplitude} & : \\left[\\frac{(x - x_0) \\cos
\\theta + (y - y_0) \\sin \\theta}{a}\\right]^2 +
\\left[\\frac{-(x - x_0) \\sin \\theta + (y - y_0)
\\cos \\theta}{b}\\right]^2 \\leq 1 \\\\
0 & : \\mathrm{otherwise}
\\end{array}
\\right.
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Ellipse2D
from astropy.coordinates import Angle
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
x0, y0 = 25, 25
a, b = 20, 10
theta = Angle(30, 'deg')
e = Ellipse2D(amplitude=100., x_0=x0, y_0=y0, a=a, b=b,
theta=theta.radian)
y, x = np.mgrid[0:50, 0:50]
fig, ax = plt.subplots(1, 1)
ax.imshow(e(x, y), origin='lower', interpolation='none', cmap='Greys_r')
        e2 = mpatches.Ellipse((x0, y0), 2*a, 2*b, angle=theta.degree,
                              edgecolor='red', facecolor='none')
ax.add_patch(e2)
plt.show()
"""
amplitude = Parameter(default=1, description="Value of the ellipse", mag=True)
x_0 = Parameter(default=0, description="X position of the center of the disk.")
y_0 = Parameter(default=0, description="Y position of the center of the disk.")
a = Parameter(default=1, description="The length of the semimajor axis")
b = Parameter(default=1, description="The length of the semiminor axis")
theta = Parameter(
default=0.0,
description=(
"Rotation angle either as a float (in radians) or a |Quantity| angle"
),
)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, a, b, theta):
"""Two dimensional Ellipse model function."""
xx = x - x_0
yy = y - y_0
cost = np.cos(theta)
sint = np.sin(theta)
numerator1 = (xx * cost) + (yy * sint)
numerator2 = -(xx * sint) + (yy * cost)
in_ellipse = ((numerator1 / a) ** 2 + (numerator2 / b) ** 2) <= 1.0
result = np.select([in_ellipse], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False, subok=True)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``((y_low, y_high), (x_low, x_high))``
"""
a = self.a
b = self.b
theta = self.theta
dx, dy = ellipse_extent(a, b, theta)
return ((self.y_0 - dy, self.y_0 + dy), (self.x_0 - dx, self.x_0 + dx))
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {
self.inputs[0]: self.x_0.input_unit,
self.inputs[1]: self.y_0.input_unit,
}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"a": inputs_unit[self.inputs[0]],
"b": inputs_unit[self.inputs[0]],
"theta": u.rad,
"amplitude": outputs_unit[self.outputs[0]],
}
class Disk2D(Fittable2DModel):
"""
Two dimensional radial symmetric Disk model.
Parameters
----------
amplitude : float
Value of the disk function
x_0 : float
x position center of the disk
y_0 : float
y position center of the disk
R_0 : float
Radius of the disk
See Also
--------
Box2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(r) = \\left \\{
\\begin{array}{ll}
A & : r \\leq R_0 \\\\
0 & : r > R_0
\\end{array}
\\right.
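
    Examples
    --------
    A minimal usage sketch rendering the disk on a pixel grid; the center,
    radius and amplitude below are arbitrary illustrative values:

    .. plot::
        :include-source:

        import numpy as np
        import matplotlib.pyplot as plt
        from astropy.modeling.models import Disk2D

        y, x = np.mgrid[0:100, 0:100]
        disk = Disk2D(amplitude=10., x_0=50., y_0=50., R_0=25.)
        plt.figure()
        plt.imshow(disk(x, y), origin='lower', interpolation='nearest')
        plt.colorbar()
        plt.show()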
"""
amplitude = Parameter(default=1, description="Value of disk function", mag=True)
x_0 = Parameter(default=0, description="X position of center of the disk")
y_0 = Parameter(default=0, description="Y position of center of the disk")
R_0 = Parameter(default=1, description="Radius of the disk")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, R_0):
"""Two dimensional Disk model function."""
rr = (x - x_0) ** 2 + (y - y_0) ** 2
result = np.select([rr <= R_0**2], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False, subok=True)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``((y_low, y_high), (x_low, x_high))``
"""
return (
(self.y_0 - self.R_0, self.y_0 + self.R_0),
(self.x_0 - self.R_0, self.x_0 + self.R_0),
)
@property
def input_units(self):
x_unit = self.x_0.input_unit
y_unit = self.y_0.input_unit
if x_unit is None and y_unit is None:
return None
return {self.inputs[0]: x_unit, self.inputs[1]: y_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"R_0": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Ring2D(Fittable2DModel):
"""
Two dimensional radial symmetric Ring model.
Parameters
----------
amplitude : float
Value of the disk function
x_0 : float
x position center of the disk
y_0 : float
y position center of the disk
r_in : float
Inner radius of the ring
width : float
Width of the ring.
r_out : float
Outer Radius of the ring. Can be specified instead of width.
See Also
--------
Disk2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(r) = \\left \\{
\\begin{array}{ll}
A & : r_{in} \\leq r \\leq r_{out} \\\\
0 & : \\text{else}
\\end{array}
\\right.
Where :math:`r_{out} = r_{in} + r_{width}`.
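
    Examples
    --------
    A minimal usage sketch rendering the ring on a pixel grid; the center,
    inner radius and width below are arbitrary illustrative values:

    .. plot::
        :include-source:

        import numpy as np
        import matplotlib.pyplot as plt
        from astropy.modeling.models import Ring2D

        y, x = np.mgrid[0:100, 0:100]
        ring = Ring2D(amplitude=5., x_0=50., y_0=50., r_in=20., width=10.)
        plt.figure()
        plt.imshow(ring(x, y), origin='lower', interpolation='nearest')
        plt.colorbar()
        plt.show()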
"""
amplitude = Parameter(default=1, description="Value of the disk function", mag=True)
x_0 = Parameter(default=0, description="X position of center of disc")
y_0 = Parameter(default=0, description="Y position of center of disc")
r_in = Parameter(default=1, description="Inner radius of the ring")
width = Parameter(default=1, description="Width of the ring")
def __init__(
self,
amplitude=amplitude.default,
x_0=x_0.default,
y_0=y_0.default,
r_in=None,
width=None,
r_out=None,
**kwargs,
):
if (r_in is None) and (r_out is None) and (width is None):
r_in = self.r_in.default
width = self.width.default
elif (r_in is not None) and (r_out is None) and (width is None):
width = self.width.default
elif (r_in is None) and (r_out is not None) and (width is None):
r_in = self.r_in.default
width = r_out - r_in
elif (r_in is None) and (r_out is None) and (width is not None):
r_in = self.r_in.default
elif (r_in is not None) and (r_out is not None) and (width is None):
width = r_out - r_in
elif (r_in is None) and (r_out is not None) and (width is not None):
r_in = r_out - width
elif (r_in is not None) and (r_out is not None) and (width is not None):
if np.any(width != (r_out - r_in)):
raise InputParameterError("Width must be r_out - r_in")
if np.any(r_in < 0) or np.any(width < 0):
raise InputParameterError(f"{r_in=} and {width=} must both be >=0")
super().__init__(
amplitude=amplitude, x_0=x_0, y_0=y_0, r_in=r_in, width=width, **kwargs
)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, r_in, width):
"""Two dimensional Ring model function."""
rr = (x - x_0) ** 2 + (y - y_0) ** 2
r_range = np.logical_and(rr >= r_in**2, rr <= (r_in + width) ** 2)
result = np.select([r_range], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False, subok=True)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dr = self.r_in + self.width
return ((self.y_0 - dr, self.y_0 + dr), (self.x_0 - dr, self.x_0 + dr))
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {
self.inputs[0]: self.x_0.input_unit,
self.inputs[1]: self.y_0.input_unit,
}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"r_in": inputs_unit[self.inputs[0]],
"width": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Box1D(Fittable1DModel):
"""
One dimensional Box model.
Parameters
----------
amplitude : float
Amplitude A
x_0 : float
Position of the center of the box function
width : float
Width of the box
See Also
--------
Box2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(x) = \\left \\{
\\begin{array}{ll}
A & : x_0 - w/2 \\leq x \\leq x_0 + w/2 \\\\
0 & : \\text{else}
\\end{array}
\\right.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Box1D
plt.figure()
s1 = Box1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.width = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude A", mag=True)
x_0 = Parameter(default=0, description="Position of center of box function")
width = Parameter(default=1, description="Width of the box")
@staticmethod
def evaluate(x, amplitude, x_0, width):
"""One dimensional Box model function."""
inside = np.logical_and(x >= x_0 - width / 2.0, x <= x_0 + width / 2.0)
return np.select([inside], [amplitude], 0)
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
        ``(x_low, x_high)``
"""
dx = self.width / 2
return (self.x_0 - dx, self.x_0 + dx)
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {self.inputs[0]: self.x_0.input_unit}
@property
def return_units(self):
if self.amplitude.unit is None:
return None
return {self.outputs[0]: self.amplitude.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"width": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Box2D(Fittable2DModel):
"""
Two dimensional Box model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
x position of the center of the box function
x_width : float
Width in x direction of the box
y_0 : float
y position of the center of the box function
y_width : float
Width in y direction of the box
See Also
--------
Box1D, Gaussian2D, Moffat2D
Notes
-----
Model formula:
.. math::
f(x, y) = \\left \\{
\\begin{array}{ll}
A : & x_0 - w_x/2 \\leq x \\leq x_0 + w_x/2 \\text{ and} \\\\
& y_0 - w_y/2 \\leq y \\leq y_0 + w_y/2 \\\\
0 : & \\text{else}
\\end{array}
\\right.
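
    Examples
    --------
    A minimal usage sketch rendering the box on a pixel grid; the center and
    widths below are arbitrary illustrative values:

    .. plot::
        :include-source:

        import numpy as np
        import matplotlib.pyplot as plt
        from astropy.modeling.models import Box2D

        y, x = np.mgrid[0:100, 0:100]
        box = Box2D(amplitude=3., x_0=50., y_0=40., x_width=30., y_width=20.)
        plt.figure()
        plt.imshow(box(x, y), origin='lower', interpolation='nearest')
        plt.colorbar()
        plt.show()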
"""
amplitude = Parameter(default=1, description="Amplitude", mag=True)
x_0 = Parameter(
default=0, description="X position of the center of the box function"
)
y_0 = Parameter(
default=0, description="Y position of the center of the box function"
)
x_width = Parameter(default=1, description="Width in x direction of the box")
y_width = Parameter(default=1, description="Width in y direction of the box")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, x_width, y_width):
"""Two dimensional Box model function."""
x_range = np.logical_and(x >= x_0 - x_width / 2.0, x <= x_0 + x_width / 2.0)
y_range = np.logical_and(y >= y_0 - y_width / 2.0, y <= y_0 + y_width / 2.0)
result = np.select([np.logical_and(x_range, y_range)], [amplitude], 0)
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False, subok=True)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dx = self.x_width / 2
dy = self.y_width / 2
return ((self.y_0 - dy, self.y_0 + dy), (self.x_0 - dx, self.x_0 + dx))
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {
self.inputs[0]: self.x_0.input_unit,
self.inputs[1]: self.y_0.input_unit,
}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[1]],
"x_width": inputs_unit[self.inputs[0]],
"y_width": inputs_unit[self.inputs[1]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Trapezoid1D(Fittable1DModel):
"""
One dimensional Trapezoid model.
Parameters
----------
amplitude : float
Amplitude of the trapezoid
x_0 : float
Center position of the trapezoid
width : float
Width of the constant part of the trapezoid.
slope : float
Slope of the tails of the trapezoid
See Also
--------
Box1D, Gaussian1D, Moffat1D
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Trapezoid1D
plt.figure()
s1 = Trapezoid1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.width = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude of the trapezoid")
x_0 = Parameter(default=0, description="Center position of the trapezoid")
width = Parameter(default=1, description="Width of constant part of the trapezoid")
slope = Parameter(default=1, description="Slope of the tails of trapezoid")
@staticmethod
def evaluate(x, amplitude, x_0, width, slope):
"""One dimensional Trapezoid model function."""
# Compute the four points where the trapezoid changes slope
# x1 <= x2 <= x3 <= x4
x2 = x_0 - width / 2.0
x3 = x_0 + width / 2.0
x1 = x2 - amplitude / slope
x4 = x3 + amplitude / slope
# Compute model values in pieces between the change points
range_a = np.logical_and(x >= x1, x < x2)
range_b = np.logical_and(x >= x2, x < x3)
range_c = np.logical_and(x >= x3, x < x4)
val_a = slope * (x - x1)
val_b = amplitude
val_c = slope * (x4 - x)
result = np.select([range_a, range_b, range_c], [val_a, val_b, val_c])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False, subok=True)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
        ``(x_low, x_high)``
"""
dx = self.width / 2 + self.amplitude / self.slope
return (self.x_0 - dx, self.x_0 + dx)
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {self.inputs[0]: self.x_0.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"width": inputs_unit[self.inputs[0]],
"slope": outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class TrapezoidDisk2D(Fittable2DModel):
"""
Two dimensional circular Trapezoid model.
Parameters
----------
amplitude : float
Amplitude of the trapezoid
x_0 : float
x position of the center of the trapezoid
y_0 : float
y position of the center of the trapezoid
R_0 : float
Radius of the constant part of the trapezoid.
slope : float
Slope of the tails of the trapezoid in x direction.
See Also
--------
Disk2D, Box2D
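
    Examples
    --------
    A minimal usage sketch rendering the profile on a pixel grid; the
    parameter values below are arbitrary illustrative choices:

    .. plot::
        :include-source:

        import numpy as np
        import matplotlib.pyplot as plt
        from astropy.modeling.models import TrapezoidDisk2D

        y, x = np.mgrid[0:100, 0:100]
        trap = TrapezoidDisk2D(amplitude=10., x_0=50., y_0=50., R_0=20., slope=1.)
        plt.figure()
        plt.imshow(trap(x, y), origin='lower', interpolation='nearest')
        plt.colorbar()
        plt.show()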
"""
amplitude = Parameter(default=1, description="Amplitude of the trapezoid")
x_0 = Parameter(default=0, description="X position of the center of the trapezoid")
y_0 = Parameter(default=0, description="Y position of the center of the trapezoid")
R_0 = Parameter(default=1, description="Radius of constant part of trapezoid")
slope = Parameter(
default=1, description="Slope of tails of trapezoid in x direction"
)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, R_0, slope):
"""Two dimensional Trapezoid Disk model function."""
r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2)
range_1 = r <= R_0
range_2 = np.logical_and(r > R_0, r <= R_0 + amplitude / slope)
val_1 = amplitude
val_2 = amplitude + slope * (R_0 - r)
result = np.select([range_1, range_2], [val_1, val_2])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False, subok=True)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dr = self.R_0 + self.amplitude / self.slope
return ((self.y_0 - dr, self.y_0 + dr), (self.x_0 - dr, self.x_0 + dr))
@property
def input_units(self):
x_unit = self.x_0.input_unit
y_unit = self.y_0.input_unit
if x_unit is None and y_unit is None:
return None
return {self.inputs[0]: x_unit, self.inputs[1]: y_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit["x"] != inputs_unit["y"]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"R_0": inputs_unit[self.inputs[0]],
"slope": outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class RickerWavelet1D(Fittable1DModel):
"""
One dimensional Ricker Wavelet model (sometimes known as a "Mexican Hat"
model).
.. note::
See https://github.com/astropy/astropy/pull/9445 for discussions
related to renaming of this model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
Position of the peak
sigma : float
Width of the Ricker wavelet
See Also
--------
RickerWavelet2D, Box1D, Gaussian1D, Trapezoid1D
Notes
-----
Model formula:
.. math::
f(x) = {A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2}}{\\sigma^{2}}\\right)
e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import RickerWavelet1D
plt.figure()
s1 = RickerWavelet1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
            s1.sigma = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -2, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude (peak) value")
x_0 = Parameter(default=0, description="Position of the peak")
sigma = Parameter(default=1, description="Width of the Ricker wavelet")
@staticmethod
def evaluate(x, amplitude, x_0, sigma):
"""One dimensional Ricker Wavelet model function."""
xx_ww = (x - x_0) ** 2 / (2 * sigma**2)
return amplitude * (1 - 2 * xx_ww) * np.exp(-xx_ww)
def bounding_box(self, factor=10.0):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of sigma used to define the limits.
"""
x0 = self.x_0
dx = factor * self.sigma
return (x0 - dx, x0 + dx)
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {self.inputs[0]: self.x_0.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"sigma": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class RickerWavelet2D(Fittable2DModel):
"""
Two dimensional Ricker Wavelet model (sometimes known as a "Mexican Hat"
model).
.. note::
See https://github.com/astropy/astropy/pull/9445 for discussions
related to renaming of this model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
x position of the peak
y_0 : float
y position of the peak
sigma : float
Width of the Ricker wavelet
See Also
--------
RickerWavelet1D, Gaussian2D
Notes
-----
Model formula:
.. math::
f(x, y) = A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2}
+ \\left(y - y_{0}\\right)^{2}}{\\sigma^{2}}\\right)
e^{\\frac{- \\left(x - x_{0}\\right)^{2}
- \\left(y - y_{0}\\right)^{2}}{2 \\sigma^{2}}}
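
    Examples
    --------
    A minimal usage sketch evaluating the wavelet on a pixel grid; the
    parameter values below are arbitrary illustrative choices:

    .. plot::
        :include-source:

        import numpy as np
        import matplotlib.pyplot as plt
        from astropy.modeling.models import RickerWavelet2D

        y, x = np.mgrid[0:100, 0:100]
        wavelet = RickerWavelet2D(amplitude=1., x_0=50., y_0=50., sigma=10.)
        plt.figure()
        plt.imshow(wavelet(x, y), origin='lower', interpolation='nearest')
        plt.colorbar()
        plt.show()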
"""
amplitude = Parameter(default=1, description="Amplitude (peak) value")
x_0 = Parameter(default=0, description="X position of the peak")
y_0 = Parameter(default=0, description="Y position of the peak")
sigma = Parameter(default=1, description="Width of the Ricker wavelet")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, sigma):
"""Two dimensional Ricker Wavelet model function."""
rr_ww = ((x - x_0) ** 2 + (y - y_0) ** 2) / (2 * sigma**2)
return amplitude * (1 - rr_ww) * np.exp(-rr_ww)
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {
self.inputs[0]: self.x_0.input_unit,
self.inputs[1]: self.y_0.input_unit,
}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"sigma": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class AiryDisk2D(Fittable2DModel):
"""
Two dimensional Airy disk model.
Parameters
----------
amplitude : float
Amplitude of the Airy function.
x_0 : float
x position of the maximum of the Airy function.
y_0 : float
y position of the maximum of the Airy function.
radius : float
The radius of the Airy disk (radius of the first zero).
See Also
--------
Box2D, TrapezoidDisk2D, Gaussian2D
Notes
-----
Model formula:
.. math:: f(r) = A \\left[
\\frac{2 J_1(\\frac{\\pi r}{R/R_z})}{\\frac{\\pi r}{R/R_z}}
\\right]^2
Where :math:`J_1` is the first order Bessel function of the first
kind, :math:`r` is radial distance from the maximum of the Airy
function (:math:`r = \\sqrt{(x - x_0)^2 + (y - y_0)^2}`), :math:`R`
is the input ``radius`` parameter, and :math:`R_z =
    1.2196698912665045`.
For an optical system, the radius of the first zero represents the
limiting angular resolution and is approximately 1.22 * lambda / D,
where lambda is the wavelength of the light and D is the diameter of
the aperture.
See [1]_ for more details about the Airy disk.
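
    Examples
    --------
    A minimal usage sketch rendering the Airy pattern on a pixel grid
    (evaluating this model requires ``scipy``); the parameter values below
    are arbitrary illustrative choices:

    .. plot::
        :include-source:

        import numpy as np
        import matplotlib.pyplot as plt
        from astropy.modeling.models import AiryDisk2D

        y, x = np.mgrid[0:100, 0:100]
        airy = AiryDisk2D(amplitude=1., x_0=50., y_0=50., radius=10.)
        plt.figure()
        plt.imshow(airy(x, y), origin='lower', interpolation='nearest')
        plt.colorbar()
        plt.show()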
References
----------
.. [1] https://en.wikipedia.org/wiki/Airy_disk
"""
amplitude = Parameter(
default=1, description="Amplitude (peak value) of the Airy function"
)
x_0 = Parameter(default=0, description="X position of the peak")
y_0 = Parameter(default=0, description="Y position of the peak")
radius = Parameter(
default=1,
description="The radius of the Airy disk (radius of first zero crossing)",
)
_rz = None
_j1 = None
@classmethod
def evaluate(cls, x, y, amplitude, x_0, y_0, radius):
"""Two dimensional Airy model function."""
if cls._rz is None:
from scipy.special import j1, jn_zeros
cls._rz = jn_zeros(1, 1)[0] / np.pi
cls._j1 = j1
r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2) / (radius / cls._rz)
if isinstance(r, Quantity):
# scipy function cannot handle Quantity, so turn into array.
r = r.to_value(u.dimensionless_unscaled)
# Since r can be zero, we have to take care to treat that case
# separately so as not to raise a numpy warning
z = np.ones(r.shape)
rt = np.pi * r[r > 0]
z[r > 0] = (2.0 * cls._j1(rt) / rt) ** 2
if isinstance(amplitude, Quantity):
# make z quantity too, otherwise in-place multiplication fails.
z = Quantity(z, u.dimensionless_unscaled, copy=False, subok=True)
z *= amplitude
return z
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {
self.inputs[0]: self.x_0.input_unit,
self.inputs[1]: self.y_0.input_unit,
}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"radius": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Moffat1D(Fittable1DModel):
"""
One dimensional Moffat model.
Parameters
----------
amplitude : float
Amplitude of the model.
x_0 : float
x position of the maximum of the Moffat model.
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
See Also
--------
Gaussian1D, Box1D
Notes
-----
Model formula:
.. math::
f(x) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Moffat1D
plt.figure()
s1 = Moffat1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
            s1.gamma = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude of the model")
x_0 = Parameter(default=0, description="X position of maximum of Moffat model")
gamma = Parameter(default=1, description="Core width of Moffat model")
alpha = Parameter(default=1, description="Power index of the Moffat model")
@property
def fwhm(self):
"""
Moffat full width at half maximum.
Derivation of the formula is available in
`this notebook by Yoonsoo Bach
<https://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_.
"""
return 2.0 * np.abs(self.gamma) * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0)
@staticmethod
def evaluate(x, amplitude, x_0, gamma, alpha):
"""One dimensional Moffat model function."""
return amplitude * (1 + ((x - x_0) / gamma) ** 2) ** (-alpha)
@staticmethod
def fit_deriv(x, amplitude, x_0, gamma, alpha):
"""One dimensional Moffat model derivative with respect to parameters."""
fac = 1 + (x - x_0) ** 2 / gamma**2
d_A = fac ** (-alpha)
d_x_0 = 2 * amplitude * alpha * (x - x_0) * d_A / (fac * gamma**2)
d_gamma = 2 * amplitude * alpha * (x - x_0) ** 2 * d_A / (fac * gamma**3)
d_alpha = -amplitude * d_A * np.log(fac)
return [d_A, d_x_0, d_gamma, d_alpha]
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {self.inputs[0]: self.x_0.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"gamma": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Moffat2D(Fittable2DModel):
"""
Two dimensional Moffat model.
Parameters
----------
amplitude : float
Amplitude of the model.
x_0 : float
x position of the maximum of the Moffat model.
y_0 : float
y position of the maximum of the Moffat model.
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
See Also
--------
Gaussian2D, Box2D
Notes
-----
Model formula:
.. math::
f(x, y) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2} +
\\left(y - y_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha}
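
    Examples
    --------
    A minimal usage sketch evaluating the profile on a pixel grid; the
    parameter values below are arbitrary illustrative choices:

    .. plot::
        :include-source:

        import numpy as np
        import matplotlib.pyplot as plt
        from astropy.modeling.models import Moffat2D

        y, x = np.mgrid[0:100, 0:100]
        moffat = Moffat2D(amplitude=1., x_0=50., y_0=50., gamma=5., alpha=2.)
        plt.figure()
        plt.imshow(moffat(x, y), origin='lower', interpolation='nearest')
        plt.colorbar()
        plt.show()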
"""
amplitude = Parameter(default=1, description="Amplitude (peak value) of the model")
x_0 = Parameter(
default=0, description="X position of the maximum of the Moffat model"
)
y_0 = Parameter(
default=0, description="Y position of the maximum of the Moffat model"
)
gamma = Parameter(default=1, description="Core width of the Moffat model")
alpha = Parameter(default=1, description="Power index of the Moffat model")
@property
def fwhm(self):
"""
Moffat full width at half maximum.
Derivation of the formula is available in
`this notebook by Yoonsoo Bach
<https://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_.
"""
return 2.0 * np.abs(self.gamma) * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, gamma, alpha):
"""Two dimensional Moffat model function."""
rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma**2
return amplitude * (1 + rr_gg) ** (-alpha)
@staticmethod
def fit_deriv(x, y, amplitude, x_0, y_0, gamma, alpha):
"""Two dimensional Moffat model derivative with respect to parameters."""
rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma**2
d_A = (1 + rr_gg) ** (-alpha)
d_x_0 = 2 * amplitude * alpha * d_A * (x - x_0) / (gamma**2 * (1 + rr_gg))
d_y_0 = 2 * amplitude * alpha * d_A * (y - y_0) / (gamma**2 * (1 + rr_gg))
d_alpha = -amplitude * d_A * np.log(1 + rr_gg)
d_gamma = 2 * amplitude * alpha * d_A * rr_gg / (gamma * (1 + rr_gg))
return [d_A, d_x_0, d_y_0, d_gamma, d_alpha]
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
else:
return {
self.inputs[0]: self.x_0.input_unit,
self.inputs[1]: self.y_0.input_unit,
}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"gamma": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Sersic2D(Fittable2DModel):
r"""
Two dimensional Sersic surface brightness profile.
Parameters
----------
amplitude : float
Surface brightness at r_eff.
r_eff : float
Effective (half-light) radius
n : float
Sersic Index.
x_0 : float, optional
x position of the center.
y_0 : float, optional
y position of the center.
ellip : float, optional
Ellipticity.
theta : float or `~astropy.units.Quantity`, optional
The rotation angle as an angular quantity
(`~astropy.units.Quantity` or `~astropy.coordinates.Angle`)
or a value in radians (as a float). The rotation angle
increases counterclockwise from the positive x axis.
See Also
--------
Gaussian2D, Moffat2D
Notes
-----
Model formula:
.. math::
I(x,y) = I(r) = I_e\exp\left\{
-b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right]
\right\}
The constant :math:`b_n` is defined such that :math:`r_e` contains half the total
luminosity, and can be solved for numerically.
.. math::
\Gamma(2n) = 2\gamma (2n,b_n)
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Sersic2D
import matplotlib.pyplot as plt
x,y = np.meshgrid(np.arange(100), np.arange(100))
mod = Sersic2D(amplitude = 1, r_eff = 25, n=4, x_0=50, y_0=50,
ellip=.5, theta=-1)
img = mod(x, y)
log_img = np.log10(img)
plt.figure()
plt.imshow(log_img, origin='lower', interpolation='nearest',
vmin=-1, vmax=2)
plt.xlabel('x')
plt.ylabel('y')
cbar = plt.colorbar()
cbar.set_label('Log Brightness', rotation=270, labelpad=25)
cbar.set_ticks([-1, 0, 1, 2])
plt.show()
References
----------
.. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html
"""
amplitude = Parameter(default=1, description="Surface brightness at r_eff")
r_eff = Parameter(default=1, description="Effective (half-light) radius")
n = Parameter(default=4, description="Sersic Index")
x_0 = Parameter(default=0, description="X position of the center")
y_0 = Parameter(default=0, description="Y position of the center")
ellip = Parameter(default=0, description="Ellipticity")
theta = Parameter(
default=0.0,
description=(
"Rotation angle either as a float (in radians) or a |Quantity| angle"
),
)
_gammaincinv = None
@classmethod
def evaluate(cls, x, y, amplitude, r_eff, n, x_0, y_0, ellip, theta):
"""Two dimensional Sersic profile function."""
if cls._gammaincinv is None:
from scipy.special import gammaincinv
cls._gammaincinv = gammaincinv
bn = cls._gammaincinv(2.0 * n, 0.5)
a, b = r_eff, (1 - ellip) * r_eff
cos_theta, sin_theta = np.cos(theta), np.sin(theta)
x_maj = (x - x_0) * cos_theta + (y - y_0) * sin_theta
x_min = -(x - x_0) * sin_theta + (y - y_0) * cos_theta
z = np.sqrt((x_maj / a) ** 2 + (x_min / b) ** 2)
return amplitude * np.exp(-bn * (z ** (1 / n) - 1))
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {
self.inputs[0]: self.x_0.input_unit,
self.inputs[1]: self.y_0.input_unit,
}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"r_eff": inputs_unit[self.inputs[0]],
"theta": u.rad,
"amplitude": outputs_unit[self.outputs[0]],
}
class KingProjectedAnalytic1D(Fittable1DModel):
"""
Projected (surface density) analytic King Model.
Parameters
----------
amplitude : float
Amplitude or scaling factor.
r_core : float
Core radius (f(r_c) ~ 0.5 f_0)
r_tide : float
Tidal radius.
Notes
-----
This model approximates a King model with an analytic function. The derivation of this
equation can be found in King '62 (equation 14). This is just an approximation of the
full model and the parameters derived from this model should be taken with caution.
    It usually works for models with a concentration parameter
    (c = log10(r_t/r_c)) smaller than 2.
Model formula:
.. math::
f(x) = A r_c^2 \\left(\\frac{1}{\\sqrt{(x^2 + r_c^2)}} -
\\frac{1}{\\sqrt{(r_t^2 + r_c^2)}}\\right)^2
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import KingProjectedAnalytic1D
import matplotlib.pyplot as plt
plt.figure()
rt_list = [1, 2, 5, 10, 20]
for rt in rt_list:
r = np.linspace(0.1, rt, 100)
mod = KingProjectedAnalytic1D(amplitude = 1, r_core = 1., r_tide = rt)
sig = mod(r)
plt.loglog(r, sig/sig[0], label=f"c ~ {mod.concentration:0.2f}")
plt.xlabel("r")
plt.ylabel(r"$\\sigma/\\sigma_0$")
plt.legend()
plt.show()
References
----------
.. [1] https://ui.adsabs.harvard.edu/abs/1962AJ.....67..471K
"""
amplitude = Parameter(
default=1,
bounds=(FLOAT_EPSILON, None),
description="Amplitude or scaling factor",
)
r_core = Parameter(
default=1, bounds=(FLOAT_EPSILON, None), description="Core Radius"
)
r_tide = Parameter(
default=2, bounds=(FLOAT_EPSILON, None), description="Tidal Radius"
)
@property
def concentration(self):
"""Concentration parameter of the king model."""
return np.log10(np.abs(self.r_tide / self.r_core))
@staticmethod
def _core_func(x, r_core, r_tide, power=1):
return (
1.0 / np.sqrt(x**2 + r_core**2) ** power
- 1.0 / np.sqrt(r_tide**2 + r_core**2) ** power
)
@staticmethod
def _filter(x, r_tide, result):
"""Set invalid r values to 0"""
bounds = (x >= r_tide) | (x < 0)
result[bounds] = result[bounds] * 0.0
def evaluate(self, x, amplitude, r_core, r_tide):
"""
Analytic King model function.
"""
result = amplitude * r_core**2 * self._core_func(x, r_core, r_tide) ** 2
self._filter(x, r_tide, result)
return result
def fit_deriv(self, x, amplitude, r_core, r_tide):
"""
Analytic King model function derivatives.
"""
d_amplitude = r_core**2 * self._core_func(x, r_core, r_tide) ** 2
self._filter(x, r_tide, d_amplitude)
d_r_core = (
-2.0
* amplitude
* r_core**3
* self._core_func(x, r_core, r_tide, power=3)
* self._core_func(x, r_core, r_tide)
+ 2 * amplitude * r_core * self._core_func(x, r_core, r_tide) ** 2
)
self._filter(x, r_tide, d_r_core)
d_r_tide = (
2 * amplitude * r_core**2 * r_tide * self._core_func(x, r_core, r_tide)
) / (r_core**2 + r_tide**2) ** (3 / 2)
self._filter(x, r_tide, d_r_tide)
return [d_amplitude, d_r_core, d_r_tide]
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
The model is not defined for r > r_tide.
``(r_low, r_high)``
"""
return (0 * self.r_tide, 1 * self.r_tide)
@property
def input_units(self):
if self.r_core.input_unit is None:
return None
return {self.inputs[0]: self.r_core.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"r_core": inputs_unit[self.inputs[0]],
"r_tide": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Logarithmic1D(Fittable1DModel):
"""
One dimensional logarithmic model.
Parameters
----------
amplitude : float, optional
tau : float, optional
See Also
--------
Exponential1D, Gaussian1D
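Examples
--------
A minimal evaluation sketch; the model computes ``amplitude * ln(x / tau)``
(assuming the class is re-exported through `astropy.modeling.models`):
>>> from astropy.modeling.models import Logarithmic1D
>>> log_mod = Logarithmic1D(amplitude=2, tau=1)
>>> value = log_mod(1.0)  # 2 * ln(1 / 1) = 0.0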
"""
amplitude = Parameter(default=1)
tau = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, tau):
return amplitude * np.log(x / tau)
@staticmethod
def fit_deriv(x, amplitude, tau):
d_amplitude = np.log(x / tau)
d_tau = np.zeros(x.shape) - (amplitude / tau)
return [d_amplitude, d_tau]
@property
def inverse(self):
new_amplitude = self.tau
new_tau = self.amplitude
return Exponential1D(amplitude=new_amplitude, tau=new_tau)
def _tau_validator(self, val):
if np.all(val == 0):
raise ValueError("0 is not an allowed value for tau")
tau._validator = _tau_validator
@property
def input_units(self):
if self.tau.input_unit is None:
return None
return {self.inputs[0]: self.tau.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"tau": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Exponential1D(Fittable1DModel):
"""
One dimensional exponential model.
Parameters
----------
amplitude : float, optional
tau : float, optional
See Also
--------
Logarithmic1D, Gaussian1D
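Examples
--------
A minimal evaluation sketch; the model computes ``amplitude * exp(x / tau)``
(assuming the class is re-exported through `astropy.modeling.models`):
>>> from astropy.modeling.models import Exponential1D
>>> exp_mod = Exponential1D(amplitude=2, tau=1)
>>> value = exp_mod(0.0)  # 2 * exp(0 / 1) = 2.0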
"""
amplitude = Parameter(default=1)
tau = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, tau):
return amplitude * np.exp(x / tau)
@staticmethod
def fit_deriv(x, amplitude, tau):
"""Derivative with respect to parameters."""
d_amplitude = np.exp(x / tau)
d_tau = -amplitude * (x / tau**2) * np.exp(x / tau)
return [d_amplitude, d_tau]
@property
def inverse(self):
new_amplitude = self.tau
new_tau = self.amplitude
return Logarithmic1D(amplitude=new_amplitude, tau=new_tau)
def _tau_validator(self, val):
"""tau cannot be 0."""
if np.all(val == 0):
raise ValueError("0 is not an allowed value for tau")
tau._validator = _tau_validator
@property
def input_units(self):
if self.tau.input_unit is None:
return None
return {self.inputs[0]: self.tau.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"tau": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
"""
Implements projections--particularly sky projections defined in WCS Paper II
[1]_.
All angles are set and displayed in degrees but internally the computations are
performed in radians. All functions expect inputs and outputs in degrees.
References
----------
.. [1] Calabretta, M.R., Greisen, E.W., 2002, A&A, 395, 1077 (Paper II)
"""
import abc
from itertools import chain, product
import numpy as np
from astropy import units as u
from astropy import wcs
from .core import Model
from .parameters import InputParameterError, Parameter
from .utils import _to_orig_unit, _to_radian
# List of tuples of the form
# (long class name without suffix, short WCSLIB projection code):
_PROJ_NAME_CODE = [
("ZenithalPerspective", "AZP"),
("SlantZenithalPerspective", "SZP"),
("Gnomonic", "TAN"),
("Stereographic", "STG"),
("SlantOrthographic", "SIN"),
("ZenithalEquidistant", "ARC"),
("ZenithalEqualArea", "ZEA"),
("Airy", "AIR"),
("CylindricalPerspective", "CYP"),
("CylindricalEqualArea", "CEA"),
("PlateCarree", "CAR"),
("Mercator", "MER"),
("SansonFlamsteed", "SFL"),
("Parabolic", "PAR"),
("Molleweide", "MOL"),
("HammerAitoff", "AIT"),
("ConicPerspective", "COP"),
("ConicEqualArea", "COE"),
("ConicEquidistant", "COD"),
("ConicOrthomorphic", "COO"),
("BonneEqualArea", "BON"),
("Polyconic", "PCO"),
("TangentialSphericalCube", "TSC"),
("COBEQuadSphericalCube", "CSC"),
("QuadSphericalCube", "QSC"),
("HEALPix", "HPX"),
("HEALPixPolar", "XPH"),
]
_NOT_SUPPORTED_PROJ_CODES = ["ZPN"]
_PROJ_NAME_CODE_MAP = dict(_PROJ_NAME_CODE)
projcodes = [code for _, code in _PROJ_NAME_CODE]
__all__ = [
"Projection",
"Pix2SkyProjection",
"Sky2PixProjection",
"Zenithal",
"Cylindrical",
"PseudoCylindrical",
"Conic",
"PseudoConic",
"QuadCube",
"HEALPix",
"AffineTransformation2D",
"projcodes",
] + list(map("_".join, product(["Pix2Sky", "Sky2Pix"], chain(*_PROJ_NAME_CODE))))
class _ParameterDS(Parameter):
"""
Same as `Parameter` but can indicate its modified status via the ``dirty``
property. This flag also gets set automatically when a parameter is
modified.
This ability to track a parameter's modified status is needed so that the
automatic update of WCSLIB's prjprm structure (which may be a more
time-intensive operation) happens *only as required*.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dirty = True
def validate(self, value):
super().validate(value)
self.dirty = True
class Projection(Model):
"""Base class for all sky projections."""
# Radius of the generating sphere.
# This sets the circumference to 360 deg so that arc length is measured in deg.
r0 = 180 * u.deg / np.pi
_separable = False
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._prj = wcs.Prjprm()
@property
@abc.abstractmethod
def inverse(self):
"""
Inverse projection--all projection models must provide an inverse.
"""
@property
def prjprm(self):
"""WCSLIB ``prjprm`` structure."""
self._update_prj()
return self._prj
def _update_prj(self):
"""
A default updater for projection's pv.
.. warning::
This method assumes that PV0 is never modified. If a projection
that uses PV0 is ever implemented in this module, that projection
class should override this method.
.. warning::
This method assumes that the order in which PVi values (i>0)
are to be assigned is identical to the order of model parameters
in ``param_names``. That is, pv[1] = model.parameters[0], ...
"""
if not self.param_names:
return
pv = []
dirty = False
for p in self.param_names:
param = getattr(self, p)
pv.append(float(param.value))
dirty |= param.dirty
param.dirty = False
if dirty:
self._prj.pv = None, *pv
self._prj.set()
def __getstate__(self):
return {
"p": self.parameters,
"fixed": self.fixed,
"tied": self.tied,
"bounds": self.bounds,
}
def __setstate__(self, state):
params = state.pop("p")
return self.__init__(*params, **state)
class Pix2SkyProjection(Projection):
"""Base class for all Pix2Sky projections."""
n_inputs = 2
n_outputs = 2
_input_units_strict = True
_input_units_allow_dimensionless = True
def __new__(cls, *args, **kwargs):
long_name = cls.name.split("_")[1]
cls.prj_code = _PROJ_NAME_CODE_MAP[long_name]
return super().__new__(cls)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._prj.code = self.prj_code
self._update_prj()
if not self.param_names:
# force initial call to Prjprm.set() for projections
# with no parameters:
self._prj.set()
self.inputs = ("x", "y")
self.outputs = ("phi", "theta")
@property
def input_units(self):
return {self.inputs[0]: u.deg, self.inputs[1]: u.deg}
@property
def return_units(self):
return {self.outputs[0]: u.deg, self.outputs[1]: u.deg}
def evaluate(self, x, y, *args, **kwargs):
self._update_prj()
return self._prj.prjx2s(x, y)
@property
def inverse(self):
pv = [getattr(self, param).value for param in self.param_names]
return self._inv_cls(*pv)
class Sky2PixProjection(Projection):
"""Base class for all Sky2Pix projections."""
n_inputs = 2
n_outputs = 2
_input_units_strict = True
_input_units_allow_dimensionless = True
def __new__(cls, *args, **kwargs):
long_name = cls.name.split("_")[1]
cls.prj_code = _PROJ_NAME_CODE_MAP[long_name]
return super().__new__(cls)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._prj.code = self.prj_code
self._update_prj()
if not self.param_names:
# force initial call to Prjprm.set() for projections
# without parameters:
self._prj.set()
self.inputs = ("phi", "theta")
self.outputs = ("x", "y")
@property
def input_units(self):
return {self.inputs[0]: u.deg, self.inputs[1]: u.deg}
@property
def return_units(self):
return {self.outputs[0]: u.deg, self.outputs[1]: u.deg}
def evaluate(self, phi, theta, *args, **kwargs):
self._update_prj()
return self._prj.prjs2x(phi, theta)
@property
def inverse(self):
pv = [getattr(self, param).value for param in self.param_names]
return self._inv_cls(*pv)
class Zenithal(Projection):
r"""Base class for all Zenithal projections.
Zenithal (or azimuthal) projections map the sphere directly onto a
plane. All zenithal projections are specified by defining the
radius as a function of native latitude, :math:`R_\theta`.
The pixel-to-sky transformation is defined as:
.. math::
\phi &= \arg(-y, x) \\
R_\theta &= \sqrt{x^2 + y^2}
and the inverse (sky-to-pixel) is defined as:
.. math::
x &= R_\theta \sin \phi \\
y &= R_\theta \cos \phi
"""
class Pix2Sky_ZenithalPerspective(Pix2SkyProjection, Zenithal):
r"""
Zenithal perspective projection - pixel to sky.
Corresponds to the ``AZP`` projection in FITS WCS.
.. math::
\phi &= \arg(-y \cos \gamma, x) \\
\theta &= \left\{\genfrac{}{}{0pt}{}{\psi - \omega}{\psi + \omega + 180^{\circ}}\right.
where:
.. math::
\psi &= \arg(\rho, 1) \\
\omega &= \sin^{-1}\left(\frac{\rho \mu}{\sqrt{\rho^2 + 1}}\right) \\
\rho &= \frac{R}{\frac{180^{\circ}}{\pi}(\mu + 1) + y \sin \gamma} \\
R &= \sqrt{x^2 + y^2 \cos^2 \gamma}
Parameters
----------
mu : float
Distance from point of projection to center of sphere
in spherical radii, μ. Default is 0.
gamma : float
Look angle γ in degrees. Default is 0°.
"""
mu = _ParameterDS(
default=0.0, description="Distance from point of projection to center of sphere"
)
gamma = _ParameterDS(
default=0.0,
getter=_to_orig_unit,
setter=_to_radian,
description="Look angle γ in degrees (Default = 0°)",
)
def _mu_validator(self, value):
if np.any(np.equal(value, -1.0)):
raise InputParameterError(
"Zenithal perspective projection is not defined for mu = -1"
)
mu._validator = _mu_validator
class Sky2Pix_ZenithalPerspective(Sky2PixProjection, Zenithal):
r"""
Zenithal perspective projection - sky to pixel.
Corresponds to the ``AZP`` projection in FITS WCS.
.. math::
x &= R \sin \phi \\
y &= -R \sec \gamma \cos \theta
where:
.. math::
R = \frac{180^{\circ}}{\pi} \frac{(\mu + 1) \cos \theta}
{(\mu + \sin \theta) + \cos \theta \cos \phi \tan \gamma}
Parameters
----------
mu : float
Distance from point of projection to center of sphere
in spherical radii, μ. Default is 0.
gamma : float
Look angle γ in degrees. Default is 0°.
"""
mu = _ParameterDS(
default=0.0, description="Distance from point of projection to center of sphere"
)
gamma = _ParameterDS(
default=0.0,
getter=_to_orig_unit,
setter=_to_radian,
description="Look angle γ in degrees (Default=0°)",
)
def _mu_validator(self, value):
if np.any(np.equal(value, -1.0)):
raise InputParameterError(
"Zenithal perspective projection is not defined for mu = -1"
)
mu._validator = _mu_validator
class Pix2Sky_SlantZenithalPerspective(Pix2SkyProjection, Zenithal):
r"""
Slant zenithal perspective projection - pixel to sky.
Corresponds to the ``SZP`` projection in FITS WCS.
Parameters
----------
mu : float
Distance from point of projection to center of sphere
in spherical radii, μ. Default is 0.
phi0 : float
The longitude φ₀ of the reference point, in degrees. Default
is 0°.
theta0 : float
The latitude θ₀ of the reference point, in degrees. Default
is 90°.
"""
mu = _ParameterDS(
default=0.0, description="Distance from point of projection to center of sphere"
)
phi0 = _ParameterDS(
default=0.0,
getter=_to_orig_unit,
setter=_to_radian,
description="The longitude φ₀ of the reference point in degrees (Default=0°)",
)
theta0 = _ParameterDS(
default=90.0,
getter=_to_orig_unit,
setter=_to_radian,
description="The latitude θ₀ of the reference point, in degrees (Default=0°)",
)
def _mu_validator(self, value):
if np.any(np.equal(value, -1.0)):
raise InputParameterError(
"Zenithal perspective projection is not defined for mu = -1"
)
mu._validator = _mu_validator
class Sky2Pix_SlantZenithalPerspective(Sky2PixProjection, Zenithal):
r"""
Slant zenithal perspective projection - sky to pixel.
Corresponds to the ``SZP`` projection in FITS WCS.
Parameters
----------
mu : float
distance from point of projection to center of sphere
in spherical radii, μ. Default is 0.
phi0 : float
The longitude φ₀ of the reference point, in degrees. Default
is 0°.
theta0 : float
The latitude θ₀ of the reference point, in degrees. Default
is 90°.
"""
mu = _ParameterDS(
default=0.0, description="Distance from point of projection to center of sphere"
)
phi0 = _ParameterDS(
default=0.0,
getter=_to_orig_unit,
setter=_to_radian,
description="The longitude φ₀ of the reference point in degrees",
)
theta0 = _ParameterDS(
default=90.0,
getter=_to_orig_unit,
setter=_to_radian,
description="The latitude θ₀ of the reference point, in degrees",
)
def _mu_validator(self, value):
if np.any(np.equal(value, -1.0)):
raise InputParameterError(
"Zenithal perspective projection is not defined for mu = -1"
)
mu._validator = _mu_validator
class Pix2Sky_Gnomonic(Pix2SkyProjection, Zenithal):
r"""
Gnomonic projection - pixel to sky.
Corresponds to the ``TAN`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
\theta = \tan^{-1}\left(\frac{180^{\circ}}{\pi R_\theta}\right)
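Examples
--------
A minimal round-trip sketch (assuming the projection classes are
re-exported through `astropy.modeling.models`; TAN takes no parameters):
>>> from astropy.modeling.models import Pix2Sky_Gnomonic
>>> tan = Pix2Sky_Gnomonic()
>>> phi, theta = tan(30.0, 40.0)
>>> x, y = tan.inverse(phi, theta)  # recovers (30.0, 40.0) up to rounding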
"""
class Sky2Pix_Gnomonic(Sky2PixProjection, Zenithal):
r"""
Gnomonic Projection - sky to pixel.
Corresponds to the ``TAN`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta = \frac{180^{\circ}}{\pi}\cot \theta
"""
class Pix2Sky_Stereographic(Pix2SkyProjection, Zenithal):
r"""
Stereographic Projection - pixel to sky.
Corresponds to the ``STG`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
\theta = 90^{\circ} - 2 \tan^{-1}\left(\frac{\pi R_\theta}{360^{\circ}}\right)
"""
class Sky2Pix_Stereographic(Sky2PixProjection, Zenithal):
r"""
Stereographic Projection - sky to pixel.
Corresponds to the ``STG`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta = \frac{180^{\circ}}{\pi}\frac{2 \cos \theta}{1 + \sin \theta}
"""
class Pix2Sky_SlantOrthographic(Pix2SkyProjection, Zenithal):
r"""
Slant orthographic projection - pixel to sky.
Corresponds to the ``SIN`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
The following transformation applies when :math:`\xi` and
:math:`\eta` are both zero.
.. math::
\theta = \cos^{-1}\left(\frac{\pi}{180^{\circ}}R_\theta\right)
The parameters :math:`\xi` and :math:`\eta` are defined from the
reference point :math:`(\phi_c, \theta_c)` as:
.. math::
\xi &= \cot \theta_c \sin \phi_c \\
\eta &= - \cot \theta_c \cos \phi_c
Parameters
----------
xi : float
Obliqueness parameter, ξ. Default is 0.0.
eta : float
Obliqueness parameter, η. Default is 0.0.
"""
xi = _ParameterDS(default=0.0, description="Obliqueness parameter")
eta = _ParameterDS(default=0.0, description="Obliqueness parameter")
class Sky2Pix_SlantOrthographic(Sky2PixProjection, Zenithal):
r"""
Slant orthographic projection - sky to pixel.
Corresponds to the ``SIN`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
The following transformation applies when :math:`\xi` and
:math:`\eta` are both zero.
.. math::
R_\theta = \frac{180^{\circ}}{\pi}\cos \theta
More generally, the full transformation is:
.. math::
x &= \frac{180^\circ}{\pi}[\cos \theta \sin \phi + \xi(1 - \sin \theta)] \\
y &= \frac{180^\circ}{\pi}[\cos \theta \cos \phi + \eta(1 - \sin \theta)]
"""
xi = _ParameterDS(default=0.0)
eta = _ParameterDS(default=0.0)
class Pix2Sky_ZenithalEquidistant(Pix2SkyProjection, Zenithal):
r"""
Zenithal equidistant projection - pixel to sky.
Corresponds to the ``ARC`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
\theta = 90^\circ - R_\theta
"""
class Sky2Pix_ZenithalEquidistant(Sky2PixProjection, Zenithal):
r"""
Zenithal equidistant projection - sky to pixel.
Corresponds to the ``ARC`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta = 90^\circ - \theta
"""
class Pix2Sky_ZenithalEqualArea(Pix2SkyProjection, Zenithal):
r"""
Zenithal equal area projection - pixel to sky.
Corresponds to the ``ZEA`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
\theta = 90^\circ - 2 \sin^{-1} \left(\frac{\pi R_\theta}{360^\circ}\right)
"""
class Sky2Pix_ZenithalEqualArea(Sky2PixProjection, Zenithal):
r"""
Zenithal equal area projection - sky to pixel.
Corresponds to the ``ZEA`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta &= \frac{180^\circ}{\pi} \sqrt{2(1 - \sin\theta)} \\
&= \frac{360^\circ}{\pi} \sin\left(\frac{90^\circ - \theta}{2}\right)
"""
class Pix2Sky_Airy(Pix2SkyProjection, Zenithal):
r"""
Airy projection - pixel to sky.
Corresponds to the ``AIR`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
Parameters
----------
theta_b : float
The latitude :math:`\theta_b` at which to minimize the error,
in degrees. Default is 90°.
"""
theta_b = _ParameterDS(default=90.0)
class Sky2Pix_Airy(Sky2PixProjection, Zenithal):
r"""
Airy - sky to pixel.
Corresponds to the ``AIR`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta = -2 \frac{180^\circ}{\pi}\left(\frac{\ln(\cos \xi)}{\tan \xi} +
\frac{\ln(\cos \xi_b)}{\tan^2 \xi_b} \tan \xi \right)
where:
.. math::
\xi &= \frac{90^\circ - \theta}{2} \\
\xi_b &= \frac{90^\circ - \theta_b}{2}
Parameters
----------
theta_b : float
The latitude :math:`\theta_b` at which to minimize the error,
in degrees. Default is 90°.
"""
theta_b = _ParameterDS(
default=90.0,
description="The latitude at which to minimize the error,in degrees",
)
class Cylindrical(Projection):
r"""Base class for Cylindrical projections.
Cylindrical projections are so-named because the surface of
projection is a cylinder.
"""
_separable = True
class Pix2Sky_CylindricalPerspective(Pix2SkyProjection, Cylindrical):
r"""
Cylindrical perspective - pixel to sky.
Corresponds to the ``CYP`` projection in FITS WCS.
.. math::
\phi &= \frac{x}{\lambda} \\
\theta &= \arg(1, \eta) + \sin^{-1}\left(\frac{\eta \mu}{\sqrt{\eta^2 + 1}}\right)
where:
.. math::
\eta = \frac{\pi}{180^{\circ}}\frac{y}{\mu + \lambda}
Parameters
----------
mu : float
Distance from center of sphere in the direction opposite the
projected surface, in spherical radii, μ. Default is 1.
lam : float
Radius of the cylinder in spherical radii, λ. Default is 1.
"""
mu = _ParameterDS(default=1.0)
lam = _ParameterDS(default=1.0)
def _mu_validator(self, value):
if np.any(value == -self.lam):
raise InputParameterError("CYP projection is not defined for mu = -lambda")
mu._validator = _mu_validator
def _lam_validator(self, value):
if np.any(value == -self.mu):
raise InputParameterError("CYP projection is not defined for lambda = -mu")
lam._validator = _lam_validator
class Sky2Pix_CylindricalPerspective(Sky2PixProjection, Cylindrical):
r"""
Cylindrical Perspective - sky to pixel.
Corresponds to the ``CYP`` projection in FITS WCS.
.. math::
x &= \lambda \phi \\
y &= \frac{180^{\circ}}{\pi}\left(\frac{\mu + \lambda}{\mu + \cos \theta}\right)\sin \theta
Parameters
----------
mu : float
Distance from center of sphere in the direction opposite the
projected surface, in spherical radii, μ. Default is 1.
lam : float
Radius of the cylinder in spherical radii, λ. Default is 1.
"""
mu = _ParameterDS(
default=1.0, description="Distance from center of sphere in spherical radii"
)
lam = _ParameterDS(
default=1.0, description="Radius of the cylinder in spherical radii"
)
def _mu_validator(self, value):
if np.any(value == -self.lam):
raise InputParameterError("CYP projection is not defined for mu = -lambda")
mu._validator = _mu_validator
def _lam_validator(self, value):
if np.any(value == -self.mu):
raise InputParameterError("CYP projection is not defined for lambda = -mu")
lam._validator = _lam_validator
class Pix2Sky_CylindricalEqualArea(Pix2SkyProjection, Cylindrical):
r"""
Cylindrical equal area projection - pixel to sky.
Corresponds to the ``CEA`` projection in FITS WCS.
.. math::
\phi &= x \\
\theta &= \sin^{-1}\left(\frac{\pi}{180^{\circ}}\lambda y\right)
Parameters
----------
lam : float
Radius of the cylinder in spherical radii, λ. Default is 1.
"""
lam = _ParameterDS(default=1)
class Sky2Pix_CylindricalEqualArea(Sky2PixProjection, Cylindrical):
r"""
Cylindrical equal area projection - sky to pixel.
Corresponds to the ``CEA`` projection in FITS WCS.
.. math::
x &= \phi \\
y &= \frac{180^{\circ}}{\pi}\frac{\sin \theta}{\lambda}
Parameters
----------
lam : float
Radius of the cylinder in spherical radii, λ. Default is 1.
"""
lam = _ParameterDS(default=1)
class Pix2Sky_PlateCarree(Pix2SkyProjection, Cylindrical):
r"""
Plate carrée projection - pixel to sky.
Corresponds to the ``CAR`` projection in FITS WCS.
.. math::
\phi &= x \\
\theta &= y
"""
@staticmethod
def evaluate(x, y):
# The intermediate variables are only used here for clarity
phi = np.array(x)
theta = np.array(y)
return phi, theta
class Sky2Pix_PlateCarree(Sky2PixProjection, Cylindrical):
r"""
Plate carrée projection - sky to pixel.
Corresponds to the ``CAR`` projection in FITS WCS.
.. math::
x &= \phi \\
y &= \theta
"""
@staticmethod
def evaluate(phi, theta):
# The intermediate variables are only used here for clarity
x = np.array(phi)
y = np.array(theta)
return x, y
class Pix2Sky_Mercator(Pix2SkyProjection, Cylindrical):
r"""
Mercator - pixel to sky.
Corresponds to the ``MER`` projection in FITS WCS.
.. math::
\phi &= x \\
\theta &= 2 \tan^{-1}\left(e^{y \pi / 180^{\circ}}\right)-90^{\circ}
"""
class Sky2Pix_Mercator(Sky2PixProjection, Cylindrical):
r"""
Mercator - sky to pixel.
Corresponds to the ``MER`` projection in FITS WCS.
.. math::
x &= \phi \\
y &= \frac{180^{\circ}}{\pi}\ln \tan \left(\frac{90^{\circ} + \theta}{2}\right)
"""
class PseudoCylindrical(Projection):
r"""Base class for pseudocylindrical projections.
Pseudocylindrical projections are like cylindrical projections
except the parallels of latitude are projected at diminishing
lengths toward the polar regions in order to reduce lateral
distortion there. Consequently, the meridians are curved.
"""
_separable = True
class Pix2Sky_SansonFlamsteed(Pix2SkyProjection, PseudoCylindrical):
r"""
Sanson-Flamsteed projection - pixel to sky.
Corresponds to the ``SFL`` projection in FITS WCS.
.. math::
\phi &= \frac{x}{\cos y} \\
\theta &= y
"""
class Sky2Pix_SansonFlamsteed(Sky2PixProjection, PseudoCylindrical):
r"""
Sanson-Flamsteed projection - sky to pixel.
Corresponds to the ``SFL`` projection in FITS WCS.
.. math::
x &= \phi \cos \theta \\
y &= \theta
"""
class Pix2Sky_Parabolic(Pix2SkyProjection, PseudoCylindrical):
r"""
Parabolic projection - pixel to sky.
Corresponds to the ``PAR`` projection in FITS WCS.
.. math::
\phi &= \frac{180^\circ}{\pi} \frac{x}{1 - 4(y / 180^\circ)^2} \\
\theta &= 3 \sin^{-1}\left(\frac{y}{180^\circ}\right)
"""
class Sky2Pix_Parabolic(Sky2PixProjection, PseudoCylindrical):
r"""
Parabolic projection - sky to pixel.
Corresponds to the ``PAR`` projection in FITS WCS.
.. math::
x &= \phi \left(2\cos\frac{2\theta}{3} - 1\right) \\
y &= 180^\circ \sin \frac{\theta}{3}
"""
class Pix2Sky_Molleweide(Pix2SkyProjection, PseudoCylindrical):
r"""
Molleweide's projection - pixel to sky.
Corresponds to the ``MOL`` projection in FITS WCS.
.. math::
\phi &= \frac{\pi x}{2 \sqrt{2 - \left(\frac{\pi}{180^\circ}y\right)^2}} \\
\theta &= \sin^{-1}\left(
\frac{1}{90^\circ}\sin^{-1}\left(\frac{\pi}{180^\circ}\frac{y}{\sqrt{2}}\right)
+ \frac{y}{180^\circ}\sqrt{2 - \left(\frac{\pi}{180^\circ}y\right)^2}
\right)
"""
class Sky2Pix_Molleweide(Sky2PixProjection, PseudoCylindrical):
r"""
Molleweide's projection - sky to pixel.
Corresponds to the ``MOL`` projection in FITS WCS.
.. math::
x &= \frac{2 \sqrt{2}}{\pi} \phi \cos \gamma \\
y &= \sqrt{2} \frac{180^\circ}{\pi} \sin \gamma
where :math:`\gamma` is defined as the solution of the
transcendental equation:
.. math::
\sin \theta = \frac{\gamma}{90^\circ} + \frac{\sin 2 \gamma}{\pi}
"""
class Pix2Sky_HammerAitoff(Pix2SkyProjection, PseudoCylindrical):
r"""
Hammer-Aitoff projection - pixel to sky.
Corresponds to the ``AIT`` projection in FITS WCS.
.. math::
\phi &= 2 \arg \left(2Z^2 - 1, \frac{\pi}{180^\circ} \frac{Z}{2}x\right) \\
\theta &= \sin^{-1}\left(\frac{\pi}{180^\circ}yZ\right)
"""
class Sky2Pix_HammerAitoff(Sky2PixProjection, PseudoCylindrical):
r"""
Hammer-Aitoff projection - sky to pixel.
Corresponds to the ``AIT`` projection in FITS WCS.
.. math::
x &= 2 \gamma \cos \theta \sin \frac{\phi}{2} \\
y &= \gamma \sin \theta
where:
.. math::
\gamma = \frac{180^\circ}{\pi} \sqrt{\frac{2}{1 + \cos \theta \cos(\phi / 2)}}
"""
class Conic(Projection):
r"""Base class for conic projections.
In conic projections, the sphere is thought to be projected onto
the surface of a cone which is then opened out.
In a general sense, the pixel-to-sky transformation is defined as:
.. math::
\phi &= \arg\left(\frac{Y_0 - y}{R_\theta}, \frac{x}{R_\theta}\right) / C \\
R_\theta &= \mathrm{sign} \theta_a \sqrt{x^2 + (Y_0 - y)^2}
and the inverse (sky-to-pixel) is defined as:
.. math::
x &= R_\theta \sin (C \phi) \\
y &= R_\theta \cos (C \phi) + Y_0
where :math:`C` is the "constant of the cone":
.. math::
C = \frac{180^\circ \cos \theta}{\pi R_\theta}
"""
sigma = _ParameterDS(default=90.0, getter=_to_orig_unit, setter=_to_radian)
delta = _ParameterDS(default=0.0, getter=_to_orig_unit, setter=_to_radian)
class Pix2Sky_ConicPerspective(Pix2SkyProjection, Conic):
r"""
Colles' conic perspective projection - pixel to sky.
Corresponds to the ``COP`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \sin \theta_a \\
R_\theta &= \frac{180^\circ}{\pi} \cos \eta [ \cot \theta_a - \tan(\theta - \theta_a)] \\
Y_0 &= \frac{180^\circ}{\pi} \cos \eta \cot \theta_a
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Sky2Pix_ConicPerspective(Sky2PixProjection, Conic):
r"""
Colles' conic perspective projection - sky to pixel.
Corresponds to the ``COP`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \sin \theta_a \\
R_\theta &= \frac{180^\circ}{\pi} \cos \eta [ \cot \theta_a - \tan(\theta - \theta_a)] \\
Y_0 &= \frac{180^\circ}{\pi} \cos \eta \cot \theta_a
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Pix2Sky_ConicEqualArea(Pix2SkyProjection, Conic):
r"""
Albers' conic equal area projection - pixel to sky.
Corresponds to the ``COE`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \gamma / 2 \\
R_\theta &= \frac{180^\circ}{\pi} \frac{2}{\gamma}
\sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin \theta} \\
Y_0 &= \frac{180^\circ}{\pi} \frac{2}{\gamma}
\sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin((\theta_1 + \theta_2)/2)}
where:
.. math::
\gamma = \sin \theta_1 + \sin \theta_2
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Sky2Pix_ConicEqualArea(Sky2PixProjection, Conic):
r"""
Albers' conic equal area projection - sky to pixel.
Corresponds to the ``COE`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \gamma / 2 \\
R_\theta &= \frac{180^\circ}{\pi} \frac{2}{\gamma}
\sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin \theta} \\
Y_0 &= \frac{180^\circ}{\pi} \frac{2}{\gamma}
\sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin((\theta_1 + \theta_2)/2)}
where:
.. math::
\gamma = \sin \theta_1 + \sin \theta_2
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Pix2Sky_ConicEquidistant(Pix2SkyProjection, Conic):
r"""
Conic equidistant projection - pixel to sky.
Corresponds to the ``COD`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \frac{180^\circ}{\pi} \frac{\sin\theta_a\sin\eta}{\eta} \\
R_\theta &= \theta_a - \theta + \eta\cot\eta\cot\theta_a \\
Y_0 &= \eta\cot\eta\cot\theta_a
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Sky2Pix_ConicEquidistant(Sky2PixProjection, Conic):
r"""
Conic equidistant projection - sky to pixel.
Corresponds to the ``COD`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \frac{180^\circ}{\pi} \frac{\sin\theta_a\sin\eta}{\eta} \\
R_\theta &= \theta_a - \theta + \eta\cot\eta\cot\theta_a \\
Y_0 &= \eta\cot\eta\cot\theta_a
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Pix2Sky_ConicOrthomorphic(Pix2SkyProjection, Conic):
r"""
Conic orthomorphic projection - pixel to sky.
Corresponds to the ``COO`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \frac{\ln \left( \frac{\cos\theta_2}{\cos\theta_1} \right)}
{\ln \left[ \frac{\tan\left(\frac{90^\circ-\theta_2}{2}\right)}
{\tan\left(\frac{90^\circ-\theta_1}{2}\right)} \right] } \\
R_\theta &= \psi \left[ \tan \left( \frac{90^\circ - \theta}{2} \right) \right]^C \\
Y_0 &= \psi \left[ \tan \left( \frac{90^\circ - \theta_a}{2} \right) \right]^C
where:
.. math::
\psi = \frac{180^\circ}{\pi} \frac{\cos \theta}
{C\left[\tan\left(\frac{90^\circ-\theta}{2}\right)\right]^C}
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Sky2Pix_ConicOrthomorphic(Sky2PixProjection, Conic):
r"""
Conic orthomorphic projection - sky to pixel.
Corresponds to the ``COO`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \frac{\ln \left( \frac{\cos\theta_2}{\cos\theta_1} \right)}
{\ln \left[ \frac{\tan\left(\frac{90^\circ-\theta_2}{2}\right)}
{\tan\left(\frac{90^\circ-\theta_1}{2}\right)} \right] } \\
R_\theta &= \psi \left[ \tan \left( \frac{90^\circ - \theta}{2} \right) \right]^C \\
Y_0 &= \psi \left[ \tan \left( \frac{90^\circ - \theta_a}{2} \right) \right]^C
where:
.. math::
\psi = \frac{180^\circ}{\pi} \frac{\cos \theta}
{C\left[\tan\left(\frac{90^\circ-\theta}{2}\right)\right]^C}
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class PseudoConic(Projection):
r"""Base class for pseudoconic projections.
Pseudoconics are a subclass of conics with concentric parallels.
"""
class Pix2Sky_BonneEqualArea(Pix2SkyProjection, PseudoConic):
r"""
Bonne's equal area pseudoconic projection - pixel to sky.
Corresponds to the ``BON`` projection in FITS WCS.
.. math::
\phi &= \frac{\pi}{180^\circ} A_\phi R_\theta / \cos \theta \\
\theta &= Y_0 - R_\theta
where:
.. math::
R_\theta &= \mathrm{sign} \theta_1 \sqrt{x^2 + (Y_0 - y)^2} \\
A_\phi &= \arg\left(\frac{Y_0 - y}{R_\theta}, \frac{x}{R_\theta}\right)
Parameters
----------
theta1 : float
Bonne conformal latitude, in degrees.
"""
_separable = True
theta1 = _ParameterDS(default=0.0, getter=_to_orig_unit, setter=_to_radian)
class Sky2Pix_BonneEqualArea(Sky2PixProjection, PseudoConic):
r"""
Bonne's equal area pseudoconic projection - sky to pixel.
Corresponds to the ``BON`` projection in FITS WCS.
.. math::
x &= R_\theta \sin A_\phi \\
y &= -R_\theta \cos A_\phi + Y_0
where:
.. math::
A_\phi &= \frac{180^\circ}{\pi R_\theta} \phi \cos \theta \\
R_\theta &= Y_0 - \theta \\
Y_0 &= \frac{180^\circ}{\pi} \cot \theta_1 + \theta_1
Parameters
----------
theta1 : float
Bonne conformal latitude, in degrees.
"""
_separable = True
theta1 = _ParameterDS(
default=0.0,
getter=_to_orig_unit,
setter=_to_radian,
description="Bonne conformal latitude, in degrees",
)
class Pix2Sky_Polyconic(Pix2SkyProjection, PseudoConic):
r"""
Polyconic projection - pixel to sky.
Corresponds to the ``PCO`` projection in FITS WCS.
"""
class Sky2Pix_Polyconic(Sky2PixProjection, PseudoConic):
r"""
Polyconic projection - sky to pixel.
Corresponds to the ``PCO`` projection in FITS WCS.
"""
class QuadCube(Projection):
r"""Base class for quad cube projections.
Quadrilateralized spherical cube (quad-cube) projections belong to
the class of polyhedral projections in which the sphere is
projected onto the surface of an enclosing polyhedron.
The six faces of the quad-cube projections are numbered and laid
out as::
0
4 3 2 1 4 3 2
5
"""
class Pix2Sky_TangentialSphericalCube(Pix2SkyProjection, QuadCube):
r"""
Tangential spherical cube projection - pixel to sky.
Corresponds to the ``TSC`` projection in FITS WCS.
"""
class Sky2Pix_TangentialSphericalCube(Sky2PixProjection, QuadCube):
r"""
Tangential spherical cube projection - sky to pixel.
Corresponds to the ``TSC`` projection in FITS WCS.
"""
class Pix2Sky_COBEQuadSphericalCube(Pix2SkyProjection, QuadCube):
r"""
COBE quadrilateralized spherical cube projection - pixel to sky.
Corresponds to the ``CSC`` projection in FITS WCS.
"""
class Sky2Pix_COBEQuadSphericalCube(Sky2PixProjection, QuadCube):
r"""
COBE quadrilateralized spherical cube projection - sky to pixel.
Corresponds to the ``CSC`` projection in FITS WCS.
"""
class Pix2Sky_QuadSphericalCube(Pix2SkyProjection, QuadCube):
r"""
Quadrilateralized spherical cube projection - pixel to sky.
Corresponds to the ``QSC`` projection in FITS WCS.
"""
class Sky2Pix_QuadSphericalCube(Sky2PixProjection, QuadCube):
r"""
Quadrilateralized spherical cube projection - sky to pixel.
Corresponds to the ``QSC`` projection in FITS WCS.
"""
class HEALPix(Projection):
r"""Base class for HEALPix projections."""
class Pix2Sky_HEALPix(Pix2SkyProjection, HEALPix):
r"""
HEALPix - pixel to sky.
Corresponds to the ``HPX`` projection in FITS WCS.
Parameters
----------
H : float
The number of facets in longitude direction.
X : float
The number of facets in latitude direction.
"""
_separable = True
H = _ParameterDS(
default=4.0, description="The number of facets in longitude direction."
)
X = _ParameterDS(
default=3.0, description="The number of facets in latitude direction."
)
class Sky2Pix_HEALPix(Sky2PixProjection, HEALPix):
r"""
HEALPix projection - sky to pixel.
Corresponds to the ``HPX`` projection in FITS WCS.
Parameters
----------
H : float
The number of facets in longitude direction.
X : float
The number of facets in latitude direction.
"""
_separable = True
H = _ParameterDS(
default=4.0, description="The number of facets in longitude direction."
)
X = _ParameterDS(
default=3.0, description="The number of facets in latitude direction."
)
class Pix2Sky_HEALPixPolar(Pix2SkyProjection, HEALPix):
r"""
HEALPix polar, aka "butterfly" projection - pixel to sky.
Corresponds to the ``XPH`` projection in FITS WCS.
"""
class Sky2Pix_HEALPixPolar(Sky2PixProjection, HEALPix):
r"""
HEALPix polar, aka "butterfly" projection - sky to pixel.
Corresponds to the ``XPH`` projection in FITS WCS.
"""
class AffineTransformation2D(Model):
"""
Perform an affine transformation in 2 dimensions.
Parameters
----------
matrix : array
A 2x2 matrix specifying the linear transformation to apply to the
inputs
translation : array
A 2D vector (given as either a 2x1 or 1x2 array) specifying a
translation to apply to the inputs
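Examples
--------
A short sketch of a 90-degree rotation followed by a unit shift in x
(values purely illustrative; the class is assumed to be re-exported
through `astropy.modeling.models`):
>>> from astropy.modeling.models import AffineTransformation2D
>>> aff = AffineTransformation2D(matrix=[[0, -1], [1, 0]], translation=[1, 0])
>>> x, y = aff(1.0, 0.0)  # rotates (1, 0) to (0, 1), then shifts to (1, 1)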
"""
n_inputs = 2
n_outputs = 2
standard_broadcasting = False
_separable = False
matrix = Parameter(default=[[1.0, 0.0], [0.0, 1.0]])
translation = Parameter(default=[0.0, 0.0])
def _matrix_validator(self, value):
"""Validates that the input matrix is a 2x2 2D array."""
if np.shape(value) != (2, 2):
raise InputParameterError(
"Expected transformation matrix to be a 2x2 array"
)
matrix._validator = _matrix_validator
def _translation_validator(self, value):
"""
Validates that the translation vector is a 2D vector. This allows
either a "row" vector or a "column" vector where in the latter case the
resultant Numpy array has ``ndim=2`` but the shape is ``(1, 2)``.
"""
if not (
(np.ndim(value) == 1 and np.shape(value) == (2,))
or (np.ndim(value) == 2 and np.shape(value) == (1, 2))
):
raise InputParameterError(
"Expected translation vector to be a 2 element row or column "
"vector array"
)
translation._validator = _translation_validator
def __init__(self, matrix=matrix, translation=translation, **kwargs):
super().__init__(matrix=matrix, translation=translation, **kwargs)
self.inputs = ("x", "y")
self.outputs = ("x", "y")
@property
def inverse(self):
"""
Inverse transformation.
Raises `~astropy.modeling.InputParameterError` if the transformation cannot be inverted.
"""
det = np.linalg.det(self.matrix.value)
if det == 0:
raise InputParameterError(
f"Transformation matrix is singular; {self.__class__.__name__} model"
" does not have an inverse"
)
matrix = np.linalg.inv(self.matrix.value)
if self.matrix.unit is not None:
matrix = matrix * self.matrix.unit
# If matrix has unit then translation has unit, so no need to assign it.
translation = -np.dot(matrix, self.translation.value)
return self.__class__(matrix=matrix, translation=translation)
@classmethod
def evaluate(cls, x, y, matrix, translation):
"""
Apply the transformation to a set of 2D Cartesian coordinates given as
two lists--one for the x coordinates and one for the y coordinates--or a
single coordinate pair.
Parameters
----------
x, y : array, float
x and y coordinates
"""
if x.shape != y.shape:
raise ValueError("Expected input arrays to have the same shape")
shape = x.shape or (1,)
# Use asarray to ensure we drop any units from the inputs.
inarr = np.vstack(
[np.asarray(x).ravel(), np.asarray(y).ravel(), np.ones(x.size, x.dtype)]
)
if inarr.shape[0] != 3 or inarr.ndim != 2:
raise ValueError("Incompatible input shapes")
augmented_matrix = cls._create_augmented_matrix(matrix, translation)
result = np.dot(augmented_matrix, inarr)
x, y = result[0], result[1]
x.shape = y.shape = shape
return x, y
@staticmethod
def _create_augmented_matrix(matrix, translation):
unit = None
if any([hasattr(translation, "unit"), hasattr(matrix, "unit")]):
if not all([hasattr(translation, "unit"), hasattr(matrix, "unit")]):
raise ValueError(
"To use AffineTransformation with quantities, "
"both matrix and unit need to be quantities."
)
unit = translation.unit
# matrix should have the same units as translation
if not (matrix.unit / translation.unit) == u.dimensionless_unscaled:
raise ValueError("matrix and translation must have the same units.")
augmented_matrix = np.empty((3, 3), dtype=float)
augmented_matrix[0:2, 0:2] = matrix
augmented_matrix[0:2, 2:].flat = translation
augmented_matrix[2] = [0, 0, 1]
if unit is not None:
return augmented_matrix * unit
return augmented_matrix
@property
def input_units(self):
translation_unit = self.translation.input_unit
matrix_unit = self.matrix.input_unit
if translation_unit is None and matrix_unit is None:
return None
elif translation_unit is not None:
return dict(zip(self.inputs, [translation_unit] * 2))
else:
return dict(zip(self.inputs, [matrix_unit] * 2))
for long_name, short_name in _PROJ_NAME_CODE:
# define short-name projection equivalent classes:
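# (e.g. ``Pix2Sky_TAN`` becomes an alias for ``Pix2Sky_Gnomonic``)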
globals()["Pix2Sky_" + short_name] = globals()["Pix2Sky_" + long_name]
globals()["Sky2Pix_" + short_name] = globals()["Sky2Pix_" + long_name]
# set inverse classes:
globals()["Pix2Sky_" + long_name]._inv_cls = globals()["Sky2Pix_" + long_name]
globals()["Sky2Pix_" + long_name]._inv_cls = globals()["Pix2Sky_" + long_name]
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains models representing polynomials and polynomial series.
"""
# pylint: disable=invalid-name
from math import comb
import numpy as np
from astropy.utils import check_broadcast, indent
from .core import FittableModel, Model
from .functional_models import Shift
from .parameters import Parameter
from .utils import _validate_domain_window, poly_map_domain
__all__ = [
"Chebyshev1D",
"Chebyshev2D",
"Hermite1D",
"Hermite2D",
"InverseSIP",
"Legendre1D",
"Legendre2D",
"Polynomial1D",
"Polynomial2D",
"SIP",
"OrthoPolynomialBase",
"PolynomialModel",
]
class PolynomialBase(FittableModel):
"""
Base class for all polynomial-like models with an arbitrary number of
parameters in the form of coefficients.
In this case Parameter instances are returned through the class's
``__getattr__`` rather than through class descriptors.
"""
# Default _param_names list; this will be filled in by the implementation's
# __init__
_param_names = ()
linear = True
col_fit_deriv = False
@property
def param_names(self):
"""Coefficient names generated based on the model's polynomial degree
and number of dimensions.
Subclasses should implement this to return parameter names in the
desired format.
On most `Model` classes this is a class attribute, but for polynomial
models it is an instance attribute since each polynomial model instance
can have different parameters depending on the degree of the polynomial
and the number of dimensions, for example.
"""
return self._param_names
class PolynomialModel(PolynomialBase):
"""
Base class for polynomial models.
Its main purpose is to determine how many coefficients are needed
based on the polynomial order and dimension and to provide their
default values, names and ordering.
"""
def __init__(
self, degree, n_models=None, model_set_axis=None, name=None, meta=None, **params
):
self._degree = degree
self._order = self.get_num_coeff(self.n_inputs)
self._param_names = self._generate_coeff_names(self.n_inputs)
if n_models:
if model_set_axis is None:
model_set_axis = 0
minshape = (1,) * model_set_axis + (n_models,)
else:
minshape = ()
for param_name in self._param_names:
self._parameters_[param_name] = Parameter(
param_name, default=np.zeros(minshape)
)
super().__init__(
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
@property
def degree(self):
"""Degree of polynomial."""
return self._degree
def get_num_coeff(self, ndim):
"""
Return the number of coefficients in one parameter set.
"""
if self.degree < 0:
raise ValueError("Degree of polynomial must be positive or null")
# deg+1 is used to account for the difference between iraf using
# degree and numpy using exact degree
if ndim != 1:
nmixed = comb(self.degree, ndim)
else:
nmixed = 0
numc = self.degree * ndim + nmixed + 1
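# e.g. a 2D polynomial of degree 2 needs 2 * 2 + comb(2, 2) + 1 = 6
# coefficients: 1, x, y, x**2, x*y, y**2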
return numc
def _invlex(self):
c = []
lencoeff = self.degree + 1
for i in range(lencoeff):
for j in range(lencoeff):
if i + j <= self.degree:
c.append((j, i))
return c[::-1]
def _generate_coeff_names(self, ndim):
names = []
if ndim == 1:
for n in range(self._order):
names.append(f"c{n}")
else:
for i in range(self.degree + 1):
names.append(f"c{i}_{0}")
for i in range(1, self.degree + 1):
names.append(f"c{0}_{i}")
for i in range(1, self.degree):
for j in range(1, self.degree):
if i + j < self.degree + 1:
names.append(f"c{i}_{j}")
return tuple(names)
class _PolyDomainWindow1D(PolynomialModel):
"""
This class sets ``domain`` and ``window`` of 1D polynomials.
"""
def __init__(
self,
degree,
domain=None,
window=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
super().__init__(
degree, n_models, model_set_axis, name=name, meta=meta, **params
)
self._set_default_domain_window(domain, window)
@property
def window(self):
return self._window
@window.setter
def window(self, val):
self._window = _validate_domain_window(val)
@property
def domain(self):
return self._domain
@domain.setter
def domain(self, val):
self._domain = _validate_domain_window(val)
def _set_default_domain_window(self, domain, window):
"""
This method sets the ``domain`` and ``window`` attributes on 1D subclasses.
"""
self._default_domain_window = {"domain": None, "window": (-1, 1)}
self.window = window or (-1, 1)
self.domain = domain
def __repr__(self):
return self._format_repr(
[self.degree],
kwargs={"domain": self.domain, "window": self.window},
defaults=self._default_domain_window,
)
def __str__(self):
return self._format_str(
[("Degree", self.degree), ("Domain", self.domain), ("Window", self.window)],
self._default_domain_window,
)
class OrthoPolynomialBase(PolynomialBase):
"""
This is a base class for the 2D Chebyshev and Legendre models.
The polynomials implemented here require a maximum degree in x and y.
For explanation of ``x_domain``, ``y_domain``, ``x_window`` and ``y_window``
see :ref:`Notes regarding usage of domain and window <astropy:domain-window-note>`.
Parameters
----------
x_degree : int
degree in x
y_degree : int
degree in y
x_domain : tuple or None, optional
domain of the x independent variable
x_window : tuple or None, optional
range of the x independent variable
y_domain : tuple or None, optional
domain of the y independent variable
y_window : tuple or None, optional
range of the y independent variable
**params : dict
{keyword: value} pairs, representing {parameter_name: value}
"""
n_inputs = 2
n_outputs = 1
def __init__(
self,
x_degree,
y_degree,
x_domain=None,
x_window=None,
y_domain=None,
y_window=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
self.x_degree = x_degree
self.y_degree = y_degree
self._order = self.get_num_coeff()
# Set the ``x/y_domain`` and ``x/y_window`` attributes in subclasses.
self._default_domain_window = {
"x_window": (-1, 1),
"y_window": (-1, 1),
"x_domain": None,
"y_domain": None,
}
self.x_window = x_window or self._default_domain_window["x_window"]
self.y_window = y_window or self._default_domain_window["y_window"]
self.x_domain = x_domain
self.y_domain = y_domain
self._param_names = self._generate_coeff_names()
if n_models:
if model_set_axis is None:
model_set_axis = 0
minshape = (1,) * model_set_axis + (n_models,)
else:
minshape = ()
for param_name in self._param_names:
self._parameters_[param_name] = Parameter(
param_name, default=np.zeros(minshape)
)
super().__init__(
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
@property
def x_domain(self):
return self._x_domain
@x_domain.setter
def x_domain(self, val):
self._x_domain = _validate_domain_window(val)
@property
def y_domain(self):
return self._y_domain
@y_domain.setter
def y_domain(self, val):
self._y_domain = _validate_domain_window(val)
@property
def x_window(self):
return self._x_window
@x_window.setter
def x_window(self, val):
self._x_window = _validate_domain_window(val)
@property
def y_window(self):
return self._y_window
@y_window.setter
def y_window(self, val):
self._y_window = _validate_domain_window(val)
def __repr__(self):
return self._format_repr(
[self.x_degree, self.y_degree],
kwargs={
"x_domain": self.x_domain,
"y_domain": self.y_domain,
"x_window": self.x_window,
"y_window": self.y_window,
},
defaults=self._default_domain_window,
)
def __str__(self):
return self._format_str(
[
("X_Degree", self.x_degree),
("Y_Degree", self.y_degree),
("X_Domain", self.x_domain),
("Y_Domain", self.y_domain),
("X_Window", self.x_window),
("Y_Window", self.y_window),
],
self._default_domain_window,
)
def get_num_coeff(self):
"""
Determine how many coefficients are needed.
Returns
-------
numc : int
number of coefficients
"""
if self.x_degree < 0 or self.y_degree < 0:
raise ValueError("Degree of polynomial must be positive or null")
return (self.x_degree + 1) * (self.y_degree + 1)
def _invlex(self):
# TODO: This is a very slow way to do this; fix it and related methods
# like _alpha
c = []
xvar = np.arange(self.x_degree + 1)
yvar = np.arange(self.y_degree + 1)
for j in yvar:
for i in xvar:
c.append((i, j))
return np.array(c[::-1])
def invlex_coeff(self, coeffs):
invlex_coeffs = []
xvar = np.arange(self.x_degree + 1)
yvar = np.arange(self.y_degree + 1)
for j in yvar:
for i in xvar:
name = f"c{i}_{j}"
coeff = coeffs[self.param_names.index(name)]
invlex_coeffs.append(coeff)
return np.array(invlex_coeffs[::-1])
def _alpha(self):
invlexdeg = self._invlex()
invlexdeg[:, 1] = invlexdeg[:, 1] + self.x_degree + 1
nx = self.x_degree + 1
ny = self.y_degree + 1
alpha = np.zeros((ny * nx + 3, ny + nx))
for n in range(len(invlexdeg)):
alpha[n][invlexdeg[n]] = [1, 1]
alpha[-2, 0] = 1
alpha[-3, nx] = 1
return alpha
def imhorner(self, x, y, coeff):
_coeff = list(coeff)
_coeff.extend([0, 0, 0])
alpha = self._alpha()
r0 = _coeff[0]
nalpha = len(alpha)
karr = np.diff(alpha, axis=0)
kfunc = self._fcache(x, y)
x_terms = self.x_degree + 1
y_terms = self.y_degree + 1
nterms = x_terms + y_terms
for n in range(1, nterms + 1 + 3):
setattr(self, "r" + str(n), 0.0)
for n in range(1, nalpha):
k = karr[n - 1].nonzero()[0].max() + 1
rsum = 0
for i in range(1, k + 1):
rsum = rsum + getattr(self, "r" + str(i))
val = kfunc[k - 1] * (r0 + rsum)
setattr(self, "r" + str(k), val)
r0 = _coeff[n]
for i in range(1, k):
setattr(self, "r" + str(i), 0.0)
result = r0
for i in range(1, nterms + 1 + 3):
result = result + getattr(self, "r" + str(i))
return result
def _generate_coeff_names(self):
names = []
for j in range(self.y_degree + 1):
for i in range(self.x_degree + 1):
names.append(f"c{i}_{j}")
return tuple(names)
def _fcache(self, x, y):
"""
Compute and store the individual functions.
To be implemented by subclasses.
"""
raise NotImplementedError("Subclasses should implement this")
def evaluate(self, x, y, *coeffs):
if self.x_domain is not None:
x = poly_map_domain(x, self.x_domain, self.x_window)
if self.y_domain is not None:
y = poly_map_domain(y, self.y_domain, self.y_window)
invcoeff = self.invlex_coeff(coeffs)
return self.imhorner(x, y, invcoeff)
def prepare_inputs(self, x, y, **kwargs):
inputs, broadcasted_shapes = super().prepare_inputs(x, y, **kwargs)
x, y = inputs
if x.shape != y.shape:
raise ValueError("Expected input arrays to have the same shape")
return (x, y), broadcasted_shapes
class Chebyshev1D(_PolyDomainWindow1D):
r"""
Univariate Chebyshev series.
It is defined as:
.. math::
P(x) = \sum_{i=0}^{i=n}C_{i} * T_{i}(x)
where ``T_i(x)`` is the corresponding Chebyshev polynomial of the 1st kind.
For explanation of ``domain`` and ``window`` see
:ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
degree : int
degree of the series
domain : tuple or None, optional
window : tuple or None, optional
If None, it is set to (-1, 1)
Fitters will remap the domain to this window.
**params : dict
keyword : value pairs, representing parameter_name: value
Notes
-----
This model does not support the use of units/quantities, because each term
in the sum of Chebyshev polynomials is a polynomial in x - since the
coefficients within each Chebyshev polynomial are fixed, we can't use
quantities for x since the units would not be compatible. For example, the
third Chebyshev polynomial (T2) is 2x^2-1, but if x was specified with
units, 2x^2 and -1 would have incompatible units.
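Examples
--------
A minimal evaluation sketch; with ``c0=1`` and ``c2=1`` the series is
T_0(x) + T_2(x) = 1 + (2 x^2 - 1) (assuming the class is re-exported
through `astropy.modeling.models`):
>>> from astropy.modeling.models import Chebyshev1D
>>> cheb = Chebyshev1D(2, c0=1, c2=1)
>>> value = cheb(0.5)  # 1 + (2 * 0.25 - 1) = 0.5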
"""
n_inputs = 1
n_outputs = 1
_separable = True
def __init__(
self,
degree,
domain=None,
window=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
super().__init__(
degree,
domain=domain,
window=window,
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
def fit_deriv(self, x, *params):
"""
Computes the Vandermonde matrix.
Parameters
----------
x : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype)
v[0] = 1
if self.degree > 0:
x2 = 2 * x
v[1] = x
for i in range(2, self.degree + 1):
v[i] = v[i - 1] * x2 - v[i - 2]
return np.rollaxis(v, 0, v.ndim)
def prepare_inputs(self, x, **kwargs):
inputs, broadcasted_shapes = super().prepare_inputs(x, **kwargs)
x = inputs[0]
return (x,), broadcasted_shapes
def evaluate(self, x, *coeffs):
if self.domain is not None:
x = poly_map_domain(x, self.domain, self.window)
return self.clenshaw(x, coeffs)
@staticmethod
def clenshaw(x, coeffs):
"""Evaluates the polynomial using Clenshaw's algorithm."""
if len(coeffs) == 1:
c0 = coeffs[0]
c1 = 0
elif len(coeffs) == 2:
c0 = coeffs[0]
c1 = coeffs[1]
else:
x2 = 2 * x
c0 = coeffs[-2]
c1 = coeffs[-1]
for i in range(3, len(coeffs) + 1):
tmp = c0
c0 = coeffs[-i] - c1
c1 = tmp + c1 * x2
return c0 + c1 * x
class Hermite1D(_PolyDomainWindow1D):
r"""
Univariate Hermite series.
It is defined as:
.. math::
P(x) = \sum_{i=0}^{i=n}C_{i} * H_{i}(x)
where ``H_i(x)`` is the corresponding Hermite polynomial ("Physicist's kind").
For explanation of ``domain`` and ``window`` see
:ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
degree : int
degree of the series
domain : tuple or None, optional
window : tuple or None, optional
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
**params : dict
keyword : value pairs, representing parameter_name: value
Notes
-----
This model does not support the use of units/quantities, because each term
in the sum of Hermite polynomials is a polynomial in x - since the
coefficients within each Hermite polynomial are fixed, we can't use
quantities for x since the units would not be compatible. For example, the
third Hermite polynomial (H2) is 4x^2-2, but if x was specified with units,
4x^2 and -2 would have incompatible units.
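    Examples
    --------
    A minimal usage sketch; the coefficient values below are arbitrary and only
    for illustration:
    >>> from astropy.modeling import models
    >>> h = models.Hermite1D(degree=2, c0=1.0, c2=0.5)
    >>> y = h([0.0, 0.5, 1.0])  # P(x) = H_0(x) + 0.5*H_2(x), with H_2(x) = 4x**2 - 2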
"""
n_inputs = 1
n_outputs = 1
_separable = True
def __init__(
self,
degree,
domain=None,
window=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
super().__init__(
degree,
domain,
window,
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
def fit_deriv(self, x, *params):
"""
Computes the Vandermonde matrix.
Parameters
----------
x : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype)
v[0] = 1
if self.degree > 0:
x2 = 2 * x
v[1] = 2 * x
for i in range(2, self.degree + 1):
v[i] = x2 * v[i - 1] - 2 * (i - 1) * v[i - 2]
return np.rollaxis(v, 0, v.ndim)
def prepare_inputs(self, x, **kwargs):
inputs, broadcasted_shapes = super().prepare_inputs(x, **kwargs)
x = inputs[0]
return (x,), broadcasted_shapes
def evaluate(self, x, *coeffs):
if self.domain is not None:
x = poly_map_domain(x, self.domain, self.window)
return self.clenshaw(x, coeffs)
@staticmethod
def clenshaw(x, coeffs):
x2 = x * 2
if len(coeffs) == 1:
c0 = coeffs[0]
c1 = 0
elif len(coeffs) == 2:
c0 = coeffs[0]
c1 = coeffs[1]
else:
nd = len(coeffs)
c0 = coeffs[-2]
c1 = coeffs[-1]
for i in range(3, len(coeffs) + 1):
temp = c0
nd = nd - 1
c0 = coeffs[-i] - c1 * (2 * (nd - 1))
c1 = temp + c1 * x2
return c0 + c1 * x2
class Hermite2D(OrthoPolynomialBase):
r"""
Bivariate Hermite series.
It is defined as
.. math:: P_{nm}(x,y) = \sum_{n,m=0}^{n=d,m=d}C_{nm} H_n(x) H_m(y)
where ``H_n(x)`` and ``H_m(y)`` are Hermite polynomials.
For explanation of ``x_domain``, ``y_domain``, ``x_window`` and ``y_window``
see :ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
x_degree : int
degree in x
y_degree : int
degree in y
x_domain : tuple or None, optional
domain of the x independent variable
y_domain : tuple or None, optional
domain of the y independent variable
x_window : tuple or None, optional
range of the x independent variable
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
y_window : tuple or None, optional
range of the y independent variable
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
**params : dict
keyword: value pairs, representing parameter_name: value
Notes
-----
This model does not support the use of units/quantities, because each term
in the sum of Hermite polynomials is a polynomial in x and/or y - since the
coefficients within each Hermite polynomial are fixed, we can't use
quantities for x and/or y since the units would not be compatible. For
example, the third Hermite polynomial (H2) is 4x^2-2, but if x was
specified with units, 4x^2 and -2 would have incompatible units.
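    Examples
    --------
    A minimal fitting sketch; the synthetic surface below is arbitrary and only
    for illustration:
    >>> import numpy as np
    >>> from astropy.modeling import models, fitting
    >>> y, x = np.mgrid[-1:1:32j, -1:1:32j]
    >>> z = 0.5 + x * y
    >>> surf = fitting.LinearLSQFitter()(models.Hermite2D(x_degree=2, y_degree=2), x, y, z)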
"""
_separable = False
def __init__(
self,
x_degree,
y_degree,
x_domain=None,
x_window=None,
y_domain=None,
y_window=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
super().__init__(
x_degree,
y_degree,
x_domain=x_domain,
y_domain=y_domain,
x_window=x_window,
y_window=y_window,
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
def _fcache(self, x, y):
"""
Calculate the individual Hermite functions once and store them in a
dictionary to be reused.
"""
x_terms = self.x_degree + 1
y_terms = self.y_degree + 1
kfunc = {}
kfunc[0] = np.ones(x.shape)
kfunc[1] = 2 * x.copy()
kfunc[x_terms] = np.ones(y.shape)
kfunc[x_terms + 1] = 2 * y.copy()
for n in range(2, x_terms):
kfunc[n] = 2 * x * kfunc[n - 1] - 2 * (n - 1) * kfunc[n - 2]
for n in range(x_terms + 2, x_terms + y_terms):
kfunc[n] = 2 * y * kfunc[n - 1] - 2 * (n - 1) * kfunc[n - 2]
return kfunc
def fit_deriv(self, x, y, *params):
"""
Derivatives with respect to the coefficients.
This is an array with Hermite polynomials:
.. math::
H_{x_0}H_{y_0}, H_{x_1}H_{y_0}...H_{x_n}H_{y_0}...H_{x_n}H_{y_m}
Parameters
----------
x : ndarray
input
y : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
if x.shape != y.shape:
raise ValueError("x and y must have the same shape")
x = x.flatten()
y = y.flatten()
x_deriv = self._hermderiv1d(x, self.x_degree + 1).T
y_deriv = self._hermderiv1d(y, self.y_degree + 1).T
ij = []
for i in range(self.y_degree + 1):
for j in range(self.x_degree + 1):
ij.append(x_deriv[j] * y_deriv[i])
v = np.array(ij)
return v.T
def _hermderiv1d(self, x, deg):
"""
Derivative of 1D Hermite series.
"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
d = np.empty((deg + 1, len(x)), dtype=x.dtype)
d[0] = x * 0 + 1
if deg > 0:
x2 = 2 * x
d[1] = x2
for i in range(2, deg + 1):
d[i] = x2 * d[i - 1] - 2 * (i - 1) * d[i - 2]
return np.rollaxis(d, 0, d.ndim)
class Legendre1D(_PolyDomainWindow1D):
r"""
Univariate Legendre series.
It is defined as:
.. math::
P(x) = \sum_{i=0}^{i=n}C_{i} * L_{i}(x)
where ``L_i(x)`` is the corresponding Legendre polynomial.
For explanation of ``domain``, and ``window`` see
:ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
degree : int
degree of the series
domain : tuple or None, optional
window : tuple or None, optional
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
**params : dict
keyword: value pairs, representing parameter_name: value
Notes
-----
This model does not support the use of units/quantities, because each term
in the sum of Legendre polynomials is a polynomial in x - since the
coefficients within each Legendre polynomial are fixed, we can't use
quantities for x since the units would not be compatible. For example, the
third Legendre polynomial (P2) is 1.5x^2-0.5, but if x was specified with
units, 1.5x^2 and -0.5 would have incompatible units.
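    Examples
    --------
    A minimal usage sketch; the coefficient values below are arbitrary and only
    for illustration:
    >>> from astropy.modeling import models
    >>> leg = models.Legendre1D(degree=2, c0=1.0, c2=2.0)
    >>> y = leg([0.0, 0.5, 1.0])  # P(x) = L_0(x) + 2*L_2(x), with L_2(x) = 1.5x**2 - 0.5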
"""
n_inputs = 1
n_outputs = 1
_separable = True
def __init__(
self,
degree,
domain=None,
window=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
super().__init__(
degree,
domain,
window,
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
def prepare_inputs(self, x, **kwargs):
inputs, broadcasted_shapes = super().prepare_inputs(x, **kwargs)
x = inputs[0]
return (x,), broadcasted_shapes
def evaluate(self, x, *coeffs):
if self.domain is not None:
x = poly_map_domain(x, self.domain, self.window)
return self.clenshaw(x, coeffs)
def fit_deriv(self, x, *params):
"""
Computes the Vandermonde matrix.
Parameters
----------
x : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype)
v[0] = 1
if self.degree > 0:
v[1] = x
for i in range(2, self.degree + 1):
v[i] = (v[i - 1] * x * (2 * i - 1) - v[i - 2] * (i - 1)) / i
return np.rollaxis(v, 0, v.ndim)
@staticmethod
def clenshaw(x, coeffs):
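        """Evaluate a Legendre series using a Clenshaw-style recurrence."""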
if len(coeffs) == 1:
c0 = coeffs[0]
c1 = 0
elif len(coeffs) == 2:
c0 = coeffs[0]
c1 = coeffs[1]
else:
nd = len(coeffs)
c0 = coeffs[-2]
c1 = coeffs[-1]
for i in range(3, len(coeffs) + 1):
tmp = c0
nd = nd - 1
c0 = coeffs[-i] - (c1 * (nd - 1)) / nd
c1 = tmp + (c1 * x * (2 * nd - 1)) / nd
return c0 + c1 * x
class Polynomial1D(_PolyDomainWindow1D):
r"""
1D Polynomial model.
It is defined as:
.. math::
P = \sum_{i=0}^{i=n}C_{i} * x^{i}
For explanation of ``domain``, and ``window`` see
:ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
degree : int
degree of the series
domain : tuple or None, optional
If None, it is set to (-1, 1)
window : tuple or None, optional
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
**params : dict
keyword: value pairs, representing parameter_name: value
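    Examples
    --------
    A minimal fitting sketch; the synthetic data below are arbitrary and only
    for illustration:
    >>> import numpy as np
    >>> from astropy.modeling import models, fitting
    >>> x = np.linspace(-1.0, 1.0, 50)
    >>> y = 1.0 + 2.0 * x + 3.0 * x**2
    >>> poly = fitting.LinearLSQFitter()(models.Polynomial1D(degree=2), x, y)
    >>> # poly.c0, poly.c1 and poly.c2 should recover approximately 1, 2 and 3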
"""
n_inputs = 1
n_outputs = 1
_separable = True
def __init__(
self,
degree,
domain=None,
window=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
super().__init__(
degree,
domain,
window,
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
# Set domain separately because it's different from
# the orthogonal polynomials.
self._default_domain_window = {
"domain": (-1, 1),
"window": (-1, 1),
}
self.domain = domain or self._default_domain_window["domain"]
self.window = window or self._default_domain_window["window"]
def prepare_inputs(self, x, **kwargs):
inputs, broadcasted_shapes = super().prepare_inputs(x, **kwargs)
x = inputs[0]
return (x,), broadcasted_shapes
def evaluate(self, x, *coeffs):
if self.domain is not None:
x = poly_map_domain(x, self.domain, self.window)
return self.horner(x, coeffs)
def fit_deriv(self, x, *params):
"""
Computes the Vandermonde matrix.
Parameters
----------
x : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
v = np.empty((self.degree + 1,) + x.shape, dtype=float)
v[0] = 1
if self.degree > 0:
v[1] = x
for i in range(2, self.degree + 1):
v[i] = v[i - 1] * x
return np.rollaxis(v, 0, v.ndim)
@staticmethod
def horner(x, coeffs):
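        """Evaluate a 1D polynomial in ``x`` using Horner's scheme."""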
if len(coeffs) == 1:
c0 = coeffs[-1] * np.ones_like(x, subok=False)
else:
c0 = coeffs[-1]
for i in range(2, len(coeffs) + 1):
c0 = coeffs[-i] + c0 * x
return c0
@property
def input_units(self):
if self.degree == 0 or self.c1.input_unit is None:
return None
else:
return {self.inputs[0]: self.c0.input_unit / self.c1.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
mapping = {}
for i in range(self.degree + 1):
par = getattr(self, f"c{i}")
mapping[par.name] = (
outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]] ** i
)
return mapping
class Polynomial2D(PolynomialModel):
r"""
2D Polynomial model.
Represents a general polynomial of degree n:
.. math::
P(x,y) = c_{00} + c_{10}x + ...+ c_{n0}x^n + c_{01}y + ...+ c_{0n}y^n
+ c_{11}xy + c_{12}xy^2 + ... + c_{1(n-1)}xy^{n-1}+ ... + c_{(n-1)1}x^{n-1}y
For explanation of ``x_domain``, ``y_domain``, ``x_window`` and ``y_window``
see :ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
degree : int
Polynomial degree: largest sum of exponents (:math:`i + j`) of
variables in each monomial term of the form :math:`x^i y^j`. The
number of terms in a 2D polynomial of degree ``n`` is given by binomial
coefficient :math:`C(n + 2, 2) = (n + 2)! / (2!\,n!) = (n + 1)(n + 2) / 2`.
x_domain : tuple or None, optional
domain of the x independent variable
If None, it is set to (-1, 1)
y_domain : tuple or None, optional
domain of the y independent variable
If None, it is set to (-1, 1)
x_window : tuple or None, optional
range of the x independent variable
If None, it is set to (-1, 1)
Fitters will remap the x_domain to x_window
y_window : tuple or None, optional
range of the y independent variable
If None, it is set to (-1, 1)
Fitters will remap the y_domain to y_window
**params : dict
keyword: value pairs, representing parameter_name: value
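    Examples
    --------
    A minimal usage sketch; the coefficient values below are arbitrary and only
    for illustration:
    >>> from astropy.modeling import models
    >>> p2 = models.Polynomial2D(degree=2, c0_0=1.0, c1_0=2.0, c0_1=-1.0)
    >>> z = p2(1.0, 2.0)  # 1 + 2*x - y evaluated at (x, y) = (1, 2)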
"""
n_inputs = 2
n_outputs = 1
_separable = False
def __init__(
self,
degree,
x_domain=None,
y_domain=None,
x_window=None,
y_window=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
super().__init__(
degree,
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
self._default_domain_window = {
"x_domain": (-1, 1),
"y_domain": (-1, 1),
"x_window": (-1, 1),
"y_window": (-1, 1),
}
self.x_domain = x_domain or self._default_domain_window["x_domain"]
self.y_domain = y_domain or self._default_domain_window["y_domain"]
self.x_window = x_window or self._default_domain_window["x_window"]
self.y_window = y_window or self._default_domain_window["y_window"]
def prepare_inputs(self, x, y, **kwargs):
inputs, broadcasted_shapes = super().prepare_inputs(x, y, **kwargs)
x, y = inputs
return (x, y), broadcasted_shapes
def evaluate(self, x, y, *coeffs):
if self.x_domain is not None:
x = poly_map_domain(x, self.x_domain, self.x_window)
if self.y_domain is not None:
y = poly_map_domain(y, self.y_domain, self.y_window)
invcoeff = self.invlex_coeff(coeffs)
result = self.multivariate_horner(x, y, invcoeff)
# Special case for degree==0 to ensure that the shape of the output is
# still as expected by the broadcasting rules, even though the x and y
# inputs are not used in the evaluation
if self.degree == 0:
output_shape = check_broadcast(np.shape(coeffs[0]), x.shape)
if output_shape:
new_result = np.empty(output_shape)
new_result[:] = result
result = new_result
return result
def __repr__(self):
return self._format_repr(
[self.degree],
kwargs={
"x_domain": self.x_domain,
"y_domain": self.y_domain,
"x_window": self.x_window,
"y_window": self.y_window,
},
defaults=self._default_domain_window,
)
def __str__(self):
return self._format_str(
[
("Degree", self.degree),
("X_Domain", self.x_domain),
("Y_Domain", self.y_domain),
("X_Window", self.x_window),
("Y_Window", self.y_window),
],
self._default_domain_window,
)
def fit_deriv(self, x, y, *params):
"""
Computes the Vandermonde matrix.
Parameters
----------
x : ndarray
input
y : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
if x.ndim == 2:
x = x.flatten()
if y.ndim == 2:
y = y.flatten()
if x.size != y.size:
raise ValueError("Expected x and y to be of equal size")
designx = x[:, None] ** np.arange(self.degree + 1)
designy = y[:, None] ** np.arange(1, self.degree + 1)
designmixed = []
for i in range(1, self.degree):
for j in range(1, self.degree):
if i + j <= self.degree:
designmixed.append((x**i) * (y**j))
designmixed = np.array(designmixed).T
if designmixed.any():
v = np.hstack([designx, designy, designmixed])
else:
v = np.hstack([designx, designy])
return v
def invlex_coeff(self, coeffs):
invlex_coeffs = []
lencoeff = range(self.degree + 1)
for i in lencoeff:
for j in lencoeff:
if i + j <= self.degree:
name = f"c{j}_{i}"
coeff = coeffs[self.param_names.index(name)]
invlex_coeffs.append(coeff)
return invlex_coeffs[::-1]
def multivariate_horner(self, x, y, coeffs):
"""
Multivariate Horner's scheme.
Parameters
----------
x, y : array
coeffs : array
Coefficients in inverse lexical order.
"""
alpha = self._invlex()
r0 = coeffs[0]
r1 = r0 * 0.0
r2 = r0 * 0.0
karr = np.diff(alpha, axis=0)
for n in range(len(karr)):
if karr[n, 1] != 0:
r2 = y * (r0 + r1 + r2)
r1 = np.zeros_like(coeffs[0], subok=False)
else:
r1 = x * (r0 + r1)
r0 = coeffs[n + 1]
return r0 + r1 + r2
@property
def input_units(self):
if self.degree == 0 or (
self.c1_0.input_unit is None and self.c0_1.input_unit is None
):
return None
return {
self.inputs[0]: self.c0_0.input_unit / self.c1_0.input_unit,
self.inputs[1]: self.c0_0.input_unit / self.c0_1.input_unit,
}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
mapping = {}
for i in range(self.degree + 1):
for j in range(self.degree + 1):
if i + j > 2:
continue
par = getattr(self, f"c{i}_{j}")
mapping[par.name] = (
outputs_unit[self.outputs[0]]
/ inputs_unit[self.inputs[0]] ** i
/ inputs_unit[self.inputs[1]] ** j
)
return mapping
@property
def x_domain(self):
return self._x_domain
@x_domain.setter
def x_domain(self, val):
self._x_domain = _validate_domain_window(val)
@property
def y_domain(self):
return self._y_domain
@y_domain.setter
def y_domain(self, val):
self._y_domain = _validate_domain_window(val)
@property
def x_window(self):
return self._x_window
@x_window.setter
def x_window(self, val):
self._x_window = _validate_domain_window(val)
@property
def y_window(self):
return self._y_window
@y_window.setter
def y_window(self, val):
self._y_window = _validate_domain_window(val)
class Chebyshev2D(OrthoPolynomialBase):
r"""
    Bivariate Chebyshev series.
    It is defined as
    .. math:: P_{nm}(x,y) = \sum_{n,m=0}^{n=d,m=d}C_{nm} T_n(x) T_m(y)
where ``T_n(x)`` and ``T_m(y)`` are Chebyshev polynomials of the first kind.
For explanation of ``x_domain``, ``y_domain``, ``x_window`` and ``y_window``
see :ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
x_degree : int
degree in x
y_degree : int
degree in y
x_domain : tuple or None, optional
domain of the x independent variable
y_domain : tuple or None, optional
domain of the y independent variable
x_window : tuple or None, optional
range of the x independent variable
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
y_window : tuple or None, optional
range of the y independent variable
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
**params : dict
keyword: value pairs, representing parameter_name: value
Notes
-----
This model does not support the use of units/quantities, because each term
in the sum of Chebyshev polynomials is a polynomial in x and/or y - since
the coefficients within each Chebyshev polynomial are fixed, we can't use
quantities for x and/or y since the units would not be compatible. For
example, the third Chebyshev polynomial (T2) is 2x^2-1, but if x was
specified with units, 2x^2 and -1 would have incompatible units.
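    Examples
    --------
    A minimal fitting sketch; the synthetic surface below is arbitrary and only
    for illustration:
    >>> import numpy as np
    >>> from astropy.modeling import models, fitting
    >>> y, x = np.mgrid[-1:1:64j, -1:1:64j]
    >>> z = 1.0 + 2.0 * x - 0.5 * x * y
    >>> surf = fitting.LinearLSQFitter()(models.Chebyshev2D(x_degree=2, y_degree=2), x, y, z)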
"""
_separable = False
def __init__(
self,
x_degree,
y_degree,
x_domain=None,
x_window=None,
y_domain=None,
y_window=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
super().__init__(
x_degree,
y_degree,
x_domain=x_domain,
y_domain=y_domain,
x_window=x_window,
y_window=y_window,
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
def _fcache(self, x, y):
"""
Calculate the individual Chebyshev functions once and store them in a
dictionary to be reused.
"""
x_terms = self.x_degree + 1
y_terms = self.y_degree + 1
kfunc = {}
kfunc[0] = np.ones(x.shape)
kfunc[1] = x.copy()
kfunc[x_terms] = np.ones(y.shape)
kfunc[x_terms + 1] = y.copy()
for n in range(2, x_terms):
kfunc[n] = 2 * x * kfunc[n - 1] - kfunc[n - 2]
for n in range(x_terms + 2, x_terms + y_terms):
kfunc[n] = 2 * y * kfunc[n - 1] - kfunc[n - 2]
return kfunc
def fit_deriv(self, x, y, *params):
"""
Derivatives with respect to the coefficients.
This is an array with Chebyshev polynomials:
.. math::
T_{x_0}T_{y_0}, T_{x_1}T_{y_0}...T_{x_n}T_{y_0}...T_{x_n}T_{y_m}
Parameters
----------
x : ndarray
input
y : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
if x.shape != y.shape:
raise ValueError("x and y must have the same shape")
x = x.flatten()
y = y.flatten()
x_deriv = self._chebderiv1d(x, self.x_degree + 1).T
y_deriv = self._chebderiv1d(y, self.y_degree + 1).T
ij = []
for i in range(self.y_degree + 1):
for j in range(self.x_degree + 1):
ij.append(x_deriv[j] * y_deriv[i])
v = np.array(ij)
return v.T
def _chebderiv1d(self, x, deg):
"""
Derivative of 1D Chebyshev series.
"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
d = np.empty((deg + 1, len(x)), dtype=x.dtype)
d[0] = x * 0 + 1
if deg > 0:
x2 = 2 * x
d[1] = x
for i in range(2, deg + 1):
d[i] = d[i - 1] * x2 - d[i - 2]
return np.rollaxis(d, 0, d.ndim)
class Legendre2D(OrthoPolynomialBase):
r"""
Bivariate Legendre series.
Defined as:
    .. math:: P_{nm}(x,y) = \sum_{n,m=0}^{n=d,m=d}C_{nm} L_n(x) L_m(y)
where ``L_n(x)`` and ``L_m(y)`` are Legendre polynomials.
For explanation of ``x_domain``, ``y_domain``, ``x_window`` and ``y_window``
see :ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
x_degree : int
degree in x
y_degree : int
degree in y
x_domain : tuple or None, optional
domain of the x independent variable
y_domain : tuple or None, optional
domain of the y independent variable
x_window : tuple or None, optional
range of the x independent variable
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
y_window : tuple or None, optional
range of the y independent variable
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
**params : dict
keyword: value pairs, representing parameter_name: value
Notes
-----
Model formula:
.. math::
P(x) = \sum_{i=0}^{i=n}C_{i} * L_{i}(x)
where ``L_{i}`` is the corresponding Legendre polynomial.
This model does not support the use of units/quantities, because each term
in the sum of Legendre polynomials is a polynomial in x - since the
coefficients within each Legendre polynomial are fixed, we can't use
quantities for x since the units would not be compatible. For example, the
third Legendre polynomial (P2) is 1.5x^2-0.5, but if x was specified with
units, 1.5x^2 and -0.5 would have incompatible units.
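    Examples
    --------
    A minimal fitting sketch; the synthetic surface below is arbitrary and only
    for illustration:
    >>> import numpy as np
    >>> from astropy.modeling import models, fitting
    >>> y, x = np.mgrid[-1:1:32j, -1:1:32j]
    >>> z = 0.2 - x + 0.5 * y
    >>> surf = fitting.LinearLSQFitter()(models.Legendre2D(x_degree=1, y_degree=1), x, y, z)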
"""
_separable = False
def __init__(
self,
x_degree,
y_degree,
x_domain=None,
x_window=None,
y_domain=None,
y_window=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
super().__init__(
x_degree,
y_degree,
x_domain=x_domain,
y_domain=y_domain,
x_window=x_window,
y_window=y_window,
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
def _fcache(self, x, y):
"""
Calculate the individual Legendre functions once and store them in a
dictionary to be reused.
"""
x_terms = self.x_degree + 1
y_terms = self.y_degree + 1
kfunc = {}
kfunc[0] = np.ones(x.shape)
kfunc[1] = x.copy()
kfunc[x_terms] = np.ones(y.shape)
kfunc[x_terms + 1] = y.copy()
for n in range(2, x_terms):
kfunc[n] = (
(2 * (n - 1) + 1) * x * kfunc[n - 1] - (n - 1) * kfunc[n - 2]
) / n
for n in range(2, y_terms):
kfunc[n + x_terms] = (
(2 * (n - 1) + 1) * y * kfunc[n + x_terms - 1]
- (n - 1) * kfunc[n + x_terms - 2]
) / (n)
return kfunc
def fit_deriv(self, x, y, *params):
"""Derivatives with respect to the coefficients.
        This is an array with Legendre polynomials:
        .. math::
            L_{x_0}L_{y_0}, L_{x_1}L_{y_0}...L_{x_n}L_{y_0}...L_{x_n}L_{y_m}
Parameters
----------
x : ndarray
input
y : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
if x.shape != y.shape:
raise ValueError("x and y must have the same shape")
x = x.flatten()
y = y.flatten()
x_deriv = self._legendderiv1d(x, self.x_degree + 1).T
y_deriv = self._legendderiv1d(y, self.y_degree + 1).T
ij = []
for i in range(self.y_degree + 1):
for j in range(self.x_degree + 1):
ij.append(x_deriv[j] * y_deriv[i])
v = np.array(ij)
return v.T
def _legendderiv1d(self, x, deg):
"""Derivative of 1D Legendre polynomial."""
x = np.array(x, dtype=float, copy=False, ndmin=1)
d = np.empty((deg + 1,) + x.shape, dtype=x.dtype)
d[0] = x * 0 + 1
if deg > 0:
d[1] = x
for i in range(2, deg + 1):
d[i] = (d[i - 1] * x * (2 * i - 1) - d[i - 2] * (i - 1)) / i
return np.rollaxis(d, 0, d.ndim)
class _SIP1D(PolynomialBase):
"""
This implements the Simple Imaging Polynomial Model (SIP) in 1D.
It's unlikely it will be used in 1D so this class is private
and SIP should be used instead.
"""
n_inputs = 2
n_outputs = 1
_separable = False
def __init__(
self,
order,
coeff_prefix,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
self.order = order
self.coeff_prefix = coeff_prefix
self._param_names = self._generate_coeff_names(coeff_prefix)
if n_models:
if model_set_axis is None:
model_set_axis = 0
minshape = (1,) * model_set_axis + (n_models,)
else:
minshape = ()
for param_name in self._param_names:
self._parameters_[param_name] = Parameter(
param_name, default=np.zeros(minshape)
)
super().__init__(
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
def __repr__(self):
return self._format_repr(args=[self.order, self.coeff_prefix])
def __str__(self):
return self._format_str(
[("Order", self.order), ("Coeff. Prefix", self.coeff_prefix)]
)
def evaluate(self, x, y, *coeffs):
# TODO: Rewrite this so that it uses a simpler method of determining
# the matrix based on the number of given coefficients.
mcoef = self._coeff_matrix(self.coeff_prefix, coeffs)
return self._eval_sip(x, y, mcoef)
def get_num_coeff(self, ndim):
"""
Return the number of coefficients in one param set.
"""
if self.order < 2 or self.order > 9:
raise ValueError("Degree of polynomial must be 2< deg < 9")
nmixed = comb(self.order, ndim)
# remove 3 terms because SIP deg >= 2
numc = self.order * ndim + nmixed - 2
return numc
def _generate_coeff_names(self, coeff_prefix):
names = []
for i in range(2, self.order + 1):
names.append(f"{coeff_prefix}_{i}_{0}")
for i in range(2, self.order + 1):
names.append(f"{coeff_prefix}_{0}_{i}")
for i in range(1, self.order):
for j in range(1, self.order):
if i + j < self.order + 1:
names.append(f"{coeff_prefix}_{i}_{j}")
return tuple(names)
def _coeff_matrix(self, coeff_prefix, coeffs):
mat = np.zeros((self.order + 1, self.order + 1))
for i in range(2, self.order + 1):
attr = f"{coeff_prefix}_{i}_{0}"
mat[i, 0] = coeffs[self.param_names.index(attr)][0]
for i in range(2, self.order + 1):
attr = f"{coeff_prefix}_{0}_{i}"
mat[0, i] = coeffs[self.param_names.index(attr)][0]
for i in range(1, self.order):
for j in range(1, self.order):
if i + j < self.order + 1:
attr = f"{coeff_prefix}_{i}_{j}"
mat[i, j] = coeffs[self.param_names.index(attr)][0]
return mat
def _eval_sip(self, x, y, coef):
x = np.asarray(x, dtype=np.float64)
y = np.asarray(y, dtype=np.float64)
if self.coeff_prefix == "A":
result = np.zeros(x.shape)
else:
result = np.zeros(y.shape)
for i in range(coef.shape[0]):
for j in range(coef.shape[1]):
if 1 < i + j < self.order + 1:
result = result + coef[i, j] * x**i * y**j
return result
class SIP(Model):
"""
Simple Imaging Polynomial (SIP) model.
The SIP convention is used to represent distortions in FITS image headers.
See [1]_ for a description of the SIP convention.
Parameters
----------
crpix : list or (2,) ndarray
CRPIX values
a_order : int
SIP polynomial order for first axis
b_order : int
SIP order for second axis
a_coeff : dict
SIP coefficients for first axis
b_coeff : dict
SIP coefficients for the second axis
ap_order : int
order for the inverse transformation (AP coefficients)
bp_order : int
order for the inverse transformation (BP coefficients)
ap_coeff : dict
coefficients for the inverse transform
bp_coeff : dict
coefficients for the inverse transform
References
----------
.. [1] `David Shupe, et al, ADASS, ASP Conference Series, Vol. 347, 2005
<https://ui.adsabs.harvard.edu/abs/2005ASPC..347..491S>`_
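    Examples
    --------
    A minimal usage sketch; the reference pixel and the single distortion
    coefficient per axis below are arbitrary and only for illustration:
    >>> from astropy.modeling import models
    >>> sip = models.SIP(crpix=[512.0, 512.0], a_order=2, b_order=2,
    ...                  a_coeff={'A_2_0': 5.0e-6}, b_coeff={'B_0_2': 5.0e-6})
    >>> df, dg = sip(1024.0, 1024.0)  # SIP distortion terms f(u, v) and g(u, v)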
"""
n_inputs = 2
n_outputs = 2
_separable = False
def __init__(
self,
crpix,
a_order,
b_order,
a_coeff={},
b_coeff={},
ap_order=None,
bp_order=None,
ap_coeff={},
bp_coeff={},
n_models=None,
model_set_axis=None,
name=None,
meta=None,
):
self._crpix = crpix
self._a_order = a_order
self._b_order = b_order
self._a_coeff = a_coeff
self._b_coeff = b_coeff
self._ap_order = ap_order
self._bp_order = bp_order
self._ap_coeff = ap_coeff
self._bp_coeff = bp_coeff
self.shift_a = Shift(-crpix[0])
self.shift_b = Shift(-crpix[1])
self.sip1d_a = _SIP1D(
a_order,
coeff_prefix="A",
n_models=n_models,
model_set_axis=model_set_axis,
**a_coeff,
)
self.sip1d_b = _SIP1D(
b_order,
coeff_prefix="B",
n_models=n_models,
model_set_axis=model_set_axis,
**b_coeff,
)
super().__init__(
n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta
)
self._inputs = ("u", "v")
self._outputs = ("x", "y")
def __repr__(self):
return (
f"<{self.__class__.__name__}"
f"({[self.shift_a, self.shift_b, self.sip1d_a, self.sip1d_b]!r})>"
)
def __str__(self):
parts = [f"Model: {self.__class__.__name__}"]
for model in [self.shift_a, self.shift_b, self.sip1d_a, self.sip1d_b]:
parts.append(indent(str(model), width=4))
parts.append("")
return "\n".join(parts)
@property
def inverse(self):
if self._ap_order is not None and self._bp_order is not None:
return InverseSIP(
self._ap_order, self._bp_order, self._ap_coeff, self._bp_coeff
)
else:
raise NotImplementedError("SIP inverse coefficients are not available.")
def evaluate(self, x, y):
u = self.shift_a.evaluate(x, *self.shift_a.param_sets)
v = self.shift_b.evaluate(y, *self.shift_b.param_sets)
f = self.sip1d_a.evaluate(u, v, *self.sip1d_a.param_sets)
g = self.sip1d_b.evaluate(u, v, *self.sip1d_b.param_sets)
return f, g
class InverseSIP(Model):
"""
Inverse Simple Imaging Polynomial.
Parameters
----------
ap_order : int
order for the inverse transformation (AP coefficients)
bp_order : int
order for the inverse transformation (BP coefficients)
ap_coeff : dict
coefficients for the inverse transform
bp_coeff : dict
coefficients for the inverse transform
"""
n_inputs = 2
n_outputs = 2
_separable = False
def __init__(
self,
ap_order,
bp_order,
ap_coeff={},
bp_coeff={},
n_models=None,
model_set_axis=None,
name=None,
meta=None,
):
self._ap_order = ap_order
self._bp_order = bp_order
self._ap_coeff = ap_coeff
self._bp_coeff = bp_coeff
# define the 0th term in order to use Polynomial2D
ap_coeff.setdefault("AP_0_0", 0)
bp_coeff.setdefault("BP_0_0", 0)
ap_coeff_params = {k.replace("AP_", "c"): v for k, v in ap_coeff.items()}
bp_coeff_params = {k.replace("BP_", "c"): v for k, v in bp_coeff.items()}
self.sip1d_ap = Polynomial2D(
degree=ap_order, model_set_axis=model_set_axis, **ap_coeff_params
)
self.sip1d_bp = Polynomial2D(
degree=bp_order, model_set_axis=model_set_axis, **bp_coeff_params
)
super().__init__(
n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta
)
def __repr__(self):
return f"<{self.__class__.__name__}({[self.sip1d_ap, self.sip1d_bp]!r})>"
def __str__(self):
parts = [f"Model: {self.__class__.__name__}"]
for model in [self.sip1d_ap, self.sip1d_bp]:
parts.append(indent(str(model), width=4))
parts.append("")
return "\n".join(parts)
def evaluate(self, x, y):
x1 = self.sip1d_ap.evaluate(x, y, *self.sip1d_ap.param_sets)
y1 = self.sip1d_bp.evaluate(x, y, *self.sip1d_bp.param_sets)
return x1, y1
bc69742f78966e8cba11b9e15979d427aa00f2295219932147fdbb0e58a6aa28
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Models that have physical origins.
"""
# pylint: disable=invalid-name, no-member
import warnings
import numpy as np
from astropy import constants as const
from astropy import units as u
from astropy.utils.exceptions import AstropyUserWarning
from .core import Fittable1DModel
from .parameters import InputParameterError, Parameter
__all__ = ["BlackBody", "Drude1D", "Plummer1D", "NFW"]
class BlackBody(Fittable1DModel):
"""
Blackbody model using the Planck function.
Parameters
----------
temperature : `~astropy.units.Quantity` ['temperature']
Blackbody temperature.
scale : float or `~astropy.units.Quantity` ['dimensionless']
        Scale factor. If dimensionless, input units will be assumed
        to be in Hz and output units in erg / (cm ** 2 * s * Hz * sr).
        If not dimensionless, must be equivalent to either
        erg / (cm ** 2 * s * Hz * sr) or erg / (cm ** 2 * s * AA * sr),
in which case the result will be returned in the requested units and
the scale will be stripped of units (with the float value applied).
Notes
-----
Model formula:
.. math:: B_{\\nu}(T) = A \\frac{2 h \\nu^{3} / c^{2}}{exp(h \\nu / k T) - 1}
Examples
--------
>>> from astropy.modeling import models
>>> from astropy import units as u
>>> bb = models.BlackBody(temperature=5000*u.K)
>>> bb(6000 * u.AA) # doctest: +FLOAT_CMP
<Quantity 1.53254685e-05 erg / (Hz s sr cm2)>
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import BlackBody
from astropy import units as u
from astropy.visualization import quantity_support
bb = BlackBody(temperature=5778*u.K)
wav = np.arange(1000, 110000) * u.AA
flux = bb(wav)
with quantity_support():
plt.figure()
plt.semilogx(wav, flux)
plt.axvline(bb.nu_max.to(u.AA, equivalencies=u.spectral()).value, ls='--')
plt.show()
"""
# We parametrize this model with a temperature and a scale.
temperature = Parameter(
default=5000.0, min=0, unit=u.K, description="Blackbody temperature"
)
scale = Parameter(default=1.0, min=0, description="Scale factor")
# We allow values without units to be passed when evaluating the model, and
# in this case the input x values are assumed to be frequencies in Hz or wavelengths
# in AA (depending on the choice of output units controlled by units on scale
# and stored in self._output_units during init).
_input_units_allow_dimensionless = True
# We enable the spectral equivalency by default for the spectral axis
input_units_equivalencies = {"x": u.spectral()}
# Store the native units returned by B_nu equation
_native_units = u.erg / (u.cm**2 * u.s * u.Hz * u.sr)
# Store the base native output units. If scale is not dimensionless, it
# must be equivalent to one of these. If equivalent to SLAM, then
# input_units will expect AA for 'x', otherwise Hz.
_native_output_units = {
"SNU": u.erg / (u.cm**2 * u.s * u.Hz * u.sr),
"SLAM": u.erg / (u.cm**2 * u.s * u.AA * u.sr),
}
def __init__(self, *args, **kwargs):
scale = kwargs.get("scale", None)
# Support scale with non-dimensionless unit by stripping the unit and
# storing as self._output_units.
if hasattr(scale, "unit") and not scale.unit.is_equivalent(
u.dimensionless_unscaled
):
output_units = scale.unit
if not output_units.is_equivalent(
self._native_units, u.spectral_density(1 * u.AA)
):
raise ValueError(
"scale units not dimensionless or in "
f"surface brightness: {output_units}"
)
kwargs["scale"] = scale.value
self._output_units = output_units
else:
self._output_units = self._native_units
return super().__init__(*args, **kwargs)
def evaluate(self, x, temperature, scale):
"""Evaluate the model.
Parameters
----------
x : float, `~numpy.ndarray`, or `~astropy.units.Quantity` ['frequency']
Frequency at which to compute the blackbody. If no units are given,
this defaults to Hz (or AA if `scale` was initialized with units
equivalent to erg / (cm ** 2 * s * AA * sr)).
temperature : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
Temperature of the blackbody. If no units are given, this defaults
to Kelvin.
scale : float, `~numpy.ndarray`, or `~astropy.units.Quantity` ['dimensionless']
Desired scale for the blackbody.
Returns
-------
y : number or ndarray
Blackbody spectrum. The units are determined from the units of
``scale``.
.. note::
Use `numpy.errstate` to suppress Numpy warnings, if desired.
.. warning::
Output values might contain ``nan`` and ``inf``.
Raises
------
ValueError
Invalid temperature.
ZeroDivisionError
Wavelength is zero (when converting to frequency).
"""
if not isinstance(temperature, u.Quantity):
in_temp = u.Quantity(temperature, u.K)
else:
in_temp = temperature
if not isinstance(x, u.Quantity):
# then we assume it has input_units which depends on the
# requested output units (either Hz or AA)
in_x = u.Quantity(x, self.input_units["x"])
else:
in_x = x
# Convert to units for calculations, also force double precision
with u.add_enabled_equivalencies(u.spectral() + u.temperature()):
freq = u.Quantity(in_x, u.Hz, dtype=np.float64)
temp = u.Quantity(in_temp, u.K)
# Check if input values are physically possible
if np.any(temp < 0):
raise ValueError(f"Temperature should be positive: {temp}")
if not np.all(np.isfinite(freq)) or np.any(freq <= 0):
warnings.warn(
"Input contains invalid wavelength/frequency value(s)",
AstropyUserWarning,
)
log_boltz = const.h * freq / (const.k_B * temp)
boltzm1 = np.expm1(log_boltz)
# Calculate blackbody flux
bb_nu = 2.0 * const.h * freq**3 / (const.c**2 * boltzm1) / u.sr
if self.scale.unit is not None:
# Will be dimensionless at this point, but may not be dimensionless_unscaled
if not hasattr(scale, "unit"):
# during fitting, scale will be passed without units
# but we still need to convert from the input dimensionless
# to dimensionless unscaled
scale = scale * self.scale.unit
scale = scale.to(u.dimensionless_unscaled).value
# NOTE: scale is already stripped of any input units
y = scale * bb_nu.to(self._output_units, u.spectral_density(freq))
# If the temperature parameter has no unit, we should return a unitless
# value. This occurs for instance during fitting, since we drop the
# units temporarily.
if hasattr(temperature, "unit"):
return y
return y.value
@property
def input_units(self):
# The input units are those of the 'x' value, which will depend on the
# units compatible with the expected output units.
if self._output_units.is_equivalent(self._native_output_units["SNU"]):
return {self.inputs[0]: u.Hz}
else:
# only other option is equivalent with SLAM
return {self.inputs[0]: u.AA}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"temperature": u.K}
@property
def bolometric_flux(self):
"""Bolometric flux."""
if self.scale.unit is not None:
# Will be dimensionless at this point, but may not be dimensionless_unscaled
scale = self.scale.quantity.to(u.dimensionless_unscaled)
else:
scale = self.scale.value
# bolometric flux in the native units of the planck function
native_bolflux = scale * const.sigma_sb * self.temperature**4 / np.pi
# return in more "astro" units
return native_bolflux.to(u.erg / (u.cm**2 * u.s))
@property
def lambda_max(self):
"""Peak wavelength when the curve is expressed as power density."""
return const.b_wien / self.temperature
@property
def nu_max(self):
"""Peak frequency when the curve is expressed as power density."""
return 2.8214391 * const.k_B * self.temperature / const.h
class Drude1D(Fittable1DModel):
"""
    Drude model based on the behavior of electrons in materials (esp. metals).
Parameters
----------
amplitude : float
Peak value
x_0 : float
Position of the peak
fwhm : float
Full width at half maximum
Model formula:
    .. math:: f(x) = A \\frac{(fwhm/x_0)^2}{((x/x_0 - x_0/x)^2 + (fwhm/x_0)^2)}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Drude1D
fig, ax = plt.subplots()
# generate the curves and plot them
x = np.arange(7.5 , 12.5 , 0.1)
dmodel = Drude1D(amplitude=1.0, fwhm=1.0, x_0=10.0)
ax.plot(x, dmodel(x))
ax.set_xlabel('x')
ax.set_ylabel('F(x)')
plt.show()
"""
amplitude = Parameter(default=1.0, description="Peak Value")
x_0 = Parameter(default=1.0, description="Position of the peak")
fwhm = Parameter(default=1.0, description="Full width at half maximum")
@staticmethod
def evaluate(x, amplitude, x_0, fwhm):
"""
One dimensional Drude model function.
"""
return (
amplitude
* ((fwhm / x_0) ** 2)
/ ((x / x_0 - x_0 / x) ** 2 + (fwhm / x_0) ** 2)
)
@staticmethod
def fit_deriv(x, amplitude, x_0, fwhm):
"""
Drude1D model function derivatives.
"""
d_amplitude = (fwhm / x_0) ** 2 / ((x / x_0 - x_0 / x) ** 2 + (fwhm / x_0) ** 2)
d_x_0 = (
-2
* amplitude
* d_amplitude
* (
(1 / x_0)
+ d_amplitude
* (x_0**2 / fwhm**2)
* (
(-x / x_0 - 1 / x) * (x / x_0 - x_0 / x)
- (2 * fwhm**2 / x_0**3)
)
)
)
d_fwhm = (2 * amplitude * d_amplitude / fwhm) * (1 - d_amplitude)
return [d_amplitude, d_x_0, d_fwhm]
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {self.inputs[0]: self.x_0.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"fwhm": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
@property
def return_units(self):
if self.amplitude.unit is None:
return None
return {self.outputs[0]: self.amplitude.unit}
def _x_0_validator(self, val):
"""Ensure `x_0` is not 0."""
if np.any(val == 0):
raise InputParameterError("0 is not an allowed value for x_0")
x_0._validator = _x_0_validator
def bounding_box(self, factor=50):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of FWHM used to define the limits.
"""
x0 = self.x_0
dx = factor * self.fwhm
return (x0 - dx, x0 + dx)
class Plummer1D(Fittable1DModel):
r"""One dimensional Plummer density profile model.
Parameters
----------
mass : float
Total mass of cluster.
r_plum : float
Scale parameter which sets the size of the cluster core.
Notes
-----
Model formula:
.. math::
\rho(r)=\frac{3M}{4\pi a^3}(1+\frac{r^2}{a^2})^{-5/2}
References
----------
.. [1] https://ui.adsabs.harvard.edu/abs/1911MNRAS..71..460P
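    Examples
    --------
    A minimal usage sketch; the (unitless) parameter values below are arbitrary
    and only for illustration:
    >>> from astropy.modeling import models
    >>> plummer = models.Plummer1D(mass=1.0e5, r_plum=2.5)
    >>> rho0 = plummer(0.0)  # central density, 3 * mass / (4 * pi * r_plum**3)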
"""
mass = Parameter(default=1.0, description="Total mass of cluster")
r_plum = Parameter(
default=1.0,
description="Scale parameter which sets the size of the cluster core",
)
@staticmethod
def evaluate(x, mass, r_plum):
"""
Evaluate plummer density profile model.
"""
return (
(3 * mass) / (4 * np.pi * r_plum**3) * (1 + (x / r_plum) ** 2) ** (-5 / 2)
)
@staticmethod
def fit_deriv(x, mass, r_plum):
"""
Plummer1D model derivatives.
"""
d_mass = 3 / ((4 * np.pi * r_plum**3) * (((x / r_plum) ** 2 + 1) ** (5 / 2)))
d_r_plum = (6 * mass * x**2 - 9 * mass * r_plum**2) / (
(4 * np.pi * r_plum**6) * (1 + (x / r_plum) ** 2) ** (7 / 2)
)
return [d_mass, d_r_plum]
@property
def input_units(self):
mass_unit = self.mass.input_unit
r_plum_unit = self.r_plum.input_unit
if mass_unit is None and r_plum_unit is None:
return None
return {self.inputs[0]: r_plum_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"mass": outputs_unit[self.outputs[0]] * inputs_unit[self.inputs[0]] ** 3,
"r_plum": inputs_unit[self.inputs[0]],
}
class NFW(Fittable1DModel):
r"""
Navarro–Frenk–White (NFW) profile - model for radial distribution of dark matter.
Parameters
----------
mass : float or `~astropy.units.Quantity` ['mass']
Mass of NFW peak within specified overdensity radius.
concentration : float
Concentration of the NFW profile.
redshift : float
Redshift of the NFW profile.
massfactor : tuple or str
Mass overdensity factor and type for provided profiles:
Tuple version:
("virial",) : virial radius
("critical", N) : radius where density is N times that of the critical density
("mean", N) : radius where density is N times that of the mean density
String version:
"virial" : virial radius
"Nc" : radius where density is N times that of the critical density (e.g. "200c")
"Nm" : radius where density is N times that of the mean density (e.g. "500m")
cosmo : :class:`~astropy.cosmology.Cosmology`
Background cosmology for density calculation. If None, the default cosmology will be used.
Notes
-----
Model formula:
.. math:: \rho(r)=\frac{\delta_c\rho_{c}}{r/r_s(1+r/r_s)^2}
References
----------
.. [1] https://arxiv.org/pdf/astro-ph/9508025
.. [2] https://en.wikipedia.org/wiki/Navarro%E2%80%93Frenk%E2%80%93White_profile
.. [3] https://en.wikipedia.org/wiki/Virial_mass
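    Examples
    --------
    A minimal usage sketch; the halo parameters below are arbitrary and only
    for illustration:
    >>> from astropy import units as u
    >>> from astropy.modeling import models
    >>> halo = models.NFW(mass=2.0e12 * u.M_sun, concentration=8.0,
    ...                   redshift=0.2, massfactor=("critical", 200))
    >>> rho = halo(50 * u.kpc)  # density in M_sun / kpc**3
    >>> r200 = halo.r_virial    # overdensity radius implied by ``massfactor``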
"""
# Model Parameters
# NFW Profile mass
mass = Parameter(
default=1.0,
min=1.0,
unit=u.M_sun,
description="Peak mass within specified overdensity radius",
)
# NFW profile concentration
concentration = Parameter(default=1.0, min=1.0, description="Concentration")
# NFW Profile redshift
redshift = Parameter(default=0.0, min=0.0, description="Redshift")
# We allow values without units to be passed when evaluating the model, and
# in this case the input r values are assumed to be lengths / positions in kpc.
_input_units_allow_dimensionless = True
def __init__(
self,
mass=u.Quantity(mass.default, mass.unit),
concentration=concentration.default,
redshift=redshift.default,
massfactor=("critical", 200),
cosmo=None,
**kwargs,
):
# Set default cosmology
if cosmo is None:
# LOCAL
from astropy.cosmology import default_cosmology
cosmo = default_cosmology.get()
# Set mass overdensity type and factor
self._density_delta(massfactor, cosmo, redshift)
# Establish mass units for density calculation (default solar masses)
if not isinstance(mass, u.Quantity):
in_mass = u.Quantity(mass, u.M_sun)
else:
in_mass = mass
# Obtain scale radius
self._radius_s(mass, concentration)
# Obtain scale density
self._density_s(mass, concentration)
super().__init__(
mass=in_mass, concentration=concentration, redshift=redshift, **kwargs
)
def evaluate(self, r, mass, concentration, redshift):
"""
One dimensional NFW profile function.
Parameters
----------
r : float or `~astropy.units.Quantity` ['length']
Radial position of density to be calculated for the NFW profile.
mass : float or `~astropy.units.Quantity` ['mass']
Mass of NFW peak within specified overdensity radius.
concentration : float
Concentration of the NFW profile.
redshift : float
Redshift of the NFW profile.
Returns
-------
density : float or `~astropy.units.Quantity` ['density']
NFW profile mass density at location ``r``. The density units are:
[``mass`` / ``r`` ^3]
Notes
-----
.. warning::
Output values might contain ``nan`` and ``inf``.
"""
# Create radial version of input with dimension
if hasattr(r, "unit"):
in_r = r
else:
in_r = u.Quantity(r, u.kpc)
# Define reduced radius (r / r_{\\rm s})
# also update scale radius
radius_reduced = in_r / self._radius_s(mass, concentration).to(in_r.unit)
# Density distribution
# \rho (r)=\frac{\rho_0}{\frac{r}{R_s}\left(1~+~\frac{r}{R_s}\right)^2}
# also update scale density
density = self._density_s(mass, concentration) / (
radius_reduced * (u.Quantity(1.0) + radius_reduced) ** 2
)
if hasattr(mass, "unit"):
return density
else:
return density.value
def _density_delta(self, massfactor, cosmo, redshift):
"""
Calculate density delta.
"""
# Set mass overdensity type and factor
if isinstance(massfactor, tuple):
# Tuple options
# ("virial") : virial radius
# ("critical", N) : radius where density is N that of the critical density
# ("mean", N) : radius where density is N that of the mean density
if massfactor[0].lower() == "virial":
# Virial Mass
delta = None
masstype = massfactor[0].lower()
elif massfactor[0].lower() == "critical":
# Critical or Mean Overdensity Mass
delta = float(massfactor[1])
masstype = "c"
elif massfactor[0].lower() == "mean":
# Critical or Mean Overdensity Mass
delta = float(massfactor[1])
masstype = "m"
else:
raise ValueError(
f"Massfactor '{massfactor[0]}' not one of 'critical', "
"'mean', or 'virial'"
)
else:
try:
# String options
# virial : virial radius
# Nc : radius where density is N that of the critical density
# Nm : radius where density is N that of the mean density
if massfactor.lower() == "virial":
# Virial Mass
delta = None
masstype = massfactor.lower()
elif massfactor[-1].lower() == "c" or massfactor[-1].lower() == "m":
# Critical or Mean Overdensity Mass
delta = float(massfactor[0:-1])
masstype = massfactor[-1].lower()
else:
raise ValueError(
f"Massfactor {massfactor} string not of the form "
"'#m', '#c', or 'virial'"
)
except (AttributeError, TypeError):
raise TypeError(f"Massfactor {massfactor} not a tuple or string")
# Set density from masstype specification
if masstype == "virial":
Om_c = cosmo.Om(redshift) - 1.0
d_c = 18.0 * np.pi**2 + 82.0 * Om_c - 39.0 * Om_c**2
self.density_delta = d_c * cosmo.critical_density(redshift)
elif masstype == "c":
self.density_delta = delta * cosmo.critical_density(redshift)
elif masstype == "m":
self.density_delta = (
delta * cosmo.critical_density(redshift) * cosmo.Om(redshift)
)
return self.density_delta
@staticmethod
def A_NFW(y):
r"""
Dimensionless volume integral of the NFW profile, used as an intermediate step in some
calculations for this model.
Notes
-----
Model formula:
.. math:: A_{NFW} = [\ln(1+y) - \frac{y}{1+y}]
"""
return np.log(1.0 + y) - (y / (1.0 + y))
def _density_s(self, mass, concentration):
"""
Calculate scale density of the NFW profile.
"""
# Enforce default units
if not isinstance(mass, u.Quantity):
in_mass = u.Quantity(mass, u.M_sun)
else:
in_mass = mass
# Calculate scale density
# M_{200} = 4\pi \rho_{s} R_{s}^3 \left[\ln(1+c) - \frac{c}{1+c}\right].
self.density_s = in_mass / (
4.0
* np.pi
* self._radius_s(in_mass, concentration) ** 3
* self.A_NFW(concentration)
)
return self.density_s
@property
def rho_scale(self):
r"""
Scale density of the NFW profile. Often written in the literature as :math:`\rho_s`.
"""
return self.density_s
def _radius_s(self, mass, concentration):
"""
Calculate scale radius of the NFW profile.
"""
# Enforce default units
if not isinstance(mass, u.Quantity):
in_mass = u.Quantity(mass, u.M_sun)
else:
in_mass = mass
# Delta Mass is related to delta radius by
# M_{200}=\frac{4}{3}\pi r_{200}^3 200 \rho_{c}
# And delta radius is related to the NFW scale radius by
# c = R / r_{\\rm s}
self.radius_s = (
((3.0 * in_mass) / (4.0 * np.pi * self.density_delta)) ** (1.0 / 3.0)
) / concentration
# Set radial units to kiloparsec by default (unit will be rescaled by units of radius
# in evaluate)
return self.radius_s.to(u.kpc)
@property
def r_s(self):
"""
Scale radius of the NFW profile.
"""
return self.radius_s
@property
def r_virial(self):
"""
Mass factor defined virial radius of the NFW profile (R200c for M200c, Rvir for Mvir, etc.).
"""
return self.r_s * self.concentration
@property
def r_max(self):
"""
Radius of maximum circular velocity.
"""
return self.r_s * 2.16258
@property
def v_max(self):
"""
Maximum circular velocity.
"""
return self.circular_velocity(self.r_max)
def circular_velocity(self, r):
r"""
Circular velocities of the NFW profile.
Parameters
----------
r : float or `~astropy.units.Quantity` ['length']
Radial position of velocity to be calculated for the NFW profile.
Returns
-------
velocity : float or `~astropy.units.Quantity` ['speed']
NFW profile circular velocity at location ``r``. The velocity units are:
[km / s]
Notes
-----
Model formula:
.. math:: v_{circ}(r)^2 = \frac{1}{x}\frac{\ln(1+cx)-(cx)/(1+cx)}{\ln(1+c)-c/(1+c)}
.. math:: x = r/r_s
.. warning::
Output values might contain ``nan`` and ``inf``.
"""
# Enforce default units (if parameters are without units)
if hasattr(r, "unit"):
in_r = r
else:
in_r = u.Quantity(r, u.kpc)
        # Mass factor defined velocity (i.e. V200c for M200c, Vvir for Mvir)
v_profile = np.sqrt(
self.mass
* const.G.to(in_r.unit**3 / (self.mass.unit * u.s**2))
/ self.r_virial
)
# Define reduced radius (r / r_{\\rm s})
reduced_radius = in_r / self.r_virial.to(in_r.unit)
# Circular velocity given by:
# v^2=\frac{1}{x}\frac{\ln(1+cx)-(cx)/(1+cx)}{\ln(1+c)-c/(1+c)}
# where x=r/r_{200}
velocity = np.sqrt(
(v_profile**2 * self.A_NFW(self.concentration * reduced_radius))
/ (reduced_radius * self.A_NFW(self.concentration))
)
return velocity.to(u.km / u.s)
@property
def input_units(self):
# The units for the 'r' variable should be a length (default kpc)
return {self.inputs[0]: u.kpc}
@property
def return_units(self):
# The units for the 'density' variable should be a matter density (default M_sun / kpc^3)
if self.mass.unit is None:
return {self.outputs[0]: u.M_sun / self.input_units[self.inputs[0]] ** 3}
else:
return {
self.outputs[0]: self.mass.unit / self.input_units[self.inputs[0]] ** 3
}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"mass": u.M_sun, "concentration": None, "redshift": None}
c70b88229a4b555a17242e466cd1c59f451b46e8446f0f37d831f94d33435645
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Power law model variants.
"""
# pylint: disable=invalid-name
import numpy as np
from astropy.units import Magnitude, Quantity, UnitsError, dimensionless_unscaled, mag
from .core import Fittable1DModel
from .parameters import InputParameterError, Parameter
__all__ = [
"PowerLaw1D",
"BrokenPowerLaw1D",
"SmoothlyBrokenPowerLaw1D",
"ExponentialCutoffPowerLaw1D",
"LogParabola1D",
"Schechter1D",
]
class PowerLaw1D(Fittable1DModel):
"""
One dimensional power law model.
Parameters
----------
amplitude : float
Model amplitude at the reference point
x_0 : float
Reference point
alpha : float
Power law index
See Also
--------
BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D, LogParabola1D
Notes
-----
Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha` for ``alpha``):
.. math:: f(x) = A (x / x_0) ^ {-\\alpha}
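    Examples
    --------
    A minimal usage sketch; the parameter values below are arbitrary and only
    for illustration:
    >>> from astropy.modeling import models
    >>> pl = models.PowerLaw1D(amplitude=10.0, x_0=1.0, alpha=2.0)
    >>> y = pl([1.0, 2.0, 4.0])  # 10 * (x / x_0) ** -2 -> [10.0, 2.5, 0.625]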
"""
amplitude = Parameter(default=1, description="Peak value at the reference point")
x_0 = Parameter(default=1, description="Reference point")
alpha = Parameter(default=1, description="Power law index")
@staticmethod
def evaluate(x, amplitude, x_0, alpha):
"""One dimensional power law model function."""
xx = x / x_0
return amplitude * xx ** (-alpha)
@staticmethod
def fit_deriv(x, amplitude, x_0, alpha):
"""One dimensional power law derivative with respect to parameters."""
xx = x / x_0
d_amplitude = xx ** (-alpha)
d_x_0 = amplitude * alpha * d_amplitude / x_0
d_alpha = -amplitude * d_amplitude * np.log(xx)
return [d_amplitude, d_x_0, d_alpha]
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {self.inputs[0]: self.x_0.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class BrokenPowerLaw1D(Fittable1DModel):
"""
One dimensional power law model with a break.
Parameters
----------
amplitude : float
Model amplitude at the break point.
x_break : float
Break point.
alpha_1 : float
Power law index for x < x_break.
alpha_2 : float
Power law index for x > x_break.
See Also
--------
PowerLaw1D, ExponentialCutoffPowerLaw1D, LogParabola1D
Notes
-----
Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha_1`
for ``alpha_1`` and :math:`\\alpha_2` for ``alpha_2``):
.. math::
f(x) = \\left \\{
\\begin{array}{ll}
A (x / x_{break}) ^ {-\\alpha_1} & : x < x_{break} \\\\
A (x / x_{break}) ^ {-\\alpha_2} & : x > x_{break} \\\\
\\end{array}
\\right.
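    Examples
    --------
    A minimal usage sketch; the parameter values below are arbitrary and only
    for illustration:
    >>> from astropy.modeling import models
    >>> bpl = models.BrokenPowerLaw1D(amplitude=5.0, x_break=2.0,
    ...                               alpha_1=1.0, alpha_2=3.0)
    >>> y = bpl([1.0, 2.0, 4.0])  # power law index -1 below the break, -3 above it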
"""
amplitude = Parameter(default=1, description="Peak value at break point")
x_break = Parameter(default=1, description="Break point")
alpha_1 = Parameter(default=1, description="Power law index before break point")
alpha_2 = Parameter(default=1, description="Power law index after break point")
@staticmethod
def evaluate(x, amplitude, x_break, alpha_1, alpha_2):
"""One dimensional broken power law model function."""
alpha = np.where(x < x_break, alpha_1, alpha_2)
xx = x / x_break
return amplitude * xx ** (-alpha)
@staticmethod
def fit_deriv(x, amplitude, x_break, alpha_1, alpha_2):
"""One dimensional broken power law derivative with respect to parameters."""
alpha = np.where(x < x_break, alpha_1, alpha_2)
xx = x / x_break
d_amplitude = xx ** (-alpha)
d_x_break = amplitude * alpha * d_amplitude / x_break
d_alpha = -amplitude * d_amplitude * np.log(xx)
d_alpha_1 = np.where(x < x_break, d_alpha, 0)
d_alpha_2 = np.where(x >= x_break, d_alpha, 0)
return [d_amplitude, d_x_break, d_alpha_1, d_alpha_2]
@property
def input_units(self):
if self.x_break.input_unit is None:
return None
return {self.inputs[0]: self.x_break.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_break": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class SmoothlyBrokenPowerLaw1D(Fittable1DModel):
"""One dimensional smoothly broken power law model.
Parameters
----------
amplitude : float
Model amplitude at the break point.
x_break : float
Break point.
alpha_1 : float
Power law index for ``x << x_break``.
alpha_2 : float
Power law index for ``x >> x_break``.
delta : float
Smoothness parameter.
See Also
--------
BrokenPowerLaw1D
Notes
-----
Model formula (with :math:`A` for ``amplitude``, :math:`x_b` for
``x_break``, :math:`\\alpha_1` for ``alpha_1``,
:math:`\\alpha_2` for ``alpha_2`` and :math:`\\Delta` for
``delta``):
.. math::
f(x) = A \\left( \\frac{x}{x_b} \\right) ^ {-\\alpha_1}
\\left\\{
\\frac{1}{2}
\\left[
1 + \\left( \\frac{x}{x_b}\\right)^{1 / \\Delta}
\\right]
\\right\\}^{(\\alpha_1 - \\alpha_2) \\Delta}
The change of slope occurs between the values :math:`x_1`
and :math:`x_2` such that:
.. math::
\\log_{10} \\frac{x_2}{x_b} = \\log_{10} \\frac{x_b}{x_1}
\\sim \\Delta
At values :math:`x \\lesssim x_1` and :math:`x \\gtrsim x_2` the
model is approximately a simple power law with index
:math:`\\alpha_1` and :math:`\\alpha_2` respectively. The two
power laws are smoothly joined at values :math:`x_1 < x < x_2`,
hence the :math:`\\Delta` parameter sets the "smoothness" of the
slope change.
The ``delta`` parameter is bounded to values greater than 1e-3
(corresponding to :math:`x_2 / x_1 \\gtrsim 1.002`) to avoid
overflow errors.
The ``amplitude`` parameter is bounded to positive values since
this model is typically used to represent positive quantities.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling import models
x = np.logspace(0.7, 2.3, 500)
f = models.SmoothlyBrokenPowerLaw1D(amplitude=1, x_break=20,
alpha_1=-2, alpha_2=2)
plt.figure()
plt.title("amplitude=1, x_break=20, alpha_1=-2, alpha_2=2")
f.delta = 0.5
plt.loglog(x, f(x), '--', label='delta=0.5')
f.delta = 0.3
plt.loglog(x, f(x), '-.', label='delta=0.3')
f.delta = 0.1
plt.loglog(x, f(x), label='delta=0.1')
plt.axis([x.min(), x.max(), 0.1, 1.1])
plt.legend(loc='lower center')
plt.grid(True)
plt.show()
"""
amplitude = Parameter(
default=1, min=0, description="Peak value at break point", mag=True
)
x_break = Parameter(default=1, description="Break point")
alpha_1 = Parameter(default=-2, description="Power law index before break point")
alpha_2 = Parameter(default=2, description="Power law index after break point")
delta = Parameter(default=1, min=1.0e-3, description="Smoothness Parameter")
def _amplitude_validator(self, value):
if np.any(value <= 0):
raise InputParameterError("amplitude parameter must be > 0")
amplitude._validator = _amplitude_validator
def _delta_validator(self, value):
if np.any(value < 0.001):
raise InputParameterError("delta parameter must be >= 0.001")
delta._validator = _delta_validator
@staticmethod
def evaluate(x, amplitude, x_break, alpha_1, alpha_2, delta):
"""One dimensional smoothly broken power law model function."""
# Pre-calculate `x/x_b`
xx = x / x_break
# Initialize the return value
f = np.zeros_like(xx, subok=False)
if isinstance(amplitude, Quantity):
return_unit = amplitude.unit
amplitude = amplitude.value
else:
return_unit = None
# The quantity `t = (x / x_b)^(1 / delta)` can become quite
# large. To avoid overflow errors we will start by calculating
# its natural logarithm:
logt = np.log(xx) / delta
# When `t >> 1` or `t << 1` we don't actually need to compute
# the `t` value since the main formula (see docstring) can be
# significantly simplified by neglecting `1` or `t`
# respectively. In the following we will check whether `t` is
# much greater, much smaller, or comparable to 1 by comparing
# the `logt` value with an appropriate threshold.
threshold = 30 # corresponding to exp(30) ~ 1e13
i = logt > threshold
if i.max():
# In this case the main formula reduces to a simple power
# law with index `alpha_2`.
f[i] = (
amplitude * xx[i] ** (-alpha_2) / (2.0 ** ((alpha_1 - alpha_2) * delta))
)
i = logt < -threshold
if i.max():
# In this case the main formula reduces to a simple power
# law with index `alpha_1`.
f[i] = (
amplitude * xx[i] ** (-alpha_1) / (2.0 ** ((alpha_1 - alpha_2) * delta))
)
i = np.abs(logt) <= threshold
if i.max():
# In this case the `t` value is "comparable" to 1, hence we
# we will evaluate the whole formula.
t = np.exp(logt[i])
r = (1.0 + t) / 2.0
f[i] = amplitude * xx[i] ** (-alpha_1) * r ** ((alpha_1 - alpha_2) * delta)
if return_unit:
return Quantity(f, unit=return_unit, copy=False, subok=True)
return f
@staticmethod
def fit_deriv(x, amplitude, x_break, alpha_1, alpha_2, delta):
"""One dimensional smoothly broken power law derivative with respect
to parameters.
"""
# Pre-calculate `x_b` and `x/x_b` and `logt` (see comments in
# SmoothlyBrokenPowerLaw1D.evaluate)
xx = x / x_break
logt = np.log(xx) / delta
# Initialize the return values
f = np.zeros_like(xx)
d_amplitude = np.zeros_like(xx)
d_x_break = np.zeros_like(xx)
d_alpha_1 = np.zeros_like(xx)
d_alpha_2 = np.zeros_like(xx)
d_delta = np.zeros_like(xx)
threshold = 30 # (see comments in SmoothlyBrokenPowerLaw1D.evaluate)
i = logt > threshold
if i.max():
f[i] = (
amplitude * xx[i] ** (-alpha_2) / (2.0 ** ((alpha_1 - alpha_2) * delta))
)
d_amplitude[i] = f[i] / amplitude
d_x_break[i] = f[i] * alpha_2 / x_break
d_alpha_1[i] = f[i] * (-delta * np.log(2))
d_alpha_2[i] = f[i] * (-np.log(xx[i]) + delta * np.log(2))
d_delta[i] = f[i] * (-(alpha_1 - alpha_2) * np.log(2))
i = logt < -threshold
if i.max():
f[i] = (
amplitude * xx[i] ** (-alpha_1) / (2.0 ** ((alpha_1 - alpha_2) * delta))
)
d_amplitude[i] = f[i] / amplitude
d_x_break[i] = f[i] * alpha_1 / x_break
d_alpha_1[i] = f[i] * (-np.log(xx[i]) - delta * np.log(2))
d_alpha_2[i] = f[i] * delta * np.log(2)
d_delta[i] = f[i] * (-(alpha_1 - alpha_2) * np.log(2))
i = np.abs(logt) <= threshold
if i.max():
t = np.exp(logt[i])
r = (1.0 + t) / 2.0
f[i] = amplitude * xx[i] ** (-alpha_1) * r ** ((alpha_1 - alpha_2) * delta)
d_amplitude[i] = f[i] / amplitude
d_x_break[i] = (
f[i] * (alpha_1 - (alpha_1 - alpha_2) * t / 2.0 / r) / x_break
)
d_alpha_1[i] = f[i] * (-np.log(xx[i]) + delta * np.log(r))
d_alpha_2[i] = f[i] * (-delta * np.log(r))
d_delta[i] = (
f[i]
* (alpha_1 - alpha_2)
* (np.log(r) - t / (1.0 + t) / delta * np.log(xx[i]))
)
return [d_amplitude, d_x_break, d_alpha_1, d_alpha_2, d_delta]
@property
def input_units(self):
if self.x_break.input_unit is None:
return None
return {self.inputs[0]: self.x_break.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_break": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class ExponentialCutoffPowerLaw1D(Fittable1DModel):
"""
One dimensional power law model with an exponential cutoff.
Parameters
----------
amplitude : float
Model amplitude
x_0 : float
Reference point
alpha : float
Power law index
x_cutoff : float
Cutoff point
See Also
--------
PowerLaw1D, BrokenPowerLaw1D, LogParabola1D
Notes
-----
Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha` for ``alpha``):
.. math:: f(x) = A (x / x_0) ^ {-\\alpha} \\exp (-x / x_{cutoff})
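    Examples
    --------
    A minimal, illustrative evaluation (the parameter values below are
    arbitrary and only meant to show the call pattern)::
        import numpy as np
        from astropy.modeling import models
        m = models.ExponentialCutoffPowerLaw1D(amplitude=1, x_0=1,
                                               alpha=2, x_cutoff=10)
        y = m(np.logspace(-1, 2, 50))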
"""
amplitude = Parameter(default=1, description="Peak value of model")
x_0 = Parameter(default=1, description="Reference point")
alpha = Parameter(default=1, description="Power law index")
x_cutoff = Parameter(default=1, description="Cutoff point")
@staticmethod
def evaluate(x, amplitude, x_0, alpha, x_cutoff):
"""One dimensional exponential cutoff power law model function."""
xx = x / x_0
return amplitude * xx ** (-alpha) * np.exp(-x / x_cutoff)
@staticmethod
def fit_deriv(x, amplitude, x_0, alpha, x_cutoff):
"""
One dimensional exponential cutoff power law derivative with respect to parameters.
"""
xx = x / x_0
xc = x / x_cutoff
d_amplitude = xx ** (-alpha) * np.exp(-xc)
d_x_0 = alpha * amplitude * d_amplitude / x_0
d_alpha = -amplitude * d_amplitude * np.log(xx)
d_x_cutoff = amplitude * x * d_amplitude / x_cutoff**2
return [d_amplitude, d_x_0, d_alpha, d_x_cutoff]
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {self.inputs[0]: self.x_0.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"x_cutoff": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class LogParabola1D(Fittable1DModel):
"""
One dimensional log parabola model (sometimes called curved power law).
Parameters
----------
amplitude : float
Model amplitude
x_0 : float
Reference point
alpha : float
Power law index
beta : float
Power law curvature
See Also
--------
PowerLaw1D, BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D
Notes
-----
Model formula (with :math:`A` for ``amplitude`` and
:math:`\\alpha` for ``alpha`` and :math:`\\beta` for ``beta``):
.. math:: f(x) = A \\left(
\\frac{x}{x_{0}}\\right)^{- \\alpha - \\beta \\log{\\left (\\frac{x}{x_{0}}
\\right )}}
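    Examples
    --------
    A minimal, illustrative evaluation (the parameter values below are
    arbitrary and only meant to show the call pattern)::
        import numpy as np
        from astropy.modeling import models
        m = models.LogParabola1D(amplitude=1, x_0=1, alpha=2, beta=0.1)
        y = m(np.logspace(-1, 2, 50))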
"""
amplitude = Parameter(default=1, description="Peak value of model")
x_0 = Parameter(default=1, description="Reference point")
alpha = Parameter(default=1, description="Power law index")
beta = Parameter(default=0, description="Power law curvature")
@staticmethod
def evaluate(x, amplitude, x_0, alpha, beta):
"""One dimensional log parabola model function."""
xx = x / x_0
exponent = -alpha - beta * np.log(xx)
return amplitude * xx**exponent
@staticmethod
def fit_deriv(x, amplitude, x_0, alpha, beta):
"""One dimensional log parabola derivative with respect to parameters."""
xx = x / x_0
log_xx = np.log(xx)
exponent = -alpha - beta * log_xx
d_amplitude = xx**exponent
d_beta = -amplitude * d_amplitude * log_xx**2
d_x_0 = amplitude * d_amplitude * (beta * log_xx / x_0 - exponent / x_0)
d_alpha = -amplitude * d_amplitude * log_xx
return [d_amplitude, d_x_0, d_alpha, d_beta]
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {self.inputs[0]: self.x_0.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Schechter1D(Fittable1DModel):
r"""
Schechter luminosity function (`Schechter 1976
<https://ui.adsabs.harvard.edu/abs/1976ApJ...203..297S/abstract>`_),
parameterized in terms of magnitudes.
Parameters
----------
phi_star : float
The normalization factor in units of number density.
m_star : float
The characteristic magnitude where the power-law form of the
function cuts off.
alpha : float
The power law index, also known as the faint-end slope. Must not
have units.
See Also
--------
PowerLaw1D, ExponentialCutoffPowerLaw1D, BrokenPowerLaw1D
Notes
-----
Model formula (with :math:`\phi^{*}` for ``phi_star``, :math:`M^{*}`
for ``m_star``, and :math:`\alpha` for ``alpha``):
.. math::
n(M) \ dM = (0.4 \ln 10) \ \phi^{*} \
[{10^{0.4 (M^{*} - M)}}]^{\alpha + 1} \
\exp{[-10^{0.4 (M^{*} - M)}]} \ dM
``phi_star`` is the normalization factor in units of number density.
``m_star`` is the characteristic magnitude where the power-law form
of the function cuts off into the exponential form. ``alpha`` is
the power-law index, defining the faint-end slope of the luminosity
function.
Examples
--------
.. plot::
:include-source:
from astropy.modeling.models import Schechter1D
import astropy.units as u
import matplotlib.pyplot as plt
import numpy as np
phi_star = 4.3e-4 * (u.Mpc ** -3)
m_star = -20.26
alpha = -1.98
model = Schechter1D(phi_star, m_star, alpha)
mag = np.linspace(-25, -17)
fig, ax = plt.subplots()
ax.plot(mag, model(mag))
ax.set_yscale('log')
ax.set_xlim(-22.6, -17)
ax.set_ylim(1.e-7, 1.e-2)
ax.set_xlabel('$M_{UV}$')
ax.set_ylabel('$\phi$ [mag$^{-1}$ Mpc$^{-3}]$')
References
----------
.. [1] Schechter 1976; ApJ 203, 297
(https://ui.adsabs.harvard.edu/abs/1976ApJ...203..297S/abstract)
.. [2] `Luminosity function <https://en.wikipedia.org/wiki/Luminosity_function_(astronomy)>`_
"""
phi_star = Parameter(
default=1.0, description="Normalization factor in units of number density"
)
m_star = Parameter(default=-20.0, description="Characteristic magnitude", mag=True)
alpha = Parameter(default=-1.0, description="Faint-end slope")
@staticmethod
def _factor(magnitude, m_star):
factor_exp = magnitude - m_star
if isinstance(factor_exp, Quantity):
if factor_exp.unit == mag:
factor_exp = Magnitude(factor_exp.value, unit=mag)
return factor_exp.to(dimensionless_unscaled)
else:
raise UnitsError(
"The units of magnitude and m_star must be a magnitude"
)
else:
return 10 ** (-0.4 * factor_exp)
def evaluate(self, mag, phi_star, m_star, alpha):
"""Schechter luminosity function model function."""
factor = self._factor(mag, m_star)
return 0.4 * np.log(10) * phi_star * factor ** (alpha + 1) * np.exp(-factor)
def fit_deriv(self, mag, phi_star, m_star, alpha):
"""
Schechter luminosity function derivative with respect to
parameters.
"""
factor = self._factor(mag, m_star)
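        # d(factor)/d(m_star) = 0.4 * ln(10) * factor, so d_m_star below
        # combines (via the chain rule) the power-law and exponential pieces.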
d_phi_star = 0.4 * np.log(10) * factor ** (alpha + 1) * np.exp(-factor)
func = phi_star * d_phi_star
d_m_star = (alpha + 1) * 0.4 * np.log(10) * func - (
0.4 * np.log(10) * func * factor
)
d_alpha = func * np.log(factor)
return [d_phi_star, d_m_star, d_alpha]
@property
def input_units(self):
if self.m_star.input_unit is None:
return None
return {self.inputs[0]: self.m_star.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"m_star": inputs_unit[self.inputs[0]],
"phi_star": outputs_unit[self.outputs[0]],
}
|
77354657f28927c2822ced09527fc7eb1e99ac8d824b776b151e40268bbc0556 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Utilities for console input and output.
"""
import codecs
import locale
import math
import multiprocessing
import os
import re
import struct
import sys
import threading
import time
# concurrent.futures imports moved inside functions using them to avoid
# import failure when running in pyodide/Emscripten
try:
import fcntl
import signal
import termios
_CAN_RESIZE_TERMINAL = True
except ImportError:
_CAN_RESIZE_TERMINAL = False
from astropy import conf
from .decorators import classproperty
from .misc import isiterable
__all__ = [
"isatty",
"color_print",
"human_time",
"human_file_size",
"ProgressBar",
"Spinner",
"print_code_line",
"ProgressBarOrSpinner",
"terminal_size",
]
_DEFAULT_ENCODING = "utf-8"
class _IPython:
"""Singleton class given access to IPython streams, etc."""
@classproperty
def get_ipython(cls):
try:
from IPython import get_ipython
except ImportError:
pass
return get_ipython
@classproperty
def OutStream(cls):
if not hasattr(cls, "_OutStream"):
cls._OutStream = None
try:
cls.get_ipython()
except NameError:
return None
try:
from ipykernel.iostream import OutStream
except ImportError:
try:
from IPython.zmq.iostream import OutStream
except ImportError:
from IPython import version_info
if version_info[0] >= 4:
return None
try:
from IPython.kernel.zmq.iostream import OutStream
except ImportError:
return None
cls._OutStream = OutStream
return cls._OutStream
@classproperty
def ipyio(cls):
if not hasattr(cls, "_ipyio"):
try:
from IPython.utils import io
except ImportError:
cls._ipyio = None
else:
cls._ipyio = io
return cls._ipyio
@classmethod
def get_stream(cls, stream):
return getattr(cls.ipyio, stream)
def _get_stdout(stderr=False):
"""
This utility function contains the logic to determine what streams to use
by default for standard out/err.
    At present this simply returns ``sys.stdout`` (or ``sys.stderr`` when
    ``stderr=True`` is passed).
"""
if stderr:
stream = "stderr"
else:
stream = "stdout"
sys_stream = getattr(sys, stream)
return sys_stream
def isatty(file):
"""
Returns `True` if ``file`` is a tty.
Most built-in Python file-like objects have an `isatty` member,
but some user-defined types may not, so this assumes those are not
ttys.
"""
if (
multiprocessing.current_process().name != "MainProcess"
or threading.current_thread().name != "MainThread"
):
return False
if hasattr(file, "isatty"):
return file.isatty()
if _IPython.OutStream is None or (not isinstance(file, _IPython.OutStream)):
return False
# File is an IPython OutStream. Check whether:
# - File name is 'stdout'; or
# - File wraps a Console
if getattr(file, "name", None) == "stdout":
return True
if hasattr(file, "stream"):
        # FIXME: pyreadline has not had a new release since 2015, drop it when
# IPython minversion is 5.x.
# On Windows, in IPython 2 the standard I/O streams will wrap
# pyreadline.Console objects if pyreadline is available; this should
# be considered a TTY.
try:
from pyreadline.console import Console as PyreadlineConsole
except ImportError:
return False
return isinstance(file.stream, PyreadlineConsole)
return False
def terminal_size(file=None):
"""
Returns a tuple (height, width) containing the height and width of
the terminal.
    This function will look for the width and height in multiple places
before falling back on the width and height in astropy's
configuration.
"""
if file is None:
file = _get_stdout()
try:
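        # Ask the kernel for the window size: TIOCGWINSZ fills a struct
        # winsize of four unsigned shorts (rows, cols, xpixels, ypixels).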
s = struct.pack("HHHH", 0, 0, 0, 0)
x = fcntl.ioctl(file, termios.TIOCGWINSZ, s)
(lines, width, xpixels, ypixels) = struct.unpack("HHHH", x)
if lines > 12:
lines -= 6
if width > 10:
width -= 1
if lines <= 0 or width <= 0:
raise Exception("unable to get terminal size")
return (lines, width)
except Exception:
try:
# see if POSIX standard variables will work
return (int(os.environ.get("LINES")), int(os.environ.get("COLUMNS")))
except TypeError:
# fall back on configuration variables, or if not
# set, (25, 80)
lines = conf.max_lines
width = conf.max_width
if lines is None:
lines = 25
if width is None:
width = 80
return lines, width
def _color_text(text, color):
"""Returns a string wrapped in ANSI color codes for coloring the text in a terminal.
::
colored_text = color_text('Here is a message', 'blue')
    This won't actually affect the text until it is printed to the
terminal.
Parameters
----------
text : str
The string to return, bounded by the color codes.
color : str
An ANSI terminal color name. Must be one of:
black, red, green, brown, blue, magenta, cyan, lightgrey,
default, darkgrey, lightred, lightgreen, yellow, lightblue,
lightmagenta, lightcyan, white, or '' (the empty string).
"""
color_mapping = {
"black": "0;30",
"red": "0;31",
"green": "0;32",
"brown": "0;33",
"blue": "0;34",
"magenta": "0;35",
"cyan": "0;36",
"lightgrey": "0;37",
"default": "0;39",
"darkgrey": "1;30",
"lightred": "1;31",
"lightgreen": "1;32",
"yellow": "1;33",
"lightblue": "1;34",
"lightmagenta": "1;35",
"lightcyan": "1;36",
"white": "1;37",
}
if sys.platform == "win32" and _IPython.OutStream is None:
# On Windows do not colorize text unless in IPython
return text
color_code = color_mapping.get(color, "0;39")
return f"\033[{color_code}m{text}\033[0m"
def _decode_preferred_encoding(s):
"""Decode the supplied byte string using the preferred encoding
for the locale (`locale.getpreferredencoding`) or, if the default encoding
is invalid, fall back first on utf-8, then on latin-1 if the message cannot
be decoded with utf-8.
"""
enc = locale.getpreferredencoding()
try:
try:
return s.decode(enc)
except LookupError:
enc = _DEFAULT_ENCODING
return s.decode(enc)
except UnicodeDecodeError:
return s.decode("latin-1")
def _write_with_fallback(s, write, fileobj):
"""Write the supplied string with the given write function like
``write(s)``, but use a writer for the locale's preferred encoding in case
of a UnicodeEncodeError. Failing that attempt to write with 'utf-8' or
'latin-1'.
"""
try:
write(s)
return write
except UnicodeEncodeError:
# Let's try the next approach...
pass
enc = locale.getpreferredencoding()
try:
Writer = codecs.getwriter(enc)
except LookupError:
Writer = codecs.getwriter(_DEFAULT_ENCODING)
f = Writer(fileobj)
write = f.write
try:
write(s)
return write
except UnicodeEncodeError:
Writer = codecs.getwriter("latin-1")
f = Writer(fileobj)
write = f.write
# If this doesn't work let the exception bubble up; I'm out of ideas
write(s)
return write
def color_print(*args, end="\n", **kwargs):
"""
    Prints colors and styles to the terminal using ANSI escape
sequences.
::
color_print('This is the color ', 'default', 'GREEN', 'green')
Parameters
----------
positional args : str
The positional arguments come in pairs (*msg*, *color*), where
*msg* is the string to display and *color* is the color to
display it in.
*color* is an ANSI terminal color name. Must be one of:
black, red, green, brown, blue, magenta, cyan, lightgrey,
default, darkgrey, lightred, lightgreen, yellow, lightblue,
lightmagenta, lightcyan, white, or '' (the empty string).
file : writable file-like, optional
Where to write to. Defaults to `sys.stdout`. If file is not
a tty (as determined by calling its `isatty` member, if one
exists), no coloring will be included.
end : str, optional
The ending of the message. Defaults to ``\\n``. The end will
be printed after resetting any color or font state.
"""
file = kwargs.get("file", _get_stdout())
write = file.write
if isatty(file) and conf.use_color:
for i in range(0, len(args), 2):
msg = args[i]
if i + 1 == len(args):
color = ""
else:
color = args[i + 1]
if color:
msg = _color_text(msg, color)
# Some file objects support writing unicode sensibly on some Python
# versions; if this fails try creating a writer using the locale's
# preferred encoding. If that fails too give up.
write = _write_with_fallback(msg, write, file)
write(end)
else:
for i in range(0, len(args), 2):
msg = args[i]
write(msg)
write(end)
def strip_ansi_codes(s):
"""
Remove ANSI color codes from the string.
"""
return re.sub("\033\\[([0-9]+)(;[0-9]+)*m", "", s)
def human_time(seconds):
"""
Returns a human-friendly time string that is always exactly 6
characters long.
Depending on the number of seconds given, can be one of::
1w 3d
2d 4h
1h 5m
1m 4s
15s
Will be in color if console coloring is turned on.
Parameters
----------
seconds : int
The number of seconds to represent
Returns
-------
time : str
A human-friendly representation of the given number of seconds
that is always exactly 6 characters.
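    Examples
    --------
    Illustrative value::
        >>> human_time(70)
        ' 1m10s'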
"""
units = [
("y", 60 * 60 * 24 * 7 * 52),
("w", 60 * 60 * 24 * 7),
("d", 60 * 60 * 24),
("h", 60 * 60),
("m", 60),
("s", 1),
]
seconds = int(seconds)
if seconds < 60:
        return f"   {seconds:2d}s"
for i in range(len(units) - 1):
unit1, limit1 = units[i]
unit2, limit2 = units[i + 1]
if seconds >= limit1:
return "{:2d}{}{:2d}{}".format(
seconds // limit1, unit1, (seconds % limit1) // limit2, unit2
)
    return "  ~inf"
def human_file_size(size):
"""
Returns a human-friendly string representing a file size
that is 2-4 characters long.
For example, depending on the number of bytes given, can be one
of::
256b
64k
1.1G
Parameters
----------
size : int
The size of the file (in bytes)
Returns
-------
size : str
A human-friendly representation of the size of the file
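    Examples
    --------
    Illustrative value::
        >>> human_file_size(1500000)
        '1.5M'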
"""
if hasattr(size, "unit"):
# Import units only if necessary because the import takes a
# significant time [#4649]
from astropy import units as u
size = u.Quantity(size, u.byte).value
suffixes = " kMGTPEZY"
if size == 0:
num_scale = 0
else:
num_scale = int(math.floor(math.log(size) / math.log(1000)))
if num_scale > 7:
suffix = "?"
else:
suffix = suffixes[num_scale]
num_scale = int(math.pow(1000, num_scale))
value = size / num_scale
str_value = str(value)
if suffix == " ":
str_value = str_value[: str_value.index(".")]
elif str_value[2] == ".":
str_value = str_value[:2]
else:
str_value = str_value[:3]
return f"{str_value:>3s}{suffix}"
class _mapfunc:
"""
A function wrapper to support ProgressBar.map().
"""
def __init__(self, func):
self._func = func
def __call__(self, i_arg):
i, arg = i_arg
return i, self._func(arg)
class ProgressBar:
"""
A class to display a progress bar in the terminal.
It is designed to be used either with the ``with`` statement::
with ProgressBar(len(items)) as bar:
for item in enumerate(items):
bar.update()
or as a generator::
for item in ProgressBar(items):
item.process()
"""
def __init__(self, total_or_items, ipython_widget=False, file=None):
"""
Parameters
----------
total_or_items : int or sequence
If an int, the number of increments in the process being
tracked. If a sequence, the items to iterate over.
ipython_widget : bool, optional
If `True`, the progress bar will display as an IPython
notebook widget.
file : writable file-like, optional
The file to write the progress bar to. Defaults to
`sys.stdout`. If ``file`` is not a tty (as determined by
calling its `isatty` member, if any, or special case hacks
to detect the IPython console), the progress bar will be
completely silent.
"""
if file is None:
file = _get_stdout()
if not ipython_widget and not isatty(file):
self.update = self._silent_update
self._silent = True
else:
self._silent = False
if isiterable(total_or_items):
self._items = iter(total_or_items)
self._total = len(total_or_items)
else:
try:
self._total = int(total_or_items)
except TypeError:
raise TypeError("First argument must be int or sequence")
else:
self._items = iter(range(self._total))
self._file = file
self._start_time = time.time()
self._human_total = human_file_size(self._total)
self._ipython_widget = ipython_widget
self._signal_set = False
if not ipython_widget:
self._should_handle_resize = _CAN_RESIZE_TERMINAL and self._file.isatty()
self._handle_resize()
if self._should_handle_resize:
signal.signal(signal.SIGWINCH, self._handle_resize)
self._signal_set = True
self.update(0)
def _handle_resize(self, signum=None, frame=None):
terminal_width = terminal_size(self._file)[1]
self._bar_length = terminal_width - 37
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if not self._silent:
if exc_type is None:
self.update(self._total)
self._file.write("\n")
self._file.flush()
if self._signal_set:
signal.signal(signal.SIGWINCH, signal.SIG_DFL)
def __iter__(self):
return self
def __next__(self):
try:
rv = next(self._items)
except StopIteration:
self.__exit__(None, None, None)
raise
else:
self.update()
return rv
def update(self, value=None):
"""
Update progress bar via the console or notebook accordingly.
"""
# Update self.value
if value is None:
value = self._current_value + 1
self._current_value = value
# Choose the appropriate environment
if self._ipython_widget:
self._update_ipython_widget(value)
else:
self._update_console(value)
def _update_console(self, value=None):
"""
Update the progress bar to the given value (out of the total
given to the constructor).
"""
if self._total == 0:
frac = 1.0
else:
frac = float(value) / float(self._total)
file = self._file
write = file.write
if frac > 1:
bar_fill = int(self._bar_length)
else:
bar_fill = int(float(self._bar_length) * frac)
write("\r|")
color_print("=" * bar_fill, "blue", file=file, end="")
if bar_fill < self._bar_length:
color_print(">", "green", file=file, end="")
write("-" * (self._bar_length - bar_fill - 1))
write("|")
if value >= self._total:
t = time.time() - self._start_time
prefix = " "
elif value <= 0:
t = None
prefix = ""
else:
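            # Estimate the remaining time by scaling the elapsed time by the
            # ratio of work left to work done.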
t = ((time.time() - self._start_time) * (1.0 - frac)) / frac
prefix = " ETA "
write(f" {human_file_size(value):>4s}/{self._human_total:>4s}")
write(f" ({frac:>6.2%})")
write(prefix)
if t is not None:
write(human_time(t))
self._file.flush()
def _update_ipython_widget(self, value=None):
"""
Update the progress bar to the given value (out of a total
given to the constructor).
This method is for use in the IPython notebook 2+.
"""
# Create and display an empty progress bar widget,
# if none exists.
if not hasattr(self, "_widget"):
            # Import only if an IPython widget, i.e., widget in IPython NB
from IPython import version_info
if version_info[0] < 4:
from IPython.html import widgets
self._widget = widgets.FloatProgressWidget()
else:
_IPython.get_ipython()
from ipywidgets import widgets
self._widget = widgets.FloatProgress()
from IPython.display import display
display(self._widget)
self._widget.value = 0
# Calculate percent completion, and update progress bar
frac = value / self._total
self._widget.value = frac * 100
self._widget.description = f" ({frac:>6.2%})"
def _silent_update(self, value=None):
pass
@classmethod
def map(
cls,
function,
items,
multiprocess=False,
file=None,
step=100,
ipython_widget=False,
multiprocessing_start_method=None,
):
"""Map function over items while displaying a progress bar with percentage complete.
The map operation may run in arbitrary order on the items, but the results are
returned in sequential order.
::
def work(i):
print(i)
ProgressBar.map(work, range(50))
Parameters
----------
function : function
Function to call for each step
items : sequence
Sequence where each element is a tuple of arguments to pass to
*function*.
multiprocess : bool, int, optional
If `True`, use the `multiprocessing` module to distribute each task
to a different processor core. If a number greater than 1, then use
that number of cores.
ipython_widget : bool, optional
If `True`, the progress bar will display as an IPython
notebook widget.
file : writable file-like, optional
The file to write the progress bar to. Defaults to
`sys.stdout`. If ``file`` is not a tty (as determined by
            calling its `isatty` member, if any), the progress bar will
be completely silent.
step : int, optional
Update the progress bar at least every *step* steps (default: 100).
If ``multiprocess`` is `True`, this will affect the size
of the chunks of ``items`` that are submitted as separate tasks
to the process pool. A large step size may make the job
complete faster if ``items`` is very long.
multiprocessing_start_method : str, optional
Useful primarily for testing; if in doubt leave it as the default.
When using multiprocessing, certain anomalies occur when starting
processes with the "spawn" method (the only option on Windows);
other anomalies occur with the "fork" method (the default on
Linux).
"""
if multiprocess:
function = _mapfunc(function)
items = list(enumerate(items))
results = cls.map_unordered(
function,
items,
multiprocess=multiprocess,
file=file,
step=step,
ipython_widget=ipython_widget,
multiprocessing_start_method=multiprocessing_start_method,
)
if multiprocess:
_, results = zip(*sorted(results))
results = list(results)
return results
@classmethod
def map_unordered(
cls,
function,
items,
multiprocess=False,
file=None,
step=100,
ipython_widget=False,
multiprocessing_start_method=None,
):
"""Map function over items, reporting the progress.
Does a `map` operation while displaying a progress bar with
percentage complete. The map operation may run on arbitrary order
on the items, and the results may be returned in arbitrary order.
::
def work(i):
print(i)
            ProgressBar.map_unordered(work, range(50))
Parameters
----------
function : function
Function to call for each step
items : sequence
Sequence where each element is a tuple of arguments to pass to
*function*.
multiprocess : bool, int, optional
If `True`, use the `multiprocessing` module to distribute each task
to a different processor core. If a number greater than 1, then use
that number of cores.
ipython_widget : bool, optional
If `True`, the progress bar will display as an IPython
notebook widget.
file : writable file-like, optional
The file to write the progress bar to. Defaults to
`sys.stdout`. If ``file`` is not a tty (as determined by
            calling its `isatty` member, if any), the progress bar will
be completely silent.
step : int, optional
Update the progress bar at least every *step* steps (default: 100).
If ``multiprocess`` is `True`, this will affect the size
of the chunks of ``items`` that are submitted as separate tasks
to the process pool. A large step size may make the job
complete faster if ``items`` is very long.
multiprocessing_start_method : str, optional
Useful primarily for testing; if in doubt leave it as the default.
When using multiprocessing, certain anomalies occur when starting
processes with the "spawn" method (the only option on Windows);
other anomalies occur with the "fork" method (the default on
Linux).
"""
# concurrent.futures import here to avoid import failure when running
# in pyodide/Emscripten
from concurrent.futures import ProcessPoolExecutor, as_completed
results = []
if file is None:
file = _get_stdout()
with cls(len(items), ipython_widget=ipython_widget, file=file) as bar:
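            # Decide how many items to process between bar updates: at most
            # ``step`` items, and (in the console case) at most one bar
            # character's worth of items, so the bar advances smoothly.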
if bar._ipython_widget:
chunksize = step
else:
default_step = max(int(float(len(items)) / bar._bar_length), 1)
chunksize = min(default_step, step)
if not multiprocess or multiprocess < 1:
for i, item in enumerate(items):
results.append(function(item))
if (i % chunksize) == 0:
bar.update(i)
else:
ctx = multiprocessing.get_context(multiprocessing_start_method)
kwargs = dict(mp_context=ctx)
with ProcessPoolExecutor(
max_workers=(
int(multiprocess) if multiprocess is not True else None
),
**kwargs,
) as p:
for i, f in enumerate(
as_completed(p.submit(function, item) for item in items)
):
bar.update(i)
results.append(f.result())
return results
class Spinner:
"""
A class to display a spinner in the terminal.
It is designed to be used with the ``with`` statement::
with Spinner("Reticulating splines", "green") as s:
for item in enumerate(items):
s.update()
"""
_default_unicode_chars = "◓◑◒◐"
_default_ascii_chars = "-/|\\"
def __init__(self, msg, color="default", file=None, step=1, chars=None):
"""
Parameters
----------
msg : str
The message to print
color : str, optional
An ANSI terminal color name. Must be one of: black, red,
green, brown, blue, magenta, cyan, lightgrey, default,
darkgrey, lightred, lightgreen, yellow, lightblue,
lightmagenta, lightcyan, white.
file : writable file-like, optional
The file to write the spinner to. Defaults to
`sys.stdout`. If ``file`` is not a tty (as determined by
calling its `isatty` member, if any, or special case hacks
to detect the IPython console), the spinner will be
completely silent.
step : int, optional
Only update the spinner every *step* steps
chars : str, optional
The character sequence to use for the spinner
"""
if file is None:
file = _get_stdout()
self._msg = msg
self._color = color
self._file = file
self._step = step
if chars is None:
if conf.unicode_output:
chars = self._default_unicode_chars
else:
chars = self._default_ascii_chars
self._chars = chars
self._silent = not isatty(file)
if self._silent:
self._iter = self._silent_iterator()
else:
self._iter = self._iterator()
def _iterator(self):
chars = self._chars
index = 0
file = self._file
write = file.write
flush = file.flush
try_fallback = True
while True:
write("\r")
color_print(self._msg, self._color, file=file, end="")
write(" ")
try:
if try_fallback:
write = _write_with_fallback(chars[index], write, file)
else:
write(chars[index])
except UnicodeError:
# If even _write_with_fallback failed for any reason just give
# up on trying to use the unicode characters
chars = self._default_ascii_chars
write(chars[index])
try_fallback = False # No good will come of using this again
flush()
yield
for i in range(self._step):
yield
index = (index + 1) % len(chars)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
file = self._file
write = file.write
flush = file.flush
if not self._silent:
write("\r")
color_print(self._msg, self._color, file=file, end="")
if exc_type is None:
color_print(" [Done]", "green", file=file)
else:
color_print(" [Failed]", "red", file=file)
flush()
def __iter__(self):
return self
def __next__(self):
next(self._iter)
def update(self, value=None):
"""Update the spin wheel in the terminal.
Parameters
----------
value : int, optional
Ignored (present just for compatibility with `ProgressBar.update`).
"""
next(self)
def _silent_iterator(self):
color_print(self._msg, self._color, file=self._file, end="")
self._file.flush()
while True:
yield
class ProgressBarOrSpinner:
"""
A class that displays either a `ProgressBar` or `Spinner`
depending on whether the total size of the operation is
known or not.
It is designed to be used with the ``with`` statement::
if file.has_length():
length = file.get_length()
else:
length = None
bytes_read = 0
with ProgressBarOrSpinner(length) as bar:
while file.read(blocksize):
bytes_read += blocksize
bar.update(bytes_read)
"""
def __init__(self, total, msg, color="default", file=None):
"""
Parameters
----------
total : int or None
If an int, the number of increments in the process being
tracked and a `ProgressBar` is displayed. If `None`, a
`Spinner` is displayed.
msg : str
The message to display above the `ProgressBar` or
alongside the `Spinner`.
color : str, optional
The color of ``msg``, if any. Must be an ANSI terminal
color name. Must be one of: black, red, green, brown,
blue, magenta, cyan, lightgrey, default, darkgrey,
lightred, lightgreen, yellow, lightblue, lightmagenta,
lightcyan, white.
file : writable file-like, optional
The file to write the to. Defaults to `sys.stdout`. If
``file`` is not a tty (as determined by calling its `isatty`
member, if any), only ``msg`` will be displayed: the
`ProgressBar` or `Spinner` will be silent.
"""
if file is None:
file = _get_stdout()
if total is None or not isatty(file):
self._is_spinner = True
self._obj = Spinner(msg, color=color, file=file)
else:
self._is_spinner = False
color_print(msg, color, file=file)
self._obj = ProgressBar(total, file=file)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
return self._obj.__exit__(exc_type, exc_value, traceback)
def update(self, value):
"""
Update the progress bar to the given value (out of the total
        given to the constructor).
"""
self._obj.update(value)
def print_code_line(line, col=None, file=None, tabwidth=8, width=70):
"""
Prints a line of source code, highlighting a particular character
position in the line. Useful for displaying the context of error
messages.
If the line is more than ``width`` characters, the line is truncated
accordingly and '…' characters are inserted at the front and/or
end.
It looks like this::
there_is_a_syntax_error_here :
^
Parameters
----------
line : unicode
The line of code to display
col : int, optional
The character in the line to highlight. ``col`` must be less
than ``len(line)``.
file : writable file-like, optional
Where to write to. Defaults to `sys.stdout`.
tabwidth : int, optional
The number of spaces per tab (``'\\t'``) character. Default
is 8. All tabs will be converted to spaces to ensure that the
caret lines up with the correct column.
width : int, optional
The width of the display, beyond which the line will be
truncated. Defaults to 70 (this matches the default in the
standard library's `textwrap` module).
"""
if file is None:
file = _get_stdout()
if conf.unicode_output:
ellipsis = "…"
else:
ellipsis = "..."
write = file.write
if col is not None:
if col >= len(line):
            raise ValueError("col must be less than the line length.")
ntabs = line[:col].count("\t")
col += ntabs * (tabwidth - 1)
line = line.rstrip("\n")
line = line.replace("\t", " " * tabwidth)
if col is not None and col > width:
new_col = min(width // 2, len(line) - col)
offset = col - new_col
line = line[offset + len(ellipsis) :]
width -= len(ellipsis)
new_col = col
col -= offset
color_print(ellipsis, "darkgrey", file=file, end="")
if len(line) > width:
write(line[: width - len(ellipsis)])
color_print(ellipsis, "darkgrey", file=file)
else:
write(line)
write("\n")
if col is not None:
write(" " * col)
color_print("^", "red", file=file)
# The following four Getch* classes implement unbuffered character reading from
# stdin on Windows, linux, MacOSX. This is taken directly from ActiveState
# Code Recipes:
# http://code.activestate.com/recipes/134892-getch-like-unbuffered-character-reading-from-stdin/
#
class Getch:
"""Get a single character from standard input without screen echo.
Returns
-------
char : str (one character)
"""
def __init__(self):
try:
self.impl = _GetchWindows()
except ImportError:
try:
self.impl = _GetchMacCarbon()
except (ImportError, AttributeError):
self.impl = _GetchUnix()
def __call__(self):
return self.impl()
class _GetchUnix:
def __init__(self):
import sys # noqa: F401
# import termios now or else you'll get the Unix
# version on the Mac
import termios # noqa: F401
import tty # noqa: F401
def __call__(self):
import sys
import termios
import tty
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class _GetchWindows:
def __init__(self):
import msvcrt # noqa: F401
def __call__(self):
import msvcrt
return msvcrt.getch()
class _GetchMacCarbon:
"""
A function which returns the current ASCII key that is down;
if no ASCII key is down, the null string is returned. The
page http://www.mactech.com/macintosh-c/chap02-1.html was
very helpful in figuring out how to do this.
"""
def __init__(self):
import Carbon
Carbon.Evt # noqa: B018 # see if it has this (in Unix, it doesn't)
def __call__(self):
import Carbon
if Carbon.Evt.EventAvail(0x0008)[0] == 0: # 0x0008 is the keyDownMask
return ""
else:
#
# The event contains the following info:
# (what,msg,when,where,mod)=Carbon.Evt.GetNextEvent(0x0008)[1]
#
# The message (msg) contains the ASCII char which is
# extracted with the 0x000000FF charCodeMask; this
# number is converted to an ASCII character with chr() and
# returned
#
(what, msg, when, where, mod) = Carbon.Evt.GetNextEvent(0x0008)[1]
return chr(msg & 0x000000FF)
|
b2f939614fac52acb7b3b98dc70c94f61cfe02d15acab7486239b25d610c8513 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Functions for accessing, downloading, and caching data files."""
import atexit
import contextlib
import errno
import fnmatch
import ftplib
import functools
import hashlib
import io
import os
import re
import shutil
# import ssl moved inside functions using ssl to avoid import failure
# when running in pyodide/Emscripten
import sys
import urllib.error
import urllib.parse
import urllib.request
import zipfile
from tempfile import NamedTemporaryFile, TemporaryDirectory, gettempdir, mkdtemp
from warnings import warn
try:
import certifi
except ImportError:
# certifi support is optional; when available it will be used for TLS/SSL
# downloads
certifi = None
import astropy.config.paths
from astropy import config as _config
from astropy.utils.compat.optional_deps import HAS_FSSPEC
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.introspection import find_current_module, resolve_name
# Order here determines order in the autosummary
__all__ = [
"Conf",
"conf",
"download_file",
"download_files_in_parallel",
"get_readable_fileobj",
"get_pkg_data_fileobj",
"get_pkg_data_filename",
"get_pkg_data_contents",
"get_pkg_data_fileobjs",
"get_pkg_data_filenames",
"get_pkg_data_path",
"is_url",
"is_url_in_cache",
"get_cached_urls",
"cache_total_size",
"cache_contents",
"export_download_cache",
"import_download_cache",
"import_file_to_cache",
"check_download_cache",
"clear_download_cache",
"compute_hash",
"get_free_space_in_dir",
"check_free_space_in_dir",
"get_file_contents",
"CacheMissingWarning",
"CacheDamaged",
]
_dataurls_to_alias = {}
class _NonClosingBufferedReader(io.BufferedReader):
def __del__(self):
try:
# NOTE: self.raw will not be closed, but left in the state
            # it was in at detachment
self.detach()
except Exception:
pass
class _NonClosingTextIOWrapper(io.TextIOWrapper):
def __del__(self):
try:
# NOTE: self.stream will not be closed, but left in the state
            # it was in at detachment
self.detach()
except Exception:
pass
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.utils.data`.
"""
dataurl = _config.ConfigItem(
"http://data.astropy.org/", "Primary URL for astropy remote data site."
)
dataurl_mirror = _config.ConfigItem(
"http://www.astropy.org/astropy-data/",
"Mirror URL for astropy remote data site.",
)
    default_http_user_agent = _config.ConfigItem(
        "astropy",
        "Default User-Agent for HTTP request headers. This can be overwritten "
        "for a particular call via http_headers option, where available. "
        "This only provides the default value when not set by http_headers.",
    )
remote_timeout = _config.ConfigItem(
10.0,
"Time to wait for remote data queries (in seconds).",
aliases=["astropy.coordinates.name_resolve.name_resolve_timeout"],
)
allow_internet = _config.ConfigItem(
True, "If False, prevents any attempt to download from Internet."
)
compute_hash_block_size = _config.ConfigItem(
2**16, "Block size for computing file hashes." # 64K
)
download_block_size = _config.ConfigItem(
2**16, "Number of bytes of remote data to download per step." # 64K
)
delete_temporary_downloads_at_exit = _config.ConfigItem(
True,
"If True, temporary download files created when the cache is "
"inaccessible will be deleted at the end of the python session.",
)
conf = Conf()
class CacheMissingWarning(AstropyWarning):
"""
This warning indicates the standard cache directory is not accessible, with
the first argument providing the warning message. If args[1] is present, it
is a filename indicating the path to a temporary file that was created to
store a remote data download in the absence of the cache.
"""
def is_url(string):
"""
Test whether a string is a valid URL for :func:`download_file`.
Parameters
----------
string : str
The string to test.
Returns
-------
status : bool
String is URL or not.
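    Examples
    --------
    Illustrative checks::
        >>> is_url("https://data.astropy.org/")
        True
        >>> is_url("local_file.fits")
        False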
"""
url = urllib.parse.urlparse(string)
# we can't just check that url.scheme is not an empty string, because
# file paths in windows would return a non-empty scheme (e.g. e:\\
# returns 'e').
return url.scheme.lower() in ["http", "https", "ftp", "sftp", "ssh", "file"]
# Backward compatibility because some downstream packages allegedly uses it.
_is_url = is_url
def _requires_fsspec(url):
"""Does the `url` require the optional ``fsspec`` dependency to open?"""
return isinstance(url, str) and url.startswith(("s3://", "gs://"))
def _is_inside(path, parent_path):
# We have to try realpath too to avoid issues with symlinks, but we leave
# abspath because some systems like debian have the absolute path (with no
# symlinks followed) match, but the real directories in different
# locations, so need to try both cases.
return os.path.abspath(path).startswith(
os.path.abspath(parent_path)
) or os.path.realpath(path).startswith(os.path.realpath(parent_path))
@contextlib.contextmanager
def get_readable_fileobj(
name_or_obj,
encoding=None,
cache=False,
show_progress=True,
remote_timeout=None,
sources=None,
http_headers=None,
*,
use_fsspec=None,
fsspec_kwargs=None,
close_files=True,
):
"""Yield a readable, seekable file-like object from a file or URL.
This supports passing filenames, URLs, and readable file-like objects,
any of which can be compressed in gzip, bzip2 or lzma (xz) if the
appropriate compression libraries are provided by the Python installation.
Notes
-----
This function is a context manager, and should be used for example
as::
with get_readable_fileobj('file.dat') as f:
contents = f.read()
If a URL is provided and the cache is in use, the provided URL will be the
name used in the cache. The contents may already be stored in the cache
under this URL provided, they may be downloaded from this URL, or they may
be downloaded from one of the locations listed in ``sources``. See
`~download_file` for details.
Parameters
----------
name_or_obj : str or file-like
The filename of the file to access (if given as a string), or
the file-like object to access.
If a file-like object, it must be opened in binary mode.
encoding : str, optional
When `None` (default), returns a file-like object with a
``read`` method that returns `str` (``unicode``) objects, using
`locale.getpreferredencoding` as an encoding. This matches
the default behavior of the built-in `open` when no ``mode``
argument is provided.
When ``'binary'``, returns a file-like object where its ``read``
method returns `bytes` objects.
When another string, it is the name of an encoding, and the
file-like object's ``read`` method will return `str` (``unicode``)
objects, decoded from binary using the given encoding.
cache : bool or "update", optional
Whether to cache the contents of remote URLs. If "update",
check the remote URL for a new version but store the result
in the cache.
show_progress : bool, optional
Whether to display a progress bar if the file is downloaded
from a remote server. Default is `True`.
remote_timeout : float
Timeout for remote requests in seconds (default is the configurable
`astropy.utils.data.Conf.remote_timeout`).
sources : list of str, optional
If provided, a list of URLs to try to obtain the file from. The
result will be stored under the original URL. The original URL
will *not* be tried unless it is in this list; this is to prevent
long waits for a primary server that is known to be inaccessible
at the moment.
http_headers : dict or None
HTTP request headers to pass into ``urlopen`` if needed. (These headers
are ignored if the protocol for the ``name_or_obj``/``sources`` entry
is not a remote HTTP URL.) In the default case (None), the headers are
``User-Agent: some_value`` and ``Accept: */*``, where ``some_value``
is set by ``astropy.utils.data.conf.default_http_user_agent``.
use_fsspec : bool, optional
Use `fsspec.open` to open the file? Defaults to `False` unless
``name_or_obj`` starts with the Amazon S3 storage prefix ``s3://``
or the Google Cloud Storage prefix ``gs://``. Can also be used for paths
with other prefixes (e.g. ``http://``) but in this case you must
explicitly pass ``use_fsspec=True``.
Use of this feature requires the optional ``fsspec`` package.
A ``ModuleNotFoundError`` will be raised if the dependency is missing.
.. versionadded:: 5.2
fsspec_kwargs : dict, optional
Keyword arguments passed on to `fsspec.open`. This can be used to
configure cloud storage credentials and caching behavior.
For example, pass ``fsspec_kwargs={"anon": True}`` to enable
anonymous access to Amazon S3 open data buckets.
See ``fsspec``'s documentation for available parameters.
.. versionadded:: 5.2
close_files : bool, optional
Close the file object when exiting the context manager.
Default is `True`.
.. versionadded:: 5.2
Returns
-------
file : readable file-like
"""
# close_fds is a list of file handles created by this function
# that need to be closed. We don't want to always just close the
# returned file handle, because it may simply be the file handle
# passed in. In that case it is not the responsibility of this
# function to close it: doing so could result in a "double close"
# and an "invalid file descriptor" exception.
close_fds = []
delete_fds = []
if remote_timeout is None:
# use configfile default
remote_timeout = conf.remote_timeout
# Have `use_fsspec` default to ``True`` if the user passed an Amazon S3
# or Google Cloud Storage URI.
if use_fsspec is None and _requires_fsspec(name_or_obj):
use_fsspec = True
if use_fsspec:
if not isinstance(name_or_obj, str):
raise TypeError("`name_or_obj` must be a string when `use_fsspec=True`")
if fsspec_kwargs is None:
fsspec_kwargs = {}
# name_or_obj could be an os.PathLike object
if isinstance(name_or_obj, os.PathLike):
name_or_obj = os.fspath(name_or_obj)
# Get a file object to the content
if isinstance(name_or_obj, str):
# Use fsspec to open certain cloud-hosted files (e.g., AWS S3, Google Cloud Storage)
if use_fsspec:
if not HAS_FSSPEC:
raise ModuleNotFoundError("please install `fsspec` to open this file")
import fsspec # local import because it is a niche dependency
openfileobj = fsspec.open(name_or_obj, **fsspec_kwargs)
close_fds.append(openfileobj)
fileobj = openfileobj.open()
close_fds.append(fileobj)
else:
is_url = _is_url(name_or_obj)
if is_url:
name_or_obj = download_file(
name_or_obj,
cache=cache,
show_progress=show_progress,
timeout=remote_timeout,
sources=sources,
http_headers=http_headers,
)
fileobj = io.FileIO(name_or_obj, "r")
if is_url and not cache:
delete_fds.append(fileobj)
close_fds.append(fileobj)
else:
fileobj = name_or_obj
# Check if the file object supports random access, and if not,
# then wrap it in a BytesIO buffer. It would be nicer to use a
    # BufferedReader to avoid loading the whole file first,
# but that might not be compatible with all possible I/O classes.
if not hasattr(fileobj, "seek"):
try:
# py.path.LocalPath objects have .read() method but it uses
# text mode, which won't work. .read_binary() does, and
# surely other ducks would return binary contents when
# called like this.
# py.path.LocalPath is what comes from the legacy tmpdir fixture
# in pytest.
fileobj = io.BytesIO(fileobj.read_binary())
except AttributeError:
fileobj = io.BytesIO(fileobj.read())
# Now read enough bytes to look at signature
signature = fileobj.read(4)
fileobj.seek(0)
if signature[:3] == b"\x1f\x8b\x08": # gzip
import struct
try:
import gzip
fileobj_new = gzip.GzipFile(fileobj=fileobj, mode="rb")
fileobj_new.read(1) # need to check that the file is really gzip
except (OSError, EOFError, struct.error): # invalid gzip file
fileobj.seek(0)
fileobj_new.close()
else:
fileobj_new.seek(0)
fileobj = fileobj_new
elif signature[:3] == b"BZh": # bzip2
try:
import bz2
except ImportError:
for fd in close_fds:
fd.close()
raise ModuleNotFoundError(
"This Python installation does not provide the bz2 module."
)
try:
# bz2.BZ2File does not support file objects, only filenames, so we
# need to write the data to a temporary file
with NamedTemporaryFile("wb", delete=False) as tmp:
tmp.write(fileobj.read())
tmp.close()
fileobj_new = bz2.BZ2File(tmp.name, mode="rb")
fileobj_new.read(1) # need to check that the file is really bzip2
except OSError: # invalid bzip2 file
fileobj.seek(0)
fileobj_new.close()
# raise
else:
fileobj_new.seek(0)
close_fds.append(fileobj_new)
fileobj = fileobj_new
elif signature[:3] == b"\xfd7z": # xz
try:
import lzma
fileobj_new = lzma.LZMAFile(fileobj, mode="rb")
fileobj_new.read(1) # need to check that the file is really xz
except ImportError:
for fd in close_fds:
fd.close()
raise ModuleNotFoundError(
"This Python installation does not provide the lzma module."
)
except (OSError, EOFError): # invalid xz file
fileobj.seek(0)
fileobj_new.close()
# should we propagate this to the caller to signal bad content?
# raise ValueError(e)
else:
fileobj_new.seek(0)
fileobj = fileobj_new
# By this point, we have a file, io.FileIO, gzip.GzipFile, bz2.BZ2File
# or lzma.LZMAFile instance opened in binary mode (that is, read
# returns bytes). Now we need to, if requested, wrap it in a
# io.TextIOWrapper so read will return unicode based on the
# encoding parameter.
needs_textio_wrapper = encoding != "binary"
if needs_textio_wrapper:
# A bz2.BZ2File can not be wrapped by a TextIOWrapper,
# so we decompress it to a temporary file and then
# return a handle to that.
try:
import bz2
except ImportError:
pass
else:
if isinstance(fileobj, bz2.BZ2File):
tmp = NamedTemporaryFile("wb", delete=False)
data = fileobj.read()
tmp.write(data)
tmp.close()
delete_fds.append(tmp)
fileobj = io.FileIO(tmp.name, "r")
close_fds.append(fileobj)
fileobj = _NonClosingBufferedReader(fileobj)
fileobj = _NonClosingTextIOWrapper(fileobj, encoding=encoding)
# Ensure that file is at the start - io.FileIO will for
# example not always be at the start:
# >>> import io
# >>> f = open('test.fits', 'rb')
# >>> f.read(4)
# 'SIMP'
# >>> f.seek(0)
# >>> fileobj = io.FileIO(f.fileno())
# >>> fileobj.tell()
# 4096L
fileobj.seek(0)
try:
yield fileobj
finally:
if close_files:
for fd in close_fds:
fd.close()
for fd in delete_fds:
os.remove(fd.name)
def get_file_contents(*args, **kwargs):
"""
Retrieves the contents of a filename or file-like object.
See the `get_readable_fileobj` docstring for details on parameters.
Returns
-------
object
The content of the file (as requested by ``encoding``).
"""
with get_readable_fileobj(*args, **kwargs) as f:
return f.read()
@contextlib.contextmanager
def get_pkg_data_fileobj(data_name, package=None, encoding=None, cache=True):
"""
Retrieves a data file from the standard locations for the package and
provides the file as a file-like object that reads bytes.
Parameters
----------
data_name : str
Name/location of the desired data file. One of the following:
* The name of a data file included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
``astropy.pkname``, use ``'data/file.dat'`` to get the
file in ``astropy/pkgname/data/file.dat``. Double-dots
can be used to go up a level. In the same example, use
``'../data/file.dat'`` to get ``astropy/data/file.dat``.
* If a matching local file does not exist, the Astropy
data server will be queried for the file.
* A hash like that produced by `compute_hash` can be
requested, prefixed by 'hash/'
e.g. 'hash/34c33b3eb0d56eb9462003af249eff28'. The hash
will first be searched for locally, and if not found,
the Astropy data server will be queried.
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
encoding : str, optional
When `None` (default), returns a file-like object with a
        ``read`` method that returns `str` (``unicode``) objects, using
`locale.getpreferredencoding` as an encoding. This matches
the default behavior of the built-in `open` when no ``mode``
argument is provided.
When ``'binary'``, returns a file-like object where its ``read``
method returns `bytes` objects.
When another string, it is the name of an encoding, and the
file-like object's ``read`` method will return `str` (``unicode``)
objects, decoded from binary using the given encoding.
cache : bool
If True, the file will be downloaded and saved locally or the
already-cached local copy will be accessed. If False, the
file-like object will directly access the resource (e.g. if a
remote URL is accessed, an object like that from
`urllib.request.urlopen` is returned).
Returns
-------
fileobj : file-like
An object with the contents of the data file available via
``read`` function. Can be used as part of a ``with`` statement,
automatically closing itself after the ``with`` block.
Raises
------
urllib.error.URLError
If a remote file cannot be found.
OSError
If problems occur writing or reading a local file.
Examples
--------
This will retrieve a data file and its contents for the `astropy.wcs`
tests::
>>> from astropy.utils.data import get_pkg_data_fileobj
>>> with get_pkg_data_fileobj('data/3d_cd.hdr',
... package='astropy.wcs.tests') as fobj:
... fcontents = fobj.read()
...
This next example would download a data file from the astropy data server
because the ``allsky/allsky_rosat.fits`` file is not present in the
source distribution. It will also save the file locally so the
next time it is accessed it won't need to be downloaded.::
>>> from astropy.utils.data import get_pkg_data_fileobj
>>> with get_pkg_data_fileobj('allsky/allsky_rosat.fits',
... encoding='binary') as fobj: # doctest: +REMOTE_DATA +IGNORE_OUTPUT
... fcontents = fobj.read()
...
Downloading http://data.astropy.org/allsky/allsky_rosat.fits [Done]
This does the same thing but does *not* cache it locally::
>>> with get_pkg_data_fileobj('allsky/allsky_rosat.fits',
... encoding='binary', cache=False) as fobj: # doctest: +REMOTE_DATA +IGNORE_OUTPUT
... fcontents = fobj.read()
...
Downloading http://data.astropy.org/allsky/allsky_rosat.fits [Done]
See Also
--------
get_pkg_data_contents : returns the contents of a file or url as a bytes object
get_pkg_data_filename : returns a local name for a file containing the data
"""
datafn = get_pkg_data_path(data_name, package=package)
if os.path.isdir(datafn):
raise OSError(
"Tried to access a data file that's actually a package data directory"
)
elif os.path.isfile(datafn): # local file
with get_readable_fileobj(datafn, encoding=encoding) as fileobj:
yield fileobj
else: # remote file
with get_readable_fileobj(
conf.dataurl + data_name,
encoding=encoding,
cache=cache,
sources=[conf.dataurl + data_name, conf.dataurl_mirror + data_name],
) as fileobj:
# We read a byte to trigger any URLErrors
fileobj.read(1)
fileobj.seek(0)
yield fileobj
def get_pkg_data_filename(
data_name, package=None, show_progress=True, remote_timeout=None
):
"""
Retrieves a data file from the standard locations for the package and
provides a local filename for the data.
This function is similar to `get_pkg_data_fileobj` but returns the
file *name* instead of a readable file-like object. This means
that this function must always cache remote files locally, unlike
`get_pkg_data_fileobj`.
Parameters
----------
data_name : str
Name/location of the desired data file. One of the following:
* The name of a data file included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
``astropy.pkname``, use ``'data/file.dat'`` to get the
file in ``astropy/pkgname/data/file.dat``. Double-dots
can be used to go up a level. In the same example, use
``'../data/file.dat'`` to get ``astropy/data/file.dat``.
* If a matching local file does not exist, the Astropy
data server will be queried for the file.
* A hash like that produced by `compute_hash` can be
requested, prefixed by 'hash/'
e.g. 'hash/34c33b3eb0d56eb9462003af249eff28'. The hash
will first be searched for locally, and if not found,
the Astropy data server will be queried.
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
show_progress : bool, optional
Whether to display a progress bar if the file is downloaded
from a remote server. Default is `True`.
remote_timeout : float
Timeout for the requests in seconds (default is the
configurable `astropy.utils.data.Conf.remote_timeout`).
Raises
------
urllib.error.URLError
If a remote file cannot be found.
OSError
If problems occur writing or reading a local file.
Returns
-------
filename : str
A file path on the local file system corresponding to the data
requested in ``data_name``.
Examples
--------
This will retrieve the contents of the data file for the `astropy.wcs`
tests::
>>> from astropy.utils.data import get_pkg_data_filename
>>> fn = get_pkg_data_filename('data/3d_cd.hdr',
... package='astropy.wcs.tests')
>>> with open(fn) as f:
... fcontents = f.read()
...
This retrieves a data file by hash either locally or from the astropy data
server::
>>> from astropy.utils.data import get_pkg_data_filename
>>> fn = get_pkg_data_filename('hash/34c33b3eb0d56eb9462003af249eff28') # doctest: +SKIP
>>> with open(fn) as f:
... fcontents = f.read()
...
See Also
--------
get_pkg_data_contents : returns the contents of a file or url as a bytes object
get_pkg_data_fileobj : returns a file-like object with the data
"""
if remote_timeout is None:
# use configfile default
remote_timeout = conf.remote_timeout
if data_name.startswith("hash/"):
# first try looking for a local version if a hash is specified
hashfn = _find_hash_fn(data_name[5:])
if hashfn is None:
return download_file(
conf.dataurl + data_name,
cache=True,
show_progress=show_progress,
timeout=remote_timeout,
sources=[conf.dataurl + data_name, conf.dataurl_mirror + data_name],
)
else:
return hashfn
else:
fs_path = os.path.normpath(data_name)
datafn = get_pkg_data_path(fs_path, package=package)
if os.path.isdir(datafn):
raise OSError(
"Tried to access a data file that's actually a package data directory"
)
elif os.path.isfile(datafn): # local file
return datafn
else: # remote file
return download_file(
conf.dataurl + data_name,
cache=True,
show_progress=show_progress,
timeout=remote_timeout,
sources=[conf.dataurl + data_name, conf.dataurl_mirror + data_name],
)
def get_pkg_data_contents(data_name, package=None, encoding=None, cache=True):
"""
Retrieves a data file from the standard locations and returns its
contents as a bytes object.
Parameters
----------
data_name : str
Name/location of the desired data file. One of the following:
* The name of a data file included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
``astropy.pkname``, use ``'data/file.dat'`` to get the
file in ``astropy/pkgname/data/file.dat``. Double-dots
can be used to go up a level. In the same example, use
``'../data/file.dat'`` to get ``astropy/data/file.dat``.
* If a matching local file does not exist, the Astropy
data server will be queried for the file.
* A hash like that produced by `compute_hash` can be
requested, prefixed by 'hash/'
e.g. 'hash/34c33b3eb0d56eb9462003af249eff28'. The hash
will first be searched for locally, and if not found,
the Astropy data server will be queried.
* A URL to some other file.
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
encoding : str, optional
When `None` (default), returns a file-like object with a
``read`` method that returns `str` (``unicode``) objects, using
`locale.getpreferredencoding` as an encoding. This matches
the default behavior of the built-in `open` when no ``mode``
argument is provided.
When ``'binary'``, returns a file-like object where its ``read``
method returns `bytes` objects.
When another string, it is the name of an encoding, and the
file-like object's ``read`` method will return `str` (``unicode``)
objects, decoded from binary using the given encoding.
cache : bool
If True, the file will be downloaded and saved locally or the
already-cached local copy will be accessed. If False, the
file-like object will directly access the resource (e.g. if a
remote URL is accessed, an object like that from
`urllib.request.urlopen` is returned).
Returns
-------
contents : bytes
The complete contents of the file as a bytes object.
Raises
------
urllib.error.URLError
If a remote file cannot be found.
OSError
If problems occur writing or reading a local file.
See Also
--------
get_pkg_data_fileobj : returns a file-like object with the data
get_pkg_data_filename : returns a local name for a file containing the data
"""
with get_pkg_data_fileobj(
data_name, package=package, encoding=encoding, cache=cache
) as fd:
contents = fd.read()
return contents
def get_pkg_data_filenames(datadir, package=None, pattern="*"):
"""
Returns the path of all of the data files in a given directory
that match a given glob pattern.
Parameters
----------
datadir : str
Name/location of the desired data files. One of the following:
* The name of a directory included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
``astropy.pkname``, use ``'data'`` to get the
files in ``astropy/pkgname/data``.
* Remote URLs are not currently supported.
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
pattern : str, optional
A UNIX-style filename glob pattern to match files. See the
`glob` module in the standard library for more information.
By default, matches all files.
Returns
-------
filenames : iterator of str
Paths on the local filesystem in *datadir* matching *pattern*.
Examples
--------
This will retrieve the contents of the data file for the `astropy.wcs`
tests::
>>> from astropy.utils.data import get_pkg_data_filenames
>>> for fn in get_pkg_data_filenames('data/maps', 'astropy.wcs.tests',
... '*.hdr'):
... with open(fn) as f:
... fcontents = f.read()
...
"""
path = get_pkg_data_path(datadir, package=package)
if os.path.isfile(path):
raise OSError(
"Tried to access a data directory that's actually a package data file"
)
elif os.path.isdir(path):
for filename in os.listdir(path):
if fnmatch.fnmatch(filename, pattern):
yield os.path.join(path, filename)
else:
raise OSError("Path not found")
def get_pkg_data_fileobjs(datadir, package=None, pattern="*", encoding=None):
"""
Returns readable file objects for all of the data files in a given
directory that match a given glob pattern.
Parameters
----------
datadir : str
Name/location of the desired data files. One of the following:
* The name of a directory included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
``astropy.pkname``, use ``'data'`` to get the
files in ``astropy/pkgname/data``
* Remote URLs are not currently supported
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
pattern : str, optional
A UNIX-style filename glob pattern to match files. See the
`glob` module in the standard library for more information.
By default, matches all files.
encoding : str, optional
When `None` (default), returns a file-like object with a
``read`` method that returns `str` (``unicode``) objects, using
`locale.getpreferredencoding` as an encoding. This matches
the default behavior of the built-in `open` when no ``mode``
argument is provided.
When ``'binary'``, returns a file-like object where its ``read``
method returns `bytes` objects.
When another string, it is the name of an encoding, and the
file-like object's ``read`` method will return `str` (``unicode``)
objects, decoded from binary using the given encoding.
Returns
-------
fileobjs : iterator of file object
File objects for each of the files on the local filesystem in
*datadir* matching *pattern*.
Examples
--------
This will retrieve the contents of the data file for the `astropy.wcs`
tests::
        >>> from astropy.utils.data import get_pkg_data_fileobjs
>>> for fd in get_pkg_data_fileobjs('data/maps', 'astropy.wcs.tests',
... '*.hdr'):
... fcontents = fd.read()
...
"""
for fn in get_pkg_data_filenames(datadir, package=package, pattern=pattern):
with get_readable_fileobj(fn, encoding=encoding) as fd:
yield fd
def compute_hash(localfn):
"""Computes the MD5 hash for a file.
The hash for a data file is used for looking up data files in a unique
fashion. This is of particular use for tests; a test may require a
particular version of a particular file, in which case it can be accessed
via hash to get the appropriate version.
Typically, if you wish to write a test that requires a particular data
file, you will want to submit that file to the astropy data servers, and
use
e.g. ``get_pkg_data_filename('hash/34c33b3eb0d56eb9462003af249eff28')``,
but with the hash for your file in place of the hash in the example.
Parameters
----------
localfn : str
The path to the file for which the hash should be generated.
Returns
-------
hash : str
The hex digest of the cryptographic hash for the contents of the
``localfn`` file.
"""
with open(localfn, "rb") as f:
h = hashlib.md5()
block = f.read(conf.compute_hash_block_size)
while block:
h.update(block)
block = f.read(conf.compute_hash_block_size)
return h.hexdigest()
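# Illustrative sketch, not part of the module API: how a digest from
# ``compute_hash`` can be fed back to ``get_pkg_data_filename`` via the
# 'hash/' prefix described in the docstrings above.  The data file and package
# come from the doctest examples; everything else is hypothetical.
def _example_hash_lookup():
    fn = get_pkg_data_filename("data/3d_cd.hdr", package="astropy.wcs.tests")
    digest = compute_hash(fn)
    # Request the same contents by hash; if no cached file with this hash is
    # found locally, the astropy data server is queried.
    return get_pkg_data_filename("hash/" + digest)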
def get_pkg_data_path(*path, package=None):
"""Get path from source-included data directories.
Parameters
----------
*path : str
Name/location of the desired data file/directory.
May be a tuple of strings for ``os.path`` joining.
package : str or None, optional, keyword-only
If specified, look for a file relative to the given package, rather
than the calling module's package.
Returns
-------
path : str
Name/location of the desired data file/directory.
Raises
------
ImportError
Given package or module is not importable.
RuntimeError
If the local data file is outside of the package's tree.
"""
if package is None:
module = find_current_module(1, finddiff=["astropy.utils.data", "contextlib"])
if module is None:
# not called from inside an astropy package. So just pass name
# through
return os.path.join(*path)
if not hasattr(module, "__package__") or not module.__package__:
# The __package__ attribute may be missing or set to None; see
# PEP-366, also astropy issue #1256
if "." in module.__name__:
package = module.__name__.rpartition(".")[0]
else:
package = module.__name__
else:
package = module.__package__
else:
# package errors if it isn't a str
# so there is no need for checks in the containing if/else
module = resolve_name(package)
# module path within package
module_path = os.path.dirname(module.__file__)
full_path = os.path.join(module_path, *path)
# Check that file is inside tree.
rootpkgname = package.partition(".")[0]
rootpkg = resolve_name(rootpkgname)
root_dir = os.path.dirname(rootpkg.__file__)
if not _is_inside(full_path, root_dir):
raise RuntimeError(
f"attempted to get a local data file outside of the {rootpkgname} tree."
)
return full_path
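# Illustrative sketch, not part of the module API: ``get_pkg_data_path`` just
# joins path components relative to a package's directory and checks that the
# result stays inside that package's tree.  The file name 'data/example.dat'
# is hypothetical.
def _example_pkg_data_path():
    # Components may be passed separately or as a single relative path.
    return get_pkg_data_path("data", "example.dat", package="astropy.wcs")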
def _find_hash_fn(hexdigest, pkgname="astropy"):
"""
Looks for a local file by hash - returns file name if found and a valid
file, otherwise returns None.
"""
for v in cache_contents(pkgname=pkgname).values():
if compute_hash(v) == hexdigest:
return v
return None
def get_free_space_in_dir(path, unit=False):
"""
Given a path to a directory, returns the amount of free space
on that filesystem.
Parameters
----------
path : str
The path to a directory.
unit : bool or `~astropy.units.Unit`
Return the amount of free space as Quantity in the given unit,
if provided. Default is `False` for backward-compatibility.
Returns
-------
free_space : int or `~astropy.units.Quantity`
The amount of free space on the partition that the directory is on.
If ``unit=False``, it is returned as plain integer (in bytes).
"""
if not os.path.isdir(path):
raise OSError(
"Can only determine free space associated with directories, not files."
)
# Actually you can on Linux but I want to avoid code that fails
# on Windows only.
free_space = shutil.disk_usage(path).free
if unit:
from astropy import units as u
# TODO: Automatically determine best prefix to use.
if unit is True:
unit = u.byte
free_space = u.Quantity(free_space, u.byte).to(unit)
return free_space
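# Illustrative sketch, not part of the module API: the ``unit`` argument
# controls whether the free space comes back as a plain integer or as a
# `~astropy.units.Quantity`, and ``check_free_space_in_dir`` (below) raises if
# the requested size does not fit.
def _example_free_space():
    from astropy import units as u
    n_bytes = get_free_space_in_dir(".")                # plain int, in bytes
    quantity = get_free_space_in_dir(".", unit=u.byte)  # Quantity in bytes
    check_free_space_in_dir(".", 1024)  # raises OSError if < 1024 bytes free
    return n_bytes, quantity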
def check_free_space_in_dir(path, size):
"""
Determines if a given directory has enough space to hold a file of
a given size.
Parameters
----------
path : str
The path to a directory.
size : int or `~astropy.units.Quantity`
A proposed filesize. If not a Quantity, assume it is in bytes.
Raises
------
OSError
There is not enough room on the filesystem.
"""
space = get_free_space_in_dir(path, unit=getattr(size, "unit", False))
if space < size:
from astropy.utils.console import human_file_size
raise OSError(
f"Not enough free space in {path} "
f"to download a {human_file_size(size)} file, "
f"only {human_file_size(space)} left"
)
class _ftptlswrapper(urllib.request.ftpwrapper):
def init(self):
self.busy = 0
self.ftp = ftplib.FTP_TLS()
self.ftp.connect(self.host, self.port, self.timeout)
self.ftp.login(self.user, self.passwd)
self.ftp.prot_p()
_target = "/".join(self.dirs)
self.ftp.cwd(_target)
class _FTPTLSHandler(urllib.request.FTPHandler):
def connect_ftp(self, user, passwd, host, port, dirs, timeout):
return _ftptlswrapper(user, passwd, host, port, dirs, timeout, persistent=False)
@functools.lru_cache
def _build_urlopener(ftp_tls=False, ssl_context=None, allow_insecure=False):
"""
Helper for building a `urllib.request.build_opener` which handles TLS/SSL.
"""
# Import ssl here to avoid import failure when running in pyodide/Emscripten
import ssl
ssl_context = dict(it for it in ssl_context) if ssl_context else {}
cert_chain = {}
if "certfile" in ssl_context:
cert_chain.update(
{
"certfile": ssl_context.pop("certfile"),
"keyfile": ssl_context.pop("keyfile", None),
"password": ssl_context.pop("password", None),
}
)
elif "password" in ssl_context or "keyfile" in ssl_context:
raise ValueError(
"passing 'keyfile' or 'password' in the ssl_context argument "
"requires passing 'certfile' as well"
)
if "cafile" not in ssl_context and certifi is not None:
ssl_context["cafile"] = certifi.where()
ssl_context = ssl.create_default_context(**ssl_context)
if allow_insecure:
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
if cert_chain:
ssl_context.load_cert_chain(**cert_chain)
https_handler = urllib.request.HTTPSHandler(context=ssl_context)
if ftp_tls:
urlopener = urllib.request.build_opener(_FTPTLSHandler(), https_handler)
else:
urlopener = urllib.request.build_opener(https_handler)
return urlopener
def _try_url_open(
source_url,
timeout=None,
http_headers=None,
ftp_tls=False,
ssl_context=None,
allow_insecure=False,
):
"""Helper for opening a URL while handling TLS/SSL verification issues."""
# Import ssl here to avoid import failure when running in pyodide/Emscripten
import ssl
# Always try first with a secure connection
# _build_urlopener uses lru_cache, so the ssl_context argument must be
    # converted to a hashable type (a set of 2-tuples)
ssl_context = frozenset(ssl_context.items() if ssl_context else [])
urlopener = _build_urlopener(
ftp_tls=ftp_tls, ssl_context=ssl_context, allow_insecure=False
)
req = urllib.request.Request(source_url, headers=http_headers)
try:
return urlopener.open(req, timeout=timeout)
except urllib.error.URLError as exc:
reason = exc.reason
if (
isinstance(reason, ssl.SSLError)
and reason.reason == "CERTIFICATE_VERIFY_FAILED"
):
msg = (
f"Verification of TLS/SSL certificate at {source_url} "
"failed: this can mean either the server is "
"misconfigured or your local root CA certificates are "
"out-of-date; in the latter case this can usually be "
'addressed by installing the Python package "certifi" '
"(see the documentation for astropy.utils.data.download_url)"
)
if not allow_insecure:
msg += (
" or in both cases you can work around this by "
"passing allow_insecure=True, but only if you "
"understand the implications; the original error "
f"was: {reason}"
)
raise urllib.error.URLError(msg)
else:
msg += ". Re-trying with allow_insecure=True."
warn(msg, AstropyWarning)
# Try again with a new urlopener allowing insecure connections
urlopener = _build_urlopener(
ftp_tls=ftp_tls, ssl_context=ssl_context, allow_insecure=True
)
return urlopener.open(req, timeout=timeout)
raise
def _download_file_from_source(
source_url,
show_progress=True,
timeout=None,
remote_url=None,
cache=False,
pkgname="astropy",
http_headers=None,
ftp_tls=None,
ssl_context=None,
allow_insecure=False,
):
from astropy.utils.console import ProgressBarOrSpinner
if not conf.allow_internet:
raise urllib.error.URLError(
f"URL {remote_url} was supposed to be downloaded but "
f"allow_internet is {conf.allow_internet}; "
"if this is unexpected check the astropy.cfg file for the option "
"allow_internet"
)
if remote_url is None:
remote_url = source_url
if http_headers is None:
http_headers = {}
if ftp_tls is None and urllib.parse.urlparse(remote_url).scheme == "ftp":
try:
return _download_file_from_source(
source_url,
show_progress=show_progress,
timeout=timeout,
remote_url=remote_url,
cache=cache,
pkgname=pkgname,
http_headers=http_headers,
ftp_tls=False,
)
except urllib.error.URLError as e:
# e.reason might not be a string, e.g. socket.gaierror
# URLError changed to report original exception in Python 3.10, 3.11 (bpo-43564)
if (
str(e.reason)
.removeprefix("ftp error: ")
.startswith(("error_perm", "5"))
):
ftp_tls = True
else:
raise
with _try_url_open(
source_url,
timeout=timeout,
http_headers=http_headers,
ftp_tls=ftp_tls,
ssl_context=ssl_context,
allow_insecure=allow_insecure,
) as remote:
info = remote.info()
try:
size = int(info["Content-Length"])
except (KeyError, ValueError, TypeError):
size = None
if size is not None:
check_free_space_in_dir(gettempdir(), size)
if cache:
dldir = _get_download_cache_loc(pkgname)
check_free_space_in_dir(dldir, size)
# If a user has overridden sys.stdout it might not have the
# isatty method, in that case assume it's not a tty
is_tty = hasattr(sys.stdout, "isatty") and sys.stdout.isatty()
if show_progress and is_tty:
progress_stream = sys.stdout
else:
progress_stream = io.StringIO()
if source_url == remote_url:
dlmsg = f"Downloading {remote_url}"
else:
dlmsg = f"Downloading {remote_url} from {source_url}"
with ProgressBarOrSpinner(size, dlmsg, file=progress_stream) as p:
with NamedTemporaryFile(
prefix=f"astropy-download-{os.getpid()}-", delete=False
) as f:
try:
bytes_read = 0
block = remote.read(conf.download_block_size)
while block:
f.write(block)
bytes_read += len(block)
p.update(bytes_read)
block = remote.read(conf.download_block_size)
if size is not None and bytes_read > size:
raise urllib.error.URLError(
f"File was supposed to be {size} bytes but "
f"server provides more, at least {bytes_read} "
"bytes. Download failed."
)
if size is not None and bytes_read < size:
raise urllib.error.ContentTooShortError(
f"File was supposed to be {size} bytes but we "
f"only got {bytes_read} bytes. Download failed.",
content=None,
)
except BaseException:
if os.path.exists(f.name):
try:
os.remove(f.name)
except OSError:
pass
raise
return f.name
def download_file(
remote_url,
cache=False,
show_progress=True,
timeout=None,
sources=None,
pkgname="astropy",
http_headers=None,
ssl_context=None,
allow_insecure=False,
):
"""Downloads a URL and optionally caches the result.
It returns the filename of a file containing the URL's contents.
If ``cache=True`` and the file is present in the cache, just
returns the filename; if the file had to be downloaded, add it
to the cache. If ``cache="update"`` always download and add it
to the cache.
The cache is effectively a dictionary mapping URLs to files; by default the
file contains the contents of the URL that is its key, but in practice
these can be obtained from a mirror (using ``sources``) or imported from
the local filesystem (using `~import_file_to_cache` or
`~import_download_cache`). Regardless, each file is regarded as
representing the contents of a particular URL, and this URL should be used
to look them up or otherwise manipulate them.
The files in the cache directory are named according to a cryptographic
hash of their URLs (currently MD5, so hackers can cause collisions).
The modification times on these files normally indicate when they were
last downloaded from the Internet.
Parameters
----------
remote_url : str
The URL of the file to download
cache : bool or "update", optional
Whether to cache the contents of remote URLs. If "update",
always download the remote URL in case there is a new version
and store the result in the cache.
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `True`). Regardless of this setting, the progress bar is only
displayed when outputting to a terminal.
timeout : float, optional
Timeout for remote requests in seconds (default is the configurable
`astropy.utils.data.Conf.remote_timeout`).
sources : list of str, optional
If provided, a list of URLs to try to obtain the file from. The
result will be stored under the original URL. The original URL
will *not* be tried unless it is in this list; this is to prevent
long waits for a primary server that is known to be inaccessible
at the moment. If an empty list is passed, then ``download_file``
will not attempt to connect to the Internet, that is, if the file
is not in the cache a KeyError will be raised.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
http_headers : dict or None
HTTP request headers to pass into ``urlopen`` if needed. (These headers
        are ignored if the protocol for the ``remote_url``/``sources`` entry
is not a remote HTTP URL.) In the default case (None), the headers are
``User-Agent: some_value`` and ``Accept: */*``, where ``some_value``
is set by ``astropy.utils.data.conf.default_http_user_agent``.
ssl_context : dict, optional
Keyword arguments to pass to `ssl.create_default_context` when
        downloading from HTTPS or TLS+FTP sources. This can be used to provide
alternative paths to root CA certificates. Additionally, if the key
``'certfile'`` and optionally ``'keyfile'`` and ``'password'`` are
included, they are passed to `ssl.SSLContext.load_cert_chain`. This
can be used for performing SSL/TLS client certificate authentication
for servers that require it.
allow_insecure : bool, optional
Allow downloading files over a TLS/SSL connection even when the server
certificate verification failed. When set to `True` the potentially
insecure download is allowed to proceed, but an
`~astropy.utils.exceptions.AstropyWarning` is issued. If you are
frequently getting certificate verification warnings, consider
        installing or upgrading the `certifi`_ package, which provides frequently
updated certificates for common root CAs (i.e., a set similar to those
used by web browsers). If installed, Astropy will use it
automatically.
.. _certifi: https://pypi.org/project/certifi/
Returns
-------
local_path : str
        Returns the local path that the file was downloaded to.
Raises
------
urllib.error.URLError
Whenever there's a problem getting the remote file.
KeyError
When a file was requested from the cache but is missing and no
sources were provided to obtain it from the Internet.
Notes
-----
Because this function returns a filename, another process could run
`clear_download_cache` before you actually open the file, leaving
you with a filename that no longer points to a usable file.
"""
if timeout is None:
timeout = conf.remote_timeout
if sources is None:
sources = [remote_url]
if http_headers is None:
http_headers = {"User-Agent": conf.default_http_user_agent, "Accept": "*/*"}
missing_cache = ""
url_key = remote_url
if cache:
try:
dldir = _get_download_cache_loc(pkgname)
except OSError as e:
cache = False
missing_cache = (
f"Cache directory cannot be read or created ({e}), "
"providing data in temporary file instead."
)
else:
if cache == "update":
pass
elif isinstance(cache, str):
raise ValueError(
f"Cache value '{cache}' was requested but "
"'update' is the only recognized string; "
"otherwise use a boolean"
)
else:
filename = os.path.join(dldir, _url_to_dirname(url_key), "contents")
if os.path.exists(filename):
return os.path.abspath(filename)
errors = {}
for source_url in sources:
try:
f_name = _download_file_from_source(
source_url,
timeout=timeout,
show_progress=show_progress,
cache=cache,
remote_url=remote_url,
pkgname=pkgname,
http_headers=http_headers,
ssl_context=ssl_context,
allow_insecure=allow_insecure,
)
# Success!
break
except urllib.error.URLError as e:
# errno 8 is from SSL "EOF occurred in violation of protocol"
if (
hasattr(e, "reason")
and hasattr(e.reason, "errno")
and e.reason.errno == 8
):
e.reason.strerror = f"{e.reason.strerror}. requested URL: {remote_url}"
e.reason.args = (e.reason.errno, e.reason.strerror)
errors[source_url] = e
else: # No success
if not sources:
raise KeyError(
f"No sources listed and file {remote_url} not in cache! "
"Please include primary URL in sources if you want it to be "
"included as a valid source."
)
elif len(sources) == 1:
raise errors[sources[0]]
else:
raise urllib.error.URLError(
f"Unable to open any source! Exceptions were {errors}"
) from errors[sources[0]]
if cache:
try:
return import_file_to_cache(
url_key,
f_name,
remove_original=True,
replace=(cache == "update"),
pkgname=pkgname,
)
except PermissionError as e:
# Cache is readonly, we can't update it
missing_cache = (
f"Cache directory appears to be read-only ({e}), unable to import "
f"downloaded file, providing data in temporary file {f_name} "
"instead."
)
# FIXME: other kinds of cache problem can occur?
if missing_cache:
warn(CacheMissingWarning(missing_cache, f_name))
if conf.delete_temporary_downloads_at_exit:
global _tempfilestodel
_tempfilestodel.append(f_name)
return os.path.abspath(f_name)
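# Illustrative sketch, not part of the module API, of the caching behaviour
# described in the ``download_file`` docstring.  The URLs are hypothetical
# placeholders.
def _example_download_file():
    url = "http://data.example.invalid/catalog.fits"       # hypothetical
    mirror = "http://mirror.example.invalid/catalog.fits"  # hypothetical
    # First call downloads and caches; later calls with cache=True reuse the
    # cached copy without touching the network.
    download_file(url, cache=True)
    # Contents fetched from a mirror in ``sources`` are still stored under the
    # original URL.
    download_file(url, cache=True, sources=[mirror, url])
    # cache="update" always re-downloads and refreshes the cached copy.
    return download_file(url, cache="update")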
def is_url_in_cache(url_key, pkgname="astropy"):
"""Check if a download for ``url_key`` is in the cache.
The provided ``url_key`` will be the name used in the cache. The contents
may have been downloaded from this URL or from a mirror or they may have
been provided by the user. See `~download_file` for details.
Parameters
----------
url_key : str
The URL retrieved
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
Returns
-------
in_cache : bool
`True` if a download for ``url_key`` is in the cache, `False` if not
or if the cache does not exist at all.
See Also
--------
cache_contents : obtain a dictionary listing everything in the cache
"""
try:
dldir = _get_download_cache_loc(pkgname)
except OSError:
return False
filename = os.path.join(dldir, _url_to_dirname(url_key), "contents")
return os.path.exists(filename)
def cache_total_size(pkgname="astropy"):
"""Return the total size in bytes of all files in the cache."""
size = 0
dldir = _get_download_cache_loc(pkgname=pkgname)
for root, dirs, files in os.walk(dldir):
size += sum(os.path.getsize(os.path.join(root, name)) for name in files)
return size
def _do_download_files_in_parallel(kwargs):
with astropy.config.paths.set_temp_config(kwargs.pop("temp_config")):
with astropy.config.paths.set_temp_cache(kwargs.pop("temp_cache")):
return download_file(**kwargs)
def download_files_in_parallel(
urls,
cache="update",
show_progress=True,
timeout=None,
sources=None,
multiprocessing_start_method=None,
pkgname="astropy",
):
"""Download multiple files in parallel from the given URLs.
Blocks until all files have downloaded. The result is a list of
local file paths corresponding to the given urls.
The results will be stored in the cache under the values in ``urls`` even
if they are obtained from some other location via ``sources``. See
`~download_file` for details.
Parameters
----------
urls : list of str
The URLs to retrieve.
cache : bool or "update", optional
        Whether to use the cache (default is ``"update"``). If "update",
always download the remote URLs to see if new data is available
and store the result in cache.
.. versionchanged:: 4.0
The default was changed to ``"update"`` and setting it to
``False`` will print a Warning and set it to ``"update"`` again,
because the function will not work properly without cache. Using
``True`` will work as expected.
.. versionchanged:: 3.0
The default was changed to ``True`` and setting it to ``False``
will print a Warning and set it to ``True`` again, because the
function will not work properly without cache.
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `True`)
timeout : float, optional
Timeout for each individual requests in seconds (default is the
configurable `astropy.utils.data.Conf.remote_timeout`).
sources : dict, optional
If provided, for each URL a list of URLs to try to obtain the
file from. The result will be stored under the original URL.
For any URL in this dictionary, the original URL will *not* be
tried unless it is in this list; this is to prevent long waits
for a primary server that is known to be inaccessible at the
moment.
multiprocessing_start_method : str, optional
Useful primarily for testing; if in doubt leave it as the default.
When using multiprocessing, certain anomalies occur when starting
processes with the "spawn" method (the only option on Windows);
other anomalies occur with the "fork" method (the default on
Linux).
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
Returns
-------
paths : list of str
The local file paths corresponding to the downloaded URLs.
Notes
-----
If a URL is unreachable, the downloading will grind to a halt and the
exception will propagate upward, but an unpredictable number of
files will have been successfully downloaded and will remain in
the cache.
"""
from .console import ProgressBar
if timeout is None:
timeout = conf.remote_timeout
if sources is None:
sources = {}
if not cache:
# See issue #6662, on windows won't work because the files are removed
# again before they can be used. On *NIX systems it will behave as if
# cache was set to True because multiprocessing cannot insert the items
# in the list of to-be-removed files. This could be fixed, but really,
# just use the cache, with update_cache if appropriate.
warn(
"Disabling the cache does not work because of multiprocessing, "
'it will be set to ``"update"``. You may need to manually remove '
"the cached files with clear_download_cache() afterwards.",
AstropyWarning,
)
cache = "update"
if show_progress:
progress = sys.stdout
else:
progress = io.BytesIO()
# Combine duplicate URLs
combined_urls = list(set(urls))
combined_paths = ProgressBar.map(
_do_download_files_in_parallel,
[
dict(
remote_url=u,
cache=cache,
show_progress=False,
timeout=timeout,
sources=sources.get(u, None),
pkgname=pkgname,
temp_cache=astropy.config.paths.set_temp_cache._temp_path,
temp_config=astropy.config.paths.set_temp_config._temp_path,
)
for u in combined_urls
],
file=progress,
multiprocess=True,
multiprocessing_start_method=multiprocessing_start_method,
)
paths = []
for url in urls:
paths.append(combined_paths[combined_urls.index(url)])
return paths
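# Illustrative sketch, not part of the module API: parallel download with a
# per-URL mirror mapping, as described in the docstring above.  The URLs are
# hypothetical.
def _example_parallel_download():
    urls = [
        "http://data.example.invalid/a.fits",
        "http://data.example.invalid/b.fits",
    ]
    # For each URL, try the mirror only; results are cached under ``urls``.
    sources = {u: [u.replace("data.", "mirror.")] for u in urls}
    # The returned paths are in the same order as ``urls``.
    return download_files_in_parallel(urls, sources=sources)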
# This is used by download_file and _deltemps to determine the files to delete
# when the interpreter exits
_tempfilestodel = []
@atexit.register
def _deltemps():
global _tempfilestodel
if _tempfilestodel is not None:
while len(_tempfilestodel) > 0:
fn = _tempfilestodel.pop()
if os.path.isfile(fn):
try:
os.remove(fn)
except OSError:
# oh well we tried
# could be held open by some process, on Windows
pass
elif os.path.isdir(fn):
try:
shutil.rmtree(fn)
except OSError:
# couldn't get rid of it, sorry
# could be held open by some process, on Windows
pass
def clear_download_cache(hashorurl=None, pkgname="astropy"):
"""Clears the data file cache by deleting the local file(s).
If a URL is provided, it will be the name used in the cache. The contents
may have been downloaded from this URL or from a mirror or they may have
been provided by the user. See `~download_file` for details.
For the purposes of this function, a file can also be identified by a hash
of its contents or by the filename under which the data is stored (as
returned by `~download_file`, for example).
Parameters
----------
hashorurl : str or None
If None, the whole cache is cleared. Otherwise, specify
a hash for the cached file that is supposed to be deleted,
the full path to a file in the cache that should be deleted,
or a URL that should be removed from the cache if present.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
"""
try:
dldir = _get_download_cache_loc(pkgname)
except OSError as e:
# Problem arose when trying to open the cache
# Just a warning, though
msg = "Not clearing data cache - cache inaccessible due to "
estr = "" if len(e.args) < 1 else (": " + str(e))
warn(CacheMissingWarning(msg + e.__class__.__name__ + estr))
return
try:
if hashorurl is None:
# Optional: delete old incompatible caches too
_rmtree(dldir)
elif _is_url(hashorurl):
filepath = os.path.join(dldir, _url_to_dirname(hashorurl))
_rmtree(filepath)
else:
# Not a URL, it should be either a filename or a hash
filepath = os.path.join(dldir, hashorurl)
rp = os.path.relpath(filepath, dldir)
if rp.startswith(".."):
raise RuntimeError(
"attempted to use clear_download_cache on the path "
f"{filepath} outside the data cache directory {dldir}"
)
d, f = os.path.split(rp)
if d and f in ["contents", "url"]:
# It's a filename not the hash of a URL
# so we want to zap the directory containing the
# files "url" and "contents"
filepath = os.path.join(dldir, d)
if os.path.exists(filepath):
_rmtree(filepath)
elif len(hashorurl) == 2 * hashlib.md5().digest_size and re.match(
r"[0-9a-f]+", hashorurl
):
# It's the hash of some file contents, we have to find the right file
filename = _find_hash_fn(hashorurl)
if filename is not None:
clear_download_cache(filename)
except OSError as e:
msg = "Not clearing data from cache - problem arose "
estr = "" if len(e.args) < 1 else (": " + str(e))
warn(CacheMissingWarning(msg + e.__class__.__name__ + estr))
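# Illustrative sketch, not part of the module API: the three ways of naming a
# cache entry that ``clear_download_cache`` accepts.  The URL is hypothetical.
def _example_clear_cache():
    url = "http://data.example.invalid/catalog.fits"  # hypothetical
    filename = download_file(url, cache=True)
    clear_download_cache(url)       # by the URL the entry is stored under
    clear_download_cache(filename)  # by the cached file's path
    clear_download_cache()          # or wipe the whole cache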
def _get_download_cache_loc(pkgname="astropy"):
"""Finds the path to the cache directory and makes them if they don't exist.
Parameters
----------
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
Returns
-------
datadir : str
The path to the data cache directory.
"""
try:
datadir = os.path.join(
astropy.config.paths.get_cache_dir(pkgname), "download", "url"
)
if not os.path.exists(datadir):
try:
os.makedirs(datadir)
except OSError:
if not os.path.exists(datadir):
raise
elif not os.path.isdir(datadir):
raise OSError(f"Data cache directory {datadir} is not a directory")
return datadir
except OSError as e:
msg = "Remote data cache could not be accessed due to "
estr = "" if len(e.args) < 1 else (": " + str(e))
warn(CacheMissingWarning(msg + e.__class__.__name__ + estr))
raise
def _url_to_dirname(url):
if not _is_url(url):
raise ValueError(f"Malformed URL: '{url}'")
# Make domain names case-insensitive
# Also makes the http:// case-insensitive
urlobj = list(urllib.parse.urlsplit(url))
urlobj[1] = urlobj[1].lower()
if urlobj[0].lower() in ["http", "https"] and urlobj[1] and urlobj[2] == "":
urlobj[2] = "/"
url_c = urllib.parse.urlunsplit(urlobj)
return hashlib.md5(url_c.encode("utf-8")).hexdigest()
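# Illustrative sketch, not part of the module API: after the normalization
# above, these two spellings of the same URL land in the same cache directory.
def _example_url_to_dirname():
    a = _url_to_dirname("http://Example.ORG")   # bare host gains a trailing "/"
    b = _url_to_dirname("http://example.org/")  # host is already lower-case
    assert a == b
    return a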
class ReadOnlyDict(dict):
def __setitem__(self, key, value):
raise TypeError("This object is read-only.")
_NOTHING = ReadOnlyDict({})
class CacheDamaged(ValueError):
"""Record the URL or file that was a problem.
    Using clear_download_cache on each entry of the ``.bad_files`` or
    ``.bad_urls`` attribute, whichever is not empty, should resolve this
    particular problem.
"""
def __init__(self, *args, bad_urls=None, bad_files=None, **kwargs):
super().__init__(*args, **kwargs)
self.bad_urls = bad_urls if bad_urls is not None else []
self.bad_files = bad_files if bad_files is not None else []
def check_download_cache(pkgname="astropy"):
"""Do a consistency check on the cache.
.. note::
Since v5.0, this function no longer returns anything.
Because the cache is shared by all versions of ``astropy`` in all virtualenvs
run by your user, possibly concurrently, it could accumulate problems.
This could lead to hard-to-debug problems or wasted space. This function
detects a number of incorrect conditions, including nonexistent files that
are indexed, files that are indexed but in the wrong place, and, if you
request it, files whose content does not match the hash that is indexed.
    This function also flags non-indexed files found in the cache directory;
    their presence probably indicates that something has gone wrong and
    inaccessible files have accumulated in the cache. These can be removed with
    :func:`clear_download_cache`, either passing the offending filename
    (reported in the exception raised by this function), or with no arguments
    to empty the entire cache and return it to a reasonable, if empty, state.
Parameters
----------
pkgname : str, optional
The package name to use to locate the download cache, i.e., for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
Raises
------
`~astropy.utils.data.CacheDamaged`
To indicate a problem with the cache contents; the exception contains
a ``.bad_files`` attribute containing a set of filenames to allow the
user to use :func:`clear_download_cache` to remove the offending items.
OSError, RuntimeError
To indicate some problem with the cache structure. This may need a full
:func:`clear_download_cache` to resolve, or may indicate some kind of
misconfiguration.
"""
bad_files = set()
messages = set()
dldir = _get_download_cache_loc(pkgname=pkgname)
with os.scandir(dldir) as it:
for entry in it:
f = os.path.abspath(os.path.join(dldir, entry.name))
if entry.name.startswith("rmtree-"):
if f not in _tempfilestodel:
bad_files.add(f)
messages.add(f"Cache entry {entry.name} not scheduled for deletion")
elif entry.is_dir():
for sf in os.listdir(f):
if sf in ["url", "contents"]:
continue
sf = os.path.join(f, sf)
bad_files.add(sf)
messages.add(f"Unexpected file f{sf}")
urlf = os.path.join(f, "url")
url = None
if not os.path.isfile(urlf):
bad_files.add(urlf)
messages.add(f"Problem with URL file f{urlf}")
else:
url = get_file_contents(urlf, encoding="utf-8")
if not _is_url(url):
bad_files.add(f)
messages.add(f"Malformed URL: {url}")
else:
hashname = _url_to_dirname(url)
if entry.name != hashname:
bad_files.add(f)
messages.add(
f"URL hashes to {hashname} but is stored in"
f" {entry.name}"
)
if not os.path.isfile(os.path.join(f, "contents")):
bad_files.add(f)
if url is None:
messages.add(f"Hash {entry.name} is missing contents")
else:
messages.add(
f"URL {url} with hash {entry.name} is missing contents"
)
else:
bad_files.add(f)
messages.add(f"Left-over non-directory {f} in cache")
if bad_files:
raise CacheDamaged("\n".join(messages), bad_files=bad_files)
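# Illustrative sketch, not part of the module API: one way a caller might react
# to the ``CacheDamaged`` exception documented above, clearing only the
# offending entries rather than the whole cache.
def _example_check_cache():
    try:
        check_download_cache()
    except CacheDamaged as exc:
        for bad in exc.bad_files:
            clear_download_cache(bad)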
@contextlib.contextmanager
def _SafeTemporaryDirectory(suffix=None, prefix=None, dir=None):
"""Temporary directory context manager.
This will not raise an exception if the temporary directory goes away
before it's supposed to be deleted. Specifically, what is deleted will
be the directory *name* produced; if no such directory exists, no
exception will be raised.
It would be safer to delete it only if it's really the same directory
- checked by file descriptor - and if it's still called the same thing.
But that opens a platform-specific can of worms.
It would also be more robust to use ExitStack and TemporaryDirectory,
which is more aggressive about removing readonly things.
"""
d = mkdtemp(suffix=suffix, prefix=prefix, dir=dir)
try:
yield d
finally:
try:
shutil.rmtree(d)
except OSError:
pass
def _rmtree(path, replace=None):
"""More-atomic rmtree. Ignores missing directory."""
with TemporaryDirectory(
prefix="rmtree-", dir=os.path.dirname(os.path.abspath(path))
) as d:
try:
os.rename(path, os.path.join(d, "to-zap"))
except FileNotFoundError:
pass
except PermissionError:
warn(
CacheMissingWarning(
f"Unable to remove directory {path} because a file in it "
"is in use and you are on Windows",
path,
)
)
raise
except OSError as e:
if e.errno == errno.EXDEV:
warn(e.strerror, AstropyWarning)
shutil.move(path, os.path.join(d, "to-zap"))
else:
raise
if replace is not None:
try:
os.rename(replace, path)
except FileExistsError:
# already there, fine
pass
except OSError as e:
if e.errno == errno.ENOTEMPTY:
# already there, fine
pass
elif e.errno == errno.EXDEV:
warn(e.strerror, AstropyWarning)
shutil.move(replace, path)
else:
raise
def import_file_to_cache(
url_key, filename, remove_original=False, pkgname="astropy", *, replace=True
):
"""Import the on-disk file specified by filename to the cache.
The provided ``url_key`` will be the name used in the cache. The file
should contain the contents of this URL, at least notionally (the URL may
be temporarily or permanently unavailable). It is using ``url_key`` that
users will request these contents from the cache. See :func:`download_file` for
details.
If ``url_key`` already exists in the cache, it will be updated to point to
these imported contents, and its old contents will be deleted from the
cache.
Parameters
----------
url_key : str
The key to index the file under. This should probably be
the URL where the file was located, though if you obtained
it from a mirror you should use the URL of the primary
location.
filename : str
The file whose contents you want to import.
remove_original : bool
Whether to remove the original file (``filename``) once import is
complete.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
replace : boolean, optional
Whether or not to replace an existing object in the cache, if one exists.
If replacement is not requested but the object exists, silently pass.
"""
cache_dir = _get_download_cache_loc(pkgname=pkgname)
cache_dirname = _url_to_dirname(url_key)
local_dirname = os.path.join(cache_dir, cache_dirname)
local_filename = os.path.join(local_dirname, "contents")
with _SafeTemporaryDirectory(prefix="temp_dir", dir=cache_dir) as temp_dir:
temp_filename = os.path.join(temp_dir, "contents")
# Make sure we're on the same filesystem
# This will raise an exception if the url_key doesn't turn into a valid filename
shutil.copy(filename, temp_filename)
with open(os.path.join(temp_dir, "url"), "w", encoding="utf-8") as f:
f.write(url_key)
if replace:
_rmtree(local_dirname, replace=temp_dir)
else:
try:
os.rename(temp_dir, local_dirname)
except FileExistsError:
# already there, fine
pass
except OSError as e:
if e.errno == errno.ENOTEMPTY:
# already there, fine
pass
else:
raise
if remove_original:
os.remove(filename)
return os.path.abspath(local_filename)
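# Illustrative sketch, not part of the module API: importing a locally
# generated file into the cache so that later ``download_file`` calls find it
# without any network access.  The URL and contents are hypothetical.
def _example_import_to_cache():
    url = "http://data.example.invalid/catalog.fits"  # hypothetical
    with NamedTemporaryFile("wb", delete=False) as f:
        f.write(b"pretend FITS data")
    cached = import_file_to_cache(url, f.name, remove_original=True)
    # With sources=[] no download is attempted; the cached copy is returned.
    assert download_file(url, cache=True, sources=[]) == cached
    return cached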
def get_cached_urls(pkgname="astropy"):
"""
Get the list of URLs in the cache. Especially useful for looking up what
files are stored in your cache when you don't have internet access.
The listed URLs are the keys programs should use to access the file
contents, but those contents may have actually been obtained from a mirror.
See `~download_file` for details.
Parameters
----------
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
Returns
-------
cached_urls : list
List of cached URLs.
See Also
--------
cache_contents : obtain a dictionary listing everything in the cache
"""
return sorted(cache_contents(pkgname=pkgname).keys())
def cache_contents(pkgname="astropy"):
"""Obtain a dict mapping cached URLs to filenames.
This dictionary is a read-only snapshot of the state of the cache when this
function was called. If other processes are actively working with the
cache, it is possible for them to delete files that are listed in this
dictionary. Use with some caution if you are working on a system that is
busy with many running astropy processes, although the same issues apply to
most functions in this module.
"""
r = {}
try:
dldir = _get_download_cache_loc(pkgname=pkgname)
except OSError:
return _NOTHING
with os.scandir(dldir) as it:
for entry in it:
            if entry.is_dir():
url = get_file_contents(
os.path.join(dldir, entry.name, "url"), encoding="utf-8"
)
r[url] = os.path.abspath(os.path.join(dldir, entry.name, "contents"))
return ReadOnlyDict(r)
def export_download_cache(
filename_or_obj, urls=None, overwrite=False, pkgname="astropy"
):
"""Exports the cache contents as a ZIP file.
Parameters
----------
filename_or_obj : str or file-like
Where to put the created ZIP file. Must be something the zipfile
module can write to.
urls : iterable of str or None
The URLs to include in the exported cache. The default is all
URLs currently in the cache. If a URL is included in this list
but is not currently in the cache, a KeyError will be raised.
To ensure that all are in the cache use `~download_file`
or `~download_files_in_parallel`.
overwrite : bool, optional
If filename_or_obj is a filename that exists, it will only be
overwritten if this is True.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
See Also
--------
import_download_cache : import the contents of such a ZIP file
import_file_to_cache : import a single file directly
"""
if urls is None:
urls = get_cached_urls(pkgname)
with zipfile.ZipFile(filename_or_obj, "w" if overwrite else "x") as z:
for u in urls:
fn = download_file(u, cache=True, sources=[], pkgname=pkgname)
# Do not use os.path.join because ZIP files want
# "/" on all platforms
z_fn = urllib.parse.quote(u, safe="")
z.write(fn, z_fn)
def import_download_cache(
filename_or_obj, urls=None, update_cache=False, pkgname="astropy"
):
"""Imports the contents of a ZIP file into the cache.
Each member of the ZIP file should be named by a quoted version of the
URL whose contents it stores. These names are decoded with
:func:`~urllib.parse.unquote`.
Parameters
----------
filename_or_obj : str or file-like
Where the stored ZIP file is. Must be something the :mod:`~zipfile`
module can read from.
urls : set of str or list of str or None
The URLs to import from the ZIP file. The default is all
URLs in the file.
update_cache : bool, optional
If True, any entry in the ZIP file will overwrite the value in the
cache; if False, leave untouched any entry already in the cache.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
See Also
--------
    export_download_cache : export the contents of the cache to such a ZIP file
import_file_to_cache : import a single file directly
"""
with zipfile.ZipFile(filename_or_obj, "r") as z, TemporaryDirectory() as d:
for i, zf in enumerate(z.infolist()):
url = urllib.parse.unquote(zf.filename)
# FIXME(aarchiba): do we want some kind of validation on this URL?
# urllib.parse might do something sensible...but what URLs might
# they have?
# is_url in this file is probably a good check, not just here
# but throughout this file.
if urls is not None and url not in urls:
continue
if not update_cache and is_url_in_cache(url, pkgname=pkgname):
continue
f_temp_name = os.path.join(d, str(i))
with z.open(zf) as f_zip, open(f_temp_name, "wb") as f_temp:
block = f_zip.read(conf.download_block_size)
while block:
f_temp.write(block)
block = f_zip.read(conf.download_block_size)
import_file_to_cache(
url, f_temp_name, remove_original=True, pkgname=pkgname
)
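# Illustrative sketch, not part of the module API: round-tripping the cache
# through a ZIP archive with the two functions above.  The archive name is
# hypothetical.
def _example_cache_roundtrip():
    export_download_cache("astropy_cache.zip", overwrite=True)
    clear_download_cache()
    # Restore every entry from the archive into the now-empty cache.
    import_download_cache("astropy_cache.zip", update_cache=True)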
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
A "grab bag" of relatively small general-purpose utilities that don't have
a clear module/package to live in.
"""
import contextlib
import difflib
import inspect
import json
import locale
import os
import re
import signal
import sys
import threading
import traceback
import unicodedata
from contextlib import contextmanager
from astropy.utils import deprecated
__all__ = [
"isiterable",
"silence",
"format_exception",
"NumpyRNGContext",
"find_api_page",
"is_path_hidden",
"walk_skip_hidden",
"JsonCustomEncoder",
"indent",
"dtype_bytes_or_chars",
]
NOT_OVERWRITING_MSG = (
"File {} already exists. If you mean to replace it "
'then use the argument "overwrite=True".'
)
# A useful regex for tests.
_NOT_OVERWRITING_MSG_MATCH = (
r"File .* already exists\. If you mean to "
r"replace it then use the argument "
r'"overwrite=True"\.'
)
def isiterable(obj):
"""Returns `True` if the given object is iterable."""
try:
iter(obj)
return True
except TypeError:
return False
def indent(s, shift=1, width=4):
"""Indent a block of text. The indentation is applied to each line."""
indented = "\n".join(" " * (width * shift) + l if l else "" for l in s.splitlines())
    if s.endswith("\n"):
indented += "\n"
return indented
class _DummyFile:
"""A noop writeable object."""
def write(self, s):
pass
@contextlib.contextmanager
def silence():
"""A context manager that silences sys.stdout and sys.stderr."""
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = _DummyFile()
sys.stderr = _DummyFile()
yield
sys.stdout = old_stdout
sys.stderr = old_stderr
def format_exception(msg, *args, **kwargs):
"""Fill in information about the exception that occurred.
Given an exception message string, uses new-style formatting arguments
``{filename}``, ``{lineno}``, ``{func}`` and/or ``{text}`` to fill in
information about the exception that occurred. For example:
try:
1/0
except:
raise ZeroDivisionError(
                format_exception('A divide by zero occurred in {filename} at '
                                 'line {lineno} of function {func}.'))
Any additional positional or keyword arguments passed to this function are
also used to format the message.
.. note::
This uses `sys.exc_info` to gather up the information needed to fill
in the formatting arguments. Since `sys.exc_info` is not carried
outside a handled exception, it's not wise to use this
outside of an ``except`` clause - if it is, this will substitute
'<unknown>' for the 4 formatting arguments.
"""
tb = traceback.extract_tb(sys.exc_info()[2], limit=1)
if len(tb) > 0:
filename, lineno, func, text = tb[0]
else:
filename = lineno = func = text = "<unknown>"
return msg.format(
*args, filename=filename, lineno=lineno, func=func, text=text, **kwargs
)
class NumpyRNGContext:
"""
A context manager (for use with the ``with`` statement) that will seed the
numpy random number generator (RNG) to a specific value, and then restore
the RNG state back to whatever it was before.
    This is primarily intended for use in the astropy testing suite, but it
may be useful in ensuring reproducibility of Monte Carlo simulations in a
science context.
Parameters
----------
seed : int
The value to use to seed the numpy RNG
Examples
--------
A typical use case might be::
with NumpyRNGContext(<some seed value you pick>):
from numpy import random
randarr = random.randn(100)
... run your test using `randarr` ...
#Any code using numpy.random at this indent level will act just as it
#would have if it had been before the with statement - e.g. whatever
#the default seed is.
"""
def __init__(self, seed):
self.seed = seed
def __enter__(self):
from numpy import random
self.startstate = random.get_state()
random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
from numpy import random
random.set_state(self.startstate)
def find_api_page(obj, version=None, openinbrowser=True, timeout=None):
"""
Determines the URL of the API page for the specified object, and
optionally open that page in a web browser.
.. note::
You must be connected to the internet for this to function even if
``openinbrowser`` is `False`, unless you provide a local version of
the documentation to ``version`` (e.g., ``file:///path/to/docs``).
Parameters
----------
obj
The object to open the docs for or its fully-qualified name
(as a str).
version : str
The doc version - either a version number like '0.1', 'dev' for
the development/latest docs, or a URL to point to a specific
        location that should be the *base* of the documentation. Defaults to
        the latest development docs if you aren't on a release; otherwise,
        defaults to the version you are on.
openinbrowser : bool
If `True`, the `webbrowser` package will be used to open the doc
page in a new web browser window.
timeout : number, optional
The number of seconds to wait before timing-out the query to
the astropy documentation. If not given, the default python
stdlib timeout will be used.
Returns
-------
url : str
The loaded URL
Raises
------
ValueError
If the documentation can't be found
"""
import webbrowser
from zlib import decompress
from astropy.utils.data import get_readable_fileobj
if (
not isinstance(obj, str)
and hasattr(obj, "__module__")
and hasattr(obj, "__name__")
):
obj = obj.__module__ + "." + obj.__name__
elif inspect.ismodule(obj):
obj = obj.__name__
if version is None:
from astropy import version
if version.release:
version = "v" + version.version
else:
version = "dev"
if "://" in version:
if version.endswith("index.html"):
baseurl = version[:-10]
elif version.endswith("/"):
baseurl = version
else:
baseurl = version + "/"
elif version == "dev" or version == "latest":
baseurl = "http://devdocs.astropy.org/"
else:
baseurl = f"https://docs.astropy.org/en/{version}/"
# Custom request headers; see
# https://github.com/astropy/astropy/issues/8990
url = baseurl + "objects.inv"
headers = {"User-Agent": f"Astropy/{version}"}
with get_readable_fileobj(
url, encoding="binary", remote_timeout=timeout, http_headers=headers
) as uf:
oiread = uf.read()
# need to first read/remove the first four lines, which have info before
# the compressed section with the actual object inventory
idx = -1
headerlines = []
for _ in range(4):
oldidx = idx
idx = oiread.index(b"\n", oldidx + 1)
headerlines.append(oiread[(oldidx + 1) : idx].decode("utf-8"))
# intersphinx version line, project name, and project version
ivers, proj, vers, compr = headerlines
if "The remainder of this file is compressed using zlib" not in compr:
raise ValueError(
f"The file downloaded from {baseurl}objects.inv does not seem to be"
"the usual Sphinx objects.inv format. Maybe it "
"has changed?"
)
compressed = oiread[(idx + 1) :]
decompressed = decompress(compressed).decode("utf-8")
resurl = None
for l in decompressed.strip().splitlines():
ls = l.split()
name = ls[0]
loc = ls[3]
if loc.endswith("$"):
loc = loc[:-1] + name
if name == obj:
resurl = baseurl + loc
break
if resurl is None:
raise ValueError(f"Could not find the docs for the object {obj}")
elif openinbrowser:
webbrowser.open(resurl)
return resurl
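# --- Illustrative sketch (not part of the astropy source) -----------------
# Minimal use of find_api_page defined above: resolve the documentation URL
# for an object by its fully-qualified name without opening a browser.
# Requires internet access; the helper name is hypothetical.
def _example_find_api_page():  # pragma: no cover
    url = find_api_page("astropy.nddata.NDData", openinbrowser=False)
    print(url)  # a docs.astropy.org URL pointing at the NDData API page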
def signal_number_to_name(signum):
"""
Given an OS signal number, returns a signal name. If the signal
number is unknown, returns ``'UNKNOWN'``.
"""
# Since these numbers and names are platform specific, we use the
# builtin signal module and build a reverse mapping.
signal_to_name_map = {
k: v for v, k in signal.__dict__.items() if v.startswith("SIG")
}
return signal_to_name_map.get(signum, "UNKNOWN")
# _has_hidden_attribute() can be deleted together with deprecated is_path_hidden() and
# walk_skip_hidden().
if sys.platform == "win32":
import ctypes
def _has_hidden_attribute(filepath):
"""
Returns True if the given filepath has the hidden attribute on
MS-Windows. Based on a post here:
https://stackoverflow.com/questions/284115/cross-platform-hidden-file-detection.
"""
if isinstance(filepath, bytes):
filepath = filepath.decode(sys.getfilesystemencoding())
try:
attrs = ctypes.windll.kernel32.GetFileAttributesW(filepath)
result = bool(attrs & 2) and attrs != -1
except AttributeError:
result = False
return result
else:
def _has_hidden_attribute(filepath):
return False
@deprecated(since="6.0")
def is_path_hidden(filepath):
"""
Determines if a given file or directory is hidden.
Parameters
----------
filepath : str
The path to a file or directory
Returns
-------
hidden : bool
Returns `True` if the file is hidden
"""
name = os.path.basename(os.path.abspath(filepath))
if isinstance(name, bytes):
is_dotted = name.startswith(b".")
else:
is_dotted = name.startswith(".")
return is_dotted or _has_hidden_attribute(filepath)
@deprecated(since="6.0")
def walk_skip_hidden(top, onerror=None, followlinks=False):
"""
A wrapper for `os.walk` that skips hidden files and directories.
This function does not have the parameter ``topdown`` from
`os.walk`: the directories must always be recursed top-down when
using this function.
See Also
--------
os.walk : For a description of the parameters
"""
for root, dirs, files in os.walk(
top, topdown=True, onerror=onerror, followlinks=followlinks
):
# These lists must be updated in-place so os.walk will skip
# hidden directories
dirs[:] = [d for d in dirs if not is_path_hidden(d)]
files[:] = [f for f in files if not is_path_hidden(f)]
yield root, dirs, files
class JsonCustomEncoder(json.JSONEncoder):
"""Support for data types that JSON default encoder
does not do.
This includes:
* Numpy array or number
* Complex number
* Set
* Bytes
* astropy.UnitBase
* astropy.Quantity
Examples
--------
>>> import json
>>> import numpy as np
>>> from astropy.utils.misc import JsonCustomEncoder
>>> json.dumps(np.arange(3), cls=JsonCustomEncoder)
'[0, 1, 2]'
"""
def default(self, obj):
import numpy as np
from astropy import units as u
if isinstance(obj, u.Quantity):
return dict(value=obj.value, unit=obj.unit.to_string())
if isinstance(obj, (np.number, np.ndarray)):
return obj.tolist()
elif isinstance(obj, complex):
return [obj.real, obj.imag]
elif isinstance(obj, set):
return list(obj)
elif isinstance(obj, bytes): # pragma: py3
return obj.decode()
elif isinstance(obj, (u.UnitBase, u.FunctionUnitBase)):
if obj == u.dimensionless_unscaled:
obj = "dimensionless_unit"
else:
return obj.to_string()
return json.JSONEncoder.default(self, obj)
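# --- Illustrative sketch (not part of the astropy source) -----------------
# Serialising a Quantity, a numpy array and a set with the encoder above;
# the payload values are made up for illustration.
def _example_json_custom_encoder():  # pragma: no cover
    import json
    import numpy as np
    from astropy import units as u
    payload = {"flux": 3.5 * u.Jy, "pixels": np.arange(3), "ids": {1, 2}}
    print(json.dumps(payload, cls=JsonCustomEncoder))
    # -> {"flux": {"value": 3.5, "unit": "Jy"}, "pixels": [0, 1, 2], "ids": [1, 2]}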
def strip_accents(s):
"""
Remove accents from a Unicode string.
This helps with matching "ångström" to "angstrom", for example.
"""
return "".join(
c for c in unicodedata.normalize("NFD", s) if unicodedata.category(c) != "Mn"
)
def did_you_mean(s, candidates, n=3, cutoff=0.8, fix=None):
"""
    When a string isn't found in a set of candidates, we can be nice
    and provide a list of alternatives in the exception. This
    convenience function helps format that part of the exception.
Parameters
----------
s : str
candidates : sequence of str or dict of str keys
n : int
The maximum number of results to include. See
`difflib.get_close_matches`.
cutoff : float
In the range [0, 1]. Possibilities that don't score at least
that similar to word are ignored. See
`difflib.get_close_matches`.
fix : callable
A callable to modify the results after matching. It should
take a single string and return a sequence of strings
containing the fixed matches.
Returns
-------
message : str
Returns the string "Did you mean X, Y, or Z?", or the empty
string if no alternatives were found.
"""
if isinstance(s, str):
s = strip_accents(s)
s_lower = s.lower()
# Create a mapping from the lower case name to all capitalization
# variants of that name.
candidates_lower = {}
for candidate in candidates:
candidate_lower = candidate.lower()
candidates_lower.setdefault(candidate_lower, [])
candidates_lower[candidate_lower].append(candidate)
# The heuristic here is to first try "singularizing" the word. If
# that doesn't match anything use difflib to find close matches in
# original, lower and upper case.
if s_lower.endswith("s") and s_lower[:-1] in candidates_lower:
matches = [s_lower[:-1]]
else:
matches = difflib.get_close_matches(
s_lower, candidates_lower, n=n, cutoff=cutoff
)
if len(matches):
capitalized_matches = set()
for match in matches:
capitalized_matches.update(candidates_lower[match])
matches = capitalized_matches
if fix is not None:
mapped_matches = []
for match in matches:
mapped_matches.extend(fix(match))
matches = mapped_matches
matches = list(set(matches))
matches = sorted(matches)
if len(matches) == 1:
matches = matches[0]
else:
matches = ", ".join(matches[:-1]) + " or " + matches[-1]
return f"Did you mean {matches}?"
return ""
LOCALE_LOCK = threading.Lock()
@contextmanager
def _set_locale(name):
"""
Context manager to temporarily set the locale to ``name``.
An example is setting locale to "C" so that the C strtod()
function will use "." as the decimal point to enable consistent
numerical string parsing.
    Note that one cannot nest multiple _set_locale() context manager
    statements: the lock used here is not reentrant, so nesting would deadlock.
This code taken from https://stackoverflow.com/questions/18593661/how-do-i-strftime-a-date-object-in-a-different-locale.
Parameters
----------
name : str
Locale name, e.g. "C" or "fr_FR".
"""
name = str(name)
with LOCALE_LOCK:
saved = locale.setlocale(locale.LC_ALL)
if saved == name:
# Don't do anything if locale is already the requested locale
yield
else:
try:
locale.setlocale(locale.LC_ALL, name)
yield
finally:
locale.setlocale(locale.LC_ALL, saved)
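# --- Illustrative sketch (not part of the astropy source) -----------------
# Using the private _set_locale helper above so that locale-aware parsing
# uses "." as the decimal point regardless of the ambient locale.  The
# helper name is hypothetical.
def _example_set_locale():  # pragma: no cover
    import locale
    with _set_locale("C"):
        print(locale.atof("3.14"))  # -> 3.14, parsed under the "C" locale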
def dtype_bytes_or_chars(dtype):
"""
Parse the number out of a dtype.str value like '<U5' or '<f8'.
See #5819 for discussion on the need for this function for getting
the number of characters corresponding to a string dtype.
Parameters
----------
dtype : numpy dtype object
Input dtype
Returns
-------
bytes_or_chars : int or None
        Bytes (for numeric types) or characters (for string types)
"""
match = re.search(r"(\d+)$", dtype.str)
out = int(match.group(1)) if match else None
return out
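# --- Illustrative sketch (not part of the astropy source) -----------------
# Quick check of dtype_bytes_or_chars above on a numeric and a string dtype.
def _example_dtype_bytes_or_chars():  # pragma: no cover
    import numpy as np
    print(dtype_bytes_or_chars(np.dtype("<f8")))  # -> 8 (bytes of a float64)
    print(dtype_bytes_or_chars(np.dtype("<U5")))  # -> 5 (characters)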
def _hungry_for(option): # pragma: no cover
"""
Open browser loaded with ``option`` options near you.
*Disclaimers: Payments not included. Astropy is not
responsible for any liability from using this function.*
.. note:: Accuracy depends on your browser settings.
"""
import webbrowser
webbrowser.open(f"https://www.google.com/search?q={option}+near+me")
def pizza(): # pragma: no cover
"""``/pizza``."""
_hungry_for("pizza")
def coffee(is_adam=False, is_brigitta=False): # pragma: no cover
"""``/coffee``."""
if is_adam and is_brigitta:
raise ValueError("There can be only one!")
if is_adam:
option = "fresh+third+wave+coffee"
elif is_brigitta:
option = "decent+espresso"
else:
option = "coffee"
_hungry_for(option)
|
0d1e2d2c9e2c985cc8eddc06398964afa8e45e241cd6c7293859922866c90c02 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import weakref
from abc import ABCMeta, abstractmethod
from copy import deepcopy
import numpy as np
# from astropy.utils.compat import ignored
from astropy import log
from astropy.units import Quantity, Unit, UnitConversionError
__all__ = [
"MissingDataAssociationException",
"IncompatibleUncertaintiesException",
"NDUncertainty",
"StdDevUncertainty",
"UnknownUncertainty",
"VarianceUncertainty",
"InverseVariance",
]
# mapping from collapsing operations to the complementary methods used for `to_variance`
collapse_to_variance_mapping = {
np.sum: np.square,
np.mean: np.square,
}
def _move_preserved_axes_first(arr, preserve_axes):
# When collapsing an ND array and preserving M axes, move the
# preserved axes to the first M axes of the output. For example,
# if arr.shape == (6, 5, 4, 3, 2) and we're preserving axes (1, 2),
# then the output should have shape (20, 6, 3, 2). Axes 1 and 2 have
# shape 5 and 4, so we take their product and put them both in the zeroth
# axis.
zeroth_axis_after_reshape = np.prod(np.array(arr.shape)[list(preserve_axes)])
collapse_axes = [i for i in range(arr.ndim) if i not in preserve_axes]
return arr.reshape(
[zeroth_axis_after_reshape] + np.array(arr.shape)[collapse_axes].tolist()
)
def _unravel_preserved_axes(arr, collapsed_arr, preserve_axes):
# After reshaping an array with _move_preserved_axes_first and collapsing
# the result, convert the reshaped first axis back into the shape of each
# of the original preserved axes.
# For example, if arr.shape == (6, 5, 4, 3, 2) and we're preserving axes (1, 2),
# then the output of _move_preserved_axes_first should have shape (20, 6, 3, 2).
# This method unravels the first axis in the output *after* a collapse, so the
# output with shape (20,) becomes shape (5, 4).
if collapsed_arr.ndim != len(preserve_axes):
arr_shape = np.array(arr.shape)
return collapsed_arr.reshape(arr_shape[np.asarray(preserve_axes)])
return collapsed_arr
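# --- Illustrative sketch (not part of the astropy source) -----------------
# Numeric round trip through the two reshaping helpers above, using the
# shapes quoted in their comments: preserving axes (1, 2) of a
# (6, 5, 4, 3, 2) array.  The helper name is hypothetical.
def _example_preserved_axes_roundtrip():  # pragma: no cover
    import numpy as np
    arr = np.zeros((6, 5, 4, 3, 2))
    moved = _move_preserved_axes_first(arr, preserve_axes=(1, 2))
    print(moved.shape)  # -> (20, 6, 3, 2): axes 1 and 2 (5 * 4) become axis 0
    collapsed = moved.sum(axis=(1, 2, 3))  # collapse everything but axis 0
    restored = _unravel_preserved_axes(arr, collapsed, preserve_axes=(1, 2))
    print(restored.shape)  # -> (5, 4), the shapes of the preserved axes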
def from_variance_for_mean(x, axis):
if axis is None:
# do operation on all dimensions:
denom = np.ma.count(x)
else:
denom = np.ma.count(x, axis)
return np.sqrt(np.ma.sum(x, axis)) / denom
# mapping from collapsing operations to the complementary methods used for `from_variance`
collapse_from_variance_mapping = {
np.sum: lambda x, axis: np.sqrt(np.ma.sum(x, axis)),
np.mean: from_variance_for_mean,
np.median: None,
}
class IncompatibleUncertaintiesException(Exception):
"""This exception should be used to indicate cases in which uncertainties
with two different classes can not be propagated.
"""
class MissingDataAssociationException(Exception):
"""This exception should be used to indicate that an uncertainty instance
has not been associated with a parent `~astropy.nddata.NDData` object.
"""
class NDUncertainty(metaclass=ABCMeta):
"""This is the metaclass for uncertainty classes used with `NDData`.
Parameters
----------
array : any type, optional
The array or value (the parameter name is due to historical reasons) of
the uncertainty. `numpy.ndarray`, `~astropy.units.Quantity` or
`NDUncertainty` subclasses are recommended.
If the `array` is `list`-like or `numpy.ndarray`-like it will be cast
to a plain `numpy.ndarray`.
Default is ``None``.
unit : unit-like, optional
Unit for the uncertainty ``array``. Strings that can be converted to a
`~astropy.units.Unit` are allowed.
Default is ``None``.
copy : `bool`, optional
Indicates whether to save the `array` as a copy. ``True`` copies it
before saving, while ``False`` tries to save every parameter as
reference. Note however that it is not always possible to save the
input as reference.
Default is ``True``.
Raises
------
IncompatibleUncertaintiesException
If given another `NDUncertainty`-like class as ``array`` if their
``uncertainty_type`` is different.
"""
def __init__(self, array=None, copy=True, unit=None):
if isinstance(array, NDUncertainty):
# Given an NDUncertainty class or subclass check that the type
# is the same.
if array.uncertainty_type != self.uncertainty_type:
raise IncompatibleUncertaintiesException
# Check if two units are given and take the explicit one then.
if unit is not None and unit != array._unit:
# TODO : Clarify it (see NDData.init for same problem)?
log.info("overwriting Uncertainty's current unit with specified unit.")
elif array._unit is not None:
unit = array.unit
array = array.array
elif isinstance(array, Quantity):
# Check if two units are given and take the explicit one then.
if unit is not None and array.unit is not None and unit != array.unit:
log.info("overwriting Quantity's current unit with specified unit.")
elif array.unit is not None:
unit = array.unit
array = array.value
if unit is None:
self._unit = None
else:
self._unit = Unit(unit)
if copy:
array = deepcopy(array)
unit = deepcopy(unit)
self.array = array
self.parent_nddata = None # no associated NDData - until it is set!
@property
@abstractmethod
def uncertainty_type(self):
"""`str` : Short description of the type of uncertainty.
Defined as abstract property so subclasses *have* to override this.
"""
return None
@property
def supports_correlated(self):
"""`bool` : Supports uncertainty propagation with correlated uncertainties?
.. versionadded:: 1.2
"""
return False
@property
def array(self):
"""`numpy.ndarray` : the uncertainty's value."""
return self._array
@array.setter
def array(self, value):
if isinstance(value, (list, np.ndarray)):
value = np.array(value, subok=False, copy=False)
self._array = value
@property
def unit(self):
"""`~astropy.units.Unit` : The unit of the uncertainty, if any."""
return self._unit
@unit.setter
def unit(self, value):
"""
The unit should be set to a value consistent with the parent NDData
unit and the uncertainty type.
"""
if value is not None:
# Check the hidden attribute below, not the property. The property
# raises an exception if there is no parent_nddata.
if self._parent_nddata is not None:
parent_unit = self.parent_nddata.unit
try:
# Check for consistency with the unit of the parent_nddata
self._data_unit_to_uncertainty_unit(parent_unit).to(value)
except UnitConversionError:
raise UnitConversionError(
"Unit {} is incompatible with unit {} of parent nddata".format(
value, parent_unit
)
)
self._unit = Unit(value)
else:
self._unit = value
@property
def quantity(self):
"""
This uncertainty as an `~astropy.units.Quantity` object.
"""
return Quantity(self.array, self.unit, copy=False, dtype=self.array.dtype)
@property
def parent_nddata(self):
"""`NDData` : reference to `NDData` instance with this uncertainty.
In case the reference is not set uncertainty propagation will not be
possible since propagation might need the uncertain data besides the
uncertainty.
"""
no_parent_message = "uncertainty is not associated with an NDData object"
parent_lost_message = (
"the associated NDData object was deleted and cannot be accessed "
"anymore. You can prevent the NDData object from being deleted by "
"assigning it to a variable. If this happened after unpickling "
"make sure you pickle the parent not the uncertainty directly."
)
try:
parent = self._parent_nddata
except AttributeError:
raise MissingDataAssociationException(no_parent_message)
else:
if parent is None:
raise MissingDataAssociationException(no_parent_message)
else:
# The NDData is saved as weak reference so we must call it
# to get the object the reference points to. However because
# we have a weak reference here it's possible that the parent
# was deleted because its reference count dropped to zero.
if isinstance(self._parent_nddata, weakref.ref):
resolved_parent = self._parent_nddata()
if resolved_parent is None:
log.info(parent_lost_message)
return resolved_parent
else:
log.info("parent_nddata should be a weakref to an NDData object.")
return self._parent_nddata
@parent_nddata.setter
def parent_nddata(self, value):
if value is not None and not isinstance(value, weakref.ref):
# Save a weak reference on the uncertainty that points to this
# instance of NDData. Direct references should NOT be used:
# https://github.com/astropy/astropy/pull/4799#discussion_r61236832
value = weakref.ref(value)
# Set _parent_nddata here and access below with the property because value
# is a weakref
self._parent_nddata = value
# set uncertainty unit to that of the parent if it was not already set, unless initializing
# with empty parent (Value=None)
if value is not None:
parent_unit = self.parent_nddata.unit
# this will get the unit for masked quantity input:
parent_data_unit = getattr(self.parent_nddata.data, "unit", None)
if parent_unit is None and parent_data_unit is None:
self.unit = None
elif self.unit is None and parent_unit is not None:
# Set the uncertainty's unit to the appropriate value
self.unit = self._data_unit_to_uncertainty_unit(parent_unit)
elif parent_data_unit is not None:
# if the parent_nddata object has a unit, use it:
self.unit = self._data_unit_to_uncertainty_unit(parent_data_unit)
else:
# Check that units of uncertainty are compatible with those of
# the parent. If they are, no need to change units of the
# uncertainty or the data. If they are not, let the user know.
unit_from_data = self._data_unit_to_uncertainty_unit(parent_unit)
try:
unit_from_data.to(self.unit)
except UnitConversionError:
raise UnitConversionError(
"Unit {} of uncertainty "
"incompatible with unit {} of "
"data".format(self.unit, parent_unit)
)
@abstractmethod
def _data_unit_to_uncertainty_unit(self, value):
"""
Subclasses must override this property. It should take in a data unit
and return the correct unit for the uncertainty given the uncertainty
type.
"""
return None
def __repr__(self):
prefix = self.__class__.__name__ + "("
try:
body = np.array2string(self.array, separator=", ", prefix=prefix)
except AttributeError:
# In case it wasn't possible to use array2string
body = str(self.array)
return f"{prefix}{body})"
def __getstate__(self):
# Because of the weak reference the class wouldn't be picklable.
try:
return self._array, self._unit, self.parent_nddata
except MissingDataAssociationException:
# In case there's no parent
return self._array, self._unit, None
def __setstate__(self, state):
if len(state) != 3:
raise TypeError("The state should contain 3 items.")
self._array = state[0]
self._unit = state[1]
parent = state[2]
if parent is not None:
parent = weakref.ref(parent)
self._parent_nddata = parent
def __getitem__(self, item):
"""Normal slicing on the array, keep the unit and return a reference."""
return self.__class__(self.array[item], unit=self.unit, copy=False)
def propagate(self, operation, other_nddata, result_data, correlation, axis=None):
"""Calculate the resulting uncertainty given an operation on the data.
.. versionadded:: 1.2
Parameters
----------
operation : callable
The operation that is performed on the `NDData`. Supported are
`numpy.add`, `numpy.subtract`, `numpy.multiply` and
`numpy.true_divide` (or `numpy.divide`).
other_nddata : `NDData` instance
The second operand in the arithmetic operation.
result_data : `~astropy.units.Quantity` or ndarray
The result of the arithmetic operations on the data.
correlation : `numpy.ndarray` or number
The correlation (rho) is defined between the uncertainties in
sigma_AB = sigma_A * sigma_B * rho. A value of ``0`` means
uncorrelated operands.
axis : int or tuple of ints, optional
Axis over which to perform a collapsing operation.
Returns
-------
resulting_uncertainty : `NDUncertainty` instance
Another instance of the same `NDUncertainty` subclass containing
the uncertainty of the result.
Raises
------
ValueError
If the ``operation`` is not supported or if correlation is not zero
but the subclass does not support correlated uncertainties.
Notes
-----
First this method checks if a correlation is given and the subclass
implements propagation with correlated uncertainties.
Then the second uncertainty is converted (or an Exception is raised)
to the same class in order to do the propagation.
Then the appropriate propagation method is invoked and the result is
returned.
"""
# Check if the subclass supports correlation
if not self.supports_correlated:
if isinstance(correlation, np.ndarray) or correlation != 0:
raise ValueError(
"{} does not support uncertainty propagation"
" with correlation."
"".format(self.__class__.__name__)
)
if other_nddata is not None:
# Get the other uncertainty (and convert it to a matching one)
other_uncert = self._convert_uncertainty(other_nddata.uncertainty)
if operation.__name__ == "add":
result = self._propagate_add(other_uncert, result_data, correlation)
elif operation.__name__ == "subtract":
result = self._propagate_subtract(
other_uncert, result_data, correlation
)
elif operation.__name__ == "multiply":
result = self._propagate_multiply(
other_uncert, result_data, correlation
)
elif operation.__name__ in ["true_divide", "divide"]:
result = self._propagate_divide(other_uncert, result_data, correlation)
else:
raise ValueError(f"unsupported operation: {operation.__name__}")
else:
# assume this is a collapsing operation:
result = self._propagate_collapse(operation, axis)
return self.__class__(result, copy=False)
def _convert_uncertainty(self, other_uncert):
"""Checks if the uncertainties are compatible for propagation.
Checks if the other uncertainty is `NDUncertainty`-like and if so
verify that the uncertainty_type is equal. If the latter is not the
case try returning ``self.__class__(other_uncert)``.
Parameters
----------
other_uncert : `NDUncertainty` subclass
The other uncertainty.
Returns
-------
other_uncert : `NDUncertainty` subclass
but converted to a compatible `NDUncertainty` subclass if
possible and necessary.
Raises
------
IncompatibleUncertaintiesException:
If the other uncertainty cannot be converted to a compatible
`NDUncertainty` subclass.
"""
if isinstance(other_uncert, NDUncertainty):
if self.uncertainty_type == other_uncert.uncertainty_type:
return other_uncert
else:
return self.__class__(other_uncert)
else:
raise IncompatibleUncertaintiesException
@abstractmethod
def _propagate_add(self, other_uncert, result_data, correlation):
return None
@abstractmethod
def _propagate_subtract(self, other_uncert, result_data, correlation):
return None
@abstractmethod
def _propagate_multiply(self, other_uncert, result_data, correlation):
return None
@abstractmethod
def _propagate_divide(self, other_uncert, result_data, correlation):
return None
def represent_as(self, other_uncert):
"""Convert this uncertainty to a different uncertainty type.
Parameters
----------
other_uncert : `NDUncertainty` subclass
The `NDUncertainty` subclass to convert to.
Returns
-------
resulting_uncertainty : `NDUncertainty` instance
An instance of ``other_uncert`` subclass containing the uncertainty
converted to the new uncertainty type.
Raises
------
TypeError
If either the initial or final subclasses do not support
conversion, a `TypeError` is raised.
"""
as_variance = getattr(self, "_convert_to_variance", None)
if as_variance is None:
raise TypeError(
f"{type(self)} does not support conversion to another uncertainty type."
)
from_variance = getattr(other_uncert, "_convert_from_variance", None)
if from_variance is None:
raise TypeError(
f"{other_uncert.__name__} does not support conversion from "
"another uncertainty type."
)
return from_variance(as_variance())
class UnknownUncertainty(NDUncertainty):
"""This class implements any unknown uncertainty type.
The main purpose of having an unknown uncertainty class is to prevent
uncertainty propagation.
Parameters
----------
args, kwargs :
see `NDUncertainty`
"""
@property
def supports_correlated(self):
"""`False` : Uncertainty propagation is *not* possible for this class."""
return False
@property
def uncertainty_type(self):
"""``"unknown"`` : `UnknownUncertainty` implements any unknown \
uncertainty type.
"""
return "unknown"
def _data_unit_to_uncertainty_unit(self, value):
"""
No way to convert if uncertainty is unknown.
"""
return None
def _convert_uncertainty(self, other_uncert):
"""Raise an Exception because unknown uncertainty types cannot
implement propagation.
"""
msg = "Uncertainties of unknown type cannot be propagated."
raise IncompatibleUncertaintiesException(msg)
def _propagate_add(self, other_uncert, result_data, correlation):
"""Not possible for unknown uncertainty types."""
return None
def _propagate_subtract(self, other_uncert, result_data, correlation):
return None
def _propagate_multiply(self, other_uncert, result_data, correlation):
return None
def _propagate_divide(self, other_uncert, result_data, correlation):
return None
class _VariancePropagationMixin:
"""
Propagation of uncertainties for variances, also used to perform error
propagation for variance-like uncertainties (standard deviation and inverse
variance).
"""
def _propagate_collapse(self, numpy_op, axis=None):
"""
Error propagation for collapse operations on variance or
variance-like uncertainties. Uncertainties are calculated using the
formulae for variance but can be used for uncertainty convertible to
a variance.
Parameters
----------
numpy_op : function
Numpy operation like `np.sum` or `np.max` to use in the collapse
subtract : bool, optional
If ``True``, propagate for subtraction, otherwise propagate for
addition.
axis : tuple, optional
Axis on which to compute collapsing operations.
"""
try:
result_unit_sq = self.parent_nddata.unit**2
except (AttributeError, TypeError):
result_unit_sq = None
if self.array is not None:
# Formula: sigma**2 = dA
if numpy_op in [np.min, np.max]:
# Find the indices of the min/max in parent data along each axis,
# return the uncertainty at the corresponding entry:
return self._get_err_at_extremum(numpy_op, axis=axis)
# np.sum and np.mean operations use similar pattern
# to `_propagate_add_sub`, for example:
else:
# lookup the mapping for to_variance and from_variance for this
# numpy operation:
to_variance = collapse_to_variance_mapping[numpy_op]
from_variance = collapse_from_variance_mapping[numpy_op]
masked_uncertainty = np.ma.masked_array(
self.array, self.parent_nddata.mask
)
if (
self.unit is not None
and to_variance(self.unit) != self.parent_nddata.unit**2
):
# If the uncertainty has a different unit than the result we
# need to convert it to the results unit.
this = (
to_variance(masked_uncertainty << self.unit)
.to(result_unit_sq)
.value
)
else:
this = to_variance(masked_uncertainty)
return from_variance(this, axis=axis)
def _get_err_at_extremum(self, extremum, axis):
"""
Return the value of the ``uncertainty`` array at the indices
which satisfy the ``extremum`` function applied to the ``measurement`` array,
where we expect ``extremum`` to be np.argmax or np.argmin, and
we expect a two-dimensional output.
Assumes the ``measurement`` and ``uncertainty`` array dimensions
are ordered such that the zeroth dimension is the one to preserve.
For example, if you start with array with shape (a, b, c), this
function applies the ``extremum`` function to the last two dimensions,
with shapes b and c.
This operation is difficult to cast in a vectorized way. Here
we implement it with a list comprehension, which is likely not the
most performant solution.
"""
if axis is not None and not hasattr(axis, "__len__"):
# this is a single axis:
axis = [axis]
if extremum is np.min:
arg_extremum = np.ma.argmin
elif extremum is np.max:
arg_extremum = np.ma.argmax
all_axes = np.arange(self.array.ndim)
if axis is None:
# collapse over all dimensions
ind = arg_extremum(np.asanyarray(self.parent_nddata).ravel())
return self.array.ravel()[ind]
# collapse an ND array over arbitrary dimensions:
preserve_axes = [ax for ax in all_axes if ax not in axis]
meas = np.ma.masked_array(
_move_preserved_axes_first(self.parent_nddata.data, preserve_axes),
_move_preserved_axes_first(self.parent_nddata.mask, preserve_axes),
)
err = _move_preserved_axes_first(self.array, preserve_axes)
result = np.array(
[e[np.unravel_index(arg_extremum(m), m.shape)] for m, e in zip(meas, err)]
)
return _unravel_preserved_axes(
self.parent_nddata.data,
result,
preserve_axes,
)
def _propagate_add_sub(
self,
other_uncert,
result_data,
correlation,
subtract=False,
to_variance=lambda x: x,
from_variance=lambda x: x,
):
"""
Error propagation for addition or subtraction of variance or
variance-like uncertainties. Uncertainties are calculated using the
formulae for variance but can be used for uncertainty convertible to
a variance.
Parameters
----------
other_uncert : `~astropy.nddata.NDUncertainty` instance
The uncertainty, if any, of the other operand.
result_data : `~astropy.nddata.NDData` instance
The results of the operation on the data.
correlation : float or array-like
Correlation of the uncertainties.
subtract : bool, optional
If ``True``, propagate for subtraction, otherwise propagate for
addition.
to_variance : function, optional
Function that will transform the input uncertainties to variance.
The default assumes the uncertainty is the variance.
from_variance : function, optional
Function that will convert from variance to the input uncertainty.
The default assumes the uncertainty is the variance.
"""
if subtract:
correlation_sign = -1
else:
correlation_sign = 1
try:
result_unit_sq = result_data.unit**2
except AttributeError:
result_unit_sq = None
if other_uncert.array is not None:
# Formula: sigma**2 = dB
if other_uncert.unit is not None and result_unit_sq != to_variance(
other_uncert.unit
):
# If the other uncertainty has a unit and this unit differs
# from the unit of the result convert it to the results unit
other = (
to_variance(other_uncert.array << other_uncert.unit)
.to(result_unit_sq)
.value
)
else:
other = to_variance(other_uncert.array)
else:
other = 0
if self.array is not None:
# Formula: sigma**2 = dA
if (
self.unit is not None
and to_variance(self.unit) != self.parent_nddata.unit**2
):
# If the uncertainty has a different unit than the result we
# need to convert it to the results unit.
this = to_variance(self.array << self.unit).to(result_unit_sq).value
else:
this = to_variance(self.array)
else:
this = 0
# Formula: sigma**2 = dA + dB +/- 2*cor*sqrt(dA*dB)
# Formula: sigma**2 = sigma_other + sigma_self +/- 2*cor*sqrt(dA*dB)
# (sign depends on whether addition or subtraction)
# Determine the result depending on the correlation
if isinstance(correlation, np.ndarray) or correlation != 0:
corr = 2 * correlation * np.sqrt(this * other)
result = this + other + correlation_sign * corr
else:
result = this + other
return from_variance(result)
def _propagate_multiply_divide(
self,
other_uncert,
result_data,
correlation,
divide=False,
to_variance=lambda x: x,
from_variance=lambda x: x,
):
"""
Error propagation for multiplication or division of variance or
variance-like uncertainties. Uncertainties are calculated using the
formulae for variance but can be used for uncertainty convertible to
a variance.
Parameters
----------
other_uncert : `~astropy.nddata.NDUncertainty` instance
The uncertainty, if any, of the other operand.
result_data : `~astropy.nddata.NDData` instance
The results of the operation on the data.
correlation : float or array-like
Correlation of the uncertainties.
divide : bool, optional
If ``True``, propagate for division, otherwise propagate for
multiplication.
to_variance : function, optional
Function that will transform the input uncertainties to variance.
The default assumes the uncertainty is the variance.
from_variance : function, optional
Function that will convert from variance to the input uncertainty.
The default assumes the uncertainty is the variance.
"""
# For multiplication we don't need the result as quantity
if isinstance(result_data, Quantity):
result_data = result_data.value
if divide:
correlation_sign = -1
else:
correlation_sign = 1
if other_uncert.array is not None:
# We want the result to have a unit consistent with the parent, so
# we only need to convert the unit of the other uncertainty if it
# is different from its data's unit.
if (
other_uncert.unit
and to_variance(1 * other_uncert.unit)
!= ((1 * other_uncert.parent_nddata.unit) ** 2).unit
):
d_b = (
to_variance(other_uncert.array << other_uncert.unit)
.to((1 * other_uncert.parent_nddata.unit) ** 2)
.value
)
else:
d_b = to_variance(other_uncert.array)
# Formula: sigma**2 = |A|**2 * d_b
right = np.abs(self.parent_nddata.data**2 * d_b)
else:
right = 0
if self.array is not None:
# Just the reversed case
if (
self.unit
and to_variance(1 * self.unit)
!= ((1 * self.parent_nddata.unit) ** 2).unit
):
d_a = (
to_variance(self.array << self.unit)
.to((1 * self.parent_nddata.unit) ** 2)
.value
)
else:
d_a = to_variance(self.array)
# Formula: sigma**2 = |B|**2 * d_a
left = np.abs(other_uncert.parent_nddata.data**2 * d_a)
else:
left = 0
# Multiplication
#
# The fundamental formula is:
# sigma**2 = |AB|**2*(d_a/A**2+d_b/B**2+2*sqrt(d_a)/A*sqrt(d_b)/B*cor)
#
# This formula is not very handy since it generates NaNs for every
# zero in A and B. So we rewrite it:
#
# Multiplication Formula:
# sigma**2 = (d_a*B**2 + d_b*A**2 + (2 * cor * ABsqrt(dAdB)))
# sigma**2 = (left + right + (2 * cor * ABsqrt(dAdB)))
#
# Division
#
# The fundamental formula for division is:
# sigma**2 = |A/B|**2*(d_a/A**2+d_b/B**2-2*sqrt(d_a)/A*sqrt(d_b)/B*cor)
#
# As with multiplication, it is convenient to rewrite this to avoid
# nans where A is zero.
#
# Division formula (rewritten):
# sigma**2 = d_a/B**2 + (A/B)**2 * d_b/B**2
# - 2 * cor * A *sqrt(dAdB) / B**3
# sigma**2 = d_a/B**2 + (A/B)**2 * d_b/B**2
# - 2*cor * sqrt(d_a)/B**2 * sqrt(d_b) * A / B
# sigma**2 = multiplication formula/B**4 (and sign change in
# the correlation)
if isinstance(correlation, np.ndarray) or correlation != 0:
corr = (
2
* correlation
* np.sqrt(d_a * d_b)
* self.parent_nddata.data
* other_uncert.parent_nddata.data
)
else:
corr = 0
if divide:
return from_variance(
(left + right + correlation_sign * corr)
/ other_uncert.parent_nddata.data**4
)
else:
return from_variance(left + right + correlation_sign * corr)
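# --- Illustrative sketch (not part of the astropy source) -----------------
# Numeric check of the rewritten multiplication formula derived in the
# comments above, sigma**2 = d_a*B**2 + d_b*A**2 (with zero correlation),
# against the textbook relative-error form.  Values are made up.
def _example_multiplication_variance_check():  # pragma: no cover
    import numpy as np
    A, B = 3.0, 4.0        # data values of the two operands
    d_a, d_b = 0.04, 0.09  # their variances
    rewritten = d_a * B**2 + d_b * A**2
    textbook = (A * B) ** 2 * (d_a / A**2 + d_b / B**2)
    assert np.isclose(rewritten, textbook)  # both give 1.45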
class StdDevUncertainty(_VariancePropagationMixin, NDUncertainty):
"""Standard deviation uncertainty assuming first order gaussian error
propagation.
This class implements uncertainty propagation for ``addition``,
``subtraction``, ``multiplication`` and ``division`` with other instances
of `StdDevUncertainty`. The class can handle if the uncertainty has a
    unit that differs from (but is convertible to) the parent's `NDData` unit.
The unit of the resulting uncertainty will have the same unit as the
resulting data. Also support for correlation is possible but requires the
correlation as input. It cannot handle correlation determination itself.
Parameters
----------
args, kwargs :
see `NDUncertainty`
Examples
--------
`StdDevUncertainty` should always be associated with an `NDData`-like
instance, either by creating it during initialization::
>>> from astropy.nddata import NDData, StdDevUncertainty
>>> ndd = NDData([1,2,3], unit='m',
... uncertainty=StdDevUncertainty([0.1, 0.1, 0.1]))
>>> ndd.uncertainty # doctest: +FLOAT_CMP
StdDevUncertainty([0.1, 0.1, 0.1])
or by setting it manually on the `NDData` instance::
>>> ndd.uncertainty = StdDevUncertainty([0.2], unit='m', copy=True)
>>> ndd.uncertainty # doctest: +FLOAT_CMP
StdDevUncertainty([0.2])
the uncertainty ``array`` can also be set directly::
>>> ndd.uncertainty.array = 2
>>> ndd.uncertainty
StdDevUncertainty(2)
.. note::
The unit will not be displayed.
"""
@property
def supports_correlated(self):
"""`True` : `StdDevUncertainty` allows to propagate correlated \
uncertainties.
``correlation`` must be given, this class does not implement computing
it by itself.
"""
return True
@property
def uncertainty_type(self):
"""``"std"`` : `StdDevUncertainty` implements standard deviation."""
return "std"
def _convert_uncertainty(self, other_uncert):
if isinstance(other_uncert, StdDevUncertainty):
return other_uncert
else:
raise IncompatibleUncertaintiesException
def _propagate_add(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(
other_uncert,
result_data,
correlation,
subtract=False,
to_variance=np.square,
from_variance=np.sqrt,
)
def _propagate_subtract(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(
other_uncert,
result_data,
correlation,
subtract=True,
to_variance=np.square,
from_variance=np.sqrt,
)
def _propagate_multiply(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(
other_uncert,
result_data,
correlation,
divide=False,
to_variance=np.square,
from_variance=np.sqrt,
)
def _propagate_divide(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(
other_uncert,
result_data,
correlation,
divide=True,
to_variance=np.square,
from_variance=np.sqrt,
)
def _propagate_collapse(self, numpy_operation, axis):
# defer to _VariancePropagationMixin
return super()._propagate_collapse(numpy_operation, axis=axis)
def _data_unit_to_uncertainty_unit(self, value):
return value
def _convert_to_variance(self):
new_array = None if self.array is None else self.array**2
new_unit = None if self.unit is None else self.unit**2
return VarianceUncertainty(new_array, unit=new_unit)
@classmethod
def _convert_from_variance(cls, var_uncert):
new_array = None if var_uncert.array is None else var_uncert.array ** (1 / 2)
new_unit = None if var_uncert.unit is None else var_uncert.unit ** (1 / 2)
return cls(new_array, unit=new_unit)
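# --- Illustrative sketch (not part of the astropy source) -----------------
# Converting between uncertainty types with represent_as(); the values are
# made up.  VarianceUncertainty is defined further below in this module and
# is resolved when the helper is called.
def _example_represent_as():  # pragma: no cover
    import numpy as np
    std = StdDevUncertainty(np.array([0.1, 0.2]))
    var = std.represent_as(VarianceUncertainty)
    print(var.array)  # -> approximately [0.01 0.04]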
class VarianceUncertainty(_VariancePropagationMixin, NDUncertainty):
"""
Variance uncertainty assuming first order Gaussian error
propagation.
This class implements uncertainty propagation for ``addition``,
``subtraction``, ``multiplication`` and ``division`` with other instances
of `VarianceUncertainty`. The class can handle if the uncertainty has a
    unit that differs from (but is convertible to) the parent's `NDData` unit.
The unit of the resulting uncertainty will be the square of the unit of the
resulting data. Also support for correlation is possible but requires the
correlation as input. It cannot handle correlation determination itself.
Parameters
----------
args, kwargs :
see `NDUncertainty`
Examples
--------
Compare this example to that in `StdDevUncertainty`; the uncertainties
in the examples below are equivalent to the uncertainties in
`StdDevUncertainty`.
`VarianceUncertainty` should always be associated with an `NDData`-like
instance, either by creating it during initialization::
>>> from astropy.nddata import NDData, VarianceUncertainty
>>> ndd = NDData([1,2,3], unit='m',
... uncertainty=VarianceUncertainty([0.01, 0.01, 0.01]))
>>> ndd.uncertainty # doctest: +FLOAT_CMP
VarianceUncertainty([0.01, 0.01, 0.01])
or by setting it manually on the `NDData` instance::
>>> ndd.uncertainty = VarianceUncertainty([0.04], unit='m^2', copy=True)
>>> ndd.uncertainty # doctest: +FLOAT_CMP
VarianceUncertainty([0.04])
the uncertainty ``array`` can also be set directly::
>>> ndd.uncertainty.array = 4
>>> ndd.uncertainty
VarianceUncertainty(4)
.. note::
The unit will not be displayed.
"""
@property
def uncertainty_type(self):
"""``"var"`` : `VarianceUncertainty` implements variance."""
return "var"
@property
def supports_correlated(self):
"""`True` : `VarianceUncertainty` allows to propagate correlated \
uncertainties.
``correlation`` must be given, this class does not implement computing
it by itself.
"""
return True
def _propagate_add(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(
other_uncert, result_data, correlation, subtract=False
)
def _propagate_subtract(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(
other_uncert, result_data, correlation, subtract=True
)
def _propagate_multiply(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(
other_uncert, result_data, correlation, divide=False
)
def _propagate_divide(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(
other_uncert, result_data, correlation, divide=True
)
def _data_unit_to_uncertainty_unit(self, value):
return value**2
def _convert_to_variance(self):
return self
@classmethod
def _convert_from_variance(cls, var_uncert):
return var_uncert
def _inverse(x):
"""Just a simple inverse for use in the InverseVariance."""
return 1 / x
class InverseVariance(_VariancePropagationMixin, NDUncertainty):
"""
Inverse variance uncertainty assuming first order Gaussian error
propagation.
This class implements uncertainty propagation for ``addition``,
``subtraction``, ``multiplication`` and ``division`` with other instances
of `InverseVariance`. The class can handle if the uncertainty has a unit
    that differs from (but is convertible to) the parent's `NDData` unit. The
    unit of the resulting uncertainty will be the inverse square of the unit of
the resulting data. Also support for correlation is possible but requires
the correlation as input. It cannot handle correlation determination
itself.
Parameters
----------
args, kwargs :
see `NDUncertainty`
Examples
--------
Compare this example to that in `StdDevUncertainty`; the uncertainties
in the examples below are equivalent to the uncertainties in
`StdDevUncertainty`.
`InverseVariance` should always be associated with an `NDData`-like
instance, either by creating it during initialization::
>>> from astropy.nddata import NDData, InverseVariance
>>> ndd = NDData([1,2,3], unit='m',
... uncertainty=InverseVariance([100, 100, 100]))
>>> ndd.uncertainty # doctest: +FLOAT_CMP
InverseVariance([100, 100, 100])
or by setting it manually on the `NDData` instance::
>>> ndd.uncertainty = InverseVariance([25], unit='1/m^2', copy=True)
>>> ndd.uncertainty # doctest: +FLOAT_CMP
InverseVariance([25])
the uncertainty ``array`` can also be set directly::
>>> ndd.uncertainty.array = 0.25
>>> ndd.uncertainty
InverseVariance(0.25)
.. note::
The unit will not be displayed.
"""
@property
def uncertainty_type(self):
"""``"ivar"`` : `InverseVariance` implements inverse variance."""
return "ivar"
@property
def supports_correlated(self):
"""`True` : `InverseVariance` allows to propagate correlated \
uncertainties.
``correlation`` must be given, this class does not implement computing
it by itself.
"""
return True
def _propagate_add(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(
other_uncert,
result_data,
correlation,
subtract=False,
to_variance=_inverse,
from_variance=_inverse,
)
def _propagate_subtract(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(
other_uncert,
result_data,
correlation,
subtract=True,
to_variance=_inverse,
from_variance=_inverse,
)
def _propagate_multiply(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(
other_uncert,
result_data,
correlation,
divide=False,
to_variance=_inverse,
from_variance=_inverse,
)
def _propagate_divide(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(
other_uncert,
result_data,
correlation,
divide=True,
to_variance=_inverse,
from_variance=_inverse,
)
def _data_unit_to_uncertainty_unit(self, value):
return 1 / value**2
def _convert_to_variance(self):
new_array = None if self.array is None else 1 / self.array
new_unit = None if self.unit is None else 1 / self.unit
return VarianceUncertainty(new_array, unit=new_unit)
@classmethod
def _convert_from_variance(cls, var_uncert):
new_array = None if var_uncert.array is None else 1 / var_uncert.array
new_unit = None if var_uncert.unit is None else 1 / var_uncert.unit
return cls(new_array, unit=new_unit)
|
7089e2af3df65fcd0ea4cd5a2105d4c716478b4e9ff1ee77195f503b76f90f98 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module implements the base NDData class.
from copy import deepcopy
import numpy as np
from astropy import log
from astropy.units import Quantity, Unit
from astropy.utils.masked import Masked, MaskedNDArray
from astropy.utils.metadata import MetaData
from astropy.wcs.wcsapi import SlicedLowLevelWCS # noqa: F401
from astropy.wcs.wcsapi import BaseHighLevelWCS, BaseLowLevelWCS, HighLevelWCSWrapper
from .nddata_base import NDDataBase
from .nduncertainty import NDUncertainty, UnknownUncertainty
__all__ = ["NDData"]
_meta_doc = """`dict`-like : Additional meta information about the dataset."""
class NDData(NDDataBase):
"""
A container for `numpy.ndarray`-based datasets, using the
`~astropy.nddata.NDDataBase` interface.
The key distinction from raw `numpy.ndarray` is the presence of
additional metadata such as uncertainty, mask, unit, a coordinate system
and/or a dictionary containing further meta information. This class *only*
provides a container for *storing* such datasets. For further functionality
take a look at the ``See also`` section.
See also: https://docs.astropy.org/en/stable/nddata/
Parameters
----------
data : `numpy.ndarray`-like or `NDData`-like
The dataset.
uncertainty : any type, optional
Uncertainty in the dataset.
Should have an attribute ``uncertainty_type`` that defines what kind of
uncertainty is stored, for example ``"std"`` for standard deviation or
``"var"`` for variance. A metaclass defining such an interface is
`NDUncertainty` - but isn't mandatory. If the uncertainty has no such
attribute the uncertainty is stored as `UnknownUncertainty`.
Defaults to ``None``.
mask : any type, optional
Mask for the dataset. Masks should follow the ``numpy`` convention that
**valid** data points are marked by ``False`` and **invalid** ones with
``True``.
Defaults to ``None``.
wcs : any type, optional
World coordinate system (WCS) for the dataset.
Default is ``None``.
meta : `dict`-like object, optional
Additional meta information about the dataset. If no meta is provided
an empty `collections.OrderedDict` is created.
Default is ``None``.
unit : unit-like, optional
Unit for the dataset. Strings that can be converted to a
`~astropy.units.Unit` are allowed.
Default is ``None``.
copy : `bool`, optional
Indicates whether to save the arguments as copy. ``True`` copies
every attribute before saving it while ``False`` tries to save every
parameter as reference.
Note however that it is not always possible to save the input as
reference.
Default is ``False``.
.. versionadded:: 1.2
psf : `numpy.ndarray` or None, optional
Image representation of the PSF. In order for convolution to be flux-
preserving, this should generally be normalized to sum to unity.
Raises
------
TypeError
In case ``data`` or ``meta`` don't meet the restrictions.
Notes
-----
Each attribute can be accessed through the homonymous instance attribute:
``data`` in a `NDData` object can be accessed through the `data`
attribute::
>>> from astropy.nddata import NDData
>>> nd = NDData([1,2,3])
>>> nd.data
array([1, 2, 3])
Given a conflicting implicit and an explicit parameter during
initialization, for example the ``data`` is a `~astropy.units.Quantity` and
    the unit parameter is not ``None``, then the implicit parameter is
    overridden by the explicit one and an informational message is issued::
>>> import numpy as np
>>> import astropy.units as u
>>> q = np.array([1,2,3,4]) * u.m
>>> nd2 = NDData(q, unit=u.cm)
INFO: overwriting Quantity's current unit with specified unit. [astropy.nddata.nddata]
>>> nd2.data # doctest: +FLOAT_CMP
array([100., 200., 300., 400.])
>>> nd2.unit
Unit("cm")
See Also
--------
NDDataRef
NDDataArray
"""
# Instead of a custom property use the MetaData descriptor also used for
# Tables. It will check if the meta is dict-like or raise an exception.
meta = MetaData(doc=_meta_doc, copy=False)
def __init__(
self,
data,
uncertainty=None,
mask=None,
wcs=None,
meta=None,
unit=None,
copy=False,
psf=None,
):
# Rather pointless since the NDDataBase does not implement any setting
# but before the NDDataBase did call the uncertainty
# setter. But if anyone wants to alter this behavior again the call
# to the superclass NDDataBase should be in here.
super().__init__()
# Check if data is any type from which to collect some implicitly
# passed parameters.
if isinstance(data, NDData): # don't use self.__class__ (issue #4137)
# Of course we need to check the data because subclasses with other
# init-logic might be passed in here. We could skip these
# tests if we compared for self.__class__ but that has other
# drawbacks.
# Comparing if there is an explicit and an implicit unit parameter.
# If that is the case use the explicit one and issue a warning
# that there might be a conflict. In case there is no explicit
# unit just overwrite the unit parameter with the NDData.unit
# and proceed as if that one was given as parameter. Same for the
# other parameters.
if unit is None and data.unit is not None:
unit = data.unit
elif unit is not None and data.unit is not None:
log.info("overwriting NDData's current unit with specified unit.")
if uncertainty is not None and data.uncertainty is not None:
log.info(
"overwriting NDData's current "
"uncertainty with specified uncertainty."
)
elif data.uncertainty is not None:
uncertainty = data.uncertainty
if mask is not None and data.mask is not None:
log.info("overwriting NDData's current mask with specified mask.")
elif data.mask is not None:
mask = data.mask
if wcs is not None and data.wcs is not None:
log.info("overwriting NDData's current wcs with specified wcs.")
elif data.wcs is not None:
wcs = data.wcs
if psf is not None and data.psf is not None:
log.info("Overwriting NDData's current psf with specified psf.")
elif data.psf is not None:
psf = data.psf
if meta is not None and data.meta is not None:
log.info("overwriting NDData's current meta with specified meta.")
elif data.meta is not None:
meta = data.meta
# get the data attribute as it is, and continue to process it:
data = data.data
# if the data is wrapped by astropy.utils.masked.Masked:
if isinstance(data, Masked):
# first get the mask if one is available:
if hasattr(data, "mask"):
if mask is not None:
log.info(
"overwriting Masked Quantity's current mask with specified mask."
)
else:
mask = data.mask
if isinstance(data, MaskedNDArray):
if unit is not None and hasattr(data, "unit") and data.unit != unit:
log.info(
"overwriting MaskedNDArray's current unit with specified unit."
)
data = data.to(unit).value
elif unit is None and hasattr(data, "unit"):
unit = data.unit
data = data.value
# now get the unmasked ndarray:
data = np.asarray(data)
if isinstance(data, Quantity):
# this is a Quantity:
if unit is not None and data.unit != unit:
log.info("overwriting Quantity's current unit with specified unit.")
data = data.to(unit)
elif unit is None and data.unit is not None:
unit = data.unit
data = data.value
if isinstance(data, np.ma.masked_array):
if mask is not None:
log.info(
"overwriting masked ndarray's current mask with specified mask."
)
else:
mask = data.mask
data = data.data
if isinstance(data, Quantity):
# this is a Quantity:
if unit is not None and data.unit != unit:
log.info("overwriting Quantity's current unit with specified unit.")
data = data.to(unit)
elif unit is None and data.unit is not None:
unit = data.unit
data = data.value
if isinstance(data, np.ndarray):
# check for mask from np.ma.masked_ndarray
if hasattr(data, "mask"):
if mask is not None:
log.info(
"overwriting masked ndarray's current mask with specified mask."
)
else:
mask = data.mask
# Quick check on the parameters if they match the requirements.
if (
not hasattr(data, "shape")
or not hasattr(data, "__getitem__")
or not hasattr(data, "__array__")
):
# Data doesn't look like a numpy array, try converting it to
# one.
data = np.array(data, subok=True, copy=False)
# Another quick check to see if what we got looks like an array
            # rather than an object (since numpy will convert
            # non-numerical/non-string inputs to an array of objects).
if data.dtype == "O":
raise TypeError("could not convert data to numpy array.")
if unit is not None:
unit = Unit(unit)
if copy:
# Data might have been copied before but no way of validating
# without another variable.
data = deepcopy(data)
mask = deepcopy(mask)
wcs = deepcopy(wcs)
psf = deepcopy(psf)
meta = deepcopy(meta)
uncertainty = deepcopy(uncertainty)
# Actually - copying the unit is unnecessary but better safe
# than sorry :-)
unit = deepcopy(unit)
# Store the attributes
self._data = data
self.mask = mask
self._wcs = None
if wcs is not None:
# Validate the wcs
self.wcs = wcs
self.meta = meta # TODO: Make this call the setter sometime
self._unit = unit
# Call the setter for uncertainty to further check the uncertainty
self.uncertainty = uncertainty
self.psf = psf
def __str__(self):
data = str(self.data)
unit = f" {self.unit}" if self.unit is not None else ""
return data + unit
def __repr__(self):
prefix = self.__class__.__name__ + "("
data = np.array2string(self.data, separator=", ", prefix=prefix)
unit = f", unit='{self.unit}'" if self.unit is not None else ""
return f"{prefix}{data}{unit})"
@property
def data(self):
"""
`~numpy.ndarray`-like : The stored dataset.
"""
return self._data
@property
def mask(self):
"""
any type : Mask for the dataset, if any.
Masks should follow the ``numpy`` convention that valid data points are
marked by ``False`` and invalid ones with ``True``.
"""
return self._mask
@mask.setter
def mask(self, value):
self._mask = value
@property
def unit(self):
"""
`~astropy.units.Unit` : Unit for the dataset, if any.
"""
return self._unit
@property
def wcs(self):
"""
any type : A world coordinate system (WCS) for the dataset, if any.
"""
return self._wcs
@wcs.setter
def wcs(self, wcs):
if self._wcs is not None and wcs is not None:
raise ValueError(
"You can only set the wcs attribute with a WCS if no WCS is present."
)
if wcs is None or isinstance(wcs, BaseHighLevelWCS):
self._wcs = wcs
elif isinstance(wcs, BaseLowLevelWCS):
self._wcs = HighLevelWCSWrapper(wcs)
else:
raise TypeError(
"The wcs argument must implement either the high or low level WCS API."
)
@property
def psf(self):
return self._psf
@psf.setter
def psf(self, value):
self._psf = value
@property
def uncertainty(self):
"""
any type : Uncertainty in the dataset, if any.
Should have an attribute ``uncertainty_type`` that defines what kind of
uncertainty is stored, such as ``'std'`` for standard deviation or
``'var'`` for variance. A metaclass defining such an interface is
`~astropy.nddata.NDUncertainty` but isn't mandatory.
"""
return self._uncertainty
@uncertainty.setter
def uncertainty(self, value):
if value is not None:
            # There is one requirement on the uncertainty: that
# it has an attribute 'uncertainty_type'.
# If it does not match this requirement convert it to an unknown
# uncertainty.
if not hasattr(value, "uncertainty_type"):
log.info("uncertainty should have attribute uncertainty_type.")
value = UnknownUncertainty(value, copy=False)
# If it is a subclass of NDUncertainty we must set the
# parent_nddata attribute. (#4152)
if isinstance(value, NDUncertainty):
# In case the uncertainty already has a parent create a new
# instance because we need to assume that we don't want to
# steal the uncertainty from another NDData object
if value._parent_nddata is not None:
value = value.__class__(value, copy=False)
# Then link it to this NDData instance (internally this needs
# to be saved as weakref but that's done by NDUncertainty
# setter).
value.parent_nddata = self
self._uncertainty = value
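# --- Illustrative sketch (not part of the astropy source) -----------------
# Building an NDData with a mask and a standard-deviation uncertainty and
# showing that the uncertainty gets linked to its parent and inherits the
# parent's unit.  Values are made up for illustration.
def _example_nddata_with_uncertainty():  # pragma: no cover
    import numpy as np
    from astropy.nddata import StdDevUncertainty
    nd = NDData(
        np.array([1.0, 2.0, 3.0]),
        unit="adu",
        mask=np.array([False, False, True]),
        uncertainty=StdDevUncertainty([0.1, 0.1, 0.2]),
    )
    print(nd.uncertainty.unit)                 # -> adu (taken from the parent)
    print(nd.uncertainty.parent_nddata is nd)  # -> True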
|
28dcb4e6d150498dfad7a5707aa2df09dbffc6fcb6ed6ce67fbf2b09d53e248f | """
A module that provides functions for manipulating bit masks and data quality
(DQ) arrays.
"""
import numbers
import warnings
from collections import OrderedDict
import numpy as np
__all__ = [
"bitfield_to_boolean_mask",
"interpret_bit_flags",
"BitFlagNameMap",
"extend_bit_flag_map",
"InvalidBitFlag",
]
_ENABLE_BITFLAG_CACHING = True
_MAX_UINT_TYPE = np.maximum_sctype(np.uint)
_SUPPORTED_FLAGS = int(np.bitwise_not(0, dtype=_MAX_UINT_TYPE, casting="unsafe"))
def _is_bit_flag(n):
"""
Verifies if the input number is a bit flag (i.e., an integer number that is
an integer power of 2).
Parameters
----------
n : int
A positive integer number. Non-positive integers are considered not to
be "flags".
Returns
-------
bool
``True`` if input ``n`` is a bit flag and ``False`` if it is not.
"""
if n < 1:
return False
return bin(n).count("1") == 1
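# --- Illustrative sketch (not part of the astropy source) -----------------
# _is_bit_flag above accepts only positive integers that are powers of two.
def _example_is_bit_flag():  # pragma: no cover
    print(_is_bit_flag(8))  # -> True  (2**3, a single bit set)
    print(_is_bit_flag(6))  # -> False (two bits set)
    print(_is_bit_flag(0))  # -> False (not positive)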
def _is_int(n):
return (isinstance(n, numbers.Integral) and not isinstance(n, bool)) or (
isinstance(n, np.generic) and np.issubdtype(n, np.integer)
)
class InvalidBitFlag(ValueError):
"""Indicates that a value is not an integer that is a power of 2."""
pass
class BitFlag(int):
"""Bit flags: integer values that are powers of 2."""
def __new__(cls, val, doc=None):
if isinstance(val, tuple):
if doc is not None:
raise ValueError("Flag's doc string cannot be provided twice.")
val, doc = val
if not (_is_int(val) and _is_bit_flag(val)):
raise InvalidBitFlag(
"Value '{}' is not a valid bit flag: bit flag value must be "
"an integral power of two.".format(val)
)
s = int.__new__(cls, val)
if doc is not None:
s.__doc__ = doc
return s
class BitFlagNameMeta(type):
def __new__(mcls, name, bases, members):
for k, v in members.items():
if not k.startswith("_"):
v = BitFlag(v)
attr = [k for k in members.keys() if not k.startswith("_")]
attrl = list(map(str.lower, attr))
if _ENABLE_BITFLAG_CACHING:
cache = OrderedDict()
for b in bases:
for k, v in b.__dict__.items():
if k.startswith("_"):
continue
kl = k.lower()
if kl in attrl:
idx = attrl.index(kl)
raise AttributeError(
f"Bit flag '{attr[idx]:s}' was already defined."
)
if _ENABLE_BITFLAG_CACHING:
cache[kl] = v
members = {
k: v if k.startswith("_") else BitFlag(v) for k, v in members.items()
}
if _ENABLE_BITFLAG_CACHING:
cache.update(
{k.lower(): v for k, v in members.items() if not k.startswith("_")}
)
members = {"_locked": True, "__version__": "", **members, "_cache": cache}
else:
members = {"_locked": True, "__version__": "", **members}
return super().__new__(mcls, name, bases, members)
def __setattr__(cls, name, val):
if name == "_locked":
return super().__setattr__(name, True)
else:
if name == "__version__":
if cls._locked:
raise AttributeError("Version cannot be modified.")
return super().__setattr__(name, val)
err_msg = f"Bit flags are read-only. Unable to reassign attribute {name}"
if cls._locked:
raise AttributeError(err_msg)
namel = name.lower()
if _ENABLE_BITFLAG_CACHING:
if not namel.startswith("_") and namel in cls._cache:
raise AttributeError(err_msg)
else:
for b in cls.__bases__:
if not namel.startswith("_") and namel in list(
map(str.lower, b.__dict__)
):
raise AttributeError(err_msg)
if namel in list(map(str.lower, cls.__dict__)):
raise AttributeError(err_msg)
val = BitFlag(val)
if _ENABLE_BITFLAG_CACHING and not namel.startswith("_"):
cls._cache[namel] = val
return super().__setattr__(name, val)
def __getattr__(cls, name):
if _ENABLE_BITFLAG_CACHING:
flagnames = cls._cache
else:
flagnames = {k.lower(): v for k, v in cls.__dict__.items()}
flagnames.update(
{k.lower(): v for b in cls.__bases__ for k, v in b.__dict__.items()}
)
try:
return flagnames[name.lower()]
except KeyError:
raise AttributeError(f"Flag '{name}' not defined")
def __getitem__(cls, key):
return cls.__getattr__(key)
def __add__(cls, items):
if not isinstance(items, dict):
if not isinstance(items[0], (tuple, list)):
items = [items]
items = dict(items)
return extend_bit_flag_map(
cls.__name__ + "_" + "_".join(list(items)), cls, **items
)
def __iadd__(cls, other):
raise NotImplementedError(
"Unary '+' is not supported. Use binary operator instead."
)
def __delattr__(cls, name):
raise AttributeError(
f"{cls.__name__}: cannot delete {cls.mro()[-2].__name__} member."
)
def __delitem__(cls, name):
raise AttributeError(
f"{cls.__name__}: cannot delete {cls.mro()[-2].__name__} member."
)
def __repr__(cls):
return f"<{cls.mro()[-2].__name__} '{cls.__name__}'>"
class BitFlagNameMap(metaclass=BitFlagNameMeta):
"""
A base class for bit flag name maps used to describe data quality (DQ)
flags of images by providing a mapping from a mnemonic flag name to a flag
value.
Mapping for a specific instrument should subclass this class.
Subclasses should define flags as class attributes with integer values
that are powers of 2. Each bit flag may also contain a string
comment following the flag value.
Examples
--------
>>> from astropy.nddata.bitmask import BitFlagNameMap
>>> class ST_DQ(BitFlagNameMap):
... __version__ = '1.0.0' # optional
... CR = 1, 'Cosmic Ray'
... CLOUDY = 4 # no docstring comment
... RAINY = 8, 'Dome closed'
...
>>> class ST_CAM1_DQ(ST_DQ):
... HOT = 16
... DEAD = 32
"""
pass
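# Continuing the docstring example above (illustrative): flag names defined
# on a BitFlagNameMap are looked up case-insensitively through the metaclass,
# so ST_CAM1_DQ.HOT, ST_CAM1_DQ.hot and ST_CAM1_DQ['Hot'] all evaluate to 16,
# and the maps are read-only once created (assigning ST_CAM1_DQ.HOT = 1
# raises AttributeError).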
def extend_bit_flag_map(cls_name, base_cls=BitFlagNameMap, **kwargs):
"""
A convenience function for creating bit flags maps by subclassing an
existing map and adding additional flags supplied as keyword arguments.
Parameters
----------
cls_name : str
Class name of the bit flag map to be created.
base_cls : BitFlagNameMap, optional
Base class for the new bit flag map.
**kwargs : int
Each supplied keyword argument will be used to define bit flag
names in the new map. In addition to bit flag names, ``__version__`` is
allowed to indicate the version of the newly created map.
Examples
--------
>>> from astropy.nddata.bitmask import extend_bit_flag_map
>>> ST_DQ = extend_bit_flag_map('ST_DQ', __version__='1.0.0', CR=1, CLOUDY=4, RAINY=8)
>>> ST_CAM1_DQ = extend_bit_flag_map('ST_CAM1_DQ', ST_DQ, HOT=16, DEAD=32)
>>> ST_CAM1_DQ['HOT'] # <-- Access flags as dictionary keys
16
>>> ST_CAM1_DQ.HOT # <-- Access flags as class attributes
16
"""
new_cls = BitFlagNameMeta.__new__(
BitFlagNameMeta, cls_name, (base_cls,), {"_locked": False}
)
for k, v in kwargs.items():
try:
setattr(new_cls, k, v)
except AttributeError as e:
if new_cls[k] != int(v):
raise e
new_cls._locked = True
return new_cls
def interpret_bit_flags(bit_flags, flip_bits=None, flag_name_map=None):
"""
Converts input bit flags to a single integer value (bit mask) or `None`.
When input is a list of flags (either a Python list of integer flags or a
string of comma-, ``'|'``-, or ``'+'``-separated list of flags),
the returned bit mask is obtained by summing input flags.
.. note::
In order to flip the bits of the returned bit mask,
for input of `str` type, prepend '~' to the input string. '~' must
be prepended to the *entire string* and not to each bit flag! For
input that is already a bit mask or a Python list of bit flags, set
``flip_bits`` to `True` in order to flip the bits of the returned
bit mask.
Parameters
----------
bit_flags : int, str, list, None
An integer bit mask or flag, `None`, a string of comma-, ``'|'``- or
``'+'``-separated list of integer bit flags or mnemonic flag names,
or a Python list of integer bit flags. If ``bit_flags`` is a `str`
and if it is prepended with '~', then the output bit mask will have
its bits flipped (compared to simple sum of input flags).
For input ``bit_flags`` that is already a bit mask or a Python list
of bit flags, bit-flipping can be controlled through ``flip_bits``
parameter.
.. note::
When ``bit_flags`` is a list of flag names, the ``flag_name_map``
parameter must be provided.
.. note::
Only one flag separator is supported at a time. ``bit_flags``
string should not mix ``','``, ``'+'``, and ``'|'`` separators.
flip_bits : bool, None
Indicates whether or not to flip the bits of the returned bit mask
obtained from input bit flags. This parameter must be set to `None`
when input ``bit_flags`` is either `None` or a Python list of flags.
flag_name_map : BitFlagNameMap
A `BitFlagNameMap` object that provides mapping from mnemonic
bit flag names to integer bit values in order to translate mnemonic
flags to numeric values when ``bit_flags`` is a comma- or
'+'-separated list of mnemonic bit flag names.
Returns
-------
bitmask : int or None
Returns an integer bit mask formed from the input bit value or `None`
if input ``bit_flags`` parameter is `None` or an empty string.
If input string value was prepended with '~' (or ``flip_bits`` was set
to `True`), then returned value will have its bits flipped
(inverse mask).
Examples
--------
>>> from astropy.nddata.bitmask import interpret_bit_flags, extend_bit_flag_map
>>> ST_DQ = extend_bit_flag_map('ST_DQ', CR=1, CLOUDY=4, RAINY=8, HOT=16, DEAD=32)
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags(28))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('4,8,16'))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('CLOUDY,RAINY,HOT', flag_name_map=ST_DQ))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~4,8,16'))
'1111111111100011'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~(4+8+16)'))
'1111111111100011'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~(CLOUDY+RAINY+HOT)',
... flag_name_map=ST_DQ))
'1111111111100011'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags([4, 8, 16]))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags([4, 8, 16], flip_bits=True))
'1111111111100011'
"""
has_flip_bits = flip_bits is not None
flip_bits = bool(flip_bits)
allow_non_flags = False
if _is_int(bit_flags):
return ~int(bit_flags) if flip_bits else int(bit_flags)
elif bit_flags is None:
if has_flip_bits:
raise TypeError(
"Keyword argument 'flip_bits' must be set to 'None' when "
"input 'bit_flags' is None."
)
return None
elif isinstance(bit_flags, str):
if has_flip_bits:
raise TypeError(
"Keyword argument 'flip_bits' is not permitted for "
"comma-separated string lists of bit flags. Prepend '~' to "
"the string to indicate bit-flipping."
)
bit_flags = str(bit_flags).strip()
if bit_flags.upper() in ["", "NONE", "INDEF"]:
return None
# check whether bitwise-NOT is present and if it is, check that it is
# in the first position:
bitflip_pos = bit_flags.find("~")
if bitflip_pos == 0:
flip_bits = True
bit_flags = bit_flags[1:].lstrip()
else:
if bitflip_pos > 0:
raise ValueError("Bitwise-NOT must precede bit flag list.")
flip_bits = False
# basic check for correct use of parenthesis:
while True:
nlpar = bit_flags.count("(")
nrpar = bit_flags.count(")")
if nlpar == 0 and nrpar == 0:
break
if nlpar != nrpar:
raise ValueError("Unbalanced parentheses in bit flag list.")
lpar_pos = bit_flags.find("(")
rpar_pos = bit_flags.rfind(")")
if lpar_pos > 0 or rpar_pos < (len(bit_flags) - 1):
raise ValueError(
"Incorrect syntax (incorrect use of parenthesis) in bit flag list."
)
bit_flags = bit_flags[1:-1].strip()
if sum(k in bit_flags for k in "+,|") > 1:
raise ValueError(
"Only one type of bit flag separator may be used in one "
"expression. Allowed separators are: '+', '|', or ','."
)
if "," in bit_flags:
bit_flags = bit_flags.split(",")
elif "+" in bit_flags:
bit_flags = bit_flags.split("+")
elif "|" in bit_flags:
bit_flags = bit_flags.split("|")
else:
if bit_flags == "":
raise ValueError(
"Empty bit flag lists not allowed when either bitwise-NOT "
"or parenthesis are present."
)
bit_flags = [bit_flags]
if flag_name_map is not None:
try:
int(bit_flags[0])
except ValueError:
bit_flags = [flag_name_map[f] for f in bit_flags]
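# A single remaining item is allowed to be a generic integer bit mask
# (e.g. '12') rather than a power-of-two flag; only lists with several
# items must consist of genuine bit flags (checked further below).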
allow_non_flags = len(bit_flags) == 1
elif hasattr(bit_flags, "__iter__"):
if not all(_is_int(flag) for flag in bit_flags):
if flag_name_map is not None and all(
isinstance(flag, str) for flag in bit_flags
):
bit_flags = [flag_name_map[f] for f in bit_flags]
else:
raise TypeError(
"Every bit flag in a list must be either an "
"integer flag value or a 'str' flag name."
)
else:
raise TypeError("Unsupported type for argument 'bit_flags'.")
bitset = set(map(int, bit_flags))
if len(bitset) != len(bit_flags):
warnings.warn("Duplicate bit flags will be ignored")
bitmask = 0
for v in bitset:
if not _is_bit_flag(v) and not allow_non_flags:
raise ValueError(
f"Input list contains invalid (not powers of two) bit flag: {v}"
)
bitmask += v
if flip_bits:
bitmask = ~bitmask
return bitmask
def bitfield_to_boolean_mask(
bitfield,
ignore_flags=0,
flip_bits=None,
good_mask_value=False,
dtype=np.bool_,
flag_name_map=None,
):
"""
bitfield_to_boolean_mask(bitfield, ignore_flags=0, flip_bits=None, \
good_mask_value=False, dtype=numpy.bool_)
Converts an array of bit fields to a boolean (or integer) mask array
according to a bit mask constructed from the supplied bit flags (see
``ignore_flags`` parameter).
This function is particularly useful to convert data quality arrays to
boolean masks with selective filtering of DQ flags.
Parameters
----------
bitfield : ndarray
An array of bit flags. By default, values different from zero are
interpreted as "bad" values and values equal to zero are considered
as "good" values. However, see ``ignore_flags`` parameter on how to
selectively ignore some bits in the ``bitfield`` array data.
ignore_flags : int, str, list, None (default = 0)
An integer bit mask, `None`, a Python list of bit flags, or a comma-,
``'+'``-, or ``'|'``-separated string list of integer bit flags or
mnemonic flag names that indicate what bits in the input ``bitfield``
should be *ignored* (i.e., zeroed).
.. note::
When ``ignore_flags`` is a list of flag names, the ``flag_name_map``
parameter must be provided.
| Setting ``ignore_flags`` to `None` will effectively make
`bitfield_to_boolean_mask` interpret all ``bitfield`` elements
as "good" regardless of their value.
| When ``ignore_flags`` argument is an integer bit mask, it will be
combined using bitwise-NOT and bitwise-AND with each element of the
input ``bitfield`` array (``~ignore_flags & bitfield``). If the
resultant bitfield element is non-zero, that element will be
interpreted as a "bad" in the output boolean mask and it will be
interpreted as "good" otherwise. ``flip_bits`` parameter may be used
to flip the bits (``bitwise-NOT``) of the bit mask thus effectively
changing the meaning of the ``ignore_flags`` parameter from "ignore"
to "use only" these flags.
.. note::
Setting ``ignore_flags`` to 0 will effectively assume that all
non-zero elements in the input ``bitfield`` array are to be
interpreted as "bad".
| When ``ignore_flags`` argument is a Python list of integer bit
flags, these flags are added together to create an integer bit mask.
Each item in the list must be a flag, i.e., an integer that is an
integer power of 2. In order to flip the bits of the resultant
bit mask, use ``flip_bits`` parameter.
| Alternatively, ``ignore_flags`` may be a string of comma- or
``'+'``(or ``'|'``)-separated list of integer bit flags that should
be added (bitwise OR) together to create an integer bit mask.
For example, ``'4,8'``, ``'4|8'``, and ``'4+8'`` are all equivalent
and indicate that bit flags 4 and 8 in the input ``bitfield``
array should be ignored when generating boolean mask.
.. note::
``'None'``, ``'INDEF'``, and empty (or all white space) strings
are special values of string ``ignore_flags`` that are
interpreted as `None`.
.. note::
Each item in the list must be a flag, i.e., an integer that is an
integer power of 2. In addition, for convenience, an arbitrary
**single** integer is allowed and it will be interpreted as an
integer bit mask. For example, instead of ``'4,8'`` one could
simply provide string ``'12'``.
.. note::
Only one flag separator is supported at a time. ``ignore_flags``
string should not mix ``','``, ``'+'``, and ``'|'`` separators.
.. note::
When ``ignore_flags`` is a `str` and when it is prepended with
'~', then the meaning of ``ignore_flags`` parameters will be
reversed: now it will be interpreted as a list of bit flags to be
*used* (or *not ignored*) when deciding which elements of the
input ``bitfield`` array are "bad". Following this convention,
an ``ignore_flags`` string value of ``'~0'`` would be equivalent
to setting ``ignore_flags=None``.
.. warning::
Because prepending '~' to a string ``ignore_flags`` is equivalent
to setting ``flip_bits`` to `True`, ``flip_bits`` cannot be used
with string ``ignore_flags`` and it must be set to `None`.
flip_bits : bool, None (default = None)
Specifies whether or not to invert the bits of the bit mask either
supplied directly through ``ignore_flags`` parameter or built from the
bit flags passed through ``ignore_flags`` (only when bit flags are
passed as Python lists of integer bit flags). Occasionally, it may be
useful to *consider only specific bit flags* in the ``bitfield``
array when creating a boolean mask as opposed to *ignoring* specific
bit flags as ``ignore_flags`` behaves by default. This can be achieved
by inverting/flipping the bits of the bit mask created from
``ignore_flags`` flags which effectively changes the meaning of the
``ignore_flags`` parameter from "ignore" to "use only" these flags.
Setting ``flip_bits`` to `None` means that no bit flipping will be
performed. Bit flipping for string lists of bit flags must be
specified by prepending '~' to string bit flag lists
(see documentation for ``ignore_flags`` for more details).
.. warning::
This parameter can be set to either `True` or `False` **ONLY** when
``ignore_flags`` is either an integer bit mask or a Python
list of integer bit flags. When ``ignore_flags`` is either
`None` or a string list of flags, ``flip_bits`` **MUST** be set
to `None`.
good_mask_value : int, bool (default = False)
This parameter is used to derive the values that will be assigned to
the elements in the output boolean mask array that correspond to the
"good" bit fields (that are 0 after zeroing bits specified by
``ignore_flags``) in the input ``bitfield`` array. When
``good_mask_value`` is non-zero or ``numpy.True_`` then values in the
output boolean mask array corresponding to "good" bit fields in
``bitfield`` will be ``numpy.True_`` (if ``dtype`` is ``numpy.bool_``)
or 1 (if ``dtype`` is of numerical type) and values corresponding
to "bad" flags will be ``numpy.False_`` (or 0). When
``good_mask_value`` is zero or ``numpy.False_`` then the values
in the output boolean mask array corresponding to "good" bit fields
in ``bitfield`` will be ``numpy.False_`` (if ``dtype`` is
``numpy.bool_``) or 0 (if ``dtype`` is of numerical type) and values
of corresponding to "bad" flags will be ``numpy.True_`` (or 1).
dtype : data-type (default = ``numpy.bool_``)
The desired data-type for the output binary mask array.
flag_name_map : BitFlagNameMap
A `BitFlagNameMap` object that provides mapping from mnemonic
bit flag names to integer bit values in order to translate mnemonic
flags to numeric values when ``ignore_flags`` is a comma- or
'+'-separated list of mnemonic bit flag names.
Returns
-------
mask : ndarray
Returns an array of the same dimensionality as the input ``bitfield``
array whose elements can have two possible values,
e.g., ``numpy.True_`` or ``numpy.False_`` (or 1 or 0 for integer
``dtype``) according to the values of the input ``bitfield`` elements, the
``ignore_flags`` parameter, and the ``good_mask_value`` parameter.
Examples
--------
>>> from astropy.nddata import bitmask
>>> import numpy as np
>>> dqarr = np.asarray([[0, 0, 1, 2, 0, 8, 12, 0],
... [10, 4, 0, 0, 0, 16, 6, 0]])
>>> flag_map = bitmask.extend_bit_flag_map(
... 'ST_DQ', CR=2, CLOUDY=4, RAINY=8, HOT=16, DEAD=32
... )
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=0,
... dtype=int)
array([[0, 0, 1, 1, 0, 1, 1, 0],
[1, 1, 0, 0, 0, 1, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=0,
... dtype=bool)
array([[False, False, True, True, False, True, True, False],
[ True, True, False, False, False, True, True, False]]...)
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=6,
... good_mask_value=0, dtype=int)
array([[0, 0, 1, 0, 0, 1, 1, 0],
[1, 0, 0, 0, 0, 1, 0, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=~6,
... good_mask_value=0, dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=6, dtype=int,
... flip_bits=True, good_mask_value=0)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags='~(2+4)',
... good_mask_value=0, dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=[2, 4],
... flip_bits=True, good_mask_value=0,
... dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags='~(CR,CLOUDY)',
... good_mask_value=0, dtype=int,
... flag_name_map=flag_map)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags='~(CR+CLOUDY)',
... good_mask_value=0, dtype=int,
... flag_name_map=flag_map)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
"""
bitfield = np.asarray(bitfield)
if not np.issubdtype(bitfield.dtype, np.integer):
raise TypeError("Input bitfield array must be of integer type.")
ignore_mask = interpret_bit_flags(
ignore_flags, flip_bits=flip_bits, flag_name_map=flag_name_map
)
if ignore_mask is None:
if good_mask_value:
mask = np.ones_like(bitfield, dtype=dtype)
else:
mask = np.zeros_like(bitfield, dtype=dtype)
return mask
# filter out bits beyond the maximum supported by the data type:
ignore_mask = ignore_mask & _SUPPORTED_FLAGS
# invert the "ignore" mask:
ignore_mask = np.bitwise_not(
ignore_mask, dtype=bitfield.dtype.type, casting="unsafe"
)
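# After inversion the set bits of ignore_mask are exactly the flags that are
# *not* ignored, so AND-ing it with the bitfield leaves a non-zero value
# (interpreted as True in the boolean output) only where at least one
# non-ignored flag is set.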
mask = np.empty_like(bitfield, dtype=np.bool_)
np.bitwise_and(bitfield, ignore_mask, out=mask, casting="unsafe")
if good_mask_value:
np.logical_not(mask, out=mask)
return mask.astype(dtype=dtype, subok=False, copy=False)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module implements the base CCDData class."""
import itertools
import numpy as np
from astropy import log
from astropy import units as u
from astropy.io import fits, registry
from astropy.utils.decorators import sharedmethod
from astropy.wcs import WCS
from .compat import NDDataArray
from .nduncertainty import (
InverseVariance,
NDUncertainty,
StdDevUncertainty,
VarianceUncertainty,
)
__all__ = ["CCDData", "fits_ccddata_reader", "fits_ccddata_writer"]
_known_uncertainties = (StdDevUncertainty, VarianceUncertainty, InverseVariance)
_unc_name_to_cls = {cls.__name__: cls for cls in _known_uncertainties}
_unc_cls_to_name = {cls: cls.__name__ for cls in _known_uncertainties}
# Global value which can turn on/off the unit requirements when creating a
# CCDData. Should be used with care because several functions actually break
# if the unit is None!
_config_ccd_requires_unit = True
def _arithmetic(op):
"""Decorator factory which temporarily disables the need for a unit when
creating a new CCDData instance. The final result must have a unit.
Parameters
----------
op : function
The function to apply. Supported are:
- ``np.add``
- ``np.subtract``
- ``np.multiply``
- ``np.true_divide``
Notes
-----
Should only be used on CCDData ``add``, ``subtract``, ``divide`` or
``multiply`` because only these methods from NDArithmeticMixin are
overwritten.
"""
def decorator(func):
def inner(self, operand, operand2=None, **kwargs):
global _config_ccd_requires_unit
_config_ccd_requires_unit = False
result = self._prepare_then_do_arithmetic(op, operand, operand2, **kwargs)
# Wrap it again as CCDData so it checks the final unit.
_config_ccd_requires_unit = True
return result.__class__(result)
inner.__doc__ = f"See `astropy.nddata.NDArithmeticMixin.{func.__name__}`."
return sharedmethod(inner)
return decorator
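# Illustrative effect of the decorator (sketch): the CCDData.add defined
# further below behaves roughly like running NDArithmeticMixin.add with the
# unit requirement temporarily switched off and then re-wrapping the result
# as CCDData(result) so that the final unit is validated.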
def _uncertainty_unit_equivalent_to_parent(uncertainty_type, unit, parent_unit):
if uncertainty_type is StdDevUncertainty:
return unit == parent_unit
elif uncertainty_type is VarianceUncertainty:
return unit == (parent_unit**2)
elif uncertainty_type is InverseVariance:
return unit == (1 / (parent_unit**2))
raise ValueError(f"unsupported uncertainty type: {uncertainty_type}")
class CCDData(NDDataArray):
"""A class describing basic CCD data.
The CCDData class is based on the NDData object and includes a data array,
uncertainty frame, mask frame, flag frame, meta data, units, and WCS
information for a single CCD image.
Parameters
----------
data : `~astropy.nddata.CCDData`-like or array-like
The actual data contained in this `~astropy.nddata.CCDData` object.
Note that the data will always be saved by *reference*, so you should
make a copy of the ``data`` before passing it in if that's the desired
behavior.
uncertainty : `~astropy.nddata.StdDevUncertainty`, \
`~astropy.nddata.VarianceUncertainty`, \
`~astropy.nddata.InverseVariance`, `numpy.ndarray` or \
None, optional
Uncertainties on the data. If the uncertainty is a `numpy.ndarray`, it
is assumed to be, and stored as, a `~astropy.nddata.StdDevUncertainty`.
Default is ``None``.
mask : `numpy.ndarray` or None, optional
Mask for the data, given as a boolean Numpy array with a shape
matching that of the data. The values must be `False` where
the data is *valid* and `True` when it is not (like Numpy
masked arrays). If ``data`` is a numpy masked array, providing
``mask`` here will cause the mask from the masked array to be
ignored.
Default is ``None``.
flags : `numpy.ndarray` or `~astropy.nddata.FlagCollection` or None, \
optional
Flags giving information about each pixel. These can be specified
either as a Numpy array of any type with a shape matching that of the
data, or as a `~astropy.nddata.FlagCollection` instance which has a
shape matching that of the data.
Default is ``None``.
wcs : `~astropy.wcs.WCS` or None, optional
WCS-object containing the world coordinate system for the data.
Default is ``None``.
meta : dict-like object or None, optional
Metadata for this object. "Metadata" here means all information that
is included with this object but not part of any other attribute
of this particular object, e.g. creation date, unique identifier,
simulation parameters, exposure time, telescope name, etc.
unit : `~astropy.units.Unit` or str, optional
The units of the data.
Default is ``None``.
.. warning::
If the unit is ``None``, or is not otherwise specified, a
``ValueError`` will be raised.
psf : `numpy.ndarray` or None, optional
Image representation of the PSF at the center of this image. In order
for convolution to be flux-preserving, this should generally be
normalized to sum to unity.
Raises
------
ValueError
If the ``uncertainty`` or ``mask`` inputs cannot be broadcast (e.g.,
match shape) onto ``data``.
Methods
-------
read(\\*args, \\**kwargs)
``Classmethod`` to create a CCDData instance based on a ``FITS`` file.
This method uses :func:`fits_ccddata_reader` with the provided
parameters.
write(\\*args, \\**kwargs)
Writes the contents of the CCDData instance into a new ``FITS`` file.
This method uses :func:`fits_ccddata_writer` with the provided
parameters.
Attributes
----------
known_invalid_fits_unit_strings
A dictionary that maps commonly-used fits unit name strings that are
technically invalid to the correct valid unit type (or unit string).
This is primarily for variant names like "ELECTRONS/S" which are not
formally valid, but are unambiguous and frequently enough encountered
that it is convenient to map them to the correct unit.
Notes
-----
`~astropy.nddata.CCDData` objects can be easily converted to a regular
Numpy array using `numpy.asarray`.
For example::
>>> from astropy.nddata import CCDData
>>> import numpy as np
>>> x = CCDData([1,2,3], unit='adu')
>>> np.asarray(x)
array([1, 2, 3])
This is useful, for example, when plotting a 2D image using
matplotlib.
>>> from astropy.nddata import CCDData
>>> from matplotlib import pyplot as plt # doctest: +SKIP
>>> x = CCDData([[1,2,3], [4,5,6]], unit='adu')
>>> plt.imshow(x) # doctest: +SKIP
"""
def __init__(self, *args, **kwd):
if "meta" not in kwd:
kwd["meta"] = kwd.pop("header", None)
if "header" in kwd:
raise ValueError("can't have both header and meta.")
super().__init__(*args, **kwd)
if self._wcs is not None:
llwcs = self._wcs.low_level_wcs
if not isinstance(llwcs, WCS):
raise TypeError("the wcs must be a WCS instance.")
self._wcs = llwcs
# Check if a unit is set. This can be temporarily disabled by the
# _CCDDataUnit contextmanager.
if _config_ccd_requires_unit and self.unit is None:
raise ValueError("a unit for CCDData must be specified.")
def _slice_wcs(self, item):
"""
Override the WCS slicing behaviour so that the wcs attribute continues
to be an `astropy.wcs.WCS`.
"""
if self.wcs is None:
return None
try:
return self.wcs[item]
except Exception as err:
self._handle_wcs_slicing_error(err, item)
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = value
@property
def wcs(self):
return self._wcs
@wcs.setter
def wcs(self, value):
if value is not None and not isinstance(value, WCS):
raise TypeError("the wcs must be a WCS instance.")
self._wcs = value
@property
def unit(self):
return self._unit
@unit.setter
def unit(self, value):
self._unit = u.Unit(value)
@property
def psf(self):
return self._psf
@psf.setter
def psf(self, value):
if value is not None and not isinstance(value, np.ndarray):
raise TypeError("The psf must be a numpy array.")
self._psf = value
@property
def header(self):
return self._meta
@header.setter
def header(self, value):
self.meta = value
@property
def uncertainty(self):
return self._uncertainty
@uncertainty.setter
def uncertainty(self, value):
if value is not None:
if isinstance(value, NDUncertainty):
if getattr(value, "_parent_nddata", None) is not None:
value = value.__class__(value, copy=False)
self._uncertainty = value
elif isinstance(value, np.ndarray):
if value.shape != self.shape:
raise ValueError("uncertainty must have same shape as data.")
self._uncertainty = StdDevUncertainty(value)
log.info(
"array provided for uncertainty; assuming it is a "
"StdDevUncertainty."
)
else:
raise TypeError(
"uncertainty must be an instance of a "
"NDUncertainty object or a numpy array."
)
self._uncertainty.parent_nddata = self
else:
self._uncertainty = value
def to_hdu(
self,
hdu_mask="MASK",
hdu_uncertainty="UNCERT",
hdu_flags=None,
wcs_relax=True,
key_uncertainty_type="UTYPE",
as_image_hdu=False,
hdu_psf="PSFIMAGE",
):
"""Creates an HDUList object from a CCDData object.
Parameters
----------
hdu_mask, hdu_uncertainty, hdu_flags, hdu_psf : str or None, optional
If it is a string append this attribute to the HDUList as
`~astropy.io.fits.ImageHDU` with the string as extension name.
Flags are not supported at this time. If ``None`` this attribute
is not appended.
Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty,
``'PSFIMAGE'`` for psf, and `None` for flags.
wcs_relax : bool
Value of the ``relax`` parameter to use in converting the WCS to a
FITS header using `~astropy.wcs.WCS.to_header`. The common
``CTYPE`` ``RA---TAN-SIP`` and ``DEC--TAN-SIP`` requires
``relax=True`` for the ``-SIP`` part of the ``CTYPE`` to be
preserved.
key_uncertainty_type : str, optional
The header key name for the class name of the uncertainty (if any)
that is used to store the uncertainty type in the uncertainty hdu.
Default is ``UTYPE``.
.. versionadded:: 3.1
as_image_hdu : bool
If this option is `True`, the first item of the returned
`~astropy.io.fits.HDUList` is a `~astropy.io.fits.ImageHDU`, instead
of the default `~astropy.io.fits.PrimaryHDU`.
Raises
------
ValueError
- If ``self.mask`` is set but not a `numpy.ndarray`.
- If ``self.uncertainty`` is set but not an astropy uncertainty type.
- If ``self.uncertainty`` is set but has another unit than
``self.data``.
NotImplementedError
Saving flags is not supported.
Returns
-------
hdulist : `~astropy.io.fits.HDUList`
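Examples
--------
A minimal sketch of converting a small image (arbitrary values) into an
`~astropy.io.fits.HDUList`:
>>> from astropy.nddata import CCDData
>>> ccd = CCDData([[1, 2], [3, 4]], unit='adu')
>>> hdulist = ccd.to_hdu()
>>> hdulist[0].header['BUNIT']
'adu'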
"""
if isinstance(self.header, fits.Header):
# Copy here so that we can modify the HDU header by adding WCS
# information without changing the header of the CCDData object.
header = self.header.copy()
else:
# Because _insert_in_metadata_fits_safe is written as a method
# we need to create a dummy CCDData instance to hold the FITS
# header we are constructing. This probably indicates that
# _insert_in_metadata_fits_safe should be rewritten in a more
# sensible way...
dummy_ccd = CCDData([1], meta=fits.Header(), unit="adu")
for k, v in self.header.items():
dummy_ccd._insert_in_metadata_fits_safe(k, v)
header = dummy_ccd.header
if self.unit is not u.dimensionless_unscaled:
header["bunit"] = self.unit.to_string()
if self.wcs:
# Simply extending the FITS header with the WCS can lead to
# duplicates of the WCS keywords; iterating over the WCS
# header should be safer.
#
# Turns out if I had read the io.fits.Header.extend docs more
# carefully, I would have realized that the keywords exist to
# avoid duplicates and preserve, as much as possible, the
# structure of the commentary cards.
#
# Note that until astropy/astropy#3967 is closed, the extend
# will fail if there are comment cards in the WCS header but
# not header.
wcs_header = self.wcs.to_header(relax=wcs_relax)
header.extend(wcs_header, useblanks=False, update=True)
if as_image_hdu:
hdus = [fits.ImageHDU(self.data, header)]
else:
hdus = [fits.PrimaryHDU(self.data, header)]
if hdu_mask and self.mask is not None:
# Always assuming that the mask is a np.ndarray (check that it has
# a 'shape').
if not hasattr(self.mask, "shape"):
raise ValueError("only a numpy.ndarray mask can be saved.")
# Convert boolean mask to uint since io.fits cannot handle bool.
hduMask = fits.ImageHDU(self.mask.astype(np.uint8), name=hdu_mask)
hdus.append(hduMask)
if hdu_uncertainty and self.uncertainty is not None:
# We need to save some kind of information about which uncertainty class
# was used so that loading the HDUList can infer the uncertainty type.
# The class name is stored in the uncertainty HDU header under
# ``key_uncertainty_type`` and only the known uncertainty classes are
# allowed.
uncertainty_cls = self.uncertainty.__class__
if uncertainty_cls not in _known_uncertainties:
raise ValueError(
f"only uncertainties of type {_known_uncertainties} can be saved."
)
uncertainty_name = _unc_cls_to_name[uncertainty_cls]
hdr_uncertainty = fits.Header()
hdr_uncertainty[key_uncertainty_type] = uncertainty_name
# Only the uncertainty array is saved, which would lose information if the
# uncertainty unit does not correspond to the data unit (see
# _uncertainty_unit_equivalent_to_parent above), so abort in that case.
# This is important for astropy > 1.2.
if hasattr(self.uncertainty, "unit") and self.uncertainty.unit is not None:
if not _uncertainty_unit_equivalent_to_parent(
uncertainty_cls, self.uncertainty.unit, self.unit
):
raise ValueError(
"saving uncertainties with a unit that is not "
"equivalent to the unit from the data unit is not "
"supported."
)
hduUncert = fits.ImageHDU(
self.uncertainty.array, hdr_uncertainty, name=hdu_uncertainty
)
hdus.append(hduUncert)
if hdu_flags and self.flags:
raise NotImplementedError(
"adding the flags to a HDU is not supported at this time."
)
if hdu_psf and self.psf is not None:
# The PSF is an image, so write it as a separate ImageHDU.
hdu_psf = fits.ImageHDU(self.psf, name=hdu_psf)
hdus.append(hdu_psf)
hdulist = fits.HDUList(hdus)
return hdulist
def copy(self):
"""
Return a copy of the CCDData object.
"""
return self.__class__(self, copy=True)
add = _arithmetic(np.add)(NDDataArray.add)
subtract = _arithmetic(np.subtract)(NDDataArray.subtract)
multiply = _arithmetic(np.multiply)(NDDataArray.multiply)
divide = _arithmetic(np.true_divide)(NDDataArray.divide)
def _insert_in_metadata_fits_safe(self, key, value):
"""
Insert key/value pair into metadata in a way that FITS can serialize.
Parameters
----------
key : str
Key to be inserted in dictionary.
value : str or None
Value to be inserted.
Notes
-----
This addresses a shortcoming of the FITS standard. There are length
restrictions on both the ``key`` (8 characters) and ``value`` (72
characters) in the FITS standard. There is a convention for handling
long keywords and a convention for handling long values, but the
two conventions cannot be used at the same time.
This addresses that case by checking the length of the ``key`` and
``value`` and, if necessary, shortening the key.
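Examples
--------
Sketch with a made-up long keyword and long value (not executed): the
metadata ends up with ``HIERARCH VERY_LONG_KEYWORD_NAME`` pointing to the
shortened name ``very_lon``, which in turn holds the long value.
>>> ccd = CCDData([1], unit='adu')  # doctest: +SKIP
>>> ccd._insert_in_metadata_fits_safe('very_long_keyword_name', 'x' * 80)  # doctest: +SKIP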
"""
if len(key) > 8 and len(value) > 72:
short_name = key[:8]
self.meta[f"HIERARCH {key.upper()}"] = (
short_name,
f"Shortened name for {key}",
)
self.meta[short_name] = value
else:
self.meta[key] = value
# A dictionary mapping "known" invalid FITS unit strings to valid units.
known_invalid_fits_unit_strings = {
"ELECTRONS/S": u.electron / u.s,
"ELECTRONS": u.electron,
"electrons": u.electron,
}
# These need to be importable by the tests...
_KEEP_THESE_KEYWORDS_IN_HEADER = ["JD-OBS", "MJD-OBS", "DATE-OBS"]
_PCs = {"PC1_1", "PC1_2", "PC2_1", "PC2_2"}
_CDs = {"CD1_1", "CD1_2", "CD2_1", "CD2_2"}
def _generate_wcs_and_update_header(hdr):
"""
Generate a WCS object from a header and remove the WCS-specific
keywords from the header.
Parameters
----------
hdr : astropy.io.fits.header or other dict-like
Returns
-------
new_header, wcs
"""
# Try constructing a WCS object.
try:
wcs = WCS(hdr)
except Exception as exc:
# Normally WCS only raises Warnings and doesn't fail but in rare
# cases (malformed header) it could fail...
log.info(
"An exception happened while extracting WCS information from "
"the Header.\n{}: {}".format(type(exc).__name__, str(exc))
)
return hdr, None
# Test for success by checking to see if the wcs ctype has a non-empty
# value, return None for wcs if ctype is empty.
if not wcs.wcs.ctype[0]:
return (hdr, None)
new_hdr = hdr.copy()
# If the keywords below are in the header they are also added to WCS.
# It seems like they should *not* be removed from the header, though.
wcs_header = wcs.to_header(relax=True)
for k in wcs_header:
if k not in _KEEP_THESE_KEYWORDS_IN_HEADER:
new_hdr.remove(k, ignore_missing=True)
# Check that this does not result in an inconsistent header WCS if the WCS
# is converted back to a header.
if (_PCs & set(wcs_header)) and (_CDs & set(new_hdr)):
# The PCi_j representation is used by the astropy.wcs object,
# so CDi_j keywords were not removed from new_hdr. Remove them now.
for cd in _CDs:
new_hdr.remove(cd, ignore_missing=True)
# The other case -- CD in the header produced by astropy.wcs -- should
# never happen based on [1], which computes the matrix in PC form.
# [1]: https://github.com/astropy/astropy/blob/1cf277926d3598dd672dd528504767c37531e8c9/cextern/wcslib/C/wcshdr.c#L596
#
# The test test_ccddata.test_wcs_keyword_removal_for_wcs_test_files() does
# check for the possibility that both PC and CD are present in the result
# so if the implementation of to_header changes in wcslib in the future
# then the tests should catch it, and then this code will need to be
# updated.
# We need to check for any SIP coefficients that got left behind if the
# header has SIP.
if wcs.sip is not None:
keyword = "{}_{}_{}"
polynomials = ["A", "B", "AP", "BP"]
for poly in polynomials:
order = wcs.sip.__getattribute__(f"{poly.lower()}_order")
for i, j in itertools.product(range(order), repeat=2):
new_hdr.remove(keyword.format(poly, i, j), ignore_missing=True)
return (new_hdr, wcs)
def fits_ccddata_reader(
filename,
hdu=0,
unit=None,
hdu_uncertainty="UNCERT",
hdu_mask="MASK",
hdu_flags=None,
key_uncertainty_type="UTYPE",
hdu_psf="PSFIMAGE",
**kwd,
):
"""
Generate a CCDData object from a FITS file.
Parameters
----------
filename : str
Name of fits file.
hdu : int, str, tuple of (str, int), optional
Index or other identifier of the Header Data Unit of the FITS
file from which CCDData should be initialized. If zero and
no data in the primary HDU, it will search for the first
extension HDU with data, and the primary header is merged into that
extension's header.
Default is ``0``.
unit : `~astropy.units.Unit`, optional
Units of the image data. If this argument is provided and there is a
unit for the image in the FITS header (the keyword ``BUNIT`` is used
as the unit, if present), this argument is used for the unit.
Default is ``None``.
hdu_uncertainty : str or None, optional
FITS extension from which the uncertainty should be initialized. If the
extension does not exist the uncertainty of the CCDData is ``None``.
Default is ``'UNCERT'``.
hdu_mask : str or None, optional
FITS extension from which the mask should be initialized. If the
extension does not exist the mask of the CCDData is ``None``.
Default is ``'MASK'``.
hdu_flags : str or None, optional
Currently not implemented.
Default is ``None``.
key_uncertainty_type : str, optional
The header key name where the class name of the uncertainty is stored
in the hdu of the uncertainty (if any).
Default is ``UTYPE``.
.. versionadded:: 3.1
hdu_psf : str or None, optional
FITS extension from which the psf image should be initialized. If the
extension does not exist the psf of the CCDData is `None`.
kwd :
Any additional keyword parameters are passed through to the FITS reader
in :mod:`astropy.io.fits`; see Notes for additional discussion.
Notes
-----
FITS files that contain scaled data (e.g. unsigned integer images) will
be scaled and the keywords used to manage scaled data in
:mod:`astropy.io.fits` are disabled.
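Examples
--------
This function is what ``CCDData.read`` dispatches to for FITS files, so a
typical call looks like the following sketch (the file name is arbitrary
and must exist):
>>> from astropy.nddata import CCDData
>>> ccd = CCDData.read('image.fits', unit='adu')  # doctest: +SKIP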
"""
unsupport_open_keywords = {
"do_not_scale_image_data": "Image data must be scaled.",
"scale_back": "Scale information is not preserved.",
}
for key, msg in unsupport_open_keywords.items():
if key in kwd:
prefix = f"unsupported keyword: {key}."
raise TypeError(f"{prefix} {msg}")
with fits.open(filename, **kwd) as hdus:
hdr = hdus[hdu].header
if hdu_uncertainty is not None and hdu_uncertainty in hdus:
unc_hdu = hdus[hdu_uncertainty]
stored_unc_name = unc_hdu.header.get(key_uncertainty_type, "None")
# For compatibility reasons the default is standard deviation
# uncertainty because files could have been created before the
# uncertainty type was stored in the header.
unc_type = _unc_name_to_cls.get(stored_unc_name, StdDevUncertainty)
uncertainty = unc_type(unc_hdu.data)
else:
uncertainty = None
if hdu_mask is not None and hdu_mask in hdus:
# Mask is saved as uint but we want it to be boolean.
mask = hdus[hdu_mask].data.astype(np.bool_)
else:
mask = None
if hdu_flags is not None and hdu_flags in hdus:
raise NotImplementedError("loading flags is currently not supported.")
if hdu_psf is not None and hdu_psf in hdus:
psf = hdus[hdu_psf].data
else:
psf = None
# search for the first instance with data if
# the primary header is empty.
if hdu == 0 and hdus[hdu].data is None:
for i in range(len(hdus)):
if (
hdus.info(hdu)[i][3] == "ImageHDU"
and hdus.fileinfo(i)["datSpan"] > 0
):
hdu = i
comb_hdr = hdus[hdu].header.copy()
# Add header values from the primary header that aren't
# present in the extension header.
comb_hdr.extend(hdr, unique=True)
hdr = comb_hdr
log.info(f"first HDU with data is extension {hdu}.")
break
if "bunit" in hdr:
fits_unit_string = hdr["bunit"]
# patch to handle FITS files using ADU for the unit instead of the
# standard version of 'adu'
if fits_unit_string.strip().lower() == "adu":
fits_unit_string = fits_unit_string.lower()
else:
fits_unit_string = None
if fits_unit_string:
if unit is None:
# Convert the BUNIT header keyword to a unit and if that's not
# possible raise a meaningful error message.
try:
kifus = CCDData.known_invalid_fits_unit_strings
if fits_unit_string in kifus:
fits_unit_string = kifus[fits_unit_string]
fits_unit_string = u.Unit(fits_unit_string)
except ValueError:
raise ValueError(
"The Header value for the key BUNIT ({}) cannot be "
"interpreted as valid unit. To successfully read the "
"file as CCDData you can pass in a valid `unit` "
"argument explicitly or change the header of the FITS "
"file before reading it.".format(fits_unit_string)
)
else:
log.info(
"using the unit {} passed to the FITS reader instead "
"of the unit {} in the FITS file.".format(unit, fits_unit_string)
)
use_unit = unit or fits_unit_string
hdr, wcs = _generate_wcs_and_update_header(hdr)
ccd_data = CCDData(
hdus[hdu].data,
meta=hdr,
unit=use_unit,
mask=mask,
uncertainty=uncertainty,
wcs=wcs,
psf=psf,
)
return ccd_data
def fits_ccddata_writer(
ccd_data,
filename,
hdu_mask="MASK",
hdu_uncertainty="UNCERT",
hdu_flags=None,
key_uncertainty_type="UTYPE",
as_image_hdu=False,
hdu_psf="PSFIMAGE",
**kwd,
):
"""
Write CCDData object to FITS file.
Parameters
----------
ccd_data : CCDData
Object to write.
filename : str
Name of file.
hdu_mask, hdu_uncertainty, hdu_flags, hdu_psf : str or None, optional
If it is a string append this attribute to the HDUList as
`~astropy.io.fits.ImageHDU` with the string as extension name.
Flags are not supported at this time. If ``None`` this attribute
is not appended.
Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty,
``'PSFIMAGE'`` for psf, and `None` for flags.
key_uncertainty_type : str, optional
The header key name for the class name of the uncertainty (if any)
that is used to store the uncertainty type in the uncertainty hdu.
Default is ``UTYPE``.
.. versionadded:: 3.1
as_image_hdu : bool
If this option is `True`, the first item of the returned
`~astropy.io.fits.HDUList` is a `~astropy.io.fits.ImageHDU`, instead of
the default `~astropy.io.fits.PrimaryHDU`.
kwd :
All additional keywords are passed to :py:mod:`astropy.io.fits`
Raises
------
ValueError
- If ``self.mask`` is set but not a `numpy.ndarray`.
- If ``self.uncertainty`` is set but not a supported astropy
uncertainty type.
- If ``self.uncertainty`` is set but has another unit than
``self.data``.
NotImplementedError
Saving flags is not supported.
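Examples
--------
Sketch of writing an image to disk (arbitrary data and file name); this is
what ``CCDData.write`` dispatches to for FITS files:
>>> from astropy.nddata import CCDData
>>> ccd = CCDData([[1, 2], [3, 4]], unit='adu')
>>> ccd.write('image.fits', overwrite=True)  # doctest: +SKIP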
"""
hdu = ccd_data.to_hdu(
hdu_mask=hdu_mask,
hdu_uncertainty=hdu_uncertainty,
key_uncertainty_type=key_uncertainty_type,
hdu_flags=hdu_flags,
as_image_hdu=as_image_hdu,
hdu_psf=hdu_psf,
)
if as_image_hdu:
hdu.insert(0, fits.PrimaryHDU())
hdu.writeto(filename, **kwd)
with registry.delay_doc_updates(CCDData):
registry.register_reader("fits", CCDData, fits_ccddata_reader)
registry.register_writer("fits", CCDData, fits_ccddata_writer)
registry.register_identifier("fits", CCDData, fits.connect.is_fits)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import numpy as np
import astropy.units as u
from astropy.coordinates import ITRS, CartesianRepresentation, SphericalRepresentation
from astropy.utils import unbroadcast
from .wcs import WCS, WCSSUB_LATITUDE, WCSSUB_LONGITUDE
__doctest_skip__ = ["wcs_to_celestial_frame", "celestial_frame_to_wcs"]
__all__ = [
"obsgeo_to_frame",
"add_stokes_axis_to_wcs",
"celestial_frame_to_wcs",
"wcs_to_celestial_frame",
"proj_plane_pixel_scales",
"proj_plane_pixel_area",
"is_proj_plane_distorted",
"non_celestial_pixel_scales",
"skycoord_to_pixel",
"pixel_to_skycoord",
"custom_wcs_to_frame_mappings",
"custom_frame_to_wcs_mappings",
"pixel_to_pixel",
"local_partial_pixel_derivatives",
"fit_wcs_from_points",
]
def add_stokes_axis_to_wcs(wcs, add_before_ind):
"""
Add a new Stokes axis that is uncorrelated with any other axes.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS to add to
add_before_ind : int
Index of the WCS to insert the new Stokes axis in front of.
To add at the end, do add_before_ind = wcs.wcs.naxis
The beginning is at position 0.
Returns
-------
`~astropy.wcs.WCS`
A new `~astropy.wcs.WCS` instance with an additional axis
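Examples
--------
A small sketch adding a Stokes axis at the end of a 2-axis WCS:
>>> from astropy.wcs import WCS
>>> from astropy.wcs.utils import add_stokes_axis_to_wcs
>>> w = WCS(naxis=2)
>>> add_stokes_axis_to_wcs(w, 2).wcs.ctype[2]
'STOKES'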
"""
inds = [i + 1 for i in range(wcs.wcs.naxis)]
inds.insert(add_before_ind, 0)
newwcs = wcs.sub(inds)
newwcs.wcs.ctype[add_before_ind] = "STOKES"
newwcs.wcs.cname[add_before_ind] = "STOKES"
return newwcs
def _wcs_to_celestial_frame_builtin(wcs):
# Import astropy.coordinates here to avoid circular imports
from astropy.coordinates import (
FK4,
FK5,
ICRS,
ITRS,
FK4NoETerms,
Galactic,
SphericalRepresentation,
)
# Import astropy.time here otherwise setup.py fails before extensions are compiled
from astropy.time import Time
if wcs.wcs.lng == -1 or wcs.wcs.lat == -1:
return None
radesys = wcs.wcs.radesys
if np.isnan(wcs.wcs.equinox):
equinox = None
else:
equinox = wcs.wcs.equinox
xcoord = wcs.wcs.ctype[wcs.wcs.lng][:4]
ycoord = wcs.wcs.ctype[wcs.wcs.lat][:4]
# Apply logic from FITS standard to determine the default radesys
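# (no equinox -> ICRS; an equinox before 1984.0 -> FK4, i.e. a Besselian
# epoch; 1984.0 or later -> FK5, a Julian epoch).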
if radesys == "" and xcoord == "RA--" and ycoord == "DEC-":
if equinox is None:
radesys = "ICRS"
elif equinox < 1984.0:
radesys = "FK4"
else:
radesys = "FK5"
if radesys == "FK4":
if equinox is not None:
equinox = Time(equinox, format="byear")
frame = FK4(equinox=equinox)
elif radesys == "FK4-NO-E":
if equinox is not None:
equinox = Time(equinox, format="byear")
frame = FK4NoETerms(equinox=equinox)
elif radesys == "FK5":
if equinox is not None:
equinox = Time(equinox, format="jyear")
frame = FK5(equinox=equinox)
elif radesys == "ICRS":
frame = ICRS()
else:
if xcoord == "GLON" and ycoord == "GLAT":
frame = Galactic()
elif xcoord == "TLON" and ycoord == "TLAT":
# The default representation for ITRS is cartesian, but for WCS
# purposes, we need the spherical representation.
frame = ITRS(
representation_type=SphericalRepresentation,
obstime=wcs.wcs.dateobs or None,
)
else:
frame = None
return frame
def _celestial_frame_to_wcs_builtin(frame, projection="TAN"):
# Import astropy.coordinates here to avoid circular imports
from astropy.coordinates import (
FK4,
FK5,
ICRS,
ITRS,
BaseRADecFrame,
FK4NoETerms,
Galactic,
)
# Create a 2-dimensional WCS
wcs = WCS(naxis=2)
if isinstance(frame, BaseRADecFrame):
xcoord = "RA--"
ycoord = "DEC-"
if isinstance(frame, ICRS):
wcs.wcs.radesys = "ICRS"
elif isinstance(frame, FK4NoETerms):
wcs.wcs.radesys = "FK4-NO-E"
wcs.wcs.equinox = frame.equinox.byear
elif isinstance(frame, FK4):
wcs.wcs.radesys = "FK4"
wcs.wcs.equinox = frame.equinox.byear
elif isinstance(frame, FK5):
wcs.wcs.radesys = "FK5"
wcs.wcs.equinox = frame.equinox.jyear
else:
return None
elif isinstance(frame, Galactic):
xcoord = "GLON"
ycoord = "GLAT"
elif isinstance(frame, ITRS):
xcoord = "TLON"
ycoord = "TLAT"
wcs.wcs.radesys = "ITRS"
wcs.wcs.dateobs = frame.obstime.utc.isot
else:
return None
wcs.wcs.ctype = [xcoord + "-" + projection, ycoord + "-" + projection]
return wcs
WCS_FRAME_MAPPINGS = [[_wcs_to_celestial_frame_builtin]]
FRAME_WCS_MAPPINGS = [[_celestial_frame_to_wcs_builtin]]
class custom_wcs_to_frame_mappings:
def __init__(self, mappings=[]):
if callable(mappings):
mappings = [mappings]
WCS_FRAME_MAPPINGS.append(mappings)
def __enter__(self):
pass
def __exit__(self, type, value, tb):
WCS_FRAME_MAPPINGS.pop()
# Backward-compatibility
custom_frame_mappings = custom_wcs_to_frame_mappings
class custom_frame_to_wcs_mappings:
def __init__(self, mappings=[]):
if callable(mappings):
mappings = [mappings]
FRAME_WCS_MAPPINGS.append(mappings)
def __enter__(self):
pass
def __exit__(self, type, value, tb):
FRAME_WCS_MAPPINGS.pop()
def wcs_to_celestial_frame(wcs):
"""
For a given WCS, return the coordinate frame that matches the celestial
component of the WCS.
Parameters
----------
wcs : :class:`~astropy.wcs.WCS` instance
The WCS to find the frame for
Returns
-------
frame : :class:`~astropy.coordinates.BaseCoordinateFrame` subclass instance
An instance of a :class:`~astropy.coordinates.BaseCoordinateFrame`
subclass instance that best matches the specified WCS.
Notes
-----
To extend this function to frames not defined in astropy.coordinates, you
can write your own function which should take a :class:`~astropy.wcs.WCS`
instance and should return either an instance of a frame, or `None` if no
matching frame was found. You can register this function temporarily with::
>>> from astropy.wcs.utils import wcs_to_celestial_frame, custom_wcs_to_frame_mappings
>>> with custom_wcs_to_frame_mappings(my_function):
... wcs_to_celestial_frame(...)
"""
for mapping_set in WCS_FRAME_MAPPINGS:
for func in mapping_set:
frame = func(wcs)
if frame is not None:
return frame
raise ValueError(
"Could not determine celestial frame corresponding to the specified WCS object"
)
def celestial_frame_to_wcs(frame, projection="TAN"):
"""
For a given coordinate frame, return the corresponding WCS object.
Note that the returned WCS object has only the elements corresponding to
coordinate frames set (e.g. ctype, equinox, radesys).
Parameters
----------
frame : :class:`~astropy.coordinates.BaseCoordinateFrame` subclass instance
An instance of a :class:`~astropy.coordinates.BaseCoordinateFrame`
subclass instance for which to find the WCS
projection : str
Projection code to use in ctype, if applicable
Returns
-------
wcs : :class:`~astropy.wcs.WCS` instance
The corresponding WCS object
Examples
--------
::
>>> from astropy.wcs.utils import celestial_frame_to_wcs
>>> from astropy.coordinates import FK5
>>> frame = FK5(equinox='J2010')
>>> wcs = celestial_frame_to_wcs(frame)
>>> wcs.to_header()
WCSAXES = 2 / Number of coordinate axes
CRPIX1 = 0.0 / Pixel coordinate of reference point
CRPIX2 = 0.0 / Pixel coordinate of reference point
CDELT1 = 1.0 / [deg] Coordinate increment at reference point
CDELT2 = 1.0 / [deg] Coordinate increment at reference point
CUNIT1 = 'deg' / Units of coordinate increment and value
CUNIT2 = 'deg' / Units of coordinate increment and value
CTYPE1 = 'RA---TAN' / Right ascension, gnomonic projection
CTYPE2 = 'DEC--TAN' / Declination, gnomonic projection
CRVAL1 = 0.0 / [deg] Coordinate value at reference point
CRVAL2 = 0.0 / [deg] Coordinate value at reference point
LONPOLE = 180.0 / [deg] Native longitude of celestial pole
LATPOLE = 0.0 / [deg] Native latitude of celestial pole
RADESYS = 'FK5' / Equatorial coordinate system
EQUINOX = 2010.0 / [yr] Equinox of equatorial coordinates
Notes
-----
To extend this function to frames not defined in astropy.coordinates, you
can write your own function which should take a
:class:`~astropy.coordinates.BaseCoordinateFrame` subclass
instance and a projection (given as a string) and should return either a WCS
instance, or `None` if the WCS could not be determined. You can register
this function temporarily with::
>>> from astropy.wcs.utils import celestial_frame_to_wcs, custom_frame_to_wcs_mappings
>>> with custom_frame_to_wcs_mappings(my_function):
... celestial_frame_to_wcs(...)
"""
for mapping_set in FRAME_WCS_MAPPINGS:
for func in mapping_set:
wcs = func(frame, projection=projection)
if wcs is not None:
return wcs
raise ValueError(
"Could not determine WCS corresponding to the specified coordinate frame."
)
def proj_plane_pixel_scales(wcs):
"""
For a WCS returns pixel scales along each axis of the image pixel at
the ``CRPIX`` location once it is projected onto the
"plane of intermediate world coordinates" as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
.. note::
This function is concerned **only** about the transformation
"image plane"->"projection plane" and **not** about the
transformation "celestial sphere"->"projection plane"->"image plane".
Therefore, this function ignores distortions arising due to
non-linear nature of most projections.
.. note::
In order to compute the scales corresponding to celestial axes only,
make sure that the input `~astropy.wcs.WCS` object contains
celestial axes only, e.g., by passing in the
`~astropy.wcs.WCS.celestial` WCS object.
Parameters
----------
wcs : `~astropy.wcs.WCS`
A world coordinate system object.
Returns
-------
scale : ndarray
A vector (`~numpy.ndarray`) of projection plane increments
corresponding to each pixel side (axis). The units of the returned
results are the same as the units of `~astropy.wcs.Wcsprm.cdelt`,
`~astropy.wcs.Wcsprm.crval`, and `~astropy.wcs.Wcsprm.cd` for
the celestial WCS and can be obtained by inquiring the value
of `~astropy.wcs.Wcsprm.cunit` property of the input
`~astropy.wcs.WCS` WCS object.
See Also
--------
astropy.wcs.utils.proj_plane_pixel_area
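Examples
--------
A sketch with a simple diagonal WCS (half-degree pixels, arbitrary sign):
>>> from astropy.wcs import WCS
>>> from astropy.wcs.utils import proj_plane_pixel_scales
>>> w = WCS(naxis=2)
>>> w.wcs.cdelt = [-0.5, 0.5]
>>> proj_plane_pixel_scales(w).tolist()
[0.5, 0.5]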
"""
return np.sqrt((wcs.pixel_scale_matrix**2).sum(axis=0, dtype=float))
def proj_plane_pixel_area(wcs):
"""
For a **celestial** WCS (see `astropy.wcs.WCS.celestial`) returns pixel
area of the image pixel at the ``CRPIX`` location once it is projected
onto the "plane of intermediate world coordinates" as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
.. note::
This function is concerned **only** about the transformation
"image plane"->"projection plane" and **not** about the
transformation "celestial sphere"->"projection plane"->"image plane".
Therefore, this function ignores distortions arising due to
non-linear nature of most projections.
.. note::
In order to compute the area of pixels corresponding to celestial
axes only, this function uses the `~astropy.wcs.WCS.celestial` WCS
object of the input ``wcs``. This is different from the
`~astropy.wcs.utils.proj_plane_pixel_scales` function
that computes the scales for the axes of the input WCS itself.
Parameters
----------
wcs : `~astropy.wcs.WCS`
A world coordinate system object.
Returns
-------
area : float
Area (in the projection plane) of the pixel at ``CRPIX`` location.
The units of the returned result are the same as the units of
the `~astropy.wcs.Wcsprm.cdelt`, `~astropy.wcs.Wcsprm.crval`,
and `~astropy.wcs.Wcsprm.cd` for the celestial WCS and can be
obtained by inquiring the value of `~astropy.wcs.Wcsprm.cunit`
property of the `~astropy.wcs.WCS.celestial` WCS object.
Raises
------
ValueError
Pixel area is defined only for 2D pixels. Most likely the
`~astropy.wcs.Wcsprm.cd` matrix of the `~astropy.wcs.WCS.celestial`
WCS is not a square matrix of second order.
Notes
-----
Depending on the application, square root of the pixel area can be used to
represent a single pixel scale of an equivalent square pixel
whose area is equal to the area of a generally non-square pixel.
See Also
--------
astropy.wcs.utils.proj_plane_pixel_scales
"""
psm = wcs.celestial.pixel_scale_matrix
if psm.shape != (2, 2):
raise ValueError("Pixel area is defined only for 2D pixels.")
return np.abs(np.linalg.det(psm))
def is_proj_plane_distorted(wcs, maxerr=1.0e-5):
r"""
For a WCS returns `False` if square image (detector) pixels stay square
when projected onto the "plane of intermediate world coordinates"
as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
It will return `True` if transformation from image (detector) coordinates
to the focal plane coordinates is non-orthogonal or if WCS contains
non-linear (e.g., SIP) distortions.
.. note::
Since this function is concerned **only** about the transformation
"image plane"->"focal plane" and **not** about the transformation
"celestial sphere"->"focal plane"->"image plane",
this function ignores distortions arising due to non-linear nature
of most projections.
Let's denote by *C* either the original or the reconstructed
(from ``PC`` and ``CDELT``) CD matrix. `is_proj_plane_distorted`
verifies that the transformation from image (detector) coordinates
to the focal plane coordinates is orthogonal using the following
check:
.. math::
\left \| \frac{C \cdot C^{\mathrm{T}}}
{| det(C)|} - I \right \|_{\mathrm{max}} < \epsilon .
Parameters
----------
wcs : `~astropy.wcs.WCS`
World coordinate system object
maxerr : float, optional
Accuracy to which the CD matrix, **normalized** such
that :math:`|det(CD)|=1`, should be close to being an
orthogonal matrix as described in the above equation
(see :math:`\epsilon`).
Returns
-------
distorted : bool
Returns `True` if focal (projection) plane is distorted and `False`
otherwise.
"""
cwcs = wcs.celestial
return not _is_cd_orthogonal(cwcs.pixel_scale_matrix, maxerr) or _has_distortion(cwcs) # fmt: skip
def _is_cd_orthogonal(cd, maxerr):
shape = cd.shape
if not (len(shape) == 2 and shape[0] == shape[1]):
raise ValueError("CD (or PC) matrix must be a 2D square matrix.")
pixarea = np.abs(np.linalg.det(cd))
if pixarea == 0.0:
raise ValueError("CD (or PC) matrix is singular.")
# NOTE: Technically, below we should use np.dot(cd, np.conjugate(cd.T))
# However, I am not aware of complex CD/PC matrices...
I = np.dot(cd, cd.T) / pixarea
cd_unitary_err = np.amax(np.abs(I - np.eye(shape[0])))
return cd_unitary_err < maxerr
def non_celestial_pixel_scales(inwcs):
"""
Calculate the pixel scale along each axis of a non-celestial WCS,
for example one with mixed spectral and spatial axes.
Parameters
----------
inwcs : `~astropy.wcs.WCS`
The world coordinate system object.
Returns
-------
scale : `numpy.ndarray`
The pixel scale along each axis.
"""
if inwcs.is_celestial:
raise ValueError("WCS is celestial, use celestial_pixel_scales instead")
pccd = inwcs.pixel_scale_matrix
if np.allclose(np.extract(1 - np.eye(*pccd.shape), pccd), 0):
return np.abs(np.diagonal(pccd)) * u.deg
else:
raise ValueError("WCS is rotated, cannot determine consistent pixel scales")
def _has_distortion(wcs):
"""
    `True` if the WCS contains any SIP or image distortion components.
"""
return any(
getattr(wcs, dist_attr) is not None
for dist_attr in ["cpdis1", "cpdis2", "det2im1", "det2im2", "sip"]
)
# TODO: in future, we should think about how the following two functions can be
# integrated better into the WCS class.
def skycoord_to_pixel(coords, wcs, origin=0, mode="all"):
"""
Convert a set of SkyCoord coordinates into pixels.
Parameters
----------
coords : `~astropy.coordinates.SkyCoord`
The coordinates to convert.
wcs : `~astropy.wcs.WCS`
The WCS transformation to use.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
        Whether to do the transformation including distortions (``'all'``) or
        only the core WCS transformation (``'wcs'``).
Returns
-------
xp, yp : `numpy.ndarray`
The pixel coordinates
See Also
--------
astropy.coordinates.SkyCoord.from_pixel
"""
if _has_distortion(wcs) and wcs.naxis != 2:
raise ValueError("Can only handle WCS with distortions for 2-dimensional WCS")
# Keep only the celestial part of the axes, also re-orders lon/lat
wcs = wcs.sub([WCSSUB_LONGITUDE, WCSSUB_LATITUDE])
if wcs.naxis != 2:
raise ValueError("WCS should contain celestial component")
# Check which frame the WCS uses
frame = wcs_to_celestial_frame(wcs)
# Check what unit the WCS needs
xw_unit = u.Unit(wcs.wcs.cunit[0])
yw_unit = u.Unit(wcs.wcs.cunit[1])
# Convert positions to frame
coords = coords.transform_to(frame)
# Extract longitude and latitude. We first try and use lon/lat directly,
# but if the representation is not spherical or unit spherical this will
# fail. We should then force the use of the unit spherical
# representation. We don't do that directly to make sure that we preserve
# custom lon/lat representations if available.
try:
lon = coords.data.lon.to(xw_unit)
lat = coords.data.lat.to(yw_unit)
except AttributeError:
lon = coords.spherical.lon.to(xw_unit)
lat = coords.spherical.lat.to(yw_unit)
# Convert to pixel coordinates
if mode == "all":
xp, yp = wcs.all_world2pix(lon.value, lat.value, origin)
elif mode == "wcs":
xp, yp = wcs.wcs_world2pix(lon.value, lat.value, origin)
else:
raise ValueError("mode should be either 'all' or 'wcs'")
return xp, yp
def pixel_to_skycoord(xp, yp, wcs, origin=0, mode="all", cls=None):
"""
Convert a set of pixel coordinates into a `~astropy.coordinates.SkyCoord`
coordinate.
Parameters
----------
xp, yp : float or ndarray
The coordinates to convert.
wcs : `~astropy.wcs.WCS`
The WCS transformation to use.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
        Whether to do the transformation including distortions (``'all'``) or
        only the core WCS transformation (``'wcs'``).
cls : class or None
The class of object to create. Should be a
`~astropy.coordinates.SkyCoord` subclass. If None, defaults to
`~astropy.coordinates.SkyCoord`.
Returns
-------
coords : `~astropy.coordinates.SkyCoord` subclass
        The celestial coordinates, returned as an instance of the given ``cls``.
See Also
--------
astropy.coordinates.SkyCoord.from_pixel
"""
# Import astropy.coordinates here to avoid circular imports
from astropy.coordinates import SkyCoord, UnitSphericalRepresentation
# we have to do this instead of actually setting the default to SkyCoord
# because importing SkyCoord at the module-level leads to circular
# dependencies.
if cls is None:
cls = SkyCoord
if _has_distortion(wcs) and wcs.naxis != 2:
raise ValueError("Can only handle WCS with distortions for 2-dimensional WCS")
# Keep only the celestial part of the axes, also re-orders lon/lat
wcs = wcs.sub([WCSSUB_LONGITUDE, WCSSUB_LATITUDE])
if wcs.naxis != 2:
raise ValueError("WCS should contain celestial component")
# Check which frame the WCS uses
frame = wcs_to_celestial_frame(wcs)
# Check what unit the WCS gives
lon_unit = u.Unit(wcs.wcs.cunit[0])
lat_unit = u.Unit(wcs.wcs.cunit[1])
# Convert pixel coordinates to celestial coordinates
if mode == "all":
lon, lat = wcs.all_pix2world(xp, yp, origin)
elif mode == "wcs":
lon, lat = wcs.wcs_pix2world(xp, yp, origin)
else:
raise ValueError("mode should be either 'all' or 'wcs'")
# Add units to longitude/latitude
lon = lon * lon_unit
lat = lat * lat_unit
# Create a SkyCoord-like object
data = UnitSphericalRepresentation(lon=lon, lat=lat)
coords = cls(frame.realize_frame(data))
return coords
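# Illustrative round trip with `pixel_to_skycoord` / `skycoord_to_pixel`
# (editor's sketch; the header values below are made up):
#
#     >>> from astropy.wcs import WCS
#     >>> w = WCS(naxis=2)
#     >>> w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
#     >>> w.wcs.crval = [30.0, 45.0]
#     >>> w.wcs.crpix = [512.0, 512.0]
#     >>> w.wcs.cdelt = [-2.777e-4, 2.777e-4]
#     >>> sky = pixel_to_skycoord(100.0, 200.0, w, mode="wcs")
#     >>> skycoord_to_pixel(sky, w, mode="wcs")  # doctest: +SKIP
#     (array(100.), array(200.))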
def _unique_with_order_preserved(items):
"""
Return a list of unique items in the list provided, preserving the order
in which they are found.
"""
new_items = []
for item in items:
if item not in new_items:
new_items.append(item)
return new_items
def _pixel_to_world_correlation_matrix(wcs):
"""
Return a correlation matrix between the pixel coordinates and the
high level world coordinates, along with the list of high level world
coordinate classes.
The shape of the matrix is ``(n_world, n_pix)``, where ``n_world`` is the
number of high level world coordinates.
"""
# We basically want to collapse the world dimensions together that are
# combined into the same high-level objects.
# Get the following in advance as getting these properties can be expensive
all_components = wcs.low_level_wcs.world_axis_object_components
all_classes = wcs.low_level_wcs.world_axis_object_classes
axis_correlation_matrix = wcs.low_level_wcs.axis_correlation_matrix
components = _unique_with_order_preserved([c[0] for c in all_components])
matrix = np.zeros((len(components), wcs.pixel_n_dim), dtype=bool)
for iworld in range(wcs.world_n_dim):
iworld_unique = components.index(all_components[iworld][0])
matrix[iworld_unique] |= axis_correlation_matrix[iworld]
classes = [all_classes[component][0] for component in components]
return matrix, classes
def _pixel_to_pixel_correlation_matrix(wcs_in, wcs_out):
"""
Correlation matrix between the input and output pixel coordinates for a
pixel -> world -> pixel transformation specified by two WCS instances.
The first WCS specified is the one used for the pixel -> world
transformation and the second WCS specified is the one used for the world ->
pixel transformation. The shape of the matrix is
``(n_pixel_out, n_pixel_in)``.
"""
matrix1, classes1 = _pixel_to_world_correlation_matrix(wcs_in)
matrix2, classes2 = _pixel_to_world_correlation_matrix(wcs_out)
if len(classes1) != len(classes2):
raise ValueError("The two WCS return a different number of world coordinates")
# Check if classes match uniquely
unique_match = True
mapping = []
for class1 in classes1:
matches = classes2.count(class1)
if matches == 0:
raise ValueError("The world coordinate types of the two WCS do not match")
elif matches > 1:
unique_match = False
break
else:
mapping.append(classes2.index(class1))
if unique_match:
# Classes are unique, so we need to re-order matrix2 along the world
# axis using the mapping we found above.
matrix2 = matrix2[mapping]
elif classes1 != classes2:
raise ValueError(
"World coordinate order doesn't match and automatic matching is ambiguous"
)
matrix = np.matmul(matrix2.T, matrix1)
return matrix
def _split_matrix(matrix):
"""
Given an axis correlation matrix from a WCS object, return information about
the individual WCS that can be split out.
The output is a list of tuples, where each tuple contains a list of
pixel dimensions and a list of world dimensions that can be extracted to
form a new WCS. For example, in the case of a spectral cube with the first
two world coordinates being the celestial coordinates and the third
coordinate being an uncorrelated spectral axis, the matrix would look like::
array([[ True, True, False],
[ True, True, False],
[False, False, True]])
and this function will return ``[([0, 1], [0, 1]), ([2], [2])]``.
"""
pixel_used = []
split_info = []
for ipix in range(matrix.shape[1]):
if ipix in pixel_used:
continue
pixel_include = np.zeros(matrix.shape[1], dtype=bool)
pixel_include[ipix] = True
n_pix_prev, n_pix = 0, 1
while n_pix > n_pix_prev:
world_include = matrix[:, pixel_include].any(axis=1)
pixel_include = matrix[world_include, :].any(axis=0)
n_pix_prev, n_pix = n_pix, np.sum(pixel_include)
pixel_indices = list(np.nonzero(pixel_include)[0])
world_indices = list(np.nonzero(world_include)[0])
pixel_used.extend(pixel_indices)
split_info.append((pixel_indices, world_indices))
return split_info
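# Worked example for `_split_matrix` (editor's sketch), using the spectral-cube
# correlation matrix shown in the docstring above:
#
#     >>> m = np.array([[ True,  True, False],
#     ...               [ True,  True, False],
#     ...               [False, False,  True]])
#     >>> _split_matrix(m)                       # doctest: +SKIP
#     [([0, 1], [0, 1]), ([2], [2])]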
def pixel_to_pixel(wcs_in, wcs_out, *inputs):
"""
Transform pixel coordinates in a dataset with a WCS to pixel coordinates
in another dataset with a different WCS.
This function is designed to efficiently deal with input pixel arrays that
are broadcasted views of smaller arrays, and is compatible with any
APE14-compliant WCS.
Parameters
----------
wcs_in : `~astropy.wcs.wcsapi.BaseHighLevelWCS`
A WCS object for the original dataset which complies with the
high-level shared APE 14 WCS API.
wcs_out : `~astropy.wcs.wcsapi.BaseHighLevelWCS`
A WCS object for the target dataset which complies with the
high-level shared APE 14 WCS API.
*inputs :
Scalars or arrays giving the pixel coordinates to transform.
"""
# Shortcut for scalars
if np.isscalar(inputs[0]):
world_outputs = wcs_in.pixel_to_world(*inputs)
if not isinstance(world_outputs, (tuple, list)):
world_outputs = (world_outputs,)
return wcs_out.world_to_pixel(*world_outputs)
# Remember original shape
original_shape = inputs[0].shape
matrix = _pixel_to_pixel_correlation_matrix(wcs_in, wcs_out)
split_info = _split_matrix(matrix)
outputs = [None] * wcs_out.pixel_n_dim
for pixel_in_indices, pixel_out_indices in split_info:
pixel_inputs = []
for ipix in range(wcs_in.pixel_n_dim):
if ipix in pixel_in_indices:
pixel_inputs.append(unbroadcast(inputs[ipix]))
else:
pixel_inputs.append(inputs[ipix].flat[0])
pixel_inputs = np.broadcast_arrays(*pixel_inputs)
world_outputs = wcs_in.pixel_to_world(*pixel_inputs)
if not isinstance(world_outputs, (tuple, list)):
world_outputs = (world_outputs,)
pixel_outputs = wcs_out.world_to_pixel(*world_outputs)
if wcs_out.pixel_n_dim == 1:
pixel_outputs = (pixel_outputs,)
for ipix in range(wcs_out.pixel_n_dim):
if ipix in pixel_out_indices:
outputs[ipix] = np.broadcast_to(pixel_outputs[ipix], original_shape)
return outputs[0] if wcs_out.pixel_n_dim == 1 else outputs
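# Illustrative usage of `pixel_to_pixel` (editor's sketch): ``wcs_a`` and
# ``wcs_b`` below are placeholders for any two APE 14-compliant WCS objects
# covering the same region of sky.
#
#     >>> xp, yp = pixel_to_pixel(wcs_a, wcs_b, [1.0, 2.0], [3.0, 4.0])  # doctest: +SKIP
#
# Uncorrelated pixel axes are transformed separately, so broadcasted input
# views are only evaluated over the pixel dimensions they actually depend on.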
def local_partial_pixel_derivatives(wcs, *pixel, normalize_by_world=False):
"""
Return a matrix of shape ``(world_n_dim, pixel_n_dim)`` where each entry
``[i, j]`` is the partial derivative d(world_i)/d(pixel_j) at the requested
pixel position.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS transformation to evaluate the derivatives for.
*pixel : float
The scalar pixel coordinates at which to evaluate the derivatives.
normalize_by_world : bool
If `True`, the matrix is normalized so that for each world entry
the derivatives add up to 1.
"""
# Find the world coordinates at the requested pixel
pixel_ref = np.array(pixel)
world_ref = np.array(wcs.pixel_to_world_values(*pixel_ref))
# Set up the derivative matrix
derivatives = np.zeros((wcs.world_n_dim, wcs.pixel_n_dim))
for i in range(wcs.pixel_n_dim):
pixel_off = pixel_ref.copy()
pixel_off[i] += 1
world_off = np.array(wcs.pixel_to_world_values(*pixel_off))
derivatives[:, i] = world_off - world_ref
if normalize_by_world:
derivatives /= derivatives.sum(axis=0)[:, np.newaxis]
return derivatives
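# Illustrative usage of `local_partial_pixel_derivatives` (editor's sketch):
# for an undistorted celestial WCS the result approximates the CDELT/CD matrix,
# obtained here with a one-pixel finite difference.
#
#     >>> from astropy.wcs import WCS
#     >>> w = WCS(naxis=2)
#     >>> w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
#     >>> w.wcs.cdelt = [-2.777e-4, 2.777e-4]
#     >>> local_partial_pixel_derivatives(w, 10.0, 20.0)  # doctest: +SKIP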
def _linear_wcs_fit(params, lon, lat, x, y, w_obj):
"""
Objective function for fitting linear terms.
Parameters
----------
params : array
        6 element array. First 4 elements are the flattened CD matrix, last 2 are CRPIX.
lon, lat: array
Sky coordinates.
x, y: array
Pixel coordinates
w_obj: `~astropy.wcs.WCS`
WCS object
"""
cd = params[0:4]
crpix = params[4:6]
w_obj.wcs.cd = ((cd[0], cd[1]), (cd[2], cd[3]))
w_obj.wcs.crpix = crpix
lon2, lat2 = w_obj.wcs_pix2world(x, y, 0)
lat_resids = lat - lat2
lon_resids = lon - lon2
# In case the longitude has wrapped around
lon_resids = np.mod(lon_resids - 180.0, 360.0) - 180.0
resids = np.concatenate((lon_resids * np.cos(np.radians(lat)), lat_resids))
return resids
def _sip_fit(params, lon, lat, u, v, w_obj, order, coeff_names):
"""Objective function for fitting SIP.
Parameters
----------
params : array
        Fittable parameters. First 2 elements are CRPIX, the next 4 are the
        flattened CD matrix, and the remainder are the SIP A and B coefficients.
lon, lat: array
Sky coordinates.
u, v: array
Pixel coordinates
    w_obj: `~astropy.wcs.WCS`
        WCS object
    order : int
        Order of the SIP polynomials being fit.
    coeff_names : list of str
        Exponent pairs (strings like ``'2_0'``) naming the SIP coefficients,
        shared by the A and B polynomials.
"""
from astropy.modeling.models import SIP # here to avoid circular import
# unpack params
crpix = params[0:2]
cdx = params[2:6].reshape((2, 2))
a_params = params[6 : 6 + len(coeff_names)]
b_params = params[6 + len(coeff_names) :]
# assign to wcs, used for transformations in this function
w_obj.wcs.cd = cdx
w_obj.wcs.crpix = crpix
a_coeff, b_coeff = {}, {}
for i in range(len(coeff_names)):
a_coeff["A_" + coeff_names[i]] = a_params[i]
b_coeff["B_" + coeff_names[i]] = b_params[i]
sip = SIP(
crpix=crpix, a_order=order, b_order=order, a_coeff=a_coeff, b_coeff=b_coeff
)
fuv, guv = sip(u, v)
xo, yo = np.dot(cdx, np.array([u + fuv - crpix[0], v + guv - crpix[1]]))
# use all pix2world in case `projection` contains distortion table
x, y = w_obj.all_world2pix(lon, lat, 0)
x, y = np.dot(w_obj.wcs.cd, (x - w_obj.wcs.crpix[0], y - w_obj.wcs.crpix[1]))
resids = np.concatenate((x - xo, y - yo))
return resids
def fit_wcs_from_points(
xy, world_coords, proj_point="center", projection="TAN", sip_degree=None
):
"""
Given two matching sets of coordinates on detector and sky,
compute the WCS.
Fits a WCS object to matched set of input detector and sky coordinates.
Optionally, a SIP can be fit to account for geometric
distortion. Returns an `~astropy.wcs.WCS` object with the best fit
parameters for mapping between input pixel and sky coordinates.
    The projection type (default 'TAN') can be passed in as a string, one of
the valid three-letter projection codes - or as a WCS object with
projection keywords already set. Note that if an input WCS has any
non-polynomial distortion, this will be applied and reflected in the
fit terms and coefficients. Passing in a WCS object in this way essentially
allows it to be refit based on the matched input coordinates and projection
point, but take care when using this option as non-projection related
keywords in the input might cause unexpected behavior.
Notes
-----
- The fiducial point for the spherical projection can be set to 'center'
to use the mean position of input sky coordinates, or as an
`~astropy.coordinates.SkyCoord` object.
- Units in all output WCS objects will always be in degrees.
- If the coordinate frame differs between `~astropy.coordinates.SkyCoord`
      objects passed in for ``world_coords`` and ``proj_point``, the frame of
      ``world_coords`` will take precedence as the frame for the output WCS.
- If a WCS object is passed in to ``projection`` the CD/PC matrix will
be used as an initial guess for the fit. If this is known to be
significantly off and may throw off the fit, set to the identity matrix
      (for example, by doing ``wcs.wcs.pc = [(1., 0.,), (0., 1.)]``).
Parameters
----------
xy : (`numpy.ndarray`, `numpy.ndarray`) tuple
x & y pixel coordinates.
world_coords : `~astropy.coordinates.SkyCoord`
Skycoord object with world coordinates.
    proj_point : 'center' or `~astropy.coordinates.SkyCoord`
Defaults to 'center', in which the geometric center of input world
coordinates will be used as the projection point. To specify an exact
        point for the projection, a SkyCoord object with a coordinate pair can
        be passed in. For consistency, the units and frame of these coordinates
        will be transformed to match those of ``world_coords`` if they differ.
projection : str or `~astropy.wcs.WCS`
        Three letter projection code, of any of the standard projections defined
in the FITS WCS standard. Optionally, a WCS object with projection
keywords set may be passed in.
sip_degree : None or int
If set to a non-zero integer value, will fit SIP of degree
``sip_degree`` to model geometric distortion. Defaults to None, meaning
no distortion corrections will be fit.
Returns
-------
wcs : `~astropy.wcs.WCS`
The best-fit WCS to the points given.
"""
from scipy.optimize import least_squares
import astropy.units as u
from astropy.coordinates import SkyCoord # here to avoid circular import
from .wcs import Sip
xp, yp = xy
try:
lon, lat = world_coords.data.lon.deg, world_coords.data.lat.deg
except AttributeError:
unit_sph = world_coords.unit_spherical
lon, lat = unit_sph.lon.deg, unit_sph.lat.deg
# verify input
if (type(proj_point) != type(world_coords)) and (proj_point != "center"):
raise ValueError(
"proj_point must be set to 'center', or an"
+ "`~astropy.coordinates.SkyCoord` object with "
+ "a pair of points."
)
use_center_as_proj_point = str(proj_point) == "center"
if not use_center_as_proj_point:
assert proj_point.size == 1
proj_codes = [
"AZP",
"SZP",
"TAN",
"STG",
"SIN",
"ARC",
"ZEA",
"AIR",
"CYP",
"CEA",
"CAR",
"MER",
"SFL",
"PAR",
"MOL",
"AIT",
"COP",
"COE",
"COD",
"COO",
"BON",
"PCO",
"TSC",
"CSC",
"QSC",
"HPX",
"XPH",
]
if type(projection) == str:
if projection not in proj_codes:
raise ValueError(
"Must specify valid projection code from list of "
+ "supported types: ",
", ".join(proj_codes),
)
# empty wcs to fill in with fit values
wcs = celestial_frame_to_wcs(frame=world_coords.frame, projection=projection)
else: # if projection is not string, should be wcs object. use as template.
wcs = copy.deepcopy(projection)
wcs.cdelt = (1.0, 1.0) # make sure cdelt is 1
wcs.sip = None
# Change PC to CD, since cdelt will be set to 1
if wcs.wcs.has_pc():
wcs.wcs.cd = wcs.wcs.pc
wcs.wcs.__delattr__("pc")
if (type(sip_degree) != type(None)) and (type(sip_degree) != int):
raise ValueError("sip_degree must be None, or integer.")
# compute bounding box for sources in image coordinates:
xpmin, xpmax, ypmin, ypmax = xp.min(), xp.max(), yp.min(), yp.max()
# set pixel_shape to span of input points
wcs.pixel_shape = (
1 if xpmax <= 0.0 else int(np.ceil(xpmax)),
1 if ypmax <= 0.0 else int(np.ceil(ypmax)),
)
# determine CRVAL from input
close = lambda l, p: p[np.argmin(np.abs(l))]
if use_center_as_proj_point: # use center of input points
sc1 = SkyCoord(lon.min() * u.deg, lat.max() * u.deg)
sc2 = SkyCoord(lon.max() * u.deg, lat.min() * u.deg)
pa = sc1.position_angle(sc2)
sep = sc1.separation(sc2)
midpoint_sc = sc1.directional_offset_by(pa, sep / 2)
wcs.wcs.crval = (midpoint_sc.data.lon.deg, midpoint_sc.data.lat.deg)
wcs.wcs.crpix = ((xpmax + xpmin) / 2.0, (ypmax + ypmin) / 2.0)
else: # convert units, initial guess for crpix
proj_point.transform_to(world_coords)
wcs.wcs.crval = (proj_point.data.lon.deg, proj_point.data.lat.deg)
        wcs.wcs.crpix = (
            close(lon - wcs.wcs.crval[0], xp + 1),
            close(lat - wcs.wcs.crval[1], yp + 1),
        )
# fit linear terms, assign to wcs
# use (1, 0, 0, 1) as initial guess, in case input wcs was passed in
# and cd terms are way off.
# Use bounds to require that the fit center pixel is on the input image
if xpmin == xpmax:
xpmin, xpmax = xpmin - 0.5, xpmax + 0.5
if ypmin == ypmax:
ypmin, ypmax = ypmin - 0.5, ypmax + 0.5
p0 = np.concatenate([wcs.wcs.cd.flatten(), wcs.wcs.crpix.flatten()])
fit = least_squares(
_linear_wcs_fit,
p0,
args=(lon, lat, xp, yp, wcs),
bounds=[
[-np.inf, -np.inf, -np.inf, -np.inf, xpmin + 1, ypmin + 1],
[np.inf, np.inf, np.inf, np.inf, xpmax + 1, ypmax + 1],
],
)
wcs.wcs.crpix = np.array(fit.x[4:6])
wcs.wcs.cd = np.array(fit.x[0:4].reshape((2, 2)))
# fit SIP, if specified. Only fit forward coefficients
if sip_degree:
degree = sip_degree
if "-SIP" not in wcs.wcs.ctype[0]:
wcs.wcs.ctype = [x + "-SIP" for x in wcs.wcs.ctype]
coef_names = [
f"{i}_{j}"
for i in range(degree + 1)
for j in range(degree + 1)
if (i + j) < (degree + 1) and (i + j) > 1
]
p0 = np.concatenate(
(
np.array(wcs.wcs.crpix),
wcs.wcs.cd.flatten(),
np.zeros(2 * len(coef_names)),
)
)
fit = least_squares(
_sip_fit,
p0,
args=(lon, lat, xp, yp, wcs, degree, coef_names),
bounds=[
[xpmin + 1, ypmin + 1] + [-np.inf] * (4 + 2 * len(coef_names)),
[xpmax + 1, ypmax + 1] + [np.inf] * (4 + 2 * len(coef_names)),
],
)
coef_fit = (
list(fit.x[6 : 6 + len(coef_names)]),
list(fit.x[6 + len(coef_names) :]),
)
# put fit values in wcs
wcs.wcs.cd = fit.x[2:6].reshape((2, 2))
wcs.wcs.crpix = fit.x[0:2]
a_vals = np.zeros((degree + 1, degree + 1))
b_vals = np.zeros((degree + 1, degree + 1))
for coef_name in coef_names:
a_vals[int(coef_name[0])][int(coef_name[2])] = coef_fit[0].pop(0)
b_vals[int(coef_name[0])][int(coef_name[2])] = coef_fit[1].pop(0)
wcs.sip = Sip(
a_vals,
b_vals,
np.zeros((degree + 1, degree + 1)),
np.zeros((degree + 1, degree + 1)),
wcs.wcs.crpix,
)
return wcs
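# Illustrative usage of `fit_wcs_from_points` (editor's sketch; all positions
# below are made up):
#
#     >>> import numpy as np
#     >>> from astropy.coordinates import SkyCoord
#     >>> xy = (np.array([10.0, 200.0, 350.0, 480.0]),
#     ...       np.array([15.0, 180.0, 420.0, 90.0]))
#     >>> world = SkyCoord(ra=[30.001, 30.052, 30.091, 30.124],
#     ...                  dec=[44.99, 45.04, 45.10, 45.01], unit="deg")
#     >>> w = fit_wcs_from_points(xy, world, projection="TAN")      # doctest: +SKIP
#     >>> w_sip = fit_wcs_from_points(xy, world, sip_degree=2)      # doctest: +SKIP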
def obsgeo_to_frame(obsgeo, obstime):
"""
Convert a WCS obsgeo property into an ITRS coordinate frame.
Parameters
----------
obsgeo : array-like
A shape ``(6, )`` array representing ``OBSGEO-[XYZ], OBSGEO-[BLH]`` as
returned by ``WCS.wcs.obsgeo``.
obstime : time-like
The time associated with the coordinate, will be passed to
`~astropy.coordinates.ITRS` as the obstime keyword.
Returns
-------
~astropy.coordinates.ITRS
An `~astropy.coordinates.ITRS` coordinate frame
representing the coordinates.
Notes
-----
The obsgeo array as accessed on a `.WCS` object is a length 6 numpy array
where the first three elements are the coordinate in a cartesian
representation and the second 3 are the coordinate in a spherical
representation.
    This function prioritizes reading the cartesian coordinates, and will only
    read the spherical coordinates if the cartesian coordinates are either all
    zero or any of the cartesian coordinates are non-finite.
In the case where both the spherical and cartesian coordinates have some
non-finite values the spherical coordinates will be returned with the
non-finite values included.
"""
if (
obsgeo is None
or len(obsgeo) != 6
or np.all(np.array(obsgeo) == 0)
or np.all(~np.isfinite(obsgeo))
):
raise ValueError(
f"Can not parse the 'obsgeo' location ({obsgeo}). "
"obsgeo should be a length 6 non-zero, finite numpy array"
)
# If the cartesian coords are zero or have NaNs in them use the spherical ones
if np.all(obsgeo[:3] == 0) or np.any(~np.isfinite(obsgeo[:3])):
data = SphericalRepresentation(*(obsgeo[3:] * (u.deg, u.deg, u.m)))
# Otherwise we assume the cartesian ones are valid
else:
data = CartesianRepresentation(*obsgeo[:3] * u.m)
return ITRS(data, obstime=obstime)
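# Illustrative usage of `obsgeo_to_frame` (editor's sketch; the Cartesian
# location below is made up but roughly Earth-surface scale):
#
#     >>> import numpy as np
#     >>> obsgeo = np.array([1.1e6, -4.8e6, 3.9e6, 0.0, 0.0, 0.0])
#     >>> obsgeo_to_frame(obsgeo, obstime="2020-01-01T00:00:00")  # doctest: +SKIP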
|
2b5c3c48b90f033058d221490dd3c8e142a49e40b58f1f1d6687470fb1c3a3b1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The astropy.time package provides functionality for manipulating times and
dates. Specific emphasis is placed on supporting time scales (e.g. UTC, TAI,
UT1) and time representations (e.g. JD, MJD, ISO 8601) that are used in
astronomy.
"""
import copy
import enum
import operator
import os
import threading
from datetime import date, datetime, timedelta
from time import strftime
from warnings import warn
import erfa
import numpy as np
from astropy import constants as const
from astropy import units as u
from astropy.extern import _strptime
from astropy.units import UnitConversionError
from astropy.utils import ShapedLikeNDArray
from astropy.utils.data_info import MixinInfo, data_info_factory
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
# Import TimeFromEpoch to avoid breaking code that followed the old example of
# making a custom timescale in the documentation.
from .formats import TimeFromEpoch # noqa: F401
from .formats import (
TIME_DELTA_FORMATS,
TIME_FORMATS,
TimeAstropyTime,
TimeDatetime,
TimeJD,
TimeUnique,
)
from .time_helper.function_helpers import CUSTOM_FUNCTIONS, UNSUPPORTED_FUNCTIONS
from .utils import day_frac
__all__ = [
"TimeBase",
"Time",
"TimeDelta",
"TimeInfo",
"TimeInfoBase",
"update_leap_seconds",
"TIME_SCALES",
"STANDARD_TIME_SCALES",
"TIME_DELTA_SCALES",
"ScaleValueError",
"OperandTypeError",
"TimeDeltaMissingUnitWarning",
]
STANDARD_TIME_SCALES = ("tai", "tcb", "tcg", "tdb", "tt", "ut1", "utc")
LOCAL_SCALES = ("local",)
TIME_TYPES = {
scale: scales for scales in (STANDARD_TIME_SCALES, LOCAL_SCALES) for scale in scales
}
TIME_SCALES = STANDARD_TIME_SCALES + LOCAL_SCALES
MULTI_HOPS = {
("tai", "tcb"): ("tt", "tdb"),
("tai", "tcg"): ("tt",),
("tai", "ut1"): ("utc",),
("tai", "tdb"): ("tt",),
("tcb", "tcg"): ("tdb", "tt"),
("tcb", "tt"): ("tdb",),
("tcb", "ut1"): ("tdb", "tt", "tai", "utc"),
("tcb", "utc"): ("tdb", "tt", "tai"),
("tcg", "tdb"): ("tt",),
("tcg", "ut1"): ("tt", "tai", "utc"),
("tcg", "utc"): ("tt", "tai"),
("tdb", "ut1"): ("tt", "tai", "utc"),
("tdb", "utc"): ("tt", "tai"),
("tt", "ut1"): ("tai", "utc"),
("tt", "utc"): ("tai",),
}
GEOCENTRIC_SCALES = ("tai", "tt", "tcg")
BARYCENTRIC_SCALES = ("tcb", "tdb")
ROTATIONAL_SCALES = ("ut1",)
TIME_DELTA_TYPES = {
scale: scales
for scales in (
GEOCENTRIC_SCALES,
BARYCENTRIC_SCALES,
ROTATIONAL_SCALES,
LOCAL_SCALES,
)
for scale in scales
}
TIME_DELTA_SCALES = (
GEOCENTRIC_SCALES + BARYCENTRIC_SCALES + ROTATIONAL_SCALES + LOCAL_SCALES
)
# For time scale changes, we need L_G and L_B, which are stored in erfam.h as
# /* L_G = 1 - d(TT)/d(TCG) */
# define ERFA_ELG (6.969290134e-10)
# /* L_B = 1 - d(TDB)/d(TCB), and TDB (s) at TAI 1977/1/1.0 */
# define ERFA_ELB (1.550519768e-8)
# These are exposed in erfa as erfa.ELG and erfa.ELB.
# Implied: d(TT)/d(TCG) = 1-L_G
# and d(TCG)/d(TT) = 1/(1-L_G) = 1 + (1-(1-L_G))/(1-L_G) = 1 + L_G/(1-L_G)
# scale offsets as second = first + first * scale_offset[(first,second)]
SCALE_OFFSETS = {
("tt", "tai"): None,
("tai", "tt"): None,
("tcg", "tt"): -erfa.ELG,
("tt", "tcg"): erfa.ELG / (1.0 - erfa.ELG),
("tcg", "tai"): -erfa.ELG,
("tai", "tcg"): erfa.ELG / (1.0 - erfa.ELG),
("tcb", "tdb"): -erfa.ELB,
("tdb", "tcb"): erfa.ELB / (1.0 - erfa.ELB),
}
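# Worked example of the relation above (editor's note): one day of TT seconds
# corresponds to slightly more TCG seconds, by the fractional rate
# L_G / (1 - L_G):
#
#     >>> dt_tt = 86400.0                                       # s of TT
#     >>> dt_tcg = dt_tt + dt_tt * SCALE_OFFSETS[("tt", "tcg")]
#     >>> dt_tcg - dt_tt                                        # doctest: +SKIP
#     6.021...e-05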
# triple-level dictionary, yay!
SIDEREAL_TIME_MODELS = {
"mean": {
"IAU2006": {"function": erfa.gmst06, "scales": ("ut1", "tt")},
"IAU2000": {"function": erfa.gmst00, "scales": ("ut1", "tt")},
"IAU1982": {"function": erfa.gmst82, "scales": ("ut1",), "include_tio": False},
},
"apparent": {
"IAU2006A": {"function": erfa.gst06a, "scales": ("ut1", "tt")},
"IAU2000A": {"function": erfa.gst00a, "scales": ("ut1", "tt")},
"IAU2000B": {"function": erfa.gst00b, "scales": ("ut1",)},
"IAU1994": {"function": erfa.gst94, "scales": ("ut1",), "include_tio": False},
},
}
class _LeapSecondsCheck(enum.Enum):
NOT_STARTED = 0 # No thread has reached the check
RUNNING = 1 # A thread is running update_leap_seconds (_LEAP_SECONDS_LOCK is held)
DONE = 2 # update_leap_seconds has completed
_LEAP_SECONDS_CHECK = _LeapSecondsCheck.NOT_STARTED
_LEAP_SECONDS_LOCK = threading.RLock()
def _compress_array_dims(arr):
"""Compress array by allowing at most 2 * edgeitems + 1 in each dimension.
Parameters
----------
arr : array-like
Array to compress.
Returns
-------
out : array-like
Compressed array.
"""
idxs = []
edgeitems = np.get_printoptions()["edgeitems"]
# Build up a list of index arrays for each dimension, allowing no more than
# 2 * edgeitems + 1 elements in each dimension.
for dim in range(arr.ndim):
if arr.shape[dim] > 2 * edgeitems:
# The middle [edgeitems] value does not matter as it gets replaced
# by ... in the output.
idxs.append(
np.concatenate(
[np.arange(edgeitems), [edgeitems], np.arange(-edgeitems, 0)]
)
)
else:
idxs.append(np.arange(arr.shape[dim]))
# Use the magic np.ix_ function to effectively treat each index array as a
# slicing operator.
idxs_ix = np.ix_(*idxs)
out = arr[idxs_ix]
return out
class TimeInfoBase(MixinInfo):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
This base class is common between TimeInfo and TimeDeltaInfo.
"""
attr_names = MixinInfo.attr_names | {"serialize_method"}
_supports_indexing = True
# The usual tuple of attributes needed for serialization is replaced
# by a property, since Time can be serialized different ways.
_represent_as_dict_extra_attrs = (
"format",
"scale",
"precision",
"in_subfmt",
"out_subfmt",
"location",
"_delta_ut1_utc",
"_delta_tdb_tt",
)
# When serializing, write out the `value` attribute using the column name.
_represent_as_dict_primary_data = "value"
mask_val = np.ma.masked
@property
def _represent_as_dict_attrs(self):
method = self.serialize_method[self._serialize_context]
if method == "formatted_value":
out = ("value",)
elif method == "jd1_jd2":
out = ("jd1", "jd2")
else:
raise ValueError("serialize method must be 'formatted_value' or 'jd1_jd2'")
return out + self._represent_as_dict_extra_attrs
def __init__(self, bound=False):
super().__init__(bound)
# If bound to a data object instance then create the dict of attributes
# which stores the info attribute values.
if bound:
# Specify how to serialize this object depending on context.
# If ``True`` for a context, then use formatted ``value`` attribute
# (e.g. the ISO time string). If ``False`` then use float jd1 and jd2.
self.serialize_method = {
"fits": "jd1_jd2",
"ecsv": "formatted_value",
"hdf5": "jd1_jd2",
"yaml": "jd1_jd2",
"parquet": "jd1_jd2",
None: "jd1_jd2",
}
def get_sortable_arrays(self):
"""
Return a list of arrays which can be lexically sorted to represent
the order of the parent column.
Returns
-------
arrays : list of ndarray
"""
parent = self._parent
jd_approx = parent.jd
jd_remainder = (parent - parent.__class__(jd_approx, format="jd")).jd
return [jd_approx, jd_remainder]
@property
def unit(self):
return None
info_summary_stats = staticmethod(
data_info_factory(
names=MixinInfo._stats,
funcs=[getattr(np, stat) for stat in MixinInfo._stats],
)
)
# When Time has mean, std, min, max methods:
    # funcs = [lambda x: getattr(x, stat)() for stat in MixinInfo._stats])
def _construct_from_dict(self, map):
if "jd1" in map and "jd2" in map:
# Initialize as JD but revert to desired format and out_subfmt (if needed)
format = map.pop("format")
out_subfmt = map.pop("out_subfmt", None)
map["format"] = "jd"
map["val"] = map.pop("jd1")
map["val2"] = map.pop("jd2")
out = self._parent_cls(**map)
out.format = format
if out_subfmt is not None:
out.out_subfmt = out_subfmt
else:
map["val"] = map.pop("value")
out = self._parent_cls(**map)
return out
def new_like(self, cols, length, metadata_conflicts="warn", name=None):
"""
Return a new Time instance which is consistent with the input Time objects
``cols`` and has ``length`` rows.
This is intended for creating an empty Time instance whose elements can
be set in-place for table operations like join or vstack. It checks
that the input locations and attributes are consistent. This is used
when a Time object is used as a mixin column in an astropy Table.
Parameters
----------
cols : list
List of input columns (Time objects)
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : Time (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(
cols, metadata_conflicts, name, ("meta", "description")
)
attrs.pop("dtype") # Not relevant for Time
col0 = cols[0]
# Check that location is consistent for all Time objects
for col in cols[1:]:
# This is the method used by __setitem__ to ensure that the right side
# has a consistent location (and coerce data if necessary, but that does
# not happen in this case since `col` is already a Time object). If this
# passes then any subsequent table operations via setitem will work.
try:
col0._make_value_equivalent(slice(None), col)
except ValueError:
raise ValueError("input columns have inconsistent locations")
# Make a new Time object with the desired shape and attributes
shape = (length,) + attrs.pop("shape")
jd2000 = 2451544.5 # Arbitrary JD value J2000.0 that will work with ERFA
jd1 = np.full(shape, jd2000, dtype="f8")
jd2 = np.zeros(shape, dtype="f8")
tm_attrs = {
attr: getattr(col0, attr) for attr in ("scale", "location", "precision")
}
out = self._parent_cls(jd1, jd2, format="jd", **tm_attrs)
out.format = col0.format
out.out_subfmt = col0.out_subfmt
out.in_subfmt = col0.in_subfmt
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
class TimeInfo(TimeInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
def _represent_as_dict(self, attrs=None):
"""Get the values for the parent ``attrs`` and return as a dict.
By default, uses '_represent_as_dict_attrs'.
"""
map = super()._represent_as_dict(attrs=attrs)
# TODO: refactor these special cases into the TimeFormat classes?
# The datetime64 format requires special handling for ECSV (see #12840).
# The `value` has numpy dtype datetime64 but this is not an allowed
# datatype for ECSV. Instead convert to a string representation.
if (
self._serialize_context == "ecsv"
and map["format"] == "datetime64"
and "value" in map
):
map["value"] = map["value"].astype("U")
# The datetime format is serialized as ISO with no loss of precision.
if map["format"] == "datetime" and "value" in map:
map["value"] = np.vectorize(lambda x: x.isoformat())(map["value"])
return map
def _construct_from_dict(self, map):
# See comment above. May need to convert string back to datetime64.
# Note that _serialize_context is not set here so we just look for the
# string value directly.
if (
map["format"] == "datetime64"
and "value" in map
and map["value"].dtype.kind == "U"
):
map["value"] = map["value"].astype("datetime64")
# Convert back to datetime objects for datetime format.
if map["format"] == "datetime" and "value" in map:
from datetime import datetime
map["value"] = np.vectorize(datetime.fromisoformat)(map["value"])
delta_ut1_utc = map.pop("_delta_ut1_utc", None)
delta_tdb_tt = map.pop("_delta_tdb_tt", None)
out = super()._construct_from_dict(map)
if delta_ut1_utc is not None:
out._delta_ut1_utc = delta_ut1_utc
if delta_tdb_tt is not None:
out._delta_tdb_tt = delta_tdb_tt
return out
class TimeDeltaInfo(TimeInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
_represent_as_dict_extra_attrs = ("format", "scale")
def new_like(self, cols, length, metadata_conflicts="warn", name=None):
"""
Return a new TimeDelta instance which is consistent with the input Time objects
``cols`` and has ``length`` rows.
This is intended for creating an empty Time instance whose elements can
be set in-place for table operations like join or vstack. It checks
that the input locations and attributes are consistent. This is used
when a Time object is used as a mixin column in an astropy Table.
Parameters
----------
cols : list
List of input columns (Time objects)
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : Time (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(
cols, metadata_conflicts, name, ("meta", "description")
)
attrs.pop("dtype") # Not relevant for Time
col0 = cols[0]
# Make a new Time object with the desired shape and attributes
shape = (length,) + attrs.pop("shape")
jd1 = np.zeros(shape, dtype="f8")
jd2 = np.zeros(shape, dtype="f8")
out = self._parent_cls(jd1, jd2, format="jd", scale=col0.scale)
out.format = col0.format
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
class TimeBase(ShapedLikeNDArray):
"""Base time class from which Time and TimeDelta inherit."""
# Make sure that reverse arithmetic (e.g., TimeDelta.__rmul__)
# gets called over the __mul__ of Numpy arrays.
__array_priority__ = 20000
# Declare that Time can be used as a Table column by defining the
# attribute where column attributes will be stored.
_astropy_column_attrs = None
def __getnewargs__(self):
return (self._time,)
def _init_from_vals(
self,
val,
val2,
format,
scale,
copy,
precision=None,
in_subfmt=None,
out_subfmt=None,
):
"""
Set the internal _format, scale, and _time attrs from user
inputs. This handles coercion into the correct shapes and
some basic input validation.
"""
if precision is None:
precision = 3
if in_subfmt is None:
in_subfmt = "*"
if out_subfmt is None:
out_subfmt = "*"
# Coerce val into an array
val = _make_array(val, copy)
# If val2 is not None, ensure consistency
if val2 is not None:
val2 = _make_array(val2, copy)
try:
np.broadcast(val, val2)
except ValueError:
raise ValueError(
"Input val and val2 have inconsistent shape; "
"they cannot be broadcast together."
)
if scale is not None:
if not (isinstance(scale, str) and scale.lower() in self.SCALES):
raise ScaleValueError(
f"Scale {scale!r} is not in the allowed scales "
f"{sorted(self.SCALES)}"
)
# If either of the input val, val2 are masked arrays then
# find the masked elements and fill them.
mask, val, val2 = _check_for_masked_and_fill(val, val2)
# Parse / convert input values into internal jd1, jd2 based on format
self._time = self._get_time_fmt(
val, val2, format, scale, precision, in_subfmt, out_subfmt
)
self._format = self._time.name
# Hack from #9969 to allow passing the location value that has been
# collected by the TimeAstropyTime format class up to the Time level.
# TODO: find a nicer way.
if hasattr(self._time, "_location"):
self.location = self._time._location
del self._time._location
        # If any inputs were masked then mask jd2 accordingly. From above
        # routine ``mask`` must be either Python bool False or a bool ndarray
# with shape broadcastable to jd2.
if mask is not False:
mask = np.broadcast_to(mask, self._time.jd2.shape)
self._time.jd1[mask] = 2451544.5 # Set to JD for 2000-01-01
self._time.jd2[mask] = np.nan
def _get_time_fmt(self, val, val2, format, scale, precision, in_subfmt, out_subfmt):
"""
Given the supplied val, val2, format and scale try to instantiate
the corresponding TimeFormat class to convert the input values into
the internal jd1 and jd2.
If format is `None` and the input is a string-type or object array then
guess available formats and stop when one matches.
"""
if format is None and (
val.dtype.kind in ("S", "U", "O", "M") or val.dtype.names
):
# Input is a string, object, datetime, or a table-like ndarray
# (structured array, recarray). These input types can be
# uniquely identified by the format classes.
formats = [
(name, cls)
for name, cls in self.FORMATS.items()
if issubclass(cls, TimeUnique)
]
# AstropyTime is a pseudo-format that isn't in the TIME_FORMATS registry,
# but try to guess it at the end.
formats.append(("astropy_time", TimeAstropyTime))
elif not (isinstance(format, str) and format.lower() in self.FORMATS):
if format is None:
raise ValueError(
"No time format was given, and the input is not unique"
)
else:
raise ValueError(
f"Format {format!r} is not one of the allowed formats "
f"{sorted(self.FORMATS)}"
)
else:
formats = [(format, self.FORMATS[format])]
assert formats
problems = {}
for name, cls in formats:
try:
return cls(val, val2, scale, precision, in_subfmt, out_subfmt)
except UnitConversionError:
raise
except (ValueError, TypeError) as err:
# If ``format`` specified then there is only one possibility, so raise
# immediately and include the upstream exception message to make it
# easier for user to see what is wrong.
if len(formats) == 1:
raise ValueError(
f"Input values did not match the format class {format}:"
+ os.linesep
+ f"{err.__class__.__name__}: {err}"
) from err
else:
problems[name] = err
else:
raise ValueError(
"Input values did not match any of the formats where the format "
f"keyword is optional: {problems}"
) from problems[formats[0][0]]
@property
def writeable(self):
return self._time.jd1.flags.writeable & self._time.jd2.flags.writeable
@writeable.setter
def writeable(self, value):
self._time.jd1.flags.writeable = value
self._time.jd2.flags.writeable = value
@property
def format(self):
"""
Get or set time format.
The format defines the way times are represented when accessed via the
``.value`` attribute. By default it is the same as the format used for
initializing the `Time` instance, but it can be set to any other value
that could be used for initialization. These can be listed with::
>>> list(Time.FORMATS)
['jd', 'mjd', 'decimalyear', 'unix', 'unix_tai', 'cxcsec', 'gps', 'plot_date',
'stardate', 'datetime', 'ymdhms', 'iso', 'isot', 'yday', 'datetime64',
'fits', 'byear', 'jyear', 'byear_str', 'jyear_str']
"""
return self._format
@format.setter
def format(self, format):
"""Set time format."""
if format not in self.FORMATS:
raise ValueError(f"format must be one of {list(self.FORMATS)}")
format_cls = self.FORMATS[format]
# Get the new TimeFormat object to contain time in new format. Possibly
# coerce in/out_subfmt to '*' (default) if existing subfmt values are
# not valid in the new format.
self._time = format_cls(
self._time.jd1,
self._time.jd2,
self._time._scale,
self.precision,
in_subfmt=format_cls._get_allowed_subfmt(self.in_subfmt),
out_subfmt=format_cls._get_allowed_subfmt(self.out_subfmt),
from_jd=True,
)
self._format = format
def to_string(self):
"""Output a string representation of the Time or TimeDelta object.
Similar to ``str(self.value)`` (which uses numpy array formatting) but
array values are evaluated only for the items that actually are output.
For large arrays this can be a substantial performance improvement.
Returns
-------
out : str
String representation of the time values.
"""
npo = np.get_printoptions()
if self.size < npo["threshold"]:
out = str(self.value)
else:
# Compress time object by allowing at most 2 * npo["edgeitems"] + 1
# in each dimension. Then force numpy to use "summary mode" of
# showing only the edge items by setting the size threshold to 0.
# TODO: use np.core.arrayprint._leading_trailing if we have support for
# np.concatenate. See #8610.
tm = _compress_array_dims(self)
with np.printoptions(threshold=0):
out = str(tm.value)
return out
def __repr__(self):
return "<{} object: scale='{}' format='{}' value={}>".format(
self.__class__.__name__, self.scale, self.format, self.to_string()
)
def __str__(self):
return self.to_string()
def __hash__(self):
try:
loc = getattr(self, "location", None)
if loc is not None:
loc = loc.x.to_value(u.m), loc.y.to_value(u.m), loc.z.to_value(u.m)
return hash((self.jd1, self.jd2, self.scale, loc))
except TypeError:
if self.ndim != 0:
reason = "(must be scalar)"
elif self.masked:
reason = "(value is masked)"
else:
raise
raise TypeError(f"unhashable type: '{self.__class__.__name__}' {reason}")
@property
def scale(self):
"""Time scale."""
return self._time.scale
def _set_scale(self, scale):
"""
This is the key routine that actually does time scale conversions.
This is not public and not connected to the read-only scale property.
"""
if scale == self.scale:
return
if scale not in self.SCALES:
raise ValueError(
f"Scale {scale!r} is not in the allowed scales {sorted(self.SCALES)}"
)
if scale == "utc" or self.scale == "utc":
# If doing a transform involving UTC then check that the leap
# seconds table is up to date.
_check_leapsec()
# Determine the chain of scale transformations to get from the current
# scale to the new scale. MULTI_HOPS contains a dict of all
# transformations (xforms) that require intermediate xforms.
# The MULTI_HOPS dict is keyed by (sys1, sys2) in alphabetical order.
xform = (self.scale, scale)
xform_sort = tuple(sorted(xform))
multi = MULTI_HOPS.get(xform_sort, ())
xforms = xform_sort[:1] + multi + xform_sort[-1:]
# If we made the reverse xform then reverse it now.
if xform_sort != xform:
xforms = tuple(reversed(xforms))
# Transform the jd1,2 pairs through the chain of scale xforms.
jd1, jd2 = self._time.jd1, self._time.jd2_filled
for sys1, sys2 in zip(xforms[:-1], xforms[1:]):
# Some xforms require an additional delta_ argument that is
# provided through Time methods. These values may be supplied by
# the user or computed based on available approximations. The
# get_delta_ methods are available for only one combination of
# sys1, sys2 though the property applies for both xform directions.
args = [jd1, jd2]
for sys12 in ((sys1, sys2), (sys2, sys1)):
dt_method = "_get_delta_{}_{}".format(*sys12)
try:
get_dt = getattr(self, dt_method)
except AttributeError:
pass
else:
args.append(get_dt(jd1, jd2))
break
conv_func = getattr(erfa, sys1 + sys2)
jd1, jd2 = conv_func(*args)
jd1, jd2 = day_frac(jd1, jd2)
if self.masked:
jd2[self.mask] = np.nan
self._time = self.FORMATS[self.format](
jd1,
jd2,
scale,
self.precision,
self.in_subfmt,
self.out_subfmt,
from_jd=True,
)
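    # Worked example of the chain construction above (editor's note):
    # converting from "tcb" to "ut1" sorts the pair to ("tcb", "ut1"), finds
    # MULTI_HOPS[("tcb", "ut1")] == ("tdb", "tt", "tai", "utc"), and therefore
    # applies erfa.tcbtdb, erfa.tdbtt, erfa.tttai, erfa.taiutc and erfa.utcut1
    # in turn, passing an extra delta argument wherever a matching
    # _get_delta_<sys1>_<sys2> method exists (e.g. _get_delta_ut1_utc).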
@property
def precision(self):
"""
Decimal precision when outputting seconds as floating point (int
value between 0 and 9 inclusive).
"""
return self._time.precision
@precision.setter
def precision(self, val):
del self.cache
self._time.precision = val
@property
def in_subfmt(self):
"""
Unix wildcard pattern to select subformats for parsing string input
times.
"""
return self._time.in_subfmt
@in_subfmt.setter
def in_subfmt(self, val):
self._time.in_subfmt = val
del self.cache
@property
def out_subfmt(self):
"""
Unix wildcard pattern to select subformats for outputting times.
"""
return self._time.out_subfmt
@out_subfmt.setter
def out_subfmt(self, val):
# Setting the out_subfmt property here does validation of ``val``
self._time.out_subfmt = val
del self.cache
@property
def shape(self):
"""The shape of the time instances.
Like `~numpy.ndarray.shape`, can be set to a new shape by assigning a
tuple. Note that if different instances share some but not all
underlying data, setting the shape of one instance can make the other
instance unusable. Hence, it is strongly recommended to get new,
reshaped instances with the ``reshape`` method.
Raises
------
ValueError
If the new shape has the wrong total number of elements.
AttributeError
If the shape of the ``jd1``, ``jd2``, ``location``,
``delta_ut1_utc``, or ``delta_tdb_tt`` attributes cannot be changed
without the arrays being copied. For these cases, use the
`Time.reshape` method (which copies any arrays that cannot be
reshaped in-place).
"""
return self._time.jd1.shape
@shape.setter
def shape(self, shape):
del self.cache
# We have to keep track of arrays that were already reshaped,
# since we may have to return those to their original shape if a later
# shape-setting fails.
reshaped = []
oldshape = self.shape
# In-place reshape of data/attributes. Need to access _time.jd1/2 not
# self.jd1/2 because the latter are not guaranteed to be the actual
# data, and in fact should not be directly changeable from the public
# API.
for obj, attr in (
(self._time, "jd1"),
(self._time, "jd2"),
(self, "_delta_ut1_utc"),
(self, "_delta_tdb_tt"),
(self, "location"),
):
val = getattr(obj, attr, None)
if val is not None and val.size > 1:
try:
val.shape = shape
except Exception:
for val2 in reshaped:
val2.shape = oldshape
raise
else:
reshaped.append(val)
def _shaped_like_input(self, value):
if self._time.jd1.shape:
if isinstance(value, np.ndarray):
return value
else:
raise TypeError(
f"JD is an array ({self._time.jd1!r}) but value is not ({value!r})"
)
else:
# zero-dimensional array, is it safe to unbox?
if (
isinstance(value, np.ndarray)
and not value.shape
and not np.ma.is_masked(value)
):
if value.dtype.kind == "M":
# existing test doesn't want datetime64 converted
return value[()]
elif value.dtype.fields:
# Unpack but keep field names; .item() doesn't
# Still don't get python types in the fields
return value[()]
else:
return value.item()
else:
return value
@property
def jd1(self):
"""
First of the two doubles that internally store time value(s) in JD.
"""
jd1 = self._time.mask_if_needed(self._time.jd1)
return self._shaped_like_input(jd1)
@property
def jd2(self):
"""
Second of the two doubles that internally store time value(s) in JD.
"""
jd2 = self._time.mask_if_needed(self._time.jd2)
return self._shaped_like_input(jd2)
def to_value(self, format, subfmt="*"):
"""Get time values expressed in specified output format.
This method allows representing the ``Time`` object in the desired
output ``format`` and optional sub-format ``subfmt``. Available
built-in formats include ``jd``, ``mjd``, ``iso``, and so forth. Each
        format can have its own sub-formats.
For built-in numerical formats like ``jd`` or ``unix``, ``subfmt`` can
be one of 'float', 'long', 'decimal', 'str', or 'bytes'. Here, 'long'
uses ``numpy.longdouble`` for somewhat enhanced precision (with
the enhancement depending on platform), and 'decimal'
:class:`decimal.Decimal` for full precision. For 'str' and 'bytes', the
number of digits is also chosen such that time values are represented
accurately.
For built-in date-like string formats, one of 'date_hms', 'date_hm', or
'date' (or 'longdate_hms', etc., for 5-digit years in
`~astropy.time.TimeFITS`). For sub-formats including seconds, the
number of digits used for the fractional seconds is as set by
`~astropy.time.Time.precision`.
Parameters
----------
format : str
The format in which one wants the time values. Default: the current
format.
subfmt : str or None, optional
Value or wildcard pattern to select the sub-format in which the
values should be given. The default of '*' picks the first
available for a given format, i.e., 'float' or 'date_hms'.
If `None`, use the instance's ``out_subfmt``.
"""
# TODO: add a precision argument (but ensure it is keyword argument
# only, to make life easier for TimeDelta.to_value()).
if format not in self.FORMATS:
raise ValueError(f"format must be one of {list(self.FORMATS)}")
cache = self.cache["format"]
# Try to keep cache behaviour like it was in astropy < 4.0.
key = format if subfmt is None else (format, subfmt)
if key not in cache:
if format == self.format:
tm = self
else:
tm = self.replicate(format=format)
            # Some TimeFormat subclasses may not be able to handle being passed
            # an out_subfmt. This includes some core classes like
# TimeBesselianEpochString that do not have any allowed subfmts. But
# those do deal with `self.out_subfmt` internally, so if subfmt is
# the same, we do not pass it on.
kwargs = {}
if subfmt is not None and subfmt != tm.out_subfmt:
kwargs["out_subfmt"] = subfmt
try:
value = tm._time.to_value(parent=tm, **kwargs)
except TypeError as exc:
# Try validating subfmt, e.g. for formats like 'jyear_str' that
# do not implement out_subfmt in to_value() (because there are
# no allowed subformats). If subfmt is not valid this gives the
# same exception as would have occurred if the call to
# `to_value()` had succeeded.
tm._time._select_subfmts(subfmt)
# Subfmt was valid, so fall back to the original exception to see
# if it was lack of support for out_subfmt as a call arg.
if "unexpected keyword argument 'out_subfmt'" in str(exc):
raise ValueError(
f"to_value() method for format {format!r} does not "
"support passing a 'subfmt' argument"
) from None
else:
# Some unforeseen exception so raise.
raise
value = tm._shaped_like_input(value)
cache[key] = value
return cache[key]
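    # Illustrative usage of ``to_value`` (editor's sketch):
    #
    #     >>> t = Time("2010-01-01T00:00:00", scale="utc")      # doctest: +SKIP
    #     >>> t.to_value("mjd")                                 # doctest: +SKIP
    #     55197.0
    #     >>> t.to_value("iso", subfmt="date_hm")               # doctest: +SKIP
    #     '2010-01-01 00:00'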
@property
def value(self):
"""Time value(s) in current format."""
return self.to_value(self.format, None)
@property
def masked(self):
return self._time.masked
@property
def mask(self):
return self._time.mask
def insert(self, obj, values, axis=0):
"""
Insert values before the given indices in the column and return
a new `~astropy.time.Time` or `~astropy.time.TimeDelta` object.
The values to be inserted must conform to the rules for in-place setting
of ``Time`` objects (see ``Get and set values`` in the ``Time``
documentation).
The API signature matches the ``np.insert`` API, but is more limited.
The specification of insert index ``obj`` must be a single integer,
and the ``axis`` must be ``0`` for simple row insertion before the
index.
Parameters
----------
obj : int
Integer index before which ``values`` is inserted.
values : array-like
            Value(s) to insert. If the type of ``values`` is different
            from that of the column, ``values`` is converted to the matching type.
axis : int, optional
Axis along which to insert ``values``. Default is 0, which is the
only allowed value and will insert a row.
Returns
-------
out : `~astropy.time.Time` subclass
New time object with inserted value(s)
"""
# Validate inputs: obj arg is integer, axis=0, self is not a scalar, and
# input index is in bounds.
try:
idx0 = operator.index(obj)
except TypeError:
raise TypeError("obj arg must be an integer")
if axis != 0:
raise ValueError("axis must be 0")
if not self.shape:
raise TypeError(
f"cannot insert into scalar {self.__class__.__name__} object"
)
if abs(idx0) > len(self):
raise IndexError(
f"index {idx0} is out of bounds for axis 0 with size {len(self)}"
)
# Turn negative index into positive
if idx0 < 0:
idx0 = len(self) + idx0
# For non-Time object, use numpy to help figure out the length. (Note annoying
# case of a string input that has a length which is not the length we want).
if not isinstance(values, self.__class__):
values = np.asarray(values)
n_values = len(values) if values.shape else 1
# Finally make the new object with the correct length and set values for the
# three sections, before insert, the insert, and after the insert.
out = self.__class__.info.new_like(
[self], len(self) + n_values, name=self.info.name
)
out._time.jd1[:idx0] = self._time.jd1[:idx0]
out._time.jd2[:idx0] = self._time.jd2[:idx0]
# This uses the Time setting machinery to coerce and validate as necessary.
out[idx0 : idx0 + n_values] = values
out._time.jd1[idx0 + n_values :] = self._time.jd1[idx0:]
out._time.jd2[idx0 + n_values :] = self._time.jd2[idx0:]
return out
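    # Illustrative usage of ``insert`` (editor's sketch): insert one row
    # before index 1.
    #
    #     >>> t = Time(["2020-01-01", "2020-01-03"])            # doctest: +SKIP
    #     >>> t.insert(1, Time("2020-01-02"))                   # doctest: +SKIP
    #     <Time object: scale='utc' format='iso' value=['2020-01-01 00:00:00.000'
    #      '2020-01-02 00:00:00.000' '2020-01-03 00:00:00.000']>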
def __setitem__(self, item, value):
if not self.writeable:
if self.shape:
raise ValueError(
f"{self.__class__.__name__} object is read-only. Make a "
'copy() or set "writeable" attribute to True.'
)
else:
raise ValueError(
f"scalar {self.__class__.__name__} object is read-only."
)
# Any use of setitem results in immediate cache invalidation
del self.cache
# Setting invalidates transform deltas
for attr in ("_delta_tdb_tt", "_delta_ut1_utc"):
if hasattr(self, attr):
delattr(self, attr)
if value is np.ma.masked or value is np.nan:
self._time.jd2[item] = np.nan
return
value = self._make_value_equivalent(item, value)
# Finally directly set the jd1/2 values. Locations are known to match.
if self.scale is not None:
value = getattr(value, self.scale)
self._time.jd1[item] = value._time.jd1
self._time.jd2[item] = value._time.jd2
def isclose(self, other, atol=None):
"""Returns a boolean or boolean array where two Time objects are
element-wise equal within a time tolerance.
This evaluates the expression below::
abs(self - other) <= atol
Parameters
----------
other : `~astropy.time.Time`
Time object for comparison.
atol : `~astropy.units.Quantity` or `~astropy.time.TimeDelta`
Absolute tolerance for equality with units of time (e.g. ``u.s`` or
``u.day``). Default is two bits in the 128-bit JD time representation,
equivalent to about 40 picosecs.
"""
if atol is None:
# Note: use 2 bits instead of 1 bit based on experience in precision
# tests, since taking the difference with a UTC time means one has
# to do a scale change.
atol = 2 * np.finfo(float).eps * u.day
if not isinstance(atol, (u.Quantity, TimeDelta)):
raise TypeError(
"'atol' argument must be a Quantity or TimeDelta instance, got "
f"{atol.__class__.__name__} instead"
)
try:
# Separate these out so user sees where the problem is
dt = self - other
dt = abs(dt)
out = dt <= atol
except Exception as err:
raise TypeError(
"'other' argument must support subtraction with Time "
"and return a value that supports comparison with "
f"{atol.__class__.__name__}: {err}"
)
return out
def copy(self, format=None):
"""
Return a fully independent copy of the Time object, optionally changing
the format.
If ``format`` is supplied then the time format of the returned Time
object will be set accordingly, otherwise it will be unchanged from the
original.
In this method a full copy of the internal time arrays will be made.
The internal time arrays are normally not changeable by the user so in
most cases the ``replicate()`` method should be used.
Parameters
----------
format : str, optional
Time format of the copy.
Returns
-------
tm : Time object
Copy of this object
"""
return self._apply("copy", format=format)
def replicate(self, format=None, copy=False, cls=None):
"""
Return a replica of the Time object, optionally changing the format.
If ``format`` is supplied then the time format of the returned Time
object will be set accordingly, otherwise it will be unchanged from the
original.
If ``copy`` is set to `True` then a full copy of the internal time arrays
will be made. By default the replica will use a reference to the
original arrays when possible to save memory. The internal time arrays
are normally not changeable by the user so in most cases it should not
be necessary to set ``copy`` to `True`.
The convenience method copy() is available in which ``copy`` is `True`
by default.
Parameters
----------
format : str, optional
Time format of the replica.
copy : bool, optional
Return a true copy instead of using references where possible.
Returns
-------
tm : Time object
Replica of this object
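Examples
--------
Illustrative only; any format from ``Time.FORMATS`` could be used:
>>> from astropy.time import Time
>>> t = Time('2010-01-01 00:00:00')
>>> t_jd = t.replicate(format='jd')   # same times, presented as JD
>>> t_copy = t.replicate(copy=True)   # independent copy of the arrays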
"""
return self._apply("copy" if copy else "replicate", format=format, cls=cls)
def _apply(self, method, *args, format=None, cls=None, **kwargs):
"""Create a new time object, possibly applying a method to the arrays.
Parameters
----------
method : str or callable
If string, can be 'replicate' or the name of a relevant
`~numpy.ndarray` method. In the former case, a new time instance
with unchanged internal data is created, while in the latter the
method is applied to the internal ``jd1`` and ``jd2`` arrays, as
well as to possible ``location``, ``_delta_ut1_utc``, and
``_delta_tdb_tt`` arrays.
If a callable, it is directly applied to the above arrays.
Examples: 'copy', '__getitem__', 'reshape', `~numpy.broadcast_to`.
args : tuple
Any positional arguments for ``method``.
kwargs : dict
Any keyword arguments for ``method``. If the ``format`` keyword
argument is present, this will be used as the Time format of the
replica.
Examples
--------
Some ways this is used internally::
copy : ``_apply('copy')``
replicate : ``_apply('replicate')``
reshape : ``_apply('reshape', new_shape)``
index or slice : ``_apply('__getitem__', item)``
broadcast : ``_apply(np.broadcast, shape=new_shape)``
"""
new_format = self.format if format is None else format
if callable(method):
apply_method = lambda array: method(array, *args, **kwargs)
else:
if method == "replicate":
apply_method = None
else:
apply_method = operator.methodcaller(method, *args, **kwargs)
jd1, jd2 = self._time.jd1, self._time.jd2
if apply_method:
jd1 = apply_method(jd1)
jd2 = apply_method(jd2)
# Get a new instance of our class and set its attributes directly.
tm = super().__new__(cls or self.__class__)
tm._time = TimeJD(
jd1,
jd2,
self.scale,
precision=0,
in_subfmt="*",
out_subfmt="*",
from_jd=True,
)
# Optional ndarray attributes.
for attr in ("_delta_ut1_utc", "_delta_tdb_tt", "location"):
try:
val = getattr(self, attr)
except AttributeError:
continue
if apply_method:
# Apply the method to any value arrays (though skip if there is
# only an array scalar and the method would return a view,
# since in that case nothing would change).
if getattr(val, "shape", ()):
val = apply_method(val)
elif method == "copy" or method == "flatten":
# flatten should copy also for a single element array, but
# we cannot use it directly for array scalars, since it
# always returns a one-dimensional array. So, just copy.
val = copy.copy(val)
setattr(tm, attr, val)
# Copy other 'info' attr only if it has actually been defined and the
# time object is not a scalar (issue #10688).
# See PR #3898 for further explanation and justification, along
# with Quantity.__array_finalize__
if "info" in self.__dict__:
tm.info = self.info
# Make the new internal _time object corresponding to the format
# in the copy. If the format is unchanged this process is lightweight
# and does not create any new arrays.
if new_format not in tm.FORMATS:
raise ValueError(f"format must be one of {list(tm.FORMATS)}")
NewFormat = tm.FORMATS[new_format]
tm._time = NewFormat(
tm._time.jd1,
tm._time.jd2,
tm._time._scale,
precision=self.precision,
in_subfmt=NewFormat._get_allowed_subfmt(self.in_subfmt),
out_subfmt=NewFormat._get_allowed_subfmt(self.out_subfmt),
from_jd=True,
)
tm._format = new_format
tm.SCALES = self.SCALES
return tm
def __copy__(self):
"""
Overrides the default behavior of the `copy.copy` function in
the python stdlib to behave like `Time.copy`. Does *not* make a
copy of the JD arrays - only copies by reference.
"""
return self.replicate()
def __deepcopy__(self, memo):
"""
Overrides the default behavior of the `copy.deepcopy` function
in the python stdlib to behave like `Time.copy`. Does make a
copy of the JD arrays.
"""
return self.copy()
def _advanced_index(self, indices, axis=None, keepdims=False):
"""Turn argmin, argmax output into an advanced index.
Argmin, argmax output contains indices along a given axis in an array
shaped like the other dimensions. To use this to get values at the
correct location, a list is constructed in which the other axes are
indexed sequentially. If ``keepdims`` is ``True``, the net result is
the same as constructing an index grid with ``np.ogrid`` and then
replacing the ``axis`` item with ``indices``, with its shape expanded
at ``axis``. If ``keepdims`` is ``False``, the result is the same but
with the ``axis`` dimension removed from all list entries.
If ``axis`` is ``None``, this calls :func:`~numpy.unravel_index`.
Parameters
----------
indices : array
Output of argmin or argmax.
axis : int or None
axis along which argmin or argmax was used.
keepdims : bool
Whether to construct indices that keep or remove the axis along
which argmin or argmax was used. Default: ``False``.
Returns
-------
advanced_index : list of arrays
Suitable for use as an advanced index.
"""
if axis is None:
return np.unravel_index(indices, self.shape)
ndim = self.ndim
if axis < 0:
axis = axis + ndim
if keepdims and indices.ndim < self.ndim:
indices = np.expand_dims(indices, axis)
index = [
indices
if i == axis
else np.arange(s).reshape(
(1,) * (i if keepdims or i < axis else i - 1)
+ (s,)
+ (1,) * (ndim - i - (1 if keepdims or i > axis else 2))
)
for i, s in enumerate(self.shape)
]
return tuple(index)
def argmin(self, axis=None, out=None):
"""Return indices of the minimum values along the given axis.
This is similar to :meth:`~numpy.ndarray.argmin`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used. See :func:`~numpy.argmin` for detailed documentation.
"""
# First get the minimum at normal precision.
jd1, jd2 = self.jd1, self.jd2
approx = np.min(jd1 + jd2, axis, keepdims=True)
# Approx is very close to the true minimum, and by subtracting it at
# full precision, all numbers near 0 can be represented correctly,
# so we can be sure we get the true minimum.
# The below is effectively what would be done for
# dt = (self - self.__class__(approx, format='jd')).jd
# which translates to:
# approx_jd1, approx_jd2 = day_frac(approx, 0.)
# dt = (self.jd1 - approx_jd1) + (self.jd2 - approx_jd2)
dt = (jd1 - approx) + jd2
return dt.argmin(axis, out)
def argmax(self, axis=None, out=None):
"""Return indices of the maximum values along the given axis.
This is similar to :meth:`~numpy.ndarray.argmax`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used. See :func:`~numpy.argmax` for detailed documentation.
"""
# For procedure, see comment on argmin.
jd1, jd2 = self.jd1, self.jd2
approx = np.max(jd1 + jd2, axis, keepdims=True)
dt = (jd1 - approx) + jd2
return dt.argmax(axis, out)
def argsort(self, axis=-1, kind="stable"):
"""Returns the indices that would sort the time array.
This is similar to :meth:`~numpy.ndarray.argsort`, but adapted to ensure that
the full precision given by the two doubles ``jd1`` and ``jd2`` is used, and
that corresponding attributes are copied. Internally, it uses
:func:`~numpy.lexsort`, and hence no sort method can be chosen.
Parameters
----------
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the last
axis.
kind : 'stable', optional
Sorting is done with :func:`~numpy.lexsort` so this argument is ignored, but
kept for compatibility with :func:`~numpy.argsort`. The sorting is stable,
meaning that the order of equal elements is preserved.
Returns
-------
indices : ndarray
An array of indices that sort the time array.
"""
# For procedure, see comment on argmin.
jd1, jd2 = self.jd1, self.jd2
approx = jd1 + jd2
remainder = (jd1 - approx) + jd2
if axis is None:
return np.lexsort((remainder.ravel(), approx.ravel()))
else:
return np.lexsort(keys=(remainder, approx), axis=axis)
def min(self, axis=None, out=None, keepdims=False):
"""Minimum along a given axis.
This is similar to :meth:`~numpy.ndarray.min`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied.
Note that the ``out`` argument is present only for compatibility with
``np.min``; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError(
"Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``."
)
return self[self._advanced_index(self.argmin(axis), axis, keepdims)]
def max(self, axis=None, out=None, keepdims=False):
"""Maximum along a given axis.
This is similar to :meth:`~numpy.ndarray.max`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied.
Note that the ``out`` argument is present only for compatibility with
``np.max``; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError(
"Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``."
)
return self[self._advanced_index(self.argmax(axis), axis, keepdims)]
def ptp(self, axis=None, out=None, keepdims=False):
"""Peak to peak (maximum - minimum) along a given axis.
This is similar to :meth:`~numpy.ndarray.ptp`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used.
Note that the ``out`` argument is present only for compatibility with
`~numpy.ptp`; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError(
"Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``."
)
return self.max(axis, keepdims=keepdims) - self.min(axis, keepdims=keepdims)
def sort(self, axis=-1):
"""Return a copy sorted along the specified axis.
This is similar to :meth:`~numpy.ndarray.sort`, but internally uses
indexing with :func:`~numpy.lexsort` to ensure that the full precision
given by the two doubles ``jd1`` and ``jd2`` is kept, and that
corresponding attributes are properly sorted and copied as well.
Parameters
----------
axis : int or None
Axis to be sorted. If ``None``, the flattened array is sorted.
By default, sort over the last axis.
"""
return self[self._advanced_index(self.argsort(axis), axis, keepdims=True)]
def mean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
"""Mean along a given axis.
This is similar to :meth:`~numpy.ndarray.mean`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2`` is
used, and that corresponding attributes are copied.
Note that the ``out`` argument is present only for compatibility with
``np.mean``; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
Similarly, the ``dtype`` argument is also present for compatibility
only; it has no meaning for `Time`.
Parameters
----------
axis : None or int or tuple of ints, optional
Axis or axes along which the means are computed. The default is to
compute the mean of the flattened array.
dtype : None
Only present for compatibility with :meth:`~numpy.ndarray.mean`,
must be `None`.
out : None
Only present for compatibility with :meth:`~numpy.ndarray.mean`,
must be `None`.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
where : array_like of bool, optional
Elements to include in the mean. See `~numpy.ufunc.reduce` for
details.
Returns
-------
m : Time
A new Time instance containing the mean values
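Examples
--------
A short sketch (the two input times are arbitrary):
>>> from astropy.time import Time
>>> t = Time(['2020-01-01', '2020-01-03'])
>>> t_mid = t.mean()  # midpoint of the two times, i.e. 2020-01-02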
"""
if dtype is not None:
raise ValueError("Cannot set ``dtype`` on `Time` instances")
if out is not None:
raise ValueError(
"Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``."
)
where = where & ~self.mask
where_broadcasted = np.broadcast_to(where, self.shape)
kwargs = dict(
axis=axis,
keepdims=keepdims,
where=where,
)
divisor = np.sum(where_broadcasted, axis=axis, keepdims=keepdims)
if np.any(divisor == 0):
raise ValueError(
"Mean over zero elements is not supported as it would give an undefined"
" time;see issue https://github.com/astropy/astropy/issues/6509"
)
jd1, jd2 = day_frac(
val1=np.sum(np.ma.getdata(self.jd1), **kwargs),
val2=np.sum(np.ma.getdata(self.jd2), **kwargs),
divisor=divisor,
)
result = type(self)(
val=jd1,
val2=jd2,
format="jd",
scale=self.scale,
copy=False,
)
result.format = self.format
return result
@property
def cache(self):
"""
Return the cache associated with this instance.
"""
return self._time.cache
@cache.deleter
def cache(self):
del self._time.cache
def __getattr__(self, attr):
"""
Get dynamic attributes to output format or do timescale conversion.
"""
if attr in self.SCALES and self.scale is not None:
cache = self.cache["scale"]
if attr not in cache:
if attr == self.scale:
tm = self
else:
tm = self.replicate()
tm._set_scale(attr)
if tm.shape:
# Prevent future modification of cached array-like object
tm.writeable = False
cache[attr] = tm
return cache[attr]
elif attr in self.FORMATS:
return self.to_value(attr, subfmt=None)
elif attr in TIME_SCALES: # allowed ones done above (self.SCALES)
if self.scale is None:
raise ScaleValueError(
"Cannot convert TimeDelta with "
"undefined scale to any defined scale."
)
else:
raise ScaleValueError(
f"Cannot convert {self.__class__.__name__} with scale "
f"'{self.scale}' to scale '{attr}'"
)
else:
# Should raise AttributeError
return self.__getattribute__(attr)
def __dir__(self):
return sorted(set(super().__dir__()) | set(self.SCALES) | set(self.FORMATS))
def _match_shape(self, val):
"""
Ensure that `val` is matched to length of self. If val has length 1
then broadcast, otherwise cast to double and make sure shape matches.
"""
val = _make_array(val, copy=True) # be conservative and copy
if val.size > 1 and val.shape != self.shape:
try:
# check the value can be broadcast to the shape of self.
val = np.broadcast_to(val, self.shape, subok=True)
except Exception:
raise ValueError(
"Attribute shape must match or be broadcastable to that of "
"Time object. Typically, give either a single value or "
"one for each time."
)
return val
def _time_comparison(self, other, op):
"""If other is of same class as self, compare difference in self.scale.
Otherwise, return NotImplemented.
"""
if other.__class__ is not self.__class__:
try:
other = self.__class__(other, scale=self.scale)
except Exception:
# Let other have a go.
return NotImplemented
if (
self.scale is not None
and self.scale not in other.SCALES
or other.scale is not None
and other.scale not in self.SCALES
):
# Other will also not be able to do it, so raise a TypeError
# immediately, allowing us to explain why it doesn't work.
raise TypeError(
f"Cannot compare {self.__class__.__name__} instances with "
f"scales '{self.scale}' and '{other.scale}'"
)
if self.scale is not None and other.scale is not None:
other = getattr(other, self.scale)
return op((self.jd1 - other.jd1) + (self.jd2 - other.jd2), 0.0)
def __lt__(self, other):
return self._time_comparison(other, operator.lt)
def __le__(self, other):
return self._time_comparison(other, operator.le)
def __eq__(self, other):
"""
If other is an incompatible object for comparison, return `False`.
Otherwise, return `True` if the time difference between self and
other is zero.
"""
return self._time_comparison(other, operator.eq)
def __ne__(self, other):
"""
If other is an incompatible object for comparison, return `True`.
Otherwise, return `False` if the time difference between self and
other is zero.
"""
return self._time_comparison(other, operator.ne)
def __gt__(self, other):
return self._time_comparison(other, operator.gt)
def __ge__(self, other):
return self._time_comparison(other, operator.ge)
class Time(TimeBase):
"""
Represent and manipulate times and dates for astronomy.
A `Time` object is initialized with one or more times in the ``val``
argument. The input times in ``val`` must conform to the specified
``format`` and must correspond to the specified time ``scale``. The
optional ``val2`` time input should be supplied only for numeric input
formats (e.g. JD) where very high precision (better than 64-bit precision)
is required.
The allowed values for ``format`` can be listed with::
>>> list(Time.FORMATS)
['jd', 'mjd', 'decimalyear', 'unix', 'unix_tai', 'cxcsec', 'gps', 'plot_date',
'stardate', 'datetime', 'ymdhms', 'iso', 'isot', 'yday', 'datetime64',
'fits', 'byear', 'jyear', 'byear_str', 'jyear_str']
See also: https://docs.astropy.org/en/stable/time/
Parameters
----------
val : sequence, ndarray, number, str, bytes, or `~astropy.time.Time` object
Value(s) to initialize the time or times. Bytes are decoded as ascii.
val2 : sequence, ndarray, or number; optional
Value(s) to initialize the time or times. Only used for numerical
input, to help preserve precision.
format : str, optional
Format of input value(s)
scale : str, optional
Time scale of input value(s), must be one of the following:
('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc')
precision : int, optional
Digits of precision in string representation of time
in_subfmt : str, optional
Unix glob to select subformats for parsing input times
out_subfmt : str, optional
Unix glob to select subformat for outputting times
location : `~astropy.coordinates.EarthLocation` or tuple, optional
If given as a tuple, it should be able to initialize an
EarthLocation instance, i.e., either contain 3 items with units of
length for geocentric coordinates, or contain a longitude, latitude,
and an optional height for geodetic coordinates.
Can be a single location, or one for each input time.
If not given, assumed to be the center of the Earth for time scale
transformations to and from the solar-system barycenter.
copy : bool, optional
Make a copy of the input values
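Examples
--------
Illustrative constructions (the values themselves are arbitrary):
>>> from astropy.time import Time
>>> t = Time('2020-01-01T12:00:00', format='isot', scale='utc')
>>> t_arr = Time([51544.0, 51545.0], format='mjd')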
"""
SCALES = TIME_SCALES
"""List of time scales"""
FORMATS = TIME_FORMATS
"""Dict of time formats"""
def __new__(
cls,
val,
val2=None,
format=None,
scale=None,
precision=None,
in_subfmt=None,
out_subfmt=None,
location=None,
copy=False,
):
if isinstance(val, Time):
self = val.replicate(format=format, copy=copy, cls=cls)
else:
self = super().__new__(cls)
return self
def __init__(
self,
val,
val2=None,
format=None,
scale=None,
precision=None,
in_subfmt=None,
out_subfmt=None,
location=None,
copy=False,
):
if location is not None:
from astropy.coordinates import EarthLocation
if isinstance(location, EarthLocation):
self.location = location
else:
self.location = EarthLocation(*location)
if self.location.size == 1:
self.location = self.location.squeeze()
else:
if not hasattr(self, "location"):
self.location = None
if isinstance(val, Time):
# Update _time formatting parameters if explicitly specified
if precision is not None:
self._time.precision = precision
if in_subfmt is not None:
self._time.in_subfmt = in_subfmt
if out_subfmt is not None:
self._time.out_subfmt = out_subfmt
self.SCALES = TIME_TYPES[self.scale]
if scale is not None:
self._set_scale(scale)
else:
self._init_from_vals(
val, val2, format, scale, copy, precision, in_subfmt, out_subfmt
)
self.SCALES = TIME_TYPES[self.scale]
if self.location is not None and (
self.location.size > 1 and self.location.shape != self.shape
):
try:
# check the location can be broadcast to self's shape.
self.location = np.broadcast_to(self.location, self.shape, subok=True)
except Exception as err:
raise ValueError(
f"The location with shape {self.location.shape} cannot be "
f"broadcast against time with shape {self.shape}. "
"Typically, either give a single location or one for each time."
) from err
def _make_value_equivalent(self, item, value):
"""Coerce setitem value into an equivalent Time object."""
# If there is a vector location then broadcast to the Time shape
# and then select with ``item``
if self.location is not None and self.location.shape:
self_location = np.broadcast_to(self.location, self.shape, subok=True)[item]
else:
self_location = self.location
if isinstance(value, Time):
# Make sure locations are compatible. Location can be either None or
# a Location object.
if self_location is None and value.location is None:
match = True
elif (self_location is None and value.location is not None) or (
self_location is not None and value.location is None
):
match = False
else:
match = np.all(self_location == value.location)
if not match:
raise ValueError(
"cannot set to Time with different location: expected "
f"location={self_location} and got location={value.location}"
)
else:
try:
value = self.__class__(value, scale=self.scale, location=self_location)
except Exception:
try:
value = self.__class__(
value,
scale=self.scale,
format=self.format,
location=self_location,
)
except Exception as err:
raise ValueError(
f"cannot convert value to a compatible Time object: {err}"
)
return value
@classmethod
def now(cls):
"""
Creates a new object corresponding to the instant in time this
method is called.
.. note::
"Now" is determined using the `~datetime.datetime.utcnow`
function, so its accuracy and precision are determined by that
function. Generally that means it is set by the accuracy of
your system clock.
Returns
-------
nowtime : :class:`~astropy.time.Time`
A new `Time` object (or a subclass of `Time` if this is called from
such a subclass) at the current time.
"""
# call `utcnow` immediately to be sure it's ASAP
dtnow = datetime.utcnow()
return cls(val=dtnow, format="datetime", scale="utc")
info = TimeInfo()
@classmethod
def strptime(cls, time_string, format_string, **kwargs):
"""
Parse a string to a Time according to a format specification.
See `time.strptime` documentation for format specification.
>>> Time.strptime('2012-Jun-30 23:59:60', '%Y-%b-%d %H:%M:%S')
<Time object: scale='utc' format='isot' value=2012-06-30T23:59:60.000>
Parameters
----------
time_string : str, sequence, or ndarray
Objects containing time data of type string
format_string : str
String specifying format of time_string.
kwargs : dict
Any keyword arguments for ``Time``. If the ``format`` keyword
argument is present, this will be used as the Time format.
Returns
-------
time_obj : `~astropy.time.Time`
A new `~astropy.time.Time` object corresponding to the input
``time_string``.
"""
time_array = np.asarray(time_string)
if time_array.dtype.kind not in ("U", "S"):
raise TypeError(
"Expected type is string, a bytes-like object or a sequence "
f"of these. Got dtype '{time_array.dtype.kind}'"
)
to_string = (
str
if time_array.dtype.kind == "U"
else lambda x: str(x.item(), encoding="ascii")
)
iterator = np.nditer([time_array, None], op_dtypes=[time_array.dtype, "U30"])
for time, formatted in iterator:
tt, fraction = _strptime._strptime(to_string(time), format_string)
time_tuple = tt[:6] + (fraction,)
formatted[...] = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}.{:06}".format(
*time_tuple
)
format = kwargs.pop("format", None)
out = cls(*iterator.operands[1:], format="isot", **kwargs)
if format is not None:
out.format = format
return out
def strftime(self, format_spec):
"""
Convert Time to a string or a numpy.array of strings according to a
format specification.
See `time.strftime` documentation for format specification.
Parameters
----------
format_spec : str
Format definition of return string.
Returns
-------
formatted : str or numpy.array
String or numpy.array of strings formatted according to the given
format string.
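Examples
--------
Illustrative only; format codes follow `time.strftime`:
>>> from astropy.time import Time
>>> t = Time('2020-01-01T12:34:56')
>>> s = t.strftime('%Y-%m-%d %H:%M:%S')  # '2020-01-01 12:34:56'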
"""
formatted_strings = []
for sk in self.replicate("iso")._time.str_kwargs():
date_tuple = date(sk["year"], sk["mon"], sk["day"]).timetuple()
datetime_tuple = (
sk["year"],
sk["mon"],
sk["day"],
sk["hour"],
sk["min"],
sk["sec"],
date_tuple[6],
date_tuple[7],
-1,
)
fmtd_str = format_spec
if "%f" in fmtd_str:
fmtd_str = fmtd_str.replace(
"%f",
"{frac:0{precision}}".format(
frac=sk["fracsec"], precision=self.precision
),
)
fmtd_str = strftime(fmtd_str, datetime_tuple)
formatted_strings.append(fmtd_str)
if self.isscalar:
return formatted_strings[0]
else:
return np.array(formatted_strings).reshape(self.shape)
def light_travel_time(
self, skycoord, kind="barycentric", location=None, ephemeris=None
):
"""Light travel time correction to the barycentre or heliocentre.
The frame transformations used to calculate the location of the solar
system barycentre and the heliocentre rely on the erfa routine epv00,
which is consistent with the JPL DE405 ephemeris to an accuracy of
11.2 km, corresponding to a light travel time of 4 microseconds.
The routine assumes the source(s) are at large distance, i.e., neglects
finite-distance effects.
Parameters
----------
skycoord : `~astropy.coordinates.SkyCoord`
The sky location to calculate the correction for.
kind : str, optional
``'barycentric'`` (default) or ``'heliocentric'``
location : `~astropy.coordinates.EarthLocation`, optional
The location of the observatory to calculate the correction for.
If no location is given, the ``location`` attribute of the Time
object is used
ephemeris : str, optional
Solar system ephemeris to use (e.g., 'builtin', 'jpl'). By default,
use the one set with ``astropy.coordinates.solar_system_ephemeris.set``.
For more information, see `~astropy.coordinates.solar_system_ephemeris`.
Returns
-------
time_offset : `~astropy.time.TimeDelta`
The time offset between the barycentre or heliocentre and Earth,
in TDB seconds. Should be added to the original time to get the
time in the Solar system barycentre or the heliocentre.
The time conversion to BJD will then also include the relativistic correction.
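Examples
--------
A sketch of typical usage (the site coordinates and target are arbitrary
assumptions; the frame transformations may require IERS data)::
    from astropy.coordinates import EarthLocation, SkyCoord
    from astropy.time import Time
    import astropy.units as u
    site = EarthLocation(lon=-17.88 * u.deg, lat=28.76 * u.deg, height=2300 * u.m)
    t = Time('2020-01-01T00:00:00', location=site)
    target = SkyCoord(10 * u.deg, 20 * u.deg)
    ltt_bary = t.light_travel_time(target, 'barycentric')
    t_bary = t.tdb + ltt_bary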
"""
if kind.lower() not in ("barycentric", "heliocentric"):
raise ValueError(
"'kind' parameter must be one of 'heliocentric' or 'barycentric'"
)
if location is None:
if self.location is None:
raise ValueError(
"An EarthLocation needs to be set or passed in to calculate bary- "
"or heliocentric corrections"
)
location = self.location
from astropy.coordinates import (
GCRS,
HCRS,
ICRS,
CartesianRepresentation,
UnitSphericalRepresentation,
solar_system_ephemeris,
)
# ensure sky location is ICRS compatible
if not skycoord.is_transformable_to(ICRS()):
raise ValueError("Given skycoord is not transformable to the ICRS")
# get location of observatory in ITRS coordinates at this Time
try:
itrs = location.get_itrs(obstime=self)
except Exception:
raise ValueError(
"Supplied location does not have a valid `get_itrs` method"
)
with solar_system_ephemeris.set(ephemeris):
if kind.lower() == "heliocentric":
# convert to heliocentric coordinates, aligned with ICRS
cpos = itrs.transform_to(HCRS(obstime=self)).cartesian.xyz
else:
# first we need to convert to GCRS coordinates with the correct
# obstime, since ICRS coordinates have no frame time
gcrs_coo = itrs.transform_to(GCRS(obstime=self))
# convert to barycentric (BCRS) coordinates, aligned with ICRS
cpos = gcrs_coo.transform_to(ICRS()).cartesian.xyz
# get unit ICRS vector to star
spos = (
skycoord.icrs.represent_as(UnitSphericalRepresentation)
.represent_as(CartesianRepresentation)
.xyz
)
# Move X,Y,Z to last dimension, to enable possible broadcasting below.
cpos = np.rollaxis(cpos, 0, cpos.ndim)
spos = np.rollaxis(spos, 0, spos.ndim)
# calculate light travel time correction
tcor_val = (spos * cpos).sum(axis=-1) / const.c
return TimeDelta(tcor_val, scale="tdb")
def earth_rotation_angle(self, longitude=None):
"""Calculate local Earth rotation angle.
Parameters
----------
longitude : `~astropy.units.Quantity`, `~astropy.coordinates.EarthLocation`, str, or None; optional
The longitude on the Earth at which to compute the Earth rotation
angle (taken from a location as needed). If `None` (default), taken
from the ``location`` attribute of the Time instance. If the special
string 'tio', the result will be relative to the Terrestrial
Intermediate Origin (TIO) (i.e., the output of `~erfa.era00`).
Returns
-------
`~astropy.coordinates.Longitude`
Local Earth rotation angle with units of hourangle.
See Also
--------
astropy.time.Time.sidereal_time
References
----------
IAU 2006 NFA Glossary
(currently located at: https://syrte.obspm.fr/iauWGnfa/NFA_Glossary.html)
Notes
-----
The difference between apparent sidereal time and Earth rotation angle
is the equation of the origins, which is the angle between the Celestial
Intermediate Origin (CIO) and the equinox. Applying apparent sidereal
time to the hour angle yields the true apparent Right Ascension with
respect to the equinox, while applying the Earth rotation angle yields
the intermediate (CIRS) Right Ascension with respect to the CIO.
The result includes the TIO locator (s'), which positions the Terrestrial
Intermediate Origin on the equator of the Celestial Intermediate Pole (CIP)
and is rigorously corrected for polar motion
(except when ``longitude='tio'``).
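Examples
--------
Sketch only (computing the angle needs UT1 and hence may require
IERS data)::
    import astropy.units as u
    from astropy.time import Time
    t = Time('2020-01-01T00:00:00')
    era = t.earth_rotation_angle(longitude=0 * u.deg)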
"""
if isinstance(longitude, str) and longitude == "tio":
longitude = 0
include_tio = False
else:
include_tio = True
return self._sid_time_or_earth_rot_ang(
longitude=longitude,
function=erfa.era00,
scales=("ut1",),
include_tio=include_tio,
)
def sidereal_time(self, kind, longitude=None, model=None):
"""Calculate sidereal time.
Parameters
----------
kind : str
``'mean'`` or ``'apparent'``, i.e., accounting for precession
only, or also for nutation.
longitude : `~astropy.units.Quantity`, `~astropy.coordinates.EarthLocation`, str, or None; optional
The longitude on the Earth at which to compute the Earth rotation
angle (taken from a location as needed). If `None` (default), taken
from the ``location`` attribute of the Time instance. If the special
string 'greenwich' or 'tio', the result will be relative to longitude
0 for models before 2000, and relative to the Terrestrial Intermediate
Origin (TIO) for later ones (i.e., the output of the relevant ERFA
function that calculates greenwich sidereal time).
model : str or None; optional
Precession (and nutation) model to use. The available ones are:
- {0}: {1}
- {2}: {3}
If `None` (default), the last (most recent) one from the appropriate
list above is used.
Returns
-------
`~astropy.coordinates.Longitude`
Local sidereal time, with units of hourangle.
See Also
--------
astropy.time.Time.earth_rotation_angle
References
----------
IAU 2006 NFA Glossary
(currently located at: https://syrte.obspm.fr/iauWGnfa/NFA_Glossary.html)
Notes
-----
The difference between apparent sidereal time and Earth rotation angle
is the equation of the origins, which is the angle between the Celestial
Intermediate Origin (CIO) and the equinox. Applying apparent sidereal
time to the hour angle yields the true apparent Right Ascension with
respect to the equinox, while applying the Earth rotation angle yields
the intermediate (CIRS) Right Ascension with respect to the CIO.
For the IAU precession models from 2000 onwards, the result includes the
TIO locator (s'), which positions the Terrestrial Intermediate Origin on
the equator of the Celestial Intermediate Pole (CIP) and is rigorously
corrected for polar motion (except when ``longitude='tio'`` or ``'greenwich'``).
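Examples
--------
Sketch only (apparent sidereal time needs UT1 and hence may require
IERS data)::
    import astropy.units as u
    from astropy.time import Time
    t = Time('2020-01-01T00:00:00')
    lst = t.sidereal_time('apparent', longitude=120 * u.deg)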
""" # (docstring is formatted below)
if kind.lower() not in SIDEREAL_TIME_MODELS:
raise ValueError(
"The kind of sidereal time has to be "
+ " or ".join(sorted(SIDEREAL_TIME_MODELS))
)
available_models = SIDEREAL_TIME_MODELS[kind.lower()]
if model is None:
model = sorted(available_models)[-1]
elif model.upper() not in available_models:
raise ValueError(
f"Model {model} not implemented for {kind} sidereal time; "
f"available models are {sorted(available_models)}"
)
model_kwargs = available_models[model.upper()]
if isinstance(longitude, str) and longitude in ("tio", "greenwich"):
longitude = 0
model_kwargs = model_kwargs.copy()
model_kwargs["include_tio"] = False
return self._sid_time_or_earth_rot_ang(longitude=longitude, **model_kwargs)
if isinstance(sidereal_time.__doc__, str):
sidereal_time.__doc__ = sidereal_time.__doc__.format(
"apparent",
sorted(SIDEREAL_TIME_MODELS["apparent"]),
"mean",
sorted(SIDEREAL_TIME_MODELS["mean"]),
)
def _sid_time_or_earth_rot_ang(self, longitude, function, scales, include_tio=True):
"""Calculate a local sidereal time or Earth rotation angle.
Parameters
----------
longitude : `~astropy.units.Quantity`, `~astropy.coordinates.EarthLocation`, str, or None; optional
The longitude on the Earth at which to compute the Earth rotation
angle (taken from a location as needed). If `None` (default), taken
from the ``location`` attribute of the Time instance.
function : callable
The ERFA function to use.
scales : tuple of str
The time scales that the function requires on input.
include_tio : bool, optional
Whether to include the TIO locator corrected for polar motion.
Should be `False` for pre-2000 IAU models. Default: `True`.
Returns
-------
`~astropy.coordinates.Longitude`
Local sidereal time or Earth rotation angle, with units of hourangle.
"""
from astropy.coordinates import EarthLocation, Longitude
from astropy.coordinates.builtin_frames.utils import get_polar_motion
from astropy.coordinates.matrix_utilities import rotation_matrix
if longitude is None:
if self.location is None:
raise ValueError(
"No longitude is given but the location for "
"the Time object is not set."
)
longitude = self.location.lon
elif isinstance(longitude, EarthLocation):
longitude = longitude.lon
else:
# Sanity check on input; default unit is degree.
longitude = Longitude(longitude, u.degree, copy=False)
theta = self._call_erfa(function, scales)
if include_tio:
# TODO: this duplicates part of coordinates.erfa_astrom.ErfaAstrom.apio;
# maybe possible to factor out to one or the other.
sp = self._call_erfa(erfa.sp00, ("tt",))
xp, yp = get_polar_motion(self)
# Form the rotation matrix, CIRS to apparent [HA,Dec].
r = (
rotation_matrix(longitude, "z")
@ rotation_matrix(-yp, "x", unit=u.radian)
@ rotation_matrix(-xp, "y", unit=u.radian)
@ rotation_matrix(theta + sp, "z", unit=u.radian)
)
# Solve for angle.
angle = np.arctan2(r[..., 0, 1], r[..., 0, 0]) << u.radian
else:
angle = longitude + (theta << u.radian)
return Longitude(angle, u.hourangle)
def _call_erfa(self, function, scales):
# TODO: allow erfa functions to be used on Time with __array_ufunc__.
erfa_parameters = [
getattr(getattr(self, scale)._time, jd_part)
for scale in scales
for jd_part in ("jd1", "jd2_filled")
]
result = function(*erfa_parameters)
if self.masked:
result[self.mask] = np.nan
return result
def get_delta_ut1_utc(self, iers_table=None, return_status=False):
"""Find UT1 - UTC differences by interpolating in IERS Table.
Parameters
----------
iers_table : `~astropy.utils.iers.IERS`, optional
Table containing UT1-UTC differences from IERS Bulletins A
and/or B. Default: `~astropy.utils.iers.earth_orientation_table`
(which in turn defaults to the combined version provided by
`~astropy.utils.iers.IERS_Auto`).
return_status : bool
Whether to return status values. If `False` (default), iers
raises `IndexError` if any time is out of the range
covered by the IERS table.
Returns
-------
ut1_utc : float or float array
UT1-UTC, interpolated in IERS Table
status : int or int array
Status values (if ``return_status=True``)::
``astropy.utils.iers.FROM_IERS_B``
``astropy.utils.iers.FROM_IERS_A``
``astropy.utils.iers.FROM_IERS_A_PREDICTION``
``astropy.utils.iers.TIME_BEFORE_IERS_RANGE``
``astropy.utils.iers.TIME_BEYOND_IERS_RANGE``
Notes
-----
In normal usage, UT1-UTC differences are calculated automatically
on the first instance ut1 is needed.
Examples
--------
To check in code whether any times are before the IERS table range::
>>> from astropy.utils.iers import TIME_BEFORE_IERS_RANGE
>>> t = Time(['1961-01-01', '2000-01-01'], scale='utc')
>>> delta, status = t.get_delta_ut1_utc(return_status=True) # doctest: +REMOTE_DATA
>>> status == TIME_BEFORE_IERS_RANGE # doctest: +REMOTE_DATA
array([ True, False]...)
"""
if iers_table is None:
from astropy.utils.iers import earth_orientation_table
iers_table = earth_orientation_table.get()
return iers_table.ut1_utc(self.utc, return_status=return_status)
# Property for ERFA DUT arg = UT1 - UTC
def _get_delta_ut1_utc(self, jd1=None, jd2=None):
"""
Get ERFA DUT arg = UT1 - UTC. This getter takes optional jd1 and
jd2 args because it gets called that way when converting time scales.
If delta_ut1_utc is not yet set, this will interpolate them from
the IERS table.
"""
# Sec. 4.3.1: the arg DUT is the quantity delta_UT1 = UT1 - UTC in
# seconds. It is obtained from tables published by the IERS.
if not hasattr(self, "_delta_ut1_utc"):
from astropy.utils.iers import earth_orientation_table
iers_table = earth_orientation_table.get()
# jd1, jd2 are normally set (see above), except if delta_ut1_utc
# is accessed directly; ensure we behave as expected for that case
if jd1 is None:
self_utc = self.utc
jd1, jd2 = self_utc._time.jd1, self_utc._time.jd2_filled
scale = "utc"
else:
scale = self.scale
# interpolate UT1-UTC in IERS table
delta = iers_table.ut1_utc(jd1, jd2)
# if we interpolated using UT1 jds, we may be off by one
# second near leap seconds (and very slightly off elsewhere)
if scale == "ut1":
# calculate UTC using the offset we got; the ERFA routine
# is tolerant of leap seconds, so will do this right
jd1_utc, jd2_utc = erfa.ut1utc(jd1, jd2, delta.to_value(u.s))
# calculate a better estimate using the nearly correct UTC
delta = iers_table.ut1_utc(jd1_utc, jd2_utc)
self._set_delta_ut1_utc(delta)
return self._delta_ut1_utc
def _set_delta_ut1_utc(self, val):
del self.cache
if hasattr(val, "to"): # Matches Quantity but also TimeDelta.
val = val.to(u.second).value
val = self._match_shape(val)
self._delta_ut1_utc = val
# Note can't use @property because _get_delta_tdb_tt is explicitly
# called with the optional jd1 and jd2 args.
delta_ut1_utc = property(_get_delta_ut1_utc, _set_delta_ut1_utc)
"""UT1 - UTC time scale offset"""
# Property for ERFA DTR arg = TDB - TT
def _get_delta_tdb_tt(self, jd1=None, jd2=None):
if not hasattr(self, "_delta_tdb_tt"):
# If jd1 and jd2 are not provided (which is the case for property
# attribute access) then require that the time scale is TT or TDB.
# Otherwise the computations here are not correct.
if jd1 is None or jd2 is None:
if self.scale not in ("tt", "tdb"):
raise ValueError(
"Accessing the delta_tdb_tt attribute is only "
"possible for TT or TDB time scales"
)
else:
jd1 = self._time.jd1
jd2 = self._time.jd2_filled
# First go from the current input time (which is either
# TDB or TT) to an approximate UT1. Since TT and TDB are
# pretty close (few msec?), assume TT. Similarly, since the
# UT1 terms are very small, use UTC instead of UT1.
njd1, njd2 = erfa.tttai(jd1, jd2)
njd1, njd2 = erfa.taiutc(njd1, njd2)
# subtract 0.5, so UT is fraction of the day from midnight
ut = day_frac(njd1 - 0.5, njd2)[1]
if self.location is None:
# Assume geocentric.
self._delta_tdb_tt = erfa.dtdb(jd1, jd2, ut, 0.0, 0.0, 0.0)
else:
location = self.location
# Geodetic params needed for d_tdb_tt()
lon = location.lon
rxy = np.hypot(location.x, location.y)
z = location.z
self._delta_tdb_tt = erfa.dtdb(
jd1,
jd2,
ut,
lon.to_value(u.radian),
rxy.to_value(u.km),
z.to_value(u.km),
)
return self._delta_tdb_tt
def _set_delta_tdb_tt(self, val):
del self.cache
if hasattr(val, "to"): # Matches Quantity but also TimeDelta.
val = val.to(u.second).value
val = self._match_shape(val)
self._delta_tdb_tt = val
# Note can't use @property because _get_delta_tdb_tt is explicitly
# called with the optional jd1 and jd2 args.
delta_tdb_tt = property(_get_delta_tdb_tt, _set_delta_tdb_tt)
"""TDB - TT time scale offset"""
def __sub__(self, other):
# T - Tdelta = T
# T - T = Tdelta
other_is_delta = not isinstance(other, Time)
if other_is_delta: # T - Tdelta
# Check other is really a TimeDelta or something that can initialize.
if not isinstance(other, TimeDelta):
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# we need a constant scale to calculate, which is guaranteed for
# TimeDelta, but not for Time (which can be UTC)
out = self.replicate()
if self.scale in other.SCALES:
if other.scale not in (out.scale, None):
other = getattr(other, out.scale)
else:
if other.scale is None:
out._set_scale("tai")
else:
if self.scale not in TIME_TYPES[other.scale]:
raise TypeError(
"Cannot subtract Time and TimeDelta instances "
f"with scales '{self.scale}' and '{other.scale}'"
)
out._set_scale(other.scale)
# remove attributes that are invalidated by changing time
for attr in ("_delta_ut1_utc", "_delta_tdb_tt"):
if hasattr(out, attr):
delattr(out, attr)
else: # T - T
# the scales should be compatible (e.g., cannot convert TDB to LOCAL)
if other.scale not in self.SCALES:
raise TypeError(
"Cannot subtract Time instances "
f"with scales '{self.scale}' and '{other.scale}'"
)
self_time = (
self._time if self.scale in TIME_DELTA_SCALES else self.tai._time
)
# set up TimeDelta, subtraction to be done shortly
out = TimeDelta(
self_time.jd1, self_time.jd2, format="jd", scale=self_time.scale
)
if other.scale != out.scale:
other = getattr(other, out.scale)
jd1 = out._time.jd1 - other._time.jd1
jd2 = out._time.jd2 - other._time.jd2
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
if other_is_delta:
# Go back to left-side scale if needed
out._set_scale(self.scale)
return out
def __add__(self, other):
# T + Tdelta = T
# T + T = error
if isinstance(other, Time):
raise OperandTypeError(self, other, "+")
# Check other is really a TimeDelta or something that can initialize.
if not isinstance(other, TimeDelta):
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# ideally, we calculate in the scale of the Time item, since that is
# what we want the output in, but this may not be possible, since
# TimeDelta cannot be converted arbitrarily
out = self.replicate()
if self.scale in other.SCALES:
if other.scale not in (out.scale, None):
other = getattr(other, out.scale)
else:
if other.scale is None:
out._set_scale("tai")
else:
if self.scale not in TIME_TYPES[other.scale]:
raise TypeError(
"Cannot add Time and TimeDelta instances "
f"with scales '{self.scale}' and '{other.scale}'"
)
out._set_scale(other.scale)
# remove attributes that are invalidated by changing time
for attr in ("_delta_ut1_utc", "_delta_tdb_tt"):
if hasattr(out, attr):
delattr(out, attr)
jd1 = out._time.jd1 + other._time.jd1
jd2 = out._time.jd2 + other._time.jd2
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
# Go back to left-side scale if needed
out._set_scale(self.scale)
return out
# Reverse addition is possible: <something-Tdelta-ish> + T
# but there is no case of <something> - T, so no __rsub__.
def __radd__(self, other):
return self.__add__(other)
def mean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
scale = self.scale
if scale == "utc":
self = self.tai
result = super().mean(
axis=axis, dtype=dtype, out=out, keepdims=keepdims, where=where
)
if scale == "utc":
result = result.utc
result.out_subfmt = self.out_subfmt
location = self.location
if self.location is not None:
if self.location.shape:
if axis is None:
axis_normalized = tuple(range(self.ndim))
elif isinstance(axis, int):
axis_normalized = (axis,)
else:
axis_normalized = axis
sl = [slice(None)] * self.location.ndim
for a in axis_normalized:
sl[a] = slice(0, 1)
if np.any(self.location != self.location[tuple(sl)]):
raise ValueError(
"`location` must be constant over the reduction axes."
)
if not keepdims:
for a in axis_normalized:
sl[a] = 0
location = self.location[tuple(sl)]
result.location = location
return result
def __array_function__(self, function, types, args, kwargs):
"""
Wrap numpy functions.
Parameters
----------
function : callable
Numpy function to wrap
types : iterable of classes
Classes that provide an ``__array_function__`` override. Can
in principle be used to interact with other classes. Below,
mostly passed on to `~numpy.ndarray`, which can only interact
with subclasses.
args : tuple
Positional arguments provided in the function call.
kwargs : dict
Keyword arguments provided in the function call.
"""
if function in CUSTOM_FUNCTIONS:
f = CUSTOM_FUNCTIONS[function]
return f(*args, **kwargs)
elif function in UNSUPPORTED_FUNCTIONS:
return NotImplemented
else:
return super().__array_function__(function, types, args, kwargs)
def to_datetime(self, timezone=None, leap_second_strict="raise"):
# TODO: this could likely go through to_value, as long as that
# had an **kwargs part that was just passed on to _time.
tm = self.replicate(format="datetime")
return tm._shaped_like_input(
tm._time.to_value(timezone, leap_second_strict=leap_second_strict)
)
to_datetime.__doc__ = TimeDatetime.to_value.__doc__
class TimeDeltaMissingUnitWarning(AstropyDeprecationWarning):
"""Warning for missing unit or format in TimeDelta."""
pass
class TimeDelta(TimeBase):
"""
Represent the time difference between two times.
A TimeDelta object is initialized with one or more times in the ``val``
argument. The input times in ``val`` must conform to the specified
``format``. The optional ``val2`` time input should be supplied only for
numeric input formats (e.g. JD) where very high precision (better than
64-bit precision) is required.
The allowed values for ``format`` can be listed with::
>>> list(TimeDelta.FORMATS)
['sec', 'jd', 'datetime']
Note that for time differences, the scale can be among three groups:
geocentric ('tai', 'tt', 'tcg'), barycentric ('tcb', 'tdb'), and rotational
('ut1'). Within each of these, the scales for time differences are the
same. Conversion between geocentric and barycentric is possible, as there
is only a scale factor change, but one cannot convert to or from 'ut1', as
this requires knowledge of the actual times, not just their difference. For
a similar reason, 'utc' is not a valid scale for a time difference: a UTC
day is not always 86400 seconds.
For more information see:
- https://docs.astropy.org/en/stable/time/
- https://docs.astropy.org/en/stable/time/index.html#time-deltas
Parameters
----------
val : sequence, ndarray, number, `~astropy.units.Quantity` or `~astropy.time.TimeDelta` object
Value(s) to initialize the time difference(s). Any quantities will
be converted appropriately (with care taken to avoid rounding
errors for regular time units).
val2 : sequence, ndarray, number, or `~astropy.units.Quantity`; optional
Additional values, as needed to preserve precision.
format : str, optional
Format of input value(s). For numerical inputs without units,
"jd" is assumed and values are interpreted as days.
A deprecation warning is raised in this case. To avoid the warning,
either specify the format or add units to the input values.
scale : str, optional
Time scale of input value(s), must be one of the following values:
('tdb', 'tt', 'ut1', 'tcg', 'tcb', 'tai'). If not given (or
``None``), the scale is arbitrary; when added or subtracted from a
``Time`` instance, it will be used without conversion.
copy : bool, optional
Make a copy of the input values
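Examples
--------
Illustrative constructions (values and units are arbitrary):
>>> import astropy.units as u
>>> from astropy.time import TimeDelta
>>> dt = TimeDelta(3600.0, format='sec')
>>> dt2 = TimeDelta(1.5 * u.day)  # quantity input; no explicit format needed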
"""
SCALES = TIME_DELTA_SCALES
"""List of time delta scales."""
FORMATS = TIME_DELTA_FORMATS
"""Dict of time delta formats."""
info = TimeDeltaInfo()
def __new__(
cls,
val,
val2=None,
format=None,
scale=None,
precision=None,
in_subfmt=None,
out_subfmt=None,
location=None,
copy=False,
):
if isinstance(val, TimeDelta):
self = val.replicate(format=format, copy=copy, cls=cls)
else:
self = super().__new__(cls)
return self
def __init__(self, val, val2=None, format=None, scale=None, copy=False):
if isinstance(val, TimeDelta):
if scale is not None:
self._set_scale(scale)
else:
format = format or self._get_format(val)
self._init_from_vals(val, val2, format, scale, copy)
if scale is not None:
self.SCALES = TIME_DELTA_TYPES[scale]
@staticmethod
def _get_format(val):
if isinstance(val, timedelta):
return "datetime"
if getattr(val, "unit", None) is None:
warn(
"Numerical value without unit or explicit format passed to"
" TimeDelta, assuming days",
TimeDeltaMissingUnitWarning,
)
return "jd"
def replicate(self, *args, **kwargs):
out = super().replicate(*args, **kwargs)
out.SCALES = self.SCALES
return out
def to_datetime(self):
"""
Convert to ``datetime.timedelta`` object.
"""
tm = self.replicate(format="datetime")
return tm._shaped_like_input(tm._time.value)
def _set_scale(self, scale):
"""
This is the key routine that actually does time scale conversions.
This is not public and not connected to the read-only scale property.
"""
if scale == self.scale:
return
if scale not in self.SCALES:
raise ValueError(
"Scale {scale!r} is not in the allowed scales {sorted(self.SCALES)}"
)
# For TimeDelta, there can only be a change in scale factor,
# which is written as time2 - time1 = scale_offset * time1
scale_offset = SCALE_OFFSETS[(self.scale, scale)]
if scale_offset is None:
self._time.scale = scale
else:
jd1, jd2 = self._time.jd1, self._time.jd2
offset1, offset2 = day_frac(jd1, jd2, factor=scale_offset)
self._time = self.FORMATS[self.format](
jd1 + offset1,
jd2 + offset2,
scale,
self.precision,
self.in_subfmt,
self.out_subfmt,
from_jd=True,
)
def _add_sub(self, other, op):
"""Perform common elements of addition / subtraction for two delta times."""
# If not a TimeDelta then see if it can be turned into a TimeDelta.
if not isinstance(other, TimeDelta):
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# the scales should be compatible (e.g., cannot convert TDB to TAI)
if (
self.scale is not None
and self.scale not in other.SCALES
or other.scale is not None
and other.scale not in self.SCALES
):
raise TypeError(
"Cannot add TimeDelta instances with scales '{}' and '{}'".format(
self.scale, other.scale
)
)
# adjust the scale of other if the scale of self is set (or no scales)
if self.scale is not None or other.scale is None:
out = self.replicate()
if other.scale is not None:
other = getattr(other, self.scale)
else:
out = other.replicate()
jd1 = op(self._time.jd1, other._time.jd1)
jd2 = op(self._time.jd2, other._time.jd2)
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
return out
def __add__(self, other):
# If other is a Time then use Time.__add__ to do the calculation.
if isinstance(other, Time):
return other.__add__(self)
return self._add_sub(other, operator.add)
def __sub__(self, other):
# TimeDelta - Time is an error
if isinstance(other, Time):
raise OperandTypeError(self, other, "-")
return self._add_sub(other, operator.sub)
def __radd__(self, other):
return self.__add__(other)
def __rsub__(self, other):
out = self.__sub__(other)
return -out
def __neg__(self):
"""Negation of a `TimeDelta` object."""
new = self.copy()
new._time.jd1 = -self._time.jd1
new._time.jd2 = -self._time.jd2
return new
def __abs__(self):
"""Absolute value of a `TimeDelta` object."""
jd1, jd2 = self._time.jd1, self._time.jd2
negative = jd1 + jd2 < 0
new = self.copy()
new._time.jd1 = np.where(negative, -jd1, jd1)
new._time.jd2 = np.where(negative, -jd2, jd2)
return new
def __mul__(self, other):
"""Multiplication of `TimeDelta` objects by numbers/arrays."""
# Check needed since otherwise the self.jd1 * other multiplication
# would enter here again (via __rmul__)
if isinstance(other, Time):
raise OperandTypeError(self, other, "*")
elif (isinstance(other, u.UnitBase) and other == u.dimensionless_unscaled) or (
isinstance(other, str) and other == ""
):
return self.copy()
# If other is something consistent with a dimensionless quantity
# (could just be a float or an array), then we can just multiply in.
try:
other = u.Quantity(other, u.dimensionless_unscaled, copy=False)
except Exception:
# If not consistent with a dimensionless quantity, try downgrading
# self to a quantity and see if things work.
try:
return self.to(u.day) * other
except Exception:
# The various ways we could multiply all failed;
# returning NotImplemented to give other a final chance.
return NotImplemented
jd1, jd2 = day_frac(self.jd1, self.jd2, factor=other.value)
out = TimeDelta(jd1, jd2, format="jd", scale=self.scale)
if self.format != "jd":
out = out.replicate(format=self.format)
return out
def __rmul__(self, other):
"""Multiplication of numbers/arrays with `TimeDelta` objects."""
return self.__mul__(other)
def __truediv__(self, other):
"""Division of `TimeDelta` objects by numbers/arrays."""
# Cannot do __mul__(1./other) as that loses precision
if (isinstance(other, u.UnitBase) and other == u.dimensionless_unscaled) or (
isinstance(other, str) and other == ""
):
return self.copy()
# If other is something consistent with a dimensionless quantity
# (could just be a float or an array), then we can just divide in.
try:
other = u.Quantity(other, u.dimensionless_unscaled, copy=False)
except Exception:
# If not consistent with a dimensionless quantity, try downgrading
# self to a quantity and see if things work.
try:
return self.to(u.day) / other
except Exception:
# The various ways we could divide all failed;
# returning NotImplemented to give other a final chance.
return NotImplemented
jd1, jd2 = day_frac(self.jd1, self.jd2, divisor=other.value)
out = TimeDelta(jd1, jd2, format="jd", scale=self.scale)
if self.format != "jd":
out = out.replicate(format=self.format)
return out
def __rtruediv__(self, other):
"""Division by `TimeDelta` objects of numbers/arrays."""
# Here, we do not have to worry about returning NotImplemented,
# since other has already had a chance to look at us.
return other / self.to(u.day)
def to(self, unit, equivalencies=[]):
"""
Convert to a quantity in the specified unit.
Parameters
----------
unit : unit-like
The unit to convert to.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not directly
convertible (see :ref:`astropy:unit_equivalencies`). If `None`, no
equivalencies will be applied at all, not even any set globally
or within a context.
Returns
-------
quantity : `~astropy.units.Quantity`
The quantity in the units specified.
See Also
--------
to_value : get the numerical value in a given unit.
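Examples
--------
Illustrative only:
>>> import astropy.units as u
>>> from astropy.time import TimeDelta
>>> dt = TimeDelta(1.0, format='jd')
>>> q = dt.to(u.hr)  # Quantity of 24 hours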
"""
return u.Quantity(self._time.jd1 + self._time.jd2, u.day).to(
unit, equivalencies=equivalencies
)
def to_value(self, *args, **kwargs):
"""Get time delta values expressed in specified output format or unit.
This method is flexible and handles both conversion to a specified
``TimeDelta`` format / sub-format AND conversion to a specified unit.
If positional argument(s) are provided then the first one is checked
to see if it is a valid ``TimeDelta`` format, and next it is checked
to see if it is a valid unit or unit string.
To convert to a ``TimeDelta`` format and optional sub-format the options
are::
tm = TimeDelta(1.0 * u.s)
tm.to_value('jd') # equivalent of tm.jd
tm.to_value('jd', 'decimal') # convert to 'jd' as a Decimal object
tm.to_value('jd', subfmt='decimal')
tm.to_value(format='jd', subfmt='decimal')
To convert to a unit with optional equivalencies, the options are::
tm.to_value('hr') # convert to u.hr (hours)
tm.to_value('hr', []) # specify equivalencies as a positional arg
tm.to_value('hr', equivalencies=[])
tm.to_value(unit='hr', equivalencies=[])
The built-in `~astropy.time.TimeDelta` options for ``format`` are:
{'jd', 'sec', 'datetime'}.
For the two numerical formats 'jd' and 'sec', the available ``subfmt``
options are: {'float', 'long', 'decimal', 'str', 'bytes'}. Here, 'long'
uses ``numpy.longdouble`` for somewhat enhanced precision (with the
enhancement depending on platform), and 'decimal' instances of
:class:`decimal.Decimal` for full precision. For the 'str' and 'bytes'
sub-formats, the number of digits is also chosen such that time values
are represented accurately. Default: as set by ``out_subfmt`` (which by
default picks the first available for a given format, i.e., 'float').
Parameters
----------
format : str, optional
The format in which one wants the `~astropy.time.TimeDelta` values.
Default: the current format.
subfmt : str, optional
Possible sub-format in which the values should be given. Default: as
set by ``out_subfmt`` (which by default picks the first available
for a given format, i.e., 'float' or 'date_hms').
unit : `~astropy.units.UnitBase` instance or str, optional
The unit in which the value should be given.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not directly
convertible (see :ref:`astropy:unit_equivalencies`). If `None`, no
equivalencies will be applied at all, not even any set globally or
within a context.
Returns
-------
value : ndarray or scalar
The value in the format or units specified.
See Also
--------
to : Convert to a `~astropy.units.Quantity` instance in a given unit.
value : The time value in the current format.
"""
if not (args or kwargs):
raise TypeError("to_value() missing required format or unit argument")
# TODO: maybe allow 'subfmt' also for units, keeping full precision
# (effectively, by doing the reverse of quantity_day_frac)?
# This way, only equivalencies could lead to possible precision loss.
if "format" in kwargs or (
args != () and (args[0] is None or args[0] in self.FORMATS)
):
# Super-class will error with duplicate arguments, etc.
return super().to_value(*args, **kwargs)
# With positional arguments, we try parsing the first one as a unit,
# so that on failure we can give a more informative exception.
if args:
try:
unit = u.Unit(args[0])
except ValueError as exc:
raise ValueError(
"first argument is not one of the known "
f"formats ({list(self.FORMATS)}) and failed to parse as a unit."
) from exc
args = (unit,) + args[1:]
return u.Quantity(self._time.jd1 + self._time.jd2, u.day).to_value(
*args, **kwargs
)
def _make_value_equivalent(self, item, value):
"""Coerce setitem value into an equivalent TimeDelta object."""
if not isinstance(value, TimeDelta):
try:
value = self.__class__(value, scale=self.scale, format=self.format)
except Exception as err:
raise ValueError(
f"cannot convert value to a compatible TimeDelta object: {err}"
)
return value
def isclose(self, other, atol=None, rtol=0.0):
"""Returns a boolean or boolean array where two TimeDelta objects are
element-wise equal within a time tolerance.
This effectively evaluates the expression below::
abs(self - other) <= atol + rtol * abs(other)
Parameters
----------
other : `~astropy.units.Quantity` or `~astropy.time.TimeDelta`
Quantity or TimeDelta object for comparison.
atol : `~astropy.units.Quantity` or `~astropy.time.TimeDelta`
Absolute tolerance for equality with units of time (e.g. ``u.s`` or
``u.day``). Default is one bit in the 128-bit JD time representation,
equivalent to about 20 picosecs.
rtol : float
Relative tolerance for equality
"""
try:
other_day = other.to_value(u.day)
except Exception as err:
raise TypeError(f"'other' argument must support conversion to days: {err}")
if atol is None:
atol = np.finfo(float).eps * u.day
if not isinstance(atol, (u.Quantity, TimeDelta)):
raise TypeError(
"'atol' argument must be a Quantity or TimeDelta instance, got "
f"{atol.__class__.__name__} instead"
)
return np.isclose(
self.to_value(u.day), other_day, rtol=rtol, atol=atol.to_value(u.day)
)
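# Illustrative sketch (not part of astropy): typical use of the TimeDelta
# arithmetic and conversion methods defined above.  Only public APIs documented
# in the docstrings above are assumed; the helper name is hypothetical.
def _example_timedelta_usage():
    td = TimeDelta(1.0 * u.day)
    half = td / 2                        # __truediv__ with a plain number
    secs = td.to(u.s)                    # Quantity of 86400.0 s
    jd = td.to_value("jd")               # float, via the format machinery
    close = half.isclose(0.5 * u.day)    # comparison within a small tolerance
    return half, secs, jd, close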
class ScaleValueError(Exception):
pass
def _make_array(val, copy=False):
"""
Take ``val`` and convert/reshape to an array. If ``copy`` is `True`
then copy input values.
Returns
-------
val : ndarray
Array version of ``val``.
"""
if isinstance(val, (tuple, list)) and len(val) > 0 and isinstance(val[0], Time):
dtype = object
else:
dtype = None
val = np.array(val, copy=copy, subok=True, dtype=dtype)
# Allow only float64, string or object arrays as input
# (object is for datetime, maybe add more specific test later?)
# This also ensures the right byteorder for float64 (closes #2942).
if val.dtype.kind == "f" and val.dtype.itemsize >= np.dtype(np.float64).itemsize:
pass
elif val.dtype.kind in "OSUMaV":
pass
else:
val = np.asanyarray(val, dtype=np.float64)
return val
def _check_for_masked_and_fill(val, val2):
"""
If ``val`` or ``val2`` are masked arrays then fill them and cast
to ndarray.
Returns a mask corresponding to the logical-or of masked elements
in ``val`` and ``val2``. If neither is masked then the return ``mask``
is ``None``.
If either ``val`` or ``val2`` are masked then they are replaced
with filled versions of themselves.
Parameters
----------
val : ndarray or MaskedArray
Input val
val2 : ndarray or MaskedArray
Input val2
Returns
-------
mask, val, val2: ndarray or None
Mask: (None or bool ndarray), val, val2: ndarray
"""
def get_as_filled_ndarray(mask, val):
"""
Fill the given MaskedArray ``val`` from the first non-masked
element in the array. This ensures that upstream Time initialization
will succeed.
Note that nothing happens if there are no masked elements.
"""
fill_value = None
if np.any(val.mask):
# Final mask is the logical-or of inputs
mask = mask | val.mask
# First unmasked element. If all elements are masked then
# use fill_value=None from above which will use val.fill_value.
# As long as the user has set this appropriately then all will
# be fine.
val_unmasked = val.compressed() # 1-d ndarray of unmasked values
if len(val_unmasked) > 0:
fill_value = val_unmasked[0]
# Fill the input ``val``. If fill_value is None then this just returns
# an ndarray view of val (no copy).
val = val.filled(fill_value)
return mask, val
mask = False
if isinstance(val, np.ma.MaskedArray):
mask, val = get_as_filled_ndarray(mask, val)
if isinstance(val2, np.ma.MaskedArray):
mask, val2 = get_as_filled_ndarray(mask, val2)
return mask, val, val2
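# Illustrative sketch (not part of astropy): _check_for_masked_and_fill replaces
# masked entries with the first unmasked value and returns the combined mask, so
# downstream Time initialization only sees parseable values.  Hypothetical helper.
def _example_masked_fill():
    vals = np.ma.MaskedArray([1.0, 2.0, 3.0], mask=[False, True, False])
    mask, filled, _ = _check_for_masked_and_fill(vals, np.zeros(3))
    # mask -> [False, True, False]; filled -> [1.0, 1.0, 3.0]
    return mask, filled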
class OperandTypeError(TypeError):
def __init__(self, left, right, op=None):
op_string = "" if op is None else f" for {op}"
super().__init__(
"Unsupported operand type(s){}: '{}' and '{}'".format(
op_string, left.__class__.__name__, right.__class__.__name__
)
)
def _check_leapsec():
global _LEAP_SECONDS_CHECK
if _LEAP_SECONDS_CHECK != _LeapSecondsCheck.DONE:
with _LEAP_SECONDS_LOCK:
# There are three ways we can get here:
# 1. First call (NOT_STARTED).
# 2. Re-entrant call (RUNNING). We skip the initialisation
# and don't worry about leap second errors.
# 3. Another thread which raced with the first call
# (RUNNING). The first thread has relinquished the
# lock to us, so initialization is complete.
if _LEAP_SECONDS_CHECK == _LeapSecondsCheck.NOT_STARTED:
_LEAP_SECONDS_CHECK = _LeapSecondsCheck.RUNNING
update_leap_seconds()
_LEAP_SECONDS_CHECK = _LeapSecondsCheck.DONE
def update_leap_seconds(files=None):
"""If the current ERFA leap second table is out of date, try to update it.
Uses `astropy.utils.iers.LeapSeconds.auto_open` to try to find an
up-to-date table. See that routine for the definition of "out of date".
In order to make it safe to call this any time, all exceptions are turned
    into warnings.
Parameters
----------
files : list of path-like, optional
        List of files/URLs to attempt to open. By default, uses the list defined by
`astropy.utils.iers.LeapSeconds.auto_open`, which includes the table
used by ERFA itself, so if that is up to date, nothing will happen.
Returns
-------
n_update : int
Number of items updated.
"""
try:
from astropy.utils import iers
table = iers.LeapSeconds.auto_open(files)
return erfa.leap_seconds.update(table)
except Exception as exc:
warn(
f"leap-second auto-update failed due to the following exception: {exc!r}",
AstropyWarning,
)
return 0
af1b9e3a13d945c4cb839b1c9dc826f1ed4961db2e26b640733f9b5f68fbd272 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import datetime
import fnmatch
import re
import time
import warnings
from collections import OrderedDict, defaultdict
from decimal import Decimal
import erfa
import numpy as np
import astropy.units as u
from astropy.utils.decorators import classproperty, lazyproperty
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning
from . import _parse_times, conf, utils
from .utils import day_frac, quantity_day_frac, two_product, two_sum
__all__ = [
"AstropyDatetimeLeapSecondWarning",
"TimeFormat",
"TimeJD",
"TimeMJD",
"TimeFromEpoch",
"TimeUnix",
"TimeUnixTai",
"TimeCxcSec",
"TimeGPS",
"TimeDecimalYear",
"TimePlotDate",
"TimeUnique",
"TimeDatetime",
"TimeString",
"TimeISO",
"TimeISOT",
"TimeFITS",
"TimeYearDayTime",
"TimeEpochDate",
"TimeBesselianEpoch",
"TimeJulianEpoch",
"TimeDeltaFormat",
"TimeDeltaSec",
"TimeDeltaJD",
"TimeEpochDateString",
"TimeBesselianEpochString",
"TimeJulianEpochString",
"TIME_FORMATS",
"TIME_DELTA_FORMATS",
"TimezoneInfo",
"TimeDeltaDatetime",
"TimeDatetime64",
"TimeYMDHMS",
"TimeNumeric",
"TimeDeltaNumeric",
]
__doctest_skip__ = ["TimePlotDate"]
# These both get filled in at end after TimeFormat subclasses defined.
# Use an OrderedDict to fix the order in which formats are tried.
# This ensures, e.g., that 'isot' gets tried before 'fits'.
TIME_FORMATS = OrderedDict()
TIME_DELTA_FORMATS = OrderedDict()
# Translations between deprecated FITS timescales defined by
# Rots et al. 2015, A&A 574:A36, and timescales used here.
FITS_DEPRECATED_SCALES = {
"TDT": "tt",
"ET": "tt",
"GMT": "utc",
"UT": "utc",
"IAT": "tai",
}
class AstropyDatetimeLeapSecondWarning(AstropyUserWarning):
"""Warning for leap second when converting to datetime.datetime object."""
def _regexify_subfmts(subfmts):
"""
Iterate through each of the sub-formats and try substituting simple
regular expressions for the strptime codes for year, month, day-of-month,
hour, minute, second. If no % characters remain then turn the final string
into a compiled regex. This assumes time formats do not have a % in them.
This is done both to speed up parsing of strings and to allow mixed formats
where strptime does not quite work well enough.
"""
new_subfmts = []
for subfmt_tuple in subfmts:
subfmt_in = subfmt_tuple[1]
if isinstance(subfmt_in, str):
for strptime_code, regex in (
("%Y", r"(?P<year>\d\d\d\d)"),
("%m", r"(?P<mon>\d{1,2})"),
("%d", r"(?P<mday>\d{1,2})"),
("%H", r"(?P<hour>\d{1,2})"),
("%M", r"(?P<min>\d{1,2})"),
("%S", r"(?P<sec>\d{1,2})"),
):
subfmt_in = subfmt_in.replace(strptime_code, regex)
if "%" not in subfmt_in:
subfmt_tuple = (
subfmt_tuple[0],
re.compile(subfmt_in + "$"),
subfmt_tuple[2],
)
new_subfmts.append(subfmt_tuple)
return tuple(new_subfmts)
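# Illustrative sketch (not part of astropy): _regexify_subfmts rewrites a
# strptime-style subformat into a (name, compiled_regex, output_fmt) tuple when
# every %-code can be substituted.  The sample subformat and helper name below
# are hypothetical.
def _example_regexify_subfmts():
    subfmts = (("date", "%Y-%m-%d", "{year:d}-{mon:02d}-{day:02d}"),)
    ((name, regex, out_fmt),) = _regexify_subfmts(subfmts)
    match = regex.match("2000-01-02")
    return match.groupdict()  # {'year': '2000', 'mon': '01', 'mday': '02'}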
class TimeFormat:
"""
Base class for time representations.
Parameters
----------
val1 : numpy ndarray, list, number, str, or bytes
Values to initialize the time or times. Bytes are decoded as ascii.
Quantities with time units are allowed for formats where the
interpretation is unambiguous.
val2 : numpy ndarray, list, or number; optional
Value(s) to initialize the time or times. Only used for numerical
input, to help preserve precision.
scale : str
Time scale of input value(s)
precision : int
Precision for seconds as floating point
in_subfmt : str
Select subformat for inputting string times
out_subfmt : str
Select subformat for outputting string times
from_jd : bool
If true then val1, val2 are jd1, jd2
"""
_default_scale = "utc" # As of astropy 0.4
subfmts = ()
_registry = TIME_FORMATS
def __init__(
self, val1, val2, scale, precision, in_subfmt, out_subfmt, from_jd=False
):
self.scale = scale # validation of scale done later with _check_scale
self.precision = precision
self.in_subfmt = in_subfmt
self.out_subfmt = out_subfmt
self._jd1, self._jd2 = None, None
if from_jd:
self.jd1 = val1
self.jd2 = val2
else:
val1, val2 = self._check_val_type(val1, val2)
self.set_jds(val1, val2)
def __init_subclass__(cls, **kwargs):
# Register time formats that define a name, but leave out astropy_time since
# it is not a user-accessible format and is only used for initialization into
# a different format.
if "name" in cls.__dict__ and cls.name != "astropy_time":
# FIXME: check here that we're not introducing a collision with
# an existing method or attribute; problem is it could be either
# astropy.time.Time or astropy.time.TimeDelta, and at the point
# where this is run neither of those classes have necessarily been
# constructed yet.
if "value" in cls.__dict__ and not hasattr(cls.value, "fget"):
raise ValueError("If defined, 'value' must be a property")
cls._registry[cls.name] = cls
# If this class defines its own subfmts, preprocess the definitions.
if "subfmts" in cls.__dict__:
cls.subfmts = _regexify_subfmts(cls.subfmts)
return super().__init_subclass__(**kwargs)
@classmethod
def _get_allowed_subfmt(cls, subfmt):
"""Get an allowed subfmt for this class, either the input ``subfmt``
if this is valid or '*' as a default. This method gets used in situations
where the format of an existing Time object is changing and so the
out_ or in_subfmt may need to be coerced to the default '*' if that
``subfmt`` is no longer valid.
"""
try:
cls._select_subfmts(subfmt)
except ValueError:
subfmt = "*"
return subfmt
@property
def in_subfmt(self):
return self._in_subfmt
@in_subfmt.setter
def in_subfmt(self, subfmt):
# Validate subfmt value for this class, raises ValueError if not.
self._select_subfmts(subfmt)
self._in_subfmt = subfmt
@property
def out_subfmt(self):
return self._out_subfmt
@out_subfmt.setter
def out_subfmt(self, subfmt):
# Validate subfmt value for this class, raises ValueError if not.
self._select_subfmts(subfmt)
self._out_subfmt = subfmt
@property
def jd1(self):
return self._jd1
@jd1.setter
def jd1(self, jd1):
self._jd1 = _validate_jd_for_storage(jd1)
if self._jd2 is not None:
self._jd1, self._jd2 = _broadcast_writeable(self._jd1, self._jd2)
@property
def jd2(self):
return self._jd2
@jd2.setter
def jd2(self, jd2):
self._jd2 = _validate_jd_for_storage(jd2)
if self._jd1 is not None:
self._jd1, self._jd2 = _broadcast_writeable(self._jd1, self._jd2)
def __len__(self):
return len(self.jd1)
@property
def scale(self):
"""Time scale."""
self._scale = self._check_scale(self._scale)
return self._scale
@scale.setter
def scale(self, val):
self._scale = val
def mask_if_needed(self, value):
if self.masked:
value = np.ma.array(value, mask=self.mask, copy=False)
return value
@property
def mask(self):
if "mask" not in self.cache:
self.cache["mask"] = np.isnan(self.jd2)
if self.cache["mask"].shape:
self.cache["mask"].flags.writeable = False
return self.cache["mask"]
@property
def masked(self):
if "masked" not in self.cache:
self.cache["masked"] = bool(np.any(self.mask))
return self.cache["masked"]
@property
def jd2_filled(self):
return np.nan_to_num(self.jd2) if self.masked else self.jd2
@property
def precision(self):
return self._precision
@precision.setter
def precision(self, val):
# Verify precision is 0-9 (inclusive)
if not isinstance(val, int) or val < 0 or val > 9:
raise ValueError("precision attribute must be an int between 0 and 9")
self._precision = val
@lazyproperty
def cache(self):
"""
Return the cache associated with this instance.
"""
return defaultdict(dict)
def _check_val_type(self, val1, val2):
"""Input value validation, typically overridden by derived classes."""
# val1 cannot contain nan, but val2 can contain nan
isfinite1 = np.isfinite(val1)
if val1.size > 1: # Calling .all() on a scalar is surprisingly slow
isfinite1 = (
isfinite1.all()
) # Note: arr.all() about 3x faster than np.all(arr)
elif val1.size == 0:
isfinite1 = False
ok1 = (
val1.dtype.kind == "f"
and val1.dtype.itemsize >= 8
and isfinite1
or val1.size == 0
)
ok2 = (
val2 is None
or (
val2.dtype.kind == "f"
and val2.dtype.itemsize >= 8
and not np.any(np.isinf(val2))
)
or val2.size == 0
)
if not (ok1 and ok2):
raise TypeError(
f"Input values for {self.name} class must be finite doubles"
)
if getattr(val1, "unit", None) is not None:
# Convert any quantity-likes to days first, attempting to be
# careful with the conversion, so that, e.g., large numbers of
# seconds get converted without losing precision because
# 1/86400 is not exactly representable as a float.
val1 = u.Quantity(val1, copy=False)
if val2 is not None:
val2 = u.Quantity(val2, copy=False)
try:
val1, val2 = quantity_day_frac(val1, val2)
except u.UnitsError:
raise u.UnitConversionError(
"only quantities with time units can be "
"used to instantiate Time instances."
)
# We now have days, but the format may expect another unit.
# On purpose, multiply with 1./day_unit because typically it is
# 1./erfa.DAYSEC, and inverting it recovers the integer.
# (This conversion will get undone in format's set_jds, hence
# there may be room for optimizing this.)
factor = 1.0 / getattr(self, "unit", 1.0)
if factor != 1.0:
val1, carry = two_product(val1, factor)
carry += val2 * factor
val1, val2 = two_sum(val1, carry)
elif getattr(val2, "unit", None) is not None:
raise TypeError("Cannot mix float and Quantity inputs")
if val2 is None:
val2 = np.array(0, dtype=val1.dtype)
def asarray_or_scalar(val):
"""
Remove ndarray subclasses since for jd1/jd2 we want a pure ndarray
or a Python or numpy scalar.
"""
return np.asarray(val) if isinstance(val, np.ndarray) else val
return asarray_or_scalar(val1), asarray_or_scalar(val2)
def _check_scale(self, scale):
"""
Return a validated scale value.
If there is a class attribute 'scale' then that defines the default /
        required time scale for this format. In this case, if a scale value is
        provided it needs to match the class default; otherwise the class
        default is returned.
Otherwise just make sure that scale is in the allowed list of
scales. Provide a different error message if `None` (no value) was
supplied.
"""
if scale is None:
scale = self._default_scale
if scale not in TIME_SCALES:
raise ScaleValueError(
f"Scale value '{scale}' not in allowed values {TIME_SCALES}"
)
return scale
def set_jds(self, val1, val2):
"""
Set internal jd1 and jd2 from val1 and val2. Must be provided
by derived classes.
"""
raise NotImplementedError
def to_value(self, parent=None, out_subfmt=None):
"""
Return time representation from internal jd1 and jd2 in specified
``out_subfmt``.
This is the base method that ignores ``parent`` and uses the ``value``
property to compute the output. This is done by temporarily setting
``self.out_subfmt`` and calling ``self.value``. This is required for
        legacy Format subclasses prior to astropy 4.0. New code should instead
implement the value functionality in ``to_value()`` and then make the
``value`` property be a simple call to ``self.to_value()``.
Parameters
----------
parent : object
Parent `~astropy.time.Time` object associated with this
`~astropy.time.TimeFormat` object
out_subfmt : str or None
            Output subformat (use existing self.out_subfmt if `None`)
Returns
-------
value : numpy.array, numpy.ma.array
Array or masked array of formatted time representation values
"""
# Get value via ``value`` property, overriding out_subfmt temporarily if needed.
if out_subfmt is not None:
out_subfmt_orig = self.out_subfmt
try:
self.out_subfmt = out_subfmt
value = self.value
finally:
self.out_subfmt = out_subfmt_orig
else:
value = self.value
return self.mask_if_needed(value)
@property
def value(self):
raise NotImplementedError
@classmethod
def _select_subfmts(cls, pattern):
"""
Return a list of subformats where name matches ``pattern`` using
fnmatch.
If no subformat matches pattern then a ValueError is raised. A special
case is a format with no allowed subformats, i.e. subfmts=(), and
pattern='*'. This is OK and happens when this method is used for
validation of an out_subfmt.
"""
if not isinstance(pattern, str):
raise ValueError("subfmt attribute must be a string")
elif pattern == "*":
return cls.subfmts
subfmts = [x for x in cls.subfmts if fnmatch.fnmatchcase(x[0], pattern)]
if len(subfmts) == 0:
if len(cls.subfmts) == 0:
raise ValueError(f"subformat not allowed for format {cls.name}")
else:
subfmt_names = [x[0] for x in cls.subfmts]
raise ValueError(
f"subformat {pattern!r} must match one of "
f"{subfmt_names} for format {cls.name}"
)
return subfmts
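# Illustrative sketch (not part of astropy): in_subfmt/out_subfmt values are
# validated against the class-level ``subfmts`` tuple with fnmatch patterns, so
# '*' and 'date*' are accepted while unknown names raise ValueError.  The helper
# name is hypothetical; TimeISO is defined later in this module.
def _example_subfmt_validation():
    from astropy.time import Time
    t = Time("2000-01-02 03:04:05", format="iso")
    t.out_subfmt = "date"            # restrict output to the 'date' subformat
    date_only = t.value              # '2000-01-02'
    names = [name for name, *_ in TimeISO._select_subfmts("date*")]
    return date_only, names          # ['date_hms', 'date_hm', 'date']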
class TimeNumeric(TimeFormat):
subfmts = (
("float", np.float64, None, np.add),
("long", np.longdouble, utils.longdouble_to_twoval, utils.twoval_to_longdouble),
("decimal", np.object_, utils.decimal_to_twoval, utils.twoval_to_decimal),
("str", np.str_, utils.decimal_to_twoval, utils.twoval_to_string),
("bytes", np.bytes_, utils.bytes_to_twoval, utils.twoval_to_bytes),
)
def _check_val_type(self, val1, val2):
"""Input value validation, typically overridden by derived classes."""
# Save original state of val2 because the super()._check_val_type below
# may change val2 from None to np.array(0). The value is saved in order
# to prevent a useless and slow call to np.result_type() below in the
# most common use-case of providing only val1.
orig_val2_is_none = val2 is None
if val1.dtype.kind == "f":
val1, val2 = super()._check_val_type(val1, val2)
elif not orig_val2_is_none or not (
val1.dtype.kind in "US"
or (
val1.dtype.kind == "O"
and all(isinstance(v, Decimal) for v in val1.flat)
)
):
raise TypeError(
f"for {self.name} class, input should be doubles, string, or Decimal, "
"and second values are only allowed for doubles."
)
val_dtype = (
val1.dtype if orig_val2_is_none else np.result_type(val1.dtype, val2.dtype)
)
subfmts = self._select_subfmts(self.in_subfmt)
for subfmt, dtype, convert, _ in subfmts:
if np.issubdtype(val_dtype, dtype):
break
else:
raise ValueError("input type not among selected sub-formats.")
if convert is not None:
try:
val1, val2 = convert(val1, val2)
except Exception:
raise TypeError(
f"for {self.name} class, input should be (long) doubles, string, "
"or Decimal, and second values are only allowed for "
"(long) doubles."
)
return val1, val2
def to_value(self, jd1=None, jd2=None, parent=None, out_subfmt=None):
"""
Return time representation from internal jd1 and jd2.
Subclasses that require ``parent`` or to adjust the jds should
override this method.
"""
# TODO: do this in __init_subclass__?
if self.__class__.value.fget is not self.__class__.to_value:
return self.value
if jd1 is None:
jd1 = self.jd1
if jd2 is None:
jd2 = self.jd2
if out_subfmt is None:
out_subfmt = self.out_subfmt
subfmt = self._select_subfmts(out_subfmt)[0]
kwargs = {}
if subfmt[0] in ("str", "bytes"):
unit = getattr(self, "unit", 1)
digits = int(np.ceil(np.log10(unit / np.finfo(float).eps)))
# TODO: allow a way to override the format.
kwargs["fmt"] = f".{digits}f"
value = subfmt[3](jd1, jd2, **kwargs)
return self.mask_if_needed(value)
value = property(to_value)
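# Illustrative sketch (not part of astropy): the numeric subformats declared in
# TimeNumeric.subfmts let the same jd1/jd2 pair be rendered as float, Decimal,
# or full-precision string.  Hypothetical helper name.
def _example_numeric_subfmts():
    from astropy.time import Time
    t = Time(2451544.5, format="jd")
    as_float = t.to_value("jd", subfmt="float")      # numpy float64
    as_decimal = t.to_value("jd", subfmt="decimal")  # decimal.Decimal
    as_str = t.to_value("jd", subfmt="str")          # full-precision string
    return as_float, as_decimal, as_str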
class TimeJD(TimeNumeric):
"""
Julian Date time format.
This represents the number of days since the beginning of
the Julian Period.
For example, 2451544.5 in JD is midnight on January 1, 2000.
"""
name = "jd"
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
self.jd1, self.jd2 = day_frac(val1, val2)
class TimeMJD(TimeNumeric):
"""
Modified Julian Date time format.
This represents the number of days since midnight on November 17, 1858.
For example, 51544.0 in MJD is midnight on January 1, 2000.
"""
name = "mjd"
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
jd1, jd2 = day_frac(val1, val2)
jd1 += erfa.DJM0 # erfa.DJM0=2400000.5 (from erfam.h).
self.jd1, self.jd2 = day_frac(jd1, jd2)
def to_value(self, **kwargs):
jd1 = self.jd1 - erfa.DJM0 # This cannot lose precision.
jd2 = self.jd2
return super().to_value(jd1=jd1, jd2=jd2, **kwargs)
value = property(to_value)
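# Illustrative sketch (not part of astropy): jd and mjd differ only by the
# constant offset erfa.DJM0 = 2400000.5 applied in TimeMJD above.  Hypothetical
# helper name.
def _example_jd_mjd_offset():
    from astropy.time import Time
    t = Time("2000-01-01 00:00:00", scale="utc")
    return t.jd, t.mjd, t.jd - t.mjd   # 2451544.5, 51544.0, 2400000.5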
def _check_val_type_not_quantity(format_name, val1, val2):
# If val2 is a Quantity, the super() call that follows this check
# will raise a TypeError.
if hasattr(val1, "to") and getattr(val1, "unit", None) is not None:
raise ValueError(
f"cannot use Quantities for {format_name!r} format, as the unit of year "
"is defined as 365.25 days, while the length of year is variable "
"in this format. Use float instead."
)
class TimeDecimalYear(TimeNumeric):
"""
Time as a decimal year, with integer values corresponding to midnight
of the first day of each year.
For example 2000.5 corresponds to the ISO time '2000-07-02 00:00:00'.
Since for this format the length of the year varies between 365 and
366 days, it is not possible to use Quantity input, in which a year
is always 365.25 days.
"""
name = "decimalyear"
def _check_val_type(self, val1, val2):
_check_val_type_not_quantity(self.name, val1, val2)
# if val2 is a Quantity, super() will raise a TypeError.
return super()._check_val_type(val1, val2)
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
sum12, err12 = two_sum(val1, val2)
iy_start = np.trunc(sum12).astype(int)
extra, y_frac = two_sum(sum12, -iy_start)
y_frac += extra + err12
val = (val1 + val2).astype(np.double)
iy_start = np.trunc(val).astype(int)
imon = np.ones_like(iy_start)
iday = np.ones_like(iy_start)
ihr = np.zeros_like(iy_start)
imin = np.zeros_like(iy_start)
isec = np.zeros_like(y_frac)
# Possible enhancement: use np.unique to only compute start, stop
# for unique values of iy_start.
scale = self.scale.upper().encode("ascii")
jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday, ihr, imin, isec)
jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday, ihr, imin, isec)
t_start = Time(jd1_start, jd2_start, scale=self.scale, format="jd")
t_end = Time(jd1_end, jd2_end, scale=self.scale, format="jd")
t_frac = t_start + (t_end - t_start) * y_frac
self.jd1, self.jd2 = day_frac(t_frac.jd1, t_frac.jd2)
def to_value(self, **kwargs):
scale = self.scale.upper().encode("ascii")
iy_start, ims, ids, ihmsfs = erfa.d2dtf(
scale, 0, self.jd1, self.jd2_filled # precision=0
)
imon = np.ones_like(iy_start)
iday = np.ones_like(iy_start)
ihr = np.zeros_like(iy_start)
imin = np.zeros_like(iy_start)
isec = np.zeros_like(self.jd1)
# Possible enhancement: use np.unique to only compute start, stop
# for unique values of iy_start.
scale = self.scale.upper().encode("ascii")
jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday, ihr, imin, isec)
jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday, ihr, imin, isec)
# Trying to be precise, but more than float64 not useful.
dt = (self.jd1 - jd1_start) + (self.jd2 - jd2_start)
dt_end = (jd1_end - jd1_start) + (jd2_end - jd2_start)
decimalyear = iy_start + dt / dt_end
return super().to_value(jd1=decimalyear, jd2=np.float64(0.0), **kwargs)
value = property(to_value)
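# Illustrative sketch (not part of astropy): decimalyear maps integer values to
# midnight on January 1 and scales fractions by the actual year length, as done
# in set_jds/to_value above.  Hypothetical helper name.
def _example_decimalyear():
    from astropy.time import Time
    t = Time(2000.0, format="decimalyear")
    iso = t.iso                                    # '2000-01-01 00:00:00.000'
    dy = Time("2000-07-02 00:00:00").decimalyear   # 2000.5, per the docstring above
    return iso, dy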
class TimeFromEpoch(TimeNumeric):
"""
Base class for times that represent the interval from a particular
epoch as a numerical multiple of a unit time interval (e.g. seconds
or days).
"""
@classproperty(lazy=True)
def _epoch(cls):
# Ideally we would use `def epoch(cls)` here and not have the instance
# property below. However, this breaks the sphinx API docs generation
# in a way that was not resolved. See #10406 for details.
return Time(
cls.epoch_val,
cls.epoch_val2,
scale=cls.epoch_scale,
format=cls.epoch_format,
)
@property
def epoch(self):
"""Reference epoch time from which the time interval is measured."""
return self._epoch
def set_jds(self, val1, val2):
"""
Initialize the internal jd1 and jd2 attributes given val1 and val2.
        For a TimeFromEpoch subclass like TimeUnix these will be floats giving
the effective seconds since an epoch time (e.g. 1970-01-01 00:00:00).
"""
# Form new JDs based on epoch time + time from epoch (converted to JD).
# One subtlety that might not be obvious is that 1.000 Julian days in
# UTC can be 86400 or 86401 seconds. For the TimeUnix format the
# assumption is that every day is exactly 86400 seconds, so this is, in
# principle, doing the math incorrectly, *except* that it matches the
# definition of Unix time which does not include leap seconds.
# note: use divisor=1./self.unit, since this is either 1 or 1/86400,
# and 1/86400 is not exactly representable as a float64, so multiplying
# by that will cause rounding errors. (But inverting it as a float64
# recovers the exact number)
day, frac = day_frac(val1, val2, divisor=1.0 / self.unit)
jd1 = self.epoch.jd1 + day
jd2 = self.epoch.jd2 + frac
# For the usual case that scale is the same as epoch_scale, we only need
# to ensure that abs(jd2) <= 0.5. Since abs(self.epoch.jd2) <= 0.5 and
# abs(frac) <= 0.5, we can do simple (fast) checks and arithmetic here
# without another call to day_frac(). Note also that `round(jd2.item())`
# is about 10x faster than `np.round(jd2)`` for a scalar.
if self.epoch.scale == self.scale:
jd1_extra = np.round(jd2) if jd2.shape else round(jd2.item())
jd1 += jd1_extra
jd2 -= jd1_extra
self.jd1, self.jd2 = jd1, jd2
return
# Create a temporary Time object corresponding to the new (jd1, jd2) in
# the epoch scale (e.g. UTC for TimeUnix) then convert that to the
# desired time scale for this object.
#
# A known limitation is that the transform from self.epoch_scale to
# self.scale cannot involve any metadata like lat or lon.
try:
tm = getattr(
Time(jd1, jd2, scale=self.epoch_scale, format="jd"), self.scale
)
except Exception as err:
raise ScaleValueError(
f"Cannot convert from '{self.name}' epoch scale '{self.epoch_scale}' "
f"to specified scale '{self.scale}', got error:\n{err}"
) from err
self.jd1, self.jd2 = day_frac(tm._time.jd1, tm._time.jd2)
def to_value(self, parent=None, **kwargs):
# Make sure that scale is the same as epoch scale so we can just
# subtract the epoch and convert
if self.scale != self.epoch_scale:
if parent is None:
raise ValueError("cannot compute value without parent Time object")
try:
tm = getattr(parent, self.epoch_scale)
except Exception as err:
raise ScaleValueError(
f"Cannot convert from '{self.name}' epoch scale "
f"'{self.epoch_scale}' to specified scale '{self.scale}', "
f"got error:\n{err}"
) from err
jd1, jd2 = tm._time.jd1, tm._time.jd2
else:
jd1, jd2 = self.jd1, self.jd2
# This factor is guaranteed to be exactly representable, which
# means time_from_epoch1 is calculated exactly.
factor = 1.0 / self.unit
time_from_epoch1 = (jd1 - self.epoch.jd1) * factor
time_from_epoch2 = (jd2 - self.epoch.jd2) * factor
return super().to_value(jd1=time_from_epoch1, jd2=time_from_epoch2, **kwargs)
value = property(to_value)
@property
def _default_scale(self):
return self.epoch_scale
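# Illustrative sketch (not part of astropy): a new epoch-based format can be
# added by subclassing TimeFromEpoch; __init_subclass__ registers it under
# ``name`` in TIME_FORMATS.  The 'y2k_sec' format and helper below are
# hypothetical.
def _example_custom_epoch_format():
    class TimeY2KSec(TimeFromEpoch):
        """Hypothetical format: seconds since 2000-01-01 00:00:00 TT."""
        name = "y2k_sec"
        unit = 1.0 / erfa.DAYSEC
        epoch_val = "2000-01-01 00:00:00"
        epoch_val2 = None
        epoch_scale = "tt"
        epoch_format = "iso"
    from astropy.time import Time
    return Time("2000-01-02 00:00:00", scale="tt").y2k_sec  # 86400.0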
class TimeUnix(TimeFromEpoch):
"""
Unix time (UTC): seconds from 1970-01-01 00:00:00 UTC, ignoring leap seconds.
For example, 946684800.0 in Unix time is midnight on January 1, 2000.
NOTE: this quantity is not exactly unix time and differs from the strict
POSIX definition by up to 1 second on days with a leap second. POSIX
unix time actually jumps backward by 1 second at midnight on leap second
days while this class value is monotonically increasing at 86400 seconds
per UTC day.
"""
name = "unix"
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = "1970-01-01 00:00:00"
epoch_val2 = None
epoch_scale = "utc"
epoch_format = "iso"
class TimeUnixTai(TimeUnix):
"""
Unix time (TAI): SI seconds elapsed since 1970-01-01 00:00:00 TAI (see caveats).
This will generally differ from standard (UTC) Unix time by the cumulative
integral number of leap seconds introduced into UTC since 1972-01-01 UTC
plus the initial offset of 10 seconds at that date.
This convention matches the definition of linux CLOCK_TAI
(https://www.cl.cam.ac.uk/~mgk25/posix-clocks.html),
and the Precision Time Protocol
(https://en.wikipedia.org/wiki/Precision_Time_Protocol), which
is also used by the White Rabbit protocol in High Energy Physics:
https://white-rabbit.web.cern.ch.
Caveats:
- Before 1972, fractional adjustments to UTC were made, so the difference
between ``unix`` and ``unix_tai`` time is no longer an integer.
- Because of the fractional adjustments, to be very precise, ``unix_tai``
is the number of seconds since ``1970-01-01 00:00:00 TAI`` or equivalently
``1969-12-31 23:59:51.999918 UTC``. The difference between TAI and UTC
at that epoch was 8.000082 sec.
- On the day of a positive leap second the difference between ``unix`` and
``unix_tai`` times increases linearly through the day by 1.0. See also the
documentation for the `~astropy.time.TimeUnix` class.
- Negative leap seconds are possible, though none have been needed to date.
Examples
--------
>>> # get the current offset between TAI and UTC
>>> from astropy.time import Time
>>> t = Time('2020-01-01', scale='utc')
>>> t.unix_tai - t.unix
37.0
>>> # Before 1972, the offset between TAI and UTC was not integer
>>> t = Time('1970-01-01', scale='utc')
>>> t.unix_tai - t.unix # doctest: +FLOAT_CMP
8.000082
>>> # Initial offset of 10 seconds in 1972
>>> t = Time('1972-01-01', scale='utc')
>>> t.unix_tai - t.unix
10.0
"""
name = "unix_tai"
epoch_val = "1970-01-01 00:00:00"
epoch_scale = "tai"
class TimeCxcSec(TimeFromEpoch):
"""
Chandra X-ray Center seconds from 1998-01-01 00:00:00 TT.
For example, 63072064.184 is midnight on January 1, 2000.
"""
name = "cxcsec"
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = "1998-01-01 00:00:00"
epoch_val2 = None
epoch_scale = "tt"
epoch_format = "iso"
class TimeGPS(TimeFromEpoch):
"""GPS time: seconds from 1980-01-06 00:00:00 UTC
For example, 630720013.0 is midnight on January 1, 2000.
Notes
-----
This implementation is strictly a representation of the number of seconds
(including leap seconds) since midnight UTC on 1980-01-06. GPS can also be
considered as a time scale which is ahead of TAI by a fixed offset
(to within about 100 nanoseconds).
For details, see https://www.usno.navy.mil/USNO/time/gps/usno-gps-time-transfer
"""
name = "gps"
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = "1980-01-06 00:00:19"
# above epoch is the same as Time('1980-01-06 00:00:00', scale='utc').tai
epoch_val2 = None
epoch_scale = "tai"
epoch_format = "iso"
class TimePlotDate(TimeFromEpoch):
"""
Matplotlib `~matplotlib.pyplot.plot_date` input:
1 + number of days from 0001-01-01 00:00:00 UTC.
This can be used directly in the matplotlib `~matplotlib.pyplot.plot_date`
function::
>>> import matplotlib.pyplot as plt
>>> jyear = np.linspace(2000, 2001, 20)
>>> t = Time(jyear, format='jyear', scale='utc')
>>> plt.plot_date(t.plot_date, jyear)
>>> plt.gcf().autofmt_xdate() # orient date labels at a slant
>>> plt.draw()
For example, 730120.0003703703 is midnight on January 1, 2000.
"""
# This corresponds to the zero reference time for matplotlib plot_date().
# Note that TAI and UTC are equivalent at the reference time.
name = "plot_date"
unit = 1.0
epoch_val = 1721424.5 # Time('0001-01-01 00:00:00', scale='tai').jd - 1
epoch_val2 = None
epoch_scale = "utc"
epoch_format = "jd"
@lazyproperty
def epoch(self):
"""Reference epoch time from which the time interval is measured."""
try:
# Matplotlib >= 3.3 has a get_epoch() function
from matplotlib.dates import get_epoch
except ImportError:
# If no get_epoch() then the epoch is '0001-01-01'
_epoch = self._epoch
else:
# Get the matplotlib date epoch as an ISOT string in UTC
epoch_utc = get_epoch()
from erfa import ErfaWarning
with warnings.catch_warnings():
# Catch possible dubious year warnings from erfa
warnings.filterwarnings("ignore", category=ErfaWarning)
_epoch = Time(epoch_utc, scale="utc", format="isot")
_epoch.format = "jd"
return _epoch
class TimeStardate(TimeFromEpoch):
"""
Stardate: date units from 2318-07-05 12:00:00 UTC.
For example, stardate 41153.7 is 00:52 on April 30, 2363.
See http://trekguide.com/Stardates.htm#TNG for calculations and reference points.
"""
name = "stardate"
unit = 0.397766856 # Stardate units per day
epoch_val = "2318-07-05 11:00:00" # Date and time of stardate 00000.00
epoch_val2 = None
epoch_scale = "tai"
epoch_format = "iso"
class TimeUnique(TimeFormat):
"""
Base class for time formats that can uniquely create a time object
without requiring an explicit format specifier. This class does
nothing but provide inheritance to identify a class as unique.
"""
class TimeAstropyTime(TimeUnique):
"""
Instantiate date from an Astropy Time object (or list thereof).
This is purely for instantiating from a Time object. The output
format is the same as the first time instance.
"""
name = "astropy_time"
def __new__(
cls, val1, val2, scale, precision, in_subfmt, out_subfmt, from_jd=False
):
"""
Use __new__ instead of __init__ to output a class instance that
is the same as the class of the first Time object in the list.
"""
val1_0 = val1.flat[0]
if not (
isinstance(val1_0, Time)
and all(type(val) is type(val1_0) for val in val1.flat)
):
raise TypeError(
f"Input values for {cls.name} class must all be the same "
"astropy Time type."
)
if scale is None:
scale = val1_0.scale
if val1.shape:
vals = [getattr(val, scale)._time for val in val1]
jd1 = np.concatenate([np.atleast_1d(val.jd1) for val in vals])
jd2 = np.concatenate([np.atleast_1d(val.jd2) for val in vals])
# Collect individual location values and merge into a single location.
if any(tm.location is not None for tm in val1):
if any(tm.location is None for tm in val1):
raise ValueError(
"cannot concatenate times unless all locations "
"are set or no locations are set"
)
locations = []
for tm in val1:
location = np.broadcast_to(
tm.location, tm._time.jd1.shape, subok=True
)
locations.append(np.atleast_1d(location))
location = np.concatenate(locations)
else:
location = None
else:
val = getattr(val1_0, scale)._time
jd1, jd2 = val.jd1, val.jd2
location = val1_0.location
OutTimeFormat = val1_0._time.__class__
self = OutTimeFormat(
jd1, jd2, scale, precision, in_subfmt, out_subfmt, from_jd=True
)
# Make a temporary hidden attribute to transfer location back to the
# parent Time object where it needs to live.
self._location = location
return self
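# Illustrative sketch (not part of astropy): passing a list of Time objects to
# Time() goes through TimeAstropyTime, concatenating jd1/jd2 and keeping the
# format of the first element.  Hypothetical helper name.
def _example_time_from_times():
    from astropy.time import Time
    t1 = Time("2000-01-01", scale="utc")
    t2 = Time("2010-01-01", scale="utc")
    tarr = Time([t1, t2])
    return tarr.format, tarr.shape   # ('iso', (2,))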
class TimeDatetime(TimeUnique):
"""
Represent date as Python standard library `~datetime.datetime` object.
Example::
>>> from astropy.time import Time
>>> from datetime import datetime
>>> t = Time(datetime(2000, 1, 2, 12, 0, 0), scale='utc')
>>> t.iso
'2000-01-02 12:00:00.000'
>>> t.tt.datetime
datetime.datetime(2000, 1, 2, 12, 1, 4, 184000)
"""
name = "datetime"
def _check_val_type(self, val1, val2):
if not all(isinstance(val, datetime.datetime) for val in val1.flat):
raise TypeError(
f"Input values for {self.name} class must be datetime objects"
)
if val2 is not None:
raise ValueError(
f"{self.name} objects do not accept a val2 but you provided {val2}"
)
return val1, None
def set_jds(self, val1, val2):
"""Convert datetime object contained in val1 to jd1, jd2."""
# Iterate through the datetime objects, getting year, month, etc.
iterator = np.nditer(
[val1, None, None, None, None, None, None],
flags=["refs_ok", "zerosize_ok"],
op_dtypes=[None] + 5 * [np.intc] + [np.double],
)
for val, iy, im, id, ihr, imin, dsec in iterator:
dt = val.item()
if dt.tzinfo is not None:
dt = (dt - dt.utcoffset()).replace(tzinfo=None)
iy[...] = dt.year
im[...] = dt.month
id[...] = dt.day
ihr[...] = dt.hour
imin[...] = dt.minute
dsec[...] = dt.second + dt.microsecond / 1e6
jd1, jd2 = erfa.dtf2d(
self.scale.upper().encode("ascii"), *iterator.operands[1:]
)
self.jd1, self.jd2 = day_frac(jd1, jd2)
def to_value(
self, timezone=None, leap_second_strict="raise", parent=None, out_subfmt=None
):
"""
Convert to (potentially timezone-aware) `~datetime.datetime` object.
If ``timezone`` is not ``None``, return a timezone-aware datetime object.
Since the `~datetime.datetime` class does not natively handle leap seconds, the
behavior when converting a time within a leap second is controlled by the
``leap_second_strict`` argument. For example::
>>> from astropy.time import Time
>>> t = Time("2015-06-30 23:59:60.500")
>>> print(t.to_datetime(leap_second_strict='silent'))
2015-07-01 00:00:00.500000
Parameters
----------
timezone : {`~datetime.tzinfo`, None}, optional
If not `None`, return timezone-aware datetime.
leap_second_strict : str, optional
If ``raise`` (default), raise an exception if the time is within a leap
second. If ``warn`` then issue a warning. If ``silent`` then silently
handle the leap second.
Returns
-------
`~datetime.datetime`
If ``timezone`` is not ``None``, output will be timezone-aware.
"""
if out_subfmt is not None:
# Out_subfmt not allowed for this format, so raise the standard
# exception by trying to validate the value.
self._select_subfmts(out_subfmt)
if timezone is not None:
if self._scale != "utc":
raise ScaleValueError(
f"scale is {self._scale}, must be 'utc' when timezone is supplied."
)
# Rather than define a value property directly, we have a function,
# since we want to be able to pass in timezone information.
scale = self.scale.upper().encode("ascii")
iys, ims, ids, ihmsfs = erfa.d2dtf(
scale, 6, self.jd1, self.jd2_filled # 6 for microsec
)
ihrs = ihmsfs["h"]
imins = ihmsfs["m"]
isecs = ihmsfs["s"]
ifracs = ihmsfs["f"]
iterator = np.nditer(
[iys, ims, ids, ihrs, imins, isecs, ifracs, None],
flags=["refs_ok", "zerosize_ok"],
op_dtypes=7 * [None] + [object],
)
for iy, im, id, ihr, imin, isec, ifracsec, out in iterator:
if isec >= 60:
isec = isec - 1
in_leap_second = True
else:
in_leap_second = False
if timezone is not None:
dt = datetime.datetime(
iy, im, id, ihr, imin, isec, ifracsec, tzinfo=TimezoneInfo()
).astimezone(timezone)
else:
dt = datetime.datetime(iy, im, id, ihr, imin, isec, ifracsec)
if in_leap_second:
dt += datetime.timedelta(seconds=1)
msg = (
f"Time {dt} is within a leap second but `datetime` does not "
"support leap seconds. Use the `leap_second_strict` argument "
"of the `Time.to_datetime()` method with value of 'raise', 'warn', "
"or 'silent' to control how leap seconds are handled."
)
if leap_second_strict == "raise":
raise ValueError(msg)
elif leap_second_strict == "warn":
warnings.warn(msg, AstropyDatetimeLeapSecondWarning)
elif leap_second_strict != "silent":
raise ValueError(
f"leap_second_strict must be 'raise', 'warn', or 'silent', "
f"not '{leap_second_strict}'"
)
out[...] = dt
return self.mask_if_needed(iterator.operands[-1])
value = property(to_value)
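# Illustrative sketch (not part of astropy): Time.to_datetime() routes through
# TimeDatetime.to_value above; ``leap_second_strict`` controls behaviour inside
# a leap second and ``timezone`` yields an aware datetime.  Hypothetical helper.
def _example_to_datetime():
    from astropy.time import Time
    t = Time("2015-06-30 23:59:60.5", scale="utc")
    in_leap = t.to_datetime(leap_second_strict="silent")  # 2015-07-01 00:00:00.500000
    aware = Time("2000-01-01 12:00:00", scale="utc").to_datetime(
        timezone=datetime.timezone.utc
    )
    return in_leap, aware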
class TimeYMDHMS(TimeUnique):
"""
ymdhms: A Time format to represent Time as year, month, day, hour,
minute, second (thus the name ymdhms).
Acceptable inputs must have keys or column names in the "YMDHMS" set of
    ``year``, ``month``, ``day``, ``hour``, ``minute``, ``second``:
- Dict with keys in the YMDHMS set
- NumPy structured array, record array or astropy Table, or single row
of those types, with column names in the YMDHMS set
One can supply a subset of the YMDHMS values, for instance only 'year',
'month', and 'day'. Inputs have the following defaults::
'month': 1, 'day': 1, 'hour': 0, 'minute': 0, 'second': 0
When the input is supplied as a ``dict`` then each value can be either a
scalar value or an array. The values will be broadcast to a common shape.
Example::
>>> from astropy.time import Time
>>> t = Time({'year': 2015, 'month': 2, 'day': 3,
... 'hour': 12, 'minute': 13, 'second': 14.567},
... scale='utc')
>>> t.iso
'2015-02-03 12:13:14.567'
>>> t.ymdhms.year
2015
"""
name = "ymdhms"
def _check_val_type(self, val1, val2):
"""
This checks inputs for the YMDHMS format.
        It is a bit more complex than most format checkers because of the flexible
input that is allowed. Also, it actually coerces ``val1`` into an appropriate
dict of ndarrays that can be used easily by ``set_jds()``. This is useful
because it makes it easy to get default values in that routine.
Parameters
----------
val1 : ndarray or None
val2 : ndarray or None
Returns
-------
val1_as_dict, val2 : val1 as dict or None, val2 is always None
"""
if val2 is not None:
raise ValueError("val2 must be None for ymdhms format")
ymdhms = ["year", "month", "day", "hour", "minute", "second"]
if val1.dtype.names:
# Convert to a dict of ndarray
val1_as_dict = {name: val1[name] for name in val1.dtype.names}
elif val1.shape == (0,):
# Input was empty list [], so set to None and set_jds will handle this
return None, None
elif (
val1.dtype.kind == "O"
and val1.shape == ()
and isinstance(val1.item(), dict)
):
# Code gets here for input as a dict. The dict input
# can be either scalar values or N-d arrays.
# Extract the item (which is a dict) and broadcast values to the
# same shape here.
names = val1.item().keys()
values = val1.item().values()
val1_as_dict = dict(zip(names, np.broadcast_arrays(*values)))
else:
raise ValueError("input must be dict or table-like")
# Check that the key names now are good.
names = val1_as_dict.keys()
required_names = ymdhms[: len(names)]
def comma_repr(vals):
return ", ".join(repr(val) for val in vals)
bad_names = set(names) - set(ymdhms)
if bad_names:
raise ValueError(
f"{comma_repr(bad_names)} not allowed as YMDHMS key name(s)"
)
if set(names) != set(required_names):
raise ValueError(
f"for {len(names)} input key names "
f"you must supply {comma_repr(required_names)}"
)
return val1_as_dict, val2
def set_jds(self, val1, val2):
if val1 is None:
# Input was empty list []
jd1 = np.array([], dtype=np.float64)
jd2 = np.array([], dtype=np.float64)
else:
jd1, jd2 = erfa.dtf2d(
self.scale.upper().encode("ascii"),
val1["year"],
val1.get("month", 1),
val1.get("day", 1),
val1.get("hour", 0),
val1.get("minute", 0),
val1.get("second", 0),
)
self.jd1, self.jd2 = day_frac(jd1, jd2)
@property
def value(self):
scale = self.scale.upper().encode("ascii")
iys, ims, ids, ihmsfs = erfa.d2dtf(scale, 9, self.jd1, self.jd2_filled)
out = np.empty(
self.jd1.shape,
dtype=[
("year", "i4"),
("month", "i4"),
("day", "i4"),
("hour", "i4"),
("minute", "i4"),
("second", "f8"),
],
)
out["year"] = iys
out["month"] = ims
out["day"] = ids
out["hour"] = ihmsfs["h"]
out["minute"] = ihmsfs["m"]
out["second"] = ihmsfs["s"] + ihmsfs["f"] * 10 ** (-9)
out = out.view(np.recarray)
return self.mask_if_needed(out)
class TimezoneInfo(datetime.tzinfo):
"""
Subclass of the `~datetime.tzinfo` object, used in the
to_datetime method to specify timezones.
It may be safer in most cases to use a timezone database package like
pytz rather than defining your own timezones - this class is mainly
a workaround for users without pytz.
"""
@u.quantity_input(utc_offset=u.day, dst=u.day)
def __init__(self, utc_offset=0 * u.day, dst=0 * u.day, tzname=None):
"""
Parameters
----------
utc_offset : `~astropy.units.Quantity`, optional
Offset from UTC in days. Defaults to zero.
dst : `~astropy.units.Quantity`, optional
Daylight Savings Time offset in days. Defaults to zero
(no daylight savings).
tzname : str or None, optional
Name of timezone
Examples
--------
>>> from datetime import datetime
>>> from astropy.time import TimezoneInfo # Specifies a timezone
>>> import astropy.units as u
>>> utc = TimezoneInfo() # Defaults to UTC
>>> utc_plus_one_hour = TimezoneInfo(utc_offset=1*u.hour) # UTC+1
>>> dt_aware = datetime(2000, 1, 1, 0, 0, 0, tzinfo=utc_plus_one_hour)
>>> print(dt_aware)
2000-01-01 00:00:00+01:00
>>> print(dt_aware.astimezone(utc))
1999-12-31 23:00:00+00:00
"""
if utc_offset == 0 and dst == 0 and tzname is None:
tzname = "UTC"
self._utcoffset = datetime.timedelta(utc_offset.to_value(u.day))
self._tzname = tzname
self._dst = datetime.timedelta(dst.to_value(u.day))
def utcoffset(self, dt):
return self._utcoffset
def tzname(self, dt):
return str(self._tzname)
def dst(self, dt):
return self._dst
class TimeString(TimeUnique):
"""
Base class for string-like time representations.
This class assumes that anything following the last decimal point to the
right is a fraction of a second.
**Fast C-based parser**
Time format classes can take advantage of a fast C-based parser if the times
are represented as fixed-format strings with year, month, day-of-month,
hour, minute, second, OR year, day-of-year, hour, minute, second. This can
be a factor of 20 or more faster than the pure Python parser.
Fixed format means that the components always have the same number of
characters. The Python parser will accept ``2001-9-2`` as a date, but the C
parser would require ``2001-09-02``.
A subclass in this case must define a class attribute ``fast_parser_pars``
which is a `dict` with all of the keys below. An inherited attribute is not
checked, only an attribute in the class ``__dict__``.
- ``delims`` (tuple of int): ASCII code for character at corresponding
``starts`` position (0 => no character)
- ``starts`` (tuple of int): position where component starts (including
      delimiter if present). Use -1 for the month component for formats that use
day of year.
- ``stops`` (tuple of int): position where component ends. Use -1 to
continue to end of string, or for the month component for formats that use
day of year.
- ``break_allowed`` (tuple of int): if true (1) then the time string can
legally end just before the corresponding component (e.g. "2000-01-01"
is a valid time but "2000-01-01 12" is not).
- ``has_day_of_year`` (int): 0 if dates have year, month, day; 1 if year,
day-of-year
"""
def __init_subclass__(cls, **kwargs):
if "fast_parser_pars" in cls.__dict__:
fpp = cls.fast_parser_pars
fpp = np.array(
list(
zip(
map(chr, fpp["delims"]),
fpp["starts"],
fpp["stops"],
fpp["break_allowed"],
)
),
_parse_times.dt_pars,
)
if cls.fast_parser_pars["has_day_of_year"]:
fpp["start"][1] = fpp["stop"][1] = -1
cls._fast_parser = _parse_times.create_parser(fpp)
super().__init_subclass__(**kwargs)
def _check_val_type(self, val1, val2):
if val1.dtype.kind not in ("S", "U") and val1.size:
raise TypeError(f"Input values for {self.name} class must be strings")
if val2 is not None:
raise ValueError(
f"{self.name} objects do not accept a val2 but you provided {val2}"
)
return val1, None
def parse_string(self, timestr, subfmts):
"""Read time from a single string, using a set of possible formats."""
# Datetime components required for conversion to JD by ERFA, along
# with the default values.
components = ("year", "mon", "mday", "hour", "min", "sec")
defaults = (None, 1, 1, 0, 0, 0)
# Assume that anything following "." on the right side is a
# floating fraction of a second.
try:
idot = timestr.rindex(".")
except Exception:
timestr_has_fractional_digits = False
else:
timestr, fracsec = timestr[:idot], timestr[idot:]
fracsec = float(fracsec)
timestr_has_fractional_digits = True
for _, strptime_fmt_or_regex, _ in subfmts:
if isinstance(strptime_fmt_or_regex, str):
subfmt_has_sec = "%S" in strptime_fmt_or_regex
try:
tm = time.strptime(timestr, strptime_fmt_or_regex)
except ValueError:
continue
else:
vals = [getattr(tm, "tm_" + component) for component in components]
else:
tm = re.match(strptime_fmt_or_regex, timestr)
if tm is None:
continue
tm = tm.groupdict()
vals = [
int(tm.get(component, default))
for component, default in zip(components, defaults)
]
subfmt_has_sec = "sec" in tm
# Add fractional seconds if they were in the original time string
# and the subformat has seconds. A time like "2022-08-01.123" will
# never pass this for a format like ISO and will raise a parsing
# exception.
if timestr_has_fractional_digits:
if subfmt_has_sec:
vals[-1] = vals[-1] + fracsec
else:
continue
return vals
else:
raise ValueError(f"Time {timestr} does not match {self.name} format")
def set_jds(self, val1, val2):
"""Parse the time strings contained in val1 and set jd1, jd2."""
# If specific input subformat is required then use the Python parser.
# Also do this if Time format class does not define `use_fast_parser` or
# if the fast parser is entirely disabled. Note that `use_fast_parser`
# is ignored for format classes that don't have a fast parser.
if (
self.in_subfmt != "*"
or "_fast_parser" not in self.__class__.__dict__
or conf.use_fast_parser == "False"
):
jd1, jd2 = self.get_jds_python(val1, val2)
else:
try:
jd1, jd2 = self.get_jds_fast(val1, val2)
except Exception:
# Fall through to the Python parser unless fast is forced.
if conf.use_fast_parser == "force":
raise
else:
jd1, jd2 = self.get_jds_python(val1, val2)
self.jd1 = jd1
self.jd2 = jd2
def get_jds_python(self, val1, val2):
"""Parse the time strings contained in val1 and get jd1, jd2."""
# Select subformats based on current self.in_subfmt
subfmts = self._select_subfmts(self.in_subfmt)
# Be liberal in what we accept: convert bytes to ascii.
# Here .item() is needed for arrays with entries of unequal length,
# to strip trailing 0 bytes.
to_string = (
str if val1.dtype.kind == "U" else lambda x: str(x.item(), encoding="ascii")
)
iterator = np.nditer(
[val1, None, None, None, None, None, None],
flags=["zerosize_ok"],
op_dtypes=[None] + 5 * [np.intc] + [np.double],
)
for val, iy, im, id, ihr, imin, dsec in iterator:
val = to_string(val)
(
iy[...],
im[...],
id[...],
ihr[...],
imin[...],
dsec[...],
) = self.parse_string(val, subfmts)
jd1, jd2 = erfa.dtf2d(
self.scale.upper().encode("ascii"), *iterator.operands[1:]
)
jd1, jd2 = day_frac(jd1, jd2)
return jd1, jd2
def get_jds_fast(self, val1, val2):
"""Use fast C parser to parse time strings in val1 and get jd1, jd2."""
        # Handle bytes or str input and convert to uint8. We need to use the
# dtype _parse_times.dt_u1 instead of uint8, since otherwise it is
# not possible to create a gufunc with structured dtype output.
# See note about ufunc type resolver in pyerfa/erfa/ufunc.c.templ.
if val1.dtype.kind == "U":
# Note: val1.astype('S') is *very* slow, so we check ourselves
# that the input is pure ASCII.
val1_uint32 = val1.view((np.uint32, val1.dtype.itemsize // 4))
if np.any(val1_uint32 > 127):
raise ValueError("input is not pure ASCII")
# It might be possible to avoid making a copy via astype with
# cleverness in parse_times.c but leave that for another day.
chars = val1_uint32.astype(_parse_times.dt_u1)
else:
chars = val1.view((_parse_times.dt_u1, val1.dtype.itemsize))
# Call the fast parsing ufunc.
time_struct = self._fast_parser(chars)
jd1, jd2 = erfa.dtf2d(
self.scale.upper().encode("ascii"),
time_struct["year"],
time_struct["month"],
time_struct["day"],
time_struct["hour"],
time_struct["minute"],
time_struct["second"],
)
return day_frac(jd1, jd2)
def str_kwargs(self):
"""
Generator that yields a dict of values corresponding to the
calendar date and time for the internal JD values.
"""
scale = (self.scale.upper().encode("ascii"),)
iys, ims, ids, ihmsfs = erfa.d2dtf(
scale, self.precision, self.jd1, self.jd2_filled
)
# Get the str_fmt element of the first allowed output subformat
_, _, str_fmt = self._select_subfmts(self.out_subfmt)[0]
yday = None
has_yday = "{yday:" in str_fmt
ihrs = ihmsfs["h"]
imins = ihmsfs["m"]
isecs = ihmsfs["s"]
ifracs = ihmsfs["f"]
for iy, im, id, ihr, imin, isec, ifracsec in np.nditer(
[iys, ims, ids, ihrs, imins, isecs, ifracs], flags=["zerosize_ok"]
):
if has_yday:
yday = datetime.datetime(iy, im, id).timetuple().tm_yday
yield {
"year": int(iy),
"mon": int(im),
"day": int(id),
"hour": int(ihr),
"min": int(imin),
"sec": int(isec),
"fracsec": int(ifracsec),
"yday": yday,
}
def format_string(self, str_fmt, **kwargs):
"""Write time to a string using a given format.
By default, just interprets str_fmt as a format string,
but subclasses can add to this.
"""
return str_fmt.format(**kwargs)
@property
def value(self):
# Select the first available subformat based on current
# self.out_subfmt
subfmts = self._select_subfmts(self.out_subfmt)
_, _, str_fmt = subfmts[0]
# TODO: fix this ugly hack
if self.precision > 0 and str_fmt.endswith("{sec:02d}"):
str_fmt += ".{fracsec:0" + str(self.precision) + "d}"
# Try to optimize this later. Can't pre-allocate because length of
# output could change, e.g. year rolls from 999 to 1000.
outs = []
for kwargs in self.str_kwargs():
outs.append(str(self.format_string(str_fmt, **kwargs)))
return np.array(outs).reshape(self.jd1.shape)
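# Illustrative sketch (not part of astropy): string parsing uses the C fast
# parser for in_subfmt='*' unless disabled via conf.use_fast_parser; 'force'
# makes parse failures raise instead of falling back to the Python parser.
# Hypothetical helper name.
def _example_fast_parser_config():
    from astropy.time import Time
    with conf.set_temp("use_fast_parser", "force"):
        t = Time(["2000-01-01 01:02:03.456", "2001-02-03 04:05:06"], format="iso")
    return t.jd1.shape   # (2,)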
class TimeISO(TimeString):
"""
ISO 8601 compliant date-time format "YYYY-MM-DD HH:MM:SS.sss...".
For example, 2000-01-01 00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
"""
name = "iso"
subfmts = (
(
"date_hms",
"%Y-%m-%d %H:%M:%S",
# XXX To Do - use strftime for output ??
"{year:d}-{mon:02d}-{day:02d} {hour:02d}:{min:02d}:{sec:02d}",
),
(
"date_hm",
"%Y-%m-%d %H:%M",
"{year:d}-{mon:02d}-{day:02d} {hour:02d}:{min:02d}",
),
("date", "%Y-%m-%d", "{year:d}-{mon:02d}-{day:02d}"),
)
# Define positions and starting delimiter for year, month, day, hour,
# minute, seconds components of an ISO time. This is used by the fast
# C-parser parse_ymdhms_times()
#
# "2000-01-12 13:14:15.678"
# 01234567890123456789012
# yyyy-mm-dd hh:mm:ss.fff
# Parsed as ('yyyy', '-mm', '-dd', ' hh', ':mm', ':ss', '.fff')
fast_parser_pars = dict(
delims=(0, ord("-"), ord("-"), ord(" "), ord(":"), ord(":"), ord(".")),
starts=(0, 4, 7, 10, 13, 16, 19),
stops=(3, 6, 9, 12, 15, 18, -1),
# Break allowed *before*
# y m d h m s f
break_allowed=(0, 0, 0, 1, 0, 1, 1),
has_day_of_year=0,
)
def parse_string(self, timestr, subfmts):
# Handle trailing 'Z' for UTC time
if timestr.endswith("Z"):
if self.scale != "utc":
raise ValueError("Time input terminating in 'Z' must have scale='UTC'")
timestr = timestr[:-1]
return super().parse_string(timestr, subfmts)
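# Illustrative sketch (not part of astropy): TimeISO.parse_string strips a
# trailing 'Z', which is only accepted when the scale is 'utc'.  Hypothetical
# helper name.
def _example_iso_trailing_z():
    from astropy.time import Time
    t = Time("2020-01-01 00:00:00Z", format="iso", scale="utc")
    return t.iso   # '2020-01-01 00:00:00.000'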
class TimeISOT(TimeISO):
"""
ISO 8601 compliant date-time format "YYYY-MM-DDTHH:MM:SS.sss...".
This is the same as TimeISO except for a "T" instead of space between
the date and time.
For example, 2000-01-01T00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
"""
name = "isot"
subfmts = (
(
"date_hms",
"%Y-%m-%dT%H:%M:%S",
"{year:d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}",
),
(
"date_hm",
"%Y-%m-%dT%H:%M",
"{year:d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}",
),
("date", "%Y-%m-%d", "{year:d}-{mon:02d}-{day:02d}"),
)
# See TimeISO for explanation
fast_parser_pars = dict(
delims=(0, ord("-"), ord("-"), ord("T"), ord(":"), ord(":"), ord(".")),
starts=(0, 4, 7, 10, 13, 16, 19),
stops=(3, 6, 9, 12, 15, 18, -1),
# Break allowed *before*
# y m d h m s f
break_allowed=(0, 0, 0, 1, 0, 1, 1),
has_day_of_year=0,
)
class TimeYearDayTime(TimeISO):
"""
Year, day-of-year and time as "YYYY:DOY:HH:MM:SS.sss...".
The day-of-year (DOY) goes from 001 to 365 (366 in leap years).
For example, 2000:001:00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
"""
name = "yday"
subfmts = (
(
"date_hms",
"%Y:%j:%H:%M:%S",
"{year:d}:{yday:03d}:{hour:02d}:{min:02d}:{sec:02d}",
),
("date_hm", "%Y:%j:%H:%M", "{year:d}:{yday:03d}:{hour:02d}:{min:02d}"),
("date", "%Y:%j", "{year:d}:{yday:03d}"),
)
# Define positions and starting delimiter for year, month, day, hour,
# minute, seconds components of an ISO time. This is used by the fast
# C-parser parse_ymdhms_times()
#
# "2000:123:13:14:15.678"
# 012345678901234567890
# yyyy:ddd:hh:mm:ss.fff
# Parsed as ('yyyy', ':ddd', ':hh', ':mm', ':ss', '.fff')
#
# delims: character at corresponding `starts` position (0 => no character)
# starts: position where component starts (including delimiter if present)
# stops: position where component ends (-1 => continue to end of string)
fast_parser_pars = dict(
delims=(0, 0, ord(":"), ord(":"), ord(":"), ord(":"), ord(".")),
starts=(0, -1, 4, 8, 11, 14, 17),
stops=(3, -1, 7, 10, 13, 16, -1),
# Break allowed before:
# y m d h m s f
break_allowed=(0, 0, 0, 1, 0, 1, 1),
has_day_of_year=1,
)
class TimeDatetime64(TimeISOT):
name = "datetime64"
def _check_val_type(self, val1, val2):
        if val1.dtype.kind != "M":
if val1.size > 0:
raise TypeError(
f"Input values for {self.name} class must be datetime64 objects"
)
else:
val1 = np.array([], "datetime64[D]")
if val2 is not None:
raise ValueError(
f"{self.name} objects do not accept a val2 but you provided {val2}"
)
return val1, None
def set_jds(self, val1, val2):
# If there are any masked values in the ``val1`` datetime64 array
# ('NaT') then stub them with a valid date so downstream parse_string
# will work. The value under the mask is arbitrary but a "modern" date
# is good.
mask = np.isnat(val1)
masked = np.any(mask)
if masked:
val1 = val1.copy()
val1[mask] = "2000"
# Make sure M(onth) and Y(ear) dates will parse and convert to bytestring
if val1.dtype.name in ["datetime64[M]", "datetime64[Y]"]:
val1 = val1.astype("datetime64[D]")
val1 = val1.astype("S")
# Standard ISO string parsing now
super().set_jds(val1, val2)
# Finally apply mask if necessary
if masked:
self.jd2[mask] = np.nan
@property
def value(self):
precision = self.precision
self.precision = 9
ret = super().value
self.precision = precision
return ret.astype("datetime64")
class TimeFITS(TimeString):
"""
FITS format: "[±Y]YYYY-MM-DD[THH:MM:SS[.sss]]".
    Like ISOT, but can give a signed five-digit year (mostly for negative years).
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date': date
- 'longdate_hms': as 'date_hms', but with signed 5-digit year
- 'longdate': as 'date', but with signed 5-digit year
See Rots et al., 2015, A&A 574:A36 (arXiv:1409.7583).
"""
name = "fits"
subfmts = (
(
"date_hms",
(
r"(?P<year>\d{4})-(?P<mon>\d\d)-(?P<mday>\d\d)T"
r"(?P<hour>\d\d):(?P<min>\d\d):(?P<sec>\d\d(\.\d*)?)"
),
"{year:04d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}",
),
(
"date",
r"(?P<year>\d{4})-(?P<mon>\d\d)-(?P<mday>\d\d)",
"{year:04d}-{mon:02d}-{day:02d}",
),
(
"longdate_hms",
(
r"(?P<year>[+-]\d{5})-(?P<mon>\d\d)-(?P<mday>\d\d)T"
r"(?P<hour>\d\d):(?P<min>\d\d):(?P<sec>\d\d(\.\d*)?)"
),
"{year:+06d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}",
),
(
"longdate",
r"(?P<year>[+-]\d{5})-(?P<mon>\d\d)-(?P<mday>\d\d)",
"{year:+06d}-{mon:02d}-{day:02d}",
),
)
# Add the regex that parses the scale and possible realization.
# Support for this is deprecated. Read old style but no longer write
# in this style.
subfmts = tuple(
(
subfmt[0],
subfmt[1] + r"(\((?P<scale>\w+)(\((?P<realization>\w+)\))?\))?",
subfmt[2],
)
for subfmt in subfmts
)
def parse_string(self, timestr, subfmts):
"""Read time and deprecated scale if present."""
# Try parsing with any of the allowed sub-formats.
for _, regex, _ in subfmts:
tm = re.match(regex, timestr)
if tm:
break
else:
raise ValueError(f"Time {timestr} does not match {self.name} format")
tm = tm.groupdict()
# Scale and realization are deprecated and strings in this form
# are no longer created. We issue a warning but still use the value.
if tm["scale"] is not None:
warnings.warn(
"FITS time strings should no longer have embedded time scale.",
AstropyDeprecationWarning,
)
# If a scale was given, translate from a possible deprecated
# timescale identifier to the scale used by Time.
fits_scale = tm["scale"].upper()
scale = FITS_DEPRECATED_SCALES.get(fits_scale, fits_scale.lower())
if scale not in TIME_SCALES:
raise ValueError(
f"Scale {scale!r} is not in the allowed scales "
f"{sorted(TIME_SCALES)}"
)
# If no scale was given in the initialiser, set the scale to
# that given in the string. Realization is ignored
# and is only supported to allow old-style strings to be
# parsed.
if self._scale is None:
self._scale = scale
if scale != self.scale:
raise ValueError(
f"Input strings for {self.name} class must all "
"have consistent time scales."
)
return [
int(tm["year"]),
int(tm["mon"]),
int(tm["mday"]),
int(tm.get("hour", 0)),
int(tm.get("min", 0)),
float(tm.get("sec", 0.0)),
]
@property
def value(self):
"""Convert times to strings, using signed 5 digit if necessary."""
if "long" not in self.out_subfmt:
# If we have times before year 0 or after year 9999, we can
# output only in a "long" format, using signed 5-digit years.
jd = self.jd1 + self.jd2
if jd.size and (jd.min() < 1721425.5 or jd.max() >= 5373484.5):
self.out_subfmt = "long" + self.out_subfmt
return super().value
class TimeEpochDate(TimeNumeric):
"""
Base class for support of Besselian and Julian epoch dates.
"""
_default_scale = "tt" # As of astropy 3.2, this is no longer 'utc'.
def set_jds(self, val1, val2):
self._check_scale(self._scale) # validate scale.
epoch_to_jd = getattr(erfa, self.epoch_to_jd)
jd1, jd2 = epoch_to_jd(val1 + val2)
self.jd1, self.jd2 = day_frac(jd1, jd2)
def to_value(self, **kwargs):
jd_to_epoch = getattr(erfa, self.jd_to_epoch)
value = jd_to_epoch(self.jd1, self.jd2)
return super().to_value(jd1=value, jd2=np.float64(0.0), **kwargs)
value = property(to_value)
class TimeBesselianEpoch(TimeEpochDate):
"""Besselian Epoch year as value(s) like 1950.0.
Since for this format the length of the year varies, input needs to
be floating point; it is not possible to use Quantity input, for
which a year always equals 365.25 days.
"""
name = "byear"
epoch_to_jd = "epb2jd"
jd_to_epoch = "epb"
def _check_val_type(self, val1, val2):
_check_val_type_not_quantity(self.name, val1, val2)
# FIXME: is val2 really okay here?
return super()._check_val_type(val1, val2)
class TimeJulianEpoch(TimeEpochDate):
"""Julian Epoch year as value(s) like 2000.0."""
name = "jyear"
unit = erfa.DJY # 365.25, the Julian year, for conversion to quantities
epoch_to_jd = "epj2jd"
jd_to_epoch = "epj"
class TimeEpochDateString(TimeString):
"""
Base class to support string Besselian and Julian epoch dates
such as 'B1950.0' or 'J2000.0' respectively.
"""
_default_scale = "tt" # As of astropy 3.2, this is no longer 'utc'.
def set_jds(self, val1, val2):
epoch_prefix = self.epoch_prefix
# Be liberal in what we accept: convert bytes to ascii.
to_string = (
str if val1.dtype.kind == "U" else lambda x: str(x.item(), encoding="ascii")
)
iterator = np.nditer(
[val1, None], op_dtypes=[val1.dtype, np.double], flags=["zerosize_ok"]
)
for val, years in iterator:
try:
time_str = to_string(val)
epoch_type, year_str = time_str[0], time_str[1:]
year = float(year_str)
if epoch_type.upper() != epoch_prefix:
raise ValueError
except (IndexError, ValueError, UnicodeEncodeError):
raise ValueError(f"Time {val} does not match {self.name} format")
else:
years[...] = year
self._check_scale(self._scale) # validate scale.
epoch_to_jd = getattr(erfa, self.epoch_to_jd)
jd1, jd2 = epoch_to_jd(iterator.operands[-1])
self.jd1, self.jd2 = day_frac(jd1, jd2)
@property
def value(self):
jd_to_epoch = getattr(erfa, self.jd_to_epoch)
years = jd_to_epoch(self.jd1, self.jd2)
# Use old-style format since it is a factor of 2 faster
str_fmt = self.epoch_prefix + "%." + str(self.precision) + "f"
outs = [str_fmt % year for year in years.flat]
return np.array(outs).reshape(self.jd1.shape)
class TimeBesselianEpochString(TimeEpochDateString):
"""Besselian Epoch year as string value(s) like 'B1950.0'."""
name = "byear_str"
epoch_to_jd = "epb2jd"
jd_to_epoch = "epb"
epoch_prefix = "B"
class TimeJulianEpochString(TimeEpochDateString):
"""Julian Epoch year as string value(s) like 'J2000.0'."""
name = "jyear_str"
epoch_to_jd = "epj2jd"
jd_to_epoch = "epj"
epoch_prefix = "J"
class TimeDeltaFormat(TimeFormat):
"""Base class for time delta representations."""
_registry = TIME_DELTA_FORMATS
def _check_scale(self, scale):
"""
Check that the scale is in the allowed list of scales, or is `None`.
"""
if scale is not None and scale not in TIME_DELTA_SCALES:
raise ScaleValueError(
f"Scale value '{scale}' not in allowed values {TIME_DELTA_SCALES}"
)
return scale
class TimeDeltaNumeric(TimeDeltaFormat, TimeNumeric):
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
self.jd1, self.jd2 = day_frac(val1, val2, divisor=1.0 / self.unit)
def to_value(self, **kwargs):
# Note that 1/unit is always exactly representable, so the
# following multiplications are exact.
factor = 1.0 / self.unit
jd1 = self.jd1 * factor
jd2 = self.jd2 * factor
return super().to_value(jd1=jd1, jd2=jd2, **kwargs)
value = property(to_value)
class TimeDeltaSec(TimeDeltaNumeric):
"""Time delta in SI seconds."""
name = "sec"
unit = 1.0 / erfa.DAYSEC # for quantity input
class TimeDeltaJD(TimeDeltaNumeric):
"""Time delta in Julian days (86400 SI seconds)."""
name = "jd"
unit = 1.0
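# Illustrative sketch, assuming ``astropy.time.TimeDelta``; the numeric delta
# formats above accept plain numbers:
#   TimeDelta(86400.0, format="sec")   # one day, expressed in SI seconds
#   TimeDelta(1.0, format="jd")        # one Julian day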
class TimeDeltaDatetime(TimeDeltaFormat, TimeUnique):
"""Time delta in datetime.timedelta."""
name = "datetime"
def _check_val_type(self, val1, val2):
if not all(isinstance(val, datetime.timedelta) for val in val1.flat):
raise TypeError(
f"Input values for {self.name} class must be datetime.timedelta objects"
)
if val2 is not None:
raise ValueError(
f"{self.name} objects do not accept a val2 but you provided {val2}"
)
return val1, None
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
iterator = np.nditer(
[val1, None, None],
flags=["refs_ok", "zerosize_ok"],
op_dtypes=[None, np.double, np.double],
)
day = datetime.timedelta(days=1)
for val, jd1, jd2 in iterator:
jd1[...], other = divmod(val.item(), day)
jd2[...] = other / day
self.jd1, self.jd2 = day_frac(iterator.operands[-2], iterator.operands[-1])
@property
def value(self):
iterator = np.nditer(
[self.jd1, self.jd2, None],
flags=["refs_ok", "zerosize_ok"],
op_dtypes=[None, None, object],
)
for jd1, jd2, out in iterator:
jd1_, jd2_ = day_frac(jd1, jd2)
out[...] = datetime.timedelta(days=jd1_, microseconds=jd2_ * 86400 * 1e6)
return self.mask_if_needed(iterator.operands[-1])
def _validate_jd_for_storage(jd):
if isinstance(jd, (float, int)):
return np.array(jd, dtype=np.float_)
if isinstance(jd, np.generic) and (
        (jd.dtype.kind == "f" and jd.dtype.itemsize <= 8) or jd.dtype.kind in "iu"
):
return np.array(jd, dtype=np.float_)
elif isinstance(jd, np.ndarray) and jd.dtype.kind == "f" and jd.dtype.itemsize == 8:
return jd
else:
raise TypeError(
"JD values must be arrays (possibly zero-dimensional) "
f"of floats but we got {jd!r} of type {type(jd)}"
)
def _broadcast_writeable(jd1, jd2):
if jd1.shape == jd2.shape:
return jd1, jd2
# When using broadcast_arrays, *both* are flagged with
# warn-on-write, even the one that wasn't modified, and
# require "C" only clears the flag if it actually copied
# anything.
shape = np.broadcast(jd1, jd2).shape
if jd1.shape == shape:
s_jd1 = jd1
else:
s_jd1 = np.require(np.broadcast_to(jd1, shape), requirements=["C", "W"])
if jd2.shape == shape:
s_jd2 = jd2
else:
s_jd2 = np.require(np.broadcast_to(jd2, shape), requirements=["C", "W"])
return s_jd1, s_jd2
# Import symbols from core.py that are used in this module. This succeeds
# because __init__.py imports format.py just before core.py.
from .core import TIME_DELTA_SCALES, TIME_SCALES, ScaleValueError, Time # noqa: E402
|
81bf37dee8e7e354cbe60af963701de156fc9de91cf666702add65283fbca93e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
__all__ = ["quantity_input"]
import inspect
import typing as T
from collections.abc import Sequence
from functools import wraps
from numbers import Number
import numpy as np
from .core import (
Unit,
UnitBase,
UnitsError,
add_enabled_equivalencies,
dimensionless_unscaled,
)
from .physical import PhysicalType, get_physical_type
from .quantity import Quantity
NoneType = type(None)
def _get_allowed_units(targets):
"""
From a list of target units (either as strings or unit objects) and physical
types, return a list of Unit objects.
"""
allowed_units = []
for target in targets:
try:
unit = Unit(target)
except (TypeError, ValueError):
try:
unit = get_physical_type(target)._unit
except (TypeError, ValueError, KeyError): # KeyError for Enum
raise ValueError(f"Invalid unit or physical type {target!r}.") from None
allowed_units.append(unit)
return allowed_units
def _validate_arg_value(
param_name, func_name, arg, targets, equivalencies, strict_dimensionless=False
):
"""
Validates the object passed in to the wrapped function, ``arg``, with target
unit or physical type, ``target``.
"""
if len(targets) == 0:
return
allowed_units = _get_allowed_units(targets)
# If dimensionless is an allowed unit and the argument is unit-less,
# allow numbers or numpy arrays with numeric dtypes
if (
dimensionless_unscaled in allowed_units
and not strict_dimensionless
and not hasattr(arg, "unit")
):
if isinstance(arg, Number):
return
elif isinstance(arg, np.ndarray) and np.issubdtype(arg.dtype, np.number):
return
for allowed_unit in allowed_units:
try:
if arg.unit.is_equivalent(allowed_unit, equivalencies=equivalencies):
break
except AttributeError: # Either there is no .unit or no .is_equivalent
if hasattr(arg, "unit"):
error_msg = "a 'unit' attribute without an 'is_equivalent' method"
else:
error_msg = "no 'unit' attribute"
raise TypeError(
f"Argument '{param_name}' to function '{func_name}'"
f" has {error_msg}. You should pass in an astropy "
"Quantity instead."
)
else:
error_msg = (
f"Argument '{param_name}' to function '{func_name}' must "
"be in units convertible to"
)
if len(targets) > 1:
targ_names = ", ".join([f"'{targ}'" for targ in targets])
raise UnitsError(f"{error_msg} one of: {targ_names}.")
else:
raise UnitsError(f"{error_msg} '{targets[0]}'.")
def _parse_annotation(target):
if target in (None, NoneType, inspect._empty):
return target
# check if unit-like
try:
unit = Unit(target)
except (TypeError, ValueError):
try:
ptype = get_physical_type(target)
except (TypeError, ValueError, KeyError): # KeyError for Enum
if isinstance(target, str):
raise ValueError(f"invalid unit or physical type {target!r}.") from None
else:
return ptype
else:
return unit
# could be a type hint
origin = T.get_origin(target)
if origin is T.Union:
return [_parse_annotation(t) for t in T.get_args(target)]
elif origin is not T.Annotated: # can't be Quantity[]
return False
# parse type hint
cls, *annotations = T.get_args(target)
if not issubclass(cls, Quantity) or not annotations:
return False
# get unit from type hint
unit, *rest = annotations
if not isinstance(unit, (UnitBase, PhysicalType)):
return False
return unit
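# Illustrative sketch of what _parse_annotation is expected to return
# (``u`` here stands for ``astropy.units``; outputs are indicative only):
#   _parse_annotation(u.m)                   -> the Unit "m"
#   _parse_annotation("length")              -> the 'length' PhysicalType
#   _parse_annotation(u.Quantity[u.arcsec])  -> the Unit "arcsec"
#   _parse_annotation(int)                   -> False  (not unit-related)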
class QuantityInput:
@classmethod
def as_decorator(cls, func=None, **kwargs):
r"""
A decorator for validating the units of arguments to functions.
Unit specifications can be provided as keyword arguments to the
decorator, or by using function annotation syntax. Arguments to the
decorator take precedence over any function annotations present.
A `~astropy.units.UnitsError` will be raised if the unit attribute of
the argument is not equivalent to the unit specified to the decorator or
in the annotation. If the argument has no unit attribute, i.e. it is not
a Quantity object, a `ValueError` will be raised unless the argument is
        an annotation. This is to allow non-Quantity annotations to pass
through.
Where an equivalency is specified in the decorator, the function will be
executed with that equivalency in force.
Notes
-----
The checking of arguments inside variable arguments to a function is not
supported (i.e. \*arg or \**kwargs).
        The original function is accessible via the ``__wrapped__`` attribute.
See :func:`functools.wraps` for details.
Examples
--------
.. code-block:: python
import astropy.units as u
@u.quantity_input(myangle=u.arcsec)
def myfunction(myangle):
return myangle**2
.. code-block:: python
import astropy.units as u
@u.quantity_input
def myfunction(myangle: u.arcsec):
return myangle**2
Or using a unit-aware Quantity annotation.
.. code-block:: python
@u.quantity_input
def myfunction(myangle: u.Quantity[u.arcsec]):
return myangle**2
Also you can specify a return value annotation, which will
cause the function to always return a `~astropy.units.Quantity` in that
unit.
.. code-block:: python
import astropy.units as u
@u.quantity_input
def myfunction(myangle: u.arcsec) -> u.deg**2:
return myangle**2
Using equivalencies::
import astropy.units as u
@u.quantity_input(myenergy=u.eV, equivalencies=u.mass_energy())
def myfunction(myenergy):
return myenergy**2
"""
self = cls(**kwargs)
if func is not None and not kwargs:
return self(func)
else:
return self
def __init__(self, func=None, strict_dimensionless=False, **kwargs):
self.equivalencies = kwargs.pop("equivalencies", [])
self.decorator_kwargs = kwargs
self.strict_dimensionless = strict_dimensionless
def __call__(self, wrapped_function):
# Extract the function signature for the function we are wrapping.
wrapped_signature = inspect.signature(wrapped_function)
# Define a new function to return in place of the wrapped one
@wraps(wrapped_function)
def wrapper(*func_args, **func_kwargs):
# Bind the arguments to our new function to the signature of the original.
bound_args = wrapped_signature.bind(*func_args, **func_kwargs)
# Iterate through the parameters of the original signature
for param in wrapped_signature.parameters.values():
# We do not support variable arguments (*args, **kwargs)
if param.kind in (
inspect.Parameter.VAR_KEYWORD,
inspect.Parameter.VAR_POSITIONAL,
):
continue
# Catch the (never triggered) case where bind relied on a default value.
if (
param.name not in bound_args.arguments
and param.default is not param.empty
):
bound_args.arguments[param.name] = param.default
# Get the value of this parameter (argument to new function)
arg = bound_args.arguments[param.name]
# Get target unit or physical type, either from decorator kwargs
# or annotations
if param.name in self.decorator_kwargs:
targets = self.decorator_kwargs[param.name]
is_annotation = False
else:
targets = param.annotation
is_annotation = True
# parses to unit if it's an annotation (or list thereof)
targets = _parse_annotation(targets)
# If the targets is empty, then no target units or physical
# types were specified so we can continue to the next arg
if targets is inspect.Parameter.empty:
continue
# If the argument value is None, and the default value is None,
# pass through the None even if there is a target unit
if arg is None and param.default is None:
continue
                # Here, we check whether multiple target units/physical types
# were specified in the decorator/annotation, or whether a
# single string (unit or physical type) or a Unit object was
# specified
if isinstance(targets, str) or not isinstance(targets, Sequence):
valid_targets = [targets]
# Check for None in the supplied list of allowed units and, if
# present and the passed value is also None, ignore.
elif None in targets or NoneType in targets:
if arg is None:
continue
else:
valid_targets = [t for t in targets if t is not None]
else:
valid_targets = targets
# If we're dealing with an annotation, skip all the targets that
                # are not strings, UnitBase instances, or PhysicalType
                # instances. This is to allow non-unit-related annotations
                # to pass through.
if is_annotation:
valid_targets = [
t
for t in valid_targets
if isinstance(t, (str, UnitBase, PhysicalType))
]
# Now we loop over the allowed units/physical types and validate
# the value of the argument:
_validate_arg_value(
param.name,
wrapped_function.__name__,
arg,
valid_targets,
self.equivalencies,
self.strict_dimensionless,
)
# Call the original function with any equivalencies in force.
with add_enabled_equivalencies(self.equivalencies):
return_ = wrapped_function(*func_args, **func_kwargs)
# Return
ra = wrapped_signature.return_annotation
valid_empty = (inspect.Signature.empty, None, NoneType, T.NoReturn)
if ra not in valid_empty:
target = (
ra
if T.get_origin(ra) not in (T.Annotated, T.Union)
else _parse_annotation(ra)
)
if isinstance(target, str) or not isinstance(target, Sequence):
target = [target]
valid_targets = [
t for t in target if isinstance(t, (str, UnitBase, PhysicalType))
]
_validate_arg_value(
"return",
wrapped_function.__name__,
return_,
valid_targets,
self.equivalencies,
self.strict_dimensionless,
)
if len(valid_targets) > 0:
return_ <<= valid_targets[0]
return return_
return wrapper
quantity_input = QuantityInput.as_decorator
|
c6560816485385322463f5ec5e5589cb1644d6d97c10fa4ed829beb9f150633d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines units used in the CDS format, both the units
defined in `Centre de Données astronomiques de Strasbourg
<https://cds.unistra.fr/>`_ `Standards for Astronomical Catalogues 2.0
<https://vizier.unistra.fr/vizier/doc/catstd-3.2.htx>`_ format and the `complete
set of supported units <https://vizier.unistra.fr/viz-bin/Unit>`_.
This format is used by VOTable up to version 1.2.
These units are not available in the top-level `astropy.units`
namespace. To use these units, you must import the `astropy.units.cds`
module::
>>> from astropy.units import cds
>>> q = 10. * cds.lyr # doctest: +SKIP
To include them in `~astropy.units.UnitBase.compose` and the results of
`~astropy.units.UnitBase.find_equivalent_units`, do::
>>> from astropy.units import cds
>>> cds.enable() # doctest: +SKIP
"""
_ns = globals()
def _initialize_module():
"""Initialize CDS units module."""
# Local imports to avoid polluting top-level namespace
import numpy as np
from astropy import units as u
from astropy.constants import si as _si
from . import core
# The CDS format also supports power-of-2 prefixes as defined here:
# http://physics.nist.gov/cuu/Units/binary.html
prefixes = core.si_prefixes + core.binary_prefixes
# CDS only uses the short prefixes
prefixes = [(short, short, factor) for (short, long, factor) in prefixes]
# The following units are defined in alphabetical order, directly from
# here: https://vizier.unistra.fr/viz-bin/Unit
mapping = [
(["A"], u.A, "Ampere"),
(["a"], u.a, "year", ["P"]),
(["a0"], _si.a0, "Bohr radius"),
(["al"], u.lyr, "Light year", ["c", "d"]),
(["lyr"], u.lyr, "Light year"),
(["alpha"], _si.alpha, "Fine structure constant"),
((["AA", "Å"], ["Angstrom", "Angstroem"]), u.AA, "Angstrom"),
(["arcmin", "arcm"], u.arcminute, "minute of arc"),
(["arcsec", "arcs"], u.arcsecond, "second of arc"),
(["atm"], _si.atm, "atmosphere"),
(["AU", "au"], u.au, "astronomical unit"),
(["bar"], u.bar, "bar"),
(["barn"], u.barn, "barn"),
(["bit"], u.bit, "bit"),
(["byte"], u.byte, "byte"),
(["C"], u.C, "Coulomb"),
(["c"], _si.c, "speed of light", ["p"]),
(["cal"], 4.1854 * u.J, "calorie"),
(["cd"], u.cd, "candela"),
(["ct"], u.ct, "count"),
(["D"], u.D, "Debye (dipole)"),
(["d"], u.d, "Julian day", ["c"]),
((["deg", "°"], ["degree"]), u.degree, "degree"),
(["dyn"], u.dyn, "dyne"),
(["e"], _si.e, "electron charge", ["m"]),
(["eps0"], _si.eps0, "electric constant"),
(["erg"], u.erg, "erg"),
(["eV"], u.eV, "electron volt"),
(["F"], u.F, "Farad"),
(["G"], _si.G, "Gravitation constant"),
(["g"], u.g, "gram"),
(["gauss"], u.G, "Gauss"),
(["geoMass", "Mgeo"], u.M_earth, "Earth mass"),
(["H"], u.H, "Henry"),
(["h"], u.h, "hour", ["p"]),
(["hr"], u.h, "hour"),
(["\\h"], _si.h, "Planck constant"),
(["Hz"], u.Hz, "Hertz"),
(["inch"], 0.0254 * u.m, "inch"),
(["J"], u.J, "Joule"),
(["JD"], u.d, "Julian day", ["M"]),
(["jovMass", "Mjup"], u.M_jup, "Jupiter mass"),
(["Jy"], u.Jy, "Jansky"),
(["K"], u.K, "Kelvin"),
(["k"], _si.k_B, "Boltzmann"),
(["l"], u.l, "litre", ["a"]),
(["lm"], u.lm, "lumen"),
(["Lsun", "solLum"], u.solLum, "solar luminosity"),
(["lx"], u.lx, "lux"),
(["m"], u.m, "meter"),
(["mag"], u.mag, "magnitude"),
(["me"], _si.m_e, "electron mass"),
(["min"], u.minute, "minute"),
(["MJD"], u.d, "Julian day"),
(["mmHg"], 133.322387415 * u.Pa, "millimeter of mercury"),
(["mol"], u.mol, "mole"),
(["mp"], _si.m_p, "proton mass"),
(["Msun", "solMass"], u.solMass, "solar mass"),
((["mu0", "µ0"], []), _si.mu0, "magnetic constant"),
(["muB"], _si.muB, "Bohr magneton"),
(["N"], u.N, "Newton"),
(["Ohm"], u.Ohm, "Ohm"),
(["Pa"], u.Pa, "Pascal"),
(["pc"], u.pc, "parsec"),
(["ph"], u.ph, "photon"),
(["pi"], u.Unit(np.pi), "π"),
(["pix"], u.pix, "pixel"),
(["ppm"], u.Unit(1e-6), "parts per million"),
(["R"], _si.R, "gas constant"),
(["rad"], u.radian, "radian"),
(["Rgeo"], _si.R_earth, "Earth equatorial radius"),
(["Rjup"], _si.R_jup, "Jupiter equatorial radius"),
(["Rsun", "solRad"], u.solRad, "solar radius"),
(["Ry"], u.Ry, "Rydberg"),
(["S"], u.S, "Siemens"),
(["s", "sec"], u.s, "second"),
(["sr"], u.sr, "steradian"),
(["Sun"], u.Sun, "solar unit"),
(["T"], u.T, "Tesla"),
(["t"], 1e3 * u.kg, "metric tonne", ["c"]),
(["u"], _si.u, "atomic mass", ["da", "a"]),
(["V"], u.V, "Volt"),
(["W"], u.W, "Watt"),
(["Wb"], u.Wb, "Weber"),
(["yr"], u.a, "year"),
]
for entry in mapping:
if len(entry) == 3:
names, unit, doc = entry
excludes = []
else:
names, unit, doc, excludes = entry
core.def_unit(
names,
unit,
prefixes=prefixes,
namespace=_ns,
doc=doc,
exclude_prefixes=excludes,
)
core.def_unit(["µas"], u.microarcsecond, doc="microsecond of arc", namespace=_ns)
core.def_unit(["mas"], u.milliarcsecond, doc="millisecond of arc", namespace=_ns)
core.def_unit(
["---", "-"],
u.dimensionless_unscaled,
doc="dimensionless and unscaled",
namespace=_ns,
)
core.def_unit(["%"], u.percent, doc="percent", namespace=_ns)
# The Vizier "standard" defines this in units of "kg s-3", but
# that may not make a whole lot of sense, so here we just define
# it as its own new disconnected unit.
core.def_unit(["Crab"], prefixes=prefixes, namespace=_ns, doc="Crab (X-ray) flux")
_initialize_module()
###########################################################################
# DOCSTRING
if __doc__ is not None:
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
__doc__ += _generate_unit_summary(globals())
def enable():
"""
Enable CDS units so they appear in results of
`~astropy.units.UnitBase.find_equivalent_units` and
`~astropy.units.UnitBase.compose`. This will disable
all of the "default" `astropy.units` units, since there
are some namespace clashes between the two.
This may be used with the ``with`` statement to enable CDS
units only temporarily.
"""
# Local imports to avoid cyclical import and polluting namespace
import inspect
from .core import set_enabled_units
return set_enabled_units(inspect.getmodule(enable))
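# Illustrative sketch of typical usage, assuming the public
# ``astropy.units.cds`` module:
#   from astropy.units import cds
#   with cds.enable():
#       ...  # CDS units now take part in compose()/find_equivalent_units()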
|
873fb5756220db420f3a7c16c7e740cdc8dc9b4d209d3823c622e1d297379310 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Core units classes and functions.
"""
import inspect
import operator
import textwrap
import warnings
import numpy as np
from astropy.utils.decorators import lazyproperty
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.misc import isiterable
from . import format as unit_format
from .utils import (
is_effectively_unity,
resolve_fractions,
sanitize_scale,
validate_power,
)
__all__ = [
"UnitsError",
"UnitsWarning",
"UnitConversionError",
"UnitTypeError",
"UnitBase",
"NamedUnit",
"IrreducibleUnit",
"Unit",
"CompositeUnit",
"PrefixUnit",
"UnrecognizedUnit",
"def_unit",
"get_current_unit_registry",
"set_enabled_units",
"add_enabled_units",
"set_enabled_equivalencies",
"add_enabled_equivalencies",
"set_enabled_aliases",
"add_enabled_aliases",
"dimensionless_unscaled",
"one",
]
UNITY = 1.0
def _flatten_units_collection(items):
"""
Given a list of sequences, modules or dictionaries of units, or
single units, return a flat set of all the units found.
"""
if not isinstance(items, list):
items = [items]
result = set()
for item in items:
if isinstance(item, UnitBase):
result.add(item)
else:
if isinstance(item, dict):
units = item.values()
elif inspect.ismodule(item):
units = vars(item).values()
elif isiterable(item):
units = item
else:
continue
for unit in units:
if isinstance(unit, UnitBase):
result.add(unit)
return result
def _normalize_equivalencies(equivalencies):
"""Normalizes equivalencies ensuring each is a 4-tuple.
The resulting tuple is of the form::
(from_unit, to_unit, forward_func, backward_func)
Parameters
----------
equivalencies : list of equivalency pairs
Raises
------
ValueError if an equivalency cannot be interpreted
"""
if equivalencies is None:
return []
normalized = []
for i, equiv in enumerate(equivalencies):
if len(equiv) == 2:
funit, tunit = equiv
a = b = lambda x: x
elif len(equiv) == 3:
funit, tunit, a = equiv
b = a
elif len(equiv) == 4:
funit, tunit, a, b = equiv
else:
raise ValueError(f"Invalid equivalence entry {i}: {equiv!r}")
if not (
funit is Unit(funit)
and (tunit is None or tunit is Unit(tunit))
and callable(a)
and callable(b)
):
raise ValueError(f"Invalid equivalence entry {i}: {equiv!r}")
normalized.append((funit, tunit, a, b))
return normalized
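# Illustrative sketch of the normalization above (hypothetical input; ``u``
# stands for ``astropy.units``): a 2-tuple equivalency is padded with identity
# converters, so
#   _normalize_equivalencies([(u.m, u.km)])
# is expected to return [(u.m, u.km, <identity>, <identity>)].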
class _UnitRegistry:
"""
Manages a registry of the enabled units.
"""
def __init__(self, init=[], equivalencies=[], aliases={}):
if isinstance(init, _UnitRegistry):
            # If passed another registry, we don't need to rebuild everything,
            # but because these are mutable types we don't want to create
            # conflicts, so everything needs to be copied.
self._equivalencies = init._equivalencies.copy()
self._aliases = init._aliases.copy()
self._all_units = init._all_units.copy()
self._registry = init._registry.copy()
self._non_prefix_units = init._non_prefix_units.copy()
# The physical type is a dictionary containing sets as values.
# All of these must be copied otherwise we could alter the old
# registry.
self._by_physical_type = {
k: v.copy() for k, v in init._by_physical_type.items()
}
else:
self._reset_units()
self._reset_equivalencies()
self._reset_aliases()
self.add_enabled_units(init)
self.add_enabled_equivalencies(equivalencies)
self.add_enabled_aliases(aliases)
def _reset_units(self):
self._all_units = set()
self._non_prefix_units = set()
self._registry = {}
self._by_physical_type = {}
def _reset_equivalencies(self):
self._equivalencies = set()
def _reset_aliases(self):
self._aliases = {}
@property
def registry(self):
return self._registry
@property
def all_units(self):
return self._all_units
@property
def non_prefix_units(self):
return self._non_prefix_units
def set_enabled_units(self, units):
"""
Sets the units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
Parameters
----------
units : list of sequence, dict, or module
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be "enabled" for searching through by
methods like `UnitBase.find_equivalent_units` and
`UnitBase.compose`.
"""
self._reset_units()
return self.add_enabled_units(units)
def add_enabled_units(self, units):
"""
Adds to the set of units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
Parameters
----------
units : list of sequence, dict, or module
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be added to the "enabled" set for
searching through by methods like
`UnitBase.find_equivalent_units` and `UnitBase.compose`.
"""
units = _flatten_units_collection(units)
for unit in units:
# Loop through all of the names first, to ensure all of them
# are new, then add them all as a single "transaction" below.
for st in unit._names:
if st in self._registry and unit != self._registry[st]:
raise ValueError(
f"Object with name {st!r} already exists in namespace. "
"Filter the set of units to avoid name clashes before "
"enabling them."
)
for st in unit._names:
self._registry[st] = unit
self._all_units.add(unit)
if not isinstance(unit, PrefixUnit):
self._non_prefix_units.add(unit)
hash = unit._get_physical_type_id()
self._by_physical_type.setdefault(hash, set()).add(unit)
def get_units_with_physical_type(self, unit):
"""
Get all units in the registry with the same physical type as
the given unit.
Parameters
----------
unit : UnitBase instance
"""
return self._by_physical_type.get(unit._get_physical_type_id(), set())
@property
def equivalencies(self):
return list(self._equivalencies)
def set_enabled_equivalencies(self, equivalencies):
"""
Sets the equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Use with care.
Parameters
----------
equivalencies : list of tuple
List of equivalent pairs, e.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
"""
self._reset_equivalencies()
return self.add_enabled_equivalencies(equivalencies)
def add_enabled_equivalencies(self, equivalencies):
"""
Adds to the set of equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Use with care.
Parameters
----------
equivalencies : list of tuple
List of equivalent pairs, e.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
"""
# pre-normalize list to help catch mistakes
equivalencies = _normalize_equivalencies(equivalencies)
self._equivalencies |= set(equivalencies)
@property
def aliases(self):
return self._aliases
def set_enabled_aliases(self, aliases):
"""
Set aliases for units.
Parameters
----------
aliases : dict of str, Unit
The aliases to set. The keys must be the string aliases, and values
must be the `astropy.units.Unit` that the alias will be mapped to.
Raises
------
ValueError
If the alias already defines a different unit.
"""
self._reset_aliases()
self.add_enabled_aliases(aliases)
def add_enabled_aliases(self, aliases):
"""
Add aliases for units.
Parameters
----------
aliases : dict of str, Unit
The aliases to add. The keys must be the string aliases, and values
must be the `astropy.units.Unit` that the alias will be mapped to.
Raises
------
ValueError
If the alias already defines a different unit.
"""
for alias, unit in aliases.items():
if alias in self._registry and unit != self._registry[alias]:
raise ValueError(
f"{alias} already means {self._registry[alias]}, so "
f"cannot be used as an alias for {unit}."
)
if alias in self._aliases and unit != self._aliases[alias]:
raise ValueError(
f"{alias} already is an alias for {self._aliases[alias]}, so "
f"cannot be used as an alias for {unit}."
)
for alias, unit in aliases.items():
if alias not in self._registry and alias not in self._aliases:
self._aliases[alias] = unit
class _UnitContext:
def __init__(self, init=[], equivalencies=[]):
_unit_registries.append(_UnitRegistry(init=init, equivalencies=equivalencies))
def __enter__(self):
pass
def __exit__(self, type, value, tb):
_unit_registries.pop()
_unit_registries = [_UnitRegistry()]
def get_current_unit_registry():
return _unit_registries[-1]
def set_enabled_units(units):
"""
Sets the units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
This may be used either permanently, or as a context manager using
the ``with`` statement (see example below).
Parameters
----------
units : list of sequence, dict, or module
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be "enabled" for searching through by methods
like `UnitBase.find_equivalent_units` and `UnitBase.compose`.
Examples
--------
>>> from astropy import units as u
>>> with u.set_enabled_units([u.pc]):
... u.m.find_equivalent_units()
...
Primary name | Unit definition | Aliases
[
pc | 3.08568e+16 m | parsec ,
]
>>> u.m.find_equivalent_units()
Primary name | Unit definition | Aliases
[
AU | 1.49598e+11 m | au, astronomical_unit ,
Angstrom | 1e-10 m | AA, angstrom ,
cm | 0.01 m | centimeter ,
earthRad | 6.3781e+06 m | R_earth, Rearth ,
jupiterRad | 7.1492e+07 m | R_jup, Rjup, R_jupiter, Rjupiter ,
lsec | 2.99792e+08 m | lightsecond ,
lyr | 9.46073e+15 m | lightyear ,
m | irreducible | meter ,
micron | 1e-06 m | ,
pc | 3.08568e+16 m | parsec ,
solRad | 6.957e+08 m | R_sun, Rsun ,
]
"""
# get a context with a new registry, using equivalencies of the current one
context = _UnitContext(equivalencies=get_current_unit_registry().equivalencies)
# in this new current registry, enable the units requested
get_current_unit_registry().set_enabled_units(units)
return context
def add_enabled_units(units):
"""
Adds to the set of units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
This may be used either permanently, or as a context manager using
the ``with`` statement (see example below).
Parameters
----------
units : list of sequence, dict, or module
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be added to the "enabled" set for searching
through by methods like `UnitBase.find_equivalent_units` and
`UnitBase.compose`.
Examples
--------
>>> from astropy import units as u
>>> from astropy.units import imperial
>>> with u.add_enabled_units(imperial):
... u.m.find_equivalent_units()
...
Primary name | Unit definition | Aliases
[
AU | 1.49598e+11 m | au, astronomical_unit ,
Angstrom | 1e-10 m | AA, angstrom ,
cm | 0.01 m | centimeter ,
earthRad | 6.3781e+06 m | R_earth, Rearth ,
ft | 0.3048 m | foot ,
fur | 201.168 m | furlong ,
inch | 0.0254 m | ,
jupiterRad | 7.1492e+07 m | R_jup, Rjup, R_jupiter, Rjupiter ,
lsec | 2.99792e+08 m | lightsecond ,
lyr | 9.46073e+15 m | lightyear ,
m | irreducible | meter ,
mi | 1609.34 m | mile ,
micron | 1e-06 m | ,
mil | 2.54e-05 m | thou ,
nmi | 1852 m | nauticalmile, NM ,
pc | 3.08568e+16 m | parsec ,
solRad | 6.957e+08 m | R_sun, Rsun ,
yd | 0.9144 m | yard ,
]
"""
# get a context with a new registry, which is a copy of the current one
context = _UnitContext(get_current_unit_registry())
# in this new current registry, enable the further units requested
get_current_unit_registry().add_enabled_units(units)
return context
def set_enabled_equivalencies(equivalencies):
"""
Sets the equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Use with care.
Parameters
----------
equivalencies : list of tuple
list of equivalent pairs, e.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
Examples
--------
Exponentiation normally requires dimensionless quantities. To avoid
problems with complex phases::
>>> from astropy import units as u
>>> with u.set_enabled_equivalencies(u.dimensionless_angles()):
... phase = 0.5 * u.cycle
... np.exp(1j*phase) # doctest: +FLOAT_CMP
<Quantity -1.+1.2246468e-16j>
"""
# get a context with a new registry, using all units of the current one
context = _UnitContext(get_current_unit_registry())
# in this new current registry, enable the equivalencies requested
get_current_unit_registry().set_enabled_equivalencies(equivalencies)
return context
def add_enabled_equivalencies(equivalencies):
"""
Adds to the equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Since no equivalencies are enabled by default, generally it is recommended
to use `set_enabled_equivalencies`.
Parameters
----------
equivalencies : list of tuple
list of equivalent pairs, e.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
"""
# get a context with a new registry, which is a copy of the current one
context = _UnitContext(get_current_unit_registry())
# in this new current registry, enable the further equivalencies requested
get_current_unit_registry().add_enabled_equivalencies(equivalencies)
return context
def set_enabled_aliases(aliases):
"""
Set aliases for units.
This is useful for handling alternate spellings for units, or
misspelled units in files one is trying to read.
Parameters
----------
aliases : dict of str, Unit
The aliases to set. The keys must be the string aliases, and values
must be the `astropy.units.Unit` that the alias will be mapped to.
Raises
------
ValueError
If the alias already defines a different unit.
Examples
--------
To temporarily allow for a misspelled 'Angstroem' unit::
>>> from astropy import units as u
>>> with u.set_enabled_aliases({'Angstroem': u.Angstrom}):
... print(u.Unit("Angstroem", parse_strict="raise") == u.Angstrom)
True
"""
# get a context with a new registry, which is a copy of the current one
context = _UnitContext(get_current_unit_registry())
    # in this new current registry, set the aliases requested
get_current_unit_registry().set_enabled_aliases(aliases)
return context
def add_enabled_aliases(aliases):
"""
Add aliases for units.
This is useful for handling alternate spellings for units, or
misspelled units in files one is trying to read.
Since no aliases are enabled by default, generally it is recommended
to use `set_enabled_aliases`.
Parameters
----------
aliases : dict of str, Unit
The aliases to add. The keys must be the string aliases, and values
must be the `astropy.units.Unit` that the alias will be mapped to.
Raises
------
ValueError
If the alias already defines a different unit.
Examples
--------
To temporarily allow for a misspelled 'Angstroem' unit::
>>> from astropy import units as u
>>> with u.add_enabled_aliases({'Angstroem': u.Angstrom}):
... print(u.Unit("Angstroem", parse_strict="raise") == u.Angstrom)
True
"""
# get a context with a new registry, which is a copy of the current one
context = _UnitContext(get_current_unit_registry())
    # in this new current registry, add the further aliases requested
get_current_unit_registry().add_enabled_aliases(aliases)
return context
class UnitsError(Exception):
"""
The base class for unit-specific exceptions.
"""
class UnitScaleError(UnitsError, ValueError):
"""
Used to catch the errors involving scaled units,
which are not recognized by FITS format.
"""
pass
class UnitConversionError(UnitsError, ValueError):
"""
Used specifically for errors related to converting between units or
interpreting units in terms of other units.
"""
class UnitTypeError(UnitsError, TypeError):
"""
Used specifically for errors in setting to units not allowed by a class.
E.g., would be raised if the unit of an `~astropy.coordinates.Angle`
instances were set to a non-angular unit.
"""
class UnitsWarning(AstropyWarning):
"""
The base class for unit-specific warnings.
"""
class UnitBase:
"""
Abstract base class for units.
Most of the arithmetic operations on units are defined in this
base class.
Should not be instantiated by users directly.
"""
# Make sure that __rmul__ of units gets called over the __mul__ of Numpy
# arrays to avoid element-wise multiplication.
__array_priority__ = 1000
_hash = None
_type_id = None
def __deepcopy__(self, memo):
# This may look odd, but the units conversion will be very
# broken after deep-copying if we don't guarantee that a given
# physical unit corresponds to only one instance
return self
def _repr_latex_(self):
"""
Generate latex representation of unit name. This is used by
the IPython notebook to print a unit with a nice layout.
Returns
-------
Latex string
"""
return unit_format.Latex.to_string(self)
def __bytes__(self):
"""Return string representation for unit."""
return unit_format.Generic.to_string(self).encode("unicode_escape")
def __str__(self):
"""Return string representation for unit."""
return unit_format.Generic.to_string(self)
def __repr__(self):
string = unit_format.Generic.to_string(self)
return f'Unit("{string}")'
def _get_physical_type_id(self):
"""
Returns an identifier that uniquely identifies the physical
type of this unit. It is comprised of the bases and powers of
this unit, without the scale. Since it is hashable, it is
useful as a dictionary key.
"""
if self._type_id is None:
unit = self.decompose()
self._type_id = tuple(zip((base.name for base in unit.bases), unit.powers))
return self._type_id
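    # Illustrative sketch: for a unit such as km / s the identifier is
    # expected to look like (('m', 1), ('s', -1)); the scale (here 1000)
    # is deliberately not part of the identifier.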
@property
def names(self):
"""
Returns all of the names associated with this unit.
"""
raise AttributeError(
"Can not get names from unnamed units. Perhaps you meant to_string()?"
)
@property
def name(self):
"""
Returns the canonical (short) name associated with this unit.
"""
raise AttributeError(
"Can not get names from unnamed units. Perhaps you meant to_string()?"
)
@property
def aliases(self):
"""
Returns the alias (long) names for this unit.
"""
raise AttributeError(
"Can not get aliases from unnamed units. Perhaps you meant to_string()?"
)
@property
def scale(self):
"""
Return the scale of the unit.
"""
return 1.0
@property
def bases(self):
"""
Return the bases of the unit.
"""
return [self]
@property
def powers(self):
"""
Return the powers of the unit.
"""
return [1]
def to_string(self, format=unit_format.Generic, **kwargs):
r"""Output the unit in the given format as a string.
Parameters
----------
format : `astropy.units.format.Base` instance or str
The name of a format or a formatter object. If not
provided, defaults to the generic format.
**kwargs
Further options forwarded to the formatter. Currently
recognized is ``fraction``, which can take the following values:
- `False` : display unit bases with negative powers as they are;
- 'inline' or `True` : use a single-line fraction;
- 'multiline' : use a multiline fraction (available for the
'latex', 'console' and 'unicode' formats only).
Raises
------
TypeError
If ``format`` is of the wrong type.
ValueError
If ``format`` or ``fraction`` are not recognized.
Examples
--------
>>> import astropy.units as u
>>> kms = u.Unit('km / s')
>>> kms.to_string() # Generic uses fraction='inline' by default
'km / s'
>>> kms.to_string('latex') # Latex uses fraction='multiline' by default
'$\\mathrm{\\frac{km}{s}}$'
>>> print(kms.to_string('unicode', fraction=False))
km s⁻¹
>>> print(kms.to_string('unicode', fraction='inline'))
km / s
>>> print(kms.to_string('unicode', fraction='multiline'))
km
──
s
"""
f = unit_format.get_format(format)
return f.to_string(self, **kwargs)
def __format__(self, format_spec):
"""Try to format units using a formatter."""
try:
return self.to_string(format=format_spec)
except ValueError:
return format(str(self), format_spec)
@staticmethod
def _normalize_equivalencies(equivalencies):
"""Normalizes equivalencies, ensuring each is a 4-tuple.
The resulting tuple is of the form::
(from_unit, to_unit, forward_func, backward_func)
Parameters
----------
equivalencies : list of equivalency pairs, or None
Returns
-------
A normalized list, including possible global defaults set by, e.g.,
`set_enabled_equivalencies`, except when `equivalencies`=`None`,
in which case the returned list is always empty.
Raises
------
ValueError if an equivalency cannot be interpreted
"""
normalized = _normalize_equivalencies(equivalencies)
if equivalencies is not None:
normalized += get_current_unit_registry().equivalencies
return normalized
def __pow__(self, p):
p = validate_power(p)
return CompositeUnit(1, [self], [p], _error_check=False)
def __truediv__(self, m):
if isinstance(m, (bytes, str)):
m = Unit(m)
if isinstance(m, UnitBase):
if m.is_unity():
return self
return CompositeUnit(1, [self, m], [1, -1], _error_check=False)
try:
# Cannot handle this as Unit, re-try as Quantity
from .quantity import Quantity
return Quantity(1, self) / m
except TypeError:
return NotImplemented
def __rtruediv__(self, m):
if isinstance(m, (bytes, str)):
return Unit(m) / self
try:
# Cannot handle this as Unit. Here, m cannot be a Quantity,
# so we make it into one, fasttracking when it does not have a
# unit, for the common case of <array> / <unit>.
from .quantity import Quantity
if hasattr(m, "unit"):
result = Quantity(m)
result /= self
return result
else:
return Quantity(m, self ** (-1))
except TypeError:
return NotImplemented
def __mul__(self, m):
if isinstance(m, (bytes, str)):
m = Unit(m)
if isinstance(m, UnitBase):
if m.is_unity():
return self
elif self.is_unity():
return m
return CompositeUnit(1, [self, m], [1, 1], _error_check=False)
# Cannot handle this as Unit, re-try as Quantity.
try:
from .quantity import Quantity
return Quantity(1, unit=self) * m
except TypeError:
return NotImplemented
def __rmul__(self, m):
if isinstance(m, (bytes, str)):
return Unit(m) * self
# Cannot handle this as Unit. Here, m cannot be a Quantity,
# so we make it into one, fasttracking when it does not have a unit
# for the common case of <array> * <unit>.
try:
from .quantity import Quantity
if hasattr(m, "unit"):
result = Quantity(m)
result *= self
return result
else:
return Quantity(m, unit=self)
except TypeError:
return NotImplemented
def __rlshift__(self, m):
try:
from .quantity import Quantity
return Quantity(m, self, copy=False, subok=True)
except Exception:
return NotImplemented
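    # Descriptive note: ``__rlshift__`` is what makes expressions such as
    # ``3 << u.m`` or ``np.arange(3) << u.m`` produce a Quantity with unit
    # ``m`` (``u`` stands for ``astropy.units``).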
def __rrshift__(self, m):
warnings.warn(
">> is not implemented. Did you mean to convert "
f"to a Quantity with unit {m} using '<<'?",
AstropyWarning,
)
return NotImplemented
def __hash__(self):
if self._hash is None:
parts = (
[str(self.scale)]
+ [x.name for x in self.bases]
+ [str(x) for x in self.powers]
)
self._hash = hash(tuple(parts))
return self._hash
def __getstate__(self):
# If we get pickled, we should *not* store the memoized members since
# hashes of strings vary between sessions.
state = self.__dict__.copy()
state.pop("_hash", None)
state.pop("_type_id", None)
return state
def __eq__(self, other):
if self is other:
return True
try:
other = Unit(other, parse_strict="silent")
except (ValueError, UnitsError, TypeError):
return NotImplemented
# Other is unit-like, but the test below requires it is a UnitBase
# instance; if it is not, give up (so that other can try).
if not isinstance(other, UnitBase):
return NotImplemented
try:
return is_effectively_unity(self._to(other))
except UnitsError:
return False
def __ne__(self, other):
return not (self == other)
def __le__(self, other):
scale = self._to(Unit(other))
return scale <= 1.0 or is_effectively_unity(scale)
def __ge__(self, other):
scale = self._to(Unit(other))
return scale >= 1.0 or is_effectively_unity(scale)
def __lt__(self, other):
return not (self >= other)
def __gt__(self, other):
return not (self <= other)
def __neg__(self):
return self * -1.0
def is_equivalent(self, other, equivalencies=[]):
"""
Returns `True` if this unit is equivalent to ``other``.
Parameters
----------
other : `~astropy.units.Unit`, str, or tuple
The unit to convert to. If a tuple of units is specified, this
method returns true if the unit matches any of those in the tuple.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
Returns
-------
bool
"""
equivalencies = self._normalize_equivalencies(equivalencies)
if isinstance(other, tuple):
return any(self.is_equivalent(u, equivalencies) for u in other)
other = Unit(other, parse_strict="silent")
return self._is_equivalent(other, equivalencies)
def _is_equivalent(self, other, equivalencies=[]):
"""Returns `True` if this unit is equivalent to `other`.
See `is_equivalent`, except that a proper Unit object should be
given (i.e., no string) and that the equivalency list should be
normalized using `_normalize_equivalencies`.
"""
if isinstance(other, UnrecognizedUnit):
return False
if self._get_physical_type_id() == other._get_physical_type_id():
return True
elif len(equivalencies):
unit = self.decompose()
other = other.decompose()
for a, b, forward, backward in equivalencies:
if b is None:
# after canceling, is what's left convertible
# to dimensionless (according to the equivalency)?
try:
(other / unit).decompose([a])
return True
except Exception:
pass
elif (a._is_equivalent(unit) and b._is_equivalent(other)) or (
b._is_equivalent(unit) and a._is_equivalent(other)
):
return True
return False
def _apply_equivalencies(self, unit, other, equivalencies):
"""
Internal function (used from `_get_converter`) to apply
equivalence pairs.
"""
def make_converter(scale1, func, scale2):
def convert(v):
return func(_condition_arg(v) / scale1) * scale2
return convert
for funit, tunit, a, b in equivalencies:
if tunit is None:
ratio = other.decompose() / unit.decompose()
try:
ratio_in_funit = ratio.decompose([funit])
return make_converter(ratio_in_funit.scale, a, 1.0)
except UnitsError:
pass
else:
try:
scale1 = funit._to(unit)
scale2 = tunit._to(other)
return make_converter(scale1, a, scale2)
except UnitsError:
pass
try:
scale1 = tunit._to(unit)
scale2 = funit._to(other)
return make_converter(scale1, b, scale2)
except UnitsError:
pass
def get_err_str(unit):
unit_str = unit.to_string("unscaled")
physical_type = unit.physical_type
if physical_type != "unknown":
unit_str = f"'{unit_str}' ({physical_type})"
else:
unit_str = f"'{unit_str}'"
return unit_str
unit_str = get_err_str(unit)
other_str = get_err_str(other)
raise UnitConversionError(f"{unit_str} and {other_str} are not convertible")
def _get_converter(self, other, equivalencies=[]):
"""Get a converter for values in ``self`` to ``other``.
If no conversion is necessary, returns ``unit_scale_converter``
(which is used as a check in quantity helpers).
"""
# First see if it is just a scaling.
try:
scale = self._to(other)
except UnitsError:
pass
else:
if scale == 1.0:
return unit_scale_converter
else:
return lambda val: scale * _condition_arg(val)
# if that doesn't work, maybe we can do it with equivalencies?
try:
return self._apply_equivalencies(
self, other, self._normalize_equivalencies(equivalencies)
)
except UnitsError as exc:
# Last hope: maybe other knows how to do it?
# We assume the equivalencies have the unit itself as first item.
# TODO: maybe better for other to have a `_back_converter` method?
if hasattr(other, "equivalencies"):
for funit, tunit, a, b in other.equivalencies:
if other is funit:
try:
converter = self._get_converter(tunit, equivalencies)
except Exception:
pass
else:
return lambda v: b(converter(v))
raise exc
def _to(self, other):
"""
Returns the scale to the specified unit.
See `to`, except that a Unit object should be given (i.e., no
string), and that all defaults are used, i.e., no
equivalencies and value=1.
"""
# There are many cases where we just want to ensure a Quantity is
# of a particular unit, without checking whether it's already in
# a particular unit. If we're being asked to convert from a unit
# to itself, we can short-circuit all of this.
if self is other:
return 1.0
# Don't presume decomposition is possible; e.g.,
# conversion to function units is through equivalencies.
if isinstance(other, UnitBase):
self_decomposed = self.decompose()
other_decomposed = other.decompose()
# Check quickly whether equivalent. This is faster than
# `is_equivalent`, because it doesn't generate the entire
# physical type list of both units. In other words it "fails
# fast".
if self_decomposed.powers == other_decomposed.powers and all(
self_base is other_base
for (self_base, other_base) in zip(
self_decomposed.bases, other_decomposed.bases
)
):
return self_decomposed.scale / other_decomposed.scale
raise UnitConversionError(f"'{self!r}' is not a scaled version of '{other!r}'")
def to(self, other, value=UNITY, equivalencies=[]):
"""
Return the converted values in the specified unit.
Parameters
----------
other : unit-like
The unit to convert to.
value : int, float, or scalar array-like, optional
Value(s) in the current unit to be converted to the
specified unit. If not provided, defaults to 1.0
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
Returns
-------
values : scalar or array
Converted value(s). Input value sequences are returned as
numpy arrays.
Raises
------
UnitsError
If units are inconsistent.
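Examples
--------
A minimal, illustrative sketch (assuming the usual ``astropy.units``
import); only simple scalings are shown.
>>> from astropy import units as u
>>> u.km.to(u.m)
1000.0
>>> u.km.to(u.cm, 2.5)
250000.0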
"""
if other is self and value is UNITY:
return UNITY
else:
return self._get_converter(Unit(other), equivalencies)(value)
def in_units(self, other, value=1.0, equivalencies=[]):
"""
Alias for `to` for backward compatibility with pynbody.
"""
return self.to(other, value=value, equivalencies=equivalencies)
def decompose(self, bases=set()):
"""
Return a unit object composed of only irreducible units.
Parameters
----------
bases : sequence of UnitBase, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
This will raise a `UnitsError` if it's not possible
to do so.
Returns
-------
unit : `~astropy.units.CompositeUnit`
New object containing only irreducible unit objects.
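Examples
--------
An illustrative sketch, assuming the standard SI units shipped
with ``astropy.units`` are available.
>>> from astropy import units as u
>>> u.N.decompose()
Unit("kg m / s2")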
"""
raise NotImplementedError()
def _compose(
self, equivalencies=[], namespace=[], max_depth=2, depth=0, cached_results=None
):
def is_final_result(unit):
# Returns True if this result contains only the expected
# units
return all(base in namespace for base in unit.bases)
unit = self.decompose()
key = hash(unit)
cached = cached_results.get(key)
if cached is not None:
if isinstance(cached, Exception):
raise cached
return cached
# Prevent too many levels of recursion
# And special case for dimensionless unit
if depth >= max_depth:
cached_results[key] = [unit]
return [unit]
# Make a list including all of the equivalent units
units = [unit]
for funit, tunit, a, b in equivalencies:
if tunit is not None:
if self._is_equivalent(funit):
scale = funit.decompose().scale / unit.scale
units.append(Unit(a(1.0 / scale) * tunit).decompose())
elif self._is_equivalent(tunit):
scale = tunit.decompose().scale / unit.scale
units.append(Unit(b(1.0 / scale) * funit).decompose())
else:
if self._is_equivalent(funit):
units.append(Unit(unit.scale))
# Store partial results
partial_results = []
# Store final results that reduce to a single unit or pair of
# units
if len(unit.bases) == 0:
final_results = [{unit}, set()]
else:
final_results = [set(), set()]
for tunit in namespace:
tunit_decomposed = tunit.decompose()
for u in units:
# If the unit is a base unit, look for an exact match
# to one of the bases of the target unit. If found,
# factor by the same power as the target unit's base.
# This allows us to factor out fractional powers
# without needing to do an exhaustive search.
if len(tunit_decomposed.bases) == 1:
for base, power in zip(u.bases, u.powers):
if tunit_decomposed._is_equivalent(base):
tunit = tunit**power
tunit_decomposed = tunit_decomposed**power
break
composed = (u / tunit_decomposed).decompose()
factored = composed * tunit
len_bases = len(composed.bases)
if is_final_result(factored) and len_bases <= 1:
final_results[len_bases].add(factored)
else:
partial_results.append((len_bases, composed, tunit))
# Do we have any minimal results?
for final_result in final_results:
if len(final_result):
results = final_results[0].union(final_results[1])
cached_results[key] = results
return results
partial_results.sort(key=operator.itemgetter(0))
# ...we have to recurse and try to further compose
results = []
for len_bases, composed, tunit in partial_results:
try:
composed_list = composed._compose(
equivalencies=equivalencies,
namespace=namespace,
max_depth=max_depth,
depth=depth + 1,
cached_results=cached_results,
)
except UnitsError:
composed_list = []
for subcomposed in composed_list:
results.append((len(subcomposed.bases), subcomposed, tunit))
if len(results):
results.sort(key=operator.itemgetter(0))
min_length = results[0][0]
subresults = set()
for len_bases, composed, tunit in results:
if len_bases > min_length:
break
else:
factored = composed * tunit
if is_final_result(factored):
subresults.add(factored)
if len(subresults):
cached_results[key] = subresults
return subresults
if not is_final_result(self):
result = UnitsError(
f"Cannot represent unit {self} in terms of the given units"
)
cached_results[key] = result
raise result
cached_results[key] = [self]
return [self]
def compose(
self, equivalencies=[], units=None, max_depth=2, include_prefix_units=None
):
"""
Return the simplest possible composite unit(s) that represent
the given unit. Since there may be multiple equally simple
compositions of the unit, a list of units is always returned.
Parameters
----------
equivalencies : list of tuple
A list of equivalence pairs to also list. See
:ref:`astropy:unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
units : set of `~astropy.units.Unit`, optional
If not provided, any known units may be used to compose
into. Otherwise, ``units`` is a dict, module or sequence
containing the units to compose into.
max_depth : int, optional
The maximum recursion depth to use when composing into
composite units.
include_prefix_units : bool, optional
When `True`, include prefixed units in the result.
Default is `True` if a sequence is passed in to ``units``,
`False` otherwise.
Returns
-------
units : list of `CompositeUnit`
A list of candidate compositions. These will all be
equally simple, but it may not be possible to
automatically determine which of the candidates are
better.
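Examples
--------
A brief sketch; the candidate list and its ordering may vary
between versions, so the output shown is indicative only.
>>> from astropy import units as u
>>> (u.m ** 2 / u.s ** 2).compose(units=[u.J, u.kg])  # doctest: +SKIP
[Unit("J / kg")]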
"""
# if units parameter is specified and is a sequence (list|tuple),
# include_prefix_units is turned on by default. Ex: units=[u.kpc]
if include_prefix_units is None:
include_prefix_units = isinstance(units, (list, tuple))
# Pre-normalize the equivalencies list
equivalencies = self._normalize_equivalencies(equivalencies)
# The namespace of units to compose into should be filtered to
# only include units with bases in common with self, otherwise
# they can't possibly provide useful results. Having too many
# destination units greatly increases the search space.
def has_bases_in_common(a, b):
if len(a.bases) == 0 and len(b.bases) == 0:
return True
for ab in a.bases:
for bb in b.bases:
if ab == bb:
return True
return False
def has_bases_in_common_with_equiv(unit, other):
if has_bases_in_common(unit, other):
return True
for funit, tunit, a, b in equivalencies:
if tunit is not None:
if unit._is_equivalent(funit):
if has_bases_in_common(tunit.decompose(), other):
return True
elif unit._is_equivalent(tunit):
if has_bases_in_common(funit.decompose(), other):
return True
else:
if unit._is_equivalent(funit):
if has_bases_in_common(dimensionless_unscaled, other):
return True
return False
def filter_units(units):
filtered_namespace = set()
for tunit in units:
if (
isinstance(tunit, UnitBase)
and (include_prefix_units or not isinstance(tunit, PrefixUnit))
and has_bases_in_common_with_equiv(decomposed, tunit.decompose())
):
filtered_namespace.add(tunit)
return filtered_namespace
decomposed = self.decompose()
if units is None:
units = filter_units(self._get_units_with_same_physical_type(equivalencies))
if len(units) == 0:
units = get_current_unit_registry().non_prefix_units
elif isinstance(units, dict):
units = set(filter_units(units.values()))
elif inspect.ismodule(units):
units = filter_units(vars(units).values())
else:
units = filter_units(_flatten_units_collection(units))
def sort_results(results):
if not len(results):
return []
# Sort the results so the simplest ones appear first.
# Simplest is defined as "the minimum sum of absolute
# powers" (i.e. the fewest bases), and preference should
# be given to results where the sum of powers is positive
# and the scale is exactly equal to 1.0
results = list(results)
results.sort(key=lambda x: np.abs(x.scale))
results.sort(key=lambda x: np.sum(np.abs(x.powers)))
results.sort(key=lambda x: np.sum(x.powers) < 0.0)
results.sort(key=lambda x: not is_effectively_unity(x.scale))
last_result = results[0]
filtered = [last_result]
for result in results[1:]:
if str(result) != str(last_result):
filtered.append(result)
last_result = result
return filtered
return sort_results(
self._compose(
equivalencies=equivalencies,
namespace=units,
max_depth=max_depth,
depth=0,
cached_results={},
)
)
def to_system(self, system):
"""
Converts this unit into ones belonging to the given system.
Since more than one result may be possible, a list is always
returned.
Parameters
----------
system : module
The module that defines the unit system. Commonly used
ones include `astropy.units.si` and `astropy.units.cgs`.
To use your own module it must contain unit objects and a
sequence member named ``bases`` containing the base units of
the system.
Returns
-------
units : list of `CompositeUnit`
The list is ranked so that units containing only the base
units of that system will appear first.
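Examples
--------
Illustrative sketch only; the number and ordering of the returned
candidates may differ between versions.
>>> from astropy import units as u
>>> u.km.to_system(u.si)  # doctest: +SKIP
[Unit("1000 m")]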
"""
bases = set(system.bases)
def score(compose):
# If compose.bases has no elements, return 'np.inf' as the score.
# It does not really matter which number we return; this case
# occurs, for instance, for dimensionless quantities:
compose_bases = compose.bases
if len(compose_bases) == 0:
return np.inf
else:
sum = 0
for base in compose_bases:
if base in bases:
sum += 1
return sum / float(len(compose_bases))
x = self.decompose(bases=bases)
composed = x.compose(units=system)
composed = sorted(composed, key=score, reverse=True)
return composed
@lazyproperty
def si(self):
"""
Returns a copy of the current `Unit` instance in SI units.
"""
from . import si
return self.to_system(si)[0]
@lazyproperty
def cgs(self):
"""
Returns a copy of the current `Unit` instance in CGS units.
"""
from . import cgs
return self.to_system(cgs)[0]
@property
def physical_type(self):
"""
Physical type(s) dimensionally compatible with the unit.
Returns
-------
`~astropy.units.physical.PhysicalType`
A representation of the physical type(s) of a unit.
Examples
--------
>>> from astropy import units as u
>>> u.m.physical_type
PhysicalType('length')
>>> (u.m ** 2 / u.s).physical_type
PhysicalType({'diffusivity', 'kinematic viscosity'})
Physical types can be compared to other physical types
(recommended in packages) or to strings.
>>> area = (u.m ** 2).physical_type
>>> area == u.m.physical_type ** 2
True
>>> area == "area"
True
`~astropy.units.physical.PhysicalType` objects can be used for
dimensional analysis.
>>> number_density = u.m.physical_type ** -3
>>> velocity = (u.m / u.s).physical_type
>>> number_density * velocity
PhysicalType('particle flux')
"""
from . import physical
return physical.get_physical_type(self)
def _get_units_with_same_physical_type(self, equivalencies=[]):
"""
Return a list of registered units with the same physical type
as this unit.
This function is used by Quantity to add its built-in
conversions to equivalent units.
This is a private method, since end users should be encouraged
to use the more powerful `compose` and `find_equivalent_units`
methods (which use this under the hood).
Parameters
----------
equivalencies : list of tuple
A list of equivalence pairs to also pull options from.
See :ref:`astropy:unit_equivalencies`. It must already be
normalized using `_normalize_equivalencies`.
"""
unit_registry = get_current_unit_registry()
units = set(unit_registry.get_units_with_physical_type(self))
for funit, tunit, a, b in equivalencies:
if tunit is not None:
if self.is_equivalent(funit) and tunit not in units:
units.update(unit_registry.get_units_with_physical_type(tunit))
if self._is_equivalent(tunit) and funit not in units:
units.update(unit_registry.get_units_with_physical_type(funit))
else:
if self.is_equivalent(funit):
units.add(dimensionless_unscaled)
return units
class EquivalentUnitsList(list):
"""
A class to handle pretty-printing the result of
`find_equivalent_units`.
"""
HEADING_NAMES = ("Primary name", "Unit definition", "Aliases")
ROW_LEN = 3 # len(HEADING_NAMES), but hard-code since it is constant
NO_EQUIV_UNITS_MSG = "There are no equivalent units"
def __repr__(self):
if len(self) == 0:
return self.NO_EQUIV_UNITS_MSG
else:
lines = self._process_equivalent_units(self)
lines.insert(0, self.HEADING_NAMES)
widths = [0] * self.ROW_LEN
for line in lines:
for i, col in enumerate(line):
widths[i] = max(widths[i], len(col))
f = " {{0:<{}s}} | {{1:<{}s}} | {{2:<{}s}}".format(*widths)
lines = [f.format(*line) for line in lines]
lines = lines[0:1] + ["["] + [f"{x} ," for x in lines[1:]] + ["]"]
return "\n".join(lines)
def _repr_html_(self):
"""
Outputs an HTML table representation within Jupyter notebooks.
"""
if len(self) == 0:
return f"<p>{self.NO_EQUIV_UNITS_MSG}</p>"
else:
# HTML tags to use to compose the table in HTML
blank_table = '<table style="width:50%">{}</table>'
blank_row_container = "<tr>{}</tr>"
heading_row_content = "<th>{}</th>" * self.ROW_LEN
data_row_content = "<td>{}</td>" * self.ROW_LEN
# The HTML will be rendered & the table is simple, so don't
# bother to include newlines & indentation for the HTML code.
heading_row = blank_row_container.format(
heading_row_content.format(*self.HEADING_NAMES)
)
data_rows = self._process_equivalent_units(self)
all_rows = heading_row
for row in data_rows:
html_row = blank_row_container.format(data_row_content.format(*row))
all_rows += html_row
return blank_table.format(all_rows)
@staticmethod
def _process_equivalent_units(equiv_units_data):
"""
Extract the attributes of the equivalent units and sort them prior to formatting.
"""
processed_equiv_units = []
for u in equiv_units_data:
irred = u.decompose().to_string()
if irred == u.name:
irred = "irreducible"
processed_equiv_units.append((u.name, irred, ", ".join(u.aliases)))
processed_equiv_units.sort()
return processed_equiv_units
def find_equivalent_units(
self, equivalencies=[], units=None, include_prefix_units=False
):
"""
Return a list of all the units that are the same type as ``self``.
Parameters
----------
equivalencies : list of tuple
A list of equivalence pairs to also list. See
:ref:`astropy:unit_equivalencies`.
Any list given, including an empty one, supersedes global defaults
that may be in effect (as set by `set_enabled_equivalencies`)
units : set of `~astropy.units.Unit`, optional
If not provided, all defined units will be searched for
equivalencies. Otherwise, may be a dict, module or
sequence containing the units to search for equivalencies.
include_prefix_units : bool, optional
When `True`, include prefixed units in the result.
Default is `False`.
Returns
-------
units : list of `UnitBase`
A list of unit objects that match ``u``. A subclass of
`list` (``EquivalentUnitsList``) is returned that
pretty-prints the list of units when output.
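Examples
--------
Sketch of typical interactive use; the table contents depend on
which units and equivalencies are currently enabled, so the output
below is abridged and indicative only.
>>> from astropy import units as u
>>> u.g.find_equivalent_units()  # doctest: +SKIP
  Primary name | Unit definition | Aliases
[
  M_e          | 9.10938e-31 kg  |         ,
  ...
]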
"""
results = self.compose(
equivalencies=equivalencies,
units=units,
max_depth=1,
include_prefix_units=include_prefix_units,
)
results = {x.bases[0] for x in results if len(x.bases) == 1}
return self.EquivalentUnitsList(results)
def is_unity(self):
"""
Returns `True` if the unit is unscaled and dimensionless.
"""
return False
class NamedUnit(UnitBase):
"""
The base class of units that have a name.
Parameters
----------
st : str, list of str, 2-tuple
The name of the unit. If a list of strings, the first element
is the canonical (short) name, and the rest of the elements
are aliases. If a tuple of lists, the first element is a list
of short names, and the second element is a list of long
names; all but the first short name are considered "aliases".
Each name *should* be a valid Python identifier to make it
easy to access, but this is not required.
namespace : dict, optional
When provided, inject the unit, and all of its aliases, in the
given namespace dictionary. If a unit by the same name is
already in the namespace, a ValueError is raised.
doc : str, optional
A docstring describing the unit.
format : dict, optional
A mapping to format-specific representations of this unit.
For example, for the ``Ohm`` unit, it might be nice to have it
displayed as ``\\Omega`` by the ``latex`` formatter. In that
case, `format` argument should be set to::
{'latex': r'\\Omega'}
Raises
------
ValueError
If any of the given unit names are already in the registry.
ValueError
If any of the given unit names are not valid Python tokens.
"""
def __init__(self, st, doc=None, format=None, namespace=None):
UnitBase.__init__(self)
if isinstance(st, (bytes, str)):
self._names = [st]
self._short_names = [st]
self._long_names = []
elif isinstance(st, tuple):
if not len(st) == 2:
raise ValueError("st must be string, list or 2-tuple")
self._names = st[0] + [n for n in st[1] if n not in st[0]]
if not len(self._names):
raise ValueError("must provide at least one name")
self._short_names = st[0][:]
self._long_names = st[1][:]
else:
if len(st) == 0:
raise ValueError("st list must have at least one entry")
self._names = st[:]
self._short_names = [st[0]]
self._long_names = st[1:]
if format is None:
format = {}
self._format = format
if doc is None:
doc = self._generate_doc()
else:
doc = textwrap.dedent(doc)
doc = textwrap.fill(doc)
self.__doc__ = doc
self._inject(namespace)
def _generate_doc(self):
"""
Generate a docstring for the unit if the user didn't supply
one. This is only used from the constructor and may be
overridden in subclasses.
"""
names = self.names
if len(self.names) > 1:
return f"{names[1]} ({names[0]})"
else:
return names[0]
def get_format_name(self, format):
"""
Get a name for this unit that is specific to a particular
format.
Uses the dictionary passed into the `format` kwarg in the
constructor.
Parameters
----------
format : str
The name of the format
Returns
-------
name : str
The name of the unit for the given format.
"""
return self._format.get(format, self.name)
@property
def names(self):
"""
Returns all of the names associated with this unit.
"""
return self._names
@property
def name(self):
"""
Returns the canonical (short) name associated with this unit.
"""
return self._names[0]
@property
def aliases(self):
"""
Returns the alias (long) names for this unit.
"""
return self._names[1:]
@property
def short_names(self):
"""
Returns all of the short names associated with this unit.
"""
return self._short_names
@property
def long_names(self):
"""
Returns all of the long names associated with this unit.
"""
return self._long_names
def _inject(self, namespace=None):
"""
Injects the unit, and all of its aliases, in the given
namespace dictionary.
"""
if namespace is None:
return
# Loop through all of the names first, to ensure all of them
# are new, then add them all as a single "transaction" below.
for name in self._names:
if name in namespace and self != namespace[name]:
raise ValueError(
f"Object with name {name!r} already exists in "
f"given namespace ({namespace[name]!r})."
)
for name in self._names:
namespace[name] = self
def _recreate_irreducible_unit(cls, names, registered):
"""
This is used to reconstruct units when passed around by
multiprocessing.
"""
registry = get_current_unit_registry().registry
if names[0] in registry:
# If in local registry return that object.
return registry[names[0]]
else:
# otherwise, recreate the unit.
unit = cls(names)
if registered:
# If not in local registry but registered in origin registry,
# enable unit in local registry.
get_current_unit_registry().add_enabled_units([unit])
return unit
class IrreducibleUnit(NamedUnit):
"""
Irreducible units are the units that all other units are defined
in terms of.
Examples are meters, seconds, kilograms, amperes, etc. There is
only one instance of such a unit per type.
"""
def __reduce__(self):
# When IrreducibleUnit objects are passed to other processes
# over multiprocessing, they need to be recreated to be the
# ones already in the subprocesses' namespace, not new
# objects, or they will be considered "unconvertible".
# Therefore, we have a custom pickler/unpickler that
# understands how to recreate the Unit on the other side.
registry = get_current_unit_registry().registry
return (
_recreate_irreducible_unit,
(self.__class__, list(self.names), self.name in registry),
self.__getstate__(),
)
@property
def represents(self):
"""The unit that this named unit represents.
For an irreducible unit, that is always itself.
"""
return self
def decompose(self, bases=set()):
if len(bases) and self not in bases:
for base in bases:
try:
scale = self._to(base)
except UnitsError:
pass
else:
if is_effectively_unity(scale):
return base
else:
return CompositeUnit(scale, [base], [1], _error_check=False)
raise UnitConversionError(
f"Unit {self} can not be decomposed into the requested bases"
)
return self
class UnrecognizedUnit(IrreducibleUnit):
"""
A unit that did not parse correctly. This allows for
round-tripping it as a string, but no unit operations actually work
on it.
Parameters
----------
st : str
The name of the unit.
"""
# For UnrecognizedUnits, we want to use "standard" Python
# pickling, not the special case that is used for
# IrreducibleUnits.
__reduce__ = object.__reduce__
def __repr__(self):
return f"UnrecognizedUnit({self})"
def __bytes__(self):
return self.name.encode("ascii", "replace")
def __str__(self):
return self.name
def to_string(self, format=None):
return self.name
def _unrecognized_operator(self, *args, **kwargs):
raise ValueError(
f"The unit {self.name!r} is unrecognized, so all arithmetic operations "
"with it are invalid."
)
__pow__ = __truediv__ = __rtruediv__ = __mul__ = __rmul__ = _unrecognized_operator
__lt__ = __gt__ = __le__ = __ge__ = __neg__ = _unrecognized_operator
def __eq__(self, other):
try:
other = Unit(other, parse_strict="silent")
except (ValueError, UnitsError, TypeError):
return NotImplemented
return isinstance(other, type(self)) and self.name == other.name
def __ne__(self, other):
return not (self == other)
def is_equivalent(self, other, equivalencies=None):
self._normalize_equivalencies(equivalencies)
return self == other
def _get_converter(self, other, equivalencies=None):
self._normalize_equivalencies(equivalencies)
raise ValueError(
f"The unit {self.name!r} is unrecognized. It can not be converted "
"to other units."
)
def get_format_name(self, format):
return self.name
def is_unity(self):
return False
class _UnitMetaClass(type):
"""
This metaclass exists because the Unit constructor should
sometimes return instances that already exist. This "overrides"
the constructor before the new instance is actually created, so we
can return an existing one.
"""
def __call__(
self,
s="",
represents=None,
format=None,
namespace=None,
doc=None,
parse_strict="raise",
):
# Short-circuit if we're already a unit
if hasattr(s, "_get_physical_type_id"):
return s
# turn possible Quantity input for s or represents into a Unit
from .quantity import Quantity
if isinstance(represents, Quantity):
if is_effectively_unity(represents.value):
represents = represents.unit
else:
represents = CompositeUnit(
represents.value * represents.unit.scale,
bases=represents.unit.bases,
powers=represents.unit.powers,
_error_check=False,
)
if isinstance(s, Quantity):
if is_effectively_unity(s.value):
s = s.unit
else:
s = CompositeUnit(
s.value * s.unit.scale,
bases=s.unit.bases,
powers=s.unit.powers,
_error_check=False,
)
# now decide what we really need to do; define derived Unit?
if isinstance(represents, UnitBase):
# This has the effect of calling the real __new__ and
# __init__ on the Unit class.
return super().__call__(
s, represents, format=format, namespace=namespace, doc=doc
)
# or interpret a Quantity (now converted to a unit), string or number?
if isinstance(s, UnitBase):
return s
elif isinstance(s, (bytes, str)):
if len(s.strip()) == 0:
# Return the NULL unit
return dimensionless_unscaled
if format is None:
format = unit_format.Generic
f = unit_format.get_format(format)
if isinstance(s, bytes):
s = s.decode("ascii")
try:
return f.parse(s)
except NotImplementedError:
raise
except Exception as e:
if parse_strict == "silent":
pass
else:
# Deliberately not issubclass here. Subclasses
# should use their name.
if f is not unit_format.Generic:
format_clause = f.name + " "
else:
format_clause = ""
msg = (
f"'{s}' did not parse as {format_clause}unit: {str(e)} "
"If this is meant to be a custom unit, "
"define it with 'u.def_unit'. To have it "
"recognized inside a file reader or other code, "
"enable it with 'u.add_enabled_units'. "
"For details, see "
"https://docs.astropy.org/en/latest/units/combining_and_defining.html"
)
if parse_strict == "raise":
raise ValueError(msg)
elif parse_strict == "warn":
warnings.warn(msg, UnitsWarning)
else:
raise ValueError(
"'parse_strict' must be 'warn', 'raise' or 'silent'"
)
return UnrecognizedUnit(s)
elif isinstance(s, (int, float, np.floating, np.integer)):
return CompositeUnit(s, [], [], _error_check=False)
elif isinstance(s, tuple):
from .structured import StructuredUnit
return StructuredUnit(s)
elif s is None:
raise TypeError("None is not a valid Unit")
else:
raise TypeError(f"{s} can not be converted to a Unit")
class Unit(NamedUnit, metaclass=_UnitMetaClass):
"""
The main unit class.
There are a number of different ways to construct a Unit, but
the constructor always returns a `UnitBase` instance. If the arguments refer to
an already-existing unit, that existing unit instance is returned,
rather than a new one.
- From a string::
Unit(s, format=None, parse_strict='raise')
Construct from a string representing a (possibly compound) unit.
The optional `format` keyword argument specifies the format the
string is in, by default ``"generic"``. For a description of
the available formats, see `astropy.units.format`.
The optional ``parse_strict`` keyword controls what happens when an
unrecognized unit string is passed in. It may be one of the following:
- ``'raise'``: (default) raise a ValueError exception.
- ``'warn'``: emit a Warning, and return an
`UnrecognizedUnit` instance.
- ``'silent'``: return an `UnrecognizedUnit` instance.
- From a number::
Unit(number)
Creates a dimensionless unit.
- From a `UnitBase` instance::
Unit(unit)
Returns the given unit unchanged.
- From no arguments::
Unit()
Returns the dimensionless unit.
- The last form, which creates a new `Unit` from a name and the unit it
represents, is described in detail below.
See also: https://docs.astropy.org/en/stable/units/
Parameters
----------
st : str or list of str
The name of the unit. If a list, the first element is the
canonical (short) name, and the rest of the elements are
aliases.
represents : UnitBase instance
The unit that this named unit represents.
doc : str, optional
A docstring describing the unit.
format : dict, optional
A mapping to format-specific representations of this unit.
For example, for the ``Ohm`` unit, it might be nice to have it
displayed as ``\\Omega`` by the ``latex`` formatter. In that
case, `format` argument should be set to::
{'latex': r'\\Omega'}
namespace : dict, optional
When provided, inject the unit (and all of its aliases) into
the given namespace.
Raises
------
ValueError
If any of the given unit names are already in the registry.
ValueError
If any of the given unit names are not valid Python tokens.
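Examples
--------
A short sketch of the main construction modes; the name ``'spam'``
below is deliberately not a real unit.
>>> from astropy import units as u
>>> u.Unit("km / s")
Unit("km / s")
>>> u.Unit(1e-14)
Unit(dimensionless with a scale of 1e-14)
>>> u.Unit("spam", parse_strict="silent")
UnrecognizedUnit(spam)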
"""
def __init__(self, st, represents=None, doc=None, format=None, namespace=None):
represents = Unit(represents)
self._represents = represents
NamedUnit.__init__(self, st, namespace=namespace, doc=doc, format=format)
@property
def represents(self):
"""The unit that this named unit represents."""
return self._represents
def decompose(self, bases=set()):
return self._represents.decompose(bases=bases)
def is_unity(self):
return self._represents.is_unity()
def __hash__(self):
if self._hash is None:
self._hash = hash((self.name, self._represents))
return self._hash
@classmethod
def _from_physical_type_id(cls, physical_type_id):
# get string bases and powers from the ID tuple
bases = [cls(base) for base, _ in physical_type_id]
powers = [power for _, power in physical_type_id]
if len(physical_type_id) == 1 and powers[0] == 1:
unit = bases[0]
else:
unit = CompositeUnit(1, bases, powers, _error_check=False)
return unit
class PrefixUnit(Unit):
"""
A unit that is simply a SI-prefixed version of another unit.
For example, ``mm`` is a `PrefixUnit` of ``.001 * m``.
The constructor is the same as for `Unit`.
"""
class CompositeUnit(UnitBase):
"""
Create a composite unit using expressions of previously defined
units.
Direct use of this class is not recommended. Instead use the
factory function `Unit` and arithmetic operators to compose
units.
Parameters
----------
scale : number
A scaling factor for the unit.
bases : sequence of `UnitBase`
A sequence of units this unit is composed of.
powers : sequence of numbers
A sequence of powers (in parallel with ``bases``) for each
of the base units.
"""
_decomposed_cache = None
def __init__(
self,
scale,
bases,
powers,
decompose=False,
decompose_bases=set(),
_error_check=True,
):
# There are many cases internal to astropy.units where we
# already know that all the bases are Unit objects, and the
# powers have been validated. In those cases, we can skip the
# error checking for performance reasons. When the private
# kwarg `_error_check` is False, the error checking is turned
# off.
if _error_check:
for base in bases:
if not isinstance(base, UnitBase):
raise TypeError("bases must be sequence of UnitBase instances")
powers = [validate_power(p) for p in powers]
if not decompose and len(bases) == 1 and powers[0] >= 0:
# Short-cut; with one unit there's nothing to expand and gather,
# as that has happened already when creating the unit. But do only
# positive powers, since for negative powers we need to re-sort.
unit = bases[0]
power = powers[0]
if power == 1:
scale *= unit.scale
self._bases = unit.bases
self._powers = unit.powers
elif power == 0:
self._bases = []
self._powers = []
else:
scale *= unit.scale**power
self._bases = unit.bases
self._powers = [
operator.mul(*resolve_fractions(p, power)) for p in unit.powers
]
self._scale = sanitize_scale(scale)
else:
# Regular case: use inputs as preliminary scale, bases, and powers,
# then "expand and gather" identical bases, sanitize the scale, &c.
self._scale = scale
self._bases = bases
self._powers = powers
self._expand_and_gather(decompose=decompose, bases=decompose_bases)
def __repr__(self):
if len(self._bases):
return super().__repr__()
else:
if self._scale != 1.0:
return f"Unit(dimensionless with a scale of {self._scale})"
else:
return "Unit(dimensionless)"
@property
def scale(self):
"""
Return the scale of the composite unit.
"""
return self._scale
@property
def bases(self):
"""
Return the bases of the composite unit.
"""
return self._bases
@property
def powers(self):
"""
Return the powers of the composite unit.
"""
return self._powers
def _expand_and_gather(self, decompose=False, bases=set()):
def add_unit(unit, power, scale):
if bases and unit not in bases:
for base in bases:
try:
scale *= unit._to(base) ** power
except UnitsError:
pass
else:
unit = base
break
if unit in new_parts:
a, b = resolve_fractions(new_parts[unit], power)
new_parts[unit] = a + b
else:
new_parts[unit] = power
return scale
new_parts = {}
scale = self._scale
for b, p in zip(self._bases, self._powers):
if decompose and b not in bases:
b = b.decompose(bases=bases)
if isinstance(b, CompositeUnit):
scale *= b._scale**p
for b_sub, p_sub in zip(b._bases, b._powers):
a, b = resolve_fractions(p_sub, p)
scale = add_unit(b_sub, a * b, scale)
else:
scale = add_unit(b, p, scale)
new_parts = [x for x in new_parts.items() if x[1] != 0]
new_parts.sort(key=lambda x: (-x[1], getattr(x[0], "name", "")))
self._bases = [x[0] for x in new_parts]
self._powers = [x[1] for x in new_parts]
self._scale = sanitize_scale(scale)
def __copy__(self):
"""
For compatibility with python copy module.
"""
return CompositeUnit(self._scale, self._bases[:], self._powers[:])
def decompose(self, bases=set()):
if len(bases) == 0 and self._decomposed_cache is not None:
return self._decomposed_cache
for base in self.bases:
if not isinstance(base, IrreducibleUnit) or (
len(bases) and base not in bases
):
break
else:
if len(bases) == 0:
self._decomposed_cache = self
return self
x = CompositeUnit(
self.scale, self.bases, self.powers, decompose=True, decompose_bases=bases
)
if len(bases) == 0:
self._decomposed_cache = x
return x
def is_unity(self):
unit = self.decompose()
return len(unit.bases) == 0 and unit.scale == 1.0
si_prefixes = [
(["Q"], ["quetta"], 1e30),
(["R"], ["ronna"], 1e27),
(["Y"], ["yotta"], 1e24),
(["Z"], ["zetta"], 1e21),
(["E"], ["exa"], 1e18),
(["P"], ["peta"], 1e15),
(["T"], ["tera"], 1e12),
(["G"], ["giga"], 1e9),
(["M"], ["mega"], 1e6),
(["k"], ["kilo"], 1e3),
(["h"], ["hecto"], 1e2),
(["da"], ["deka", "deca"], 1e1),
(["d"], ["deci"], 1e-1),
(["c"], ["centi"], 1e-2),
(["m"], ["milli"], 1e-3),
(["u"], ["micro"], 1e-6),
(["n"], ["nano"], 1e-9),
(["p"], ["pico"], 1e-12),
(["f"], ["femto"], 1e-15),
(["a"], ["atto"], 1e-18),
(["z"], ["zepto"], 1e-21),
(["y"], ["yocto"], 1e-24),
(["r"], ["ronto"], 1e-27),
(["q"], ["quecto"], 1e-30),
]
binary_prefixes = [
(["Ki"], ["kibi"], 2**10),
(["Mi"], ["mebi"], 2**20),
(["Gi"], ["gibi"], 2**30),
(["Ti"], ["tebi"], 2**40),
(["Pi"], ["pebi"], 2**50),
(["Ei"], ["exbi"], 2**60),
]
def _add_prefixes(u, excludes=[], namespace=None, prefixes=False):
"""
Set up all of the standard metric prefixes for a unit. This
function should not be used directly, but instead use the
`prefixes` kwarg on `def_unit`.
Parameters
----------
excludes : list of str, optional
Any prefixes to exclude from creation to avoid namespace
collisions.
namespace : dict, optional
When provided, inject the unit (and all of its aliases) into
the given namespace dictionary.
prefixes : list, optional
When provided, it is a list of prefix definitions of the form:
(short_names, long_names, factor)
"""
if prefixes is True:
prefixes = si_prefixes
elif prefixes is False:
prefixes = []
for short, full, factor in prefixes:
names = []
format = {}
for prefix in short:
if prefix in excludes:
continue
for alias in u.short_names:
names.append(prefix + alias)
# This is a hack to use Greek mu as a prefix
# for some formatters.
if prefix == "u":
format["latex"] = r"\mu " + u.get_format_name("latex")
format["unicode"] = "\N{MICRO SIGN}" + u.get_format_name("unicode")
for key, val in u._format.items():
format.setdefault(key, prefix + val)
for prefix in full:
if prefix in excludes:
continue
for alias in u.long_names:
names.append(prefix + alias)
if len(names):
PrefixUnit(
names,
CompositeUnit(factor, [u], [1], _error_check=False),
namespace=namespace,
format=format,
)
def def_unit(
s,
represents=None,
doc=None,
format=None,
prefixes=False,
exclude_prefixes=[],
namespace=None,
):
"""
Factory function for defining new units.
Parameters
----------
s : str or list of str
The name of the unit. If a list, the first element is the
canonical (short) name, and the rest of the elements are
aliases.
represents : UnitBase instance, optional
The unit that this named unit represents. If not provided,
a new `IrreducibleUnit` is created.
doc : str, optional
A docstring describing the unit.
format : dict, optional
A mapping to format-specific representations of this unit.
For example, for the ``Ohm`` unit, it might be nice to
have it displayed as ``\\Omega`` by the ``latex``
formatter. In that case, `format` argument should be set
to::
{'latex': r'\\Omega'}
prefixes : bool or list, optional
When `True`, generate all of the SI prefixed versions of the
unit as well. For example, for a given unit ``m``, will
generate ``mm``, ``cm``, ``km``, etc. When a list, it is a list of
prefix definitions of the form:
(short_names, long_names, factor)
Default is `False`. This function always returns the base
unit object, even if multiple scaled versions of the unit were
created.
exclude_prefixes : list of str, optional
If any of the SI prefixes need to be excluded, they may be
listed here. For example, ``Pa`` can be interpreted either as
"petaannum" or "Pascal". Therefore, when defining the
prefixes for ``a``, ``exclude_prefixes`` should be set to
``["P"]``.
namespace : dict, optional
When provided, inject the unit (and all of its aliases and
prefixes), into the given namespace dictionary.
Returns
-------
unit : `~astropy.units.UnitBase`
The newly-defined unit, or a matching unit that was already
defined.
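Examples
--------
An illustrative sketch; the ``fortnight`` unit defined here is
purely an example and is not registered in any namespace.
>>> from astropy import units as u
>>> fortnight = u.def_unit('fortnight', 14 * u.day)
>>> fortnight.to(u.h)
336.0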
"""
if represents is not None:
result = Unit(s, represents, namespace=namespace, doc=doc, format=format)
else:
result = IrreducibleUnit(s, namespace=namespace, doc=doc, format=format)
if prefixes:
_add_prefixes(
result, excludes=exclude_prefixes, namespace=namespace, prefixes=prefixes
)
return result
def _condition_arg(value):
"""
Validate that the value is acceptable for conversion purposes.
Converts the value into an array if it is not a scalar but can be
converted into one.
Parameters
----------
value : int or float value, or sequence of such values
Returns
-------
Scalar value or numpy array
Raises
------
ValueError
If value is not as expected
"""
if isinstance(value, (np.ndarray, float, int, complex, np.void)):
return value
avalue = np.array(value)
if avalue.dtype.kind not in ["i", "f", "c"]:
raise ValueError(
"Value not scalar compatible or convertible to "
"an int, float, or complex array"
)
return avalue
def unit_scale_converter(val):
"""Function that just multiplies the value by unity.
This is a separate function so it can be recognized and
discarded in unit conversion.
"""
return 1.0 * _condition_arg(val)
dimensionless_unscaled = CompositeUnit(1, [], [], _error_check=False)
# Abbreviation of the above, see #1980
one = dimensionless_unscaled
# Maintain error in old location for backward compatibility
# TODO: Is this still needed? Should there be a deprecation warning?
unit_format.fits.UnitScaleError = UnitScaleError
|
449feb1859d7760eb7800946ae9dbb91d658d735d50cb60a531851d775e0a28b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Miscellaneous utilities for `astropy.units`.
None of the functions in the module are meant for use outside of the
package.
"""
import io
import re
from fractions import Fraction
import numpy as np
from numpy import finfo
_float_finfo = finfo(float)
# take float here to ensure comparison with another float is fast
# give a little margin since often multiple calculations happened
_JUST_BELOW_UNITY = float(1.0 - 4.0 * _float_finfo.epsneg)
_JUST_ABOVE_UNITY = float(1.0 + 4.0 * _float_finfo.eps)
def _get_first_sentence(s):
"""
Get the first sentence from a string and remove any carriage
returns.
"""
x = re.match(r".*?\S\.\s", s)
if x is not None:
s = x.group(0)
return s.replace("\n", " ")
def _iter_unit_summary(namespace):
"""
Generates the ``(unit, doc, represents, aliases, prefixes)``
tuple used to format the unit summary docs in `generate_unit_summary`.
"""
from . import core
# Get all of the units, and keep track of which ones have SI
# prefixes
units = []
has_prefixes = set()
for key, val in namespace.items():
# Skip non-unit items
if not isinstance(val, core.UnitBase):
continue
# Skip aliases
if key != val.name:
continue
if isinstance(val, core.PrefixUnit):
# This will return the root unit that is scaled by the prefix
# attached to it
has_prefixes.add(val._represents.bases[0].name)
else:
units.append(val)
# Sort alphabetically, case insensitive
units.sort(key=lambda x: x.name.lower())
for unit in units:
doc = _get_first_sentence(unit.__doc__).strip()
represents = ""
if isinstance(unit, core.Unit):
represents = f":math:`{unit._represents.to_string('latex')[1:-1]}`"
aliases = ", ".join(f"``{x}``" for x in unit.aliases)
yield (
unit,
doc,
represents,
aliases,
"Yes" if unit.name in has_prefixes else "No",
)
def generate_unit_summary(namespace):
"""
Generates a summary of units from a given namespace. This is used
to generate the docstring for the modules that define the actual
units.
Parameters
----------
namespace : dict
A namespace containing units.
Returns
-------
docstring : str
A docstring containing a summary table of the units.
"""
docstring = io.StringIO()
docstring.write(
"""
.. list-table:: Available Units
:header-rows: 1
:widths: 10 20 20 20 1
* - Unit
- Description
- Represents
- Aliases
- SI Prefixes
"""
)
template = """
* - ``{}``
- {}
- {}
- {}
- {}
"""
for unit_summary in _iter_unit_summary(namespace):
docstring.write(template.format(*unit_summary))
return docstring.getvalue()
def generate_prefixonly_unit_summary(namespace):
"""
Generates table entries for units in a namespace that are just prefixes
without the base unit. Note that this is intended to be used *after*
`generate_unit_summary` and therefore does not include the table header.
Parameters
----------
namespace : dict
A namespace containing units that are prefixes but do *not* have the
base unit in their namespace.
Returns
-------
docstring : str
A docstring containing a summary table of the units.
"""
from . import PrefixUnit
faux_namespace = {}
for nm, unit in namespace.items():
if isinstance(unit, PrefixUnit):
base_unit = unit.represents.bases[0]
faux_namespace[base_unit.name] = base_unit
docstring = io.StringIO()
template = """
* - Prefixes for ``{}``
- {} prefixes
- {}
- {}
- Only
"""
for unit_summary in _iter_unit_summary(faux_namespace):
docstring.write(template.format(*unit_summary))
return docstring.getvalue()
def is_effectively_unity(value):
# value is *almost* always real, except, e.g., for u.mag**0.5, when
# it will be complex. Use try/except to ensure normal case is fast
try:
return _JUST_BELOW_UNITY <= value <= _JUST_ABOVE_UNITY
except TypeError: # value is complex
return (
_JUST_BELOW_UNITY <= value.real <= _JUST_ABOVE_UNITY
and _JUST_BELOW_UNITY <= value.imag + 1 <= _JUST_ABOVE_UNITY
)
def sanitize_scale(scale):
if is_effectively_unity(scale):
return 1.0
# Maximum speed for regular case where scale is a float.
if scale.__class__ is float:
return scale
# We cannot have numpy scalars, since they don't autoconvert to
# complex if necessary. They are also slower.
if hasattr(scale, "dtype"):
scale = scale.item()
# All classes that scale can be (int, float, complex, Fraction)
# have an "imag" attribute.
if scale.imag:
if abs(scale.real) > abs(scale.imag):
if is_effectively_unity(scale.imag / scale.real + 1):
return scale.real
elif is_effectively_unity(scale.real / scale.imag + 1):
return complex(0.0, scale.imag)
return scale
else:
return scale.real
def maybe_simple_fraction(p, max_denominator=100):
Fraction very close to ``p`` with denominator at most ``max_denominator``.
The fraction has to be such that fraction/p is unity to within 4 ulp.
If such a fraction does not exist, returns the float number.
The algorithm is that of `fractions.Fraction.limit_denominator`, but
sped up by not creating a fraction to start with.
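Examples
--------
Illustrative only; this helper is internal to ``astropy.units``.
>>> from astropy.units.utils import maybe_simple_fraction
>>> maybe_simple_fraction(1 / 3)
Fraction(1, 3)
>>> maybe_simple_fraction(0.12345)
0.12345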
"""
if p == 0 or p.__class__ is int:
return p
n, d = p.as_integer_ratio()
a = n // d
# Normally, start with 0,1 and 1,0; here we have applied the first iteration.
n0, d0 = 1, 0
n1, d1 = a, 1
while d1 <= max_denominator:
if _JUST_BELOW_UNITY <= n1 / (d1 * p) <= _JUST_ABOVE_UNITY:
return Fraction(n1, d1)
n, d = d, n - a * d
a = n // d
n0, n1 = n1, n0 + a * n1
d0, d1 = d1, d0 + a * d1
return p
def validate_power(p):
"""Convert a power to a floating point value, an integer, or a Fraction.
If a fractional power can be represented exactly as a floating point
number, convert it to a float, to make the math much faster; otherwise,
retain it as a `fractions.Fraction` object to avoid losing precision.
Conversely, if the value is indistinguishable from a rational number with a
low-numbered denominator, convert to a Fraction object.
Parameters
----------
p : float, int, Rational, Fraction
Power to be converted
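Examples
--------
Illustrative only; this helper is internal to ``astropy.units``.
>>> from astropy.units.utils import validate_power
>>> validate_power(0.5)
0.5
>>> validate_power(1 / 3)
Fraction(1, 3)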
"""
denom = getattr(p, "denominator", None)
if denom is None:
try:
p = float(p)
except Exception:
if not np.isscalar(p):
raise ValueError(
"Quantities and Units may only be raised to a scalar power"
)
else:
raise
# This returns either a (simple) Fraction or the same float.
p = maybe_simple_fraction(p)
# If still a float, nothing more to be done.
if isinstance(p, float):
return p
# Otherwise, check for simplifications.
denom = p.denominator
if denom == 1:
p = p.numerator
elif (denom & (denom - 1)) == 0:
# Above is a bit-twiddling hack to see if denom is a power of two.
# If so, float does not lose precision and will speed things up.
p = float(p)
return p
def resolve_fractions(a, b):
"""
If either input is a Fraction, convert the other to a Fraction
(at least if it does not have a ridiculous denominator).
This ensures that any operation involving a Fraction will use
rational arithmetic and preserve precision.
"""
# We short-circuit on the most common cases of int and float, since
# isinstance(a, Fraction) is very slow for any non-Fraction instances.
a_is_fraction = (
a.__class__ is not int and a.__class__ is not float and isinstance(a, Fraction)
)
b_is_fraction = (
b.__class__ is not int and b.__class__ is not float and isinstance(b, Fraction)
)
if a_is_fraction and not b_is_fraction:
b = maybe_simple_fraction(b)
elif not a_is_fraction and b_is_fraction:
a = maybe_simple_fraction(a)
return a, b
def quantity_asanyarray(a, dtype=None):
from .quantity import Quantity
if (
not isinstance(a, np.ndarray)
and not np.isscalar(a)
and any(isinstance(x, Quantity) for x in a)
):
return Quantity(a, dtype=dtype)
else:
# Substitute float64 for np.inexact, which is deprecated as a dtype in numpy.
dtype = np.float64 if dtype is np.inexact else dtype
return np.asanyarray(a, dtype=dtype)
|
c13be6a6cb160765dc808dc7f0359ba7aa62cb2120c96527c1bcc12d90fdd581 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines structured units and quantities.
"""
from __future__ import annotations # For python < 3.10
# Standard library
import operator
import numpy as np
from .core import UNITY, Unit, UnitBase
__all__ = ["StructuredUnit"]
DTYPE_OBJECT = np.dtype("O")
def _names_from_dtype(dtype):
"""Recursively extract field names from a dtype."""
names = []
for name in dtype.names:
subdtype = dtype.fields[name][0].base
if subdtype.names:
names.append([name, _names_from_dtype(subdtype)])
else:
names.append(name)
return tuple(names)
def _normalize_names(names):
"""Recursively normalize, inferring upper level names for unadorned tuples.
Generally, we want the field names to be organized like dtypes, as in
``(['pv', ('p', 'v')], 't')``. But we automatically infer upper
field names if the list is absent from items like ``(('p', 'v'), 't')``,
by concatenating the names inside the tuple.
"""
result = []
for name in names:
if isinstance(name, str) and len(name) > 0:
result.append(name)
elif (
isinstance(name, list)
and len(name) == 2
and isinstance(name[0], str)
and len(name[0]) > 0
and isinstance(name[1], tuple)
and len(name[1]) > 0
):
result.append([name[0], _normalize_names(name[1])])
elif isinstance(name, tuple) and len(name) > 0:
new_tuple = _normalize_names(name)
name = "".join([(i[0] if isinstance(i, list) else i) for i in new_tuple])
result.append([name, new_tuple])
else:
raise ValueError(
f"invalid entry {name!r}. Should be a name, "
"tuple of names, or 2-element list of the "
"form [name, tuple of names]."
)
return tuple(result)
class StructuredUnit:
"""Container for units for a structured Quantity.
Parameters
----------
units : unit-like, tuple of unit-like, or `~astropy.units.StructuredUnit`
Tuples can be nested. If a `~astropy.units.StructuredUnit` is passed
in, it will be returned unchanged unless different names are requested.
names : tuple of str, tuple or list; `~numpy.dtype`; or `~astropy.units.StructuredUnit`, optional
Field names for the units, possibly nested. Can be inferred from a
structured `~numpy.dtype` or another `~astropy.units.StructuredUnit`.
For nested tuples, by default the name of the upper entry will be the
concatenation of the names of the lower levels. One can pass in a
list with the upper-level name and a tuple of lower-level names to
avoid this. For tuples, not all levels have to be given; for any level
not passed in, default field names of 'f0', 'f1', etc., will be used.
Notes
-----
It is recommended to initialize the class indirectly, using
`~astropy.units.Unit`. E.g., ``u.Unit('AU,AU/day')``.
When combined with a structured array to produce a structured
`~astropy.units.Quantity`, array field names will take precedence.
Generally, passing in ``names`` is needed only if the unit is used
unattached to a `~astropy.units.Quantity` and one needs to access its
fields.
Examples
--------
Various ways to initialize a `~astropy.units.StructuredUnit`::
>>> import astropy.units as u
>>> su = u.Unit('(AU,AU/day),yr')
>>> su
Unit("((AU, AU / d), yr)")
>>> su.field_names
(['f0', ('f0', 'f1')], 'f1')
>>> su['f1']
Unit("yr")
>>> su2 = u.StructuredUnit(((u.AU, u.AU/u.day), u.yr), names=(('p', 'v'), 't'))
>>> su2 == su
True
>>> su2.field_names
(['pv', ('p', 'v')], 't')
>>> su3 = u.StructuredUnit((su2['pv'], u.day), names=(['p_v', ('p', 'v')], 't'))
>>> su3.field_names
(['p_v', ('p', 'v')], 't')
>>> su3.keys()
('p_v', 't')
>>> su3.values()
(Unit("(AU, AU / d)"), Unit("d"))
Structured units share most methods with regular units::
>>> su.physical_type
((PhysicalType('length'), PhysicalType({'speed', 'velocity'})), PhysicalType('time'))
>>> su.si
Unit("((1.49598e+11 m, 1.73146e+06 m / s), 3.15576e+07 s)")
"""
def __new__(cls, units, names=None):
dtype = None
if names is not None:
if isinstance(names, StructuredUnit):
dtype = names._units.dtype
names = names.field_names
elif isinstance(names, np.dtype):
if not names.fields:
raise ValueError("dtype should be structured, with fields.")
dtype = np.dtype([(name, DTYPE_OBJECT) for name in names.names])
names = _names_from_dtype(names)
else:
if not isinstance(names, tuple):
names = (names,)
names = _normalize_names(names)
if not isinstance(units, tuple):
units = Unit(units)
if isinstance(units, StructuredUnit):
# Avoid constructing a new StructuredUnit if no field names
# are given, or if all field names are the same already anyway.
if names is None or units.field_names == names:
return units
# Otherwise, turn (the upper level) into a tuple, for renaming.
units = units.values()
else:
# Single regular unit: make a tuple for iteration below.
units = (units,)
if names is None:
names = tuple(f"f{i}" for i in range(len(units)))
elif len(units) != len(names):
raise ValueError("lengths of units and field names must match.")
converted = []
for unit, name in zip(units, names):
if isinstance(name, list):
# For list, the first item is the name of our level,
# and the second another tuple of names, i.e., we recurse.
unit = cls(unit, name[1])
name = name[0]
else:
# We are at the lowest level. Check unit.
unit = Unit(unit)
if dtype is not None and isinstance(unit, StructuredUnit):
raise ValueError(
"units do not match in depth with field "
"names from dtype or structured unit."
)
converted.append(unit)
self = super().__new__(cls)
if dtype is None:
dtype = np.dtype(
[
((name[0] if isinstance(name, list) else name), DTYPE_OBJECT)
for name in names
]
)
# Decay array to void so we can access by field name and number.
self._units = np.array(tuple(converted), dtype)[()]
return self
def __getnewargs__(self):
"""When de-serializing, e.g. pickle, start with a blank structure."""
return (), None
@property
def field_names(self):
"""Possibly nested tuple of the field names of the parts."""
return tuple(
([name, unit.field_names] if isinstance(unit, StructuredUnit) else name)
for name, unit in self.items()
)
# Allow StructuredUnit to be treated as an (ordered) mapping.
def __len__(self):
return len(self._units.dtype.names)
def __getitem__(self, item):
# Since we are based on np.void, indexing by field number works too.
return self._units[item]
def values(self):
return self._units.item()
def keys(self):
return self._units.dtype.names
def items(self):
return tuple(zip(self._units.dtype.names, self._units.item()))
def __iter__(self):
yield from self._units.dtype.names
# Helpers for methods below.
def _recursively_apply(self, func, cls=None):
"""Apply func recursively.
Parameters
----------
func : callable
Function to apply to all parts of the structured unit,
recursing as needed.
cls : type, optional
If given, should be a subclass of `~numpy.void`. By default,
will return a new `~astropy.units.StructuredUnit` instance.
"""
applied = tuple(func(part) for part in self.values())
# Once not NUMPY_LT_1_23: results = np.void(applied, self._units.dtype).
results = np.array(applied, self._units.dtype)[()]
if cls is not None:
return results.view((cls, results.dtype))
# Short-cut; no need to interpret field names, etc.
result = super().__new__(self.__class__)
result._units = results
return result
def _recursively_get_dtype(self, value, enter_lists=True):
"""Get structured dtype according to value, using our field names.
This is useful since ``np.array(value)`` would treat tuples as lower
levels of the array, rather than as elements of a structured array.
The routine does presume that the type of the first tuple is
representative of the rest. Used in ``_get_converter``.
For the special value of ``UNITY``, all fields are assumed to be 1.0,
and hence this will return an all-float dtype.
"""
if enter_lists:
while isinstance(value, list):
value = value[0]
if value is UNITY:
value = (UNITY,) * len(self)
elif not isinstance(value, tuple) or len(self) != len(value):
raise ValueError(f"cannot interpret value {value} for unit {self}.")
descr = []
for (name, unit), part in zip(self.items(), value):
if isinstance(unit, StructuredUnit):
descr.append(
(name, unit._recursively_get_dtype(part, enter_lists=False))
)
else:
# Got a part associated with a regular unit. Gets its dtype.
# Like for Quantity, we cast integers to float.
part = np.array(part)
part_dtype = part.dtype
if part_dtype.kind in "iu":
part_dtype = np.dtype(float)
descr.append((name, part_dtype, part.shape))
return np.dtype(descr)
@property
def si(self):
"""The `StructuredUnit` instance in SI units."""
return self._recursively_apply(operator.attrgetter("si"))
@property
def cgs(self):
"""The `StructuredUnit` instance in cgs units."""
return self._recursively_apply(operator.attrgetter("cgs"))
# Needed to pass through Unit initializer, so might as well use it.
def _get_physical_type_id(self):
return self._recursively_apply(
operator.methodcaller("_get_physical_type_id"), cls=Structure
)
@property
def physical_type(self):
"""Physical types of all the fields."""
return self._recursively_apply(
operator.attrgetter("physical_type"), cls=Structure
)
def decompose(self, bases=set()):
"""The `StructuredUnit` composed of only irreducible units.
Parameters
----------
bases : sequence of `~astropy.units.UnitBase`, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
This will raise a `UnitsError` if it's not possible
to do so.
Returns
-------
`~astropy.units.StructuredUnit`
With the unit for each field containing only irreducible units.
"""
return self._recursively_apply(operator.methodcaller("decompose", bases=bases))
def is_equivalent(self, other, equivalencies=[]):
"""`True` if all fields are equivalent to the other's fields.
Parameters
----------
other : `~astropy.units.StructuredUnit`
The structured unit to compare with, or what can initialize one.
equivalencies : list of tuple, optional
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`unit_equivalencies`.
The list will be applied to all fields.
Returns
-------
bool
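Examples
--------
A minimal sketch comparing two structured units field by field.
>>> import astropy.units as u
>>> u.Unit('AU,AU/day').is_equivalent('km,km/s')
True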
"""
try:
other = StructuredUnit(other)
except Exception:
return False
if len(self) != len(other):
return False
for self_part, other_part in zip(self.values(), other.values()):
if not self_part.is_equivalent(other_part, equivalencies=equivalencies):
return False
return True
def _get_converter(self, other, equivalencies=[]):
if not isinstance(other, type(self)):
other = self.__class__(other, names=self)
converters = [
self_part._get_converter(other_part, equivalencies=equivalencies)
for (self_part, other_part) in zip(self.values(), other.values())
]
def converter(value):
if not hasattr(value, "dtype"):
value = np.array(value, self._recursively_get_dtype(value))
result = np.empty_like(value)
for name, converter_ in zip(result.dtype.names, converters):
result[name] = converter_(value[name])
# Index with empty tuple to decay array scalars to numpy void.
return result if result.shape else result[()]
return converter
def to(self, other, value=np._NoValue, equivalencies=[]):
"""Return values converted to the specified unit.
Parameters
----------
other : `~astropy.units.StructuredUnit`
The unit to convert to. If necessary, will be converted to
a `~astropy.units.StructuredUnit` using the dtype of ``value``.
value : array-like, optional
Value(s) in the current unit to be converted to the
specified unit. If a sequence, the first element must have
entries of the correct type to represent all elements (i.e.,
not have, e.g., a ``float`` where other elements have ``complex``).
If not given, assumed to have 1. in all fields.
equivalencies : list of tuple, optional
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
Returns
-------
values : scalar or array
Converted value(s).
Raises
------
UnitsError
If units are inconsistent
"""
if value is np._NoValue:
# We do not have UNITY as a default, since then the docstring
# would list 1.0 as default, yet one could not pass that in.
value = UNITY
return self._get_converter(other, equivalencies=equivalencies)(value)
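# Illustrative sketch (not part of the class): converting a plain tuple of
# values field by field. Assumes ``astropy.units`` is importable as ``u``;
# the printed outputs are indicative only.
#
#     >>> import astropy.units as u
#     >>> su = u.StructuredUnit((u.km, u.km / u.s), names=("p", "v"))
#     >>> su.to((u.m, u.m / u.s), (1.0, 2.0))   # doctest: +SKIP
#     (1000., 2000.)
#     >>> su.to((u.m, u.m / u.s))   # no value: factors for 1.0 in each field  # doctest: +SKIP
#     (1000., 1000.)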
def to_string(self, format="generic"):
"""Output the unit in the given format as a string.
Units are separated by commas.
Parameters
----------
format : `astropy.units.format.Base` instance or str
The name of a format or a formatter object. If not
provided, defaults to the generic format.
Notes
-----
Structured units can be written to all formats, but can be
re-read only with 'generic'.
"""
parts = [part.to_string(format) for part in self.values()]
out_fmt = "({})" if len(self) > 1 else "({},)"
if format.startswith("latex"):
# Strip $ from parts and add them on the outside.
parts = [part[1:-1] for part in parts]
out_fmt = "$" + out_fmt + "$"
return out_fmt.format(", ".join(parts))
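# Illustrative sketch (not part of the class), assuming the ``su`` unit from
# the examples above: the generic format separates the fields with commas.
#
#     >>> str(su)   # doctest: +SKIP
#     '(km, km / s)'
#     >>> su.to_string("latex")   # doctest: +SKIP  (a single '$...$' math-mode string)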
def _repr_latex_(self):
return self.to_string("latex")
__array_ufunc__ = None
def __mul__(self, other):
if isinstance(other, str):
try:
other = Unit(other, parse_strict="silent")
except Exception:
return NotImplemented
if isinstance(other, UnitBase):
new_units = tuple(part * other for part in self.values())
return self.__class__(new_units, names=self)
if isinstance(other, StructuredUnit):
return NotImplemented
# Anything not like a unit, try initialising as a structured quantity.
try:
from .quantity import Quantity
return Quantity(other, unit=self)
except Exception:
return NotImplemented
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
if isinstance(other, str):
try:
other = Unit(other, parse_strict="silent")
except Exception:
return NotImplemented
if isinstance(other, UnitBase):
new_units = tuple(part / other for part in self.values())
return self.__class__(new_units, names=self)
return NotImplemented
def __rlshift__(self, m):
try:
from .quantity import Quantity
return Quantity(m, self, copy=False, subok=True)
except Exception:
return NotImplemented
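# Illustrative sketch (not part of the class): ``<<`` attaches a structured
# unit to a matching structured array without copying. Assumes ``numpy`` is
# importable as ``np`` and ``astropy.units`` as ``u``; output indicative only.
#
#     >>> import numpy as np
#     >>> import astropy.units as u
#     >>> su = u.StructuredUnit((u.km, u.km / u.s), names=("p", "v"))
#     >>> data = np.array([(1.0, 2.0)], dtype=[("p", "f8"), ("v", "f8")])
#     >>> q = data << su
#     >>> q["p"]   # doctest: +SKIP
#     <Quantity [1.] km>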
def __str__(self):
return self.to_string()
def __repr__(self):
return f'Unit("{self.to_string()}")'
def __eq__(self, other):
try:
other = StructuredUnit(other)
except Exception:
return NotImplemented
return self.values() == other.values()
def __ne__(self, other):
if not isinstance(other, type(self)):
try:
other = StructuredUnit(other)
except Exception:
return NotImplemented
return self.values() != other.values()
class Structure(np.void):
"""Single element structure for physical type IDs, etc.
Behaves like a `~numpy.void` and thus mostly like a tuple which can also
be indexed with field names, but overrides ``__eq__`` and ``__ne__`` to
compare only the contents, not the field names. This also avoids the
`FutureWarning` that numpy would otherwise emit for such comparisons.
"""
# Note that it is important for physical type IDs to not be stored in a
# tuple, since then the physical types would be treated as alternatives in
# :meth:`~astropy.units.UnitBase.is_equivalent`. (Of course, in that
# case, they could also not be indexed by name.)
def __eq__(self, other):
if isinstance(other, np.void):
other = other.item()
return self.item() == other
def __ne__(self, other):
if isinstance(other, np.void):
other = other.item()
return self.item() != other
def _structured_unit_like_dtype(
unit: UnitBase | StructuredUnit, dtype: np.dtype
) -> StructuredUnit:
"""Make a `StructuredUnit` of one unit, with the structure of a `numpy.dtype`.
Parameters
----------
unit : UnitBase
The unit that will be filled into the structure.
dtype : `numpy.dtype`
The structure for the StructuredUnit.
Returns
-------
StructuredUnit
"""
if isinstance(unit, StructuredUnit):
# If unit is structured, it should match the dtype. This function is
# only used in Quantity, which performs this check, so it's fine to
# return as is.
return unit
# Make a structured unit
units = []
for name in dtype.names:
subdtype = dtype.fields[name][0]
if subdtype.names is not None:
units.append(_structured_unit_like_dtype(unit, subdtype))
else:
units.append(unit)
return StructuredUnit(tuple(units), names=dtype.names)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines the `Quantity` object, which represents a number with some
associated units. `Quantity` objects support operations like ordinary numbers,
but will deal with unit conversions internally.
"""
# STDLIB
import numbers
import operator
import re
import warnings
from fractions import Fraction
# THIRD PARTY
import numpy as np
# LOCAL
from astropy import config as _config
from astropy.utils.compat import NUMPY_LT_1_22
from astropy.utils.data_info import ParentDtypeInfo
from astropy.utils.decorators import deprecated
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.misc import isiterable
from .core import (
Unit,
UnitBase,
UnitConversionError,
UnitsError,
UnitTypeError,
dimensionless_unscaled,
get_current_unit_registry,
)
from .format import Base, Latex
from .quantity_helper import can_have_arbitrary_unit, check_output, converters_and_unit
from .quantity_helper.function_helpers import (
DISPATCHED_FUNCTIONS,
FUNCTION_HELPERS,
SUBCLASS_SAFE_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
)
from .structured import StructuredUnit, _structured_unit_like_dtype
from .utils import is_effectively_unity
__all__ = [
"Quantity",
"SpecificTypeQuantity",
"QuantityInfoBase",
"QuantityInfo",
"allclose",
"isclose",
]
# We don't want to run doctests in the docstrings we inherit from Numpy
__doctest_skip__ = ["Quantity.*"]
_UNIT_NOT_INITIALISED = "(Unit not initialised)"
_UFUNCS_FILTER_WARNINGS = {np.arcsin, np.arccos, np.arccosh, np.arctanh}
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for Quantity.
"""
latex_array_threshold = _config.ConfigItem(
100,
"The maximum size an array Quantity can be before its LaTeX "
'representation for IPython gets "summarized" (meaning only the first '
'and last few elements are shown with "..." between). Setting this to a '
"negative number means that the value will instead be whatever numpy "
"gets from get_printoptions.",
)
conf = Conf()
class QuantityIterator:
"""
Flat iterator object to iterate over Quantities.
A `QuantityIterator` iterator is returned by ``q.flat`` for any Quantity
``q``. It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in C-contiguous style, with the last index varying the
fastest. The iterator can also be indexed using basic slicing or
advanced indexing.
See Also
--------
Quantity.flatten : Returns a flattened copy of an array.
Notes
-----
`QuantityIterator` is inspired by `~numpy.ma.core.MaskedIterator`. It
is not exported by the `~astropy.units` module. Instead of
instantiating a `QuantityIterator` directly, use `Quantity.flat`.
"""
def __init__(self, q):
self._quantity = q
self._dataiter = q.view(np.ndarray).flat
def __iter__(self):
return self
def __getitem__(self, indx):
out = self._dataiter.__getitem__(indx)
# For single elements, ndarray.flat.__getitem__ returns scalars; these
# need a new view as a Quantity.
if isinstance(out, type(self._quantity)):
return out
else:
return self._quantity._new_view(out)
def __setitem__(self, index, value):
self._dataiter[index] = self._quantity._to_own_unit(value)
def __next__(self):
"""
Return the next value, or raise StopIteration.
"""
out = next(self._dataiter)
# The underlying flat iterator returns scalars, so we need a new view as a Quantity.
return self._quantity._new_view(out)
next = __next__
def __len__(self):
return len(self._dataiter)
#### properties and methods to match `numpy.ndarray.flatiter` ####
@property
def base(self):
"""A reference to the array that is iterated over."""
return self._quantity
@property
def coords(self):
"""An N-dimensional tuple of current coordinates."""
return self._dataiter.coords
@property
def index(self):
"""Current flat index into the array."""
return self._dataiter.index
def copy(self):
"""Get a copy of the iterator as a 1-D array."""
return self._quantity.flatten()
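# Illustrative sketch (not part of the class): ``q.flat`` iterates over a
# Quantity like ``ndarray.flat``, but yields and accepts Quantities. Assumes
# ``numpy`` is importable as ``np`` and ``astropy.units`` as ``u``.
#
#     >>> import numpy as np
#     >>> import astropy.units as u
#     >>> q = np.arange(6.0).reshape(2, 3) * u.m
#     >>> q.flat[3]   # doctest: +SKIP
#     <Quantity 3. m>
#     >>> q.flat[0:2] = 5 * u.cm   # values are converted to q's own unit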
class QuantityInfoBase(ParentDtypeInfo):
# This is on a base class rather than QuantityInfo directly, so that
# it can be used for EarthLocationInfo yet make clear that that class
# should not be considered a typical Quantity subclass by Table.
attrs_from_parent = {"dtype", "unit"} # dtype and unit taken from parent
_supports_indexing = True
@staticmethod
def default_format(val):
return f"{val.value}"
@staticmethod
def possible_string_format_functions(format_):
"""Iterate through possible string-derived format functions.
A string can either be a format specifier for the format built-in,
a new-style format string, or an old-style format string.
This method is overridden in order to suppress printing the unit
in each row since it is already at the top in the column header.
"""
yield lambda format_, val: format(val.value, format_)
yield lambda format_, val: format_.format(val.value)
yield lambda format_, val: format_ % val.value
class QuantityInfo(QuantityInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
_represent_as_dict_attrs = ("value", "unit")
_construct_from_dict_args = ["value"]
_represent_as_dict_primary_data = "value"
def new_like(self, cols, length, metadata_conflicts="warn", name=None):
"""
Return a new Quantity instance which is consistent with the
input ``cols`` and has ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
cols : list
List of input columns
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : `~astropy.units.Quantity` (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(
cols, metadata_conflicts, name, ("meta", "format", "description")
)
# Make an empty quantity using the unit of the last one.
shape = (length,) + attrs.pop("shape")
dtype = attrs.pop("dtype")
# Use zeros so we do not get problems for Quantity subclasses such
# as Longitude and Latitude, which cannot take arbitrary values.
data = np.zeros(shape=shape, dtype=dtype)
# Get arguments needed to reconstruct class
map = {
key: (data if key == "value" else getattr(cols[-1], key))
for key in self._represent_as_dict_attrs
}
map["copy"] = False
out = self._construct_from_dict(map)
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
def get_sortable_arrays(self):
"""
Return a list of arrays which can be lexically sorted to represent
the order of the parent column.
For Quantity this is just the quantity itself.
Returns
-------
arrays : list of ndarray
"""
return [self._parent]
class Quantity(np.ndarray):
"""A `~astropy.units.Quantity` represents a number with some associated unit.
See also: https://docs.astropy.org/en/stable/units/quantity.html
Parameters
----------
value : number, `~numpy.ndarray`, `~astropy.units.Quantity` (sequence), or str
The numerical value of this quantity in the units given by unit. If a
`Quantity` or sequence of them (or any other valid object with a
``unit`` attribute), creates a new `Quantity` object, converting to
`unit` units as needed. If a string, it is converted to a number or
`Quantity`, depending on whether a unit is present.
unit : unit-like
An object that represents the unit associated with the input value.
Must be an `~astropy.units.UnitBase` object or a string parseable by
the :mod:`~astropy.units` package.
dtype : ~numpy.dtype, optional
The dtype of the resulting Numpy array or scalar that will
hold the value. If not provided, it is determined from the input,
except that any integer and (non-Quantity) object inputs are converted
to float by default.
If `None`, the normal `numpy.dtype` introspection is used, e.g.
preventing upcasting of integers.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy will
only be made if ``__array__`` returns a copy, if value is a nested
sequence, or if a copy is needed to satisfy an explicitly given
``dtype``. (The `False` option is intended mostly for internal use,
to speed up initialization where a copy is known to have been made.
Use with care.)
order : {'C', 'F', 'A'}, optional
Specify the order of the array. As in `~numpy.array`. This parameter
is ignored if the input is a `Quantity` and ``copy=False``.
subok : bool, optional
If `False` (default), the returned array will be forced to be a
`Quantity`. Otherwise, `Quantity` subclasses will be passed through,
or a subclass appropriate for the unit will be used (such as
`~astropy.units.Dex` for ``u.dex(u.AA)``).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting array
should have. Ones will be prepended to the shape as needed to meet
this requirement. This parameter is ignored if the input is a
`Quantity` and ``copy=False``.
Raises
------
TypeError
If the value provided is not a Python numeric type.
TypeError
If the unit provided is not either a :class:`~astropy.units.Unit`
object or a parseable string unit.
Notes
-----
Quantities can also be created by multiplying a number or array with a
:class:`~astropy.units.Unit`. See https://docs.astropy.org/en/latest/units/
Unless the ``dtype`` argument is explicitly specified, integer
or (non-Quantity) object inputs are converted to `float` by default.
"""
# Need to set a class-level default for _equivalencies, or
# Constants cannot initialize properly.
_equivalencies = []
# Default unit for initialization; can be overridden by subclasses,
# possibly to `None` to indicate there is no default unit.
_default_unit = dimensionless_unscaled
# Ensures views have an undefined unit.
_unit = None
__array_priority__ = 10000
def __class_getitem__(cls, unit_shape_dtype):
"""Quantity Type Hints.
Unit-aware type hints are ``Annotated`` objects that encode the class,
the unit, and possibly shape and dtype information, depending on the
python and :mod:`numpy` versions.
Schematically, ``Annotated[cls[shape, dtype], unit]``
As a classmethod, the type is the class, ie ``Quantity``
produces an ``Annotated[Quantity, ...]`` while a subclass
like :class:`~astropy.coordinates.Angle` returns
``Annotated[Angle, ...]``.
Parameters
----------
unit_shape_dtype : :class:`~astropy.units.UnitBase`, str, `~astropy.units.PhysicalType`, or tuple
Unit specification, can be the physical type (ie str or class).
If tuple, then the first element is the unit specification
and all other elements are for `numpy.ndarray` type annotations.
Whether they are included depends on the python and :mod:`numpy`
versions.
Returns
-------
`typing.Annotated`, `astropy.units.Unit`, or `astropy.units.PhysicalType`
Return type in this preference order:
* `typing.Annotated`
* `astropy.units.Unit` or `astropy.units.PhysicalType`
Raises
------
TypeError
If the unit/physical_type annotation is not Unit-like or
PhysicalType-like.
Examples
--------
Create a unit-aware Quantity type annotation
>>> Quantity[Unit("s")]
Annotated[Quantity, Unit("s")]
See Also
--------
`~astropy.units.quantity_input`
Use annotations for unit checks on function arguments and results.
Notes
-----
With Python 3.9+ or :mod:`typing_extensions`, |Quantity| types are also
static-type compatible.
"""
from typing import Annotated
# process whether [unit] or [unit, shape, ptype]
if isinstance(unit_shape_dtype, tuple): # unit, shape, dtype
target = unit_shape_dtype[0]
shape_dtype = unit_shape_dtype[1:]
else: # just unit
target = unit_shape_dtype
shape_dtype = ()
# Allowed unit/physical types. Errors if neither.
try:
unit = Unit(target)
except (TypeError, ValueError):
from astropy.units.physical import get_physical_type
try:
unit = get_physical_type(target)
except (TypeError, ValueError, KeyError): # KeyError for Enum
raise TypeError(
"unit annotation is not a Unit or PhysicalType"
) from None
# Quantity does not (yet) properly extend the NumPy generics types,
# introduced in numpy v1.22+, instead just including the unit info as
# metadata using Annotated.
# TODO: ensure we do interact with NDArray.__class_getitem__.
return Annotated[cls, unit]
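# Illustrative sketch (not part of the class): the annotations produced by
# ``Quantity[...]`` are what `~astropy.units.quantity_input` inspects for
# unit checking. Assumes ``astropy.units`` is importable as ``u``; output
# indicative only.
#
#     >>> import astropy.units as u
#     >>> @u.quantity_input
#     ... def kinetic_energy(m: u.Quantity[u.kg], v: u.Quantity[u.m / u.s]) -> u.Quantity[u.J]:
#     ...     return m * v**2 / 2
#     >>> kinetic_energy(2 * u.kg, 3 * u.m / u.s)   # doctest: +SKIP
#     <Quantity 9. J>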
def __new__(
cls,
value,
unit=None,
dtype=np.inexact,
copy=True,
order=None,
subok=False,
ndmin=0,
):
if unit is not None:
# convert unit first, to avoid multiple string->unit conversions
unit = Unit(unit)
# inexact -> upcast to float dtype
float_default = dtype is np.inexact
if float_default:
dtype = None
# optimize speed for Quantity with no dtype given, copy=False
if isinstance(value, Quantity):
if unit is not None and unit is not value.unit:
value = value.to(unit)
# the above already makes a copy (with float dtype)
copy = False
if type(value) is not cls and not (subok and isinstance(value, cls)):
value = value.view(cls)
if float_default and value.dtype.kind in "iu":
dtype = float
return np.array(
value, dtype=dtype, copy=copy, order=order, subok=True, ndmin=ndmin
)
# Maybe str, or list/tuple of Quantity? If so, this may set value_unit.
# To ensure array remains fast, we short-circuit it.
value_unit = None
if not isinstance(value, np.ndarray):
if isinstance(value, str):
# The first part of the regex string matches any integer/float;
# the second part adds a possible trailing .+-, which will break
# the float function below and ensures that things like 1.2.3deg
# will not work.
pattern = (
r"\s*[+-]?"
r"((\d+\.?\d*)|(\.\d+)|([nN][aA][nN])|"
r"([iI][nN][fF]([iI][nN][iI][tT][yY]){0,1}))"
r"([eE][+-]?\d+)?"
r"[.+-]?"
)
v = re.match(pattern, value)
unit_string = None
try:
value = float(v.group())
except Exception:
raise TypeError(
f'Cannot parse "{value}" as a {cls.__name__}. It does not '
"start with a number."
)
unit_string = v.string[v.end() :].strip()
if unit_string:
value_unit = Unit(unit_string)
if unit is None:
unit = value_unit # signal no conversion needed below.
elif isiterable(value) and len(value) > 0:
# Iterables like lists and tuples.
if all(isinstance(v, Quantity) for v in value):
# If a list/tuple containing only quantities, convert all
# to the same unit.
if unit is None:
unit = value[0].unit
value = [q.to_value(unit) for q in value]
value_unit = unit # signal below that conversion has been done
elif (
dtype is None
and not hasattr(value, "dtype")
and isinstance(unit, StructuredUnit)
):
# Special case for list/tuple of values and a structured unit:
# ``np.array(value, dtype=None)`` would treat tuples as lower
# levels of the array, rather than as elements of a structured
# array, so we use the structure of the unit to help infer the
# structured dtype of the value.
dtype = unit._recursively_get_dtype(value)
using_default_unit = False
if value_unit is None:
# If the value has a `unit` attribute and if not None
# (for Columns with uninitialized unit), treat it like a quantity.
value_unit = getattr(value, "unit", None)
if value_unit is None:
# Default to dimensionless for no (initialized) unit attribute.
if unit is None:
using_default_unit = True
unit = cls._default_unit
value_unit = unit # signal below that no conversion is needed
else:
try:
value_unit = Unit(value_unit)
except Exception as exc:
raise TypeError(
f"The unit attribute {value.unit!r} of the input could "
"not be parsed as an astropy Unit."
) from exc
if unit is None:
unit = value_unit
elif unit is not value_unit:
copy = False # copy will be made in conversion at end
value = np.array(
value, dtype=dtype, copy=copy, order=order, subok=True, ndmin=ndmin
)
# For no-user-input unit, make sure the constructed unit matches the
# structure of the data.
if using_default_unit and value.dtype.names is not None:
unit = value_unit = _structured_unit_like_dtype(value_unit, value.dtype)
# Check that the array contains numbers or numeric objects (e.g., Python
# ints stored with dtype object).
if value.dtype.kind in "OSU" and not (
value.dtype.kind == "O" and isinstance(value.item(0), numbers.Number)
):
raise TypeError("The value must be a valid Python or Numpy numeric type.")
# by default, cast any integer, boolean, etc., to float
if float_default and value.dtype.kind in "iuO":
value = value.astype(float)
# if we allow subclasses, allow a class from the unit.
if subok:
qcls = getattr(unit, "_quantity_class", cls)
if issubclass(qcls, cls):
cls = qcls
value = value.view(cls)
value._set_unit(value_unit)
if unit is value_unit:
return value
else:
# here we had non-Quantity input that had a "unit" attribute
# with a unit different from the desired one. So, convert.
return value.to(unit)
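# Illustrative sketch (not part of the class) of the initialization paths
# handled above; assumes ``astropy.units`` is importable as ``u``. Outputs
# are indicative only.
#
#     >>> import astropy.units as u
#     >>> u.Quantity("1.25 km")              # string: number parsed, rest is the unit  # doctest: +SKIP
#     <Quantity 1.25 km>
#     >>> u.Quantity([1 * u.m, 2 * u.cm])    # sequence of Quantities: first unit wins  # doctest: +SKIP
#     <Quantity [1.  , 0.02] m>
#     >>> u.Quantity(1, u.m).dtype           # integers are upcast to float by default  # doctest: +SKIP
#     dtype('float64')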
def __array_finalize__(self, obj):
# Check whether super().__array_finalize should be called
# (sadly, ndarray.__array_finalize__ is None; we cannot be sure
# what is above us).
super_array_finalize = super().__array_finalize__
if super_array_finalize is not None:
super_array_finalize(obj)
# If we're a new object or viewing an ndarray, nothing has to be done.
if obj is None or obj.__class__ is np.ndarray:
return
# Copy over the unit and possibly info. Note that the only way the
# unit can already be set is if one enters via _new_view(), where the
# unit is often different from that of self, and where propagation of
# info is not always desirable.
if self._unit is None:
unit = getattr(obj, "_unit", None)
if unit is not None:
self._set_unit(unit)
# Copy info if the original had `info` defined. Because of the way the
# DataInfo works, `'info' in obj.__dict__` is False until the
# `info` attribute is accessed or set.
if "info" in obj.__dict__:
self.info = obj.info
def __array_wrap__(self, obj, context=None):
if context is None:
# Methods like .squeeze() created a new `ndarray` and then call
# __array_wrap__ to turn the array into self's subclass.
return self._new_view(obj)
raise NotImplementedError(
"__array_wrap__ should not be used with a context any more since all "
"use should go through array_function. Please raise an issue on "
"https://github.com/astropy/astropy"
)
def __array_ufunc__(self, function, method, *inputs, **kwargs):
"""Wrap numpy ufuncs, taking care of units.
Parameters
----------
function : callable
ufunc to wrap.
method : str
Ufunc method: ``__call__``, ``at``, ``reduce``, etc.
inputs : tuple
Input arrays.
kwargs : keyword arguments
As passed on, with ``out`` containing possible quantity output.
Returns
-------
result : `~astropy.units.Quantity` or `NotImplemented`
Results of the ufunc, with the unit set properly.
"""
# Determine required conversion functions -- to bring the unit of the
# input to that expected (e.g., radian for np.sin), or to get
# consistent units between two inputs (e.g., in np.add) --
# and the unit of the result (or tuple of units for nout > 1).
try:
converters, unit = converters_and_unit(function, method, *inputs)
out = kwargs.get("out", None)
# Avoid loop back by turning any Quantity output into array views.
if out is not None:
# If pre-allocated output is used, check it is suitable.
# This also returns array view, to ensure we don't loop back.
if function.nout == 1:
out = out[0]
out_array = check_output(out, unit, inputs, function=function)
# Ensure output argument remains a tuple.
kwargs["out"] = (out_array,) if function.nout == 1 else out_array
if method == "reduce" and "initial" in kwargs and unit is not None:
# Special-case for initial argument for reductions like
# np.add.reduce. This should be converted to the output unit as
# well, which is typically the same as the input unit (but can
# in principle be different: unitless for np.equal, radian
# for np.arctan2, though those are not necessarily useful!)
kwargs["initial"] = self._to_own_unit(
kwargs["initial"], check_precision=False, unit=unit
)
# Same for inputs, but here also convert if necessary.
arrays = []
for input_, converter in zip(inputs, converters):
input_ = getattr(input_, "value", input_)
arrays.append(converter(input_) if converter else input_)
# Call our superclass's __array_ufunc__
result = super().__array_ufunc__(function, method, *arrays, **kwargs)
# If unit is None, a plain array is expected (e.g., comparisons), which
# means we're done.
# We're also done if the result was None (for method 'at') or
# NotImplemented, which can happen if other inputs/outputs override
# __array_ufunc__; hopefully, they can then deal with us.
if unit is None or result is None or result is NotImplemented:
return result
return self._result_as_quantity(result, unit, out)
except (TypeError, ValueError, AttributeError) as e:
out_normalized = kwargs.get("out", tuple())
inputs_and_outputs = inputs + out_normalized
ignored_ufunc = (
None,
np.ndarray.__array_ufunc__,
type(self).__array_ufunc__,
)
if not all(
getattr(type(io), "__array_ufunc__", None) in ignored_ufunc
for io in inputs_and_outputs
):
return NotImplemented
else:
raise e
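# Illustrative sketch (not part of the class): ufuncs convert inputs to
# consistent units and attach the proper unit to the result. Assumes
# ``numpy`` is importable as ``np`` and ``astropy.units`` as ``u``; outputs
# are indicative only.
#
#     >>> import numpy as np
#     >>> import astropy.units as u
#     >>> np.add(1 * u.m, 2 * u.cm)    # doctest: +SKIP
#     <Quantity 1.02 m>
#     >>> np.sin(30 * u.deg)           # angle converted to radian, result dimensionless  # doctest: +SKIP
#     <Quantity 0.5>   (up to rounding)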
def _result_as_quantity(self, result, unit, out):
"""Turn result into a quantity with the given unit.
If no output is given, it will take a view of the array as a quantity,
and set the unit. If output is given, those should be quantity views
of the result arrays, and the function will just set the unit.
Parameters
----------
result : ndarray or tuple thereof
Array(s) which need to be turned into quantity.
unit : `~astropy.units.Unit`
Unit for the quantities to be returned (or `None` if the result
should not be a quantity). Should be tuple if result is a tuple.
out : `~astropy.units.Quantity` or None
Possible output quantity. Should be `None` or a tuple if result
is a tuple.
Returns
-------
out : `~astropy.units.Quantity`
With units set.
"""
if isinstance(result, (tuple, list)):
if out is None:
out = (None,) * len(result)
# Some np.linalg functions return a namedtuple, which is handy for
# accessing elements by name but cannot be directly initialized with an iterator.
result_cls = getattr(result, "_make", result.__class__)
return result_cls(
self._result_as_quantity(result_, unit_, out_)
for (result_, unit_, out_) in zip(result, unit, out)
)
if out is None:
# View the result array as a Quantity with the proper unit.
return (
result
if unit is None
else self._new_view(result, unit, propagate_info=False)
)
elif isinstance(out, Quantity):
# For given Quantity output, just set the unit. We know the unit
# is not None and the output is of the correct Quantity subclass,
# as it was passed through check_output.
# (We cannot do this unconditionally, though, since it is possible
# for out to be ndarray and the unit to be dimensionless.)
out._set_unit(unit)
return out
def __quantity_subclass__(self, unit):
"""
Overridden by subclasses to change what kind of view is
created based on the output unit of an operation.
Parameters
----------
unit : UnitBase
The unit for which the appropriate class should be returned
Returns
-------
tuple :
- `~astropy.units.Quantity` subclass
- bool: True if subclasses of the given class are ok
"""
return Quantity, True
def _new_view(self, obj=None, unit=None, propagate_info=True):
"""Create a Quantity view of some array-like input, and set the unit.
By default, return a view of ``obj`` of the same class as ``self`` and
with the same unit. Subclasses can override the type of class for a
given unit using ``__quantity_subclass__``, and can ensure properties
other than the unit are copied using ``__array_finalize__``.
If the given unit defines a ``_quantity_class`` of which ``self``
is not an instance, a view using this class is taken.
Parameters
----------
obj : ndarray or scalar, optional
The array to create a view of. If obj is a numpy or python scalar,
it will be converted to an array scalar. By default, ``self``
is converted.
unit : unit-like, optional
The unit of the resulting object. It is used to select a
subclass, and explicitly assigned to the view if given.
If not given, the subclass and unit will be that of ``self``.
propagate_info : bool, optional
Whether to transfer ``info`` if present. Default: `True`, as
appropriate for, e.g., unit conversions or slicing, where the
nature of the object does not change.
Returns
-------
view : `~astropy.units.Quantity` subclass
"""
# Determine the unit and quantity subclass that we need for the view.
if unit is None:
unit = self.unit
quantity_subclass = self.__class__
elif unit is self.unit and self.__class__ is Quantity:
# The second part is because we should not presume what other
# classes want to do for the same unit. E.g., Constant will
# always want to fall back to Quantity, and relies on going
# through `__quantity_subclass__`.
quantity_subclass = Quantity
else:
unit = Unit(unit)
quantity_subclass = getattr(unit, "_quantity_class", Quantity)
if isinstance(self, quantity_subclass):
quantity_subclass, subok = self.__quantity_subclass__(unit)
if subok:
quantity_subclass = self.__class__
# We only want to propagate information from ``self`` to our new view,
# so obj should be a regular array. By using ``np.array``, we also
# convert python and numpy scalars, which cannot be viewed as arrays
# and thus not as Quantity either, to zero-dimensional arrays.
# (These are turned back into scalars in `.value`.)
# Note that for an ndarray input, the np.array call takes only about
# twice as long as an ``obj.__class__ is np.ndarray`` check, so it is
# not worth special-casing.
if obj is None:
obj = self.view(np.ndarray)
else:
obj = np.array(obj, copy=False, subok=True)
# Take the view, set the unit, and update possible other properties
# such as ``info``, ``wrap_angle`` in `Longitude`, etc.
view = obj.view(quantity_subclass)
view._set_unit(unit)
view.__array_finalize__(self)
if propagate_info and "info" in self.__dict__:
view.info = self.info
return view
def _set_unit(self, unit):
"""Set the unit.
This is used anywhere the unit is set or modified, i.e., in the
initializer, in ``__imul__`` and ``__itruediv__`` for in-place
multiplication and division by another unit, as well as in
``__array_finalize__`` for wrapping up views. For Quantity, it just
sets the unit, but subclasses can override it to check that, e.g.,
a unit is consistent.
"""
if not isinstance(unit, UnitBase):
if isinstance(self._unit, StructuredUnit) or isinstance(
unit, StructuredUnit
):
unit = StructuredUnit(unit, self.dtype)
else:
# Trying to go through a string ensures that, e.g., Magnitudes with
# dimensionless physical unit become Quantity with units of mag.
unit = Unit(str(unit), parse_strict="silent")
if not isinstance(unit, (UnitBase, StructuredUnit)):
raise UnitTypeError(
f"{self.__class__.__name__} instances require normal units, "
f"not {unit.__class__} instances."
)
self._unit = unit
def __deepcopy__(self, memo):
# If we don't define this, ``copy.deepcopy(quantity)`` will
# return a bare Numpy array.
return self.copy()
def __reduce__(self):
# patch to pickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
object_state = list(super().__reduce__())
object_state[2] = (object_state[2], self.__dict__)
return tuple(object_state)
def __setstate__(self, state):
# patch to unpickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
nd_state, own_state = state
super().__setstate__(nd_state)
self.__dict__.update(own_state)
info = QuantityInfo()
def _to_value(self, unit, equivalencies=[]):
"""Helper method for to and to_value."""
if equivalencies == []:
equivalencies = self._equivalencies
if not self.dtype.names or isinstance(self.unit, StructuredUnit):
# Standard path, let the unit do the work.
return self.unit.to(
unit, self.view(np.ndarray), equivalencies=equivalencies
)
else:
# The .to() method of a simple unit cannot convert a structured
# dtype, so we work around it, by recursing.
# TODO: deprecate this?
# Convert simple to Structured on initialization?
result = np.empty_like(self.view(np.ndarray))
for name in self.dtype.names:
result[name] = self[name]._to_value(unit, equivalencies)
return result
def to(self, unit, equivalencies=[], copy=True):
"""
Return a new `~astropy.units.Quantity` object with the specified unit.
Parameters
----------
unit : unit-like
An object that represents the unit to convert to. Must be
an `~astropy.units.UnitBase` object or a string parseable
by the `~astropy.units` package.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
If not provided or ``[]``, class default equivalencies will be used
(none for `~astropy.units.Quantity`, but may be set for subclasses)
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy
will only be made if necessary.
See Also
--------
to_value : get the numerical value in a given unit.
"""
# We don't use `to_value` below since we always want to make a copy
# and don't want to slow down this method (esp. the scalar case).
unit = Unit(unit)
if copy:
# Avoid using to_value to ensure that we make a copy. We also
# don't want to slow down this method (esp. the scalar case).
value = self._to_value(unit, equivalencies)
else:
# to_value only copies if necessary
value = self.to_value(unit, equivalencies)
return self._new_view(value, unit)
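# Illustrative sketch (not part of the class); assumes ``astropy.units`` is
# importable as ``u``. Outputs are indicative only.
#
#     >>> import astropy.units as u
#     >>> (1.0 * u.pc).to(u.km)   # doctest: +SKIP
#     <Quantity 3.08567758e+13 km>
#     >>> (500 * u.nm).to(u.Hz, equivalencies=u.spectral())   # doctest: +SKIP
#     <Quantity 5.99584916e+14 Hz>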
def to_value(self, unit=None, equivalencies=[]):
"""
The numerical value, possibly in a different unit.
Parameters
----------
unit : unit-like, optional
The unit in which the value should be given. If not given or `None`,
use the current unit.
equivalencies : list of tuple, optional
A list of equivalence pairs to try if the units are not directly
convertible (see :ref:`astropy:unit_equivalencies`). If not provided
or ``[]``, class default equivalencies will be used (none for
`~astropy.units.Quantity`, but may be set for subclasses).
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
Returns
-------
value : ndarray or scalar
The value in the units specified. For arrays, this will be a view
of the data if no unit conversion was necessary.
See Also
--------
to : Get a new instance in a different unit.
"""
if unit is None or unit is self.unit:
value = self.view(np.ndarray)
elif not self.dtype.names:
# For non-structured, we attempt a short-cut, where we just get
# the scale. If that is 1, we do not have to do anything.
unit = Unit(unit)
# We want a view if the unit does not change. One could check
# with "==", but that calculates the scale that we need anyway.
# TODO: would be better for `unit.to` to have an in-place flag.
try:
scale = self.unit._to(unit)
except Exception:
# Short-cut failed; try default (maybe equivalencies help).
value = self._to_value(unit, equivalencies)
else:
value = self.view(np.ndarray)
if not is_effectively_unity(scale):
# not in-place!
value = value * scale
else:
# For structured arrays, we go the default route.
value = self._to_value(unit, equivalencies)
# Index with empty tuple to decay array scalars into numpy scalars.
return value if value.shape else value[()]
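# Illustrative sketch (not part of the class): when no conversion is needed,
# ``to_value`` returns a view of the underlying data; otherwise a new,
# rescaled array. Assumes ``numpy`` is importable as ``np`` and
# ``astropy.units`` as ``u``.
#
#     >>> import numpy as np
#     >>> import astropy.units as u
#     >>> q = np.arange(3.0) * u.m
#     >>> np.shares_memory(q.to_value(u.m), q)    # same unit -> a view
#     True
#     >>> np.shares_memory(q.to_value(u.cm), q)   # rescaled -> a new array
#     False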
value = property(
to_value,
doc="""The numerical value of this instance.
See also
--------
to_value : Get the numerical value in a given unit.
""",
)
@property
def unit(self):
"""
A `~astropy.units.UnitBase` object representing the unit of this
quantity.
"""
return self._unit
@property
def equivalencies(self):
"""
A list of equivalencies that will be applied by default during
unit conversions.
"""
return self._equivalencies
def _recursively_apply(self, func):
"""Apply function recursively to every field.
Returns a copy with the result.
"""
result = np.empty_like(self)
result_value = result.view(np.ndarray)
result_unit = ()
for name in self.dtype.names:
part = func(self[name])
result_value[name] = part.value
result_unit += (part.unit,)
result._set_unit(result_unit)
return result
@property
def si(self):
"""
Returns a copy of the current `Quantity` instance with SI units. The
value of the resulting object will be scaled.
"""
if self.dtype.names:
return self._recursively_apply(operator.attrgetter("si"))
si_unit = self.unit.si
return self._new_view(self.value * si_unit.scale, si_unit / si_unit.scale)
@property
def cgs(self):
"""
Returns a copy of the current `Quantity` instance with CGS units. The
value of the resulting object will be scaled.
"""
if self.dtype.names:
return self._recursively_apply(operator.attrgetter("cgs"))
cgs_unit = self.unit.cgs
return self._new_view(self.value * cgs_unit.scale, cgs_unit / cgs_unit.scale)
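# Illustrative sketch (not part of the class); assumes ``astropy.units`` is
# importable as ``u``. Outputs are indicative only.
#
#     >>> import astropy.units as u
#     >>> (1.0 * u.km).si     # doctest: +SKIP
#     <Quantity 1000. m>
#     >>> (1.0 * u.J).cgs     # doctest: +SKIP
#     <Quantity 10000000. erg>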
@property
def isscalar(self):
"""
True if the `value` of this quantity is a scalar, or False if it
is an array-like object.
.. note::
This is subtly different from `numpy.isscalar` in that
`numpy.isscalar` returns False for a zero-dimensional array
(e.g. ``np.array(1)``), while this is True for quantities,
since quantities cannot represent true numpy scalars.
"""
return not self.shape
# This flag controls whether convenience conversion members, such as
# ``q.m`` being equivalent to ``q.to_value(u.m)``, are available. This is
# not enabled on Quantity itself, but is enabled on some subclasses of
# Quantity, such as `astropy.coordinates.Angle`.
_include_easy_conversion_members = False
def __dir__(self):
"""
Quantities are able to directly convert to other units that
have the same physical type. This function is implemented in
order to make autocompletion still work correctly in IPython.
"""
if not self._include_easy_conversion_members:
return super().__dir__()
dir_values = set(super().__dir__())
equivalencies = Unit._normalize_equivalencies(self.equivalencies)
for equivalent in self.unit._get_units_with_same_physical_type(equivalencies):
dir_values.update(equivalent.names)
return sorted(dir_values)
def __getattr__(self, attr):
"""
Quantities are able to directly convert to other units that
have the same physical type.
"""
if not self._include_easy_conversion_members:
raise AttributeError(
f"'{self.__class__.__name__}' object has no '{attr}' member"
)
def get_virtual_unit_attribute():
registry = get_current_unit_registry().registry
to_unit = registry.get(attr, None)
if to_unit is None:
return None
try:
return self.unit.to(
to_unit, self.value, equivalencies=self.equivalencies
)
except UnitsError:
return None
value = get_virtual_unit_attribute()
if value is None:
raise AttributeError(
f"{self.__class__.__name__} instance has no attribute '{attr}'"
)
else:
return value
# Equality needs to be handled explicitly as ndarray.__eq__ gives
# DeprecationWarnings on any error, which is distracting, and does not
# deal well with structured arrays (nor does the ufunc).
def __eq__(self, other):
try:
other_value = self._to_own_unit(other)
except UnitsError:
return False
except Exception:
return NotImplemented
return self.value.__eq__(other_value)
def __ne__(self, other):
try:
other_value = self._to_own_unit(other)
except UnitsError:
return True
except Exception:
return NotImplemented
return self.value.__ne__(other_value)
# Unit conversion operator (<<).
def __lshift__(self, other):
try:
other = Unit(other, parse_strict="silent")
except UnitTypeError:
return NotImplemented
return self.__class__(self, other, copy=False, subok=True)
def __ilshift__(self, other):
try:
other = Unit(other, parse_strict="silent")
except UnitTypeError:
return NotImplemented # try other.__rlshift__(self)
try:
factor = self.unit._to(other)
except UnitConversionError: # incompatible, or requires an Equivalency
return NotImplemented
except AttributeError: # StructuredUnit does not have `_to`
# In principle, in-place might be possible.
return NotImplemented
view = self.view(np.ndarray)
try:
view *= factor # operates on view
except TypeError:
# The error is `numpy.core._exceptions._UFuncOutputCastingError`,
# which inherits from `TypeError`.
return NotImplemented
self._set_unit(other)
return self
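# Illustrative sketch (not part of the class): ``<<`` attaches a unit without
# copying the data, and ``<<=`` converts in place by rescaling the values.
# Assumes ``numpy`` is importable as ``np`` and ``astropy.units`` as ``u``;
# output indicative only.
#
#     >>> import numpy as np
#     >>> import astropy.units as u
#     >>> q = np.arange(3.0) << u.m
#     >>> q <<= u.km
#     >>> q   # doctest: +SKIP
#     <Quantity [0.   , 0.001, 0.002] km>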
def __rlshift__(self, other):
if not self.isscalar:
return NotImplemented
return Unit(self).__rlshift__(other)
# Give warning for other >> self, since probably other << self was meant.
def __rrshift__(self, other):
warnings.warn(
">> is not implemented. Did you mean to convert "
"something to this quantity as a unit using '<<'?",
AstropyWarning,
)
return NotImplemented
# Also define __rshift__ and __irshift__ so we override default ndarray
# behaviour, but instead of emitting a warning here, let it be done by
# other (which likely is a unit if this was a mistake).
def __rshift__(self, other):
return NotImplemented
def __irshift__(self, other):
return NotImplemented
# Arithmetic operations
def __mul__(self, other):
"""Multiplication between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
try:
return self._new_view(
self.value.copy(), other * self.unit, propagate_info=False
)
except UnitsError: # let other try to deal with it
return NotImplemented
return super().__mul__(other)
def __imul__(self, other):
"""In-place multiplication between `Quantity` objects and others."""
if isinstance(other, (UnitBase, str)):
self._set_unit(other * self.unit)
return self
return super().__imul__(other)
def __rmul__(self, other):
"""
Right Multiplication between `Quantity` objects and other objects.
"""
return self.__mul__(other)
def __truediv__(self, other):
"""Division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
try:
return self._new_view(
self.value.copy(), self.unit / other, propagate_info=False
)
except UnitsError: # let other try to deal with it
return NotImplemented
return super().__truediv__(other)
def __itruediv__(self, other):
"""Inplace division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
self._set_unit(self.unit / other)
return self
return super().__itruediv__(other)
def __rtruediv__(self, other):
"""Right Division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
return self._new_view(
1.0 / self.value, other / self.unit, propagate_info=False
)
return super().__rtruediv__(other)
def __pow__(self, other):
if isinstance(other, Fraction):
# Avoid getting object arrays by raising the value to a Fraction.
return self._new_view(
self.value ** float(other), self.unit**other, propagate_info=False
)
return super().__pow__(other)
# other overrides of special functions
def __hash__(self):
return hash(self.value) ^ hash(self.unit)
def __iter__(self):
if self.isscalar:
raise TypeError(
f"'{self.__class__.__name__}' object with a scalar value is not"
" iterable"
)
# Otherwise return a generator
def quantity_iter():
for val in self.value:
yield self._new_view(val)
return quantity_iter()
def __getitem__(self, key):
if isinstance(key, str) and isinstance(self.unit, StructuredUnit):
return self._new_view(
self.view(np.ndarray)[key], self.unit[key], propagate_info=False
)
try:
out = super().__getitem__(key)
except IndexError:
# We want zero-dimensional Quantity objects to behave like scalars,
# so they should raise a TypeError rather than an IndexError.
if self.isscalar:
raise TypeError(
f"'{self.__class__.__name__}' object with a scalar value "
"does not support indexing"
)
else:
raise
# For single elements, ndarray.__getitem__ returns scalars; these
# need a new view as a Quantity.
if not isinstance(out, np.ndarray):
out = self._new_view(out)
return out
def __setitem__(self, i, value):
if isinstance(i, str):
# Indexing will cause a different unit, so by doing this in
# two steps we effectively try with the right unit.
self[i][...] = value
return
# update indices in info if the info property has been accessed
# (in which case 'info' in self.__dict__ is True; this is guaranteed
# to be the case if we're part of a table).
if not self.isscalar and "info" in self.__dict__:
self.info.adjust_indices(i, value, len(self))
self.view(np.ndarray).__setitem__(i, self._to_own_unit(value))
# __contains__ is OK
def __bool__(self):
"""This method raises ValueError, since truthiness of quantities is ambiguous,
especially for logarithmic units and temperatures. Use explicit comparisons.
"""
raise ValueError(
f"{type(self).__name__} truthiness is ambiguous, especially for logarithmic units"
" and temperatures. Use explicit comparisons."
)
def __len__(self):
if self.isscalar:
raise TypeError(
f"'{self.__class__.__name__}' object with a scalar value has no len()"
)
else:
return len(self.value)
# Numerical types
def __float__(self):
try:
return float(self.to_value(dimensionless_unscaled))
except (UnitsError, TypeError):
raise TypeError(
"only dimensionless scalar quantities can be "
"converted to Python scalars"
)
def __int__(self):
try:
return int(self.to_value(dimensionless_unscaled))
except (UnitsError, TypeError):
raise TypeError(
"only dimensionless scalar quantities can be "
"converted to Python scalars"
)
def __index__(self):
# for indices, we do not want to mess around with scaling at all,
# so unlike for float, int, we insist here on unscaled dimensionless
try:
assert self.unit.is_unity()
return self.value.__index__()
except Exception:
raise TypeError(
"only integer dimensionless scalar quantities "
"can be converted to a Python index"
)
# TODO: we may want to add a hook for dimensionless quantities?
@property
def _unitstr(self):
if self.unit is None:
unitstr = _UNIT_NOT_INITIALISED
else:
unitstr = str(self.unit)
if unitstr:
unitstr = " " + unitstr
return unitstr
def to_string(self, unit=None, precision=None, format=None, subfmt=None):
"""
Generate a string representation of the quantity and its unit.
The behavior of this function can be altered via the
`numpy.set_printoptions` function and its various keywords. The
exception to this is the ``threshold`` keyword, which is controlled via
the ``[units.quantity]`` configuration item ``latex_array_threshold``.
This is treated separately because the numpy default of 1000 is too big
for most browsers to handle.
Parameters
----------
unit : unit-like, optional
Specifies the unit. If not provided,
the unit used to initialize the quantity will be used.
precision : number, optional
The level of decimal precision. If `None`, or not provided,
it will be determined from NumPy print options.
format : str, optional
The format of the result. If not provided, an unadorned
string is returned. Supported values are:
- 'latex': Return a LaTeX-formatted string
- 'latex_inline': Return a LaTeX-formatted string that uses
negative exponents instead of fractions
subfmt : str, optional
Subformat of the result. For the moment, only used for
``format='latex'`` and ``format='latex_inline'``. Supported
values are:
- 'inline': Use ``$ ... $`` as delimiters.
- 'display': Use ``$\\displaystyle ... $`` as delimiters.
Returns
-------
str
A string with the contents of this Quantity
"""
if unit is not None and unit != self.unit:
return self.to(unit).to_string(
unit=None, precision=precision, format=format, subfmt=subfmt
)
formats = {
None: None,
"latex": {
None: ("$", "$"),
"inline": ("$", "$"),
"display": (r"$\displaystyle ", r"$"),
},
}
formats["latex_inline"] = formats["latex"]
if format not in formats:
raise ValueError(f"Unknown format '{format}'")
elif format is None:
if precision is None:
# Use default formatting settings
return f"{self.value}{self._unitstr:s}"
else:
# np.array2string properly formats arrays as well as scalars
return (
np.array2string(self.value, precision=precision, floatmode="fixed")
+ self._unitstr
)
# else, for the moment we assume format="latex" or "latex_inline".
# Set the precision if set, otherwise use numpy default
pops = np.get_printoptions()
format_spec = f".{precision if precision is not None else pops['precision']}g"
def float_formatter(value):
return Latex.format_exponential_notation(value, format_spec=format_spec)
def complex_formatter(value):
return "({}{}i)".format(
Latex.format_exponential_notation(value.real, format_spec=format_spec),
Latex.format_exponential_notation(
value.imag, format_spec="+" + format_spec
),
)
# The view is needed for the scalar case - self.value might be float.
latex_value = np.array2string(
self.view(np.ndarray),
threshold=(
conf.latex_array_threshold
if conf.latex_array_threshold > -1
else pops["threshold"]
),
formatter={
"float_kind": float_formatter,
"complex_kind": complex_formatter,
},
max_line_width=np.inf,
separator=",~",
)
latex_value = latex_value.replace("...", r"\dots")
# Format unit
# [1:-1] strips the '$' on either side needed for math mode
if self.unit is None:
latex_unit = _UNIT_NOT_INITIALISED
elif format == "latex":
latex_unit = self.unit._repr_latex_()[1:-1] # note this is unicode
elif format == "latex_inline":
latex_unit = self.unit.to_string(format="latex_inline")[1:-1]
delimiter_left, delimiter_right = formats[format][subfmt]
return rf"{delimiter_left}{latex_value} \; {latex_unit}{delimiter_right}"
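# Illustrative sketch (not part of the class); assumes ``astropy.units`` is
# importable as ``u``. The latex formats wrap value and unit in math-mode
# delimiters; outputs are indicative only.
#
#     >>> import astropy.units as u
#     >>> (3.14159 * u.m).to_string(precision=3)   # doctest: +SKIP
#     '3.142 m'
#     >>> (3.14159 * u.m).to_string(format="latex")   # doctest: +SKIP  (a '$...$' math-mode string)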
def __str__(self):
return self.to_string()
def __repr__(self):
prefixstr = "<" + self.__class__.__name__ + " "
arrstr = np.array2string(
self.view(np.ndarray), separator=", ", prefix=prefixstr
)
return f"{prefixstr}{arrstr}{self._unitstr:s}>"
def _repr_latex_(self):
"""
Generate a latex representation of the quantity and its unit.
Returns
-------
lstr
A LaTeX string with the contents of this Quantity
"""
# NOTE: This should change to display format in a future release
return self.to_string(format="latex", subfmt="inline")
def __format__(self, format_spec):
try:
return self.to_string(format=format_spec)
except ValueError:
# We might have a unit format not implemented in `to_string()`.
if format_spec in Base.registry:
if self.unit is dimensionless_unscaled:
return f"{self.value}"
else:
return f"{self.value} {format(self.unit, format_spec)}"
# Can the value be formatted on its own?
try:
return f"{format(self.value, format_spec)}{self._unitstr:s}"
except ValueError:
# Format the whole thing as a single string.
return format(f"{self.value}{self._unitstr:s}", format_spec)
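# Illustrative sketch (not part of the class): standard numeric format specs
# apply to the value with the unit appended, while unit-format names such as
# 'latex' are forwarded to ``to_string``. Assumes ``astropy.units`` is
# importable as ``u``; outputs are indicative only.
#
#     >>> import astropy.units as u
#     >>> f"{2.71828 * u.m:.2f}"   # doctest: +SKIP
#     '2.72 m'
#     >>> f"{3 * u.m:latex}"       # doctest: +SKIP  (a '$...$' math-mode string)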
def decompose(self, bases=[]):
"""
Generates a new `Quantity` with the units
decomposed. Decomposed units have only irreducible units in
them (see `astropy.units.UnitBase.decompose`).
Parameters
----------
bases : sequence of `~astropy.units.UnitBase`, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
This will raise a `~astropy.units.UnitsError` if it is not possible
to do so.
Returns
-------
newq : `~astropy.units.Quantity`
A new object equal to this quantity with units decomposed.
"""
return self._decompose(False, bases=bases)
def _decompose(self, allowscaledunits=False, bases=[]):
"""
Generates a new `Quantity` with the units decomposed. Decomposed
units have only irreducible units in them (see
`astropy.units.UnitBase.decompose`).
Parameters
----------
allowscaledunits : bool
If True, the resulting `Quantity` may have a scale factor
associated with it. If False, any scaling in the unit will
be subsumed into the value of the resulting `Quantity`
bases : sequence of UnitBase, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
This will raise a `~astropy.units.UnitsError` if it is not possible
to do so.
Returns
-------
newq : `~astropy.units.Quantity`
A new object equal to this quantity with units decomposed.
"""
new_unit = self.unit.decompose(bases=bases)
# Be careful here because self.value usually is a view of self;
# be sure that the original value is not being modified.
if not allowscaledunits and hasattr(new_unit, "scale"):
new_value = self.value * new_unit.scale
new_unit = new_unit / new_unit.scale
return self._new_view(new_value, new_unit)
else:
return self._new_view(self.copy(), new_unit)
# These functions need to be overridden to take into account the units
# Array conversion
# https://numpy.org/doc/stable/reference/arrays.ndarray.html#array-conversion
def item(self, *args):
"""Copy an element of an array to a scalar Quantity and return it.
Like :meth:`~numpy.ndarray.item` except that it always
returns a `Quantity`, not a Python scalar.
"""
return self._new_view(super().item(*args))
def tolist(self):
raise NotImplementedError(
"cannot make a list of Quantities. Get list of values with"
" q.value.tolist()."
)
def _to_own_unit(self, value, check_precision=True, *, unit=None):
"""Convert value to one's own unit (or that given).
Here, non-quantities are treated as dimensionless, and care is taken
for values of 0, infinity or nan, which are allowed to have any unit.
Parameters
----------
value : anything convertible to `~astropy.units.Quantity`
The value to be converted to the requested unit.
check_precision : bool
Whether to forbid conversion of float to integer if that changes
the input number. Default: `True`.
unit : `~astropy.units.Unit` or None
The unit to convert to. By default, the unit of ``self``.
Returns
-------
value : number or `~numpy.ndarray`
In the requested units.
"""
if unit is None:
unit = self.unit
try:
_value = value.to_value(unit)
except AttributeError:
# We're not a Quantity.
# First remove two special cases (with a fast test):
# 1) Maybe masked printing? MaskedArray with quantities does not
# work very well, but no reason to break even repr and str.
# 2) np.ma.masked? useful if we're a MaskedQuantity.
if value is np.ma.masked or (
value is np.ma.masked_print_option and self.dtype.kind == "O"
):
return value
# Now, let's try a more general conversion.
# Plain arrays will be converted to dimensionless in the process,
# but anything with a unit attribute will use that.
try:
as_quantity = Quantity(value)
_value = as_quantity.to_value(unit)
except UnitsError:
# last chance: if this was not something with a unit
# and is all 0, inf, or nan, we treat it as arbitrary unit.
if not hasattr(value, "unit") and can_have_arbitrary_unit(
as_quantity.value
):
_value = as_quantity.value
else:
raise
if self.dtype.kind == "i" and check_precision:
# If, e.g., we are casting float to int, we want to fail if
# precision is lost, but let things pass if it works.
_value = np.array(_value, copy=False, subok=True)
if not np.can_cast(_value.dtype, self.dtype):
self_dtype_array = np.array(_value, self.dtype, subok=True)
if not np.all((self_dtype_array == _value) | np.isnan(_value)):
raise TypeError(
"cannot convert value type to array type without precision loss"
)
# Cast to our dtype so field names match and things like equality work
# (the conversion above would already have failed if units did not match).
# TODO: is this the best place to do this?
if _value.dtype.names is not None:
_value = _value.astype(self.dtype, copy=False)
return _value
def itemset(self, *args):
if len(args) == 0:
raise ValueError("itemset must have at least one argument")
self.view(np.ndarray).itemset(*(args[:-1] + (self._to_own_unit(args[-1]),)))
def tostring(self, order="C"):
"""Not implemented, use ``.value.tostring()`` instead."""
raise NotImplementedError(
"cannot write Quantities to string. Write array with"
" q.value.tostring(...)."
)
def tobytes(self, order="C"):
"""Not implemented, use ``.value.tobytes()`` instead."""
raise NotImplementedError(
"cannot write Quantities to bytes. Write array with q.value.tobytes(...)."
)
def tofile(self, fid, sep="", format="%s"):
"""Not implemented, use ``.value.tofile()`` instead."""
raise NotImplementedError(
"cannot write Quantities to file. Write array with q.value.tofile(...)"
)
def dump(self, file):
"""Not implemented, use ``.value.dump()`` instead."""
raise NotImplementedError(
"cannot dump Quantities to file. Write array with q.value.dump()"
)
def dumps(self):
"""Not implemented, use ``.value.dumps()`` instead."""
raise NotImplementedError(
"cannot dump Quantities to string. Write array with q.value.dumps()"
)
# astype, byteswap, copy, view, getfield, setflags OK as is
def fill(self, value):
self.view(np.ndarray).fill(self._to_own_unit(value))
# Shape manipulation: resize cannot be done (does not own data), but
# shape, transpose, swapaxes, flatten, ravel, squeeze all OK. Only
# the flat iterator needs to be overwritten, otherwise single items are
# returned as numbers.
@property
def flat(self):
"""A 1-D iterator over the Quantity array.
This returns a ``QuantityIterator`` instance, which behaves the same
as the `~numpy.flatiter` instance returned by `~numpy.ndarray.flat`,
and is similar to, but not a subclass of, Python's built-in iterator
object.
"""
return QuantityIterator(self)
@flat.setter
def flat(self, value):
y = self.ravel()
y[:] = value
# Item selection and manipulation
# repeat, sort, compress, diagonal OK
def take(self, indices, axis=None, out=None, mode="raise"):
out = super().take(indices, axis=axis, out=out, mode=mode)
# For single elements, ndarray.take returns scalars; these
# need a new view as a Quantity.
if type(out) is not type(self):
out = self._new_view(out)
return out
def put(self, indices, values, mode="raise"):
self.view(np.ndarray).put(indices, self._to_own_unit(values), mode)
def choose(self, choices, out=None, mode="raise"):
raise NotImplementedError(
"cannot choose based on quantity. Choose using array with"
" q.value.choose(...)"
)
# ensure we do not return indices as quantities
def argsort(self, axis=-1, kind="quicksort", order=None):
return self.view(np.ndarray).argsort(axis=axis, kind=kind, order=order)
def searchsorted(self, v, *args, **kwargs):
return np.searchsorted(
np.array(self), self._to_own_unit(v, check_precision=False), *args, **kwargs
) # avoid numpy 1.6 problem
if NUMPY_LT_1_22:
def argmax(self, axis=None, out=None):
return self.view(np.ndarray).argmax(axis, out=out)
def argmin(self, axis=None, out=None):
return self.view(np.ndarray).argmin(axis, out=out)
else:
def argmax(self, axis=None, out=None, *, keepdims=False):
return self.view(np.ndarray).argmax(axis=axis, out=out, keepdims=keepdims)
def argmin(self, axis=None, out=None, *, keepdims=False):
return self.view(np.ndarray).argmin(axis=axis, out=out, keepdims=keepdims)
def __array_function__(self, function, types, args, kwargs):
"""Wrap numpy functions, taking care of units.
Parameters
----------
function : callable
Numpy function to wrap
types : iterable of classes
Classes that provide an ``__array_function__`` override. Can
in principle be used to interact with other classes. Below,
mostly passed on to `~numpy.ndarray`, which can only interact
with subclasses.
args : tuple
Positional arguments provided in the function call.
kwargs : dict
Keyword arguments provided in the function call.
Returns
-------
        result : `~astropy.units.Quantity` or `~numpy.ndarray`
As appropriate for the function. If the function is not
supported, `NotImplemented` is returned, which will lead to
a `TypeError` unless another argument overrode the function.
Raises
------
~astropy.units.UnitsError
If operands have incompatible units.
"""
# A function should be in one of the following sets or dicts:
# 1. SUBCLASS_SAFE_FUNCTIONS (set), if the numpy implementation
# supports Quantity; we pass on to ndarray.__array_function__.
# 2. FUNCTION_HELPERS (dict), if the numpy implementation is usable
# after converting quantities to arrays with suitable units,
# and possibly setting units on the result.
# 3. DISPATCHED_FUNCTIONS (dict), if the function makes sense but
# requires a Quantity-specific implementation.
# 4. UNSUPPORTED_FUNCTIONS (set), if the function does not make sense.
# For now, since we may not yet have complete coverage, if a
# function is in none of the above, we simply call the numpy
# implementation.
if function in SUBCLASS_SAFE_FUNCTIONS:
return super().__array_function__(function, types, args, kwargs)
elif function in FUNCTION_HELPERS:
function_helper = FUNCTION_HELPERS[function]
try:
args, kwargs, unit, out = function_helper(*args, **kwargs)
except NotImplementedError:
return self._not_implemented_or_raise(function, types)
result = super().__array_function__(function, types, args, kwargs)
# Fall through to return section
elif function in DISPATCHED_FUNCTIONS:
dispatched_function = DISPATCHED_FUNCTIONS[function]
try:
result, unit, out = dispatched_function(*args, **kwargs)
except NotImplementedError:
return self._not_implemented_or_raise(function, types)
# Fall through to return section
elif function in UNSUPPORTED_FUNCTIONS:
return NotImplemented
else:
warnings.warn(
f"function '{function.__name__}' is not known to astropy's Quantity."
" Will run it anyway, hoping it will treat ndarray subclasses"
" correctly. Please raise an issue at"
" https://github.com/astropy/astropy/issues.",
AstropyWarning,
)
return super().__array_function__(function, types, args, kwargs)
# If unit is None, a plain array is expected (e.g., boolean), which
# means we're done.
# We're also done if the result was NotImplemented, which can happen
# if other inputs/outputs override __array_function__;
# hopefully, they can then deal with us.
if unit is None or result is NotImplemented:
return result
return self._result_as_quantity(result, unit, out=out)
def _not_implemented_or_raise(self, function, types):
# Our function helper or dispatcher found that the function does not
# work with Quantity. In principle, there may be another class that
# knows what to do with us, for which we should return NotImplemented.
# But if there is ndarray (or a non-Quantity subclass of it) around,
# it quite likely coerces, so we should just break.
if any(
issubclass(t, np.ndarray) and not issubclass(t, Quantity) for t in types
):
raise TypeError(
f"the Quantity implementation cannot handle {function} "
"with the given arguments."
) from None
else:
return NotImplemented
# Calculation -- override ndarray methods to take into account units.
# We use the corresponding numpy functions to evaluate the results, since
# the methods do not always allow calling with keyword arguments.
# For instance, np.array([0.,2.]).clip(a_min=0., a_max=1.) gives
# TypeError: 'a_max' is an invalid keyword argument for this function.
def _wrap_function(self, function, *args, unit=None, out=None, **kwargs):
"""Wrap a numpy function that processes self, returning a Quantity.
Parameters
----------
function : callable
Numpy function to wrap.
args : positional arguments
Any positional arguments to the function beyond the first argument
(which will be set to ``self``).
kwargs : keyword arguments
Keyword arguments to the function.
If present, the following arguments are treated specially:
unit : `~astropy.units.Unit`
Unit of the output result. If not given, the unit of ``self``.
out : `~astropy.units.Quantity`
A Quantity instance in which to store the output.
Notes
-----
Output should always be assigned via a keyword argument, otherwise
no proper account of the unit is taken.
Returns
-------
out : `~astropy.units.Quantity`
Result of the function call, with the unit set properly.
"""
if unit is None:
unit = self.unit
# Ensure we don't loop back by turning any Quantity into array views.
args = (self.value,) + tuple(
(arg.value if isinstance(arg, Quantity) else arg) for arg in args
)
if out is not None:
# If pre-allocated output is used, check it is suitable.
# This also returns array view, to ensure we don't loop back.
arrays = tuple(arg for arg in args if isinstance(arg, np.ndarray))
kwargs["out"] = check_output(out, unit, arrays, function=function)
# Apply the function and turn it back into a Quantity.
result = function(*args, **kwargs)
return self._result_as_quantity(result, unit, out)
def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
return self._wrap_function(np.trace, offset, axis1, axis2, dtype, out=out)
def var(
self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True
):
return self._wrap_function(
np.var,
axis,
dtype,
out=out,
ddof=ddof,
keepdims=keepdims,
where=where,
unit=self.unit**2,
)
def std(
self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True
):
return self._wrap_function(
np.std, axis, dtype, out=out, ddof=ddof, keepdims=keepdims, where=where
)
def mean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
return self._wrap_function(
np.mean, axis, dtype, out=out, keepdims=keepdims, where=where
)
def round(self, decimals=0, out=None):
return self._wrap_function(np.round, decimals, out=out)
def dot(self, b, out=None):
result_unit = self.unit * getattr(b, "unit", dimensionless_unscaled)
return self._wrap_function(np.dot, b, out=out, unit=result_unit)
# Calculation: override methods that do not make sense.
def all(self, axis=None, out=None):
raise TypeError(
"cannot evaluate truth value of quantities. "
"Evaluate array with q.value.all(...)"
)
def any(self, axis=None, out=None):
raise TypeError(
"cannot evaluate truth value of quantities. "
"Evaluate array with q.value.any(...)"
)
# Calculation: numpy functions that can be overridden with methods.
def diff(self, n=1, axis=-1):
return self._wrap_function(np.diff, n, axis)
def ediff1d(self, to_end=None, to_begin=None):
return self._wrap_function(np.ediff1d, to_end, to_begin)
if NUMPY_LT_1_22:
@deprecated("5.3", alternative="np.nansum", obj_type="method")
def nansum(self, axis=None, out=None, keepdims=False):
return self._wrap_function(np.nansum, axis, out=out, keepdims=keepdims)
else:
@deprecated("5.3", alternative="np.nansum", obj_type="method")
def nansum(
self, axis=None, out=None, keepdims=False, *, initial=None, where=True
):
if initial is not None:
initial = self._to_own_unit(initial)
return self._wrap_function(
np.nansum,
axis,
out=out,
keepdims=keepdims,
initial=initial,
where=where,
)
def insert(self, obj, values, axis=None):
"""
Insert values along the given axis before the given indices and return
a new `~astropy.units.Quantity` object.
This is a thin wrapper around the `numpy.insert` function.
Parameters
----------
obj : int, slice or sequence of int
Object that defines the index or indices before which ``values`` is
inserted.
values : array-like
Values to insert. If the type of ``values`` is different
from that of quantity, ``values`` is converted to the matching type.
``values`` should be shaped so that it can be broadcast appropriately
The unit of ``values`` must be consistent with this quantity.
axis : int, optional
Axis along which to insert ``values``. If ``axis`` is None then
the quantity array is flattened before insertion.
Returns
-------
out : `~astropy.units.Quantity`
A copy of quantity with ``values`` inserted. Note that the
insertion does not occur in-place: a new quantity array is returned.
Examples
--------
>>> import astropy.units as u
>>> q = [1, 2] * u.m
>>> q.insert(0, 50 * u.cm)
<Quantity [ 0.5, 1., 2.] m>
>>> q = [[1, 2], [3, 4]] * u.m
>>> q.insert(1, [10, 20] * u.m, axis=0)
<Quantity [[ 1., 2.],
[ 10., 20.],
[ 3., 4.]] m>
>>> q.insert(1, 10 * u.m, axis=1)
<Quantity [[ 1., 10., 2.],
[ 3., 10., 4.]] m>
"""
out_array = np.insert(self.value, obj, self._to_own_unit(values), axis)
return self._new_view(out_array)
class SpecificTypeQuantity(Quantity):
"""Superclass for Quantities of specific physical type.
Subclasses of these work just like :class:`~astropy.units.Quantity`, except
that they are for specific physical types (and may have methods that are
only appropriate for that type). Astropy examples are
:class:`~astropy.coordinates.Angle` and
:class:`~astropy.coordinates.Distance`
At a minimum, subclasses should set ``_equivalent_unit`` to the unit
associated with the physical type.
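    Examples
    --------
    A minimal sketch of a subclass; the ``Wavelength`` class below is made up
    purely for illustration and is not an astropy class:
    >>> import astropy.units as u
    >>> from astropy.units import SpecificTypeQuantity
    >>> class Wavelength(SpecificTypeQuantity):
    ...     _equivalent_unit = u.m
    >>> Wavelength(500 * u.nm)  # doctest: +SKIP
    <Wavelength 500. nm>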
"""
# The unit for the specific physical type. Instances can only be created
# with units that are equivalent to this.
_equivalent_unit = None
# The default unit used for views. Even with `None`, views of arrays
# without units are possible, but will have an uninitialized unit.
_unit = None
# Default unit for initialization through the constructor.
_default_unit = None
# ensure that we get precedence over our superclass.
__array_priority__ = Quantity.__array_priority__ + 10
def __quantity_subclass__(self, unit):
if unit.is_equivalent(self._equivalent_unit):
return type(self), True
else:
return super().__quantity_subclass__(unit)[0], False
def _set_unit(self, unit):
if unit is None or not unit.is_equivalent(self._equivalent_unit):
raise UnitTypeError(
"{} instances require units equivalent to '{}'".format(
type(self).__name__, self._equivalent_unit
)
+ (
", but no unit was given."
if unit is None
else f", so cannot set it to '{unit}'."
)
)
super()._set_unit(unit)
def isclose(a, b, rtol=1.0e-5, atol=None, equal_nan=False, **kwargs):
"""
Return a boolean array where two arrays are element-wise equal
within a tolerance.
Parameters
----------
a, b : array-like or `~astropy.units.Quantity`
Input values or arrays to compare
rtol : array-like or `~astropy.units.Quantity`
The relative tolerance for the comparison, which defaults to
``1e-5``. If ``rtol`` is a :class:`~astropy.units.Quantity`,
then it must be dimensionless.
atol : number or `~astropy.units.Quantity`
The absolute tolerance for the comparison. The units (or lack
thereof) of ``a``, ``b``, and ``atol`` must be consistent with
each other. If `None`, ``atol`` defaults to zero in the
appropriate units.
equal_nan : `bool`
        Whether to compare NaNs as equal. If `True`, NaNs in ``a`` will
        be considered equal to NaNs in ``b``.
Notes
-----
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.isclose`. However, this differs from the `numpy` function in
that the default for the absolute tolerance here is zero instead of
``atol=1e-8`` in `numpy`, as there is no natural way to set a default
*absolute* tolerance given two inputs that may have differently scaled
units.
Raises
------
`~astropy.units.UnitsError`
If the dimensions of ``a``, ``b``, or ``atol`` are incompatible,
or if ``rtol`` is not dimensionless.
See Also
--------
allclose
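    Examples
    --------
    A small illustrative comparison; the values and tolerances below are
    chosen arbitrarily:
    >>> import astropy.units as u
    >>> from astropy.units import isclose
    >>> isclose([1.0, 2.0] * u.m, [100.0, 201.0] * u.cm, atol=2 * u.cm)  # doctest: +SKIP
    array([ True,  True])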
"""
unquantified_args = _unquantify_allclose_arguments(a, b, rtol, atol)
return np.isclose(*unquantified_args, equal_nan=equal_nan, **kwargs)
def allclose(a, b, rtol=1.0e-5, atol=None, equal_nan=False, **kwargs) -> bool:
"""
Whether two arrays are element-wise equal within a tolerance.
Parameters
----------
a, b : array-like or `~astropy.units.Quantity`
Input values or arrays to compare
rtol : array-like or `~astropy.units.Quantity`
The relative tolerance for the comparison, which defaults to
``1e-5``. If ``rtol`` is a :class:`~astropy.units.Quantity`,
then it must be dimensionless.
atol : number or `~astropy.units.Quantity`
The absolute tolerance for the comparison. The units (or lack
thereof) of ``a``, ``b``, and ``atol`` must be consistent with
each other. If `None`, ``atol`` defaults to zero in the
appropriate units.
equal_nan : `bool`
        Whether to compare NaNs as equal. If `True`, NaNs in ``a`` will
        be considered equal to NaNs in ``b``.
Notes
-----
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.allclose`. However, this differs from the `numpy` function in
that the default for the absolute tolerance here is zero instead of
``atol=1e-8`` in `numpy`, as there is no natural way to set a default
*absolute* tolerance given two inputs that may have differently scaled
units.
Raises
------
`~astropy.units.UnitsError`
If the dimensions of ``a``, ``b``, or ``atol`` are incompatible,
or if ``rtol`` is not dimensionless.
See Also
--------
isclose
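    Examples
    --------
    A brief illustrative check (quantities chosen arbitrarily):
    >>> import astropy.units as u
    >>> from astropy.units import allclose
    >>> allclose([1.0, 2.0] * u.m, [1000.0, 2000.0] * u.mm)  # doctest: +SKIP
    True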
"""
unquantified_args = _unquantify_allclose_arguments(a, b, rtol, atol)
return np.allclose(*unquantified_args, equal_nan=equal_nan, **kwargs)
def _unquantify_allclose_arguments(actual, desired, rtol, atol):
actual = Quantity(actual, subok=True, copy=False)
desired = Quantity(desired, subok=True, copy=False)
try:
desired = desired.to(actual.unit)
except UnitsError:
raise UnitsError(
f"Units for 'desired' ({desired.unit}) and 'actual' "
f"({actual.unit}) are not convertible"
)
if atol is None:
# By default, we assume an absolute tolerance of zero in the
# appropriate units. The default value of None for atol is
# needed because the units of atol must be consistent with the
# units for a and b.
atol = Quantity(0)
else:
atol = Quantity(atol, subok=True, copy=False)
try:
atol = atol.to(actual.unit)
except UnitsError:
raise UnitsError(
f"Units for 'atol' ({atol.unit}) and 'actual' "
f"({actual.unit}) are not convertible"
)
rtol = Quantity(rtol, subok=True, copy=False)
try:
rtol = rtol.to(dimensionless_unscaled)
except Exception:
raise UnitsError("'rtol' should be dimensionless")
return actual.value, desired.value, rtol.value, atol.value
|
fe84b4760c16d5eba175367b32ca496f8a3d391b848c382d54952d7698d3a1b5 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains convenience functions for retrieving solar system
ephemerides from jplephem.
"""
import os.path
import re
from urllib.parse import urlparse
import erfa
import numpy as np
from astropy import units as u
from astropy.constants import c as speed_of_light
from astropy.utils import indent
from astropy.utils.data import download_file
from astropy.utils.decorators import classproperty, deprecated
from astropy.utils.state import ScienceState
from .builtin_frames import GCRS, ICRS
from .builtin_frames.utils import get_jd12
from .representation import CartesianRepresentation
from .sky_coordinate import SkyCoord
__all__ = [
"get_body",
"get_moon",
"get_body_barycentric",
"get_body_barycentric_posvel",
"solar_system_ephemeris",
]
DEFAULT_JPL_EPHEMERIS = "de430"
"""List of kernel pairs needed to calculate positions of a given object."""
BODY_NAME_TO_KERNEL_SPEC = {
"sun": [(0, 10)],
"mercury": [(0, 1), (1, 199)],
"venus": [(0, 2), (2, 299)],
"earth-moon-barycenter": [(0, 3)],
"earth": [(0, 3), (3, 399)],
"moon": [(0, 3), (3, 301)],
"mars": [(0, 4)],
"jupiter": [(0, 5)],
"saturn": [(0, 6)],
"uranus": [(0, 7)],
"neptune": [(0, 8)],
"pluto": [(0, 9)],
}
"""Indices to the plan94 routine for the given object."""
PLAN94_BODY_NAME_TO_PLANET_INDEX = {
"mercury": 1,
"venus": 2,
"earth-moon-barycenter": 3,
"mars": 4,
"jupiter": 5,
"saturn": 6,
"uranus": 7,
"neptune": 8,
}
_EPHEMERIS_NOTE = """
You can either give an explicit ephemeris or use a default, which is normally
a built-in ephemeris that does not require ephemeris files. To change
the default to be the JPL ephemeris::
>>> from astropy.coordinates import solar_system_ephemeris
>>> solar_system_ephemeris.set('jpl') # doctest: +SKIP
Use of any JPL ephemeris requires the jplephem package
(https://pypi.org/project/jplephem/).
If needed, the ephemeris file will be downloaded (and cached).
One can check which bodies are covered by a given ephemeris using::
>>> solar_system_ephemeris.bodies
('earth', 'sun', 'moon', 'mercury', 'venus', 'earth-moon-barycenter', 'mars', 'jupiter', 'saturn', 'uranus', 'neptune')
"""[
1:-1
]
class solar_system_ephemeris(ScienceState):
"""Default ephemerides for calculating positions of Solar-System bodies.
This can be one of the following:
- 'builtin': polynomial approximations to the orbital elements.
- 'dexxx[s]', for a JPL dynamical model, where xxx is the three digit
version number (e.g. de430), and the 's' is optional to specify the
'small' version of a kernel. The version number must correspond to an
ephemeris file available at:
https://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/
- 'jpl': Alias for the default JPL ephemeris (currently, 'de430').
- URL: (str) The url to a SPK ephemeris in SPICE binary (.bsp) format.
- PATH: (str) File path to a SPK ephemeris in SPICE binary (.bsp) format.
- `None`: Ensure an Exception is raised without an explicit ephemeris.
The default is 'builtin', which uses the ``epv00`` and ``plan94``
routines from the ``erfa`` implementation of the Standards Of Fundamental
Astronomy library.
Notes
-----
Any file required will be downloaded (and cached) when the state is set.
The default Satellite Planet Kernel (SPK) file from NASA JPL (de430) is
~120MB, and covers years ~1550-2650 CE [1]_. The smaller de432s file is
~10MB, and covers years 1950-2050 [2]_ (and similarly for the newer de440
and de440s). Older versions of the JPL ephemerides (such as the widely
used de200) can be used via their URL [3]_.
.. [1] https://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/aareadme_de430-de431.txt
.. [2] https://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/aareadme_de432s.txt
.. [3] https://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/a_old_versions/
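    Examples
    --------
    A typical way to switch the ephemeris temporarily; this requires the
    jplephem package and, on first use, downloads the kernel file:
    >>> from astropy.coordinates import solar_system_ephemeris
    >>> with solar_system_ephemeris.set('de432s'):  # doctest: +SKIP
    ...     pass  # calculations inside this block use the de432s kernel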
"""
_value = "builtin"
_kernel = None
@classmethod
def validate(cls, value):
# make no changes if value is None
if value is None:
return cls._value
# Set up Kernel; if the file is not in cache, this will download it.
cls.get_kernel(value)
return value
@classmethod
def get_kernel(cls, value):
# ScienceState only ensures the `_value` attribute is up to date,
# so we need to be sure any kernel returned is consistent.
if cls._kernel is None or cls._kernel.origin != value:
if cls._kernel is not None:
cls._kernel.daf.file.close()
cls._kernel = None
kernel = _get_kernel(value)
if kernel is not None:
kernel.origin = value
cls._kernel = kernel
return cls._kernel
@classproperty
def kernel(cls):
return cls.get_kernel(cls._value)
@classproperty
def bodies(cls):
if cls._value is None:
return None
if cls._value.lower() == "builtin":
return ("earth", "sun", "moon") + tuple(
PLAN94_BODY_NAME_TO_PLANET_INDEX.keys()
)
else:
return tuple(BODY_NAME_TO_KERNEL_SPEC.keys())
def _get_kernel(value):
"""
Try importing jplephem, download/retrieve from cache the Satellite Planet
Kernel corresponding to the given ephemeris.
"""
if value is None or value.lower() == "builtin":
return None
try:
from jplephem.spk import SPK
except ImportError:
raise ImportError(
"Solar system JPL ephemeris calculations require the jplephem package "
"(https://pypi.org/project/jplephem/)"
)
if value.lower() == "jpl":
# Get the default JPL ephemeris URL
value = DEFAULT_JPL_EPHEMERIS
if re.compile(r"de[0-9][0-9][0-9]s?").match(value.lower()):
value = (
"https://naif.jpl.nasa.gov/pub/naif/generic_kernels"
f"/spk/planets/{value.lower():s}.bsp"
)
elif os.path.isfile(value):
return SPK.open(value)
else:
try:
urlparse(value)
except Exception:
raise ValueError(
f"{value} was not one of the standard strings and "
"could not be parsed as a file path or URL"
)
return SPK.open(download_file(value, cache=True))
def _get_body_barycentric_posvel(body, time, ephemeris=None, get_velocity=True):
"""Calculate the barycentric position (and velocity) of a solar system body.
Parameters
----------
body : str or other
The solar system body for which to calculate positions. Can also be a
kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
kernel.
time : `~astropy.time.Time`
Time of observation.
ephemeris : str, optional
Ephemeris to use. By default, use the one set with
``astropy.coordinates.solar_system_ephemeris.set``
get_velocity : bool, optional
Whether or not to calculate the velocity as well as the position.
Returns
-------
position : `~astropy.coordinates.CartesianRepresentation` or tuple
Barycentric (ICRS) position or tuple of position and velocity.
Notes
-----
Whether or not velocities are calculated makes little difference for the
built-in ephemerides, but for most JPL ephemeris files, the execution time
roughly doubles.
"""
# If the ephemeris is to be taken from solar_system_ephemeris, or the one
# it already contains, use the kernel there. Otherwise, open the ephemeris,
# possibly downloading it, but make sure the file is closed at the end.
default_kernel = ephemeris is None or ephemeris is solar_system_ephemeris._value
kernel = None
try:
if default_kernel:
if solar_system_ephemeris.get() is None:
raise ValueError(_EPHEMERIS_NOTE)
kernel = solar_system_ephemeris.kernel
else:
kernel = _get_kernel(ephemeris)
jd1, jd2 = get_jd12(time, "tdb")
if kernel is None:
body = body.lower()
earth_pv_helio, earth_pv_bary = erfa.epv00(jd1, jd2)
if body == "earth":
body_pv_bary = earth_pv_bary
elif body == "moon":
# The moon98 documentation notes that it takes TT, but that TDB leads
# to errors smaller than the uncertainties in the algorithm.
# moon98 returns the astrometric position relative to the Earth.
moon_pv_geo = erfa.moon98(jd1, jd2)
body_pv_bary = erfa.pvppv(moon_pv_geo, earth_pv_bary)
else:
sun_pv_bary = erfa.pvmpv(earth_pv_bary, earth_pv_helio)
if body == "sun":
body_pv_bary = sun_pv_bary
else:
try:
body_index = PLAN94_BODY_NAME_TO_PLANET_INDEX[body]
except KeyError:
raise KeyError(
f"{body}'s position and velocity cannot be "
f"calculated with the '{ephemeris}' ephemeris."
)
body_pv_helio = erfa.plan94(jd1, jd2, body_index)
body_pv_bary = erfa.pvppv(body_pv_helio, sun_pv_bary)
body_pos_bary = CartesianRepresentation(
body_pv_bary["p"], unit=u.au, xyz_axis=-1, copy=False
)
if get_velocity:
body_vel_bary = CartesianRepresentation(
body_pv_bary["v"], unit=u.au / u.day, xyz_axis=-1, copy=False
)
else:
if isinstance(body, str):
# Look up kernel chain for JPL ephemeris, based on name
try:
kernel_spec = BODY_NAME_TO_KERNEL_SPEC[body.lower()]
except KeyError:
raise KeyError(
f"{body}'s position cannot be calculated with "
f"the {ephemeris} ephemeris."
)
else:
                # otherwise, assume the user knows what they're doing and intentionally
# passed in a kernel chain
kernel_spec = body
# jplephem cannot handle multi-D arrays, so convert to 1D here.
jd1_shape = getattr(jd1, "shape", ())
if len(jd1_shape) > 1:
jd1, jd2 = jd1.ravel(), jd2.ravel()
# Note that we use the new jd1.shape here to create a 1D result array.
# It is reshaped below.
body_posvel_bary = np.zeros(
(2 if get_velocity else 1, 3) + getattr(jd1, "shape", ())
)
for pair in kernel_spec:
spk = kernel[pair]
if spk.data_type == 3:
# Type 3 kernels contain both position and velocity.
posvel = spk.compute(jd1, jd2)
if get_velocity:
body_posvel_bary += posvel.reshape(body_posvel_bary.shape)
else:
                        body_posvel_bary[0] += posvel[:3]
else:
# spk.generate first yields the position and then the
# derivative. If no velocities are desired, body_posvel_bary
# has only one element and thus the loop ends after a single
# iteration, avoiding the velocity calculation.
for body_p_or_v, p_or_v in zip(
body_posvel_bary, spk.generate(jd1, jd2)
):
body_p_or_v += p_or_v
body_posvel_bary.shape = body_posvel_bary.shape[:2] + jd1_shape
body_pos_bary = CartesianRepresentation(
body_posvel_bary[0], unit=u.km, copy=False
)
if get_velocity:
body_vel_bary = CartesianRepresentation(
body_posvel_bary[1], unit=u.km / u.day, copy=False
)
return (body_pos_bary, body_vel_bary) if get_velocity else body_pos_bary
finally:
if not default_kernel and kernel is not None:
kernel.daf.file.close()
def get_body_barycentric_posvel(body, time, ephemeris=None):
"""Calculate the barycentric position and velocity of a solar system body.
Parameters
----------
body : str or list of tuple
The solar system body for which to calculate positions. Can also be a
kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
kernel.
time : `~astropy.time.Time`
Time of observation.
ephemeris : str, optional
Ephemeris to use. By default, use the one set with
``astropy.coordinates.solar_system_ephemeris.set``
Returns
-------
position, velocity : tuple of `~astropy.coordinates.CartesianRepresentation`
Tuple of barycentric (ICRS) position and velocity.
See Also
--------
get_body_barycentric : to calculate position only.
This is faster by about a factor two for JPL kernels, but has no
speed advantage for the built-in ephemeris.
Notes
-----
{_EPHEMERIS_NOTE}
"""
return _get_body_barycentric_posvel(body, time, ephemeris)
def get_body_barycentric(body, time, ephemeris=None):
"""Calculate the barycentric position of a solar system body.
Parameters
----------
body : str or list of tuple
The solar system body for which to calculate positions. Can also be a
kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
kernel.
time : `~astropy.time.Time`
Time of observation.
ephemeris : str, optional
Ephemeris to use. By default, use the one set with
``astropy.coordinates.solar_system_ephemeris.set``
Returns
-------
position : `~astropy.coordinates.CartesianRepresentation`
Barycentric (ICRS) position of the body in cartesian coordinates
See Also
--------
get_body_barycentric_posvel : to calculate both position and velocity.
Notes
-----
{_EPHEMERIS_NOTE}
"""
return _get_body_barycentric_posvel(body, time, ephemeris, get_velocity=False)
def _get_apparent_body_position(body, time, ephemeris, obsgeoloc=None):
"""Calculate the apparent position of body ``body`` relative to Earth.
This corrects for the light-travel time to the object.
Parameters
----------
body : str or other
The solar system body for which to calculate positions. Can also be a
kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
kernel.
time : `~astropy.time.Time`
Time of observation.
ephemeris : str, optional
Ephemeris to use. By default, use the one set with
``~astropy.coordinates.solar_system_ephemeris.set``
obsgeoloc : `~astropy.coordinates.CartesianRepresentation`, optional
The GCRS position of the observer
Returns
-------
cartesian_position : `~astropy.coordinates.CartesianRepresentation`
Barycentric (ICRS) apparent position of the body in cartesian coordinates
Notes
-----
{_EPHEMERIS_NOTE}
"""
if ephemeris is None:
ephemeris = solar_system_ephemeris.get()
# Calculate position given approximate light travel time.
delta_light_travel_time = 20.0 * u.s
emitted_time = time
light_travel_time = 0.0 * u.s
earth_loc = get_body_barycentric("earth", time, ephemeris)
if obsgeoloc is not None:
earth_loc += obsgeoloc
while np.any(np.fabs(delta_light_travel_time) > 1.0e-8 * u.s):
body_loc = get_body_barycentric(body, emitted_time, ephemeris)
earth_distance = (body_loc - earth_loc).norm()
delta_light_travel_time = light_travel_time - earth_distance / speed_of_light
light_travel_time = earth_distance / speed_of_light
emitted_time = time - light_travel_time
return get_body_barycentric(body, emitted_time, ephemeris)
def get_body(body, time, location=None, ephemeris=None):
"""
Get a `~astropy.coordinates.SkyCoord` for a solar system body as observed
from a location on Earth in the `~astropy.coordinates.GCRS` reference
system.
Parameters
----------
body : str or list of tuple
The solar system body for which to calculate positions. Can also be a
kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
kernel.
time : `~astropy.time.Time`
Time of observation.
location : `~astropy.coordinates.EarthLocation`, optional
Location of observer on the Earth. If not given, will be taken from
``time`` (if not present, a geocentric observer will be assumed).
ephemeris : str, optional
Ephemeris to use. If not given, use the one set with
``astropy.coordinates.solar_system_ephemeris.set`` (which is
set to 'builtin' by default).
Returns
-------
skycoord : `~astropy.coordinates.SkyCoord`
GCRS Coordinate for the body
Notes
-----
The coordinate returned is the apparent position, which is the position of
the body at time *t* minus the light travel time from the *body* to the
observing *location*.
{_EPHEMERIS_NOTE}
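    Examples
    --------
    A brief sketch of typical usage with the default built-in ephemeris; the
    date is arbitrary and the resulting coordinate is not shown here:
    >>> from astropy.time import Time
    >>> from astropy.coordinates import get_body
    >>> t = Time('2014-09-22 23:22')
    >>> moon = get_body('moon', t)  # geocentric apparent position of the Moon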
"""
if location is None:
location = time.location
if location is not None:
obsgeoloc, obsgeovel = location.get_gcrs_posvel(time)
else:
obsgeoloc, obsgeovel = None, None
cartrep = _get_apparent_body_position(body, time, ephemeris, obsgeoloc)
icrs = ICRS(cartrep)
gcrs = icrs.transform_to(
GCRS(obstime=time, obsgeoloc=obsgeoloc, obsgeovel=obsgeovel)
)
return SkyCoord(gcrs)
@deprecated("5.3", alternative='get_body("moon")')
def get_moon(time, location=None, ephemeris=None):
"""
Get a `~astropy.coordinates.SkyCoord` for the Earth's Moon as observed
from a location on Earth in the `~astropy.coordinates.GCRS` reference
system.
Parameters
----------
time : `~astropy.time.Time`
Time of observation
location : `~astropy.coordinates.EarthLocation`
Location of observer on the Earth. If none is supplied, taken from
``time`` (if not present, a geocentric observer will be assumed).
ephemeris : str, optional
Ephemeris to use. If not given, use the one set with
``astropy.coordinates.solar_system_ephemeris.set`` (which is
set to 'builtin' by default).
Returns
-------
skycoord : `~astropy.coordinates.SkyCoord`
GCRS Coordinate for the Moon
Notes
-----
The coordinate returned is the apparent position, which is the position of
the moon at time *t* minus the light travel time from the moon to the
observing *location*.
{_EPHEMERIS_NOTE}
"""
return get_body("moon", time, location=location, ephemeris=ephemeris)
# Add note about the ephemeris choices to the docstrings of relevant functions.
# Note: sadly, one cannot use f-strings for docstrings, so we format explicitly.
for f in [
f
for f in locals().values()
if callable(f) and f.__doc__ is not None and "{_EPHEMERIS_NOTE}" in f.__doc__
]:
f.__doc__ = f.__doc__.format(_EPHEMERIS_NOTE=indent(_EPHEMERIS_NOTE)[4:])
|
c97804866fa142f4972c09b5c283421050bad0628d08daf1b0715dd79c820be2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module includes files automatically generated from ply (these end in
# _lextab.py and _parsetab.py). To generate these files, remove them from this
# folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/coordinates
#
# You can then commit the changes to the re-generated _lextab.py and
# _parsetab.py files.
"""
This module contains formatting functions that are for internal use in
astropy.coordinates.angles. Mainly they are conversions from one format
of data to another.
"""
import threading
from warnings import warn
import numpy as np
from astropy import units as u
from astropy.utils import format_exception, parsing
from astropy.utils.decorators import deprecated
from .errors import (
IllegalHourError,
IllegalHourWarning,
IllegalMinuteError,
IllegalMinuteWarning,
IllegalSecondError,
IllegalSecondWarning,
)
class _AngleParser:
"""
Parses the various angle formats including:
* 01:02:30.43 degrees
* 1 2 0 hours
* 1°2′3″
* 1d2m3s
* -1h2m3s
* 1°2′3″N
This class should not be used directly. Use `parse_angle`
instead.
"""
# For safe multi-threaded operation all class (but not instance)
# members that carry state should be thread-local. They are stored
# in the following class member
_thread_local = threading.local()
def __init__(self):
# TODO: in principle, the parser should be invalidated if we change unit
# system (from CDS to FITS, say). Might want to keep a link to the
# unit_registry used, and regenerate the parser/lexer if it changes.
# Alternatively, perhaps one should not worry at all and just pre-
# generate the parser for each release (as done for unit formats).
# For some discussion of this problem, see
# https://github.com/astropy/astropy/issues/5350#issuecomment-248770151
if "_parser" not in _AngleParser._thread_local.__dict__:
(
_AngleParser._thread_local._parser,
_AngleParser._thread_local._lexer,
) = self._make_parser()
@classmethod
def _get_simple_unit_names(cls):
simple_units = set(u.radian.find_equivalent_units(include_prefix_units=True))
simple_unit_names = set()
# We filter out degree and hourangle, since those are treated
# separately.
for unit in simple_units:
if unit != u.deg and unit != u.hourangle:
simple_unit_names.update(unit.names)
return sorted(simple_unit_names)
@classmethod
def _make_parser(cls):
# List of token names.
tokens = (
"SIGN",
"UINT",
"UFLOAT",
"COLON",
"DEGREE",
"HOUR",
"MINUTE",
"SECOND",
"SIMPLE_UNIT",
"EASTWEST",
"NORTHSOUTH",
)
# NOTE THE ORDERING OF THESE RULES IS IMPORTANT!!
# Regular expression rules for simple tokens
def t_UFLOAT(t):
r"((\d+\.\d*)|(\.\d+))([eE][+-−]?\d+)?"
# The above includes Unicode "MINUS SIGN" \u2212. It is
# important to include the hyphen last, or the regex will
# treat this as a range.
t.value = float(t.value.replace("−", "-"))
return t
def t_UINT(t):
r"\d+"
t.value = int(t.value)
return t
def t_SIGN(t):
r"[+−-]"
            # The above includes Unicode "MINUS SIGN" \u2212. It is
# important to include the hyphen last, or the regex will
# treat this as a range.
if t.value == "+":
t.value = 1.0
else:
t.value = -1.0
return t
def t_EASTWEST(t):
r"[EW]$"
t.value = -1.0 if t.value == "W" else 1.0
return t
def t_NORTHSOUTH(t):
r"[NS]$"
# We cannot use lower-case letters otherwise we'll confuse
# s[outh] with s[econd]
t.value = -1.0 if t.value == "S" else 1.0
return t
def t_SIMPLE_UNIT(t):
t.value = u.Unit(t.value)
return t
t_SIMPLE_UNIT.__doc__ = "|".join(
f"(?:{x})" for x in cls._get_simple_unit_names()
)
t_COLON = ":"
t_DEGREE = r"d(eg(ree(s)?)?)?|°"
t_HOUR = r"hour(s)?|h(r)?|ʰ"
t_MINUTE = r"m(in(ute(s)?)?)?|′|\'|ᵐ"
t_SECOND = r"s(ec(ond(s)?)?)?|″|\"|ˢ"
# A string containing ignored characters (spaces)
t_ignore = " "
# Error handling rule
def t_error(t):
raise ValueError(f"Invalid character at col {t.lexpos}")
lexer = parsing.lex(lextab="angle_lextab", package="astropy/coordinates")
def p_angle(p):
"""
angle : sign hms eastwest
| sign dms dir
| sign arcsecond dir
| sign arcminute dir
| sign simple dir
"""
sign = p[1] * p[3]
value, unit = p[2]
if isinstance(value, tuple):
p[0] = ((sign * value[0],) + value[1:], unit)
else:
p[0] = (sign * value, unit)
def p_sign(p):
"""
sign : SIGN
|
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1.0
def p_eastwest(p):
"""
eastwest : EASTWEST
|
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1.0
def p_dir(p):
"""
dir : EASTWEST
| NORTHSOUTH
|
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1.0
def p_ufloat(p):
"""
ufloat : UFLOAT
| UINT
"""
p[0] = p[1]
def p_colon(p):
"""
colon : UINT COLON ufloat
| UINT COLON UINT COLON ufloat
"""
if len(p) == 4:
p[0] = (p[1], p[3])
elif len(p) == 6:
p[0] = (p[1], p[3], p[5])
def p_spaced(p):
"""
spaced : UINT ufloat
| UINT UINT ufloat
"""
if len(p) == 3:
p[0] = (p[1], p[2])
elif len(p) == 4:
p[0] = (p[1], p[2], p[3])
def p_generic(p):
"""
generic : colon
| spaced
| ufloat
"""
p[0] = p[1]
def p_hms(p):
"""
hms : UINT HOUR
| UINT HOUR ufloat
| UINT HOUR UINT MINUTE
| UINT HOUR UFLOAT MINUTE
| UINT HOUR UINT MINUTE ufloat
| UINT HOUR UINT MINUTE ufloat SECOND
| generic HOUR
"""
if len(p) == 3:
p[0] = (p[1], u.hourangle)
elif len(p) in (4, 5):
p[0] = ((p[1], p[3]), u.hourangle)
elif len(p) in (6, 7):
p[0] = ((p[1], p[3], p[5]), u.hourangle)
def p_dms(p):
"""
dms : UINT DEGREE
| UINT DEGREE ufloat
| UINT DEGREE UINT MINUTE
| UINT DEGREE UFLOAT MINUTE
| UINT DEGREE UINT MINUTE ufloat
| UINT DEGREE UINT MINUTE ufloat SECOND
| generic DEGREE
"""
if len(p) == 3:
p[0] = (p[1], u.degree)
elif len(p) in (4, 5):
p[0] = ((p[1], p[3]), u.degree)
elif len(p) in (6, 7):
p[0] = ((p[1], p[3], p[5]), u.degree)
def p_simple(p):
"""
simple : generic
| generic SIMPLE_UNIT
"""
if len(p) == 2:
p[0] = (p[1], None)
else:
p[0] = (p[1], p[2])
def p_arcsecond(p):
"""
arcsecond : generic SECOND
"""
p[0] = (p[1], u.arcsecond)
def p_arcminute(p):
"""
arcminute : generic MINUTE
"""
p[0] = (p[1], u.arcminute)
def p_error(p):
raise ValueError
parser = parsing.yacc(tabmodule="angle_parsetab", package="astropy/coordinates")
return parser, lexer
def parse(self, angle, unit, debug=False):
try:
found_angle, found_unit = self._thread_local._parser.parse(
angle, lexer=self._thread_local._lexer, debug=debug
)
except ValueError as e:
if str(e):
raise ValueError(f"{e} in angle {angle!r}") from e
else:
raise ValueError(f"Syntax error parsing angle {angle!r}") from e
if unit is None and found_unit is None:
raise u.UnitsError("No unit specified")
return found_angle, found_unit
def _check_hour_range(hrs):
"""
    Checks that the given value is in the range [-24, 24]; a value of exactly ±24 triggers a warning.
"""
if np.any(np.abs(hrs) == 24.0):
warn(IllegalHourWarning(hrs, "Treating as 24 hr"))
elif np.any(hrs < -24.0) or np.any(hrs > 24.0):
raise IllegalHourError(hrs)
def _check_minute_range(m):
"""
    Checks that the given value is in the range [-60, 60]. If the value
    is equal to 60, then a warning is raised.
"""
if np.any(m == 60.0):
warn(IllegalMinuteWarning(m, "Treating as 0 min, +1 hr/deg"))
elif np.any(m < -60.0) or np.any(m > 60.0):
# "Error: minutes not in range [-60,60) ({0}).".format(min))
raise IllegalMinuteError(m)
def _check_second_range(sec):
"""
    Checks that the given value is in the range [-60, 60]. If the value
    is equal to 60, then a warning is raised.
"""
if np.any(sec == 60.0):
warn(IllegalSecondWarning(sec, "Treating as 0 sec, +1 min"))
elif sec is None:
pass
elif np.any(sec < -60.0) or np.any(sec > 60.0):
# "Error: seconds not in range [-60,60) ({0}).".format(sec))
raise IllegalSecondError(sec)
def check_hms_ranges(h, m, s):
"""
Checks that the given hour, minute and second are all within
reasonable range.
"""
_check_hour_range(h)
_check_minute_range(m)
_check_second_range(s)
def parse_angle(angle, unit=None, debug=False):
"""
Parses an input string value into an angle value.
Parameters
----------
angle : str
A string representing the angle. May be in one of the following forms:
* 01:02:30.43 degrees
* 1 2 0 hours
* 1°2′3″
* 1d2m3s
* -1h2m3s
unit : `~astropy.units.UnitBase` instance, optional
The unit used to interpret the string. If ``unit`` is not
provided, the unit must be explicitly represented in the
string, either at the end or as number separators.
debug : bool, optional
If `True`, print debugging information from the parser.
Returns
-------
value, unit : tuple
``value`` is the value as a floating point number or three-part
tuple, and ``unit`` is a `Unit` instance which is either the
unit passed in or the one explicitly mentioned in the input
string.
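    Examples
    --------
    A couple of illustrative calls, assuming this module is importable as
    ``astropy.coordinates.angle_formats`` (outputs shown approximately):
    >>> from astropy.coordinates.angle_formats import parse_angle
    >>> parse_angle('1d2m3.4s')  # doctest: +SKIP
    ((1.0, 2, 3.4), Unit("deg"))
    >>> parse_angle('10.5', unit='hourangle')  # doctest: +SKIP
    (10.5, None)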
"""
return _AngleParser().parse(angle, unit, debug=debug)
def degrees_to_dms(d):
"""
Convert a floating-point degree value into a ``(degree, arcminute,
arcsecond)`` tuple.
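    For example, assuming this module is importable as
    ``astropy.coordinates.angle_formats``:
    >>> from astropy.coordinates.angle_formats import degrees_to_dms
    >>> degrees_to_dms(-11.25)  # doctest: +SKIP
    (-11.0, -15.0, -0.0)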
"""
sign = np.copysign(1.0, d)
(df, d) = np.modf(np.abs(d)) # (degree fraction, degree)
(mf, m) = np.modf(df * 60.0) # (minute fraction, minute)
s = mf * 60.0
return np.floor(sign * d), sign * np.floor(m), sign * s
@deprecated(
since="5.1",
message=(
"dms_to_degrees (or creating an Angle with a tuple) has ambiguous "
"behavior when the degree value is 0. Use {alternative}."
),
alternative=(
"another way of creating angles instead (e.g. a less "
"ambiguous string like '-0d1m2.3s')"
),
)
def dms_to_degrees(d, m, s=None):
"""
Convert degrees, arcminute, arcsecond to a float degrees value.
"""
_check_minute_range(m)
_check_second_range(s)
# determine sign
sign = np.copysign(1.0, d)
try:
d = np.floor(np.abs(d))
if s is None:
m = np.abs(m)
s = 0
else:
m = np.floor(np.abs(m))
s = np.abs(s)
except ValueError as err:
raise ValueError(
format_exception(
"{func}: dms values ({1[0]},{2[1]},{3[2]}) could not be "
"converted to numbers.",
d,
m,
s,
)
) from err
return sign * (d + m / 60.0 + s / 3600.0)
@deprecated(
since="5.1",
message=(
"hms_to_hours (or creating an Angle with a tuple) has ambiguous "
"behavior when the hour value is 0. Use {alternative}."
),
alternative=(
"another way of creating angles instead (e.g. a less "
"ambiguous string like '-0h1m2.3s')"
),
)
def hms_to_hours(h, m, s=None):
"""
Convert hour, minute, second to a float hour value.
"""
check_hms_ranges(h, m, s)
# determine sign
sign = np.copysign(1.0, h)
try:
h = np.floor(np.abs(h))
if s is None:
m = np.abs(m)
s = 0
else:
m = np.floor(np.abs(m))
s = np.abs(s)
except ValueError as err:
raise ValueError(
format_exception(
"{func}: HMS values ({1[0]},{2[1]},{3[2]}) could not be "
"converted to numbers.",
h,
m,
s,
)
) from err
return sign * (h + m / 60.0 + s / 3600.0)
def hms_to_degrees(h, m, s):
"""
Convert hour, minute, second to a float degrees value.
"""
return hms_to_hours(h, m, s) * 15.0
def hms_to_radians(h, m, s):
"""
Convert hour, minute, second to a float radians value.
"""
return u.degree.to(u.radian, hms_to_degrees(h, m, s))
def hms_to_dms(h, m, s):
"""
    Convert hours, minutes, seconds to a ``(degree, arcminute, arcsecond)``
    tuple.
"""
return degrees_to_dms(hms_to_degrees(h, m, s))
def hours_to_decimal(h):
"""
Convert any parseable hour value into a float value.
"""
from . import angles
return angles.Angle(h, unit=u.hourangle).hour
def hours_to_radians(h):
"""
Convert an angle in Hours to Radians.
"""
return u.hourangle.to(u.radian, h)
def hours_to_hms(h):
"""
    Convert a floating-point hour value into an ``(hour, minute,
    second)`` tuple.
"""
sign = np.copysign(1.0, h)
    (hf, h) = np.modf(np.abs(h))  # (hour fraction, hour)
(mf, m) = np.modf(hf * 60.0) # (minute fraction, minute)
s = mf * 60.0
return (np.floor(sign * h), sign * np.floor(m), sign * s)
def radians_to_degrees(r):
"""
Convert an angle in Radians to Degrees.
"""
return u.radian.to(u.degree, r)
def radians_to_hours(r):
"""
Convert an angle in Radians to Hours.
"""
return u.radian.to(u.hourangle, r)
def radians_to_hms(r):
"""
Convert an angle in Radians to an ``(hour, minute, second)`` tuple.
"""
hours = radians_to_hours(r)
return hours_to_hms(hours)
def radians_to_dms(r):
"""
Convert an angle in Radians to an ``(degree, arcminute,
arcsecond)`` tuple.
"""
degrees = u.radian.to(u.degree, r)
return degrees_to_dms(degrees)
def sexagesimal_to_string(values, precision=None, pad=False, sep=(":",), fields=3):
"""
Given an already separated tuple of sexagesimal values, returns
a string.
See `hours_to_string` and `degrees_to_string` for a higher-level
interface to this functionality.
"""
# Check to see if values[0] is negative, using np.copysign to handle -0
sign = np.copysign(1.0, values[0])
# If the coordinates are negative, we need to take the absolute values.
# We use np.abs because abs(-0) is -0
# TODO: Is this true? (MHvK, 2018-02-01: not on my system)
values = [np.abs(value) for value in values]
if pad:
if sign == -1:
pad = 3
else:
pad = 2
else:
pad = 0
if not isinstance(sep, tuple):
sep = tuple(sep)
if fields < 1 or fields > 3:
raise ValueError("fields must be 1, 2, or 3")
if not sep: # empty string, False, or None, etc.
sep = ("", "", "")
elif len(sep) == 1:
if fields == 3:
sep = sep + (sep[0], "")
elif fields == 2:
sep = sep + ("", "")
else:
sep = ("", "", "")
elif len(sep) == 2:
sep = sep + ("",)
elif len(sep) != 3:
raise ValueError(
"Invalid separator specification for converting angle to string."
)
# Simplify the expression based on the requested precision. For
# example, if the seconds will round up to 60, we should convert
# it to 0 and carry upwards. If the field is hidden (by the
# fields kwarg) we round up around the middle, 30.0.
if precision is None:
rounding_thresh = 60.0 - (10.0**-8)
else:
rounding_thresh = 60.0 - (10.0**-precision)
if fields == 3 and values[2] >= rounding_thresh:
values[2] = 0.0
values[1] += 1.0
elif fields < 3 and values[2] >= 30.0:
values[1] += 1.0
if fields >= 2 and values[1] >= 60.0:
values[1] = 0.0
values[0] += 1.0
elif fields < 2 and values[1] >= 30.0:
values[0] += 1.0
literal = []
last_value = ""
literal.append("{0:0{pad}.0f}{sep[0]}")
if fields >= 2:
literal.append("{1:02d}{sep[1]}")
if fields == 3:
if precision is None:
last_value = f"{abs(values[2]):.8f}"
last_value = last_value.rstrip("0").rstrip(".")
else:
last_value = "{0:.{precision}f}".format(abs(values[2]), precision=precision)
if len(last_value) == 1 or last_value[1] == ".":
last_value = "0" + last_value
literal.append("{last_value}{sep[2]}")
literal = "".join(literal)
return literal.format(
np.copysign(values[0], sign),
int(values[1]),
values[2],
sep=sep,
pad=pad,
last_value=last_value,
)
def hours_to_string(h, precision=5, pad=False, sep=("h", "m", "s"), fields=3):
"""
Takes a decimal hour value and returns a string formatted as hms with
separator specified by the 'sep' parameter.
``h`` must be a scalar.
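    For example, with the default precision of 5 (the module path below is an
    assumption):
    >>> from astropy.coordinates.angle_formats import hours_to_string
    >>> hours_to_string(12.25)  # doctest: +SKIP
    '12h15m00.00000s'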
"""
h, m, s = hours_to_hms(h)
return sexagesimal_to_string(
(h, m, s), precision=precision, pad=pad, sep=sep, fields=fields
)
def degrees_to_string(d, precision=5, pad=False, sep=":", fields=3):
"""
    Takes a decimal degree value and returns a string formatted as dms with
separator specified by the 'sep' parameter.
``d`` must be a scalar.
"""
d, m, s = degrees_to_dms(d)
return sexagesimal_to_string(
(d, m, s), precision=precision, pad=pad, sep=sep, fields=fields
)
|
e89d176bdab9b0c7c5e4f7cc05b2ff84defd07bdabb1117fc1ef036e386350f5 | from contextlib import contextmanager
from typing import Dict, NamedTuple
import numpy as np
from astropy.utils import unbroadcast
from astropy.utils.data_info import MixinInfo
from astropy.utils.shapes import ShapedLikeNDArray
__all__ = ["StokesCoord", "custom_stokes_symbol_mapping", "StokesSymbol"]
class StokesSymbol(NamedTuple):
"""Symbol for a Stokes coordinate."""
symbol: str = ""
description: str = ""
# This is table 29 in the FITS 4.0 paper
FITS_STOKES_VALUE_SYMBOL_MAP = {
1: StokesSymbol("I", "Standard Stokes unpolarized"),
2: StokesSymbol("Q", "Standard Stokes linear"),
3: StokesSymbol("U", "Standard Stokes linear"),
4: StokesSymbol("V", "Standard Stokes circular"),
-1: StokesSymbol("RR", "Right-right circular: <RR*>"),
-2: StokesSymbol("LL", "Left-left circular: <LL*>"),
-3: StokesSymbol("RL", "Right-left cross-circular: Re(<RL*>))"),
-4: StokesSymbol("LR", "Left-right cross-circular: Re(<LR*>)=Im(<RL*>)"),
-5: StokesSymbol("XX", "X parallel linear: <XX*>"),
-6: StokesSymbol("YY", "Y parallel linear: <YY*>"),
-7: StokesSymbol("XY", "XY cross linear: Re(<XY*>)"),
-8: StokesSymbol("YX", "YX cross linear: Im(<XY*>)"),
}
STOKES_VALUE_SYMBOL_MAP = FITS_STOKES_VALUE_SYMBOL_MAP.copy()
UNKNOWN_STOKES_VALUE = -99999
@contextmanager
def custom_stokes_symbol_mapping(
mapping: Dict[int, StokesSymbol], replace: bool = False
) -> None:
"""
Add a custom set of mappings from values to Stokes symbols.
Parameters
----------
    mapping
        A dictionary with custom mappings between values (integers) and
        `.StokesSymbol` instances.
    replace
        If `True`, replace all existing mappings with the given one.
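    Examples
    --------
    A sketch of registering a temporary custom symbol; the value ``5`` and
    the symbol ``"ZZ"`` are invented purely for illustration:
    >>> from astropy.coordinates import StokesCoord, StokesSymbol
    >>> from astropy.coordinates import custom_stokes_symbol_mapping
    >>> with custom_stokes_symbol_mapping({5: StokesSymbol("ZZ", "example")}):
    ...     print(StokesCoord([1, 2, 5]).symbol)  # doctest: +SKIP
    ['I' 'Q' 'ZZ']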
"""
global STOKES_VALUE_SYMBOL_MAP
original_mapping = STOKES_VALUE_SYMBOL_MAP.copy()
if not replace:
STOKES_VALUE_SYMBOL_MAP = {**original_mapping, **mapping}
else:
STOKES_VALUE_SYMBOL_MAP = mapping
yield
STOKES_VALUE_SYMBOL_MAP = original_mapping
class StokesCoordInfo(MixinInfo):
# The attributes containing actual information.
_represent_as_dict_attrs = {"value"}
# Since there is only one attribute, use a column with the name to represent it
# (rather than as name.value)
_represent_as_dict_primary_data = "value"
# Attributes that should be presented as positional arguments to
# the class initializer (which takes "stokes" as an argument, not "value").
_construct_from_dict_args = ("value",)
@property
def unit(self):
return None
@property
def dtype(self):
return self._parent._data.dtype
@staticmethod
def default_format(val):
return f"{val.symbol}"
def new_like(self, cols, length, metadata_conflicts="warn", name=None):
"""
Return a new StokesCoord instance which is consistent with the
input ``cols`` and has ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
cols : list
List of input columns
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : `~astropy.coordinates.StokesCoord` (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(
cols, metadata_conflicts, name, ("meta", "format", "description")
)
# Make an empty StokesCoord.
shape = (length,) + attrs.pop("shape")
data = np.zeros(shape=shape, dtype=attrs.pop("dtype"))
# Get arguments needed to reconstruct class
out = self._construct_from_dict(dict(value=data))
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
def get_sortable_arrays(self):
"""
Return a list of arrays which can be lexically sorted to represent
the order of the parent column.
For StokesCoord this is just the underlying values.
Returns
-------
arrays : list of ndarray
"""
return [self._parent._data]
class StokesCoord(ShapedLikeNDArray):
"""
    A representation of Stokes coordinates with helpers for converting to profile names.
Parameters
----------
stokes : array-like
        The numeric values representing Stokes coordinates.
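    Examples
    --------
    A short illustration of creating coordinates from numeric values or from
    symbols (outputs shown approximately):
    >>> from astropy.coordinates import StokesCoord
    >>> StokesCoord([1, 2, 3, 4]).symbol  # doctest: +SKIP
    array(['I', 'Q', 'U', 'V'], dtype='<U2')
    >>> StokesCoord(['I', 'Q', 'U', 'V']).value  # doctest: +SKIP
    array([1, 2, 3, 4])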
"""
info = StokesCoordInfo()
def __init__(self, stokes, copy=False):
if isinstance(stokes, type(self)):
data = stokes._data.copy() if copy else stokes._data
self.info = stokes.info
else:
data = np.asanyarray(stokes)
if data.dtype.kind == "O":
msg = "StokesCoord objects cannot be initialised with an object array."
raise ValueError(msg)
if data.dtype.kind == "U":
data = self._from_symbols(data)
else:
data = data.copy() if copy and data is stokes else data
self._data = data
@property
def shape(self):
return self._data.shape
@property
def value(self):
return self._data
@property
def dtype(self):
return self._data.dtype
def __array__(self, dtype=None):
return self._data.astype(dtype, copy=False)
def _apply(self, method, *args, **kwargs):
cls = type(self)
if callable(method):
new = cls(method(self._data, *args, **kwargs))
else:
new = cls(getattr(self._data, method)(*args, **kwargs))
# Copy other 'info' attr only if it has actually been defined.
# See PR #3898 for further explanation and justification, along
# with Quantity.__array_finalize__
if "info" in self.__dict__:
new.info = self.info
return new
@staticmethod
def _from_symbols(symbols):
"""
Convert an array of symbols to an array of values
"""
values_array = np.full_like(
symbols, UNKNOWN_STOKES_VALUE, dtype=int, subok=False
)
for stokes_value, symbol in STOKES_VALUE_SYMBOL_MAP.items():
values_array[symbols == symbol.symbol] = stokes_value
if (unknown_values := np.equal(values_array, UNKNOWN_STOKES_VALUE)).any():
raise ValueError(
f"Unknown stokes symbols present in the input array: {np.unique(symbols[unknown_values])}"
)
return values_array
@property
def symbol(self):
"""The coordinate represented as strings."""
known_symbols = tuple(
["?"] + [s.symbol for s in STOKES_VALUE_SYMBOL_MAP.values()]
)
max_len = np.max([len(s) for s in known_symbols])
# Note we unbroadcast and re-broadcast here to prevent the new array
# using more memory than the old one.
unbroadcasted = np.round(unbroadcast(self.value))
        symbolarr = np.full(unbroadcasted.shape, "?", dtype=f"<U{max_len}")
for value, symbol in STOKES_VALUE_SYMBOL_MAP.items():
symbolarr[unbroadcasted == value] = symbol.symbol
return np.broadcast_to(symbolarr, self.shape)
def __setitem__(self, item, value):
self._data[item] = type(self)(value)._data
def __eq__(self, other):
try:
other = self.__class__(other)
except Exception:
return NotImplemented
return self._data == other._data
def __str__(self):
arrstr = np.array2string(self.symbol, separator=", ", prefix=" ")
return f"{type(self).__name__}({arrstr})"
def __repr__(self):
return self.__str__()
|
08d609db67ccadcb1e50540160c80d25cb932466a6d48dde52d77c09bccd1e70 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage contains classes and functions for celestial coordinates
of astronomical objects. It also contains a framework for conversions
between coordinate systems.
"""
from .angle_utilities import *
from .angles import *
from .attributes import *
from .baseframe import *
from .builtin_frames import *
from .calculation import *
from .distances import *
from .earth import *
from .errors import *
from .funcs import *
from .matching import *
from .name_resolve import *
from .polarization import *
from .representation import *
from .sky_coordinate import *
from .solar_system import *
from .spectral_coordinate import *
from .spectral_quantity import *
from .transformations import *
|
68db35392bc5a3e3ae5eca5ef2a761fc10353eb513d8c94be1585d4512e40a72 | import copy
import operator
import re
import warnings
import erfa
import numpy as np
from astropy import units as u
from astropy.constants import c as speed_of_light
from astropy.table import QTable
from astropy.time import Time
from astropy.utils import ShapedLikeNDArray
from astropy.utils.data_info import MixinInfo
from astropy.utils.exceptions import AstropyUserWarning
from .angles import Angle
from .baseframe import BaseCoordinateFrame, GenericFrame, frame_transform_graph
from .distances import Distance
from .representation import (
RadialDifferential,
SphericalDifferential,
SphericalRepresentation,
UnitSphericalCosLatDifferential,
UnitSphericalDifferential,
UnitSphericalRepresentation,
)
from .sky_coordinate_parsers import (
_get_frame_class,
_get_frame_without_data,
_parse_coordinate_data,
)
__all__ = ["SkyCoord", "SkyCoordInfo"]
class SkyCoordInfo(MixinInfo):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
attrs_from_parent = {"unit"} # Unit is read-only
_supports_indexing = False
@staticmethod
def default_format(val):
repr_data = val.info._repr_data
formats = ["{0." + compname + ".value:}" for compname in repr_data.components]
return ",".join(formats).format(repr_data)
@property
def unit(self):
repr_data = self._repr_data
unit = ",".join(
str(getattr(repr_data, comp).unit) or "None"
for comp in repr_data.components
)
return unit
@property
def _repr_data(self):
if self._parent is None:
return None
sc = self._parent
if issubclass(sc.representation_type, SphericalRepresentation) and isinstance(
sc.data, UnitSphericalRepresentation
):
repr_data = sc.represent_as(sc.data.__class__, in_frame_units=True)
else:
repr_data = sc.represent_as(sc.representation_type, in_frame_units=True)
return repr_data
def _represent_as_dict(self):
sc = self._parent
attrs = list(sc.representation_component_names)
# Don't output distance unless it's actually distance.
if isinstance(sc.data, UnitSphericalRepresentation):
attrs = attrs[:-1]
diff = sc.data.differentials.get("s")
if diff is not None:
diff_attrs = list(sc.get_representation_component_names("s"))
# Don't output proper motions if they haven't been specified.
if isinstance(diff, RadialDifferential):
diff_attrs = diff_attrs[2:]
# Don't output radial velocity unless it's actually velocity.
elif isinstance(
diff, (UnitSphericalDifferential, UnitSphericalCosLatDifferential)
):
diff_attrs = diff_attrs[:-1]
attrs.extend(diff_attrs)
attrs.extend(frame_transform_graph.frame_attributes.keys())
out = super()._represent_as_dict(attrs)
out["representation_type"] = sc.representation_type.get_name()
out["frame"] = sc.frame.name
# Note that sc.info.unit is a fake composite unit (e.g. 'deg,deg,None'
# or None,None,m) and is not stored. The individual attributes have
# units.
return out
def new_like(self, skycoords, length, metadata_conflicts="warn", name=None):
"""
Return a new SkyCoord instance which is consistent with the input
SkyCoord objects ``skycoords`` and has ``length`` rows. Being
"consistent" is defined as being able to set an item from one to each of
the rest without any exception being raised.
This is intended for creating a new SkyCoord instance whose elements can
be set in-place for table operations like join or vstack. This is used
when a SkyCoord object is used as a mixin column in an astropy Table.
The data values are not predictable and it is expected that the consumer
of the object will fill in all values.
Parameters
----------
skycoords : list
List of input SkyCoord objects
length : int
Length of the output skycoord object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output name (sets output skycoord.info.name)
Returns
-------
skycoord : |SkyCoord| (or subclass)
Instance of this class consistent with ``skycoords``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(
skycoords, metadata_conflicts, name, ("meta", "description")
)
skycoord0 = skycoords[0]
# Make a new SkyCoord object with the desired length and attributes
# by using the _apply / __getitem__ machinery to effectively return
# skycoord0[[0, 0, ..., 0, 0]]. This will have the all the right frame
# attributes with the right shape.
indexes = np.zeros(length, dtype=np.int64)
out = skycoord0[indexes]
# Use __setitem__ machinery to check for consistency of all skycoords
for skycoord in skycoords[1:]:
try:
out[0] = skycoord[0]
except Exception as err:
raise ValueError("Input skycoords are inconsistent.") from err
# Set (merged) info attributes
for attr in ("name", "meta", "description"):
if attr in attrs:
setattr(out.info, attr, attrs[attr])
return out
class SkyCoord(ShapedLikeNDArray):
"""High-level object providing a flexible interface for celestial coordinate
representation, manipulation, and transformation between systems.
The |SkyCoord| class accepts a wide variety of inputs for initialization. At
a minimum these must provide one or more celestial coordinate values with
unambiguous units. Inputs may be scalars or lists/tuples/arrays, yielding
scalar or array coordinates (can be checked via ``SkyCoord.isscalar``).
Typically one also specifies the coordinate frame, though this is not
required. The general pattern for spherical representations is::
SkyCoord(COORD, [FRAME], keyword_args ...)
SkyCoord(LON, LAT, [FRAME], keyword_args ...)
SkyCoord(LON, LAT, [DISTANCE], frame=FRAME, unit=UNIT, keyword_args ...)
SkyCoord([FRAME], <lon_attr>=LON, <lat_attr>=LAT, keyword_args ...)
It is also possible to input coordinate values in other representations
such as cartesian or cylindrical. In this case one includes the keyword
argument ``representation_type='cartesian'`` (for example) along with data
in ``x``, ``y``, and ``z``.
See also: https://docs.astropy.org/en/stable/coordinates/
Examples
--------
The examples below illustrate common ways of initializing a |SkyCoord|
object. For a complete description of the allowed syntax see the
full coordinates documentation. First some imports::
>>> from astropy.coordinates import SkyCoord # High-level coordinates
>>> from astropy.coordinates import ICRS, Galactic, FK4, FK5 # Low-level frames
>>> from astropy.coordinates import Angle, Latitude, Longitude # Angles
>>> import astropy.units as u
The coordinate values and frame specification can now be provided using
positional and keyword arguments::
>>> c = SkyCoord(10, 20, unit="deg") # defaults to ICRS frame
>>> c = SkyCoord([1, 2, 3], [-30, 45, 8], frame="icrs", unit="deg") # 3 coords
>>> coords = ["1:12:43.2 +31:12:43", "1 12 43.2 +31 12 43"]
>>> c = SkyCoord(coords, frame=FK4, unit=(u.hourangle, u.deg), obstime="J1992.21")
>>> c = SkyCoord("1h12m43.2s +1d12m43s", frame=Galactic) # Units from string
>>> c = SkyCoord(frame="galactic", l="1h12m43.2s", b="+1d12m43s")
>>> ra = Longitude([1, 2, 3], unit=u.deg) # Could also use Angle
>>> dec = np.array([4.5, 5.2, 6.3]) * u.deg # Astropy Quantity
>>> c = SkyCoord(ra, dec, frame='icrs')
>>> c = SkyCoord(frame=ICRS, ra=ra, dec=dec, obstime='2001-01-02T12:34:56')
>>> c = FK4(1 * u.deg, 2 * u.deg) # Uses defaults for obstime, equinox
>>> c = SkyCoord(c, obstime='J2010.11', equinox='B1965') # Override defaults
>>> c = SkyCoord(w=0, u=1, v=2, unit='kpc', frame='galactic',
... representation_type='cartesian')
>>> c = SkyCoord([ICRS(ra=1*u.deg, dec=2*u.deg), ICRS(ra=3*u.deg, dec=4*u.deg)])
Velocity components (proper motions or radial velocities) can also be
provided in a similar manner::
>>> c = SkyCoord(ra=1*u.deg, dec=2*u.deg, radial_velocity=10*u.km/u.s)
>>> c = SkyCoord(ra=1*u.deg, dec=2*u.deg, pm_ra_cosdec=2*u.mas/u.yr, pm_dec=1*u.mas/u.yr)
As shown, the frame can be a `~astropy.coordinates.BaseCoordinateFrame`
class or the corresponding string alias -- lower-case versions of the
class name that allow for creating a |SkyCoord| object and transforming
frames without explicitly importing the frame classes.
Parameters
----------
frame : `~astropy.coordinates.BaseCoordinateFrame` class or string, optional
Type of coordinate frame this |SkyCoord| should represent. Defaults to
        ICRS if not given or given as None.
unit : `~astropy.units.Unit`, string, or tuple of :class:`~astropy.units.Unit` or str, optional
Units for supplied coordinate values.
If only one unit is supplied then it applies to all values.
Note that passing only one unit might lead to unit conversion errors
if the coordinate values are expected to have mixed physical meanings
(e.g., angles and distances).
obstime : time-like, optional
Time(s) of observation.
equinox : time-like, optional
Coordinate frame equinox time.
representation_type : str or Representation class
Specifies the representation, e.g. 'spherical', 'cartesian', or
'cylindrical'. This affects the positional args and other keyword args
which must correspond to the given representation.
copy : bool, optional
If `True` (default), a copy of any coordinate data is made. This
argument can only be passed in as a keyword argument.
**keyword_args
Other keyword arguments as applicable for user-defined coordinate frames.
Common options include:
ra, dec : angle-like, optional
RA and Dec for frames where ``ra`` and ``dec`` are keys in the
frame's ``representation_component_names``, including ``ICRS``,
``FK5``, ``FK4``, and ``FK4NoETerms``.
pm_ra_cosdec, pm_dec : `~astropy.units.Quantity` ['angular speed'], optional
Proper motion components, in angle per time units.
l, b : angle-like, optional
            Galactic ``l`` and ``b`` for frames where ``l`` and ``b`` are
keys in the frame's ``representation_component_names``, including
the ``Galactic`` frame.
pm_l_cosb, pm_b : `~astropy.units.Quantity` ['angular speed'], optional
Proper motion components in the `~astropy.coordinates.Galactic` frame,
in angle per time units.
x, y, z : float or `~astropy.units.Quantity` ['length'], optional
Cartesian coordinates values
u, v, w : float or `~astropy.units.Quantity` ['length'], optional
Cartesian coordinates values for the Galactic frame.
radial_velocity : `~astropy.units.Quantity` ['speed'], optional
The component of the velocity along the line-of-sight (i.e., the
radial direction), in velocity units.
"""
# Declare that SkyCoord can be used as a Table column by defining the
# info property.
info = SkyCoordInfo()
def __init__(self, *args, copy=True, **kwargs):
# these are frame attributes set on this SkyCoord but *not* a part of
# the frame object this SkyCoord contains
self._extra_frameattr_names = set()
# If all that is passed in is a frame instance that already has data,
# we should bypass all of the parsing and logic below. This is here
# to make this the fastest way to create a SkyCoord instance. Many of
# the classmethods implemented for performance enhancements will use
# this as the initialization path
if (
len(args) == 1
and len(kwargs) == 0
and isinstance(args[0], (BaseCoordinateFrame, SkyCoord))
):
coords = args[0]
if isinstance(coords, SkyCoord):
self._extra_frameattr_names = coords._extra_frameattr_names
self.info = coords.info
# Copy over any extra frame attributes
for attr_name in self._extra_frameattr_names:
# Setting it will also validate it.
setattr(self, attr_name, getattr(coords, attr_name))
coords = coords.frame
if not coords.has_data:
raise ValueError(
"Cannot initialize from a coordinate frame "
"instance without coordinate data"
)
if copy:
self._sky_coord_frame = coords.copy()
else:
self._sky_coord_frame = coords
else:
# Get the frame instance without coordinate data but with all frame
# attributes set - these could either have been passed in with the
# frame as an instance, or passed in as kwargs here
frame_cls, frame_kwargs = _get_frame_without_data(args, kwargs)
# Parse the args and kwargs to assemble a sanitized and validated
# kwargs dict for initializing attributes for this object and for
# creating the internal self._sky_coord_frame object
args = list(args) # Make it mutable
skycoord_kwargs, components, info = _parse_coordinate_data(
frame_cls(**frame_kwargs), args, kwargs
)
# In the above two parsing functions, these kwargs were identified
# as valid frame attributes for *some* frame, but not the frame that
# this SkyCoord will have. We keep these attributes as special
# skycoord frame attributes:
for attr in skycoord_kwargs:
# Setting it will also validate it.
setattr(self, attr, skycoord_kwargs[attr])
if info is not None:
self.info = info
# Finally make the internal coordinate object.
frame_kwargs.update(components)
self._sky_coord_frame = frame_cls(copy=copy, **frame_kwargs)
if not self._sky_coord_frame.has_data:
raise ValueError("Cannot create a SkyCoord without data")
@property
def frame(self):
return self._sky_coord_frame
@property
def representation_type(self):
return self.frame.representation_type
@representation_type.setter
def representation_type(self, value):
self.frame.representation_type = value
@property
def shape(self):
return self.frame.shape
def __eq__(self, value):
"""Equality operator for SkyCoord.
This implements strict equality and requires that the frames are
equivalent, extra frame attributes are equivalent, and that the
representation data are exactly equal.
"""
if isinstance(value, BaseCoordinateFrame):
if value._data is None:
raise ValueError("Can only compare SkyCoord to Frame with data")
return self.frame == value
if not isinstance(value, SkyCoord):
return NotImplemented
# Make sure that any extra frame attribute names are equivalent.
for attr in self._extra_frameattr_names | value._extra_frameattr_names:
if not self.frame._frameattr_equiv(
getattr(self, attr), getattr(value, attr)
):
raise ValueError(
f"cannot compare: extra frame attribute '{attr}' is not equivalent"
" (perhaps compare the frames directly to avoid this exception)"
)
return self._sky_coord_frame == value._sky_coord_frame
def __ne__(self, value):
return np.logical_not(self == value)
def _apply(self, method, *args, **kwargs):
"""Create a new instance, applying a method to the underlying data.
In typical usage, the method is any of the shape-changing methods for
`~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those
picking particular elements (``__getitem__``, ``take``, etc.), which
are all defined in `~astropy.utils.shapes.ShapedLikeNDArray`. It will be
applied to the underlying arrays in the representation (e.g., ``x``,
``y``, and ``z`` for `~astropy.coordinates.CartesianRepresentation`),
as well as to any frame attributes that have a shape, with the results
used to create a new instance.
Internally, it is also used to apply functions to the above parts
(in particular, `~numpy.broadcast_to`).
Parameters
----------
method : str or callable
If str, it is the name of a method that is applied to the internal
``components``. If callable, the function is applied.
*args
Any positional arguments for ``method``.
**kwargs : dict
Any keyword arguments for ``method``.
"""
def apply_method(value):
if isinstance(value, ShapedLikeNDArray):
return value._apply(method, *args, **kwargs)
else:
if callable(method):
return method(value, *args, **kwargs)
else:
return getattr(value, method)(*args, **kwargs)
# create a new but empty instance, and copy over stuff
new = super().__new__(self.__class__)
new._sky_coord_frame = self._sky_coord_frame._apply(method, *args, **kwargs)
new._extra_frameattr_names = self._extra_frameattr_names.copy()
for attr in self._extra_frameattr_names:
value = getattr(self, attr)
if getattr(value, "shape", ()):
value = apply_method(value)
elif method == "copy" or method == "flatten":
# flatten should copy also for a single element array, but
# we cannot use it directly for array scalars, since it
# always returns a one-dimensional array. So, just copy.
value = copy.copy(value)
setattr(new, "_" + attr, value)
# Copy other 'info' attr only if it has actually been defined.
# See PR #3898 for further explanation and justification, along
# with Quantity.__array_finalize__
if "info" in self.__dict__:
new.info = self.info
return new
def __setitem__(self, item, value):
"""Implement self[item] = value for SkyCoord.
The right hand ``value`` must be strictly consistent with self:
- Identical class
- Equivalent frames
- Identical representation_types
- Identical representation differentials keys
- Identical frame attributes
- Identical "extra" frame attributes (e.g. obstime for an ICRS coord)
With these caveats the setitem ends up as effectively a setitem on
the representation data.
self.frame.data[item] = value.frame.data
"""
if self.__class__ is not value.__class__:
raise TypeError(
"can only set from object of same class: "
f"{self.__class__.__name__} vs. {value.__class__.__name__}"
)
# Make sure that any extra frame attribute names are equivalent.
for attr in self._extra_frameattr_names | value._extra_frameattr_names:
if not self.frame._frameattr_equiv(
getattr(self, attr), getattr(value, attr)
):
raise ValueError(f"attribute {attr} is not equivalent")
# Set the frame values. This checks frame equivalence and also clears
# the cache to ensure that the object is not in an inconsistent state.
self._sky_coord_frame[item] = value._sky_coord_frame
def insert(self, obj, values, axis=0):
"""
Insert coordinate values before the given indices in the object and
return a new Frame object.
The values to be inserted must conform to the rules for in-place setting
of |SkyCoord| objects.
The API signature matches the ``np.insert`` API, but is more limited.
The specification of insert index ``obj`` must be a single integer,
and the ``axis`` must be ``0`` for simple insertion before the index.
Parameters
----------
obj : int
Integer index before which ``values`` is inserted.
values : array-like
Value(s) to insert. If the type of ``values`` is different
            from that of this object, ``values`` is converted to the matching type.
axis : int, optional
Axis along which to insert ``values``. Default is 0, which is the
only allowed value and will insert a row.
Returns
-------
out : `~astropy.coordinates.SkyCoord` instance
New coordinate object with inserted value(s)
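        Examples
        --------
        A minimal sketch (coordinate values are illustrative)::
        >>> sc = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg)   # doctest: +SKIP
        >>> sc.insert(1, SkyCoord(10*u.deg, 20*u.deg))  # doctest: +SKIP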
"""
# Validate inputs: obj arg is integer, axis=0, self is not a scalar, and
# input index is in bounds.
try:
idx0 = operator.index(obj)
except TypeError:
raise TypeError("obj arg must be an integer")
if axis != 0:
raise ValueError("axis must be 0")
if not self.shape:
raise TypeError(
f"cannot insert into scalar {self.__class__.__name__} object"
)
if abs(idx0) > len(self):
raise IndexError(
f"index {idx0} is out of bounds for axis 0 with size {len(self)}"
)
# Turn negative index into positive
if idx0 < 0:
idx0 = len(self) + idx0
n_values = len(values) if values.shape else 1
# Finally make the new object with the correct length and set values for the
# three sections, before insert, the insert, and after the insert.
out = self.__class__.info.new_like(
[self], len(self) + n_values, name=self.info.name
)
# Set the output values. This is where validation of `values` takes place to ensure
# that it can indeed be inserted.
out[:idx0] = self[:idx0]
out[idx0 : idx0 + n_values] = values
out[idx0 + n_values :] = self[idx0:]
return out
def is_transformable_to(self, new_frame):
"""
Determines if this coordinate frame can be transformed to another
given frame.
Parameters
----------
new_frame : frame class, frame object, or str
The proposed frame to transform into.
Returns
-------
transformable : bool or str
`True` if this can be transformed to ``new_frame``, `False` if
not, or the string 'same' if ``new_frame`` is the same system as
this object but no transformation is defined.
Notes
-----
A return value of 'same' means the transformation will work, but it will
just give back a copy of this object. The intended usage is::
if coord.is_transformable_to(some_unknown_frame):
coord2 = coord.transform_to(some_unknown_frame)
This will work even if ``some_unknown_frame`` turns out to be the same
frame class as ``coord``. This is intended for cases where the frame
is the same regardless of the frame attributes (e.g. ICRS), but be
aware that it *might* also indicate that someone forgot to define the
transformation between two objects of the same frame class but with
different attributes.
"""
# TODO! like matplotlib, do string overrides for modified methods
new_frame = (
_get_frame_class(new_frame) if isinstance(new_frame, str) else new_frame
)
return self.frame.is_transformable_to(new_frame)
def transform_to(self, frame, merge_attributes=True):
"""Transform this coordinate to a new frame.
The precise frame transformed to depends on ``merge_attributes``.
If `False`, the destination frame is used exactly as passed in.
But this is often not quite what one wants. E.g., suppose one wants to
transform an ICRS coordinate that has an obstime attribute to FK4; in
this case, one likely would want to use this information. Thus, the
default for ``merge_attributes`` is `True`, in which the precedence is
as follows: (1) explicitly set (i.e., non-default) values in the
destination frame; (2) explicitly set values in the source; (3) default
value in the destination frame.
Note that in either case, any explicitly set attributes on the source
|SkyCoord| that are not part of the destination frame's definition are
kept (stored on the resulting |SkyCoord|), and thus one can round-trip
(e.g., from FK4 to ICRS to FK4 without losing obstime).
Parameters
----------
frame : str, `~astropy.coordinates.BaseCoordinateFrame` class or instance, or |SkyCoord| instance
The frame to transform this coordinate into. If a |SkyCoord|, the
underlying frame is extracted, and all other information ignored.
merge_attributes : bool, optional
Whether the default attributes in the destination frame are allowed
to be overridden by explicitly set attributes in the source
(see note above; default: `True`).
Returns
-------
coord : |SkyCoord|
A new object with this coordinate represented in the `frame` frame.
Raises
------
ValueError
If there is no possible transformation route.
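        Examples
        --------
        A minimal sketch of typical calls (illustrative only)::
        >>> c = SkyCoord(10*u.deg, 20*u.deg, frame="icrs")  # doctest: +SKIP
        >>> c.transform_to("galactic")  # doctest: +SKIP
        >>> c.transform_to(FK5(equinox="J1975"))  # doctest: +SKIP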
"""
from astropy.coordinates.errors import ConvertError
frame_kwargs = {}
# Frame name (string) or frame class? Coerce into an instance.
try:
frame = _get_frame_class(frame)()
except Exception:
pass
if isinstance(frame, SkyCoord):
frame = frame.frame # Change to underlying coord frame instance
if isinstance(frame, BaseCoordinateFrame):
new_frame_cls = frame.__class__
# Get frame attributes, allowing defaults to be overridden by
# explicitly set attributes of the source if ``merge_attributes``.
for attr in frame_transform_graph.frame_attributes:
self_val = getattr(self, attr, None)
frame_val = getattr(frame, attr, None)
if frame_val is not None and not (
merge_attributes and frame.is_frame_attr_default(attr)
):
frame_kwargs[attr] = frame_val
elif self_val is not None and not self.is_frame_attr_default(attr):
frame_kwargs[attr] = self_val
elif frame_val is not None:
frame_kwargs[attr] = frame_val
else:
raise ValueError(
"Transform `frame` must be a frame name, class, or instance"
)
# Get the composite transform to the new frame
trans = frame_transform_graph.get_transform(self.frame.__class__, new_frame_cls)
if trans is None:
raise ConvertError(
f"Cannot transform from {self.frame.__class__} to {new_frame_cls}"
)
# Make a generic frame which will accept all the frame kwargs that
# are provided and allow for transforming through intermediate frames
# which may require one or more of those kwargs.
generic_frame = GenericFrame(frame_kwargs)
# Do the transformation, returning a coordinate frame of the desired
# final type (not generic).
new_coord = trans(self.frame, generic_frame)
# Finally make the new SkyCoord object from the `new_coord` and
# remaining frame_kwargs that are not frame_attributes in `new_coord`.
for attr in set(new_coord.frame_attributes) & set(frame_kwargs.keys()):
frame_kwargs.pop(attr)
# Always remove the origin frame attribute, as that attribute only makes
# sense with a SkyOffsetFrame (in which case it will be stored on the frame).
# See gh-11277.
# TODO: Should it be a property of the frame attribute that it can
# or cannot be stored on a SkyCoord?
frame_kwargs.pop("origin", None)
return self.__class__(new_coord, **frame_kwargs)
def apply_space_motion(self, new_obstime=None, dt=None):
"""Compute the position to a new time using the velocities.
Compute the position of the source represented by this coordinate object
to a new time using the velocities stored in this object and assuming
linear space motion (including relativistic corrections). This is
sometimes referred to as an "epoch transformation".
The initial time before the evolution is taken from the ``obstime``
attribute of this coordinate. Note that this method currently does not
support evolving coordinates where the *frame* has an ``obstime`` frame
attribute, so the ``obstime`` is only used for storing the before and
after times, not actually as an attribute of the frame. Alternatively,
if ``dt`` is given, an ``obstime`` need not be provided at all.
Parameters
----------
new_obstime : `~astropy.time.Time`, optional
The time at which to evolve the position to. Requires that the
``obstime`` attribute be present on this frame.
dt : `~astropy.units.Quantity`, `~astropy.time.TimeDelta`, optional
An amount of time to evolve the position of the source. Cannot be
given at the same time as ``new_obstime``.
Returns
-------
new_coord : |SkyCoord|
A new coordinate object with the evolved location of this coordinate
at the new time. ``obstime`` will be set on this object to the new
time only if ``self`` also has ``obstime``.
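        Examples
        --------
        A minimal sketch of an epoch transformation (values are illustrative)::
        >>> c = SkyCoord(ra=10*u.deg, dec=20*u.deg, pm_ra_cosdec=100*u.mas/u.yr,
        ...              pm_dec=-50*u.mas/u.yr, obstime=Time("J2000"))  # doctest: +SKIP
        >>> c.apply_space_motion(new_obstime=Time("J2020"))  # doctest: +SKIP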
"""
from .builtin_frames.icrs import ICRS
if (new_obstime is None) == (dt is None):
raise ValueError(
"You must specify one of `new_obstime` or `dt`, but not both."
)
# Validate that we have velocity info
if "s" not in self.frame.data.differentials:
raise ValueError("SkyCoord requires velocity data to evolve the position.")
if "obstime" in self.frame.frame_attributes:
raise NotImplementedError(
"Updating the coordinates in a frame with explicit time dependence is"
" currently not supported. If you would like this functionality, please"
" open an issue on github:\nhttps://github.com/astropy/astropy"
)
if new_obstime is not None and self.obstime is None:
# If no obstime is already on this object, raise an error if a new
# obstime is passed: we need to know the time / epoch at which the
            # position / velocity were measured initially
raise ValueError(
"This object has no associated `obstime`. apply_space_motion() must"
" receive a time difference, `dt`, and not a new obstime."
)
# Compute t1 and t2, the times used in the starpm call, which *only*
# uses them to compute a delta-time
t1 = self.obstime
if dt is None:
# self.obstime is not None and new_obstime is not None b/c of above
# checks
t2 = new_obstime
else:
# new_obstime is definitely None b/c of the above checks
if t1 is None:
# MAGIC NUMBER: if the current SkyCoord object has no obstime,
# assume J2000 to do the dt offset. This is not actually used
# for anything except a delta-t in starpm, so it's OK that it's
# not necessarily the "real" obstime
t1 = Time("J2000")
new_obstime = None # we don't actually know the initial obstime
t2 = t1 + dt
else:
t2 = t1 + dt
new_obstime = t2
# starpm wants tdb time
t1 = t1.tdb
t2 = t2.tdb
# proper motion in RA should not include the cos(dec) term, see the
# erfa function eraStarpv, comment (4). So we convert to the regular
# spherical differentials.
icrsrep = self.icrs.represent_as(SphericalRepresentation, SphericalDifferential)
icrsvel = icrsrep.differentials["s"]
parallax_zero = False
try:
plx = icrsrep.distance.to_value(u.arcsecond, u.parallax())
except u.UnitConversionError: # No distance: set to 0 by convention
plx = 0.0
parallax_zero = True
try:
rv = icrsvel.d_distance.to_value(u.km / u.s)
except u.UnitConversionError: # No RV
rv = 0.0
starpm = erfa.pmsafe(
icrsrep.lon.radian,
icrsrep.lat.radian,
icrsvel.d_lon.to_value(u.radian / u.yr),
icrsvel.d_lat.to_value(u.radian / u.yr),
plx,
rv,
t1.jd1,
t1.jd2,
t2.jd1,
t2.jd2,
)
if parallax_zero:
new_distance = None
else:
new_distance = Distance(parallax=starpm[4] << u.arcsec)
icrs2 = ICRS(
ra=u.Quantity(starpm[0], u.radian, copy=False),
dec=u.Quantity(starpm[1], u.radian, copy=False),
pm_ra=u.Quantity(starpm[2], u.radian / u.yr, copy=False),
pm_dec=u.Quantity(starpm[3], u.radian / u.yr, copy=False),
distance=new_distance,
radial_velocity=u.Quantity(starpm[5], u.km / u.s, copy=False),
differential_type=SphericalDifferential,
)
# Update the obstime of the returned SkyCoord, and need to carry along
# the frame attributes
frattrs = {
attrnm: getattr(self, attrnm) for attrnm in self._extra_frameattr_names
}
frattrs["obstime"] = new_obstime
result = self.__class__(icrs2, **frattrs).transform_to(self.frame)
# Without this the output might not have the right differential type.
# Not sure if this fixes the problem or just hides it. See #11932
result.differential_type = self.differential_type
return result
def _is_name(self, string):
"""
Returns whether a string is one of the aliases for the frame.
"""
return self.frame.name == string or (
isinstance(self.frame.name, list) and string in self.frame.name
)
def __getattr__(self, attr):
"""
Overrides getattr to return coordinates that this can be transformed
to, based on the alias attr in the primary transform graph.
"""
if "_sky_coord_frame" in self.__dict__:
if self._is_name(attr):
return self # Should this be a deepcopy of self?
# Anything in the set of all possible frame_attr_names is handled
# here. If the attr is relevant for the current frame then delegate
# to self.frame otherwise get it from self._<attr>.
if attr in frame_transform_graph.frame_attributes:
if attr in self.frame.frame_attributes:
return getattr(self.frame, attr)
else:
return getattr(self, "_" + attr, None)
# Some attributes might not fall in the above category but still
# are available through self._sky_coord_frame.
if not attr.startswith("_") and hasattr(self._sky_coord_frame, attr):
return getattr(self._sky_coord_frame, attr)
# Try to interpret as a new frame for transforming.
frame_cls = frame_transform_graph.lookup_name(attr)
if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
return self.transform_to(attr)
# Call __getattribute__; this will give correct exception.
return self.__getattribute__(attr)
def __setattr__(self, attr, val):
# This is to make anything available through __getattr__ immutable
if "_sky_coord_frame" in self.__dict__:
if self._is_name(attr):
raise AttributeError(f"'{attr}' is immutable")
if not attr.startswith("_") and hasattr(self._sky_coord_frame, attr):
setattr(self._sky_coord_frame, attr, val)
return
frame_cls = frame_transform_graph.lookup_name(attr)
if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
raise AttributeError(f"'{attr}' is immutable")
if attr in frame_transform_graph.frame_attributes:
# All possible frame attributes can be set, but only via a private
# variable. See __getattr__ above.
super().__setattr__("_" + attr, val)
# Validate it
frame_transform_graph.frame_attributes[attr].__get__(self)
# And add to set of extra attributes
self._extra_frameattr_names |= {attr}
else:
# Otherwise, do the standard Python attribute setting
super().__setattr__(attr, val)
def __delattr__(self, attr):
# mirror __setattr__ above
if "_sky_coord_frame" in self.__dict__:
if self._is_name(attr):
raise AttributeError(f"'{attr}' is immutable")
if not attr.startswith("_") and hasattr(self._sky_coord_frame, attr):
delattr(self._sky_coord_frame, attr)
return
frame_cls = frame_transform_graph.lookup_name(attr)
if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
raise AttributeError(f"'{attr}' is immutable")
if attr in frame_transform_graph.frame_attributes:
# All possible frame attributes can be deleted, but need to remove
# the corresponding private variable. See __getattr__ above.
super().__delattr__("_" + attr)
# Also remove it from the set of extra attributes
self._extra_frameattr_names -= {attr}
else:
            # Otherwise, do the standard Python attribute deletion
super().__delattr__(attr)
def __dir__(self):
"""Original dir() behavior, plus frame attributes and transforms.
This dir includes:
- All attributes of the SkyCoord class
- Coordinate transforms available by aliases
- Attribute / methods of the underlying self.frame objects
"""
dir_values = set(super().__dir__())
# determine the aliases that this can be transformed to.
for name in frame_transform_graph.get_names():
frame_cls = frame_transform_graph.lookup_name(name)
if self.frame.is_transformable_to(frame_cls):
dir_values.add(name)
# Add public attributes of self.frame
dir_values.update(
{attr for attr in dir(self.frame) if not attr.startswith("_")}
)
# Add all possible frame attributes
dir_values.update(frame_transform_graph.frame_attributes.keys())
return sorted(dir_values)
def __repr__(self):
clsnm = self.__class__.__name__
coonm = self.frame.__class__.__name__
frameattrs = self.frame._frame_attrs_repr()
if frameattrs:
frameattrs = ": " + frameattrs
data = self.frame._data_repr()
if data:
data = ": " + data
return f"<{clsnm} ({coonm}{frameattrs}){data}>"
def to_string(self, style="decimal", **kwargs):
"""
A string representation of the coordinates.
The default styles definitions are::
'decimal': 'lat': {'decimal': True, 'unit': "deg"}
'lon': {'decimal': True, 'unit': "deg"}
'dms': 'lat': {'unit': "deg"}
'lon': {'unit': "deg"}
'hmsdms': 'lat': {'alwayssign': True, 'pad': True, 'unit': "deg"}
'lon': {'pad': True, 'unit': "hour"}
See :meth:`~astropy.coordinates.Angle.to_string` for details and
        keyword arguments (the two angles forming the coordinates are
both :class:`~astropy.coordinates.Angle` instances). Keyword
arguments have precedence over the style defaults and are passed
to :meth:`~astropy.coordinates.Angle.to_string`.
Parameters
----------
style : {'hmsdms', 'dms', 'decimal'}
The formatting specification to use. These encode the three most
            common ways to represent coordinates. The default is ``'decimal'``.
**kwargs
Keyword args passed to :meth:`~astropy.coordinates.Angle.to_string`.
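        Examples
        --------
        A minimal sketch of the three styles (output not shown)::
        >>> c = SkyCoord(10.5*u.deg, -20.25*u.deg)  # doctest: +SKIP
        >>> c.to_string("decimal")  # doctest: +SKIP
        >>> c.to_string("hmsdms")  # doctest: +SKIP
        >>> c.to_string("dms")  # doctest: +SKIP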
"""
sph_coord = self.frame.represent_as(SphericalRepresentation)
styles = {
"hmsdms": {
"lonargs": {"unit": u.hour, "pad": True},
"latargs": {"unit": u.degree, "pad": True, "alwayssign": True},
},
"dms": {"lonargs": {"unit": u.degree}, "latargs": {"unit": u.degree}},
"decimal": {
"lonargs": {"unit": u.degree, "decimal": True},
"latargs": {"unit": u.degree, "decimal": True},
},
}
lonargs = {}
latargs = {}
if style in styles:
lonargs.update(styles[style]["lonargs"])
latargs.update(styles[style]["latargs"])
else:
raise ValueError(f"Invalid style. Valid options are: {','.join(styles)}")
lonargs.update(kwargs)
latargs.update(kwargs)
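        # Scalar coordinates format to a single string; array coordinates are
        # formatted element-by-element into a list (reshaped below to match the
        # input shape when multidimensional).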
if np.isscalar(sph_coord.lon.value):
coord_string = (
f"{sph_coord.lon.to_string(**lonargs)}"
f" {sph_coord.lat.to_string(**latargs)}"
)
else:
coord_string = []
for lonangle, latangle in zip(sph_coord.lon.ravel(), sph_coord.lat.ravel()):
coord_string += [
f"{lonangle.to_string(**lonargs)} {latangle.to_string(**latargs)}"
]
if len(sph_coord.shape) > 1:
coord_string = np.array(coord_string).reshape(sph_coord.shape)
return coord_string
def to_table(self):
"""
Convert this |SkyCoord| to a |QTable|.
Any attributes that have the same length as the |SkyCoord| will be
converted to columns of the |QTable|. All other attributes will be
recorded as metadata.
Returns
-------
`~astropy.table.QTable`
A |QTable| containing the data of this |SkyCoord|.
Examples
--------
>>> sc = SkyCoord(ra=[40, 70]*u.deg, dec=[0, -20]*u.deg,
... obstime=Time([2000, 2010], format='jyear'))
>>> t = sc.to_table()
>>> t
<QTable length=2>
ra dec obstime
deg deg
float64 float64 Time
------- ------- -------
40.0 0.0 2000.0
70.0 -20.0 2010.0
>>> t.meta
{'representation_type': 'spherical', 'frame': 'icrs'}
"""
self_as_dict = self.info._represent_as_dict()
tabledata = {}
metadata = {}
# Record attributes that have the same length as self as columns in the
# table, and the other attributes as table metadata. This matches
# table.serialize._represent_mixin_as_column().
for key, value in self_as_dict.items():
if getattr(value, "shape", ())[:1] == (len(self),):
tabledata[key] = value
else:
metadata[key] = value
return QTable(tabledata, meta=metadata)
def is_equivalent_frame(self, other):
"""
        Checks if this object's frame is the same as that of the ``other``
object.
To be the same frame, two objects must be the same frame class and have
the same frame attributes. For two |SkyCoord| objects, *all* of the
frame attributes have to match, not just those relevant for the object's
frame.
Parameters
----------
other : SkyCoord or BaseCoordinateFrame
The other object to check.
Returns
-------
isequiv : bool
True if the frames are the same, False if not.
Raises
------
TypeError
If ``other`` isn't a |SkyCoord| or a subclass of
`~astropy.coordinates.BaseCoordinateFrame`.
"""
if isinstance(other, BaseCoordinateFrame):
return self.frame.is_equivalent_frame(other)
elif isinstance(other, SkyCoord):
if other.frame.name != self.frame.name:
return False
for fattrnm in frame_transform_graph.frame_attributes:
if not BaseCoordinateFrame._frameattr_equiv(
getattr(self, fattrnm), getattr(other, fattrnm)
):
return False
return True
else:
# not a BaseCoordinateFrame nor a SkyCoord object
raise TypeError(
"Tried to do is_equivalent_frame on something that isn't frame-like"
)
# High-level convenience methods
def separation(self, other):
"""
Computes on-sky separation between this coordinate and another.
.. note::
If the ``other`` coordinate object is in a different frame, it is
first transformed to the frame of this object. This can lead to
unintuitive behavior if not accounted for. Particularly of note is
that ``self.separation(other)`` and ``other.separation(self)`` may
not give the same answer in this case.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
other : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to get the separation to.
Returns
-------
sep : `~astropy.coordinates.Angle`
The on-sky separation between this and the ``other`` coordinate.
Notes
-----
The separation is calculated using the Vincenty formula, which
is stable at all locations, including poles and antipodes [1]_.
.. [1] https://en.wikipedia.org/wiki/Great-circle_distance
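        Examples
        --------
        A minimal sketch (values are illustrative)::
        >>> c1 = SkyCoord(10*u.deg, 20*u.deg)  # doctest: +SKIP
        >>> c2 = SkyCoord(11*u.deg, 21*u.deg)  # doctest: +SKIP
        >>> c1.separation(c2)  # doctest: +SKIP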
"""
from . import Angle
from .angle_utilities import angular_separation
if not self.is_equivalent_frame(other):
try:
kwargs = (
{"merge_attributes": False} if isinstance(other, SkyCoord) else {}
)
other = other.transform_to(self, **kwargs)
except TypeError:
raise TypeError(
"Can only get separation to another SkyCoord "
"or a coordinate frame with data"
)
lon1 = self.spherical.lon
lat1 = self.spherical.lat
lon2 = other.spherical.lon
lat2 = other.spherical.lat
# Get the separation as a Quantity, convert to Angle in degrees
sep = angular_separation(lon1, lat1, lon2, lat2)
return Angle(sep, unit=u.degree)
def separation_3d(self, other):
"""
Computes three dimensional separation between this coordinate
and another.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
other : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to get the separation to.
Returns
-------
sep : `~astropy.coordinates.Distance`
The real-space distance between these two coordinates.
Raises
------
ValueError
If this or the other coordinate do not have distances.
"""
if not self.is_equivalent_frame(other):
try:
kwargs = (
{"merge_attributes": False} if isinstance(other, SkyCoord) else {}
)
other = other.transform_to(self, **kwargs)
except TypeError:
raise TypeError(
"Can only get separation to another SkyCoord "
"or a coordinate frame with data"
)
if issubclass(self.data.__class__, UnitSphericalRepresentation):
raise ValueError(
"This object does not have a distance; cannot compute 3d separation."
)
if issubclass(other.data.__class__, UnitSphericalRepresentation):
raise ValueError(
"The other object does not have a distance; "
"cannot compute 3d separation."
)
c1 = self.cartesian.without_differentials()
c2 = other.cartesian.without_differentials()
return Distance((c1 - c2).norm())
def spherical_offsets_to(self, tocoord):
r"""
Computes angular offsets to go *from* this coordinate *to* another.
Parameters
----------
tocoord : `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to find the offset to.
Returns
-------
lon_offset : `~astropy.coordinates.Angle`
The angular offset in the longitude direction. The definition of
"longitude" depends on this coordinate's frame (e.g., RA for
equatorial coordinates).
lat_offset : `~astropy.coordinates.Angle`
The angular offset in the latitude direction. The definition of
"latitude" depends on this coordinate's frame (e.g., Dec for
equatorial coordinates).
Raises
------
ValueError
If the ``tocoord`` is not in the same frame as this one. This is
different from the behavior of the `separation`/`separation_3d`
methods because the offset components depend critically on the
specific choice of frame.
Notes
-----
This uses the sky offset frame machinery, and hence will produce a new
sky offset frame if one does not already exist for this object's frame
class.
See Also
--------
separation :
for the *total* angular offset (not broken out into components).
position_angle :
for the direction of the offset.
"""
if not self.is_equivalent_frame(tocoord):
raise ValueError(
"Tried to use spherical_offsets_to with two non-matching frames!"
)
aframe = self.skyoffset_frame()
acoord = tocoord.transform_to(aframe)
dlon = acoord.spherical.lon.view(Angle)
dlat = acoord.spherical.lat.view(Angle)
return dlon, dlat
def spherical_offsets_by(self, d_lon, d_lat):
"""
Computes the coordinate that is a specified pair of angular offsets away
from this coordinate.
Parameters
----------
d_lon : angle-like
The angular offset in the longitude direction. The definition of
"longitude" depends on this coordinate's frame (e.g., RA for
equatorial coordinates).
d_lat : angle-like
The angular offset in the latitude direction. The definition of
"latitude" depends on this coordinate's frame (e.g., Dec for
equatorial coordinates).
Returns
-------
newcoord : `~astropy.coordinates.SkyCoord`
The coordinates for the location that corresponds to offsetting by
``d_lat`` in the latitude direction and ``d_lon`` in the longitude
direction.
Notes
-----
This internally uses `~astropy.coordinates.SkyOffsetFrame` to do the
transformation. For a more complete set of transform offsets, use
`~astropy.coordinates.SkyOffsetFrame` or `~astropy.wcs.WCS` manually.
This specific method can be reproduced by doing
``SkyCoord(SkyOffsetFrame(d_lon, d_lat, origin=self.frame).transform_to(self))``.
See Also
--------
spherical_offsets_to : compute the angular offsets to another coordinate
directional_offset_by : offset a coordinate by an angle in a direction
"""
from .builtin_frames.skyoffset import SkyOffsetFrame
return self.__class__(
SkyOffsetFrame(d_lon, d_lat, origin=self.frame).transform_to(self)
)
def directional_offset_by(self, position_angle, separation):
"""
Computes coordinates at the given offset from this coordinate.
Parameters
----------
position_angle : `~astropy.coordinates.Angle`
position_angle of offset
separation : `~astropy.coordinates.Angle`
offset angular separation
Returns
-------
newpoints : `~astropy.coordinates.SkyCoord`
The coordinates for the location that corresponds to offsetting by
the given `position_angle` and `separation`.
Notes
-----
Returned SkyCoord frame retains only the frame attributes that are for
the resulting frame type. (e.g. if the input frame is
`~astropy.coordinates.ICRS`, an ``equinox`` value will be retained, but
an ``obstime`` will not.)
For a more complete set of transform offsets, use `~astropy.wcs.WCS`.
`~astropy.coordinates.SkyCoord.skyoffset_frame()` can also be used to
create a spherical frame with (lat=0, lon=0) at a reference point,
approximating an xy cartesian system for small offsets. This method
is distinct in that it is accurate on the sphere.
See Also
--------
position_angle : inverse operation for the ``position_angle`` component
separation : inverse operation for the ``separation`` component
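        Examples
        --------
        A minimal sketch (values are illustrative)::
        >>> c = SkyCoord(10*u.deg, 20*u.deg)  # doctest: +SKIP
        >>> c.directional_offset_by(position_angle=90*u.deg, separation=1*u.deg)  # doctest: +SKIP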
"""
from . import angle_utilities
slat = self.represent_as(UnitSphericalRepresentation).lat
slon = self.represent_as(UnitSphericalRepresentation).lon
newlon, newlat = angle_utilities.offset_by(
lon=slon, lat=slat, posang=position_angle, distance=separation
)
return SkyCoord(newlon, newlat, frame=self.frame)
def match_to_catalog_sky(self, catalogcoord, nthneighbor=1):
"""
Finds the nearest on-sky matches of this coordinate in a set of
catalog coordinates.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
catalogcoord : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The base catalog in which to search for matches. Typically this
will be a coordinate object that is an array (i.e.,
``catalogcoord.isscalar == False``)
nthneighbor : int, optional
Which closest neighbor to search for. Typically ``1`` is
desired here, as that is correct for matching one set of
coordinates to another. The next likely use case is ``2``,
for matching a coordinate catalog against *itself* (``1``
is inappropriate because each point will find itself as the
closest match).
Returns
-------
idx : int array
Indices into ``catalogcoord`` to get the matched points for
each of this object's coordinates. Shape matches this
object.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the closest match for each
element in this object in ``catalogcoord``. Shape matches
this object.
dist3d : `~astropy.units.Quantity` ['length']
The 3D distance between the closest match for each element
in this object in ``catalogcoord``. Shape matches this
object. Unless both this and ``catalogcoord`` have associated
distances, this quantity assumes that all sources are at a
distance of 1 (dimensionless).
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ to be
installed or it will fail.
See Also
--------
astropy.coordinates.match_coordinates_sky
SkyCoord.match_to_catalog_3d
"""
from .matching import match_coordinates_sky
if not (
isinstance(catalogcoord, (SkyCoord, BaseCoordinateFrame))
and catalogcoord.has_data
):
raise TypeError(
"Can only get separation to another SkyCoord or a "
"coordinate frame with data"
)
res = match_coordinates_sky(
self, catalogcoord, nthneighbor=nthneighbor, storekdtree="_kdtree_sky"
)
return res
def match_to_catalog_3d(self, catalogcoord, nthneighbor=1):
"""
Finds the nearest 3-dimensional matches of this coordinate to a set
of catalog coordinates.
This finds the 3-dimensional closest neighbor, which is only different
from the on-sky distance if ``distance`` is set in this object or the
``catalogcoord`` object.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
catalogcoord : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The base catalog in which to search for matches. Typically this
will be a coordinate object that is an array (i.e.,
``catalogcoord.isscalar == False``)
nthneighbor : int, optional
Which closest neighbor to search for. Typically ``1`` is
desired here, as that is correct for matching one set of
coordinates to another. The next likely use case is
``2``, for matching a coordinate catalog against *itself*
(``1`` is inappropriate because each point will find
itself as the closest match).
Returns
-------
idx : int array
Indices into ``catalogcoord`` to get the matched points for
each of this object's coordinates. Shape matches this
object.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the closest match for each
element in this object in ``catalogcoord``. Shape matches
this object.
dist3d : `~astropy.units.Quantity` ['length']
The 3D distance between the closest match for each element
in this object in ``catalogcoord``. Shape matches this
object.
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ to be
installed or it will fail.
See Also
--------
astropy.coordinates.match_coordinates_3d
SkyCoord.match_to_catalog_sky
"""
from .matching import match_coordinates_3d
if not (
isinstance(catalogcoord, (SkyCoord, BaseCoordinateFrame))
and catalogcoord.has_data
):
raise TypeError(
"Can only get separation to another SkyCoord or a "
"coordinate frame with data"
)
res = match_coordinates_3d(
self, catalogcoord, nthneighbor=nthneighbor, storekdtree="_kdtree_3d"
)
return res
def search_around_sky(self, searcharoundcoords, seplimit):
"""
Searches for all coordinates in this object around a supplied set of
points within a given on-sky separation.
This is intended for use on `~astropy.coordinates.SkyCoord` objects
with coordinate arrays, rather than a scalar coordinate. For a scalar
coordinate, it is better to use
`~astropy.coordinates.SkyCoord.separation`.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
searcharoundcoords : coordinate-like
The coordinates to search around to try to find matching points in
this |SkyCoord|. This should be an object with array coordinates,
not a scalar coordinate object.
seplimit : `~astropy.units.Quantity` ['angle']
The on-sky separation to search within.
Returns
-------
idxsearcharound : int array
Indices into ``searcharoundcoords`` that match the
corresponding elements of ``idxself``. Shape matches
``idxself``.
idxself : int array
Indices into ``self`` that match the
corresponding elements of ``idxsearcharound``. Shape matches
``idxsearcharound``.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
dist3d : `~astropy.units.Quantity` ['length']
The 3D distance between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ to be
installed or it will fail.
In the current implementation, the return values are always sorted in
the same order as the ``searcharoundcoords`` (so ``idxsearcharound`` is
in ascending order). This is considered an implementation detail,
though, so it could change in a future release.
See Also
--------
astropy.coordinates.search_around_sky
SkyCoord.search_around_3d
"""
from .matching import search_around_sky
return search_around_sky(
searcharoundcoords, self, seplimit, storekdtree="_kdtree_sky"
)
def search_around_3d(self, searcharoundcoords, distlimit):
"""
Searches for all coordinates in this object around a supplied set of
points within a given 3D radius.
This is intended for use on `~astropy.coordinates.SkyCoord` objects
with coordinate arrays, rather than a scalar coordinate. For a scalar
coordinate, it is better to use
`~astropy.coordinates.SkyCoord.separation_3d`.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
searcharoundcoords : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinates to search around to try to find matching points in
this |SkyCoord|. This should be an object with array coordinates,
not a scalar coordinate object.
distlimit : `~astropy.units.Quantity` ['length']
The physical radius to search within.
Returns
-------
idxsearcharound : int array
Indices into ``searcharoundcoords`` that match the
corresponding elements of ``idxself``. Shape matches
``idxself``.
idxself : int array
Indices into ``self`` that match the
corresponding elements of ``idxsearcharound``. Shape matches
``idxsearcharound``.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
dist3d : `~astropy.units.Quantity` ['length']
The 3D distance between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ to be
installed or it will fail.
In the current implementation, the return values are always sorted in
the same order as the ``searcharoundcoords`` (so ``idxsearcharound`` is
in ascending order). This is considered an implementation detail,
though, so it could change in a future release.
See Also
--------
astropy.coordinates.search_around_3d
SkyCoord.search_around_sky
"""
from .matching import search_around_3d
return search_around_3d(
searcharoundcoords, self, distlimit, storekdtree="_kdtree_3d"
)
def position_angle(self, other):
"""
Computes the on-sky position angle (East of North) between this
SkyCoord and another.
Parameters
----------
other : |SkyCoord|
The other coordinate to compute the position angle to. It is
treated as the "head" of the vector of the position angle.
Returns
-------
pa : `~astropy.coordinates.Angle`
The (positive) position angle of the vector pointing from ``self``
to ``other``. If either ``self`` or ``other`` contain arrays, this
will be an array following the appropriate `numpy` broadcasting
rules.
Examples
--------
>>> c1 = SkyCoord(0*u.deg, 0*u.deg)
>>> c2 = SkyCoord(1*u.deg, 0*u.deg)
>>> c1.position_angle(c2).degree
90.0
>>> c3 = SkyCoord(1*u.deg, 1*u.deg)
>>> c1.position_angle(c3).degree # doctest: +FLOAT_CMP
44.995636455344844
"""
from . import angle_utilities
if not self.is_equivalent_frame(other):
try:
other = other.transform_to(self, merge_attributes=False)
except TypeError:
raise TypeError(
"Can only get position_angle to another "
"SkyCoord or a coordinate frame with data"
)
slat = self.represent_as(UnitSphericalRepresentation).lat
slon = self.represent_as(UnitSphericalRepresentation).lon
olat = other.represent_as(UnitSphericalRepresentation).lat
olon = other.represent_as(UnitSphericalRepresentation).lon
return angle_utilities.position_angle(slon, slat, olon, olat)
def skyoffset_frame(self, rotation=None):
"""
Returns the sky offset frame with this SkyCoord at the origin.
Parameters
----------
rotation : angle-like
The final rotation of the frame about the ``origin``. The sign of
the rotation is the left-hand rule. That is, an object at a
particular position angle in the un-rotated system will be sent to
the positive latitude (z) direction in the final frame.
Returns
-------
astrframe : `~astropy.coordinates.SkyOffsetFrame`
A sky offset frame of the same type as this |SkyCoord| (e.g., if
this object has an ICRS coordinate, the resulting frame is
SkyOffsetICRS, with the origin set to this object)
"""
from .builtin_frames.skyoffset import SkyOffsetFrame
return SkyOffsetFrame(origin=self, rotation=rotation)
def get_constellation(self, short_name=False, constellation_list="iau"):
"""
Determines the constellation(s) of the coordinates this SkyCoord contains.
Parameters
----------
short_name : bool
If True, the returned names are the IAU-sanctioned abbreviated
names. Otherwise, full names for the constellations are used.
constellation_list : str
The set of constellations to use. Currently only ``'iau'`` is
supported, meaning the 88 "modern" constellations endorsed by the IAU.
Returns
-------
constellation : str or string array
If this is a scalar coordinate, returns the name of the
constellation. If it is an array |SkyCoord|, it returns an array of
names.
Notes
-----
To determine which constellation a point on the sky is in, this first
precesses to B1875, and then uses the Delporte boundaries of the 88
modern constellations, as tabulated by
`Roman 1987 <https://cdsarc.cds.unistra.fr/viz-bin/Cat?VI/42>`_.
See Also
--------
astropy.coordinates.get_constellation
"""
from .funcs import get_constellation
# because of issue #7028, the conversion to a PrecessedGeocentric
# system fails in some cases. Work around is to drop the velocities.
# they are not needed here since only position information is used
extra_frameattrs = {nm: getattr(self, nm) for nm in self._extra_frameattr_names}
novel = SkyCoord(
self.realize_frame(self.data.without_differentials()), **extra_frameattrs
)
return get_constellation(novel, short_name, constellation_list)
# the simpler version below can be used when gh-issue #7028 is resolved
# return get_constellation(self, short_name, constellation_list)
# WCS pixel to/from sky conversions
def to_pixel(self, wcs, origin=0, mode="all"):
"""
Convert this coordinate to pixel coordinates using a `~astropy.wcs.WCS`
object.
Parameters
----------
wcs : `~astropy.wcs.WCS`
            The WCS to use for the conversion.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
Whether to do the transformation including distortions (``'all'``) or
            including only the core WCS transformation (``'wcs'``).
Returns
-------
xp, yp : `numpy.ndarray`
The pixel coordinates
See Also
--------
astropy.wcs.utils.skycoord_to_pixel : the implementation of this method
"""
from astropy.wcs.utils import skycoord_to_pixel
return skycoord_to_pixel(self, wcs=wcs, origin=origin, mode=mode)
@classmethod
def from_pixel(cls, xp, yp, wcs, origin=0, mode="all"):
"""
Create a new SkyCoord from pixel coordinates using a World Coordinate System.
Parameters
----------
xp, yp : float or ndarray
The coordinates to convert.
wcs : `~astropy.wcs.WCS`
            The WCS to use for the conversion.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
Whether to do the transformation including distortions (``'all'``) or
            including only the core WCS transformation (``'wcs'``).
Returns
-------
coord : `~astropy.coordinates.SkyCoord`
A new object with sky coordinates corresponding to the input ``xp``
and ``yp``.
See Also
--------
to_pixel : to do the inverse operation
astropy.wcs.utils.pixel_to_skycoord : the implementation of this method
"""
from astropy.wcs.utils import pixel_to_skycoord
return pixel_to_skycoord(xp, yp, wcs=wcs, origin=origin, mode=mode, cls=cls)
def contained_by(self, wcs, image=None, **kwargs):
"""
Determines if the SkyCoord is contained in the given wcs footprint.
Parameters
----------
wcs : `~astropy.wcs.WCS`
            The WCS footprint in which to check whether this coordinate is contained.
image : array
Optional. The image associated with the wcs object that the coordinate
            is being checked against. If not given, the naxis keywords will be used
to determine if the coordinate falls within the wcs footprint.
**kwargs
Additional arguments to pass to `~astropy.coordinates.SkyCoord.to_pixel`
Returns
-------
response : bool
True means the WCS footprint contains the coordinate, False means it does not.
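        Examples
        --------
        A minimal sketch (``w`` stands for an existing `~astropy.wcs.WCS` instance)::
        >>> sc = SkyCoord(10*u.deg, 20*u.deg)  # doctest: +SKIP
        >>> sc.contained_by(w)  # doctest: +SKIP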
"""
if image is not None:
ymax, xmax = image.shape
else:
xmax, ymax = wcs._naxis
import warnings
with warnings.catch_warnings():
# Suppress warnings since they just mean we didn't find the coordinate
warnings.simplefilter("ignore")
try:
x, y = self.to_pixel(wcs, **kwargs)
except Exception:
return False
return (x < xmax) & (x > 0) & (y < ymax) & (y > 0)
def radial_velocity_correction(
self, kind="barycentric", obstime=None, location=None
):
"""
Compute the correction required to convert a radial velocity at a given
time and place on the Earth's surface to a barycentric or heliocentric
velocity.
Parameters
----------
kind : str
The kind of velocity correction. Must be 'barycentric' or
'heliocentric'.
obstime : `~astropy.time.Time` or None, optional
The time at which to compute the correction. If `None`, the
``obstime`` frame attribute on the |SkyCoord| will be used.
location : `~astropy.coordinates.EarthLocation` or None, optional
The observer location at which to compute the correction. If
`None`, the ``location`` frame attribute on the passed-in
``obstime`` will be used, and if that is None, the ``location``
frame attribute on the |SkyCoord| will be used.
Raises
------
ValueError
If either ``obstime`` or ``location`` are passed in (not ``None``)
when the frame attribute is already set on this |SkyCoord|.
TypeError
If ``obstime`` or ``location`` aren't provided, either as arguments
or as frame attributes.
Returns
-------
vcorr : `~astropy.units.Quantity` ['speed']
The correction with a positive sign. I.e., *add* this
to an observed radial velocity to get the barycentric (or
heliocentric) velocity. If m/s precision or better is needed,
see the notes below.
Notes
-----
The barycentric correction is calculated to higher precision than the
heliocentric correction and includes additional physics (e.g., time dilation).
Use barycentric corrections if m/s precision is required.
The algorithm here is sufficient to perform corrections at the mm/s level, but
care is needed in application. The barycentric correction returned uses the optical
approximation v = z * c. Strictly speaking, the barycentric correction is
multiplicative and should be applied as::
>>> from astropy.time import Time
>>> from astropy.coordinates import SkyCoord, EarthLocation
>>> import astropy.units as u
>>> from astropy.constants import c
>>> t = Time(56370.5, format='mjd', scale='utc')
>>> loc = EarthLocation('149d33m00.5s','-30d18m46.385s',236.87*u.m)
>>> sc = SkyCoord(1*u.deg, 2*u.deg)
>>> vcorr = sc.radial_velocity_correction(kind='barycentric', obstime=t, location=loc) # doctest: +REMOTE_DATA
>>> rv = rv + vcorr + rv * vcorr / c # doctest: +SKIP
Also note that this method returns the correction velocity in the so-called
*optical convention*::
>>> vcorr = zb * c # doctest: +SKIP
where ``zb`` is the barycentric correction redshift as defined in section 3
of Wright & Eastman (2014). The application formula given above follows from their
equation (11) under the assumption that the radial velocity ``rv`` has also been defined
using the same optical convention. Note, this can be regarded as a matter of
velocity definition and does not by itself imply any loss of accuracy, provided
sufficient care has been taken during interpretation of the results. If you need
the barycentric correction expressed as the full relativistic velocity (e.g., to provide
it as input to other software that performs the application), the
following recipe can be used::
>>> zb = vcorr / c # doctest: +REMOTE_DATA
>>> zb_plus_one_squared = (zb + 1) ** 2 # doctest: +REMOTE_DATA
>>> vcorr_rel = c * (zb_plus_one_squared - 1) / (zb_plus_one_squared + 1) # doctest: +REMOTE_DATA
or alternatively using just equivalencies::
>>> vcorr_rel = vcorr.to(u.Hz, u.doppler_optical(1*u.Hz)).to(vcorr.unit, u.doppler_relativistic(1*u.Hz)) # doctest: +REMOTE_DATA
See also `~astropy.units.equivalencies.doppler_optical`,
`~astropy.units.equivalencies.doppler_radio`, and
`~astropy.units.equivalencies.doppler_relativistic` for more information on
the velocity conventions.
The default is for this method to use the builtin ephemeris for
computing the sun and earth location. Other ephemerides can be chosen
by setting the `~astropy.coordinates.solar_system_ephemeris` variable,
either directly or via ``with`` statement. For example, to use the JPL
ephemeris, do::
>>> from astropy.coordinates import solar_system_ephemeris
>>> sc = SkyCoord(1*u.deg, 2*u.deg)
>>> with solar_system_ephemeris.set('jpl'): # doctest: +REMOTE_DATA
... rv += sc.radial_velocity_correction(obstime=t, location=loc) # doctest: +SKIP
"""
# has to be here to prevent circular imports
from .solar_system import get_body_barycentric_posvel
# location validation
timeloc = getattr(obstime, "location", None)
if location is None:
if self.location is not None:
location = self.location
if timeloc is not None:
raise ValueError(
"`location` cannot be in both the passed-in `obstime` and this"
" `SkyCoord` because it is ambiguous which is meant for the"
" radial_velocity_correction."
)
elif timeloc is not None:
location = timeloc
else:
raise TypeError(
"Must provide a `location` to radial_velocity_correction, either as"
" a SkyCoord frame attribute, as an attribute on the passed in"
" `obstime`, or in the method call."
)
elif self.location is not None or timeloc is not None:
raise ValueError(
"Cannot compute radial velocity correction if `location` argument is"
" passed in and there is also a `location` attribute on this SkyCoord"
" or the passed-in `obstime`."
)
# obstime validation
coo_at_rv_obstime = self # assume we need no space motion for now
if obstime is None:
obstime = self.obstime
if obstime is None:
raise TypeError(
"Must provide an `obstime` to radial_velocity_correction, either as"
" a SkyCoord frame attribute or in the method call."
)
elif self.obstime is not None and self.frame.data.differentials:
# we do need space motion after all
coo_at_rv_obstime = self.apply_space_motion(obstime)
elif self.obstime is None:
# warn the user if the object has differentials set
if "s" in self.data.differentials:
warnings.warn(
"SkyCoord has space motion, and therefore the specified "
"position of the SkyCoord may not be the same as "
"the `obstime` for the radial velocity measurement. "
"This may affect the rv correction at the order of km/s"
"for very high proper motions sources. If you wish to "
"apply space motion of the SkyCoord to correct for this"
"the `obstime` attribute of the SkyCoord must be set",
AstropyUserWarning,
)
pos_earth, v_earth = get_body_barycentric_posvel("earth", obstime)
if kind == "barycentric":
v_origin_to_earth = v_earth
elif kind == "heliocentric":
v_sun = get_body_barycentric_posvel("sun", obstime)[1]
v_origin_to_earth = v_earth - v_sun
else:
raise ValueError(
"`kind` argument to radial_velocity_correction must "
f"be 'barycentric' or 'heliocentric', but got '{kind}'"
)
gcrs_p, gcrs_v = location.get_gcrs_posvel(obstime)
# transforming to GCRS is not the correct thing to do here, since we don't want to
# include aberration (or light deflection). Instead, only apply parallax if necessary.
icrs_cart = coo_at_rv_obstime.icrs.cartesian
icrs_cart_novel = icrs_cart.without_differentials()
if self.data.__class__ is UnitSphericalRepresentation:
targcart = icrs_cart_novel
else:
# skycoord has distances so apply parallax
obs_icrs_cart = pos_earth + gcrs_p
targcart = icrs_cart_novel - obs_icrs_cart
targcart /= targcart.norm()
if kind == "barycentric":
beta_obs = (v_origin_to_earth + gcrs_v) / speed_of_light
gamma_obs = 1 / np.sqrt(1 - beta_obs.norm() ** 2)
gr = location.gravitational_redshift(obstime)
# barycentric redshift according to eq 28 in Wright & Eastman (2014),
# neglecting Shapiro delay and effects of the star's own motion
zb = gamma_obs * (1 + beta_obs.dot(targcart)) / (1 + gr / speed_of_light)
# try and get terms corresponding to stellar motion.
if icrs_cart.differentials:
try:
ro = self.icrs.cartesian
beta_star = ro.differentials["s"].to_cartesian() / speed_of_light
# ICRS unit vector at coordinate epoch
ro = ro.without_differentials()
ro /= ro.norm()
zb *= (1 + beta_star.dot(ro)) / (1 + beta_star.dot(targcart))
except u.UnitConversionError:
warnings.warn(
"SkyCoord contains some velocity information, but not enough to"
" calculate the full space motion of the source, and so this"
" has been ignored for the purposes of calculating the radial"
" velocity correction. This can lead to errors on the order of"
" metres/second.",
AstropyUserWarning,
)
zb = zb - 1
return zb * speed_of_light
else:
# do a simpler correction ignoring time dilation and gravitational redshift
# this is adequate since Heliocentric corrections shouldn't be used if
# cm/s precision is required.
return targcart.dot(v_origin_to_earth + gcrs_v)
# Table interactions
@classmethod
def guess_from_table(cls, table, **coord_kwargs):
r"""
A convenience method to create and return a new SkyCoord from the data
in an astropy Table.
This method matches table columns that start with the case-insensitive
names of the components of the requested frames (including
differentials), if they are also followed by a non-alphanumeric
character. It will also match columns that *end* with the component name
if a non-alphanumeric character is *before* it.
For example, the first rule means columns with names like
``'RA[J2000]'`` or ``'ra'`` will be interpreted as ``ra`` attributes for
`~astropy.coordinates.ICRS` frames, but ``'RAJ2000'`` or ``'radius'``
are *not*. Similarly, the second rule applied to the
`~astropy.coordinates.Galactic` frame means that a column named
``'gal_l'`` will be used as the ``l`` component, but ``gall`` or
``'fill'`` will not.
The definition of alphanumeric here is based on Unicode's definition
of alphanumeric, except without ``_`` (which is normally considered
alphanumeric). So for ASCII, this means the non-alphanumeric characters
are ``<space>_!"#$%&'()*+,-./\:;<=>?@[]^`{|}~``.
Parameters
----------
table : `~astropy.table.Table` or subclass
The table to load data from.
**coord_kwargs
Any additional keyword arguments are passed directly to this class's
constructor.
Returns
-------
newsc : `~astropy.coordinates.SkyCoord` or subclass
The new instance.
Raises
------
ValueError
If more than one match is found in the table for a component,
unless the additional matches are also valid frame component names.
If a "coord_kwargs" is provided for a value also found in the table.
"""
_frame_cls, _frame_kwargs = _get_frame_without_data([], coord_kwargs)
frame = _frame_cls(**_frame_kwargs)
coord_kwargs["frame"] = coord_kwargs.get("frame", frame)
representation_component_names = set(
frame.get_representation_component_names()
).union(set(frame.get_representation_component_names("s")))
comp_kwargs = {}
for comp_name in representation_component_names:
# this matches things like 'ra[...]' but *not* 'rad'.
# note that the "_" must be in there explicitly, because
# "alphanumeric" usually includes underscores.
starts_with_comp = comp_name + r"(\W|\b|_)"
# this part matches stuff like 'center_ra', but *not*
# 'aura'
ends_with_comp = r".*(\W|\b|_)" + comp_name + r"\b"
# the final regex ORs together the two patterns
rex = re.compile(
rf"({starts_with_comp})|({ends_with_comp})", re.IGNORECASE | re.UNICODE
)
# find all matches
matches = {col_name for col_name in table.colnames if rex.match(col_name)}
# now need to select among matches, also making sure we don't have
# an exact match with another component
if len(matches) == 0: # no matches
continue
elif len(matches) == 1: # only one match
col_name = matches.pop()
else: # more than 1 match
# try to sieve out other components
matches -= representation_component_names - {comp_name}
# if there's only one remaining match, it worked.
if len(matches) == 1:
col_name = matches.pop()
else:
raise ValueError(
f'Found at least two matches for component "{comp_name}":'
f' "{matches}". Cannot guess coordinates from a table with this'
" ambiguity."
)
comp_kwargs[comp_name] = table[col_name]
for k, v in comp_kwargs.items():
if k in coord_kwargs:
raise ValueError(
f'Found column "{v.name}" in table, but it was already provided as'
' "{k}" keyword to guess_from_table function.'
)
else:
coord_kwargs[k] = v
return cls(**coord_kwargs)
# Name resolve
@classmethod
def from_name(cls, name, frame="icrs", parse=False, cache=True):
"""
Given a name, query the CDS name resolver to attempt to retrieve
coordinate information for that object. The search database, sesame
url, and query timeout can be set through configuration items in
``astropy.coordinates.name_resolve`` -- see docstring for
`~astropy.coordinates.get_icrs_coordinates` for more
information.
Parameters
----------
name : str
The name of the object to get coordinates for, e.g. ``'M42'``.
frame : str or `BaseCoordinateFrame` class or instance
The frame to transform the object to.
parse : bool
Whether to attempt extracting the coordinates from the name by
parsing with a regex. For object catalog names that have
J-coordinates embedded in their names, e.g.,
'CRTS SSS100805 J194428-420209', this may be much faster than a
Sesame query for the same object name. The coordinates extracted
in this way may differ from the database coordinates by a few
deci-arcseconds, so only use this option if you do not need
sub-arcsecond accuracy for coordinates.
cache : bool, optional
Determines whether to cache the results or not. To update or
overwrite an existing value, pass ``cache='update'``.
Returns
-------
coord : SkyCoord
Instance of the SkyCoord class.
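Examples
--------
A short sketch; these calls perform a remote query to the Sesame service::
>>> from astropy.coordinates import SkyCoord
>>> m42 = SkyCoord.from_name('M42')  # doctest: +REMOTE_DATA
>>> m42_gal = SkyCoord.from_name('M42', frame='galactic')  # doctest: +REMOTE_DATA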
"""
from .name_resolve import get_icrs_coordinates
icrs_coord = get_icrs_coordinates(name, parse, cache=cache)
icrs_sky_coord = cls(icrs_coord)
if frame in ("icrs", icrs_coord.__class__):
return icrs_sky_coord
else:
return icrs_sky_coord.transform_to(frame)
|
3b0928026f7382732efdf30496ca7d1fea2da2e4bb51f81d0b2ead4afcbe17ac | import numpy as np
from astropy.units import Unit, si
from astropy.units import equivalencies as eq
from astropy.units.decorators import quantity_input
from astropy.units.quantity import Quantity, SpecificTypeQuantity
__all__ = ["SpectralQuantity"]
# We don't want to run doctests in the docstrings we inherit from Quantity
__doctest_skip__ = ["SpectralQuantity.*"]
KMS = si.km / si.s
SPECTRAL_UNITS = (si.Hz, si.m, si.J, si.m**-1, KMS)
DOPPLER_CONVENTIONS = {
"radio": eq.doppler_radio,
"optical": eq.doppler_optical,
"relativistic": eq.doppler_relativistic,
}
class SpectralQuantity(SpecificTypeQuantity):
"""
One or more value(s) with spectral units.
The spectral units should be those for frequencies, wavelengths, energies,
wavenumbers, or velocities (interpreted as Doppler velocities relative to a
rest spectral value). The advantage of using this class over the regular
`~astropy.units.Quantity` class is that in `SpectralQuantity`, the
``u.spectral`` equivalency is enabled by default (allowing automatic
conversion between spectral units), and a preferred Doppler rest value and
convention can be stored for easy conversion to/from velocities.
Parameters
----------
value : ndarray or `~astropy.units.Quantity` or `SpectralQuantity`
Spectral axis data values.
unit : unit-like
Unit for the given data.
doppler_rest : `~astropy.units.Quantity` ['speed'], optional
The rest value to use for conversions from/to velocities
doppler_convention : str, optional
The convention to use when converting the spectral data to/from
velocities.
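Examples
--------
A brief, illustrative sketch of constructing a `SpectralQuantity` with a
rest value and converting it to a velocity (the rest wavelength equals the
data value here, so the result is zero by construction)::
>>> import astropy.units as u
>>> from astropy.coordinates import SpectralQuantity
>>> sq = SpectralQuantity(500 * u.nm, doppler_rest=500 * u.nm,
...                       doppler_convention='optical')
>>> sq.to(u.km / u.s)  # doctest: +SKIP
<SpectralQuantity 0. km / s>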
"""
_equivalent_unit = SPECTRAL_UNITS
_include_easy_conversion_members = True
def __new__(
cls, value, unit=None, doppler_rest=None, doppler_convention=None, **kwargs
):
obj = super().__new__(cls, value, unit=unit, **kwargs)
# If we're initializing from an existing SpectralQuantity, keep any
# parameters that aren't being overridden
if doppler_rest is None:
doppler_rest = getattr(value, "doppler_rest", None)
if doppler_convention is None:
doppler_convention = getattr(value, "doppler_convention", None)
obj._doppler_rest = doppler_rest
obj._doppler_convention = doppler_convention
return obj
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
self._doppler_rest = getattr(obj, "_doppler_rest", None)
self._doppler_convention = getattr(obj, "_doppler_convention", None)
def __quantity_subclass__(self, unit):
# Always default to just returning a Quantity, unless we explicitly
# choose to return a SpectralQuantity - even if the units match, we
# want to avoid doing things like adding two SpectralQuantity instances
# together and getting a SpectralQuantity back
if unit is self.unit:
return SpectralQuantity, True
else:
return Quantity, False
def __array_ufunc__(self, function, method, *inputs, **kwargs):
# We always return Quantity except in a few specific cases
result = super().__array_ufunc__(function, method, *inputs, **kwargs)
if (
(
function is np.multiply
or function is np.true_divide
and inputs[0] is self
)
and result.unit == self.unit
or (
function in (np.minimum, np.maximum, np.fmax, np.fmin)
and method in ("reduce", "reduceat")
)
):
result = result.view(self.__class__)
result.__array_finalize__(self)
else:
if result is self:
raise TypeError(
"Cannot store the result of this operation in"
f" {self.__class__.__name__}"
)
if result.dtype.kind == "b":
result = result.view(np.ndarray)
else:
result = result.view(Quantity)
return result
@property
def doppler_rest(self):
"""
The rest value of the spectrum used for transformations to/from
velocity space.
Returns
-------
`~astropy.units.Quantity` ['speed']
Rest value as an astropy `~astropy.units.Quantity` object.
"""
return self._doppler_rest
@doppler_rest.setter
@quantity_input(value=SPECTRAL_UNITS)
def doppler_rest(self, value):
"""
New rest value needed for velocity-space conversions.
Parameters
----------
value : `~astropy.units.Quantity` ['speed']
Rest value.
"""
if self._doppler_rest is not None:
raise AttributeError(
"doppler_rest has already been set, and cannot be changed. Use the"
" ``to`` method to convert the spectral values(s) to use a different"
" rest value"
)
self._doppler_rest = value
@property
def doppler_convention(self):
"""
The defined convention for conversions to/from velocity space.
Returns
-------
str
One of 'optical', 'radio', or 'relativistic' representing the
equivalency used in the unit conversions.
"""
return self._doppler_convention
@doppler_convention.setter
def doppler_convention(self, value):
"""
New velocity convention used for velocity space conversions.
Parameters
----------
value : str or None
The new Doppler convention; one of 'optical', 'radio', or 'relativistic'.
Notes
-----
More information on the equations dictating the transformations can be
found in the astropy documentation [1]_.
References
----------
.. [1] Astropy documentation: https://docs.astropy.org/en/stable/units/equivalencies.html#spectral-doppler-equivalencies
"""
if self._doppler_convention is not None:
raise AttributeError(
"doppler_convention has already been set, and cannot be changed. Use"
" the ``to`` method to convert the spectral values(s) to use a"
" different convention"
)
if value is not None and value not in DOPPLER_CONVENTIONS:
raise ValueError(
"doppler_convention should be one of"
f" {'/'.join(sorted(DOPPLER_CONVENTIONS))}"
)
self._doppler_convention = value
@quantity_input(doppler_rest=SPECTRAL_UNITS)
def to(self, unit, equivalencies=[], doppler_rest=None, doppler_convention=None):
"""
Return a new `~astropy.coordinates.SpectralQuantity` object with the specified unit.
By default, the ``spectral`` equivalency will be enabled, as well as
one of the Doppler equivalencies if converting to/from velocities.
Parameters
----------
unit : unit-like
An object that represents the unit to convert to. Must be
an `~astropy.units.UnitBase` object or a string parseable
by the `~astropy.units` package, and should be a spectral unit.
equivalencies : list of `~astropy.units.equivalencies.Equivalency`, optional
A list of equivalence pairs to try if the units are not
directly convertible (along with spectral).
See :ref:`astropy:unit_equivalencies`.
If not provided or ``[]``, spectral equivalencies will be used.
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
doppler_rest : `~astropy.units.Quantity` ['speed'], optional
The rest value used when converting to/from velocities. This will
also be set as an attribute on the output
`~astropy.coordinates.SpectralQuantity`.
doppler_convention : {'relativistic', 'optical', 'radio'}, optional
The Doppler convention used when converting to/from velocities.
This will also be set as an attribute on the output
`~astropy.coordinates.SpectralQuantity`.
Returns
-------
`SpectralQuantity`
New spectral coordinate object with data converted to the new unit.
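Examples
--------
A sketch of converting wavelengths to optical velocities relative to an
assumed rest wavelength (the numbers are illustrative)::
>>> import astropy.units as u
>>> from astropy.coordinates import SpectralQuantity
>>> sq = SpectralQuantity([500.1, 500.2] * u.nm)
>>> sq.to(u.km / u.s, doppler_rest=500 * u.nm,
...       doppler_convention='optical')  # doctest: +SKIP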
"""
# Make sure units can be passed as strings
unit = Unit(unit)
# If equivalencies is explicitly set to None, we should just use the
# default Quantity.to with equivalencies also set to None
if equivalencies is None:
result = super().to(unit, equivalencies=None)
result = result.view(self.__class__)
result.__array_finalize__(self)
return result
# FIXME: need to consider case where doppler equivalency is passed in
# equivalencies list, or if the u.spectral equivalency is already passed
if doppler_rest is None:
doppler_rest = self._doppler_rest
if doppler_convention is None:
doppler_convention = self._doppler_convention
elif doppler_convention not in DOPPLER_CONVENTIONS:
raise ValueError(
"doppler_convention should be one of"
f" {'/'.join(sorted(DOPPLER_CONVENTIONS))}"
)
if self.unit.is_equivalent(KMS) and unit.is_equivalent(KMS):
# Special case: if the current and final units are both velocity,
# and either the rest value or the convention are different, we
# need to convert back to frequency temporarily.
if doppler_convention is not None and self._doppler_convention is None:
raise ValueError("Original doppler_convention not set")
if doppler_rest is not None and self._doppler_rest is None:
raise ValueError("Original doppler_rest not set")
if doppler_rest is None and doppler_convention is None:
result = super().to(unit, equivalencies=equivalencies)
result = result.view(self.__class__)
result.__array_finalize__(self)
return result
elif (doppler_rest is None) is not (doppler_convention is None):
raise ValueError(
"Either both or neither doppler_rest and doppler_convention should"
" be defined for velocity conversions"
)
vel_equiv1 = DOPPLER_CONVENTIONS[self._doppler_convention](
self._doppler_rest
)
freq = super().to(si.Hz, equivalencies=equivalencies + vel_equiv1)
vel_equiv2 = DOPPLER_CONVENTIONS[doppler_convention](doppler_rest)
result = freq.to(unit, equivalencies=equivalencies + vel_equiv2)
else:
additional_equivalencies = eq.spectral()
if self.unit.is_equivalent(KMS) or unit.is_equivalent(KMS):
if doppler_convention is None:
raise ValueError(
"doppler_convention not set, cannot convert to/from velocities"
)
if doppler_rest is None:
raise ValueError(
"doppler_rest not set, cannot convert to/from velocities"
)
additional_equivalencies = (
additional_equivalencies
+ DOPPLER_CONVENTIONS[doppler_convention](doppler_rest)
)
result = super().to(
unit, equivalencies=equivalencies + additional_equivalencies
)
# Since we have to explicitly specify when we want to keep this as a
# SpectralQuantity, we need to convert it back from a Quantity to
# a SpectralQuantity here. Note that we don't use __array_finalize__
# here since we might need to set the output doppler convention and
# rest based on the parameters passed to 'to'
result = result.view(self.__class__)
result.__array_finalize__(self)
result._doppler_convention = doppler_convention
result._doppler_rest = doppler_rest
return result
def to_value(self, unit=None, *args, **kwargs):
if unit is None:
return self.view(np.ndarray)
return self.to(unit, *args, **kwargs).value
|
d3d858c87f518aa1cf5f8c698f71882c490c2d36482a78e27e319f59cbdc4f3b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains convenience functions for getting a coordinate object
for a named object by querying SESAME and getting the first returned result.
Note that this is intended to be a convenience, and is very simple. If you
need precise coordinates for an object you should find the appropriate
reference for that measurement and input the coordinates manually.
"""
# Standard library
import os
import re
import socket
import urllib.error
import urllib.parse
import urllib.request
# Astropy
from astropy import units as u
from astropy.utils import data
from astropy.utils.data import download_file, get_file_contents
from astropy.utils.state import ScienceState
from .sky_coordinate import SkyCoord
__all__ = ["get_icrs_coordinates"]
class sesame_url(ScienceState):
"""
The URL(s) to Sesame's web-queryable database.
"""
_value = [
"https://cds.unistra.fr/cgi-bin/nph-sesame/",
"http://vizier.cfa.harvard.edu/viz-bin/nph-sesame/",
]
@classmethod
def validate(cls, value):
# TODO: Implement me
return value
class sesame_database(ScienceState):
"""
This specifies the default database that SESAME will query when
using the name resolve mechanism in the coordinates
subpackage. Default is to search all databases, but this can be
'all', 'simbad', 'ned', or 'vizier'.
"""
_value = "all"
@classmethod
def validate(cls, value):
if value not in ["all", "simbad", "ned", "vizier"]:
raise ValueError(f"Unknown database '{value}'")
return value
class NameResolveError(Exception):
pass
def _parse_response(resp_data):
"""
Given a string response from SESAME, parse out the coordinates by looking
for a line starting with a J, meaning ICRS J2000 coordinates.
Parameters
----------
resp_data : str
The string HTTP response from SESAME.
Returns
-------
ra : str
The string Right Ascension parsed from the HTTP response.
dec : str
The string Declination parsed from the HTTP response.
"""
pattr = re.compile(r"%J\s*([0-9\.]+)\s*([\+\-\.0-9]+)")
matched = pattr.search(resp_data)
if matched is None:
return None, None
else:
ra, dec = matched.groups()
return ra, dec
def get_icrs_coordinates(name, parse=False, cache=False):
"""
Retrieve an ICRS object by using an online name resolving service to
retrieve coordinates for the specified name. By default, this will
search all available databases until a match is found. If you would like
to specify the database, use the science state
``astropy.coordinates.name_resolve.sesame_database``. You can also
specify a list of servers to use for querying Sesame using the science
state ``astropy.coordinates.name_resolve.sesame_url``. This will try
each one in order until a valid response is returned. By default, this
list includes the main Sesame host and a mirror at vizier. The
configuration item `astropy.utils.data.Conf.remote_timeout` controls the
number of seconds to wait for a response from the server before giving
up.
Parameters
----------
name : str
The name of the object to get coordinates for, e.g. ``'M42'``.
parse : bool
Whether to attempt extracting the coordinates from the name by
parsing with a regex. For object catalog names that have
J-coordinates embedded in their names, e.g.,
'CRTS SSS100805 J194428-420209', this may be much faster than a
sesame query for the same object name. The coordinates extracted
in this way may differ from the database coordinates by a few
deci-arcseconds, so only use this option if you do not need
sub-arcsecond accuracy for coordinates.
cache : bool, str, optional
Determines whether to cache the results or not. Passed through to
`~astropy.utils.data.download_file`, so pass "update" to update the
cached value.
Returns
-------
coord : `astropy.coordinates.ICRS` object
The object's coordinates in the ICRS frame.
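Examples
--------
A short sketch, assuming the function is used via the public
``astropy.coordinates`` namespace; the query requires network access to a
Sesame server::
>>> from astropy.coordinates import get_icrs_coordinates
>>> coord = get_icrs_coordinates('M42')  # doctest: +REMOTE_DATA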
"""
# if requested, first try to extract coordinates embedded in the object name.
# Do this first since it may be much faster than doing the sesame query
if parse:
from . import jparser
if jparser.search(name):
return jparser.to_skycoord(name)
else:
# if the parser failed, fall back to sesame query.
pass
# maybe emit a warning instead of silently falling back to sesame?
database = sesame_database.get()
# The web API just takes the first letter of the database name
db = database.upper()[0]
# Make sure we don't have duplicates in the url list
urls = []
domains = []
for url in sesame_url.get():
domain = urllib.parse.urlparse(url).netloc
# Check for duplicates
if domain not in domains:
domains.append(domain)
# Add the query to the end of the url, add to url list
fmt_url = os.path.join(url, "{db}?{name}")
fmt_url = fmt_url.format(name=urllib.parse.quote(name), db=db)
urls.append(fmt_url)
exceptions = []
for url in urls:
try:
resp_data = get_file_contents(
download_file(url, cache=cache, show_progress=False)
)
break
except urllib.error.URLError as e:
exceptions.append(e)
continue
except socket.timeout as e:
# There are some cases where urllib2 does not catch socket.timeout
# especially while receiving response data on an already previously
# working request
e.reason = (
"Request took longer than the allowed "
f"{data.conf.remote_timeout:.1f} seconds"
)
exceptions.append(e)
continue
# All Sesame URLs failed...
else:
messages = [f"{url}: {e.reason}" for url, e in zip(urls, exceptions)]
raise NameResolveError(
"All Sesame queries failed. Unable to retrieve coordinates. See errors per"
f" URL below: \n {os.linesep.join(messages)}"
)
ra, dec = _parse_response(resp_data)
if ra is None or dec is None:
if db == "A":
err = f"Unable to find coordinates for name '{name}' using {url}"
else:
err = (
f"Unable to find coordinates for name '{name}' in database"
f" {database} using {url}"
)
raise NameResolveError(err)
# Return SkyCoord object
sc = SkyCoord(ra=ra, dec=dec, unit=(u.degree, u.degree), frame="icrs")
return sc
|
a24a0ac045de7e3347f1dde5026d3073f92b28cb054aed58c550ef328ac15731 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains utility functions for working with angles. These are both
used internally in astropy.coordinates.angles, and of possible use externally.
"""
__all__ = [
"angular_separation",
"position_angle",
"offset_by",
"golden_spiral_grid",
"uniform_spherical_random_surface",
"uniform_spherical_random_volume",
]
# Third-party
import numpy as np
# Astropy
import astropy.units as u
from astropy.coordinates.representation import (
SphericalRepresentation,
UnitSphericalRepresentation,
)
_TWOPI = 2 * np.pi
def angular_separation(lon1, lat1, lon2, lat2):
"""
Angular separation between two points on a sphere.
Parameters
----------
lon1, lat1, lon2, lat2 : `~astropy.coordinates.Angle`, `~astropy.units.Quantity` or float
Longitude and latitude of the two points. Quantities should be in
angular units; floats in radians.
Returns
-------
angular separation : `~astropy.units.Quantity` ['angle'] or float
Type depends on input; ``Quantity`` in angular units, or float in
radians.
Notes
-----
The angular separation is calculated using the Vincenty formula [1]_,
which is slightly more complex and computationally expensive than
some alternatives, but is stable at all distances, including the
poles and antipodes.
.. [1] https://en.wikipedia.org/wiki/Great-circle_distance
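Examples
--------
A quick sanity check with quantities, assuming the function is imported
from the public ``astropy.coordinates`` namespace (plain floats in radians
behave the same way)::
>>> import astropy.units as u
>>> from astropy.coordinates import angular_separation
>>> angular_separation(0*u.deg, 0*u.deg, 1*u.deg, 0*u.deg).to(u.deg)  # doctest: +SKIP
<Quantity 1. deg>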
"""
sdlon = np.sin(lon2 - lon1)
cdlon = np.cos(lon2 - lon1)
slat1 = np.sin(lat1)
slat2 = np.sin(lat2)
clat1 = np.cos(lat1)
clat2 = np.cos(lat2)
num1 = clat2 * sdlon
num2 = clat1 * slat2 - slat1 * clat2 * cdlon
denominator = slat1 * slat2 + clat1 * clat2 * cdlon
return np.arctan2(np.hypot(num1, num2), denominator)
def position_angle(lon1, lat1, lon2, lat2):
"""
Position Angle (East of North) between two points on a sphere.
Parameters
----------
lon1, lat1, lon2, lat2 : `~astropy.coordinates.Angle`, `~astropy.units.Quantity` or float
Longitude and latitude of the two points. Quantities should be in
angular units; floats in radians.
Returns
-------
pa : `~astropy.coordinates.Angle`
The (positive) position angle of the vector pointing from position 1 to
position 2. If any of the angles are arrays, this will contain an array
following the appropriate `numpy` broadcasting rules.
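Examples
--------
A brief sketch, assuming the function is imported from the public
``astropy.coordinates`` namespace; the offset chosen gives a position angle
of roughly 45 degrees::
>>> import astropy.units as u
>>> from astropy.coordinates import position_angle
>>> pa = position_angle(0*u.deg, 0*u.deg, 1*u.deg, 1*u.deg).to(u.deg)  # doctest: +SKIP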
"""
from .angles import Angle
deltalon = lon2 - lon1
colat = np.cos(lat2)
x = np.sin(lat2) * np.cos(lat1) - colat * np.sin(lat1) * np.cos(deltalon)
y = np.sin(deltalon) * colat
return Angle(np.arctan2(y, x), u.radian).wrap_at(360 * u.deg)
def offset_by(lon, lat, posang, distance):
"""
Point with the given offset from the given point.
Parameters
----------
lon, lat, posang, distance : `~astropy.coordinates.Angle`, `~astropy.units.Quantity` or float
Longitude and latitude of the starting point,
position angle and distance to the final point.
Quantities should be in angular units; floats in radians.
Polar points at lat = +/-90 are treated as the limit of +/-(90-epsilon) with the same lon.
Returns
-------
lon, lat : `~astropy.coordinates.Angle`
The position of the final point. If any of the angles are arrays,
these will contain arrays following the appropriate `numpy` broadcasting rules.
0 <= lon < 2pi.
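Examples
--------
A brief sketch: step 1 degree due East (position angle 90 deg) from an
illustrative starting point. The import path is assumed to be this module's
public location under ``astropy.coordinates``::
>>> import astropy.units as u
>>> from astropy.coordinates import offset_by
>>> newlon, newlat = offset_by(lon=10*u.deg, lat=20*u.deg,
...                            posang=90*u.deg, distance=1*u.deg)  # doctest: +SKIP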
"""
from .angles import Angle
# Calculations are done using the spherical trigonometry sine and cosine rules
# of the triangle A at North Pole, B at starting point, C at final point
# with angles A (change in lon), B (posang), C (not used, but negative reciprocal posang)
# with sides a (distance), b (final co-latitude), c (starting colatitude)
# B, a, c are knowns; A and b are unknowns
# https://en.wikipedia.org/wiki/Spherical_trigonometry
cos_a = np.cos(distance)
sin_a = np.sin(distance)
cos_c = np.sin(lat)
sin_c = np.cos(lat)
cos_B = np.cos(posang)
sin_B = np.sin(posang)
# cosine rule: Know two sides: a,c and included angle: B; get unknown side b
cos_b = cos_c * cos_a + sin_c * sin_a * cos_B
# sin_b = np.sqrt(1 - cos_b**2)
# sine rule and cosine rule for A (using both lets arctan2 pick quadrant).
# multiplying both sin_A and cos_A by x=sin_b * sin_c prevents /0 errors
# at poles. Correct for the x=0 multiplication a few lines down.
# sin_A/sin_a == sin_B/sin_b # Sine rule
xsin_A = sin_a * sin_B * sin_c
# cos_a == cos_b * cos_c + sin_b * sin_c * cos_A # cosine rule
xcos_A = cos_a - cos_b * cos_c
A = Angle(np.arctan2(xsin_A, xcos_A), u.radian)
# Treat the poles as if they are infinitesimally far from pole but at given lon
small_sin_c = sin_c < 1e-12
if small_sin_c.any():
# For south pole (cos_c = -1), A = posang; for North pole, A=180 deg - posang
A_pole = (90 * u.deg + cos_c * (90 * u.deg - Angle(posang, u.radian))).to(u.rad)
if A.shape:
# broadcast to ensure the shape is like that of A, which is also
# affected by the (possible) shapes of lat, posang, and distance.
small_sin_c = np.broadcast_to(small_sin_c, A.shape)
A[small_sin_c] = A_pole[small_sin_c]
else:
A = A_pole
outlon = (Angle(lon, u.radian) + A).wrap_at(360.0 * u.deg).to(u.deg)
outlat = Angle(np.arcsin(cos_b), u.radian).to(u.deg)
return outlon, outlat
def golden_spiral_grid(size):
"""Generate a grid of points on the surface of the unit sphere using the
Fibonacci or Golden Spiral method.
.. seealso::
`Evenly distributing points on a sphere <https://stackoverflow.com/questions/9600801/evenly-distributing-n-points-on-a-sphere>`_
Parameters
----------
size : int
The number of points to generate.
Returns
-------
rep : `~astropy.coordinates.UnitSphericalRepresentation`
The grid of points.
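Examples
--------
A minimal sketch generating a small grid and checking its size (the grid
size is illustrative)::
>>> from astropy.coordinates import golden_spiral_grid
>>> grid = golden_spiral_grid(size=100)
>>> len(grid)
100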
"""
golden_r = (1 + 5**0.5) / 2
grid = np.arange(0, size, dtype=float) + 0.5
lon = _TWOPI / golden_r * grid * u.rad
lat = np.arcsin(1 - 2 * grid / size) * u.rad
return UnitSphericalRepresentation(lon, lat)
def uniform_spherical_random_surface(size=1):
"""Generate a random sampling of points on the surface of the unit sphere.
Parameters
----------
size : int
The number of points to generate.
Returns
-------
rep : `~astropy.coordinates.UnitSphericalRepresentation`
The random points.
"""
rng = np.random # can maybe switch to this being an input later - see #11628
lon = rng.uniform(0, _TWOPI, size) * u.rad
lat = np.arcsin(rng.uniform(-1, 1, size=size)) * u.rad
return UnitSphericalRepresentation(lon, lat)
def uniform_spherical_random_volume(size=1, max_radius=1):
"""Generate a random sampling of points that follow a uniform volume
density distribution within a sphere.
Parameters
----------
size : int
The number of points to generate.
max_radius : number, quantity-like, optional
A dimensionless or unit-ful factor to scale the random distances.
Returns
-------
rep : `~astropy.coordinates.SphericalRepresentation`
The random points.
"""
rng = np.random # can maybe switch to this being an input later - see #11628
usph = uniform_spherical_random_surface(size=size)
r = np.cbrt(rng.uniform(size=size)) * u.Quantity(max_radius, copy=False)
return SphericalRepresentation(usph.lon, usph.lat, r)
|
9cfebda62c43f5fd076ec00b7205478d240e20b349f1182069b174c730fac5a4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains a general framework for defining graphs of transformations
between coordinates, suitable for either spatial coordinates or more generalized
coordinate systems.
The fundamental idea is that each class is a node in the transformation graph,
and transitions from one node to another are defined as functions (or methods)
wrapped in transformation objects.
This module also includes more specific transformation classes for
celestial/spatial coordinate frames, generally focused around matrix-style
transformations that are typically how the algorithms are defined.
"""
import heapq
import subprocess
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from contextlib import contextmanager, suppress
from inspect import signature
from warnings import warn
import numpy as np
from astropy import units as u
from astropy.utils.exceptions import AstropyWarning
__all__ = [
"TransformGraph",
"CoordinateTransform",
"FunctionTransform",
"BaseAffineTransform",
"AffineTransform",
"StaticMatrixTransform",
"DynamicMatrixTransform",
"FunctionTransformWithFiniteDifference",
"CompositeTransform",
]
def frame_attrs_from_set(frame_set):
"""
A `dict` of all the attributes of all frame classes in this
`~astropy.coordinates.TransformGraph`.
Broken out of the class so this can be called on a temporary frame set to
validate new additions to the transform graph before actually adding them.
"""
result = {}
for frame_cls in frame_set:
result.update(frame_cls.frame_attributes)
return result
def frame_comps_from_set(frame_set):
"""
A `set` of all component names ever defined within any frame class in
this `~astropy.coordinates.TransformGraph`.
Broken out of the class so this can be called on a temporary frame set to
validate new additions to the transform graph before actually adding them.
"""
result = set()
for frame_cls in frame_set:
rep_info = frame_cls._frame_specific_representation_info
for mappings in rep_info.values():
for rep_map in mappings:
result.update([rep_map.framename])
return result
class TransformGraph:
"""
A graph representing the paths between coordinate frames.
"""
def __init__(self):
self._graph = defaultdict(dict)
self.invalidate_cache() # generates cache entries
@property
def _cached_names(self):
if self._cached_names_dct is None:
self._cached_names_dct = dct = {}
for c in self.frame_set:
nm = getattr(c, "name", None)
if nm is not None:
if not isinstance(nm, list):
nm = [nm]
for name in nm:
dct[name] = c
return self._cached_names_dct
@property
def frame_set(self):
"""
A `set` of all the frame classes present in this TransformGraph.
"""
if self._cached_frame_set is None:
self._cached_frame_set = set()
for a in self._graph:
self._cached_frame_set.add(a)
for b in self._graph[a]:
self._cached_frame_set.add(b)
return self._cached_frame_set.copy()
@property
def frame_attributes(self):
"""
A `dict` of all the attributes of all frame classes in this TransformGraph.
"""
if self._cached_frame_attributes is None:
self._cached_frame_attributes = frame_attrs_from_set(self.frame_set)
return self._cached_frame_attributes
@property
def frame_component_names(self):
"""
A `set` of all component names ever defined within any frame class in
this TransformGraph.
"""
if self._cached_component_names is None:
self._cached_component_names = frame_comps_from_set(self.frame_set)
return self._cached_component_names
def invalidate_cache(self):
"""
Invalidates the cache that stores optimizations for traversing the
transform graph. This is called automatically when transforms
are added or removed, but will need to be called manually if
weights on transforms are modified inplace.
"""
self._cached_names_dct = None
self._cached_frame_set = None
self._cached_frame_attributes = None
self._cached_component_names = None
self._shortestpaths = {}
self._composite_cache = {}
def add_transform(self, fromsys, tosys, transform):
"""Add a new coordinate transformation to the graph.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
transform : `~astropy.coordinates.CoordinateTransform`
The transformation object. Typically a
`~astropy.coordinates.CoordinateTransform` object, although it may
be some other callable that is called with the same signature.
Raises
------
TypeError
If ``fromsys`` or ``tosys`` are not classes or ``transform`` is
not callable.
"""
if not isinstance(fromsys, type):
raise TypeError("fromsys must be a class")
if not isinstance(tosys, type):
raise TypeError("tosys must be a class")
if not callable(transform):
raise TypeError("transform must be callable")
frame_set = self.frame_set.copy()
frame_set.add(fromsys)
frame_set.add(tosys)
# Now we check to see if any attributes on the proposed frames override
# *any* component names, which we can't allow for some of the logic in
# the SkyCoord initializer to work
attrs = set(frame_attrs_from_set(frame_set).keys())
comps = frame_comps_from_set(frame_set)
invalid_attrs = attrs.intersection(comps)
if invalid_attrs:
invalid_frames = set()
for attr in invalid_attrs:
if attr in fromsys.frame_attributes:
invalid_frames.update([fromsys])
if attr in tosys.frame_attributes:
invalid_frames.update([tosys])
raise ValueError(
f"Frame(s) {list(invalid_frames)} contain invalid attribute names:"
f" {invalid_attrs}\nFrame attributes can not conflict with *any* of"
" the frame data component names (see"
" `frame_transform_graph.frame_component_names`)."
)
self._graph[fromsys][tosys] = transform
self.invalidate_cache()
def remove_transform(self, fromsys, tosys, transform):
"""
Removes a coordinate transform from the graph.
Parameters
----------
fromsys : class or None
The coordinate frame *class* to start from. If `None`,
``transform`` will be searched for and removed (``tosys`` must
also be `None`).
tosys : class or None
The coordinate frame *class* to transform into. If `None`,
``transform`` will be searched for and removed (``fromsys`` must
also be `None`).
transform : callable or None
The transformation object to be removed or `None`. If `None`
and ``tosys`` and ``fromsys`` are supplied, there will be no
check to ensure the correct object is removed.
"""
if fromsys is None or tosys is None:
if not (tosys is None and fromsys is None):
raise ValueError("fromsys and tosys must both be None if either are")
if transform is None:
raise ValueError("cannot give all Nones to remove_transform")
# search for the requested transform by brute force and remove it
for a in self._graph:
agraph = self._graph[a]
for b in agraph:
if agraph[b] is transform:
del agraph[b]
fromsys = a
break
# If the transform was found, need to break out of the outer for loop too
if fromsys:
break
else:
raise ValueError(f"Could not find transform {transform} in the graph")
else:
if transform is None:
self._graph[fromsys].pop(tosys, None)
else:
curr = self._graph[fromsys].get(tosys, None)
if curr is transform:
self._graph[fromsys].pop(tosys)
else:
raise ValueError(
f"Current transform from {fromsys} to {tosys} is not"
f" {transform}"
)
# Remove the subgraph if it is now empty
if self._graph[fromsys] == {}:
self._graph.pop(fromsys)
self.invalidate_cache()
def find_shortest_path(self, fromsys, tosys):
"""
Computes the shortest distance along the transform graph from
one system to another.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
Returns
-------
path : list of class or None
The path from ``fromsys`` to ``tosys`` as an in-order sequence
of classes. This list includes *both* ``fromsys`` and
``tosys``. Is `None` if there is no possible path.
distance : float or int
The total distance/priority from ``fromsys`` to ``tosys``. If
priorities are not set this is the number of transforms
needed. Is ``inf`` if there is no possible path.
"""
inf = float("inf")
# special-case the 0 or 1-path
if tosys is fromsys:
if tosys not in self._graph[fromsys]:
# Means there's no transform necessary to go from it to itself.
return [tosys], 0
if tosys in self._graph[fromsys]:
# this will also catch the case where tosys is fromsys, but has
# a defined transform.
t = self._graph[fromsys][tosys]
return [fromsys, tosys], float(t.priority if hasattr(t, "priority") else 1)
# otherwise, need to construct the path:
if fromsys in self._shortestpaths:
# already have a cached result
fpaths = self._shortestpaths[fromsys]
if tosys in fpaths:
return fpaths[tosys]
else:
return None, inf
# use Dijkstra's algorithm to find shortest path in all other cases
nodes = []
# first make the list of nodes
for a in self._graph:
if a not in nodes:
nodes.append(a)
for b in self._graph[a]:
if b not in nodes:
nodes.append(b)
if fromsys not in nodes or tosys not in nodes:
# fromsys or tosys are isolated or not registered, so there's
# certainly no way to get from one to the other
return None, inf
edgeweights = {}
# construct another graph that is a dict of dicts of priorities
# (used as edge weights in Dijkstra's algorithm)
for a in self._graph:
edgeweights[a] = aew = {}
agraph = self._graph[a]
for b in agraph:
aew[b] = float(getattr(agraph[b], "priority", 1))
# entries in q are [distance, count, nodeobj, pathlist]
# count is needed because in py 3.x, tie-breaking fails on the nodes.
# this way, insertion order is preserved if the weights are the same
q = [[inf, i, n, []] for i, n in enumerate(nodes) if n is not fromsys]
q.insert(0, [0, -1, fromsys, []])
# this dict will store the distance to node from ``fromsys`` and the path
result = {}
# definitely starts as a valid heap because of the insert line; from the
# node to itself is always the shortest distance
while len(q) > 0:
d, orderi, n, path = heapq.heappop(q)
if d == inf:
# everything left is unreachable from fromsys, just copy them to
# the results and jump out of the loop
result[n] = (None, d)
for d, orderi, n, path in q:
result[n] = (None, d)
break
else:
result[n] = (path, d)
path.append(n)
if n not in edgeweights:
# this is a system that can be transformed to, but not from.
continue
for n2 in edgeweights[n]:
if n2 not in result: # only consider nodes whose shortest path is not yet finalized
# find where n2 is in the heap
for i in range(len(q)):
if q[i][2] == n2:
break
else:
raise ValueError(
"n2 not in heap - this should be impossible!"
)
newd = d + edgeweights[n][n2]
if newd < q[i][0]:
q[i][0] = newd
q[i][3] = list(path)
heapq.heapify(q)
# cache for later use
self._shortestpaths[fromsys] = result
return result[tosys]
def get_transform(self, fromsys, tosys):
"""Generates and returns the CompositeTransform for a transformation
between two coordinate systems.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
Returns
-------
trans : `~astropy.coordinates.CompositeTransform` or None
If there is a path from ``fromsys`` to ``tosys``, this is a
transform object for that path. If no path could be found, this is
`None`.
Notes
-----
A `~astropy.coordinates.CompositeTransform` is always returned, because
`~astropy.coordinates.CompositeTransform` is slightly more adaptable in
the way it can be called than other transform classes. Specifically, it
takes care of intermediate steps of transformations in a way that is
consistent with 1-hop transformations.
"""
if not isinstance(fromsys, type):
raise TypeError("fromsys is not a class")
if not isinstance(tosys, type):
raise TypeError("tosys is not a class")
path, distance = self.find_shortest_path(fromsys, tosys)
if path is None:
return None
transforms = []
currsys = fromsys
for p in path[1:]: # first element is fromsys so we skip it
transforms.append(self._graph[currsys][p])
currsys = p
fttuple = (fromsys, tosys)
if fttuple not in self._composite_cache:
comptrans = CompositeTransform(
transforms, fromsys, tosys, register_graph=False
)
self._composite_cache[fttuple] = comptrans
return self._composite_cache[fttuple]
def lookup_name(self, name):
"""
Tries to locate the coordinate class with the provided alias.
Parameters
----------
name : str
The alias to look up.
Returns
-------
`BaseCoordinateFrame` subclass
The coordinate class corresponding to the ``name`` or `None` if
no such class exists.
"""
return self._cached_names.get(name, None)
def get_names(self):
"""
Returns all available transform names. They will all be
valid arguments to `lookup_name`.
Returns
-------
nms : list
The aliases for coordinate systems.
"""
return list(self._cached_names.keys())
def to_dot_graph(
self,
priorities=True,
addnodes=[],
savefn=None,
savelayout="plain",
saveformat=None,
color_edges=True,
):
"""
Converts this transform graph to the graphviz_ DOT format.
Optionally saves it (requires `graphviz`_ be installed and on your path).
.. _graphviz: http://www.graphviz.org/
Parameters
----------
priorities : bool
If `True`, show the priority values for each transform. Otherwise,
they will not be included in the graph.
addnodes : sequence of str
Additional coordinate systems to add (this can include systems
already in the transform graph, but they will only appear once).
savefn : None or str
The file name to save this graph to or `None` to not save
to a file.
savelayout : str
The graphviz program to use to layout the graph (see
graphviz_ for details) or 'plain' to just save the DOT graph
content. Ignored if ``savefn`` is `None`.
saveformat : str
The graphviz output format. (e.g. the ``-Txxx`` option for
the command line program - see graphviz docs for details).
Ignored if ``savefn`` is `None`.
color_edges : bool
Color the edges between two nodes (frames) based on the type of
transform. ``FunctionTransform``: red, ``StaticMatrixTransform``:
blue, ``DynamicMatrixTransform``: green.
Returns
-------
dotgraph : str
A string with the DOT format graph.
"""
nodes = []
# find the node names
for a in self._graph:
if a not in nodes:
nodes.append(a)
for b in self._graph[a]:
if b not in nodes:
nodes.append(b)
for node in addnodes:
if node not in nodes:
nodes.append(node)
nodenames = []
invclsaliases = {
f: [k for k, v in self._cached_names.items() if v == f]
for f in self.frame_set
}
for n in nodes:
if n in invclsaliases:
aliases = "`\\n`".join(invclsaliases[n])
nodenames.append(
'{0} [shape=oval label="{0}\\n`{1}`"]'.format(n.__name__, aliases)
)
else:
nodenames.append(n.__name__ + "[ shape=oval ]")
edgenames = []
# Now the edges
for a in self._graph:
agraph = self._graph[a]
for b in agraph:
transform = agraph[b]
pri = transform.priority if hasattr(transform, "priority") else 1
color = trans_to_color[transform.__class__] if color_edges else "black"
edgenames.append((a.__name__, b.__name__, pri, color))
# generate simple dot format graph
lines = ["digraph AstropyCoordinateTransformGraph {"]
lines.append("graph [rankdir=LR]")
lines.append("; ".join(nodenames) + ";")
for enm1, enm2, weights, color in edgenames:
labelstr_fmt = "[ {0} {1} ]"
if priorities:
priority_part = f'label = "{weights}"'
else:
priority_part = ""
color_part = f'color = "{color}"'
labelstr = labelstr_fmt.format(priority_part, color_part)
lines.append(f"{enm1} -> {enm2}{labelstr};")
lines.append("")
lines.append("overlap=false")
lines.append("}")
dotgraph = "\n".join(lines)
if savefn is not None:
if savelayout == "plain":
with open(savefn, "w") as f:
f.write(dotgraph)
else:
args = [savelayout]
if saveformat is not None:
args.append("-T" + saveformat)
proc = subprocess.Popen(
args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = proc.communicate(dotgraph.encode())
if proc.returncode != 0:
raise OSError("problem running graphviz: \n" + stderr.decode())
with open(savefn, "wb") as f:
f.write(stdout)
return dotgraph
def to_networkx_graph(self):
"""
Converts this transform graph into a networkx graph.
.. note::
You must have the `networkx <https://networkx.github.io/>`_
package installed for this to work.
Returns
-------
nxgraph : ``networkx.Graph``
This `~astropy.coordinates.TransformGraph` as a
`networkx.Graph <https://networkx.github.io/documentation/stable/reference/classes/graph.html>`_.
"""
import networkx as nx
nxgraph = nx.Graph()
# first make the nodes
for a in self._graph:
if a not in nxgraph:
nxgraph.add_node(a)
for b in self._graph[a]:
if b not in nxgraph:
nxgraph.add_node(b)
# Now the edges
for a in self._graph:
agraph = self._graph[a]
for b in agraph:
transform = agraph[b]
pri = transform.priority if hasattr(transform, "priority") else 1
color = trans_to_color[transform.__class__]
nxgraph.add_edge(a, b, weight=pri, color=color)
return nxgraph
def transform(self, transcls, fromsys, tosys, priority=1, **kwargs):
"""A function decorator for defining transformations.
.. note::
If decorating a static method of a class, ``@staticmethod``
should be added *above* this decorator.
Parameters
----------
transcls : class
The class of the transformation object to create.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
Additional keyword arguments are passed into the ``transcls``
constructor.
Returns
-------
deco : function
A function that can be called on another function as a decorator
(see example).
Notes
-----
This decorator assumes the first argument of the ``transcls``
initializer accepts a callable, and that the second and third are
``fromsys`` and ``tosys``. If this is not true, you should just
initialize the class manually and use
`~astropy.coordinates.TransformGraph.add_transform` instead of this
decorator.
Examples
--------
::
graph = TransformGraph()
class Frame1(BaseCoordinateFrame):
...
class Frame2(BaseCoordinateFrame):
...
@graph.transform(FunctionTransform, Frame1, Frame2)
def f1_to_f2(f1_obj):
... do something with f1_obj ...
return f2_obj
"""
def deco(func):
# this doesn't do anything directly with the transform because
# ``register_graph=self`` stores it in the transform graph
# automatically
transcls(
func, fromsys, tosys, priority=priority, register_graph=self, **kwargs
)
return func
return deco
def _add_merged_transform(self, fromsys, tosys, *furthersys, priority=1):
"""
Add a single-step transform that encapsulates a multi-step transformation path,
using the transforms that already exist in the graph.
The created transform internally calls the existing transforms. If all of the
transforms are affine, the merged transform is
`~astropy.coordinates.DynamicMatrixTransform` (if there are no
origin shifts) or `~astropy.coordinates.AffineTransform`
(otherwise). If at least one of the transforms is not affine, the merged
transform is
`~astropy.coordinates.FunctionTransformWithFiniteDifference`.
This method is primarily useful for defining loopback transformations
(i.e., where ``fromsys`` and the final ``tosys`` are the same).
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform to.
*furthersys : class
Additional coordinate frame classes to transform to in order.
priority : number
The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
Notes
-----
Even though the created transform is a single step in the graph, it
will still internally call the constituent transforms. Thus, there is
no performance benefit for using this created transform.
For Astropy's built-in frames, loopback transformations typically use
`~astropy.coordinates.ICRS` to be safe. Transforming through an inertial
frame ensures that changes in observation time and observer
location/velocity are properly accounted for.
An error will be raised if a direct transform between ``fromsys`` and
``tosys`` already exists.
"""
frames = [fromsys, tosys, *furthersys]
lastsys = frames[-1]
full_path = self.get_transform(fromsys, lastsys)
transforms = [
self.get_transform(frame_a, frame_b)
for frame_a, frame_b in zip(frames[:-1], frames[1:])
]
if None in transforms:
raise ValueError("This transformation path is not possible")
if len(full_path.transforms) == 1:
raise ValueError(
f"A direct transform for {fromsys.__name__}->{lastsys.__name__} already"
" exists"
)
self.add_transform(
fromsys,
lastsys,
CompositeTransform(
transforms, fromsys, lastsys, priority=priority
)._as_single_transform(),
)
@contextmanager
def impose_finite_difference_dt(self, dt):
"""
Context manager to impose a finite-difference time step on all applicable transformations.
For each transformation in this transformation graph that has the attribute
``finite_difference_dt``, that attribute is set to the provided value. The only standard
transformation with this attribute is
`~astropy.coordinates.FunctionTransformWithFiniteDifference`.
Parameters
----------
dt : `~astropy.units.Quantity` ['time'] or callable
If a quantity, this is the size of the differential used to do the finite difference.
If a callable, should accept ``(fromcoord, toframe)`` and return the ``dt`` value.
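Examples
--------
A minimal usage sketch; ``fast_coord`` and ``some_frame`` are placeholders
for a coordinate and a target frame, ``u`` is `astropy.units`, and the graph
here is assumed to be the global ``frame_transform_graph``::
    with frame_transform_graph.impose_finite_difference_dt(1 * u.year):
        transformed = fast_coord.transform_to(some_frame)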
"""
key = "finite_difference_dt"
saved_settings = []
try:
for to_frames in self._graph.values():
for transform in to_frames.values():
if hasattr(transform, key):
old_setting = (transform, key, getattr(transform, key))
saved_settings.append(old_setting)
setattr(transform, key, dt)
yield
finally:
for setting in saved_settings:
setattr(*setting)
# <-------------------Define the builtin transform classes-------------------->
class CoordinateTransform(metaclass=ABCMeta):
"""
An object that transforms a coordinate from one system to another.
Subclasses must implement `__call__` with the provided signature.
They should also call this superclass's ``__init__`` in their
``__init__``.
Parameters
----------
fromsys : `~astropy.coordinates.BaseCoordinateFrame` subclass
The coordinate frame class to start from.
tosys : `~astropy.coordinates.BaseCoordinateFrame` subclass
The coordinate frame class to transform into.
priority : float or int
The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `~astropy.coordinates.TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
"""
def __init__(self, fromsys, tosys, priority=1, register_graph=None):
if not isinstance(fromsys, type):
raise TypeError("fromsys must be a class")
if not isinstance(tosys, type):
raise TypeError("tosys must be a class")
self.fromsys = fromsys
self.tosys = tosys
self.priority = float(priority)
if register_graph:
# this will do the type-checking when it adds to the graph
self.register(register_graph)
else:
if not isinstance(fromsys, type) or not isinstance(tosys, type):
raise TypeError("fromsys and tosys must be classes")
self.overlapping_frame_attr_names = overlap = []
if hasattr(fromsys, "frame_attributes") and hasattr(tosys, "frame_attributes"):
# the if statement is there so that non-frame things might be usable
# if it makes sense
for from_nm in fromsys.frame_attributes:
if from_nm in tosys.frame_attributes:
overlap.append(from_nm)
def register(self, graph):
"""
Add this transformation to the requested Transformation graph,
replacing anything already connecting these two coordinates.
Parameters
----------
graph : `~astropy.coordinates.TransformGraph` object
The graph to register this transformation with.
"""
graph.add_transform(self.fromsys, self.tosys, self)
def unregister(self, graph):
"""
Remove this transformation from the requested transformation
graph.
Parameters
----------
graph : a TransformGraph object
The graph to unregister this transformation from.
Raises
------
ValueError
If this is not currently in the transform graph.
"""
graph.remove_transform(self.fromsys, self.tosys, self)
@abstractmethod
def __call__(self, fromcoord, toframe):
"""
Does the actual coordinate transformation from the ``fromsys`` class to
the ``tosys`` class.
Parameters
----------
fromcoord : `~astropy.coordinates.BaseCoordinateFrame` subclass instance
An object of class matching ``fromsys`` that is to be transformed.
toframe : object
An object that has the attributes necessary to fully specify the
frame. That is, it must have attributes with names that match the
keys of the dictionary ``tosys.frame_attributes``.
Typically this is of class ``tosys``, but it *might* be
some other class as long as it has the appropriate attributes.
Returns
-------
tocoord : `~astropy.coordinates.BaseCoordinateFrame` subclass instance
The new coordinate after the transform has been applied.
"""
class FunctionTransform(CoordinateTransform):
"""
A coordinate transformation defined by a function that accepts a
coordinate object and returns the transformed coordinate object.
Parameters
----------
func : callable
The transformation function. Should have a call signature
``func(fromcoord, toframe)``. Note that, unlike
`CoordinateTransform.__call__`, ``toframe`` is assumed to be of type
``tosys`` for this function.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `~astropy.coordinates.TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
TypeError
If ``func`` is not callable.
ValueError
If ``func`` cannot accept two arguments.
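Examples
--------
A schematic sketch; ``Frame1`` and ``Frame2`` are placeholder frame classes
and the function body is left to the user::
    def frame1_to_frame2(frame1_coord, frame2_frame):
        new_data = ...  # compute the transformed representation data
        return frame2_frame.realize_frame(new_data)
    trans = FunctionTransform(frame1_to_frame2, Frame1, Frame2,
                              register_graph=frame_transform_graph)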
"""
def __init__(self, func, fromsys, tosys, priority=1, register_graph=None):
if not callable(func):
raise TypeError("func must be callable")
with suppress(TypeError):
# parameter kinds live on inspect.Parameter, not on the Signature object
from inspect import Parameter
sig = signature(func)
kinds = [x.kind for x in sig.parameters.values()]
# ``func`` is always called as ``func(fromcoord, toframe)``, so it must
# be able to take (at least) two positional arguments
n_positional = sum(
kind in (Parameter.POSITIONAL_ONLY, Parameter.POSITIONAL_OR_KEYWORD)
for kind in kinds
)
if n_positional < 2 and Parameter.VAR_POSITIONAL not in kinds:
raise ValueError("provided function does not accept two arguments")
self.func = func
super().__init__(
fromsys, tosys, priority=priority, register_graph=register_graph
)
def __call__(self, fromcoord, toframe):
res = self.func(fromcoord, toframe)
if not isinstance(res, self.tosys):
raise TypeError(
f"the transformation function yielded {res} but "
f"should have been of type {self.tosys}"
)
if fromcoord.data.differentials and not res.data.differentials:
warn(
"Applied a FunctionTransform to a coordinate frame with "
"differentials, but the FunctionTransform does not handle "
"differentials, so they have been dropped.",
AstropyWarning,
)
return res
class FunctionTransformWithFiniteDifference(FunctionTransform):
r"""Transormation based on functions using finite difference for velocities.
A coordinate transformation that works like a
`~astropy.coordinates.FunctionTransform`, but computes velocity shifts
based on the finite-difference relative to one of the frame attributes.
Note that the transform function should *not* change the differential at
all in this case, as any differentials will be overridden.
When a differential is in the from coordinate, the finite difference
calculation has two components. The first part is simply the existing
differential, re-oriented (using finite-difference techniques) to
point in the direction the velocity vector has in the *new* frame. The
second component is the "induced" velocity. That is, the velocity
intrinsic to the frame itself, estimated by shifting the frame using the
``finite_difference_frameattr_name`` frame attribute a small amount
(``finite_difference_dt``) in time and re-calculating the position.
Parameters
----------
finite_difference_frameattr_name : str or None
The name of the frame attribute on the frames to use for the finite
difference. Both the to and the from frame will be checked for this
attribute, but only one needs to have it. If None, no velocity
component induced from the frame itself will be included - only the
re-orientation of any existing differential.
finite_difference_dt : `~astropy.units.Quantity` ['time'] or callable
If a quantity, this is the size of the differential used to do the
finite difference. If a callable, should accept
``(fromcoord, toframe)`` and return the ``dt`` value.
symmetric_finite_difference : bool
If True, the finite difference is computed as
:math:`\frac{x(t + \Delta t / 2) - x(t - \Delta t / 2)}{\Delta t}`, or
if False, :math:`\frac{x(t + \Delta t) - x(t)}{\Delta t}`. The latter
case has slightly better performance (and more stable finite difference
behavior).
All other parameters are identical to the initializer for
`~astropy.coordinates.FunctionTransform`.
"""
def __init__(
self,
func,
fromsys,
tosys,
priority=1,
register_graph=None,
finite_difference_frameattr_name="obstime",
finite_difference_dt=1 * u.second,
symmetric_finite_difference=True,
):
super().__init__(func, fromsys, tosys, priority, register_graph)
self.finite_difference_frameattr_name = finite_difference_frameattr_name
self.finite_difference_dt = finite_difference_dt
self.symmetric_finite_difference = symmetric_finite_difference
@property
def finite_difference_frameattr_name(self):
return self._finite_difference_frameattr_name
@finite_difference_frameattr_name.setter
def finite_difference_frameattr_name(self, value):
if value is None:
self._diff_attr_in_fromsys = self._diff_attr_in_tosys = False
else:
diff_attr_in_fromsys = value in self.fromsys.frame_attributes
diff_attr_in_tosys = value in self.tosys.frame_attributes
if diff_attr_in_fromsys or diff_attr_in_tosys:
self._diff_attr_in_fromsys = diff_attr_in_fromsys
self._diff_attr_in_tosys = diff_attr_in_tosys
else:
raise ValueError(
f"Frame attribute name {value} is not a frame attribute of"
f" {self.fromsys} or {self.tosys}"
)
self._finite_difference_frameattr_name = value
def __call__(self, fromcoord, toframe):
from .representation import CartesianDifferential, CartesianRepresentation
supcall = self.func
if fromcoord.data.differentials:
# this is the finite difference case
if callable(self.finite_difference_dt):
dt = self.finite_difference_dt(fromcoord, toframe)
else:
dt = self.finite_difference_dt
halfdt = dt / 2
from_diffless = fromcoord.realize_frame(
fromcoord.data.without_differentials()
)
reprwithoutdiff = supcall(from_diffless, toframe)
# first we use the existing differential to compute an offset due to
# the already-existing velocity, but in the new frame
fromcoord_cart = fromcoord.cartesian
if self.symmetric_finite_difference:
fwdxyz = (
fromcoord_cart.xyz
+ fromcoord_cart.differentials["s"].d_xyz * halfdt
)
fwd = supcall(
fromcoord.realize_frame(CartesianRepresentation(fwdxyz)), toframe
)
backxyz = (
fromcoord_cart.xyz
- fromcoord_cart.differentials["s"].d_xyz * halfdt
)
back = supcall(
fromcoord.realize_frame(CartesianRepresentation(backxyz)), toframe
)
else:
fwdxyz = (
fromcoord_cart.xyz + fromcoord_cart.differentials["s"].d_xyz * dt
)
fwd = supcall(
fromcoord.realize_frame(CartesianRepresentation(fwdxyz)), toframe
)
back = reprwithoutdiff
diffxyz = (fwd.cartesian - back.cartesian).xyz / dt
# now we compute the "induced" velocities due to any movement in
# the frame itself over time
attrname = self.finite_difference_frameattr_name
if attrname is not None:
if self.symmetric_finite_difference:
if self._diff_attr_in_fromsys:
kws = {attrname: getattr(from_diffless, attrname) + halfdt}
from_diffless_fwd = from_diffless.replicate(**kws)
else:
from_diffless_fwd = from_diffless
if self._diff_attr_in_tosys:
kws = {attrname: getattr(toframe, attrname) + halfdt}
fwd_frame = toframe.replicate_without_data(**kws)
else:
fwd_frame = toframe
fwd = supcall(from_diffless_fwd, fwd_frame)
if self._diff_attr_in_fromsys:
kws = {attrname: getattr(from_diffless, attrname) - halfdt}
from_diffless_back = from_diffless.replicate(**kws)
else:
from_diffless_back = from_diffless
if self._diff_attr_in_tosys:
kws = {attrname: getattr(toframe, attrname) - halfdt}
back_frame = toframe.replicate_without_data(**kws)
else:
back_frame = toframe
back = supcall(from_diffless_back, back_frame)
else:
if self._diff_attr_in_fromsys:
kws = {attrname: getattr(from_diffless, attrname) + dt}
from_diffless_fwd = from_diffless.replicate(**kws)
else:
from_diffless_fwd = from_diffless
if self._diff_attr_in_tosys:
kws = {attrname: getattr(toframe, attrname) + dt}
fwd_frame = toframe.replicate_without_data(**kws)
else:
fwd_frame = toframe
fwd = supcall(from_diffless_fwd, fwd_frame)
back = reprwithoutdiff
diffxyz += (fwd.cartesian - back.cartesian).xyz / dt
newdiff = CartesianDifferential(diffxyz)
reprwithdiff = reprwithoutdiff.data.to_cartesian().with_differentials(
newdiff
)
return reprwithoutdiff.realize_frame(reprwithdiff)
else:
return supcall(fromcoord, toframe)
class BaseAffineTransform(CoordinateTransform):
"""Base class for common functionality between the ``AffineTransform``-type
subclasses.
This base class is needed because `~astropy.coordinates.AffineTransform`
and the matrix transform classes share the ``__call__()`` method, but
differ in how they generate the affine parameters.
`~astropy.coordinates.StaticMatrixTransform` passes in a matrix stored as a
class attribute, and both of the matrix transforms pass in ``None`` for the
offset. Hence, user subclasses would likely want to subclass this (rather
than `~astropy.coordinates.AffineTransform`) if they want to provide
alternative transformations using this machinery.
"""
def _apply_transform(self, fromcoord, matrix, offset):
from .representation import (
CartesianDifferential,
RadialDifferential,
SphericalCosLatDifferential,
SphericalDifferential,
UnitSphericalRepresentation,
)
data = fromcoord.data
has_velocity = "s" in data.differentials
# Bail out if no transform is actually requested
if matrix is None and offset is None:
return data
# list of unit differentials
_unit_diffs = (
SphericalDifferential._unit_differential,
SphericalCosLatDifferential._unit_differential,
)
unit_vel_diff = has_velocity and isinstance(
data.differentials["s"], _unit_diffs
)
rad_vel_diff = has_velocity and isinstance(
data.differentials["s"], RadialDifferential
)
# Some initial checking to short-circuit doing any re-representation if
# we're going to fail anyways:
if isinstance(data, UnitSphericalRepresentation) and offset is not None:
raise TypeError(
"Position information stored on coordinate frame "
"is insufficient to do a full-space position "
"transformation (representation class: {data.__class__})"
)
elif (
has_velocity
and (unit_vel_diff or rad_vel_diff)
and offset is not None
and "s" in offset.differentials
):
# Coordinate has a velocity, but it is not a full-space velocity
# that we need to do a velocity offset
raise TypeError(
"Velocity information stored on coordinate frame is insufficient to do"
" a full-space velocity transformation (differential class:"
f" {data.differentials['s'].__class__})"
)
elif len(data.differentials) > 1:
# We should never get here because the frame initializer shouldn't
# allow more differentials, but this just adds protection for
# subclasses that somehow skip the checks
raise ValueError(
"Representation passed to AffineTransform contains multiple associated"
" differentials. Only a single differential with velocity units is"
f" presently supported (differentials: {data.differentials})."
)
# If the representation is a UnitSphericalRepresentation, and this is
# just a MatrixTransform, we have to try to turn the differential into a
# Unit version of the differential (if no radial velocity) or a
# sphericaldifferential with zero proper motion (if only a radial
# velocity) so that the matrix operation works
if (
has_velocity
and isinstance(data, UnitSphericalRepresentation)
and not unit_vel_diff
and not rad_vel_diff
):
# retrieve just velocity differential
unit_diff = data.differentials["s"].represent_as(
data.differentials["s"]._unit_differential, data
)
data = data.with_differentials({"s": unit_diff}) # updates key
# If it's a RadialDifferential, we flat-out ignore the differentials
# This is because, by this point (past the validation above), we can
# only possibly be doing a rotation-only transformation, and that
# won't change the radial differential. We later add it back in
elif rad_vel_diff:
data = data.without_differentials()
# Convert the representation and differentials to cartesian without
# having them attached to a frame
rep = data.to_cartesian()
diffs = {
k: diff.represent_as(CartesianDifferential, data)
for k, diff in data.differentials.items()
}
rep = rep.with_differentials(diffs)
# Only do transform if matrix is specified. This is for speed in
# transformations that only specify an offset (e.g., LSR)
if matrix is not None:
# Note: this applies to both representation and differentials
rep = rep.transform(matrix)
# TODO: if we decide to allow arithmetic between representations that
# contain differentials, this can be tidied up
if offset is not None:
newrep = rep.without_differentials() + offset.without_differentials()
else:
newrep = rep.without_differentials()
# We need a velocity (time derivative) and, for now, are strict: the
# representation can only contain a velocity differential and no others.
if has_velocity and not rad_vel_diff:
veldiff = rep.differentials["s"] # already in Cartesian form
if offset is not None and "s" in offset.differentials:
veldiff = veldiff + offset.differentials["s"]
newrep = newrep.with_differentials({"s": veldiff})
if isinstance(fromcoord.data, UnitSphericalRepresentation):
# Special-case this because otherwise the return object will think
# it has a valid distance with the default return (a
# CartesianRepresentation instance)
if has_velocity and not unit_vel_diff and not rad_vel_diff:
# We have to first represent as the Unit types we converted to,
# then put the d_distance information back in to the
# differentials and re-represent as their original forms
newdiff = newrep.differentials["s"]
_unit_cls = fromcoord.data.differentials["s"]._unit_differential
newdiff = newdiff.represent_as(_unit_cls, newrep)
kwargs = {comp: getattr(newdiff, comp) for comp in newdiff.components}
kwargs["d_distance"] = fromcoord.data.differentials["s"].d_distance
diffs = {
"s": fromcoord.data.differentials["s"].__class__(
copy=False, **kwargs
)
}
elif has_velocity and unit_vel_diff:
newdiff = newrep.differentials["s"].represent_as(
fromcoord.data.differentials["s"].__class__, newrep
)
diffs = {"s": newdiff}
else:
diffs = newrep.differentials
newrep = newrep.represent_as(fromcoord.data.__class__) # drops diffs
newrep = newrep.with_differentials(diffs)
elif has_velocity and unit_vel_diff:
# Here, we're in the case where the representation is not
# UnitSpherical, but the differential *is* one of the UnitSpherical
# types. We have to convert back to that differential class or the
# resulting frame will think it has a valid radial_velocity. This
# can probably be cleaned up: we currently have to go through the
# dimensional version of the differential before representing as the
# unit differential so that the units work out (the distance length
# unit shouldn't appear in the resulting proper motions)
diff_cls = fromcoord.data.differentials["s"].__class__
newrep = newrep.represent_as(
fromcoord.data.__class__, diff_cls._dimensional_differential
)
newrep = newrep.represent_as(fromcoord.data.__class__, diff_cls)
# We pulled the radial differential off of the representation
# earlier, so now we need to put it back. But, in order to do that, we
# have to turn the representation into a repr that is compatible with
# having a RadialDifferential
if has_velocity and rad_vel_diff:
newrep = newrep.represent_as(fromcoord.data.__class__)
newrep = newrep.with_differentials({"s": fromcoord.data.differentials["s"]})
return newrep
def __call__(self, fromcoord, toframe):
params = self._affine_params(fromcoord, toframe)
newrep = self._apply_transform(fromcoord, *params)
return toframe.realize_frame(newrep)
@abstractmethod
def _affine_params(self, fromcoord, toframe):
pass
class AffineTransform(BaseAffineTransform):
"""
A coordinate transformation specified as a function that yields a 3 x 3
cartesian transformation matrix and a tuple of displacement vectors.
See `~astropy.coordinates.Galactocentric` for
an example.
Parameters
----------
transform_func : callable
A callable that has the signature ``transform_func(fromcoord, toframe)``
and returns: a (3, 3) matrix that operates on ``fromcoord`` in a
Cartesian representation, and a ``CartesianRepresentation`` with
(optionally) an attached velocity ``CartesianDifferential`` to represent
a translation and offset in velocity to apply after the matrix
operation.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `~astropy.coordinates.TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
TypeError
If ``transform_func`` is not callable
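Examples
--------
A schematic sketch of a transform function; the matrix and offset shown here
are placeholder values (not a real transformation), and ``np`` and
``CartesianRepresentation`` are assumed to be available::
    def frame_a_to_frame_b(from_coo, to_frame):
        matrix = np.eye(3)  # a (3, 3) rotation/scaling matrix
        offset = CartesianRepresentation([0, 0, 25] * u.pc)  # optional shift
        return matrix, offset
    trans = AffineTransform(frame_a_to_frame_b, FrameA, FrameB)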
"""
def __init__(self, transform_func, fromsys, tosys, priority=1, register_graph=None):
if not callable(transform_func):
raise TypeError("transform_func is not callable")
self.transform_func = transform_func
super().__init__(
fromsys, tosys, priority=priority, register_graph=register_graph
)
def _affine_params(self, fromcoord, toframe):
return self.transform_func(fromcoord, toframe)
class StaticMatrixTransform(BaseAffineTransform):
"""
A coordinate transformation defined as a 3 x 3 cartesian
transformation matrix.
This is distinct from DynamicMatrixTransform in that this kind of matrix is
independent of frame attributes. That is, it depends *only* on the class of
the frame.
Parameters
----------
matrix : array-like or callable
A 3 x 3 matrix for transforming 3-vectors. In most cases will
be unitary (although this is not strictly required). If a callable,
will be called *with no arguments* to get the matrix.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `~astropy.coordinates.TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
ValueError
If the matrix is not 3 x 3
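Examples
--------
A hypothetical sketch using a simple rotation about the z axis; ``FrameA``
and ``FrameB`` are placeholder frame classes::
    from astropy.coordinates.matrix_utilities import rotation_matrix
    trans = StaticMatrixTransform(rotation_matrix(30 * u.deg, "z"),
                                  FrameA, FrameB,
                                  register_graph=frame_transform_graph)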
"""
def __init__(self, matrix, fromsys, tosys, priority=1, register_graph=None):
if callable(matrix):
matrix = matrix()
self.matrix = np.array(matrix)
if self.matrix.shape != (3, 3):
raise ValueError("Provided matrix is not 3 x 3")
super().__init__(
fromsys, tosys, priority=priority, register_graph=register_graph
)
def _affine_params(self, fromcoord, toframe):
return self.matrix, None
class DynamicMatrixTransform(BaseAffineTransform):
"""
A coordinate transformation specified as a function that yields a
3 x 3 cartesian transformation matrix.
This is similar to, but distinct from StaticMatrixTransform, in that the
matrix for this class might depend on frame attributes.
Parameters
----------
matrix_func : callable
A callable that has the signature ``matrix_func(fromcoord, toframe)`` and
returns a 3 x 3 matrix that converts ``fromcoord`` in a cartesian
representation to the new coordinate system.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `~astropy.coordinates.TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
TypeError
If ``matrix_func`` is not callable
"""
def __init__(self, matrix_func, fromsys, tosys, priority=1, register_graph=None):
if not callable(matrix_func):
raise TypeError("matrix_func is not callable")
self.matrix_func = matrix_func
super().__init__(
fromsys, tosys, priority=priority, register_graph=register_graph
)
def _affine_params(self, fromcoord, toframe):
return self.matrix_func(fromcoord, toframe), None
class CompositeTransform(CoordinateTransform):
"""
A transformation constructed by combining together a series of single-step
transformations.
Note that the intermediate frame objects are constructed using any frame
attributes in ``toframe`` or ``fromframe`` that overlap with the intermediate
frame (``toframe`` favored over ``fromframe`` if there's a conflict). Any frame
attributes that are not present use the defaults.
Parameters
----------
transforms : sequence of `~astropy.coordinates.CoordinateTransform` objects
The sequence of transformations to apply.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `~astropy.coordinates.TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
collapse_static_mats : bool
If `True`, consecutive `~astropy.coordinates.StaticMatrixTransform` instances
will be collapsed into a single transformation to speed up the
calculation.
"""
def __init__(
self,
transforms,
fromsys,
tosys,
priority=1,
register_graph=None,
collapse_static_mats=True,
):
super().__init__(
fromsys, tosys, priority=priority, register_graph=register_graph
)
if collapse_static_mats:
transforms = self._combine_statics(transforms)
self.transforms = tuple(transforms)
def _combine_statics(self, transforms):
"""
Combines consecutive StaticMatrixTransforms into single transforms and
returns the resulting list of transforms.
"""
newtrans = []
for currtrans in transforms:
lasttrans = newtrans[-1] if len(newtrans) > 0 else None
if isinstance(lasttrans, StaticMatrixTransform) and isinstance(
currtrans, StaticMatrixTransform
):
newtrans[-1] = StaticMatrixTransform(
currtrans.matrix @ lasttrans.matrix,
lasttrans.fromsys,
currtrans.tosys,
)
else:
newtrans.append(currtrans)
return newtrans
def __call__(self, fromcoord, toframe):
curr_coord = fromcoord
for t in self.transforms:
# build an intermediate frame with attributes taken from either
# `toframe`, or if not there, `fromcoord`, or if not there, use
# the defaults
# TODO: caching this information when creating the transform may
# speed things up a lot
frattrs = {}
for inter_frame_attr_nm in t.tosys.frame_attributes:
if hasattr(toframe, inter_frame_attr_nm):
attr = getattr(toframe, inter_frame_attr_nm)
frattrs[inter_frame_attr_nm] = attr
elif hasattr(fromcoord, inter_frame_attr_nm):
attr = getattr(fromcoord, inter_frame_attr_nm)
frattrs[inter_frame_attr_nm] = attr
curr_toframe = t.tosys(**frattrs)
curr_coord = t(curr_coord, curr_toframe)
# this is safe even in the case where self.transforms is empty, because
# coordinate objects are immutable, so copying is not needed
return curr_coord
def _as_single_transform(self):
"""
Return an encapsulated version of the composite transform so that it appears to
be a single transform.
The returned transform internally calls the constituent transforms. If all of
the transforms are affine, the merged transform is
`~astropy.coordinates.DynamicMatrixTransform` (if there are no
origin shifts) or `~astropy.coordinates.AffineTransform`
(otherwise). If at least one of the transforms is not affine, the merged
transform is
`~astropy.coordinates.FunctionTransformWithFiniteDifference`.
"""
# Create a list of the transforms including flattening any constituent CompositeTransform
transforms = [
t if not isinstance(t, CompositeTransform) else t._as_single_transform()
for t in self.transforms
]
if all(isinstance(t, BaseAffineTransform) for t in transforms):
# Check if there may be an origin shift
fixed_origin = all(
isinstance(t, (StaticMatrixTransform, DynamicMatrixTransform))
for t in transforms
)
# Dynamically define the transformation function
def single_transform(from_coo, to_frame):
if from_coo.is_equivalent_frame(to_frame): # loopback to the same frame
return None if fixed_origin else (None, None)
# Create a merged attribute dictionary for any intermediate frames
# For any attributes shared by the "from"/"to" frames, the "to" frame takes
# precedence because this is the same choice implemented in __call__()
merged_attr = {
name: getattr(from_coo, name) for name in from_coo.frame_attributes
}
merged_attr.update(
{
name: getattr(to_frame, name)
for name in to_frame.frame_attributes
}
)
affine_params = (None, None)
# Step through each transform step (frame A -> frame B)
for i, t in enumerate(transforms):
# Extract the relevant attributes for frame A
if i == 0:
# If frame A is actually the initial frame, preserve its attributes
a_attr = {
name: getattr(from_coo, name)
for name in from_coo.frame_attributes
}
else:
a_attr = {
k: v
for k, v in merged_attr.items()
if k in t.fromsys.frame_attributes
}
# Extract the relevant attributes for frame B
b_attr = {
k: v
for k, v in merged_attr.items()
if k in t.tosys.frame_attributes
}
# Obtain the affine parameters for the transform
# Note that we insert some dummy data into frame A because the transformation
# machinery requires there to be data present. Removing that limitation
# is a possible TODO, but some care would need to be taken because some affine
# transforms have branching code depending on the presence of differentials.
next_affine_params = t._affine_params(
t.fromsys(from_coo.data, **a_attr), t.tosys(**b_attr)
)
# Combine the affine parameters with the running set
affine_params = _combine_affine_params(
affine_params, next_affine_params
)
# If there is no origin shift, return only the matrix
return affine_params[0] if fixed_origin else affine_params
# The return type depends on whether there is any origin shift
transform_type = DynamicMatrixTransform if fixed_origin else AffineTransform
else:
# Dynamically define the transformation function
def single_transform(from_coo, to_frame):
if from_coo.is_equivalent_frame(to_frame): # loopback to the same frame
return to_frame.realize_frame(from_coo.data)
return self(from_coo, to_frame)
transform_type = FunctionTransformWithFiniteDifference
return transform_type(
single_transform, self.fromsys, self.tosys, priority=self.priority
)
def _combine_affine_params(params, next_params):
"""
Combine two sets of affine parameters.
The parameters for an affine transformation are a 3 x 3 Cartesian
transformation matrix and a displacement vector, which can include an
attached velocity. Either type of parameter can be ``None``.
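Composing ``x -> M @ x + vec`` with ``x -> next_M @ x + next_vec`` gives
``x -> (next_M @ M) @ x + (next_M @ vec + next_vec)``, which is what the code
below computes, treating a ``None`` matrix as the identity and a ``None``
vector as zero.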
"""
M, vec = params
next_M, next_vec = next_params
# Multiply the transformation matrices if they both exist
if M is not None and next_M is not None:
new_M = next_M @ M
else:
new_M = M if M is not None else next_M
if vec is not None:
# Transform the first displacement vector by the second transformation matrix
if next_M is not None:
vec = vec.transform(next_M)
# Calculate the new displacement vector
if next_vec is not None:
if "s" in vec.differentials and "s" in next_vec.differentials:
# Adding vectors with velocities takes more steps
# TODO: Add support in representation.py
new_vec_velocity = vec.differentials["s"] + next_vec.differentials["s"]
new_vec = vec.without_differentials() + next_vec.without_differentials()
new_vec = new_vec.with_differentials({"s": new_vec_velocity})
else:
new_vec = vec + next_vec
else:
new_vec = vec
else:
new_vec = next_vec
return new_M, new_vec
# map class names to colorblind-safe colors
trans_to_color = {}
trans_to_color[AffineTransform] = "#555555" # gray
trans_to_color[FunctionTransform] = "#783001" # dark red-ish/brown
trans_to_color[FunctionTransformWithFiniteDifference] = "#d95f02" # red-ish
trans_to_color[StaticMatrixTransform] = "#7570b3" # blue-ish
trans_to_color[DynamicMatrixTransform] = "#1b9e77" # green-ish
|
e4e0d758b81a2cfe3c0909a56ba933d7a26e40efed89ec1edcae5c22e8c33f2d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains convenience functions for coordinate-related functionality.
This is generally just wrapping around the object-oriented coordinates
framework, but it is useful for some users who are used to more functional
interfaces.
"""
import warnings
from collections.abc import Sequence
import erfa
import numpy as np
from astropy import units as u
from astropy.constants import c
from astropy.io import ascii
from astropy.utils import data, isiterable
from .builtin_frames import GCRS, PrecessedGeocentric
from .builtin_frames.utils import get_jd12
from .representation import CartesianRepresentation, SphericalRepresentation
from .sky_coordinate import SkyCoord
__all__ = [
"cartesian_to_spherical",
"spherical_to_cartesian",
"get_sun",
"get_constellation",
"concatenate_representations",
"concatenate",
]
def cartesian_to_spherical(x, y, z):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
Note that the resulting angles are in latitude/longitude or
elevation/azimuthal form. I.e., the origin is along the equator
rather than at the north pole.
.. note::
This function simply wraps functionality provided by the
`~astropy.coordinates.CartesianRepresentation` and
`~astropy.coordinates.SphericalRepresentation` classes. In general,
for both performance and readability, we suggest using these classes
directly. But for situations where a quick one-off conversion makes
sense, this function is provided.
Parameters
----------
x : scalar, array-like, or `~astropy.units.Quantity`
The first Cartesian coordinate.
y : scalar, array-like, or `~astropy.units.Quantity`
The second Cartesian coordinate.
z : scalar, array-like, or `~astropy.units.Quantity`
The third Cartesian coordinate.
Returns
-------
r : `~astropy.units.Quantity`
The radial coordinate (in the same units as the inputs).
lat : `~astropy.units.Quantity` ['angle']
The latitude in radians
lon : `~astropy.units.Quantity` ['angle']
The longitude in radians
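Examples
--------
An illustrative sketch (approximate values noted in the comment)::
    r, lat, lon = cartesian_to_spherical(1, 1, 0)
    # r ~ 1.414 (dimensionless), lat ~ 0 deg, lon ~ 45 deg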
"""
if not hasattr(x, "unit"):
x = x * u.dimensionless_unscaled
if not hasattr(y, "unit"):
y = y * u.dimensionless_unscaled
if not hasattr(z, "unit"):
z = z * u.dimensionless_unscaled
cart = CartesianRepresentation(x, y, z)
sph = cart.represent_as(SphericalRepresentation)
return sph.distance, sph.lat, sph.lon
def spherical_to_cartesian(r, lat, lon):
"""
Converts spherical polar coordinates to rectangular cartesian
coordinates.
Note that the input angles should be in latitude/longitude or
elevation/azimuthal form. I.e., the origin is along the equator
rather than at the north pole.
.. note::
This is a low-level function used internally in
`astropy.coordinates`. It is provided for users if they really
want to use it, but it is recommended that you use the
`astropy.coordinates` coordinate systems.
Parameters
----------
r : scalar, array-like, or `~astropy.units.Quantity`
The radial coordinate (in the same units as the inputs).
lat : scalar, array-like, or `~astropy.units.Quantity` ['angle']
The latitude (in radians if array or scalar)
lon : scalar, array-like, or `~astropy.units.Quantity` ['angle']
The longitude (in radians if array or scalar)
Returns
-------
x : float or array
The first cartesian coordinate.
y : float or array
The second cartesian coordinate.
z : float or array
The third cartesian coordinate.
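Examples
--------
An illustrative sketch that (approximately) inverts the example shown for
`cartesian_to_spherical`::
    x, y, z = spherical_to_cartesian(2 ** 0.5, 0 * u.deg, 45 * u.deg)
    # x ~ 1, y ~ 1, z ~ 0 (dimensionless)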
"""
if not hasattr(r, "unit"):
r = r * u.dimensionless_unscaled
if not hasattr(lat, "unit"):
lat = lat * u.radian
if not hasattr(lon, "unit"):
lon = lon * u.radian
sph = SphericalRepresentation(distance=r, lat=lat, lon=lon)
cart = sph.represent_as(CartesianRepresentation)
return cart.x, cart.y, cart.z
def get_sun(time):
"""
Determines the location of the sun at a given time (or times, if the input
is an array `~astropy.time.Time` object), in geocentric coordinates.
Parameters
----------
time : `~astropy.time.Time`
The time(s) at which to compute the location of the sun.
Returns
-------
newsc : `~astropy.coordinates.SkyCoord`
The location of the sun as a `~astropy.coordinates.SkyCoord` in the
`~astropy.coordinates.GCRS` frame.
Notes
-----
The algorithm for determining the sun/earth relative position is based
on the simplified version of VSOP2000 that is part of ERFA. Compared to
JPL's ephemeris, it should be good to about 4 km (in the Sun-Earth
vector) from 1900-2100 C.E., 8 km for the 1800-2200 span, and perhaps
250 km over the 1000-3000 span.
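Examples
--------
A minimal sketch; the quoted distance is approximate::
    from astropy.time import Time
    sun = get_sun(Time("2020-01-01"))
    # a SkyCoord in the GCRS frame, with distance ~0.983 AU at this epoch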
"""
earth_pv_helio, earth_pv_bary = erfa.epv00(*get_jd12(time, "tdb"))
# We have to manually do aberration because we're outputting directly into
# GCRS
earth_p = earth_pv_helio["p"]
earth_v = earth_pv_bary["v"]
# convert barycentric velocity to units of c, but keep as array for passing in to erfa
earth_v /= c.to_value(u.au / u.d)
dsun = np.sqrt(np.sum(earth_p**2, axis=-1))
invlorentz = (1 - np.sum(earth_v**2, axis=-1)) ** 0.5
properdir = erfa.ab(
earth_p / dsun.reshape(dsun.shape + (1,)), -earth_v, dsun, invlorentz
)
cartrep = CartesianRepresentation(
x=-dsun * properdir[..., 0] * u.AU,
y=-dsun * properdir[..., 1] * u.AU,
z=-dsun * properdir[..., 2] * u.AU,
)
return SkyCoord(cartrep, frame=GCRS(obstime=time))
# global dictionary that caches repeatedly-needed info for get_constellation
_constellation_data = {}
def get_constellation(coord, short_name=False, constellation_list="iau"):
"""
Determines the constellation(s) a given coordinate object contains.
Parameters
----------
coord : coordinate-like
The object to determine the constellation of.
short_name : bool
If True, the returned names are the IAU-sanctioned abbreviated
names. Otherwise, full names for the constellations are used.
constellation_list : str
The set of constellations to use. Currently only ``'iau'`` is
supported, meaning the 88 "modern" constellations endorsed by the IAU.
Returns
-------
constellation : str or string array
If ``coords`` contains a scalar coordinate, returns the name of the
constellation. If it is an array coordinate object, it returns an array
of names.
Notes
-----
To determine which constellation a point on the sky is in, this precesses
to B1875, and then uses the Delporte boundaries of the 88 modern
constellations, as tabulated by
`Roman 1987 <https://cdsarc.cds.unistra.fr/viz-bin/cat/VI/42>`_.
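Examples
--------
An illustrative sketch; the coordinate here is near the Pleiades, which lie
in Taurus::
    from astropy.coordinates import SkyCoord
    get_constellation(SkyCoord(ra=56.75 * u.deg, dec=24.12 * u.deg))
    # 'Taurus' (or 'Tau' if short_name=True)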
"""
if constellation_list != "iau":
raise ValueError("only 'iau' us currently supported for constellation_list")
# read the data files and cache them if they haven't been already
if not _constellation_data:
cdata = data.get_pkg_data_contents("data/constellation_data_roman87.dat")
ctable = ascii.read(cdata, names=["ral", "rau", "decl", "name"])
cnames = data.get_pkg_data_contents(
"data/constellation_names.dat", encoding="UTF8"
)
cnames_short_to_long = {
l[:3]: l[4:] for l in cnames.split("\n") if not l.startswith("#")
}
cnames_long = np.array([cnames_short_to_long[nm] for nm in ctable["name"]])
_constellation_data["ctable"] = ctable
_constellation_data["cnames_long"] = cnames_long
else:
ctable = _constellation_data["ctable"]
cnames_long = _constellation_data["cnames_long"]
isscalar = coord.isscalar
# if it is geocentric, we reproduce the frame but with the 1875 equinox,
# which is where the constellations are defined
# this yields a "dubious year" warning because ERFA considers the year 1875
# "dubious", probably because UTC isn't well-defined then and precession
# models aren't precisely calibrated back to then. But it's plenty
# sufficient for constellations
with warnings.catch_warnings():
warnings.simplefilter("ignore", erfa.ErfaWarning)
constel_coord = coord.transform_to(PrecessedGeocentric(equinox="B1875"))
if isscalar:
rah = constel_coord.ra.ravel().hour
decd = constel_coord.dec.ravel().deg
else:
rah = constel_coord.ra.hour
decd = constel_coord.dec.deg
constellidx = -np.ones(len(rah), dtype=int)
notided = constellidx == -1 # should be all
for i, row in enumerate(ctable):
msk = (row["ral"] < rah) & (rah < row["rau"]) & (decd > row["decl"])
constellidx[notided & msk] = i
notided = constellidx == -1
if np.sum(notided) == 0:
break
else:
raise ValueError(
f"Could not find constellation for coordinates {constel_coord[notided]}"
)
if short_name:
names = ctable["name"][constellidx]
else:
names = cnames_long[constellidx]
if isscalar:
return names[0]
else:
return names
def _concatenate_components(reps_difs, names):
"""Helper function for the concatenate function below. Gets and
concatenates all of the individual components for an iterable of
representations or differentials.
"""
values = []
for name in names:
unit0 = getattr(reps_difs[0], name).unit
# Go via to_value because np.concatenate doesn't work with Quantity
data_vals = [getattr(x, name).to_value(unit0) for x in reps_difs]
concat_vals = np.concatenate(np.atleast_1d(*data_vals))
concat_vals = concat_vals << unit0
values.append(concat_vals)
return values
def concatenate_representations(reps):
"""
Combine multiple representation objects into a single instance by
concatenating the data in each component.
Currently, all of the input representations have to be the same type. This
properly handles differential or velocity data, but all input objects must
have the same differential object type as well.
Parameters
----------
reps : sequence of `~astropy.coordinates.BaseRepresentation`
The objects to concatenate
Returns
-------
rep : `~astropy.coordinates.BaseRepresentation` subclass instance
A single representation object with its data set to the concatenation of
all the elements of the input sequence of representations.
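Examples
--------
A small sketch concatenating two scalar Cartesian representations::
    rep1 = CartesianRepresentation([1, 2, 3] * u.kpc)
    rep2 = CartesianRepresentation([4, 5, 6] * u.kpc)
    concatenate_representations([rep1, rep2])  # a shape-(2,) representation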
"""
if not isinstance(reps, (Sequence, np.ndarray)):
raise TypeError("Input must be a list or iterable of representation objects.")
# First, validate that the representations are the same, and
# concatenate all of the positional data:
rep_type = type(reps[0])
if any(type(r) != rep_type for r in reps):
raise TypeError("Input representations must all have the same type.")
# Construct the new representation with the concatenated data from the
# representations passed in
values = _concatenate_components(reps, rep_type.attr_classes.keys())
new_rep = rep_type(*values)
has_diff = any("s" in rep.differentials for rep in reps)
if has_diff and any("s" not in rep.differentials for rep in reps):
raise ValueError(
"Input representations must either all contain "
"differentials, or not contain differentials."
)
if has_diff:
dif_type = type(reps[0].differentials["s"])
if any(
"s" not in r.differentials or type(r.differentials["s"]) != dif_type
for r in reps
):
raise TypeError(
"All input representations must have the same differential type."
)
values = _concatenate_components(
[r.differentials["s"] for r in reps], dif_type.attr_classes.keys()
)
new_dif = dif_type(*values)
new_rep = new_rep.with_differentials({"s": new_dif})
return new_rep
def concatenate(coords):
"""
Combine multiple coordinate objects into a single
`~astropy.coordinates.SkyCoord`.
"Coordinate objects" here mean frame objects with data,
`~astropy.coordinates.SkyCoord`, or representation objects. Currently,
they must all be in the same frame, but in a future version this may be
relaxed to allow inhomogeneous sequences of objects.
Parameters
----------
coords : sequence of coordinate-like
The objects to concatenate
Returns
-------
cskycoord : SkyCoord
A single sky coordinate with its data set to the concatenation of all
the elements in ``coords``
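Examples
--------
A small sketch concatenating two scalar ICRS coordinates::
    from astropy.coordinates import SkyCoord
    sc1 = SkyCoord(ra=10 * u.deg, dec=20 * u.deg)
    sc2 = SkyCoord(ra=30 * u.deg, dec=40 * u.deg)
    concatenate([sc1, sc2])  # a length-2 SkyCoord in ICRS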
"""
if getattr(coords, "isscalar", False) or not isiterable(coords):
raise TypeError("The argument to concatenate must be iterable")
scs = [SkyCoord(coord, copy=False) for coord in coords]
# Check that all frames are equivalent
for sc in scs[1:]:
if not sc.is_equivalent_frame(scs[0]):
raise ValueError(
f"All inputs must have equivalent frames: {sc} != {scs[0]}"
)
# TODO: this can be changed to SkyCoord.from_representation() for a speed
# boost when we switch to using classmethods
return SkyCoord(
concatenate_representations([c.data for c in coords]), frame=scs[0].frame
)
|
d5044ac7d590f50edd0f5e6c1ac29f8521e2e788268bd1c34e18d2432fc578a8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import re
from collections.abc import Sequence
import numpy as np
from astropy import units as u
from astropy.units import IrreducibleUnit, Unit
from .baseframe import (
BaseCoordinateFrame,
_get_diff_cls,
_get_repr_cls,
frame_transform_graph,
)
from .representation import (
BaseRepresentation,
SphericalRepresentation,
UnitSphericalRepresentation,
)
"""
This module contains utility functions to make the SkyCoord initializer more modular
and maintainable. No functionality here should be in the public API, but rather used as
part of creating SkyCoord objects.
"""
PLUS_MINUS_RE = re.compile(r"(\+|\-)")
J_PREFIXED_RA_DEC_RE = re.compile(
r"""J # J prefix
([0-9]{6,7}\.?[0-9]{0,2}) # RA as HHMMSS.ss or DDDMMSS.ss, optional decimal digits
([\+\-][0-9]{6}\.?[0-9]{0,2})\s*$ # Dec as DDMMSS.ss, optional decimal digits
""",
re.VERBOSE,
)
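# For example, a designation-style string such as "J080000.00+123456.78"
# (an illustrative value, not a real catalog entry) matches this pattern.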
def _get_frame_class(frame):
"""
Get a frame class from the input `frame`, which could be a frame name
string, or frame class.
"""
if isinstance(frame, str):
frame_names = frame_transform_graph.get_names()
if frame not in frame_names:
raise ValueError(
f'Coordinate frame name "{frame}" is not a known '
f"coordinate frame ({sorted(frame_names)})"
)
frame_cls = frame_transform_graph.lookup_name(frame)
elif isinstance(frame, type) and issubclass(frame, BaseCoordinateFrame):
frame_cls = frame
else:
raise ValueError(
"Coordinate frame must be a frame name or frame class, not a"
f" '{frame.__class__.__name__}'"
)
return frame_cls
_conflict_err_msg = (
"Coordinate attribute '{0}'={1!r} conflicts with keyword argument '{0}'={2!r}. This"
" usually means an attribute was set on one of the input objects and also in the "
"keyword arguments to {3}"
)
def _get_frame_without_data(args, kwargs):
"""
Determines the coordinate frame from input SkyCoord args and kwargs.
This function extracts (removes) all frame attributes from the kwargs and
determines the frame class either using the kwargs, or using the first
element in the args (if a single frame object is passed in, for example).
This function allows a frame to be specified as a string like 'icrs' or a
frame class like ICRS, or an instance ICRS(), as long as the instance frame
attributes don't conflict with kwargs passed in (which could require a
three-way merge with the coordinate data possibly specified via the args).
"""
from .sky_coordinate import SkyCoord
# We eventually (hopefully) fill and return these by extracting the frame
# and frame attributes from the input:
frame_cls = None
frame_cls_kwargs = {}
# The first place to check: the frame could be specified explicitly
frame = kwargs.pop("frame", None)
if frame is not None:
# Here the frame was explicitly passed in as a keyword argument.
# If the frame is an instance or SkyCoord, we extract the attributes
# and split the instance into the frame class and an attributes dict
if isinstance(frame, SkyCoord):
# If the frame was passed as a SkyCoord, we also want to preserve
# any extra attributes (e.g., obstime) if they are not already
# specified in the kwargs. We preserve these extra attributes by
# adding them to the kwargs dict:
for attr in frame._extra_frameattr_names:
if attr in kwargs and np.any(getattr(frame, attr) != kwargs[attr]):
# This SkyCoord attribute passed in with the frame= object
# conflicts with an attribute passed in directly to the
# SkyCoord initializer as a kwarg:
raise ValueError(
_conflict_err_msg.format(
attr, getattr(frame, attr), kwargs[attr], "SkyCoord"
)
)
else:
kwargs[attr] = getattr(frame, attr)
frame = frame.frame
if isinstance(frame, BaseCoordinateFrame):
# Extract any frame attributes
for attr in frame.frame_attributes:
# If the frame was specified as an instance, we have to make
# sure that no frame attributes were specified as kwargs - this
# would require a potential three-way merge:
if attr in kwargs:
raise ValueError(
f"Cannot specify frame attribute '{attr}' directly as an"
" argument to SkyCoord because a frame instance was passed in."
" Either pass a frame class, or modify the frame attributes of"
" the input frame instance."
)
elif not frame.is_frame_attr_default(attr):
kwargs[attr] = getattr(frame, attr)
frame_cls = frame.__class__
# Make sure we propagate representation/differential _type choices,
# unless these are specified directly in the kwargs:
kwargs.setdefault("representation_type", frame.representation_type)
kwargs.setdefault("differential_type", frame.differential_type)
if frame_cls is None: # frame probably a string
frame_cls = _get_frame_class(frame)
# Check that the new frame doesn't conflict with existing coordinate frame
# if a coordinate is supplied in the args list. If the frame still had not
# been set by this point and a coordinate was supplied, then use that frame.
for arg in args:
# this catches the "single list passed in" case. For that case we want
# to allow the first argument to set the class. That's OK because
# _parse_coordinate_arg goes and checks that the frames match between
# the first and all the others
if isinstance(arg, (Sequence, np.ndarray)) and len(args) == 1 and len(arg) > 0:
arg = arg[0]
coord_frame_obj = coord_frame_cls = None
if isinstance(arg, BaseCoordinateFrame):
coord_frame_obj = arg
elif isinstance(arg, SkyCoord):
coord_frame_obj = arg.frame
if coord_frame_obj is not None:
coord_frame_cls = coord_frame_obj.__class__
frame_diff = coord_frame_obj.get_representation_cls("s")
if frame_diff is not None:
# we do this check because otherwise if there's no default
# differential (i.e. it is None), the code below chokes. but
# None still gets through if the user *requests* it
kwargs.setdefault("differential_type", frame_diff)
for attr in coord_frame_obj.frame_attributes:
if (
attr in kwargs
and not coord_frame_obj.is_frame_attr_default(attr)
and np.any(kwargs[attr] != getattr(coord_frame_obj, attr))
):
raise ValueError(
f"Frame attribute '{attr}' has conflicting values between the"
" input coordinate data and either keyword arguments or the "
"frame specification (frame=...):"
f" {getattr(coord_frame_obj, attr)} =/= {kwargs[attr]}"
)
elif attr not in kwargs and not coord_frame_obj.is_frame_attr_default(
attr
):
kwargs[attr] = getattr(coord_frame_obj, attr)
if coord_frame_cls is not None:
if frame_cls is None:
frame_cls = coord_frame_cls
elif frame_cls is not coord_frame_cls:
raise ValueError(
f"Cannot override frame='{coord_frame_cls.__name__}' of input "
f"coordinate with new frame='{frame_cls.__name__}'. Instead, "
"transform the coordinate."
)
if frame_cls is None:
from .builtin_frames import ICRS
frame_cls = ICRS
# By now, frame_cls should be set - if it's not, something went wrong
if not issubclass(frame_cls, BaseCoordinateFrame):
# We should hopefully never get here...
raise ValueError(f"Frame class has unexpected type: {frame_cls.__name__}")
for attr in frame_cls.frame_attributes:
if attr in kwargs:
frame_cls_kwargs[attr] = kwargs.pop(attr)
if "representation_type" in kwargs:
frame_cls_kwargs["representation_type"] = _get_repr_cls(
kwargs.pop("representation_type")
)
differential_type = kwargs.pop("differential_type", None)
if differential_type is not None:
frame_cls_kwargs["differential_type"] = _get_diff_cls(differential_type)
return frame_cls, frame_cls_kwargs
def _parse_coordinate_data(frame, args, kwargs):
"""
Extract coordinate data from the args and kwargs passed to SkyCoord.
By this point, we assume that all of the frame attributes have been
extracted from kwargs (see _get_frame_without_data()), so all that are left
are (1) extra SkyCoord attributes, and (2) the coordinate data, specified in
any of the valid ways.
"""
valid_skycoord_kwargs = {}
valid_components = {}
info = None
# Look through the remaining kwargs to see if any are valid attribute names
# by asking the frame transform graph:
attr_names = list(kwargs.keys())
for attr in attr_names:
if attr in frame_transform_graph.frame_attributes:
valid_skycoord_kwargs[attr] = kwargs.pop(attr)
# By this point in parsing the arguments, anything left in the args and
# kwargs should be data. Either as individual components, or a list of
# objects, or a representation, etc.
# Get units of components
units = _get_representation_component_units(args, kwargs)
# Grab any frame-specific attr names like `ra` or `l` or `distance` from
# kwargs and move them to valid_components.
valid_components.update(_get_representation_attrs(frame, units, kwargs))
# Error if anything is still left in kwargs
if kwargs:
# The next few lines add a more user-friendly error message to a
# common and confusing situation when the user specifies, e.g.,
# `pm_ra` when they really should be passing `pm_ra_cosdec`. The
# extra error should only turn on when the positional representation
# is spherical, and when the component 'pm_<lon>' is passed.
pm_message = ""
if frame.representation_type == SphericalRepresentation:
frame_names = list(frame.get_representation_component_names().keys())
lon_name = frame_names[0]
lat_name = frame_names[1]
if f"pm_{lon_name}" in list(kwargs.keys()):
pm_message = (
"\n\n By default, most frame classes expect the longitudinal proper"
" motion to include the cos(latitude) term, named"
f" `pm_{lon_name}_cos{lat_name}`. Did you mean to pass in this"
" component?"
)
raise ValueError(
"Unrecognized keyword argument(s) {}{}".format(
", ".join(f"'{key}'" for key in kwargs), pm_message
)
)
# Finally deal with the unnamed args. This figures out what the arg[0]
# is and returns a dict with appropriate key/values for initializing
# frame class. Note that differentials are *never* valid args, only
# kwargs. So they are not accounted for here (unless they're in a frame
# or SkyCoord object)
if args:
if len(args) == 1:
# One arg which must be a coordinate. In this case coord_kwargs
# will contain keys like 'ra', 'dec', 'distance' along with any
# frame attributes like equinox or obstime which were explicitly
# specified in the coordinate object (i.e. non-default).
_skycoord_kwargs, _components = _parse_coordinate_arg(
args[0], frame, units, kwargs
)
# Copy other 'info' attr only if it has actually been defined.
if "info" in getattr(args[0], "__dict__", ()):
info = args[0].info
elif len(args) <= 3:
_skycoord_kwargs = {}
_components = {}
frame_attr_names = frame.representation_component_names.keys()
repr_attr_names = frame.representation_component_names.values()
for arg, frame_attr_name, repr_attr_name, unit in zip(
args, frame_attr_names, repr_attr_names, units
):
attr_class = frame.representation_type.attr_classes[repr_attr_name]
_components[frame_attr_name] = attr_class(arg, unit=unit)
else:
raise ValueError(
f"Must supply no more than three positional arguments, got {len(args)}"
)
# The next two loops copy the component and skycoord attribute data into
# their final, respective "valid_" dictionaries. For each, we check that
# there are no relevant conflicts with values specified by the user
# through other means:
# First validate the component data
for attr, coord_value in _components.items():
if attr in valid_components:
raise ValueError(
_conflict_err_msg.format(
attr, coord_value, valid_components[attr], "SkyCoord"
)
)
valid_components[attr] = coord_value
# Now validate the custom SkyCoord attributes
for attr, value in _skycoord_kwargs.items():
if attr in valid_skycoord_kwargs and np.any(
valid_skycoord_kwargs[attr] != value
):
raise ValueError(
_conflict_err_msg.format(
attr, value, valid_skycoord_kwargs[attr], "SkyCoord"
)
)
valid_skycoord_kwargs[attr] = value
return valid_skycoord_kwargs, valid_components, info
def _get_representation_component_units(args, kwargs):
"""
Get the unit from kwargs for the *representation* components (not the
differentials).
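For example (illustrative only), ``unit='deg'`` expands to
``[deg, deg, deg]``, while ``unit='hourangle,deg'`` expands to
``[hourangle, deg, None]``.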
"""
if "unit" not in kwargs:
units = [None, None, None]
else:
units = kwargs.pop("unit")
if isinstance(units, str):
units = [x.strip() for x in units.split(",")]
# Allow for input like unit='deg' or unit='m'
if len(units) == 1:
units = [units[0], units[0], units[0]]
elif isinstance(units, (Unit, IrreducibleUnit)):
units = [units, units, units]
try:
units = [(Unit(x) if x else None) for x in units]
units.extend(None for x in range(3 - len(units)))
if len(units) > 3:
raise ValueError()
except Exception as err:
raise ValueError(
"Unit keyword must have one to three unit values as "
"tuple or comma-separated string."
) from err
return units
def _parse_coordinate_arg(coords, frame, units, init_kwargs):
"""
Single unnamed arg supplied. This must be:
- Coordinate frame with data
- Representation
- SkyCoord
- List or tuple of:
- String which splits into two values
- Iterable with two values
- SkyCoord, frame, or representation objects.
Returns a dict mapping coordinate attribute names to values (or lists of
values)
"""
from .sky_coordinate import SkyCoord
is_scalar = False # Differentiate between scalar and list input
# valid_kwargs = {} # Returned dict of lon, lat, and distance (optional)
components = {}
skycoord_kwargs = {}
frame_attr_names = list(frame.representation_component_names.keys())
repr_attr_names = list(frame.representation_component_names.values())
repr_attr_classes = list(frame.representation_type.attr_classes.values())
n_attr_names = len(repr_attr_names)
# Turn a single string into a list of strings for convenience
if isinstance(coords, str):
is_scalar = True
coords = [coords]
if isinstance(coords, (SkyCoord, BaseCoordinateFrame)):
# Note that during parsing of `frame` it is checked that any coordinate
# args have the same frame as explicitly supplied, so don't worry here.
if not coords.has_data:
raise ValueError("Cannot initialize from a frame without coordinate data")
data = coords.data.represent_as(frame.representation_type)
values = [] # List of values corresponding to representation attrs
repr_attr_name_to_drop = []
for repr_attr_name in repr_attr_names:
# If coords did not have an explicit distance then don't include in initializers.
if (
isinstance(coords.data, UnitSphericalRepresentation)
and repr_attr_name == "distance"
):
repr_attr_name_to_drop.append(repr_attr_name)
continue
# Get the value from `data` in the eventual representation
values.append(getattr(data, repr_attr_name))
# drop the ones that were skipped because they were distances
for nametodrop in repr_attr_name_to_drop:
nameidx = repr_attr_names.index(nametodrop)
del repr_attr_names[nameidx]
del units[nameidx]
del frame_attr_names[nameidx]
del repr_attr_classes[nameidx]
if coords.data.differentials and "s" in coords.data.differentials:
orig_vel = coords.data.differentials["s"]
vel = coords.data.represent_as(
frame.representation_type, frame.get_representation_cls("s")
).differentials["s"]
for frname, reprname in frame.get_representation_component_names(
"s"
).items():
if (
reprname == "d_distance"
and not hasattr(orig_vel, reprname)
and "unit" in orig_vel.get_name()
):
continue
values.append(getattr(vel, reprname))
units.append(None)
frame_attr_names.append(frname)
repr_attr_names.append(reprname)
repr_attr_classes.append(vel.attr_classes[reprname])
for attr in frame_transform_graph.frame_attributes:
value = getattr(coords, attr, None)
use_value = (
isinstance(coords, SkyCoord) or attr not in coords.frame_attributes
)
if use_value and value is not None:
skycoord_kwargs[attr] = value
elif isinstance(coords, BaseRepresentation):
if coords.differentials and "s" in coords.differentials:
diffs = frame.get_representation_cls("s")
data = coords.represent_as(frame.representation_type, diffs)
values = [
getattr(data, repr_attr_name) for repr_attr_name in repr_attr_names
]
for frname, reprname in frame.get_representation_component_names(
"s"
).items():
values.append(getattr(data.differentials["s"], reprname))
units.append(None)
frame_attr_names.append(frname)
repr_attr_names.append(reprname)
repr_attr_classes.append(data.differentials["s"].attr_classes[reprname])
else:
data = coords.represent_as(frame.representation_type)
values = [
getattr(data, repr_attr_name) for repr_attr_name in repr_attr_names
]
elif (
isinstance(coords, np.ndarray)
and coords.dtype.kind in "if"
and coords.ndim == 2
and coords.shape[1] <= 3
):
# 2-d array of coordinate values. Handle specially for efficiency.
values = coords.transpose() # Iterates over repr attrs
elif isinstance(coords, (Sequence, np.ndarray)):
# Handles list-like input.
vals = []
is_ra_dec_representation = (
"ra" in frame.representation_component_names
and "dec" in frame.representation_component_names
)
coord_types = (SkyCoord, BaseCoordinateFrame, BaseRepresentation)
if any(isinstance(coord, coord_types) for coord in coords):
# this parsing path is used when there are coordinate-like objects
# in the list - instead of creating lists of values, we create
# SkyCoords from the list elements and then combine them.
scs = [SkyCoord(coord, **init_kwargs) for coord in coords]
# Check that all frames are equivalent
for sc in scs[1:]:
if not sc.is_equivalent_frame(scs[0]):
raise ValueError(
f"List of inputs don't have equivalent frames: {sc} != {scs[0]}"
)
# Now use the first to determine if they are all UnitSpherical
allunitsphrepr = isinstance(scs[0].data, UnitSphericalRepresentation)
# get the frame attributes from the first coord in the list, because
# from the above we know it matches all the others. First copy over
# the attributes that are in the frame itself, then copy over any
# extras in the SkyCoord
for fattrnm in scs[0].frame.frame_attributes:
skycoord_kwargs[fattrnm] = getattr(scs[0].frame, fattrnm)
for fattrnm in scs[0]._extra_frameattr_names:
skycoord_kwargs[fattrnm] = getattr(scs[0], fattrnm)
# Now combine the values, to be used below
values = []
for data_attr_name, repr_attr_name in zip(
frame_attr_names, repr_attr_names
):
if allunitsphrepr and repr_attr_name == "distance":
# if they are *all* UnitSpherical, don't give a distance
continue
data_vals = []
for sc in scs:
data_val = getattr(sc, data_attr_name)
data_vals.append(
data_val.reshape((1,)) if sc.isscalar else data_val
)
concat_vals = np.concatenate(data_vals)
# Hack because np.concatenate doesn't fully work with Quantity
if isinstance(concat_vals, u.Quantity):
concat_vals._unit = data_val.unit
values.append(concat_vals)
else:
# none of the elements are "frame-like"
# turn into a list of lists like [[v1_0, v2_0, v3_0], ... [v1_N, v2_N, v3_N]]
for coord in coords:
if isinstance(coord, str):
coord1 = coord.split()
if len(coord1) == 6:
coord = (" ".join(coord1[:3]), " ".join(coord1[3:]))
elif is_ra_dec_representation:
coord = _parse_ra_dec(coord)
else:
coord = coord1
vals.append(coord) # Assumes coord is a sequence at this point
# Do some basic validation of the list elements: all have a length and all
# lengths the same
try:
n_coords = sorted({len(x) for x in vals})
except Exception as err:
raise ValueError(
"One or more elements of input sequence does not have a length."
) from err
if len(n_coords) > 1:
raise ValueError(
"Input coordinate values must have same number of elements, found"
f" {n_coords}"
)
n_coords = n_coords[0]
# Must have no more coord inputs than representation attributes
if n_coords > n_attr_names:
raise ValueError(
f"Input coordinates have {n_coords} values but representation"
f" {frame.representation_type.get_name()} only accepts"
f" {n_attr_names}"
)
# Now transpose vals to get [(v1_0 .. v1_N), (v2_0 .. v2_N), (v3_0 .. v3_N)]
# (ok since we know it is exactly rectangular). (Note: can't just use zip(*values)
# because Longitude et al. distinguish list from tuple, so [a1, a2, ..] is needed
# while (a1, a2, ..) doesn't work.)
values = [list(x) for x in zip(*vals)]
if is_scalar:
values = [x[0] for x in values]
else:
raise ValueError("Cannot parse coordinates from first argument")
# Finally we have a list of values from which to create the keyword args
# for the frame initialization. Validate by running through the appropriate
# class initializer and supply units (which might be None).
try:
for frame_attr_name, repr_attr_class, value, unit in zip(
frame_attr_names, repr_attr_classes, values, units
):
components[frame_attr_name] = repr_attr_class(value, unit=unit, copy=False)
except Exception as err:
raise ValueError(
f'Cannot parse first argument data "{value}" for attribute'
f" {frame_attr_name}"
) from err
return skycoord_kwargs, components
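# Rough sketch (illustrative only) of what the parser above produces. Assuming
# ``frame`` is an ICRS-like frame whose component names are ra/dec/distance and
# ``units`` came out as ['deg', 'deg', None]:
#
#   _parse_coordinate_arg("1 2 3 +4 5 6", frame, units, {})
#
# would return ({}, {"ra": Longitude("1 2 3", unit=deg),
#                    "dec": Latitude("+4 5 6", unit=deg)}).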
def _get_representation_attrs(frame, units, kwargs):
"""
Find instances of the "representation attributes" for specifying data
for this frame. Pop them off of kwargs, run through the appropriate class
constructor (to validate and apply unit), and put into the output
valid_kwargs. "Representation attributes" are the frame-specific aliases
for the underlying data values in the representation, e.g. "ra" for "lon"
for many equatorial spherical representations, or "w" for "x" in the
cartesian representation of Galactic.
This also gets any *differential* kwargs, because they go into the same
frame initializer later on.
"""
frame_attr_names = frame.representation_component_names.keys()
repr_attr_classes = frame.representation_type.attr_classes.values()
valid_kwargs = {}
for frame_attr_name, repr_attr_class, unit in zip(
frame_attr_names, repr_attr_classes, units
):
value = kwargs.pop(frame_attr_name, None)
if value is not None:
try:
valid_kwargs[frame_attr_name] = repr_attr_class(value, unit=unit)
except u.UnitConversionError as err:
error_message = (
f"Unit '{unit}' ({unit.physical_type}) could not be applied to"
f" '{frame_attr_name}'. This can occur when passing units for some"
" coordinate components when other components are specified as"
" Quantity objects. Either pass a list of units for all components"
" (and unit-less coordinate data), or pass Quantities for all"
" components."
)
raise u.UnitConversionError(error_message) from err
# also check the differentials. They aren't included in the units keyword,
# so we only look for the names.
differential_type = frame.differential_type
if differential_type is not None:
for frame_name, repr_name in frame.get_representation_component_names(
"s"
).items():
diff_attr_class = differential_type.attr_classes[repr_name]
value = kwargs.pop(frame_name, None)
if value is not None:
valid_kwargs[frame_name] = diff_attr_class(value)
return valid_kwargs
def _parse_ra_dec(coord_str):
"""Parse RA and Dec values from a coordinate string.
Currently the following formats are supported:
* space separated 6-value format
* space separated <6-value format; this requires a plus or minus sign
as the separator between RA and Dec
* sign separated format
* JHHMMSS.ss+DDMMSS.ss format, with up to two optional decimal digits
* JDDDMMSS.ss+DDMMSS.ss format, with up to two optional decimal digits
Parameters
----------
coord_str : str
Coordinate string to parse.
Returns
-------
coord : str or list of str
Parsed coordinate values.
"""
if isinstance(coord_str, str):
coord1 = coord_str.split()
else:
# This exception should never be raised from SkyCoord
raise TypeError("coord_str must be a single str")
if len(coord1) == 6:
coord = (" ".join(coord1[:3]), " ".join(coord1[3:]))
elif len(coord1) > 2:
coord = PLUS_MINUS_RE.split(coord_str)
coord = (coord[0], " ".join(coord[1:]))
elif len(coord1) == 1:
match_j = J_PREFIXED_RA_DEC_RE.match(coord_str)
if match_j:
coord = match_j.groups()
if len(coord[0].split(".")[0]) == 7:
coord = (
f"{coord[0][0:3]} {coord[0][3:5]} {coord[0][5:]}",
f"{coord[1][0:3]} {coord[1][3:5]} {coord[1][5:]}",
)
else:
coord = (
f"{coord[0][0:2]} {coord[0][2:4]} {coord[0][4:]}",
f"{coord[1][0:3]} {coord[1][3:5]} {coord[1][5:]}",
)
else:
coord = PLUS_MINUS_RE.split(coord_str)
coord = (coord[0], " ".join(coord[1:]))
else:
coord = coord1
return coord
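# For example (illustrative; the other branches depend on PLUS_MINUS_RE and
# J_PREFIXED_RA_DEC_RE defined elsewhere in this module):
#
#   _parse_ra_dec("1 2 3 4 5 6")  ->  ("1 2 3", "4 5 6")   # 6-value branch above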
94d1293624af2ba7836cd127d8e157157fa3afc6af0c916ea198bd15f173c1b2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Framework and base classes for coordinate frames/"low-level" coordinate
classes.
"""
# Standard library
import copy
import warnings
from collections import defaultdict, namedtuple
# Dependencies
import numpy as np
from astropy import units as u
from astropy.utils import ShapedLikeNDArray, check_broadcast
# Project
from astropy.utils.decorators import deprecated, format_doc, lazyproperty
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
from . import representation as r
from .angles import Angle
from .attributes import Attribute
from .transformations import TransformGraph
__all__ = [
"BaseCoordinateFrame",
"frame_transform_graph",
"GenericFrame",
"RepresentationMapping",
]
# the graph used for all transformations between frames
frame_transform_graph = TransformGraph()
def _get_repr_cls(value):
"""
Return a valid representation class from ``value`` or raise exception.
"""
if value in r.REPRESENTATION_CLASSES:
value = r.REPRESENTATION_CLASSES[value]
elif not isinstance(value, type) or not issubclass(value, r.BaseRepresentation):
raise ValueError(
f"Representation is {value!r} but must be a BaseRepresentation class "
f"or one of the string aliases {list(r.REPRESENTATION_CLASSES)}"
)
return value
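# Usage sketch (illustrative; "spherical" is a registered alias in
# r.REPRESENTATION_CLASSES in astropy):
#
#   _get_repr_cls("spherical")                -> r.SphericalRepresentation
#   _get_repr_cls(r.CartesianRepresentation)  -> r.CartesianRepresentation (passed through)
#   _get_repr_cls("bogus")                    -> raises ValueError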
def _get_diff_cls(value):
"""
Return a valid differential class from ``value`` or raise exception.
As originally created, this is only used in the SkyCoord initializer, so if
that is refactored, this function may no longer be necessary.
"""
if value in r.DIFFERENTIAL_CLASSES:
value = r.DIFFERENTIAL_CLASSES[value]
elif not isinstance(value, type) or not issubclass(value, r.BaseDifferential):
raise ValueError(
f"Differential is {value!r} but must be a BaseDifferential class "
f"or one of the string aliases {list(r.DIFFERENTIAL_CLASSES)}"
)
return value
def _get_repr_classes(base, **differentials):
"""Get valid representation and differential classes.
Parameters
----------
base : str or `~astropy.coordinates.BaseRepresentation` subclass
class for the representation of the base coordinates. If a string,
it is looked up among the known representation classes.
**differentials : dict of str or `~astropy.coordinates.BaseDifferential` subclass
Keys are like for normal differentials, i.e., 's' for a first
derivative in time, etc. If an item is set to ``'base'``, it will be
inferred from the base class.
Returns
-------
repr_classes : dict of subclasses
The base class is keyed by 'base'; the others by the keys of
``differentials``.
"""
base = _get_repr_cls(base)
repr_classes = {"base": base}
for name, differential_type in differentials.items():
if differential_type == "base":
# We don't want to fail for this case.
differential_type = r.DIFFERENTIAL_CLASSES.get(base.get_name(), None)
elif differential_type in r.DIFFERENTIAL_CLASSES:
differential_type = r.DIFFERENTIAL_CLASSES[differential_type]
elif differential_type is not None and (
not isinstance(differential_type, type)
or not issubclass(differential_type, r.BaseDifferential)
):
raise ValueError(
"Differential is {differential_type!r} but must be a BaseDifferential"
f" class or one of the string aliases {list(r.DIFFERENTIAL_CLASSES)}"
)
repr_classes[name] = differential_type
return repr_classes
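# Sketch (illustrative, using the string aliases registered in astropy):
#
#   _get_repr_classes("spherical", s="sphericalcoslat")
#
# would return {"base": r.SphericalRepresentation,
#               "s": r.SphericalCosLatDifferential}, while s="base" looks the
# differential up under the base representation's own name.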
_RepresentationMappingBase = namedtuple(
"RepresentationMapping", ("reprname", "framename", "defaultunit")
)
class RepresentationMapping(_RepresentationMappingBase):
"""
This `~collections.namedtuple` is used with the
``frame_specific_representation_info`` attribute to tell frames what
attribute names (and default units) to use for a particular representation.
``reprname`` and ``framename`` should be strings, while ``defaultunit`` can
be either an astropy unit, the string ``'recommended'`` (which is degrees
for Angles, nothing otherwise), or None (to indicate that no unit mapping
should be done).
"""
def __new__(cls, reprname, framename, defaultunit="recommended"):
# this trick just provides some defaults
return super().__new__(cls, reprname, framename, defaultunit)
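# Illustrative example (not part of the module) of how a frame subclass might
# use RepresentationMapping, roughly as the built-in RA/Dec frames do:
#
#   frame_specific_representation_info = {
#       r.SphericalRepresentation: [
#           RepresentationMapping("lon", "ra"),
#           RepresentationMapping("lat", "dec"),
#       ]
#   }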
base_doc = """{__doc__}
Parameters
----------
data : `~astropy.coordinates.BaseRepresentation` subclass instance
A representation object or ``None`` to have no data (or use the
coordinate component arguments, see below).
{components}
representation_type : `~astropy.coordinates.BaseRepresentation` subclass, str, optional
A representation class or string name of a representation class. This
sets the expected input representation class, thereby changing the
expected keyword arguments for the data passed in. For example, passing
``representation_type='cartesian'`` will make the classes expect
position data with cartesian names, i.e. ``x, y, z`` in most cases
unless overridden via ``frame_specific_representation_info``. To see this
frame's names, check out ``<this frame>().representation_info``.
differential_type : `~astropy.coordinates.BaseDifferential` subclass, str, dict, optional
A differential class or dictionary of differential classes (currently
only a velocity differential with key 's' is supported). This sets the
expected input differential class, thereby changing the expected keyword
arguments of the data passed in. For example, passing
``differential_type='cartesian'`` will make the classes expect velocity
data with the argument names ``v_x, v_y, v_z`` unless overridden via
``frame_specific_representation_info``. To see this frame's names,
check out ``<this frame>().representation_info``.
copy : bool, optional
If `True` (default), make copies of the input coordinate arrays.
Can only be passed in as a keyword argument.
{footer}
"""
_components = """
*args, **kwargs
Coordinate components, with names that depend on the subclass.
"""
@format_doc(base_doc, components=_components, footer="")
class BaseCoordinateFrame(ShapedLikeNDArray):
"""
The base class for coordinate frames.
This class is intended to be subclassed to create instances of specific
systems. Subclasses can implement the following attributes:
* `default_representation`
A subclass of `~astropy.coordinates.BaseRepresentation` that will be
treated as the default representation of this frame. This is the
representation assumed by default when the frame is created.
* `default_differential`
A subclass of `~astropy.coordinates.BaseDifferential` that will be
treated as the default differential class of this frame. This is the
differential class assumed by default when the frame is created.
* `~astropy.coordinates.Attribute` class attributes
Frame attributes such as ``FK4.equinox`` or ``FK4.obstime`` are defined
using a descriptor class. See the narrative documentation or
built-in classes code for details.
* `frame_specific_representation_info`
A dictionary mapping the name or class of a representation to a list of
`~astropy.coordinates.RepresentationMapping` objects that tell what
names and default units should be used on this frame for the components
of that representation.
Unless overridden via `frame_specific_representation_info`, velocity name
defaults are:
* ``pm_{lon}_cos{lat}``, ``pm_{lat}`` for `~astropy.coordinates.SphericalCosLatDifferential` velocity components
* ``pm_{lon}``, ``pm_{lat}`` for `~astropy.coordinates.SphericalDifferential` velocity components
* ``radial_velocity`` for any ``d_distance`` component
* ``v_{x,y,z}`` for `~astropy.coordinates.CartesianDifferential` velocity components
where ``{lon}`` and ``{lat}`` are the frame names of the angular components.
"""
default_representation = None
default_differential = None
# Specifies special names and units for representation and differential
# attributes.
frame_specific_representation_info = {}
frame_attributes = {}
# Default empty frame_attributes dict
def __init_subclass__(cls, **kwargs):
# We first check for explicitly set values for these:
default_repr = getattr(cls, "default_representation", None)
default_diff = getattr(cls, "default_differential", None)
repr_info = getattr(cls, "frame_specific_representation_info", None)
# Then, to make sure this works for subclasses-of-subclasses, we also
# have to check for cases where the attribute names have already been
# replaced by underscore-prefaced equivalents by the logic below:
if default_repr is None or isinstance(default_repr, property):
default_repr = getattr(cls, "_default_representation", None)
if default_diff is None or isinstance(default_diff, property):
default_diff = getattr(cls, "_default_differential", None)
if repr_info is None or isinstance(repr_info, property):
repr_info = getattr(cls, "_frame_specific_representation_info", None)
repr_info = cls._infer_repr_info(repr_info)
# Make read-only properties for the frame class attributes that should
# be read-only to make them immutable after creation.
# We copy attributes instead of linking to make sure there's no
# accidental cross-talk between classes
cls._create_readonly_property(
"default_representation",
default_repr,
"Default representation for position data",
)
cls._create_readonly_property(
"default_differential",
default_diff,
"Default representation for differential data (e.g., velocity)",
)
cls._create_readonly_property(
"frame_specific_representation_info",
copy.deepcopy(repr_info),
"Mapping for frame-specific component names",
)
# Set the frame attributes. We first construct the attributes from
# superclasses, going in reverse order to keep insertion order,
# and then add any attributes from the frame now being defined
# (if any old definitions are overridden, this keeps the order).
# Note that we cannot simply start with the inherited frame_attributes
# since we could be a mixin between multiple coordinate frames.
# TODO: Should this be made to use readonly_prop_factory as well or
# would it be inconvenient for getting the frame_attributes from
# classes?
frame_attrs = {}
for basecls in reversed(cls.__bases__):
if issubclass(basecls, BaseCoordinateFrame):
frame_attrs.update(basecls.frame_attributes)
for k, v in cls.__dict__.items():
if isinstance(v, Attribute):
frame_attrs[k] = v
cls.frame_attributes = frame_attrs
# Deal with setting the name of the frame:
if not hasattr(cls, "name"):
cls.name = cls.__name__.lower()
elif BaseCoordinateFrame not in cls.__bases__ and cls.name in [
getattr(base, "name", None) for base in cls.__bases__
]:
# This may be a subclass of a subclass of BaseCoordinateFrame,
# like ICRS(BaseRADecFrame). In this case, cls.name will have been
# set by __init_subclass__ when the parent class was defined, so reset it here.
cls.name = cls.__name__.lower()
# A cache that *must be unique to each frame class* - it is
# insufficient to share them with superclasses, hence the need to put
# them in the meta
cls._frame_class_cache = {}
super().__init_subclass__(**kwargs)
# call this once here to initialize defaults
# (via FrameAttribute.__get__/convert_input)
cls.get_frame_attr_defaults()
def __init__(
self,
*args,
copy=True,
representation_type=None,
differential_type=None,
**kwargs,
):
self._attr_names_with_defaults = []
self._representation = self._infer_representation(
representation_type, differential_type
)
self._data = self._infer_data(args, copy, kwargs) # possibly None.
# Set frame attributes, if any
values = {}
for fnm, fdefault in self.get_frame_attr_defaults().items():
# Read-only frame attributes are defined as FrameAttribute
# descriptors which are not settable, so set 'real' attributes as
# the name prefaced with an underscore.
if fnm in kwargs:
value = kwargs.pop(fnm)
setattr(self, "_" + fnm, value)
# Validate attribute by getting it. If the instance has data,
# this also checks its shape is OK. If not, we do it below.
values[fnm] = getattr(self, fnm)
else:
setattr(self, "_" + fnm, fdefault)
self._attr_names_with_defaults.append(fnm)
if kwargs:
raise TypeError(
f"Coordinate frame {self.__class__.__name__} got unexpected "
f"keywords: {list(kwargs)}"
)
# We do ``is None`` because self._data might evaluate to false for
# empty arrays or data == 0
if self._data is None:
# No data: we still need to check that any non-scalar attributes
# have consistent shapes. Collect them for all attributes with
# size > 1 (which should be array-like and thus have a shape).
shapes = {
fnm: value.shape
for fnm, value in values.items()
if getattr(value, "shape", ())
}
if shapes:
if len(shapes) > 1:
try:
self._no_data_shape = check_broadcast(*shapes.values())
except ValueError as err:
raise ValueError(
f"non-scalar attributes with inconsistent shapes: {shapes}"
) from err
# Above, we checked that it is possible to broadcast all
# shapes. By getting and thus validating the attributes,
# we verify that the attributes can in fact be broadcast.
for fnm in shapes:
getattr(self, fnm)
else:
self._no_data_shape = shapes.popitem()[1]
else:
self._no_data_shape = ()
# The logic of this block is not related to the previous one
if self._data is not None:
# This makes the cache keys backwards-compatible, but also adds
# support for having differentials attached to the frame data
# representation object.
if "s" in self._data.differentials:
# TODO: assumes a velocity unit differential
key = (
self._data.__class__.__name__,
self._data.differentials["s"].__class__.__name__,
False,
)
else:
key = (self._data.__class__.__name__, False)
# Set up representation cache.
self.cache["representation"][key] = self._data
def _infer_representation(self, representation_type, differential_type):
if representation_type is None and differential_type is None:
return {"base": self.default_representation, "s": self.default_differential}
if representation_type is None:
representation_type = self.default_representation
if isinstance(differential_type, type) and issubclass(
differential_type, r.BaseDifferential
):
# TODO: assumes the differential class is for the velocity
# differential
differential_type = {"s": differential_type}
elif isinstance(differential_type, str):
# TODO: assumes the differential class is for the velocity
# differential
diff_cls = r.DIFFERENTIAL_CLASSES[differential_type]
differential_type = {"s": diff_cls}
elif differential_type is None:
if representation_type == self.default_representation:
differential_type = {"s": self.default_differential}
else:
differential_type = {"s": "base"} # see set_representation_cls()
return _get_repr_classes(representation_type, **differential_type)
def _infer_data(self, args, copy, kwargs):
# if not set below, this is a frame with no data
representation_data = None
differential_data = None
args = list(args) # need to be able to pop them
if args and (isinstance(args[0], r.BaseRepresentation) or args[0] is None):
representation_data = args.pop(0) # This can still be None
if len(args) > 0:
raise TypeError(
"Cannot create a frame with both a representation object "
"and other positional arguments"
)
if representation_data is not None:
diffs = representation_data.differentials
differential_data = diffs.get("s", None)
if (differential_data is None and len(diffs) > 0) or (
differential_data is not None and len(diffs) > 1
):
raise ValueError(
"Multiple differentials are associated with the representation"
" object passed in to the frame initializer. Only a single"
f" velocity differential is supported. Got: {diffs}"
)
else:
representation_cls = self.get_representation_cls()
# Get any representation data passed in to the frame initializer
# using keyword or positional arguments for the component names
repr_kwargs = {}
for nmkw, nmrep in self.representation_component_names.items():
if len(args) > 0:
# first gather up positional args
repr_kwargs[nmrep] = args.pop(0)
elif nmkw in kwargs:
repr_kwargs[nmrep] = kwargs.pop(nmkw)
# special-case the Spherical->UnitSpherical if no `distance`
if repr_kwargs:
# TODO: determine how to get rid of the part before the "try" -
# currently removing it has a performance regression for
# unitspherical because of the try-related overhead.
# Also frames have no way to indicate what the "distance" is
if repr_kwargs.get("distance", True) is None:
del repr_kwargs["distance"]
if (
issubclass(representation_cls, r.SphericalRepresentation)
and "distance" not in repr_kwargs
):
representation_cls = representation_cls._unit_representation
try:
representation_data = representation_cls(copy=copy, **repr_kwargs)
except TypeError as e:
# this except clause is here to make the names of the
# attributes more human-readable. Without this the names
# come from the representation instead of the frame's
# attribute names.
try:
representation_data = representation_cls._unit_representation(
copy=copy, **repr_kwargs
)
except Exception:
msg = str(e)
names = self.get_representation_component_names()
for frame_name, repr_name in names.items():
msg = msg.replace(repr_name, frame_name)
msg = msg.replace("__init__()", f"{self.__class__.__name__}()")
e.args = (msg,)
raise e
# Now we handle the Differential data:
# Get any differential data passed in to the frame initializer
# using keyword or positional arguments for the component names
differential_cls = self.get_representation_cls("s")
diff_component_names = self.get_representation_component_names("s")
diff_kwargs = {}
for nmkw, nmrep in diff_component_names.items():
if len(args) > 0:
# first gather up positional args
diff_kwargs[nmrep] = args.pop(0)
elif nmkw in kwargs:
diff_kwargs[nmrep] = kwargs.pop(nmkw)
if diff_kwargs:
if (
hasattr(differential_cls, "_unit_differential")
and "d_distance" not in diff_kwargs
):
differential_cls = differential_cls._unit_differential
elif len(diff_kwargs) == 1 and "d_distance" in diff_kwargs:
differential_cls = r.RadialDifferential
try:
differential_data = differential_cls(copy=copy, **diff_kwargs)
except TypeError as e:
# this except clause is here to make the names of the
# attributes more human-readable. Without this the names
# come from the representation instead of the frame's
# attribute names.
msg = str(e)
names = self.get_representation_component_names("s")
for frame_name, repr_name in names.items():
msg = msg.replace(repr_name, frame_name)
msg = msg.replace("__init__()", f"{self.__class__.__name__}()")
e.args = (msg,)
raise
if len(args) > 0:
raise TypeError(
"{}.__init__ had {} remaining unhandled arguments".format(
self.__class__.__name__, len(args)
)
)
if representation_data is None and differential_data is not None:
raise ValueError(
"Cannot pass in differential component data "
"without positional (representation) data."
)
if differential_data:
# Check that differential data provided has units compatible
# with time-derivative of representation data.
# NOTE: there is no dimensionless time while lengths can be
# dimensionless (u.dimensionless_unscaled).
for comp in representation_data.components:
if (diff_comp := f"d_{comp}") in differential_data.components:
current_repr_unit = representation_data._units[comp]
current_diff_unit = differential_data._units[diff_comp]
expected_unit = current_repr_unit / u.s
if not current_diff_unit.is_equivalent(expected_unit):
for (
key,
val,
) in self.get_representation_component_names().items():
if val == comp:
current_repr_name = key
break
for key, val in self.get_representation_component_names(
"s"
).items():
if val == diff_comp:
current_diff_name = key
break
raise ValueError(
f'{current_repr_name} has unit "{current_repr_unit}" with'
f' physical type "{current_repr_unit.physical_type}", but'
f" {current_diff_name} has incompatible unit"
f' "{current_diff_unit}" with physical type'
f' "{current_diff_unit.physical_type}" instead of the'
f' expected "{(expected_unit).physical_type}".'
)
representation_data = representation_data.with_differentials(
{"s": differential_data}
)
return representation_data
@classmethod
def _infer_repr_info(cls, repr_info):
# Unless overridden via `frame_specific_representation_info`, velocity
# name defaults are (see also docstring for BaseCoordinateFrame):
# * ``pm_{lon}_cos{lat}``, ``pm_{lat}`` for
# `SphericalCosLatDifferential` proper motion components
# * ``pm_{lon}``, ``pm_{lat}`` for `SphericalDifferential` proper
# motion components
# * ``radial_velocity`` for any `d_distance` component
# * ``v_{x,y,z}`` for `CartesianDifferential` velocity components
# where `{lon}` and `{lat}` are the frame names of the angular
# components.
if repr_info is None:
repr_info = {}
# the tuple() call below is necessary because if it is not there,
# the iteration proceeds in a difficult-to-predict manner in the
# case that one of the class objects' hash is such that it gets
# revisited by the iteration. The tuple() call prevents this by
# making the items iterated over fixed regardless of how the dict
# changes
for cls_or_name in tuple(repr_info.keys()):
if isinstance(cls_or_name, str):
# TODO: this provides a layer of backwards compatibility in
# case the key is a string, but now we want explicit classes.
_cls = _get_repr_cls(cls_or_name)
repr_info[_cls] = repr_info.pop(cls_or_name)
# The default spherical names are 'lon' and 'lat'
repr_info.setdefault(
r.SphericalRepresentation,
[RepresentationMapping("lon", "lon"), RepresentationMapping("lat", "lat")],
)
sph_component_map = {
m.reprname: m.framename for m in repr_info[r.SphericalRepresentation]
}
repr_info.setdefault(
r.SphericalCosLatDifferential,
[
RepresentationMapping(
"d_lon_coslat",
"pm_{lon}_cos{lat}".format(**sph_component_map),
u.mas / u.yr,
),
RepresentationMapping(
"d_lat", "pm_{lat}".format(**sph_component_map), u.mas / u.yr
),
RepresentationMapping("d_distance", "radial_velocity", u.km / u.s),
],
)
repr_info.setdefault(
r.SphericalDifferential,
[
RepresentationMapping(
"d_lon", "pm_{lon}".format(**sph_component_map), u.mas / u.yr
),
RepresentationMapping(
"d_lat", "pm_{lat}".format(**sph_component_map), u.mas / u.yr
),
RepresentationMapping("d_distance", "radial_velocity", u.km / u.s),
],
)
repr_info.setdefault(
r.CartesianDifferential,
[
RepresentationMapping("d_x", "v_x", u.km / u.s),
RepresentationMapping("d_y", "v_y", u.km / u.s),
RepresentationMapping("d_z", "v_z", u.km / u.s),
],
)
# Unit* classes should follow the same naming conventions
# TODO: this adds some unnecessary mappings for the Unit classes, so
# this could be cleaned up, but in practice doesn't seem to have any
# negative side effects
repr_info.setdefault(
r.UnitSphericalRepresentation, repr_info[r.SphericalRepresentation]
)
repr_info.setdefault(
r.UnitSphericalCosLatDifferential, repr_info[r.SphericalCosLatDifferential]
)
repr_info.setdefault(
r.UnitSphericalDifferential, repr_info[r.SphericalDifferential]
)
return repr_info
@classmethod
def _create_readonly_property(cls, attr_name, value, doc=None):
private_attr = "_" + attr_name
def getter(self):
return getattr(self, private_attr)
setattr(cls, private_attr, value)
setattr(cls, attr_name, property(getter, doc=doc))
@lazyproperty
def cache(self):
"""Cache for this frame, a dict.
It stores anything that should be computed from the coordinate data (*not* from
the frame attributes). This can be used in functions to store anything that
might be expensive to compute but might be re-used by some other function.
E.g.::
if 'user_data' in myframe.cache:
data = myframe.cache['user_data']
else:
myframe.cache['user_data'] = data = expensive_func(myframe.lat)
If in-place modifications are made to the frame data, the cache should
be cleared::
myframe.cache.clear()
"""
return defaultdict(dict)
@property
def data(self):
"""
The coordinate data for this object. If this frame has no data, a
`ValueError` will be raised. Use `has_data` to
check if data is present on this frame object.
"""
if self._data is None:
raise ValueError(
f'The frame object "{self!r}" does not have associated data'
)
return self._data
@property
def has_data(self):
"""
True if this frame has `data`, False otherwise.
"""
return self._data is not None
@property
def shape(self):
return self.data.shape if self.has_data else self._no_data_shape
# We have to override the ShapedLikeNDArray definitions, since our shape
# does not have to be that of the data.
def __len__(self):
return len(self.data)
def __bool__(self):
return self.has_data and self.size > 0
@property
def size(self):
return self.data.size
@property
def isscalar(self):
return self.has_data and self.data.isscalar
@classmethod
def get_frame_attr_defaults(cls):
"""Return a dict with the defaults for each frame attribute."""
return {name: getattr(cls, name) for name in cls.frame_attributes}
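# For instance (illustrative; FK5 is defined elsewhere in astropy.coordinates):
#   FK5.get_frame_attr_defaults()  ->  {'equinox': <Time J2000.000>}  (roughly)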
@deprecated(
"5.2",
alternative="get_frame_attr_defaults",
message=(
"The {func}() {obj_type} is deprecated and may be removed in a future"
" version. Use {alternative}() to obtain a dict of frame attribute names"
" and default values."
" The fastest way to obtain the names is frame_attributes.keys()"
),
)
@classmethod
def get_frame_attr_names(cls):
"""Return a dict with the defaults for each frame attribute."""
return cls.get_frame_attr_defaults()
def get_representation_cls(self, which="base"):
"""The class used for part of this frame's data.
Parameters
----------
which : ('base', 's', `None`)
The class of which part to return. 'base' means the class used to
represent the coordinates; 's' the first derivative with respect to time, i.e.,
the class representing the proper motion and/or radial velocity.
If `None`, return a dict with both.
Returns
-------
representation : `~astropy.coordinates.BaseRepresentation` or `~astropy.coordinates.BaseDifferential`.
"""
if which is not None:
return self._representation[which]
else:
return self._representation
def set_representation_cls(self, base=None, s="base"):
"""Set representation and/or differential class for this frame's data.
Parameters
----------
base : str, `~astropy.coordinates.BaseRepresentation` subclass, optional
The name or subclass to use to represent the coordinate data.
s : `~astropy.coordinates.BaseDifferential` subclass, optional
The differential subclass to use to represent any velocities,
such as proper motion and radial velocity. If equal to 'base',
which is the default, it will be inferred from the representation.
If `None`, the representation will drop any differentials.
"""
if base is None:
base = self._representation["base"]
self._representation = _get_repr_classes(base=base, s=s)
representation_type = property(
fget=get_representation_cls,
fset=set_representation_cls,
doc="""The representation class used for this frame's data.
This will be a subclass from `~astropy.coordinates.BaseRepresentation`.
Can also be *set* using the string name of the representation. If you
wish to set an explicit differential class (rather than have it be
inferred), use the ``set_representation_cls`` method.
""",
)
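# Rough usage sketch (ICRS and units assumed from astropy, not this file):
#   c = ICRS(ra=10*u.deg, dec=20*u.deg, distance=1*u.pc)
#   c.set_representation_cls("cartesian")    # data now presented as x, y, z
#   c.representation_type = "cylindrical"    # same effect via the property above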
@property
def differential_type(self):
"""
The differential used for this frame's data.
This will be a subclass from `~astropy.coordinates.BaseDifferential`.
For simultaneous setting of representation and differentials, see the
``set_representation_cls`` method.
"""
return self.get_representation_cls("s")
@differential_type.setter
def differential_type(self, value):
self.set_representation_cls(s=value)
@classmethod
def _get_representation_info(cls):
# This exists as a class method only to support handling frame inputs
# without units, which are deprecated and will be removed. This can be
# moved into the representation_info property at that time.
# note that if so moved, the cache should be accessed as
# self.__class__._frame_class_cache
if (
cls._frame_class_cache.get("last_reprdiff_hash", None)
!= r.get_reprdiff_cls_hash()
):
repr_attrs = {}
for repr_diff_cls in list(r.REPRESENTATION_CLASSES.values()) + list(
r.DIFFERENTIAL_CLASSES.values()
):
repr_attrs[repr_diff_cls] = {"names": [], "units": []}
for c, c_cls in repr_diff_cls.attr_classes.items():
repr_attrs[repr_diff_cls]["names"].append(c)
rec_unit = u.deg if issubclass(c_cls, Angle) else None
repr_attrs[repr_diff_cls]["units"].append(rec_unit)
for (
repr_diff_cls,
mappings,
) in cls._frame_specific_representation_info.items():
# take the 'names' and 'units' tuples from repr_attrs,
# and then use the RepresentationMapping objects
# to update as needed for this frame.
nms = repr_attrs[repr_diff_cls]["names"]
uns = repr_attrs[repr_diff_cls]["units"]
comptomap = {m.reprname: m for m in mappings}
for i, c in enumerate(repr_diff_cls.attr_classes.keys()):
if c in comptomap:
mapp = comptomap[c]
nms[i] = mapp.framename
# need the isinstance because otherwise if it's a unit it
# will try to compare to the unit string representation
if not (
isinstance(mapp.defaultunit, str)
and mapp.defaultunit == "recommended"
):
uns[i] = mapp.defaultunit
# else we just leave it as recommended_units says above
# Convert to tuples so that this can't mess with frame internals
repr_attrs[repr_diff_cls]["names"] = tuple(nms)
repr_attrs[repr_diff_cls]["units"] = tuple(uns)
cls._frame_class_cache["representation_info"] = repr_attrs
cls._frame_class_cache["last_reprdiff_hash"] = r.get_reprdiff_cls_hash()
return cls._frame_class_cache["representation_info"]
@lazyproperty
def representation_info(self):
"""
A dictionary with the information of what attribute names for this frame
apply to particular representations.
"""
return self._get_representation_info()
def get_representation_component_names(self, which="base"):
out = {}
repr_or_diff_cls = self.get_representation_cls(which)
if repr_or_diff_cls is None:
return out
data_names = repr_or_diff_cls.attr_classes.keys()
repr_names = self.representation_info[repr_or_diff_cls]["names"]
for repr_name, data_name in zip(repr_names, data_names):
out[repr_name] = data_name
return out
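# For example (illustrative; ICRS lives elsewhere in astropy.coordinates):
#   ICRS().representation_component_names
#   ->  {'ra': 'lon', 'dec': 'lat', 'distance': 'distance'}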
def get_representation_component_units(self, which="base"):
out = {}
repr_or_diff_cls = self.get_representation_cls(which)
if repr_or_diff_cls is None:
return out
repr_attrs = self.representation_info[repr_or_diff_cls]
repr_names = repr_attrs["names"]
repr_units = repr_attrs["units"]
for repr_name, repr_unit in zip(repr_names, repr_units):
if repr_unit:
out[repr_name] = repr_unit
return out
representation_component_names = property(get_representation_component_names)
representation_component_units = property(get_representation_component_units)
def _replicate(self, data, copy=False, **kwargs):
"""Base for replicating a frame, with possibly different attributes.
Produces a new instance of the frame using the attributes of the old
frame (unless overridden) and with the data given.
Parameters
----------
data : `~astropy.coordinates.BaseRepresentation` or None
Data to use in the new frame instance. If `None`, it will be
a data-less frame.
copy : bool, optional
Whether data and the attributes on the old frame should be copied
(default), or passed on by reference.
**kwargs
Any attributes that should be overridden.
"""
# This is to provide a slightly nicer error message if the user tries
# to use frame_obj.representation instead of frame_obj.data to get the
# underlying representation object [e.g., #2890]
if isinstance(data, type):
raise TypeError(
"Class passed as data instead of a representation instance. If you"
" called frame.representation, this returns the representation class."
" frame.data returns the instantiated object - you may want to use"
" this instead."
)
if copy and data is not None:
data = data.copy()
for attr in self.frame_attributes:
if attr not in self._attr_names_with_defaults and attr not in kwargs:
value = getattr(self, attr)
if copy:
value = value.copy()
kwargs[attr] = value
return self.__class__(data, copy=False, **kwargs)
def replicate(self, copy=False, **kwargs):
"""
Return a replica of the frame, optionally with new frame attributes.
The replica is a new frame object that has the same data as this frame
object and with frame attributes overridden if they are provided as extra
keyword arguments to this method. If ``copy`` is set to `True` then a
copy of the internal arrays will be made. Otherwise the replica will
use a reference to the original arrays when possible to save memory. The
internal arrays are normally not changeable by the user so in most cases
it should not be necessary to set ``copy`` to `True`.
Parameters
----------
copy : bool, optional
If True, the resulting object is a copy of the data. When False,
references are used where possible. This rule also applies to the
frame attributes.
**kwargs
Any additional keywords are treated as frame attributes to be set on the
new frame object.
Returns
-------
frameobj : `~astropy.coordinates.BaseCoordinateFrame` subclass instance
Replica of this object, but possibly with new frame attributes.
"""
return self._replicate(self.data, copy=copy, **kwargs)
def replicate_without_data(self, copy=False, **kwargs):
"""
Return a replica without data, optionally with new frame attributes.
The replica is a new frame object without data but with the same frame
attributes as this object, except where overridden by extra keyword
arguments to this method. The ``copy`` keyword determines if the frame
attributes are truly copied vs being references (which saves memory for
cases where frame attributes are large).
This method is essentially the converse of `realize_frame`.
Parameters
----------
copy : bool, optional
If True, the resulting object has copies of the frame attributes.
When False, references are used where possible.
**kwargs
Any additional keywords are treated as frame attributes to be set on the
new frame object.
Returns
-------
frameobj : `~astropy.coordinates.BaseCoordinateFrame` subclass instance
Replica of this object, but without data and possibly with new frame
attributes.
"""
return self._replicate(None, copy=copy, **kwargs)
def realize_frame(self, data, **kwargs):
"""
Generates a new frame with new data from another frame (which may or
may not have data). Roughly speaking, the converse of
`replicate_without_data`.
Parameters
----------
data : `~astropy.coordinates.BaseRepresentation`
The representation to use as the data for the new frame.
**kwargs
Any additional keywords are treated as frame attributes to be set on the
new frame object. In particular, `representation_type` can be specified.
Returns
-------
frameobj : `~astropy.coordinates.BaseCoordinateFrame` subclass instance
A new object in *this* frame, with the same frame attributes as
this one, but with the ``data`` as the coordinate data.
"""
return self._replicate(data, **kwargs)
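# Rough sketch (frame/representation classes assumed from astropy):
#   frame = FK5(equinox="J1975")                    # data-less frame
#   coords = frame.realize_frame(
#       r.UnitSphericalRepresentation(lon=10*u.deg, lat=20*u.deg))
#   # -> an FK5 instance with the same equinox, now carrying data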
def represent_as(self, base, s="base", in_frame_units=False):
"""
Generate and return a new representation of this frame's `data`
as a Representation object.
Note: In order to make an in-place change of the representation
of a Frame or SkyCoord object, set the ``representation``
attribute of that object to the desired new representation, or
use the ``set_representation_cls`` method to also set the differential.
Parameters
----------
base : subclass of BaseRepresentation or string
The type of representation to generate. Must be a *class*
(not an instance), or the string name of the representation
class.
s : subclass of `~astropy.coordinates.BaseDifferential`, str, optional
Class in which any velocities should be represented. Must be
a *class* (not an instance), or the string name of the
differential class. If equal to 'base' (default), inferred from
the base class. If `None`, all velocity information is dropped.
in_frame_units : bool, keyword-only
Force the representation units to match the specified units
particular to this frame
Returns
-------
newrep : BaseRepresentation-derived object
A new representation object of this frame's `data`.
Raises
------
AttributeError
If this object had no `data`
Examples
--------
>>> from astropy import units as u
>>> from astropy.coordinates import SkyCoord, CartesianRepresentation
>>> coord = SkyCoord(0*u.deg, 0*u.deg)
>>> coord.represent_as(CartesianRepresentation) # doctest: +FLOAT_CMP
<CartesianRepresentation (x, y, z) [dimensionless]
(1., 0., 0.)>
>>> coord.representation_type = CartesianRepresentation
>>> coord # doctest: +FLOAT_CMP
<SkyCoord (ICRS): (x, y, z) [dimensionless]
(1., 0., 0.)>
"""
# For backwards compatibility (because in_frame_units used to be the
# 2nd argument), we check to see if `s` is a boolean. If it is, we use
# its value as `in_frame_units` and warn about the changed argument
# position.
if isinstance(s, bool):
warnings.warn(
"The argument position for `in_frame_units` in `represent_as` has"
" changed. Use as a keyword argument if needed.",
AstropyWarning,
)
in_frame_units = s
s = "base"
# In the future, we may want to support more differentials, in which
# case one probably needs to define **kwargs above and use it here.
# But for now, we only care about the velocity.
repr_classes = _get_repr_classes(base=base, s=s)
representation_cls = repr_classes["base"]
# We only keep velocity information
if "s" in self.data.differentials:
# For the default 'base' option in which _get_repr_classes has
# given us a best guess based on the representation class, we only
# use it if the class we had already is incompatible.
if s == "base" and (
self.data.differentials["s"].__class__
in representation_cls._compatible_differentials
):
differential_cls = self.data.differentials["s"].__class__
else:
differential_cls = repr_classes["s"]
elif s is None or s == "base":
differential_cls = None
else:
raise TypeError(
"Frame data has no associated differentials (i.e. the frame has no"
" velocity data) - represent_as() only accepts a new representation."
)
if differential_cls:
cache_key = (
representation_cls.__name__,
differential_cls.__name__,
in_frame_units,
)
else:
cache_key = (representation_cls.__name__, in_frame_units)
cached_repr = self.cache["representation"].get(cache_key)
if not cached_repr:
if differential_cls:
# Sanity check to ensure we do not just drop radial
# velocity. TODO: should Representation.represent_as
# allow this transformation in the first place?
if (
isinstance(self.data, r.UnitSphericalRepresentation)
and issubclass(representation_cls, r.CartesianRepresentation)
and not isinstance(
self.data.differentials["s"],
(
r.UnitSphericalDifferential,
r.UnitSphericalCosLatDifferential,
r.RadialDifferential,
),
)
):
raise u.UnitConversionError(
"need a distance to retrieve a cartesian representation "
"when both radial velocity and proper motion are present, "
"since otherwise the units cannot match."
)
# TODO NOTE: only supports a single differential
data = self.data.represent_as(representation_cls, differential_cls)
diff = data.differentials["s"] # TODO: assumes velocity
else:
data = self.data.represent_as(representation_cls)
# If the new representation is known to this frame and has a defined
# set of names and units, then use that.
new_attrs = self.representation_info.get(representation_cls)
if new_attrs and in_frame_units:
datakwargs = {comp: getattr(data, comp) for comp in data.components}
for comp, new_attr_unit in zip(data.components, new_attrs["units"]):
if new_attr_unit:
datakwargs[comp] = datakwargs[comp].to(new_attr_unit)
data = data.__class__(copy=False, **datakwargs)
if differential_cls:
# the original differential
data_diff = self.data.differentials["s"]
# If the new differential is known to this frame and has a
# defined set of names and units, then use that.
new_attrs = self.representation_info.get(differential_cls)
if new_attrs and in_frame_units:
diffkwargs = {comp: getattr(diff, comp) for comp in diff.components}
for comp, new_attr_unit in zip(diff.components, new_attrs["units"]):
# Some special-casing to treat a situation where the
# input data has a UnitSphericalDifferential or a
# RadialDifferential. It is re-represented to the
# frame's differential class (which might be, e.g., a
# dimensional Differential), so we don't want to try to
# convert the empty component units
if (
isinstance(
data_diff,
(
r.UnitSphericalDifferential,
r.UnitSphericalCosLatDifferential,
),
)
and comp not in data_diff.__class__.attr_classes
):
continue
elif (
isinstance(data_diff, r.RadialDifferential)
and comp not in data_diff.__class__.attr_classes
):
continue
# Try to convert to requested units. Since that might
# not be possible (e.g., for a coordinate with proper
# motion but without distance, one cannot convert to a
# cartesian differential in km/s), we allow the unit
# conversion to fail. See gh-7028 for discussion.
if new_attr_unit and hasattr(diff, comp):
try:
diffkwargs[comp] = diffkwargs[comp].to(new_attr_unit)
except Exception:
pass
diff = diff.__class__(copy=False, **diffkwargs)
# Here we have to bypass using with_differentials() because
# it has a validation check. But because
# .representation_type and .differential_type don't point to
# the original classes, if the input differential is a
# RadialDifferential, it usually gets turned into a
# SphericalCosLatDifferential (or whatever the default is)
# with strange units for the d_lon and d_lat attributes.
# This then causes the dictionary key check to fail (i.e.
# comparison against `diff._get_deriv_key()`)
data._differentials.update({"s": diff})
self.cache["representation"][cache_key] = data
return self.cache["representation"][cache_key]
def transform_to(self, new_frame):
"""
Transform this object's coordinate data to a new frame.
Parameters
----------
new_frame : coordinate-like or `~astropy.coordinates.BaseCoordinateFrame` subclass instance
The frame to transform this coordinate frame into.
The frame class option is deprecated.
Returns
-------
transframe : coordinate-like
A new object with the coordinate data represented in the
``newframe`` system.
Raises
------
ValueError
If there is no possible transformation route.
"""
from .errors import ConvertError
if self._data is None:
raise ValueError("Cannot transform a frame with no data")
if (
getattr(self.data, "differentials", None)
and hasattr(self, "obstime")
and hasattr(new_frame, "obstime")
and np.any(self.obstime != new_frame.obstime)
):
raise NotImplementedError(
"You cannot transform a frame that has velocities to another frame at a"
" different obstime. If you think this should (or should not) be"
" possible, please comment at"
" https://github.com/astropy/astropy/issues/6280"
)
if isinstance(new_frame, type):
warnings.warn(
"Transforming a frame instance to a frame class (as opposed to another "
"frame instance) will not be supported in the future. Either "
"explicitly instantiate the target frame, or first convert the source "
"frame instance to a `astropy.coordinates.SkyCoord` and use its "
"`transform_to()` method.",
AstropyDeprecationWarning,
)
# Use the default frame attributes for this class
new_frame = new_frame()
if hasattr(new_frame, "_sky_coord_frame"):
# Input new_frame is not a frame instance or class and is most
# likely a SkyCoord object.
new_frame = new_frame._sky_coord_frame
trans = frame_transform_graph.get_transform(self.__class__, new_frame.__class__)
if trans is None:
if new_frame is self.__class__:
# no special transform needed, but should update frame info
return new_frame.realize_frame(self.data)
msg = "Cannot transform from {0} to {1}"
raise ConvertError(msg.format(self.__class__, new_frame.__class__))
return trans(self, new_frame)
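# Illustrative sketch (ICRS/Galactic assumed from astropy.coordinates):
#   icrs = ICRS(ra=10*u.deg, dec=20*u.deg)
#   gal = icrs.transform_to(Galactic())   # same point, expressed in Galactic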
def is_transformable_to(self, new_frame):
"""
Determines if this coordinate frame can be transformed to another
given frame.
Parameters
----------
new_frame : `~astropy.coordinates.BaseCoordinateFrame` subclass or instance
The proposed frame to transform into.
Returns
-------
transformable : bool or str
`True` if this can be transformed to ``new_frame``, `False` if
not, or the string 'same' if ``new_frame`` is the same system as
this object but no transformation is defined.
Notes
-----
A return value of 'same' means the transformation will work, but it will
just give back a copy of this object. The intended usage is::
if coord.is_transformable_to(some_unknown_frame):
coord2 = coord.transform_to(some_unknown_frame)
This will work even if ``some_unknown_frame`` turns out to be the same
frame class as ``coord``. This is intended for cases where the frame
is the same regardless of the frame attributes (e.g. ICRS), but be
aware that it *might* also indicate that someone forgot to define the
transformation between two objects of the same frame class but with
different attributes.
"""
new_frame_cls = new_frame if isinstance(new_frame, type) else type(new_frame)
trans = frame_transform_graph.get_transform(self.__class__, new_frame_cls)
if trans is None:
if new_frame_cls is self.__class__:
return "same"
else:
return False
else:
return True
def is_frame_attr_default(self, attrnm):
"""
Determine whether or not a frame attribute has its value because it's
the default value, or because this frame was created with that value
explicitly requested.
Parameters
----------
attrnm : str
The name of the attribute to check.
Returns
-------
isdefault : bool
True if the attribute ``attrnm`` has its value by default, False if
it was specified at creation of this frame.
"""
return attrnm in self._attr_names_with_defaults
@staticmethod
def _frameattr_equiv(left_fattr, right_fattr):
"""
Determine if two frame attributes are equivalent. Implemented as a
staticmethod mainly as a convenient location, although conceivably it
might be desirable for subclasses to override this behavior.
Primary purpose is to check for equality of representations. This
aspect can actually be simplified/removed now that representations have
equality defined.
Secondary purpose is to check for equality of coordinate attributes,
which first checks whether they themselves are in equivalent frames
before checking for equality in the normal fashion. This is because
checking for equality with non-equivalent frames raises an error.
"""
if left_fattr is right_fattr:
# shortcut if it's exactly the same object
return True
elif left_fattr is None or right_fattr is None:
# shortcut if one attribute is unspecified and the other isn't
return False
left_is_repr = isinstance(left_fattr, r.BaseRepresentationOrDifferential)
right_is_repr = isinstance(right_fattr, r.BaseRepresentationOrDifferential)
if left_is_repr and right_is_repr:
# both are representations.
if getattr(left_fattr, "differentials", False) or getattr(
right_fattr, "differentials", False
):
warnings.warn(
"Two representation frame attributes were checked for equivalence"
" when at least one of them has differentials. This yields False"
" even if the underlying representations are equivalent (although"
" this may change in future versions of Astropy)",
AstropyWarning,
)
return False
if isinstance(right_fattr, left_fattr.__class__):
# if same representation type, compare components.
return np.all(
[
(getattr(left_fattr, comp) == getattr(right_fattr, comp))
for comp in left_fattr.components
]
)
else:
# convert to cartesian and see if they match
return np.all(
left_fattr.to_cartesian().xyz == right_fattr.to_cartesian().xyz
)
elif left_is_repr or right_is_repr:
return False
left_is_coord = isinstance(left_fattr, BaseCoordinateFrame)
right_is_coord = isinstance(right_fattr, BaseCoordinateFrame)
if left_is_coord and right_is_coord:
# both are coordinates
if left_fattr.is_equivalent_frame(right_fattr):
return np.all(left_fattr == right_fattr)
else:
return False
elif left_is_coord or right_is_coord:
return False
return np.all(left_fattr == right_fattr)
def is_equivalent_frame(self, other):
"""
Checks if this object is the same frame as the ``other`` object.
To be the same frame, two objects must be the same frame class and have
the same frame attributes. Note that it does *not* matter what, if any,
data either object has.
Parameters
----------
other : :class:`~astropy.coordinates.BaseCoordinateFrame`
the other frame to check
Returns
-------
isequiv : bool
True if the frames are the same, False if not.
Raises
------
TypeError
If ``other`` isn't a `~astropy.coordinates.BaseCoordinateFrame` or subclass.
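        Examples
        --------
        A minimal illustrative sketch using the built-in ICRS and FK5 frame
        classes:
        >>> from astropy.coordinates import ICRS, FK5
        >>> ICRS().is_equivalent_frame(ICRS())
        True
        >>> FK5(equinox='J1975').is_equivalent_frame(FK5(equinox='J2000'))
        False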
"""
if self.__class__ == other.__class__:
for frame_attr_name in self.frame_attributes:
if not self._frameattr_equiv(
getattr(self, frame_attr_name), getattr(other, frame_attr_name)
):
return False
return True
elif not isinstance(other, BaseCoordinateFrame):
raise TypeError(
"Tried to do is_equivalent_frame on something that isn't a frame"
)
else:
return False
def __repr__(self):
frameattrs = self._frame_attrs_repr()
data_repr = self._data_repr()
if frameattrs:
frameattrs = f" ({frameattrs})"
if data_repr:
return f"<{self.__class__.__name__} Coordinate{frameattrs}: {data_repr}>"
else:
return f"<{self.__class__.__name__} Frame{frameattrs}>"
def _data_repr(self):
"""Returns a string representation of the coordinate data."""
if not self.has_data:
return ""
if self.representation_type:
if hasattr(self.representation_type, "_unit_representation") and isinstance(
self.data, self.representation_type._unit_representation
):
rep_cls = self.data.__class__
else:
rep_cls = self.representation_type
if "s" in self.data.differentials:
dif_cls = self.get_representation_cls("s")
dif_data = self.data.differentials["s"]
if isinstance(
dif_data,
(
r.UnitSphericalDifferential,
r.UnitSphericalCosLatDifferential,
r.RadialDifferential,
),
):
dif_cls = dif_data.__class__
else:
dif_cls = None
data = self.represent_as(rep_cls, dif_cls, in_frame_units=True)
data_repr = repr(data)
# Generate the list of component names out of the repr string
part1, _, remainder = data_repr.partition("(")
if remainder != "":
comp_str, _, part2 = remainder.partition(")")
comp_names = comp_str.split(", ")
# Swap in frame-specific component names
invnames = {
nmrepr: nmpref
for nmpref, nmrepr in self.representation_component_names.items()
}
for i, name in enumerate(comp_names):
comp_names[i] = invnames.get(name, name)
# Reassemble the repr string
data_repr = part1 + "(" + ", ".join(comp_names) + ")" + part2
else:
data = self.data
data_repr = repr(self.data)
if data_repr.startswith("<" + data.__class__.__name__):
# remove both the leading "<" and the space after the name, as well
# as the trailing ">"
data_repr = data_repr[(len(data.__class__.__name__) + 2) : -1]
else:
data_repr = "Data:\n" + data_repr
if "s" in self.data.differentials:
data_repr_spl = data_repr.split("\n")
if "has differentials" in data_repr_spl[-1]:
diffrepr = repr(data.differentials["s"]).split("\n")
if diffrepr[0].startswith("<"):
diffrepr[0] = " " + " ".join(diffrepr[0].split(" ")[1:])
for frm_nm, rep_nm in self.get_representation_component_names(
"s"
).items():
diffrepr[0] = diffrepr[0].replace(rep_nm, frm_nm)
if diffrepr[-1].endswith(">"):
diffrepr[-1] = diffrepr[-1][:-1]
data_repr_spl[-1] = "\n".join(diffrepr)
data_repr = "\n".join(data_repr_spl)
return data_repr
def _frame_attrs_repr(self):
"""
Returns a string representation of the frame's attributes, if any.
"""
attr_strs = []
for attribute_name in self.frame_attributes:
attr = getattr(self, attribute_name)
# Check to see if this object has a way of representing itself
# specific to being an attribute of a frame. (Note, this is not the
# Attribute class, it's the actual object).
if hasattr(attr, "_astropy_repr_in_frame"):
attrstr = attr._astropy_repr_in_frame()
else:
attrstr = str(attr)
attr_strs.append(f"{attribute_name}={attrstr}")
return ", ".join(attr_strs)
def _apply(self, method, *args, **kwargs):
"""Create a new instance, applying a method to the underlying data.
In typical usage, the method is any of the shape-changing methods for
`~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those
picking particular elements (``__getitem__``, ``take``, etc.), which
are all defined in `~astropy.utils.shapes.ShapedLikeNDArray`. It will be
applied to the underlying arrays in the representation (e.g., ``x``,
``y``, and ``z`` for `~astropy.coordinates.CartesianRepresentation`),
as well as to any frame attributes that have a shape, with the results
used to create a new instance.
Internally, it is also used to apply functions to the above parts
(in particular, `~numpy.broadcast_to`).
Parameters
----------
method : str or callable
If str, it is the name of a method that is applied to the internal
``components``. If callable, the function is applied.
*args : tuple
Any positional arguments for ``method``.
**kwargs : dict
Any keyword arguments for ``method``.
"""
def apply_method(value):
if isinstance(value, ShapedLikeNDArray):
return value._apply(method, *args, **kwargs)
else:
if callable(method):
return method(value, *args, **kwargs)
else:
return getattr(value, method)(*args, **kwargs)
new = super().__new__(self.__class__)
if hasattr(self, "_representation"):
new._representation = self._representation.copy()
new._attr_names_with_defaults = self._attr_names_with_defaults.copy()
for attr in self.frame_attributes:
_attr = "_" + attr
if attr in self._attr_names_with_defaults:
setattr(new, _attr, getattr(self, _attr))
else:
value = getattr(self, _attr)
if getattr(value, "shape", ()):
value = apply_method(value)
elif method == "copy" or method == "flatten":
# flatten should copy also for a single element array, but
# we cannot use it directly for array scalars, since it
# always returns a one-dimensional array. So, just copy.
value = copy.copy(value)
setattr(new, _attr, value)
if self.has_data:
new._data = apply_method(self.data)
else:
new._data = None
shapes = [
getattr(new, "_" + attr).shape
for attr in new.frame_attributes
if (
attr not in new._attr_names_with_defaults
and getattr(getattr(new, "_" + attr), "shape", ())
)
]
if shapes:
new._no_data_shape = (
check_broadcast(*shapes) if len(shapes) > 1 else shapes[0]
)
else:
new._no_data_shape = ()
return new
def __setitem__(self, item, value):
if self.__class__ is not value.__class__:
raise TypeError(
f"can only set from object of same class: {self.__class__.__name__} vs."
f" {value.__class__.__name__}"
)
if not self.is_equivalent_frame(value):
raise ValueError("can only set frame item from an equivalent frame")
if value._data is None:
raise ValueError("can only set frame with value that has data")
if self._data is None:
raise ValueError("cannot set frame which has no data")
if self.shape == ():
raise TypeError(
f"scalar '{self.__class__.__name__}' frame object "
"does not support item assignment"
)
if self._data is None:
raise ValueError("can only set frame if it has data")
if self._data.__class__ is not value._data.__class__:
raise TypeError(
"can only set from object of same class: "
f"{self._data.__class__.__name__} vs. {value._data.__class__.__name__}"
)
if self._data._differentials:
# Can this ever occur? (Same class but different differential keys).
# This exception is not tested since it is not clear how to generate it.
if self._data._differentials.keys() != value._data._differentials.keys():
raise ValueError("setitem value must have same differentials")
for key, self_diff in self._data._differentials.items():
if self_diff.__class__ is not value._data._differentials[key].__class__:
raise TypeError(
"can only set from object of same class: "
f"{self_diff.__class__.__name__} vs. "
f"{value._data._differentials[key].__class__.__name__}"
)
# Set representation data
self._data[item] = value._data
# Frame attributes required to be identical by is_equivalent_frame,
# no need to set them here.
self.cache.clear()
def __dir__(self):
"""
Override the builtin `dir` behavior to include representation
names.
TODO: dynamic representation transforms (i.e. include cylindrical et al.).
"""
return sorted(
set(super().__dir__())
| set(self.representation_component_names)
| set(self.get_representation_component_names("s"))
)
def __getattr__(self, attr):
"""
Allow access to attributes on the representation and differential as
found via ``self.get_representation_component_names``.
TODO: We should handle dynamic representation transforms here (e.g.,
`.cylindrical`) instead of defining properties as below.
"""
# attr == '_representation' is likely from the hasattr() test in the
# representation property which is used for
# self.representation_component_names.
#
# Prevent infinite recursion here.
if attr.startswith("_"):
return self.__getattribute__(attr) # Raise AttributeError.
repr_names = self.representation_component_names
if attr in repr_names:
if self._data is None:
# this raises the "no data" error by design - doing it this way means we
# don't have to replicate the error message here.
self.data # noqa: B018
rep = self.represent_as(self.representation_type, in_frame_units=True)
val = getattr(rep, repr_names[attr])
return val
diff_names = self.get_representation_component_names("s")
if attr in diff_names:
if self._data is None:
self.data # noqa: B018 # see above.
# TODO: this doesn't work for the case when there is only
# unitspherical information. The differential_type gets set to the
# default_differential, which expects full information, so the
# units don't work out
rep = self.represent_as(
in_frame_units=True, **self.get_representation_cls(None)
)
val = getattr(rep.differentials["s"], diff_names[attr])
return val
return self.__getattribute__(attr) # Raise AttributeError.
def __setattr__(self, attr, value):
# Don't slow down access of private attributes!
if not attr.startswith("_"):
if hasattr(self, "representation_info"):
repr_attr_names = set()
for representation_attr in self.representation_info.values():
repr_attr_names.update(representation_attr["names"])
if attr in repr_attr_names:
raise AttributeError(f"Cannot set any frame attribute {attr}")
super().__setattr__(attr, value)
def __eq__(self, value):
"""Equality operator for frame.
This implements strict equality and requires that the frames are
equivalent and that the representation data are exactly equal.
"""
if not isinstance(value, BaseCoordinateFrame):
return NotImplemented
is_equiv = self.is_equivalent_frame(value)
if self._data is None and value._data is None:
# For Frame with no data, == compare is same as is_equivalent_frame()
return is_equiv
if not is_equiv:
raise TypeError(
"cannot compare: objects must have equivalent frames: "
f"{self.replicate_without_data()} vs. {value.replicate_without_data()}"
)
if (value._data is None) != (self._data is None):
raise ValueError(
"cannot compare: one frame has data and the other does not"
)
return self._data == value._data
def __ne__(self, value):
return np.logical_not(self == value)
def separation(self, other):
"""
Computes on-sky separation between this coordinate and another.
.. note::
If the ``other`` coordinate object is in a different frame, it is
first transformed to the frame of this object. This can lead to
unintuitive behavior if not accounted for. Particularly of note is
that ``self.separation(other)`` and ``other.separation(self)`` may
not give the same answer in this case.
Parameters
----------
other : `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to get the separation to.
Returns
-------
sep : `~astropy.coordinates.Angle`
The on-sky separation between this and the ``other`` coordinate.
Notes
-----
The separation is calculated using the Vincenty formula, which
is stable at all locations, including poles and antipodes [1]_.
.. [1] https://en.wikipedia.org/wiki/Great-circle_distance
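        Examples
        --------
        A minimal illustrative sketch with arbitrary coordinates; the printed
        value is omitted since it depends on floating-point formatting:
        >>> from astropy import units as u
        >>> from astropy.coordinates import ICRS
        >>> c1 = ICRS(10 * u.deg, 20 * u.deg)
        >>> c2 = ICRS(12 * u.deg, 22 * u.deg)
        >>> sep = c1.separation(c2)  # Angle of a few degrees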
"""
from .angle_utilities import angular_separation
from .angles import Angle
self_unit_sph = self.represent_as(r.UnitSphericalRepresentation)
other_transformed = other.transform_to(self)
other_unit_sph = other_transformed.represent_as(r.UnitSphericalRepresentation)
# Get the separation as a Quantity, convert to Angle in degrees
sep = angular_separation(
self_unit_sph.lon, self_unit_sph.lat, other_unit_sph.lon, other_unit_sph.lat
)
return Angle(sep, unit=u.degree)
def separation_3d(self, other):
"""
Computes three dimensional separation between this coordinate
and another.
Parameters
----------
other : `~astropy.coordinates.BaseCoordinateFrame`
The coordinate system to get the distance to.
Returns
-------
sep : `~astropy.coordinates.Distance`
The real-space distance between these two coordinates.
Raises
------
ValueError
If this or the other coordinate do not have distances.
"""
from .distances import Distance
if issubclass(self.data.__class__, r.UnitSphericalRepresentation):
raise ValueError(
"This object does not have a distance; cannot compute 3d separation."
)
# do this first just in case the conversion somehow creates a distance
other_in_self_system = other.transform_to(self)
if issubclass(other_in_self_system.__class__, r.UnitSphericalRepresentation):
raise ValueError(
"The other object does not have a distance; "
"cannot compute 3d separation."
)
# drop the differentials to ensure they don't do anything odd in the
# subtraction
self_car = self.data.without_differentials().represent_as(
r.CartesianRepresentation
)
other_car = other_in_self_system.data.without_differentials().represent_as(
r.CartesianRepresentation
)
dist = (self_car - other_car).norm()
if dist.unit == u.one:
return dist
else:
return Distance(dist)
@property
def cartesian(self):
"""
Shorthand for a cartesian representation of the coordinates in this
object.
"""
# TODO: if representations are updated to use a full transform graph,
# the representation aliases should not be hard-coded like this
return self.represent_as("cartesian", in_frame_units=True)
@property
def cylindrical(self):
"""
Shorthand for a cylindrical representation of the coordinates in this
object.
"""
# TODO: if representations are updated to use a full transform graph,
# the representation aliases should not be hard-coded like this
return self.represent_as("cylindrical", in_frame_units=True)
@property
def spherical(self):
"""
Shorthand for a spherical representation of the coordinates in this
object.
"""
# TODO: if representations are updated to use a full transform graph,
# the representation aliases should not be hard-coded like this
return self.represent_as("spherical", in_frame_units=True)
@property
def sphericalcoslat(self):
"""
Shorthand for a spherical representation of the positional data and a
`~astropy.coordinates.SphericalCosLatDifferential` for the velocity
data in this object.
"""
# TODO: if representations are updated to use a full transform graph,
# the representation aliases should not be hard-coded like this
return self.represent_as("spherical", "sphericalcoslat", in_frame_units=True)
@property
def velocity(self):
"""
Shorthand for retrieving the Cartesian space-motion as a
`~astropy.coordinates.CartesianDifferential` object.
This is equivalent to calling ``self.cartesian.differentials['s']``.
"""
if "s" not in self.data.differentials:
raise ValueError(
"Frame has no associated velocity (Differential) data information."
)
return self.cartesian.differentials["s"]
@property
def proper_motion(self):
"""
Shorthand for the two-dimensional proper motion as a
`~astropy.units.Quantity` object with angular velocity units. In the
returned `~astropy.units.Quantity`, ``axis=0`` is the longitude/latitude
dimension so that ``.proper_motion[0]`` is the longitudinal proper
motion and ``.proper_motion[1]`` is latitudinal. The longitudinal proper
motion already includes the cos(latitude) term.
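        Examples
        --------
        A minimal illustrative sketch with arbitrary values (the full 3D
        position and velocity are given so that all components are well
        defined):
        >>> from astropy import units as u
        >>> from astropy.coordinates import ICRS
        >>> c = ICRS(ra=10 * u.deg, dec=20 * u.deg, distance=100 * u.pc,
        ...          pm_ra_cosdec=1 * u.mas / u.yr, pm_dec=2 * u.mas / u.yr,
        ...          radial_velocity=10 * u.km / u.s)
        >>> pm = c.proper_motion  # shape (2,): [pm_lon_coslat, pm_lat]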
"""
if "s" not in self.data.differentials:
raise ValueError(
"Frame has no associated velocity (Differential) data information."
)
sph = self.represent_as("spherical", "sphericalcoslat", in_frame_units=True)
pm_lon = sph.differentials["s"].d_lon_coslat
pm_lat = sph.differentials["s"].d_lat
return (
np.stack((pm_lon.value, pm_lat.to(pm_lon.unit).value), axis=0) * pm_lon.unit
)
@property
def radial_velocity(self):
"""
Shorthand for the radial or line-of-sight velocity as a
`~astropy.units.Quantity` object.
"""
if "s" not in self.data.differentials:
raise ValueError(
"Frame has no associated velocity (Differential) data information."
)
sph = self.represent_as("spherical", in_frame_units=True)
return sph.differentials["s"].d_distance
class GenericFrame(BaseCoordinateFrame):
"""
A frame object that can't store data but can hold any arbitrary frame
attributes. Mostly useful as a utility for the high-level class to store
intermediate frame attributes.
Parameters
----------
frame_attrs : dict
A dictionary of attributes to be used as the frame attributes for this
frame.
"""
name = None # it's not a "real" frame so it doesn't have a name
def __init__(self, frame_attrs):
self.frame_attributes = {}
for name, default in frame_attrs.items():
self.frame_attributes[name] = Attribute(default)
setattr(self, "_" + name, default)
super().__init__(None)
def __getattr__(self, name):
if "_" + name in self.__dict__:
return getattr(self, "_" + name)
else:
raise AttributeError(f"no {name}")
def __setattr__(self, name, value):
if name in self.frame_attributes:
raise AttributeError(f"can't set frame attribute '{name}'")
else:
super().__setattr__(name, value)
|
7ed59c442585d2293b6d4ee31e487686cdab2e8ea5db47501ef70e7a885417c4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import collections
import json
import socket
import urllib.error
import urllib.parse
import urllib.request
from warnings import warn
import numpy as np
from astropy import constants as consts
from astropy import units as u
from astropy.units.quantity import QuantityInfoBase
from astropy.utils import data
from astropy.utils.exceptions import AstropyUserWarning
from .angles import Angle, Latitude, Longitude
from .errors import UnknownSiteException
from .matrix_utilities import matrix_transpose
from .representation import (
CartesianDifferential,
CartesianRepresentation,
)
from .representation.geodetic import ELLIPSOIDS
__all__ = [
"EarthLocation",
]
GeodeticLocation = collections.namedtuple("GeodeticLocation", ["lon", "lat", "height"])
OMEGA_EARTH = (1.002_737_811_911_354_48 * u.cycle / u.day).to(
1 / u.s, u.dimensionless_angles()
)
"""
Rotational velocity of Earth, following SOFA's pvtob.
In UT1 seconds, this would be 2 pi / (24 * 3600), but we need the value
in SI seconds, so multiply by the ratio of stellar to solar day.
See Explanatory Supplement to the Astronomical Almanac, ed. P. Kenneth
Seidelmann (1992), University Science Books. The constant is the
conventional, exact one (IERS conventions 2003); see
http://hpiers.obspm.fr/eop-pc/index.php?index=constants.
"""
def _check_ellipsoid(ellipsoid=None, default="WGS84"):
if ellipsoid is None:
ellipsoid = default
if ellipsoid not in ELLIPSOIDS:
raise ValueError(f"Ellipsoid {ellipsoid} not among known ones ({ELLIPSOIDS})")
return ellipsoid
def _get_json_result(url, err_str, use_google):
# need to do this here to prevent a series of complicated circular imports
from .name_resolve import NameResolveError
try:
# Retrieve JSON response from Google maps API
resp = urllib.request.urlopen(url, timeout=data.conf.remote_timeout)
resp_data = json.loads(resp.read().decode("utf8"))
except urllib.error.URLError as e:
# This catches a timeout error, see:
# http://stackoverflow.com/questions/2712524/handling-urllib2s-timeout-python
if isinstance(e.reason, socket.timeout):
raise NameResolveError(err_str.format(msg="connection timed out")) from e
else:
raise NameResolveError(err_str.format(msg=e.reason)) from e
except socket.timeout:
# There are some cases where urllib2 does not catch socket.timeout
# especially while receiving response data on an already previously
# working request
raise NameResolveError(err_str.format(msg="connection timed out"))
if use_google:
results = resp_data.get("results", [])
if resp_data.get("status", None) != "OK":
raise NameResolveError(
err_str.format(msg="unknown failure with Google API")
)
else: # OpenStreetMap returns a list
results = resp_data
if not results:
raise NameResolveError(err_str.format(msg="no results returned"))
return results
class EarthLocationInfo(QuantityInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
_represent_as_dict_attrs = ("x", "y", "z", "ellipsoid")
def _construct_from_dict(self, map):
# Need to pop ellipsoid off and update post-instantiation. This is
# on the to-fix list in #4261.
ellipsoid = map.pop("ellipsoid")
out = self._parent_cls(**map)
out.ellipsoid = ellipsoid
return out
def new_like(self, cols, length, metadata_conflicts="warn", name=None):
"""
Return a new EarthLocation instance which is consistent with the
input ``cols`` and has ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
cols : list
List of input columns
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : EarthLocation (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Very similar to QuantityInfo.new_like, but the creation of the
        # map is different enough that this needs its own routine.
# Get merged info attributes shape, dtype, format, description.
attrs = self.merge_cols_attributes(
cols, metadata_conflicts, name, ("meta", "format", "description")
)
# The above raises an error if the dtypes do not match, but returns
# just the string representation, which is not useful, so remove.
attrs.pop("dtype")
# Make empty EarthLocation using the dtype and unit of the last column.
# Use zeros so we do not get problems for possible conversion to
# geodetic coordinates.
shape = (length,) + attrs.pop("shape")
data = u.Quantity(
np.zeros(shape=shape, dtype=cols[0].dtype), unit=cols[0].unit, copy=False
)
# Get arguments needed to reconstruct class
map = {
key: (data[key] if key in "xyz" else getattr(cols[-1], key))
for key in self._represent_as_dict_attrs
}
out = self._construct_from_dict(map)
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
class EarthLocation(u.Quantity):
"""
Location on the Earth.
Initialization is first attempted assuming geocentric (x, y, z) coordinates
are given; if that fails, another attempt is made assuming geodetic
coordinates (longitude, latitude, height above a reference ellipsoid).
    When using the geodetic forms, longitudes are measured increasing to the
east, so west longitudes are negative. Internally, the coordinates are
stored as geocentric.
To ensure a specific type of coordinates is used, use the corresponding
class methods (`from_geocentric` and `from_geodetic`) or initialize the
arguments with names (``x``, ``y``, ``z`` for geocentric; ``lon``, ``lat``,
``height`` for geodetic). See the class methods for details.
Notes
-----
This class fits into the coordinates transformation framework in that it
encodes a position on the `~astropy.coordinates.ITRS` frame. To get a
proper `~astropy.coordinates.ITRS` object from this object, use the ``itrs``
property.
"""
_ellipsoid = "WGS84"
_location_dtype = np.dtype({"names": ["x", "y", "z"], "formats": [np.float64] * 3})
_array_dtype = np.dtype((np.float64, (3,)))
_site_registry = None
info = EarthLocationInfo()
def __new__(cls, *args, **kwargs):
# TODO: needs copy argument and better dealing with inputs.
if len(args) == 1 and len(kwargs) == 0 and isinstance(args[0], EarthLocation):
return args[0].copy()
try:
self = cls.from_geocentric(*args, **kwargs)
except (u.UnitsError, TypeError) as exc_geocentric:
try:
self = cls.from_geodetic(*args, **kwargs)
except Exception as exc_geodetic:
raise TypeError(
"Coordinates could not be parsed as either "
"geocentric or geodetic, with respective "
f'exceptions "{exc_geocentric}" and "{exc_geodetic}"'
)
return self
@classmethod
def from_geocentric(cls, x, y, z, unit=None):
"""
Location on Earth, initialized from geocentric coordinates.
Parameters
----------
x, y, z : `~astropy.units.Quantity` or array-like
Cartesian coordinates. If not quantities, ``unit`` should be given.
unit : unit-like or None
Physical unit of the coordinate values. If ``x``, ``y``, and/or
``z`` are quantities, they will be converted to this unit.
Raises
------
astropy.units.UnitsError
If the units on ``x``, ``y``, and ``z`` do not match or an invalid
unit is given.
ValueError
If the shapes of ``x``, ``y``, and ``z`` do not match.
TypeError
If ``x`` is not a `~astropy.units.Quantity` and no unit is given.
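        Examples
        --------
        A minimal illustrative sketch with arbitrary values:
        >>> from astropy import units as u
        >>> from astropy.coordinates import EarthLocation
        >>> loc = EarthLocation.from_geocentric(1000, 2000, 3000, unit=u.km)
        >>> loc = EarthLocation.from_geocentric(1000 * u.km, 2000 * u.km, 3000 * u.km)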
"""
if unit is None:
try:
unit = x.unit
except AttributeError:
raise TypeError(
"Geocentric coordinates should be Quantities "
"unless an explicit unit is given."
) from None
else:
unit = u.Unit(unit)
if unit.physical_type != "length":
raise u.UnitsError("Geocentric coordinates should be in units of length.")
try:
x = u.Quantity(x, unit, copy=False)
y = u.Quantity(y, unit, copy=False)
z = u.Quantity(z, unit, copy=False)
except u.UnitsError:
raise u.UnitsError("Geocentric coordinate units should all be consistent.")
x, y, z = np.broadcast_arrays(x, y, z)
struc = np.empty(x.shape, cls._location_dtype)
struc["x"], struc["y"], struc["z"] = x, y, z
return super().__new__(cls, struc, unit, copy=False)
@classmethod
def from_geodetic(cls, lon, lat, height=0.0, ellipsoid=None):
"""
Location on Earth, initialized from geodetic coordinates.
Parameters
----------
lon : `~astropy.coordinates.Longitude` or float
Earth East longitude. Can be anything that initialises an
`~astropy.coordinates.Angle` object (if float, in degrees).
lat : `~astropy.coordinates.Latitude` or float
Earth latitude. Can be anything that initialises an
`~astropy.coordinates.Latitude` object (if float, in degrees).
height : `~astropy.units.Quantity` ['length'] or float, optional
Height above reference ellipsoid (if float, in meters; default: 0).
ellipsoid : str, optional
Name of the reference ellipsoid to use (default: 'WGS84').
Available ellipsoids are: 'WGS84', 'GRS80', 'WGS72'.
Raises
------
astropy.units.UnitsError
If the units on ``lon`` and ``lat`` are inconsistent with angular
ones, or that on ``height`` with a length.
ValueError
If ``lon``, ``lat``, and ``height`` do not have the same shape, or
if ``ellipsoid`` is not recognized as among the ones implemented.
Notes
-----
For the conversion to geocentric coordinates, the ERFA routine
``gd2gc`` is used. See https://github.com/liberfa/erfa
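        Examples
        --------
        A minimal illustrative sketch, using approximate values for the
        location of the Keck Observatory:
        >>> from astropy import units as u
        >>> from astropy.coordinates import EarthLocation
        >>> keck = EarthLocation.from_geodetic(lon=-155.478 * u.deg,
        ...                                    lat=19.828 * u.deg,
        ...                                    height=4160 * u.m)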
"""
ellipsoid = _check_ellipsoid(ellipsoid, default=cls._ellipsoid)
# As wrapping fails on readonly input, we do so manually
lon = Angle(lon, u.degree, copy=False).wrap_at(180 * u.degree)
lat = Latitude(lat, u.degree, copy=False)
# don't convert to m by default, so we can use the height unit below.
if not isinstance(height, u.Quantity):
height = u.Quantity(height, u.m, copy=False)
# get geocentric coordinates.
geodetic = ELLIPSOIDS[ellipsoid](lon, lat, height, copy=False)
xyz = geodetic.to_cartesian().get_xyz(xyz_axis=-1) << height.unit
self = xyz.view(cls._location_dtype, cls).reshape(geodetic.shape)
self._ellipsoid = ellipsoid
return self
@classmethod
def of_site(cls, site_name, *, refresh_cache=False):
"""
Return an object of this class for a known observatory/site by name.
This is intended as a quick convenience function to get basic site
information, not a fully-featured exhaustive registry of observatories
and all their properties.
Additional information about the site is stored in the ``.info.meta``
dictionary of sites obtained using this method (see the examples below).
.. note::
This function is meant to access the site registry from the astropy
data server, which is saved in the user's local cache. If you would
like a site to be added there, issue a pull request to the
`astropy-data repository <https://github.com/astropy/astropy-data>`_ .
            If the cache already exists, the function will use it even if the
            version in the astropy-data repository has been updated, unless the
            ``refresh_cache=True`` option is used. If there is no cache and the
online version cannot be reached, this function falls back on a
built-in list, which currently only contains the Greenwich Royal
Observatory as an example case.
Parameters
----------
site_name : str
Name of the observatory (case-insensitive).
refresh_cache : bool, optional
If `True`, force replacement of the cached registry with a
newly downloaded version. (Default: `False`)
.. versionadded:: 5.3
Returns
-------
site : `~astropy.coordinates.EarthLocation` (or subclass) instance
The location of the observatory. The returned class will be the same
as this class.
Examples
--------
>>> from astropy.coordinates import EarthLocation
>>> keck = EarthLocation.of_site('Keck Observatory') # doctest: +REMOTE_DATA
>>> keck.geodetic # doctest: +REMOTE_DATA +FLOAT_CMP
GeodeticLocation(lon=<Longitude -155.47833333 deg>, lat=<Latitude 19.82833333 deg>, height=<Quantity 4160. m>)
>>> keck.info # doctest: +REMOTE_DATA
name = W. M. Keck Observatory
dtype = (float64, float64, float64)
unit = m
class = EarthLocation
n_bad = 0
>>> keck.info.meta # doctest: +REMOTE_DATA
{'source': 'IRAF Observatory Database', 'timezone': 'US/Hawaii'}
See Also
--------
get_site_names : the list of sites that this function can access
"""
registry = cls._get_site_registry(force_download=refresh_cache)
try:
el = registry[site_name]
except UnknownSiteException as e:
raise UnknownSiteException(
e.site, "EarthLocation.get_site_names", close_names=e.close_names
) from e
if cls is el.__class__:
return el
else:
newel = cls.from_geodetic(*el.to_geodetic())
newel.info.name = el.info.name
return newel
@classmethod
def of_address(cls, address, get_height=False, google_api_key=None):
"""
Return an object of this class for a given address by querying either
the OpenStreetMap Nominatim tool [1]_ (default) or the Google geocoding
API [2]_, which requires a specified API key.
This is intended as a quick convenience function to get easy access to
locations. If you need to specify a precise location, you should use the
initializer directly and pass in a longitude, latitude, and elevation.
In the background, this just issues a web query to either of
the APIs noted above. This is not meant to be abused! Both
OpenStreetMap and Google use IP-based query limiting and will ban your
IP if you send more than a few thousand queries per hour [2]_.
.. warning::
If the query returns more than one location (e.g., searching on
``address='springfield'``), this function will use the **first**
returned location.
Parameters
----------
address : str
The address to get the location for. As per the Google maps API,
this can be a fully specified street address (e.g., 123 Main St.,
New York, NY) or a city name (e.g., Danbury, CT), or etc.
get_height : bool, optional
This only works when using the Google API! See the ``google_api_key``
block below. Use the retrieved location to perform a second query to
the Google maps elevation API to retrieve the height of the input
address [3]_.
google_api_key : str, optional
A Google API key with the Geocoding API and (optionally) the
elevation API enabled. See [4]_ for more information.
Returns
-------
location : `~astropy.coordinates.EarthLocation` (or subclass) instance
The location of the input address.
            The returned instance will be of the same type as this class.
References
----------
.. [1] https://nominatim.openstreetmap.org/
.. [2] https://developers.google.com/maps/documentation/geocoding/start
.. [3] https://developers.google.com/maps/documentation/elevation/start
.. [4] https://developers.google.com/maps/documentation/geocoding/get-api-key
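        Examples
        --------
        An illustrative sketch; these calls issue web queries, so the
        doctests are skipped (the API key below is a placeholder):
        >>> from astropy.coordinates import EarthLocation
        >>> loc = EarthLocation.of_address('Danbury, CT')  # doctest: +SKIP
        >>> loc = EarthLocation.of_address('Danbury, CT', get_height=True,
        ...                                google_api_key='YOUR-API-KEY')  # doctest: +SKIP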
"""
use_google = google_api_key is not None
# Fail fast if invalid options are passed:
if not use_google and get_height:
raise ValueError(
"Currently, `get_height` only works when using the Google geocoding"
" API, which requires passing a Google API key with `google_api_key`."
" See:"
" https://developers.google.com/maps/documentation/geocoding/get-api-key"
" for information on obtaining an API key."
)
if use_google: # Google
pars = urllib.parse.urlencode({"address": address, "key": google_api_key})
geo_url = f"https://maps.googleapis.com/maps/api/geocode/json?{pars}"
else: # OpenStreetMap
pars = urllib.parse.urlencode({"q": address, "format": "json"})
geo_url = f"https://nominatim.openstreetmap.org/search?{pars}"
# get longitude and latitude location
err_str = f"Unable to retrieve coordinates for address '{address}'; {{msg}}"
geo_result = _get_json_result(geo_url, err_str=err_str, use_google=use_google)
if use_google:
loc = geo_result[0]["geometry"]["location"]
lat = loc["lat"]
lon = loc["lng"]
else:
loc = geo_result[0]
lat = float(loc["lat"]) # strings are returned by OpenStreetMap
lon = float(loc["lon"])
if get_height:
pars = {"locations": f"{lat:.8f},{lon:.8f}", "key": google_api_key}
pars = urllib.parse.urlencode(pars)
ele_url = f"https://maps.googleapis.com/maps/api/elevation/json?{pars}"
err_str = f"Unable to retrieve elevation for address '{address}'; {{msg}}"
ele_result = _get_json_result(
ele_url, err_str=err_str, use_google=use_google
)
height = ele_result[0]["elevation"] * u.meter
else:
height = 0.0
return cls.from_geodetic(lon=lon * u.deg, lat=lat * u.deg, height=height)
@classmethod
def get_site_names(cls, *, refresh_cache=False):
"""
Get list of names of observatories for use with
`~astropy.coordinates.EarthLocation.of_site`.
.. note::
This function is meant to access the site registry from the astropy
data server, which is saved in the user's local cache. If you would
like a site to be added there, issue a pull request to the
`astropy-data repository <https://github.com/astropy/astropy-data>`_ .
            If the cache already exists, the function will use it even if the
            version in the astropy-data repository has been updated, unless the
            ``refresh_cache=True`` option is used. If there is no cache and the
online version cannot be reached, this function falls back on a
built-in list, which currently only contains the Greenwich Royal
Observatory as an example case.
Parameters
----------
refresh_cache : bool, optional
If `True`, force replacement of the cached registry with a
newly downloaded version. (Default: `False`)
.. versionadded:: 5.3
Returns
-------
names : list of str
List of valid observatory names
See Also
--------
of_site : Gets the actual location object for one of the sites names
this returns.
"""
return cls._get_site_registry(force_download=refresh_cache).names
@classmethod
def _get_site_registry(cls, force_download=False, force_builtin=False):
"""
Gets the site registry. The first time this either downloads or loads
from the data file packaged with astropy. Subsequent calls will use the
cached version unless explicitly overridden.
Parameters
----------
force_download : bool or str
If not False, force replacement of the cached registry with a
downloaded version. If a str, that will be used as the URL to
download from (if just True, the default URL will be used).
force_builtin : bool
If True, load from the data file bundled with astropy and set the
cache to that.
Returns
-------
reg : astropy.coordinates.sites.SiteRegistry
"""
# need to do this here at the bottom to avoid circular dependencies
from .sites import get_builtin_sites, get_downloaded_sites
if force_builtin and force_download:
raise ValueError("Cannot have both force_builtin and force_download True")
if force_builtin:
cls._site_registry = get_builtin_sites()
else:
if force_download or not cls._site_registry:
try:
if isinstance(force_download, str):
cls._site_registry = get_downloaded_sites(force_download)
else:
cls._site_registry = get_downloaded_sites()
except OSError:
if force_download:
raise
msg = (
"Could not access the main site list. Falling back on the "
"built-in version, which is rather limited. If you want to "
"retry the download, use the option 'refresh_cache=True'."
)
warn(msg, AstropyUserWarning)
cls._site_registry = get_builtin_sites()
return cls._site_registry
@property
def ellipsoid(self):
"""The default ellipsoid used to convert to geodetic coordinates."""
return self._ellipsoid
@ellipsoid.setter
def ellipsoid(self, ellipsoid):
self._ellipsoid = _check_ellipsoid(ellipsoid)
@property
def geodetic(self):
"""Convert to geodetic coordinates for the default ellipsoid."""
return self.to_geodetic()
def to_geodetic(self, ellipsoid=None):
"""Convert to geodetic coordinates.
Parameters
----------
ellipsoid : str, optional
Reference ellipsoid to use. Default is the one the coordinates
were initialized with. Available are: 'WGS84', 'GRS80', 'WGS72'
Returns
-------
lon, lat, height : `~astropy.units.Quantity`
The tuple is a ``GeodeticLocation`` namedtuple and is comprised of
instances of `~astropy.coordinates.Longitude`,
`~astropy.coordinates.Latitude`, and `~astropy.units.Quantity`.
Raises
------
ValueError
if ``ellipsoid`` is not recognized as among the ones implemented.
Notes
-----
For the conversion to geodetic coordinates, the ERFA routine
``gc2gd`` is used. See https://github.com/liberfa/erfa
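        Examples
        --------
        A minimal illustrative sketch with arbitrary geocentric values; the
        result is a ``GeodeticLocation`` namedtuple:
        >>> from astropy import units as u
        >>> from astropy.coordinates import EarthLocation
        >>> loc = EarthLocation.from_geocentric(6378 * u.km, 0 * u.km, 0 * u.km)
        >>> lon, lat, height = loc.to_geodetic()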
"""
ellipsoid = _check_ellipsoid(ellipsoid, default=self.ellipsoid)
xyz = self.view(self._array_dtype, u.Quantity)
llh = CartesianRepresentation(xyz, xyz_axis=-1, copy=False).represent_as(
ELLIPSOIDS[ellipsoid]
)
return GeodeticLocation(
Longitude(llh.lon, u.deg, wrap_angle=180 * u.deg, copy=False),
llh.lat << u.deg,
llh.height << self.unit,
)
@property
def lon(self):
"""Longitude of the location, for the default ellipsoid."""
return self.geodetic[0]
@property
def lat(self):
"""Latitude of the location, for the default ellipsoid."""
return self.geodetic[1]
@property
def height(self):
"""Height of the location, for the default ellipsoid."""
return self.geodetic[2]
# mostly for symmetry with geodetic and to_geodetic.
@property
def geocentric(self):
"""Convert to a tuple with X, Y, and Z as quantities."""
return self.to_geocentric()
def to_geocentric(self):
"""Convert to a tuple with X, Y, and Z as quantities."""
return (self.x, self.y, self.z)
def get_itrs(self, obstime=None, location=None):
"""
Generates an `~astropy.coordinates.ITRS` object with the location of
this object at the requested ``obstime``, either geocentric, or
topocentric relative to a given ``location``.
Parameters
----------
obstime : `~astropy.time.Time` or None
The ``obstime`` to apply to the new `~astropy.coordinates.ITRS`, or
if None, the default ``obstime`` will be used.
location : `~astropy.coordinates.EarthLocation` or None
A possible observer's location, for a topocentric ITRS position.
If not given (default), a geocentric ITRS object will be created.
Returns
-------
itrs : `~astropy.coordinates.ITRS`
The new object in the ITRS frame, either geocentric or topocentric
relative to the given ``location``.
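        Examples
        --------
        A minimal illustrative sketch with an arbitrary location and time:
        >>> from astropy import units as u
        >>> from astropy.time import Time
        >>> from astropy.coordinates import EarthLocation
        >>> loc = EarthLocation.from_geodetic(lon=10 * u.deg, lat=45 * u.deg)
        >>> itrs = loc.get_itrs(obstime=Time('2020-01-01'))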
"""
# Broadcast for a single position at multiple times, but don't attempt
# to be more general here.
if obstime and self.size == 1 and obstime.shape:
self = np.broadcast_to(self, obstime.shape, subok=True)
# do this here to prevent a series of complicated circular imports
from .builtin_frames import ITRS
if location is None:
# No location provided, return geocentric ITRS coordinates
return ITRS(x=self.x, y=self.y, z=self.z, obstime=obstime)
else:
return ITRS(
self.x - location.x,
self.y - location.y,
self.z - location.z,
copy=False,
obstime=obstime,
location=location,
)
itrs = property(
get_itrs,
doc="""An `~astropy.coordinates.ITRS` object
for the location of this object at the
default ``obstime``.""",
)
def get_gcrs(self, obstime):
"""GCRS position with velocity at ``obstime`` as a GCRS coordinate.
Parameters
----------
obstime : `~astropy.time.Time`
The ``obstime`` to calculate the GCRS position/velocity at.
Returns
-------
gcrs : `~astropy.coordinates.GCRS` instance
With velocity included.
"""
# do this here to prevent a series of complicated circular imports
from .builtin_frames import GCRS
loc, vel = self.get_gcrs_posvel(obstime)
loc.differentials["s"] = CartesianDifferential.from_cartesian(vel)
return GCRS(loc, obstime=obstime)
def _get_gcrs_posvel(self, obstime, ref_to_itrs, gcrs_to_ref):
"""Calculate GCRS position and velocity given transformation matrices.
The reference frame z axis must point to the Celestial Intermediate Pole
(as is the case for CIRS and TETE).
This private method is used in intermediate_rotation_transforms,
where some of the matrices are already available for the coordinate
transformation.
The method is faster by an order of magnitude than just adding a zero
velocity to ITRS and transforming to GCRS, because it avoids calculating
the velocity via finite differencing of the results of the transformation
at three separate times.
"""
# The simplest route is to transform to the reference frame where the
# z axis is properly aligned with the Earth's rotation axis (CIRS or
# TETE), then calculate the velocity, and then transform this
# reference position and velocity to GCRS. For speed, though, we
# transform the coordinates to GCRS in one step, and calculate the
# velocities by rotating around the earth's axis transformed to GCRS.
ref_to_gcrs = matrix_transpose(gcrs_to_ref)
itrs_to_gcrs = ref_to_gcrs @ matrix_transpose(ref_to_itrs)
# Earth's rotation vector in the ref frame is rot_vec_ref = (0,0,OMEGA_EARTH),
# so in GCRS it is rot_vec_gcrs[..., 2] @ OMEGA_EARTH.
rot_vec_gcrs = CartesianRepresentation(
ref_to_gcrs[..., 2] * OMEGA_EARTH, xyz_axis=-1, copy=False
)
# Get the position in the GCRS frame.
# Since we just need the cartesian representation of ITRS, avoid get_itrs().
itrs_cart = CartesianRepresentation(self.x, self.y, self.z, copy=False)
pos = itrs_cart.transform(itrs_to_gcrs)
vel = rot_vec_gcrs.cross(pos)
return pos, vel
def get_gcrs_posvel(self, obstime):
"""
Calculate the GCRS position and velocity of this object at the
requested ``obstime``.
Parameters
----------
obstime : `~astropy.time.Time`
The ``obstime`` to calculate the GCRS position/velocity at.
Returns
-------
obsgeoloc : `~astropy.coordinates.CartesianRepresentation`
The GCRS position of the object
obsgeovel : `~astropy.coordinates.CartesianRepresentation`
The GCRS velocity of the object
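        Examples
        --------
        A minimal illustrative sketch with an arbitrary location and time;
        the call is skipped in doctests since it may require up-to-date IERS
        Earth-rotation data:
        >>> from astropy import units as u
        >>> from astropy.time import Time
        >>> from astropy.coordinates import EarthLocation
        >>> loc = EarthLocation.from_geodetic(lon=0 * u.deg, lat=0 * u.deg)
        >>> pos, vel = loc.get_gcrs_posvel(Time('2020-01-01'))  # doctest: +SKIP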
"""
# Local import to prevent circular imports.
from .builtin_frames.intermediate_rotation_transforms import (
cirs_to_itrs_mat,
gcrs_to_cirs_mat,
)
# Get gcrs_posvel by transforming via CIRS (slightly faster than TETE).
return self._get_gcrs_posvel(
obstime, cirs_to_itrs_mat(obstime), gcrs_to_cirs_mat(obstime)
)
def gravitational_redshift(
self, obstime, bodies=["sun", "jupiter", "moon"], masses={}
):
"""Return the gravitational redshift at this EarthLocation.
Calculates the gravitational redshift, of order 3 m/s, due to the
requested solar system bodies.
Parameters
----------
obstime : `~astropy.time.Time`
The ``obstime`` to calculate the redshift at.
bodies : iterable, optional
The bodies (other than the Earth) to include in the redshift
calculation. List elements should be any body name
`get_body_barycentric` accepts. Defaults to Jupiter, the Sun, and
the Moon. Earth is always included (because the class represents
an *Earth* location).
masses : dict[str, `~astropy.units.Quantity`], optional
The mass or gravitational parameters (G * mass) to assume for the
bodies requested in ``bodies``. Can be used to override the
defaults for the Sun, Jupiter, the Moon, and the Earth, or to
pass in masses for other bodies.
Returns
-------
redshift : `~astropy.units.Quantity`
Gravitational redshift in velocity units at given obstime.
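        Examples
        --------
        A minimal illustrative sketch with an arbitrary location and time;
        the result is of order a few m/s:
        >>> from astropy import units as u
        >>> from astropy.time import Time
        >>> from astropy.coordinates import EarthLocation
        >>> loc = EarthLocation.from_geodetic(lon=0 * u.deg, lat=45 * u.deg)
        >>> zg = loc.gravitational_redshift(Time('2020-01-01'))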
"""
# needs to be here to avoid circular imports
from .solar_system import get_body_barycentric
bodies = list(bodies)
# Ensure earth is included and last in the list.
if "earth" in bodies:
bodies.remove("earth")
bodies.append("earth")
_masses = {
"sun": consts.GM_sun,
"jupiter": consts.GM_jup,
"moon": consts.G * 7.34767309e22 * u.kg,
"earth": consts.GM_earth,
}
_masses.update(masses)
GMs = []
M_GM_equivalency = (u.kg, u.Unit(consts.G * u.kg))
for body in bodies:
try:
GMs.append(_masses[body].to(u.m**3 / u.s**2, [M_GM_equivalency]))
except KeyError as err:
raise KeyError(f'body "{body}" does not have a mass.') from err
except u.UnitsError as exc:
exc.args += (
(
'"masses" argument values must be masses or '
"gravitational parameters."
),
)
raise
positions = [get_body_barycentric(name, obstime) for name in bodies]
# Calculate distances to objects other than earth.
distances = [(pos - positions[-1]).norm() for pos in positions[:-1]]
# Append distance from Earth's center for Earth's contribution.
distances.append(CartesianRepresentation(self.geocentric).norm())
# Get redshifts due to all objects.
redshifts = [
-GM / consts.c / distance for (GM, distance) in zip(GMs, distances)
]
# Reverse order of summing, to go from small to big, and to get
# "earth" first, which gives m/s as unit.
return sum(redshifts[::-1])
@property
def x(self):
"""The X component of the geocentric coordinates."""
return self["x"]
@property
def y(self):
"""The Y component of the geocentric coordinates."""
return self["y"]
@property
def z(self):
"""The Z component of the geocentric coordinates."""
return self["z"]
def __getitem__(self, item):
result = super().__getitem__(item)
if result.dtype is self.dtype:
return result.view(self.__class__)
else:
return result.view(u.Quantity)
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
if hasattr(obj, "_ellipsoid"):
self._ellipsoid = obj._ellipsoid
def __len__(self):
if self.shape == ():
raise IndexError("0-d EarthLocation arrays cannot be indexed")
else:
return super().__len__()
def _to_value(self, unit, equivalencies=[]):
"""Helper method for to and to_value."""
# Conversion to another unit in both ``to`` and ``to_value`` goes
# via this routine. To make the regular quantity routines work, we
# temporarily turn the structured array into a regular one.
array_view = self.view(self._array_dtype, np.ndarray)
if equivalencies == []:
equivalencies = self._equivalencies
new_array = self.unit.to(unit, array_view, equivalencies=equivalencies)
return new_array.view(self.dtype).reshape(self.shape)
|
2343f3184180316c9174d7753a48b1c768d628ab66692e40a347f95716898395 | """
Module for parsing astronomical object names to extract embedded coordinates,
e.g. '2MASS J06495091-0737408'.
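Example (an illustrative sketch; the module path within astropy is assumed):
>>> from astropy.coordinates.jparser import to_skycoord  # doctest: +SKIP
>>> coord = to_skycoord('2MASS J06495091-0737408')  # doctest: +SKIP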
"""
import re
import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
RA_REGEX = r"()([0-2]\d)([0-5]\d)([0-5]\d)\.?(\d{0,3})"
DEC_REGEX = r"([+-])(\d{1,2})([0-5]\d)([0-5]\d)\.?(\d{0,3})"
JCOORD_REGEX = "(.*?J)" + RA_REGEX + DEC_REGEX
JPARSER = re.compile(JCOORD_REGEX)
def _sexagesimal(g):
    # convert matched regex groups to sexagesimal array
sign, h, m, s, frac = g
sign = -1 if (sign == "-") else 1
s = f"{s}.{frac}"
return sign * np.array([h, m, s], float)
def search(name, raise_=False):
"""Regex match for coordinates in name."""
# extract the coordinate data from name
match = JPARSER.search(name)
if match is None and raise_:
raise ValueError("No coordinate match found!")
return match
def to_ra_dec_angles(name):
"""get RA in hourangle and DEC in degrees by parsing name."""
groups = search(name, True).groups()
prefix, hms, dms = np.split(groups, [1, 6])
ra = (_sexagesimal(hms) / (1, 60, 60 * 60) * u.hourangle).sum()
dec = (_sexagesimal(dms) * (u.deg, u.arcmin, u.arcsec)).sum()
return ra, dec
def to_skycoord(name, frame="icrs"):
"""Convert to `name` to `SkyCoords` object."""
return SkyCoord(*to_ra_dec_angles(name), frame=frame)
def shorten(name):
"""Produce a shortened version of the full object name.
The shortened name is built from the prefix (usually the survey name) and RA (hour,
minute), DEC (deg, arcmin) parts.
e.g.: '2MASS J06495091-0737408' --> '2MASS J0649-0737'
Parameters
----------
name : str
Full object name with J-coords embedded.
Returns
-------
    shortName : str
        The shortened name built from the prefix and truncated RA/DEC parts.
"""
match = search(name)
return "".join(match.group(1, 3, 4, 7, 8, 9))
|
5c6751a3e9c23bcf4073818046f56cfc1efde7a1587421af1f8bbe014a7da502 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides the tools used to internally run the astropy test suite
from the installed astropy. It makes use of the `pytest`_ testing framework.
"""
import os
import pickle
import pytest
from astropy.units import allclose as quantity_allclose # noqa: F401
from astropy.utils.compat import PYTHON_LT_3_11
# For backward-compatibility with affiliated packages
from .runner import TestRunner # noqa: F401
__all__ = [
"assert_follows_unicode_guidelines",
"assert_quantity_allclose",
"check_pickling_recovery",
"pickle_protocol",
"generic_recursive_equality_test",
]
def _save_coverage(cov, result, rootdir, testing_path):
"""
This method is called after the tests have been run in coverage mode
to cleanup and then save the coverage data and report.
"""
from astropy.utils.console import color_print
if result != 0:
return
# The coverage report includes the full path to the temporary
# directory, so we replace all the paths with the true source
# path. Note that this will not work properly for packages that still
# rely on 2to3.
try:
# Coverage 4.0: _harvest_data has been renamed to get_data, the
# lines dict is private
cov.get_data()
except AttributeError:
# Coverage < 4.0
cov._harvest_data()
lines = cov.data.lines
else:
lines = cov.data._lines
for key in list(lines.keys()):
new_path = os.path.relpath(
os.path.realpath(key), os.path.realpath(testing_path)
)
new_path = os.path.abspath(os.path.join(rootdir, new_path))
lines[new_path] = lines.pop(key)
color_print("Saving coverage data in .coverage...", "green")
cov.save()
color_print("Saving HTML coverage report in htmlcov...", "green")
cov.html_report(directory=os.path.join(rootdir, "htmlcov"))
def assert_follows_unicode_guidelines(x, roundtrip=None):
"""
Test that an object follows our Unicode policy. See
"Unicode guidelines" in the coding guidelines.
Parameters
----------
x : object
The instance to test
roundtrip : module, optional
When provided, this namespace will be used to evaluate
``repr(x)`` and ensure that it roundtrips. It will also
ensure that ``__bytes__(x)`` roundtrip.
If not provided, no roundtrip testing will be performed.
"""
from astropy import conf
with conf.set_temp("unicode_output", False):
bytes_x = bytes(x)
unicode_x = str(x)
repr_x = repr(x)
assert isinstance(bytes_x, bytes)
bytes_x.decode("ascii")
assert isinstance(unicode_x, str)
unicode_x.encode("ascii")
assert isinstance(repr_x, str)
if isinstance(repr_x, bytes):
repr_x.decode("ascii")
else:
repr_x.encode("ascii")
if roundtrip is not None:
assert x.__class__(bytes_x) == x
assert x.__class__(unicode_x) == x
assert eval(repr_x, roundtrip) == x
with conf.set_temp("unicode_output", True):
bytes_x = bytes(x)
unicode_x = str(x)
repr_x = repr(x)
assert isinstance(bytes_x, bytes)
bytes_x.decode("ascii")
assert isinstance(unicode_x, str)
assert isinstance(repr_x, str)
if isinstance(repr_x, bytes):
repr_x.decode("ascii")
else:
repr_x.encode("ascii")
if roundtrip is not None:
assert x.__class__(bytes_x) == x
assert x.__class__(unicode_x) == x
assert eval(repr_x, roundtrip) == x
@pytest.fixture(params=[0, 1, -1])
def pickle_protocol(request):
"""
Fixture to run all the tests for protocols 0 and 1, and -1 (most advanced).
(Originally from astropy.table.tests.test_pickle).
"""
return request.param
def generic_recursive_equality_test(a, b, class_history):
"""
Check if the attributes of a and b are equal. Then,
check if the attributes of the attributes are equal.
"""
if PYTHON_LT_3_11:
dict_a = a.__getstate__() if hasattr(a, "__getstate__") else a.__dict__
else:
# NOTE: The call may need to be adapted if other objects implementing a __getstate__
# with required argument(s) are passed to this function.
# For a class with `__slots__` the default state is not a `dict`;
# with neither `__dict__` nor `__slots__` it is `None`.
state = a.__getstate__(a) if isinstance(a, type) else a.__getstate__()
dict_a = state if isinstance(state, dict) else getattr(a, "__dict__", dict())
dict_b = b.__dict__
for key in dict_a:
assert key in dict_b, f"Did not pickle {key}"
if dict_a[key].__class__.__eq__ is not object.__eq__:
# Only compare if the class defines a proper equality test.
# E.g., info does not define __eq__, and hence defers to
# object.__eq__, which is equivalent to checking that two
# instances are the same. This will generally not be true
# after pickling.
eq = dict_a[key] == dict_b[key]
if "__iter__" in dir(eq):
eq = False not in eq
assert eq, f"Value of {key} changed by pickling"
if hasattr(dict_a[key], "__dict__"):
if dict_a[key].__class__ in class_history:
# attempt to prevent infinite recursion
pass
else:
new_class_history = [dict_a[key].__class__]
new_class_history.extend(class_history)
generic_recursive_equality_test(
dict_a[key], dict_b[key], new_class_history
)
def check_pickling_recovery(original, protocol):
"""
Try to pickle an object. If successful, make sure
the object's attributes survived pickling and unpickling.
"""
f = pickle.dumps(original, protocol=protocol)
unpickled = pickle.loads(f)
class_history = [original.__class__]
generic_recursive_equality_test(original, unpickled, class_history)
def assert_quantity_allclose(actual, desired, rtol=1.0e-7, atol=None, **kwargs):
"""
Raise an assertion if two objects are not equal up to desired tolerance.
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.testing.assert_allclose`.
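    A minimal illustrative sketch (this assumes the helper is importable as
    ``astropy.tests.helper.assert_quantity_allclose``):
    >>> from astropy import units as u
    >>> from astropy.tests.helper import assert_quantity_allclose
    >>> assert_quantity_allclose(1000 * u.m, 1 * u.km)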
"""
import numpy as np
from astropy.units.quantity import _unquantify_allclose_arguments
np.testing.assert_allclose(
*_unquantify_allclose_arguments(actual, desired, rtol, atol), **kwargs
)
|
0b268bfe6439bd7485249e9ce253eb6322ac97d69702078aac66881b41865241 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from copy import deepcopy
import numpy as np
from astropy import units as u
from astropy.table import QTable, Table, groups
from astropy.time import Time, TimeDelta
from astropy.timeseries.core import BaseTimeSeries, autocheck_required_columns
from astropy.units import Quantity, UnitsError
from astropy.utils.decorators import deprecated_renamed_argument
__all__ = ["TimeSeries"]
@autocheck_required_columns
class TimeSeries(BaseTimeSeries):
"""
A class to represent time series data in tabular form.
`~astropy.timeseries.TimeSeries` provides a class for representing time
series as a collection of values of different quantities measured at specific
points in time (for time series with finite time bins, see the
`~astropy.timeseries.BinnedTimeSeries` class).
`~astropy.timeseries.TimeSeries` is a sub-class of `~astropy.table.QTable`
    and thus provides all the standard table manipulation methods available to
tables, but it also provides additional conveniences for dealing with time
series, such as a flexible initializer for setting up the times, a method
for folding time series, and a ``time`` attribute for easy access to the
time values.
See also: https://docs.astropy.org/en/stable/timeseries/
Parameters
----------
data : numpy ndarray, dict, list, `~astropy.table.Table`, or table-like object, optional
Data to initialize time series. This does not need to contain the times,
which can be provided separately, but if it does contain the times they
should be in a column called ``'time'`` to be automatically recognized.
time : `~astropy.time.Time`, `~astropy.time.TimeDelta` or iterable
The times at which the values are sampled - this can be either given
directly as a `~astropy.time.Time` or `~astropy.time.TimeDelta` array
or as any iterable that initializes the `~astropy.time.Time` class. If
this is given, then the remaining time-related arguments should not be used.
time_start : `~astropy.time.Time` or str
The time of the first sample in the time series. This is an alternative
to providing ``time`` and requires that ``time_delta`` is also provided.
time_delta : `~astropy.time.TimeDelta` or `~astropy.units.Quantity` ['time']
The step size in time for the series. This can either be a scalar if
the time series is evenly sampled, or an array of values if it is not.
n_samples : int
The number of time samples for the series. This is only used if both
``time_start`` and ``time_delta`` are provided and are scalar values.
**kwargs : dict, optional
Additional keyword arguments are passed to `~astropy.table.QTable`.
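    Examples
    --------
    A minimal illustrative sketch; an evenly sampled series is built from a
    start time and a step, and a data column is added afterwards:
    >>> from astropy import units as u
    >>> from astropy.timeseries import TimeSeries
    >>> ts = TimeSeries(time_start='2016-03-22T12:30:31',
    ...                 time_delta=3 * u.s, n_samples=5)
    >>> ts['flux'] = [1., 4., 5., 3., 2.] * u.mJy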
"""
_required_columns = ["time"]
def __init__(
self,
data=None,
*,
time=None,
time_start=None,
time_delta=None,
n_samples=None,
**kwargs,
):
super().__init__(data=data, **kwargs)
# For some operations, an empty time series needs to be created, then
# columns added one by one. We should check that when columns are added
# manually, time is added first and is of the right type.
if data is None and time is None and time_start is None and time_delta is None:
self._required_columns_relax = True
return
# First if time has been given in the table data, we should extract it
# and treat it as if it had been passed as a keyword argument.
if data is not None:
if n_samples is not None:
if n_samples != len(self):
raise TypeError(
"'n_samples' has been given both and it is not the "
"same length as the input data."
)
else:
n_samples = len(self)
if "time" in self.colnames:
if time is None:
time = self.columns["time"]
else:
raise TypeError(
"'time' has been given both in the table and as a keyword argument"
)
if time is None and time_start is None:
raise TypeError("Either 'time' or 'time_start' should be specified")
elif time is not None and time_start is not None:
raise TypeError("Cannot specify both 'time' and 'time_start'")
if time is not None and not isinstance(time, (Time, TimeDelta)):
time = Time(time)
if time_start is not None and not isinstance(time_start, (Time, TimeDelta)):
time_start = Time(time_start)
if time_delta is not None and not isinstance(time_delta, (Quantity, TimeDelta)):
raise TypeError("'time_delta' should be a Quantity or a TimeDelta")
if isinstance(time_delta, TimeDelta):
time_delta = time_delta.sec * u.s
if time_start is not None:
# We interpret this as meaning that time is that of the first
# sample and that the interval is given by time_delta.
if time_delta is None:
raise TypeError("'time' is scalar, so 'time_delta' is required")
if time_delta.isscalar:
time_delta = np.repeat(time_delta, n_samples)
time_delta = np.cumsum(time_delta)
time_delta = np.roll(time_delta, 1)
time_delta[0] = 0.0 * u.s
time = time_start + time_delta
elif len(self.colnames) > 0 and len(time) != len(self):
raise ValueError(
f"Length of 'time' ({len(time)}) should match data length ({n_samples})"
)
elif time_delta is not None:
raise TypeError(
"'time_delta' should not be specified since 'time' is an array"
)
with self._delay_required_column_checks():
if "time" in self.colnames:
self.remove_column("time")
self.add_column(time, index=0, name="time")
@property
def time(self):
"""
The time values.
"""
return self["time"]
@deprecated_renamed_argument("midpoint_epoch", "epoch_time", "4.0")
def fold(
self,
period=None,
epoch_time=None,
epoch_phase=0,
wrap_phase=None,
normalize_phase=False,
):
"""
Return a new `~astropy.timeseries.TimeSeries` folded with a period and
epoch.
Parameters
----------
period : `~astropy.units.Quantity` ['time']
The period to use for folding
epoch_time : `~astropy.time.Time`
The time to use as the reference epoch, at which the relative time
offset / phase will be ``epoch_phase``. Defaults to the first time
in the time series.
epoch_phase : float or `~astropy.units.Quantity` ['dimensionless', 'time']
Phase of ``epoch_time``. If ``normalize_phase`` is `True`, this
should be a dimensionless value, while if ``normalize_phase`` is
``False``, this should be a `~astropy.units.Quantity` with time
units. Defaults to 0.
wrap_phase : float or `~astropy.units.Quantity` ['dimensionless', 'time']
The value of the phase above which values are wrapped back by one
period. If ``normalize_phase`` is `True`, this should be a
dimensionless value, while if ``normalize_phase`` is ``False``,
this should be a `~astropy.units.Quantity` with time units.
Defaults to half the period, so that the resulting time series goes
from ``-period / 2`` to ``period / 2`` (if ``normalize_phase`` is
`False`) or -0.5 to 0.5 (if ``normalize_phase`` is `True`).
normalize_phase : bool
            If `False`, phase is returned as `~astropy.time.TimeDelta`,
otherwise as a dimensionless `~astropy.units.Quantity`.
Returns
-------
folded_timeseries : `~astropy.timeseries.TimeSeries`
The folded time series object with phase as the ``time`` column.
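        Examples
        --------
        A brief sketch, assuming a `~astropy.timeseries.TimeSeries` named
        ``ts`` and a hypothetical 3.2 second periodicity.
        >>> import astropy.units as u
        >>> folded = ts.fold(period=3.2 * u.s)  # doctest: +SKIP
        >>> folded = ts.fold(period=3.2 * u.s, epoch_phase=0.1 * u.s,
        ...                  wrap_phase=0.4 * u.s)  # doctest: +SKIP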
"""
if not isinstance(period, Quantity) or period.unit.physical_type != "time":
raise UnitsError("period should be a Quantity in units of time")
folded = self.copy()
if epoch_time is None:
epoch_time = self.time[0]
else:
epoch_time = Time(epoch_time)
period_sec = period.to_value(u.s)
if normalize_phase:
if (
isinstance(epoch_phase, Quantity)
and epoch_phase.unit.physical_type != "dimensionless"
):
raise UnitsError(
"epoch_phase should be a dimensionless Quantity "
"or a float when normalize_phase=True"
)
epoch_phase_sec = epoch_phase * period_sec
else:
if epoch_phase == 0:
epoch_phase_sec = 0.0
else:
if (
not isinstance(epoch_phase, Quantity)
or epoch_phase.unit.physical_type != "time"
):
raise UnitsError(
"epoch_phase should be a Quantity in units "
"of time when normalize_phase=False"
)
epoch_phase_sec = epoch_phase.to_value(u.s)
if wrap_phase is None:
wrap_phase = period_sec / 2
else:
if normalize_phase:
if isinstance(
wrap_phase, Quantity
) and not wrap_phase.unit.is_equivalent(u.one):
raise UnitsError(
"wrap_phase should be dimensionless when normalize_phase=True"
)
else:
if wrap_phase < 0 or wrap_phase > 1:
raise ValueError("wrap_phase should be between 0 and 1")
else:
wrap_phase = wrap_phase * period_sec
else:
if (
isinstance(wrap_phase, Quantity)
and wrap_phase.unit.physical_type == "time"
):
if wrap_phase < 0 or wrap_phase > period:
raise ValueError(
"wrap_phase should be between 0 and the period"
)
else:
wrap_phase = wrap_phase.to_value(u.s)
else:
raise UnitsError(
"wrap_phase should be a Quantity in units "
"of time when normalize_phase=False"
)
relative_time_sec = (
(self.time - epoch_time).sec + epoch_phase_sec + (period_sec - wrap_phase)
) % period_sec - (period_sec - wrap_phase)
folded_time = TimeDelta(relative_time_sec * u.s)
if normalize_phase:
folded_time = (folded_time / period).decompose()
period = period_sec = 1
with folded._delay_required_column_checks():
folded.remove_column("time")
folded.add_column(folded_time, name="time", index=0)
return folded
def __getitem__(self, item):
if self._is_list_or_tuple_of_str(item):
if "time" not in item:
out = QTable(
[self[x] for x in item],
meta=deepcopy(self.meta),
copy_indices=self._copy_indices,
)
out._groups = groups.TableGroups(
out, indices=self.groups._indices, keys=self.groups._keys
)
return out
return super().__getitem__(item)
def add_column(self, *args, **kwargs):
"""
See :meth:`~astropy.table.Table.add_column`.
"""
# Note that the docstring is inherited from QTable
result = super().add_column(*args, **kwargs)
if len(self.indices) == 0 and "time" in self.colnames:
self.add_index("time")
return result
def add_columns(self, *args, **kwargs):
"""
See :meth:`~astropy.table.Table.add_columns`.
"""
# Note that the docstring is inherited from QTable
result = super().add_columns(*args, **kwargs)
if len(self.indices) == 0 and "time" in self.colnames:
self.add_index("time")
return result
@classmethod
    def from_pandas(cls, df, time_scale="utc"):
"""
Convert a :class:`~pandas.DataFrame` to a
:class:`astropy.timeseries.TimeSeries`.
Parameters
----------
df : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance.
time_scale : str
The time scale to pass into `astropy.time.Time`.
            Defaults to ``'utc'``.
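        Examples
        --------
        A minimal sketch, assuming a :class:`pandas.DataFrame` named ``df``
        that is indexed by a :class:`pandas.DatetimeIndex`.
        >>> from astropy.timeseries import TimeSeries
        >>> ts = TimeSeries.from_pandas(df)  # doctest: +SKIP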
"""
from pandas import DataFrame, DatetimeIndex
if not isinstance(df, DataFrame):
raise TypeError("Input should be a pandas DataFrame")
if not isinstance(df.index, DatetimeIndex):
raise TypeError("DataFrame does not have a DatetimeIndex")
time = Time(df.index, scale=time_scale)
table = Table.from_pandas(df)
return TimeSeries(time=time, data=table)
def to_pandas(self):
"""
Convert this :class:`~astropy.timeseries.TimeSeries` to a
:class:`~pandas.DataFrame` with a :class:`~pandas.DatetimeIndex` index.
Returns
-------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
"""
return Table(self).to_pandas(index="time")
@classmethod
def read(
        cls,
filename,
time_column=None,
time_format=None,
time_scale=None,
format=None,
*args,
**kwargs,
):
"""
        Read and parse a file and return an `astropy.timeseries.TimeSeries`.
This method uses the unified I/O infrastructure in Astropy which makes
it easy to define readers/writers for various classes
(https://docs.astropy.org/en/stable/io/unified.html). By default, this
method will try and use readers defined specifically for the
`astropy.timeseries.TimeSeries` class - however, it is also
possible to use the ``format`` keyword to specify formats defined for
the `astropy.table.Table` class - in this case, you will need to also
        provide the name of the column containing the times, as well as other
        column names (see the Parameters section below
for details)::
>>> from astropy.timeseries import TimeSeries
>>> ts = TimeSeries.read('sampled.dat', format='ascii.ecsv',
... time_column='date') # doctest: +SKIP
Parameters
----------
filename : str
File to parse.
format : str
File format specifier.
time_column : str, optional
The name of the time column.
time_format : str, optional
The time format for the time column.
time_scale : str, optional
The time scale for the time column.
*args : tuple, optional
Positional arguments passed through to the data reader.
**kwargs : dict, optional
Keyword arguments passed through to the data reader.
Returns
-------
out : `astropy.timeseries.sampled.TimeSeries`
TimeSeries corresponding to file contents.
        """
try:
            # First we try the readers defined for the TimeSeries class
return super().read(filename, *args, format=format, **kwargs)
except TypeError:
# Otherwise we fall back to the default Table readers
if time_column is None:
raise ValueError(
"``time_column`` should be provided since the default Table readers"
" are being used."
)
table = Table.read(filename, *args, format=format, **kwargs)
if time_column in table.colnames:
time = Time(
table.columns[time_column], scale=time_scale, format=time_format
)
table.remove_column(time_column)
else:
raise ValueError(
f"Time column '{time_column}' not found in the input data."
)
return TimeSeries(time=time, data=table)
|
93d53949561ffb0790035598f47f0eef0f33d142bc995422b876024713fbb43e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from copy import deepcopy
import numpy as np
from astropy import units as u
from astropy.table import QTable, Table, groups
from astropy.time import Time, TimeDelta
from astropy.timeseries.core import BaseTimeSeries, autocheck_required_columns
from astropy.units import Quantity
__all__ = ["BinnedTimeSeries"]
@autocheck_required_columns
class BinnedTimeSeries(BaseTimeSeries):
"""
A class to represent binned time series data in tabular form.
`~astropy.timeseries.BinnedTimeSeries` provides a class for
representing time series as a collection of values of different
quantities measured in time bins (for time series with values
sampled at specific times, see the `~astropy.timeseries.TimeSeries`
class). `~astropy.timeseries.BinnedTimeSeries` is a sub-class of
`~astropy.table.QTable` and thus provides all the standard table
    manipulation methods available to tables, but it also provides
additional conveniences for dealing with time series, such as a
flexible initializer for setting up the times, and attributes to
access the start/center/end time of bins.
See also: https://docs.astropy.org/en/stable/timeseries/
Parameters
----------
data : numpy ndarray, dict, list, table-like object, optional
Data to initialize time series. This does not need to contain the
times, which can be provided separately, but if it does contain the
times they should be in columns called ``'time_bin_start'`` and
``'time_bin_size'`` to be automatically recognized.
time_bin_start : `~astropy.time.Time` or iterable
The times of the start of each bin - this can be either given
directly as a `~astropy.time.Time` array or as any iterable that
initializes the `~astropy.time.Time` class. If this is given, then
the remaining time-related arguments should not be used. This can also
be a scalar value if ``time_bin_size`` is provided.
time_bin_end : `~astropy.time.Time` or iterable
The times of the end of each bin - this can be either given directly
as a `~astropy.time.Time` array or as any value or iterable that
initializes the `~astropy.time.Time` class. If this is given, then the
remaining time-related arguments should not be used. This can only be
given if ``time_bin_start`` is an array of values. If ``time_bin_end``
is a scalar, time bins are assumed to be contiguous, such that the end
of each bin is the start of the next one, and ``time_bin_end`` gives
the end time for the last bin. If ``time_bin_end`` is an array, the
time bins do not need to be contiguous. If this argument is provided,
``time_bin_size`` should not be provided.
time_bin_size : `~astropy.time.TimeDelta` or `~astropy.units.Quantity`
The size of the time bins, either as a scalar value (in which case all
time bins will be assumed to have the same duration) or as an array of
values (in which case each time bin can have a different duration).
If this argument is provided, ``time_bin_end`` should not be provided.
n_bins : int
The number of time bins for the series. This is only used if both
``time_bin_start`` and ``time_bin_size`` are provided and are scalar
values.
**kwargs : dict, optional
Additional keyword arguments are passed to `~astropy.table.QTable`.
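    Examples
    --------
    As an illustrative sketch, contiguous bins can be set up from a start time
    and a scalar bin size; the ``'flux'`` column name and values are arbitrary.
    >>> from astropy import units as u
    >>> from astropy.timeseries import BinnedTimeSeries
    >>> ts = BinnedTimeSeries(time_bin_start='2016-03-22T12:30:31',
    ...                       time_bin_size=3 * u.s,
    ...                       data={'flux': [1., 4., 5.] * u.mJy})
    >>> ts.colnames
    ['time_bin_start', 'time_bin_size', 'flux']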
"""
_required_columns = ["time_bin_start", "time_bin_size"]
def __init__(
self,
data=None,
*,
time_bin_start=None,
time_bin_end=None,
time_bin_size=None,
n_bins=None,
**kwargs,
):
super().__init__(data=data, **kwargs)
# For some operations, an empty time series needs to be created, then
# columns added one by one. We should check that when columns are added
# manually, time is added first and is of the right type.
if (
data is None
and time_bin_start is None
and time_bin_end is None
and time_bin_size is None
and n_bins is None
):
self._required_columns_relax = True
return
# First if time_bin_start and time_bin_end have been given in the table data, we
# should extract them and treat them as if they had been passed as
# keyword arguments.
if "time_bin_start" in self.colnames:
if time_bin_start is None:
time_bin_start = self.columns["time_bin_start"]
else:
raise TypeError(
"'time_bin_start' has been given both in the table "
"and as a keyword argument"
)
if "time_bin_size" in self.colnames:
if time_bin_size is None:
time_bin_size = self.columns["time_bin_size"]
else:
raise TypeError(
"'time_bin_size' has been given both in the table "
"and as a keyword argument"
)
if time_bin_start is None:
raise TypeError("'time_bin_start' has not been specified")
if time_bin_end is None and time_bin_size is None:
raise TypeError(
"Either 'time_bin_size' or 'time_bin_end' should be specified"
)
if not isinstance(time_bin_start, (Time, TimeDelta)):
time_bin_start = Time(time_bin_start)
if time_bin_end is not None and not isinstance(time_bin_end, (Time, TimeDelta)):
time_bin_end = Time(time_bin_end)
if time_bin_size is not None and not isinstance(
time_bin_size, (Quantity, TimeDelta)
):
raise TypeError("'time_bin_size' should be a Quantity or a TimeDelta")
if isinstance(time_bin_size, TimeDelta):
time_bin_size = time_bin_size.sec * u.s
if n_bins is not None and time_bin_size is not None:
if not (time_bin_start.isscalar and time_bin_size.isscalar):
raise TypeError(
"'n_bins' cannot be specified if 'time_bin_start' or "
"'time_bin_size' are not scalar'"
)
if time_bin_start.isscalar:
# We interpret this as meaning that this is the start of the
# first bin and that the bins are contiguous. In this case,
# we require time_bin_size to be specified.
if time_bin_size is None:
raise TypeError(
"'time_bin_start' is scalar, so 'time_bin_size' is required"
)
if time_bin_size.isscalar:
if data is not None:
if n_bins is not None:
if n_bins != len(self):
raise TypeError(
"'n_bins' has been given and it is not the "
"same length as the input data."
)
else:
n_bins = len(self)
time_bin_size = np.repeat(time_bin_size, n_bins)
time_delta = np.cumsum(time_bin_size)
time_bin_end = time_bin_start + time_delta
# Now shift the array so that the first entry is 0
time_delta = np.roll(time_delta, 1)
time_delta[0] = 0.0 * u.s
# Make time_bin_start into an array
time_bin_start = time_bin_start + time_delta
else:
if len(self.colnames) > 0 and len(time_bin_start) != len(self):
raise ValueError(
f"Length of 'time_bin_start' ({len(time_bin_start)}) should match "
f"table length ({len(self)})"
)
if time_bin_end is not None:
if time_bin_end.isscalar:
times = time_bin_start.copy()
times[:-1] = times[1:]
times[-1] = time_bin_end
time_bin_end = times
time_bin_size = (time_bin_end - time_bin_start).sec * u.s
if time_bin_size.isscalar:
time_bin_size = np.repeat(time_bin_size, len(self))
with self._delay_required_column_checks():
if "time_bin_start" in self.colnames:
self.remove_column("time_bin_start")
if "time_bin_size" in self.colnames:
self.remove_column("time_bin_size")
self.add_column(time_bin_start, index=0, name="time_bin_start")
self.add_index("time_bin_start")
self.add_column(time_bin_size, index=1, name="time_bin_size")
@property
def time_bin_start(self):
"""
The start times of all the time bins.
"""
return self["time_bin_start"]
@property
def time_bin_center(self):
"""
The center times of all the time bins.
"""
return self["time_bin_start"] + self["time_bin_size"] * 0.5
@property
def time_bin_end(self):
"""
The end times of all the time bins.
"""
return self["time_bin_start"] + self["time_bin_size"]
@property
def time_bin_size(self):
"""
The sizes of all the time bins.
"""
return self["time_bin_size"]
def __getitem__(self, item):
if self._is_list_or_tuple_of_str(item):
if "time_bin_start" not in item or "time_bin_size" not in item:
out = QTable(
[self[x] for x in item],
meta=deepcopy(self.meta),
copy_indices=self._copy_indices,
)
out._groups = groups.TableGroups(
out, indices=self.groups._indices, keys=self.groups._keys
)
return out
return super().__getitem__(item)
@classmethod
def read(
        cls,
filename,
time_bin_start_column=None,
time_bin_end_column=None,
time_bin_size_column=None,
time_bin_size_unit=None,
time_format=None,
time_scale=None,
format=None,
*args,
**kwargs,
):
"""
        Read and parse a file and return an `astropy.timeseries.BinnedTimeSeries`.
This method uses the unified I/O infrastructure in Astropy which makes
it easy to define readers/writers for various classes
(https://docs.astropy.org/en/stable/io/unified.html). By default, this
method will try and use readers defined specifically for the
`astropy.timeseries.BinnedTimeSeries` class - however, it is also
possible to use the ``format`` keyword to specify formats defined for
the `astropy.table.Table` class - in this case, you will need to also
provide the column names for column containing the start times for the
bins, as well as other column names (see the Parameters section below
for details)::
>>> from astropy.timeseries.binned import BinnedTimeSeries
>>> ts = BinnedTimeSeries.read('binned.dat', format='ascii.ecsv',
... time_bin_start_column='date_start',
... time_bin_end_column='date_end') # doctest: +SKIP
Parameters
----------
filename : str
File to parse.
format : str
File format specifier.
time_bin_start_column : str
The name of the column with the start time for each bin.
time_bin_end_column : str, optional
The name of the column with the end time for each bin. Either this
option or ``time_bin_size_column`` should be specified.
time_bin_size_column : str, optional
The name of the column with the size for each bin. Either this
option or ``time_bin_end_column`` should be specified.
time_bin_size_unit : `astropy.units.Unit`, optional
If ``time_bin_size_column`` is specified but does not have a unit
set in the table, you can specify the unit manually.
time_format : str, optional
The time format for the start and end columns.
time_scale : str, optional
The time scale for the start and end columns.
*args : tuple, optional
Positional arguments passed through to the data reader.
**kwargs : dict, optional
Keyword arguments passed through to the data reader.
Returns
-------
out : `astropy.timeseries.binned.BinnedTimeSeries`
BinnedTimeSeries corresponding to the file.
"""
try:
# First we try the readers defined for the BinnedTimeSeries class
return super().read(filename, *args, format=format, **kwargs)
except TypeError:
# Otherwise we fall back to the default Table readers
if time_bin_start_column is None:
raise ValueError(
"``time_bin_start_column`` should be provided since the default"
" Table readers are being used."
)
if time_bin_end_column is None and time_bin_size_column is None:
raise ValueError(
"Either `time_bin_end_column` or `time_bin_size_column` should be"
" provided."
)
elif time_bin_end_column is not None and time_bin_size_column is not None:
raise ValueError(
"Cannot specify both `time_bin_end_column` and"
" `time_bin_size_column`."
)
table = Table.read(filename, *args, format=format, **kwargs)
if time_bin_start_column in table.colnames:
time_bin_start = Time(
table.columns[time_bin_start_column],
scale=time_scale,
format=time_format,
)
table.remove_column(time_bin_start_column)
else:
raise ValueError(
f"Bin start time column '{time_bin_start_column}' not found in the"
" input data."
)
if time_bin_end_column is not None:
if time_bin_end_column in table.colnames:
time_bin_end = Time(
table.columns[time_bin_end_column],
scale=time_scale,
format=time_format,
)
table.remove_column(time_bin_end_column)
else:
raise ValueError(
f"Bin end time column '{time_bin_end_column}' not found in the"
" input data."
)
time_bin_size = None
elif time_bin_size_column is not None:
if time_bin_size_column in table.colnames:
time_bin_size = table.columns[time_bin_size_column]
table.remove_column(time_bin_size_column)
else:
raise ValueError(
f"Bin size column '{time_bin_size_column}' not found in the"
" input data."
)
if time_bin_size.unit is None:
if time_bin_size_unit is None or not isinstance(
time_bin_size_unit, u.UnitBase
):
raise ValueError(
"The bin size unit should be specified as an astropy Unit"
" using ``time_bin_size_unit``."
)
time_bin_size = time_bin_size * time_bin_size_unit
else:
time_bin_size = u.Quantity(time_bin_size)
time_bin_end = None
if time_bin_start.isscalar and time_bin_size.isscalar:
return BinnedTimeSeries(
data=table,
time_bin_start=time_bin_start,
time_bin_end=time_bin_end,
time_bin_size=time_bin_size,
n_bins=len(table),
)
else:
return BinnedTimeSeries(
data=table,
time_bin_start=time_bin_start,
time_bin_end=time_bin_end,
time_bin_size=time_bin_size,
)
|
0b22e7d4d6666fc666f9e825eef4be940dbabe826a4a1efcdb4023f8e173c501 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import annotations
import abc
import inspect
from typing import TYPE_CHECKING, Any, TypeVar
import numpy as np
from astropy.io.registry import UnifiedReadWriteMethod
from astropy.utils.decorators import classproperty
from astropy.utils.metadata import MetaData
from .connect import (
CosmologyFromFormat,
CosmologyRead,
CosmologyToFormat,
CosmologyWrite,
)
from .parameter import Parameter
if TYPE_CHECKING: # pragma: no cover
from collections.abc import Mapping
from astropy.cosmology.funcs.comparison import _FormatType
# Originally authored by Andrew Becker ([email protected]),
# and modified by Neil Crighton ([email protected]), Roban Kramer
# ([email protected]), and Nathaniel Starkman ([email protected]).
# Many of these adapted from Hogg 1999, astro-ph/9905116
# and Linder 2003, PRL 90, 91301
__all__ = ["Cosmology", "CosmologyError", "FlatCosmologyMixin"]
__doctest_requires__ = {} # needed until __getattr__ removed
##############################################################################
# Parameters
# registry of cosmology classes with {key=name : value=class}
_COSMOLOGY_CLASSES = dict()
# typing
_CosmoT = TypeVar("_CosmoT", bound="Cosmology")
_FlatCosmoT = TypeVar("_FlatCosmoT", bound="FlatCosmologyMixin")
##############################################################################
class CosmologyError(Exception):
pass
class Cosmology(metaclass=abc.ABCMeta):
"""Base-class for all Cosmologies.
Parameters
----------
*args
Arguments into the cosmology; used by subclasses, not this base class.
name : str or None (optional, keyword-only)
The name of the cosmology.
meta : dict or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
**kwargs
Arguments into the cosmology; used by subclasses, not this base class.
Notes
-----
Class instances are static -- you cannot (and should not) change the
values of the parameters. That is, all of the above attributes
(except meta) are read only.
For details on how to create performant custom subclasses, see the
documentation on :ref:`astropy-cosmology-fast-integrals`.
Cosmology subclasses are automatically registered in a global registry
and with various I/O methods. To turn off or change this registration,
override the ``_register_cls`` classmethod in the subclass.
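    Examples
    --------
    A brief sketch using one of the built-in realizations.
    >>> from astropy.cosmology import Cosmology, Planck18
    >>> isinstance(Planck18, Cosmology)
    True
    >>> Planck18.is_flat
    True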
"""
meta = MetaData()
# Unified I/O object interchange methods
from_format = UnifiedReadWriteMethod(CosmologyFromFormat)
to_format = UnifiedReadWriteMethod(CosmologyToFormat)
# Unified I/O read and write methods
read = UnifiedReadWriteMethod(CosmologyRead)
write = UnifiedReadWriteMethod(CosmologyWrite)
# Parameters
__parameters__: tuple[str, ...] = ()
__all_parameters__: tuple[str, ...] = ()
# ---------------------------------------------------------------
def __init_subclass__(cls):
super().__init_subclass__()
# -------------------
# Parameters
# Get parameters that are still Parameters, either in this class or above.
parameters = []
derived_parameters = []
for n in cls.__parameters__:
p = getattr(cls, n)
if isinstance(p, Parameter):
derived_parameters.append(n) if p.derived else parameters.append(n)
# Add new parameter definitions
for n, v in cls.__dict__.items():
if n in parameters or n.startswith("_") or not isinstance(v, Parameter):
continue
derived_parameters.append(n) if v.derived else parameters.append(n)
# reorder to match signature
ordered = [
parameters.pop(parameters.index(n))
for n in cls._init_signature.parameters.keys()
if n in parameters
]
parameters = ordered + parameters # place "unordered" at the end
cls.__parameters__ = tuple(parameters)
cls.__all_parameters__ = cls.__parameters__ + tuple(derived_parameters)
# -------------------
# Registration
if not inspect.isabstract(cls): # skip abstract classes
cls._register_cls()
@classproperty(lazy=True)
def _init_signature(cls):
"""Initialization signature (without 'self')."""
# get signature, dropping "self" by taking arguments [1:]
sig = inspect.signature(cls.__init__)
sig = sig.replace(parameters=list(sig.parameters.values())[1:])
return sig
@classmethod
def _register_cls(cls):
# register class
_COSMOLOGY_CLASSES[cls.__qualname__] = cls
# register to YAML
from astropy.cosmology._io.yaml import register_cosmology_yaml
register_cosmology_yaml(cls)
# ---------------------------------------------------------------
def __init__(self, name=None, meta=None):
self._name = str(name) if name is not None else name
self.meta.update(meta or {})
@property
def name(self):
"""The name of the Cosmology instance."""
return self._name
@property
@abc.abstractmethod
def is_flat(self):
"""
Return bool; `True` if the cosmology is flat.
This is abstract and must be defined in subclasses.
"""
raise NotImplementedError("is_flat is not implemented")
def clone(self, *, meta=None, **kwargs):
"""Returns a copy of this object with updated parameters, as specified.
This cannot be used to change the type of the cosmology, so ``clone()``
cannot be used to change between flat and non-flat cosmologies.
Parameters
----------
meta : mapping or None (optional, keyword-only)
Metadata that will update the current metadata.
**kwargs
Cosmology parameter (and name) modifications. If any parameter is
changed and a new name is not given, the name will be set to "[old
name] (modified)".
Returns
-------
newcosmo : `~astropy.cosmology.Cosmology` subclass instance
A new instance of this class with updated parameters as specified.
If no arguments are given, then a reference to this object is
returned instead of copy.
Examples
--------
To make a copy of the ``Planck13`` cosmology with a different matter
density (``Om0``), and a new name:
>>> from astropy.cosmology import Planck13
>>> Planck13.clone(name="Modified Planck 2013", Om0=0.35)
FlatLambdaCDM(name="Modified Planck 2013", H0=67.77 km / (Mpc s),
Om0=0.35, ...
If no name is specified, the new name will note the modification.
>>> Planck13.clone(Om0=0.35).name
'Planck13 (modified)'
"""
# Quick return check, taking advantage of the Cosmology immutability.
if meta is None and not kwargs:
return self
# There are changed parameter or metadata values.
# The name needs to be changed accordingly, if it wasn't already.
_modname = self.name + " (modified)"
kwargs.setdefault("name", (_modname if self.name is not None else None))
# mix new meta into existing, preferring the former.
meta = meta if meta is not None else {}
new_meta = {**self.meta, **meta}
# Mix kwargs into initial arguments, preferring the former.
new_init = {**self._init_arguments, "meta": new_meta, **kwargs}
# Create BoundArgument to handle args versus kwargs.
# This also handles all errors from mismatched arguments
ba = self._init_signature.bind_partial(**new_init)
# Instantiate, respecting args vs kwargs
cloned = type(self)(*ba.args, **ba.kwargs)
# Check if nothing has changed.
# TODO! or should return self?
if (cloned.name == _modname) and not meta and cloned.is_equivalent(self):
cloned._name = self.name
return cloned
@property
def _init_arguments(self):
# parameters
kw = {n: getattr(self, n) for n in self.__parameters__}
# other info
kw["name"] = self.name
kw["meta"] = self.meta
return kw
# ---------------------------------------------------------------
# comparison methods
def is_equivalent(self, other: Any, /, *, format: _FormatType = False) -> bool:
r"""Check equivalence between Cosmologies.
Two cosmologies may be equivalent even if not the same class.
For example, an instance of ``LambdaCDM`` might have :math:`\Omega_0=1`
and :math:`\Omega_k=0` and therefore be flat, like ``FlatLambdaCDM``.
Parameters
----------
other : `~astropy.cosmology.Cosmology` subclass instance, positional-only
The object to which to compare.
format : bool or None or str, optional keyword-only
Whether to allow, before equivalence is checked, the object to be
converted to a |Cosmology|. This allows, e.g. a |Table| to be
equivalent to a Cosmology.
`False` (default) will not allow conversion. `True` or `None` will,
and will use the auto-identification to try to infer the correct
format. A `str` is assumed to be the correct format to use when
converting.
``format`` is broadcast to match the shape of ``other``.
Note that the cosmology arguments are not broadcast against
``format``, so it cannot determine the output shape.
Returns
-------
bool
True if cosmologies are equivalent, False otherwise.
Examples
--------
Two cosmologies may be equivalent even if not of the same class.
        In this example, the ``LambdaCDM`` has ``Ode0`` set to the same value
calculated in ``FlatLambdaCDM``.
>>> import astropy.units as u
>>> from astropy.cosmology import LambdaCDM, FlatLambdaCDM
>>> cosmo1 = LambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3, 0.7)
>>> cosmo2 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3)
>>> cosmo1.is_equivalent(cosmo2)
True
While in this example, the cosmologies are not equivalent.
>>> cosmo3 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3, Tcmb0=3 * u.K)
>>> cosmo3.is_equivalent(cosmo2)
False
Also, using the keyword argument, the notion of equivalence is extended
to any Python object that can be converted to a |Cosmology|.
>>> from astropy.cosmology import Planck18
>>> tbl = Planck18.to_format("astropy.table")
>>> Planck18.is_equivalent(tbl, format=True)
True
The list of valid formats, e.g. the |Table| in this example, may be
checked with ``Cosmology.from_format.list_formats()``.
As can be seen in the list of formats, not all formats can be
auto-identified by ``Cosmology.from_format.registry``. Objects of
these kinds can still be checked for equivalence, but the correct
format string must be used.
>>> tbl = Planck18.to_format("yaml")
>>> Planck18.is_equivalent(tbl, format="yaml")
True
"""
from .funcs import cosmology_equal
try:
return cosmology_equal(
self, other, format=(None, format), allow_equivalent=True
)
except Exception:
# `is_equivalent` allows `other` to be any object and returns False
# if `other` cannot be converted to a Cosmology, rather than
# raising an Exception.
return False
def __equiv__(self, other: Any, /) -> bool:
"""Cosmology equivalence. Use ``.is_equivalent()`` for actual check!
Parameters
----------
other : `~astropy.cosmology.Cosmology` subclass instance, positional-only
            The object to which to compare.
Returns
-------
bool or `NotImplemented`
`NotImplemented` if ``other`` is from a different class.
`True` if ``other`` is of the same class and has matching parameters
and parameter values.
`False` otherwise.
"""
if other.__class__ is not self.__class__:
return NotImplemented # allows other.__equiv__
# Check all parameters in 'other' match those in 'self' and 'other' has
# no extra parameters (latter part should never happen b/c same class)
return set(self.__all_parameters__) == set(other.__all_parameters__) and all(
np.all(getattr(self, k) == getattr(other, k))
for k in self.__all_parameters__
)
def __eq__(self, other: Any, /) -> bool:
"""Check equality between Cosmologies.
Checks the Parameters and immutable fields (i.e. not "meta").
Parameters
----------
other : `~astropy.cosmology.Cosmology` subclass instance, positional-only
            The object to which to compare.
Returns
-------
bool
`True` if Parameters and names are the same, `False` otherwise.
"""
if other.__class__ is not self.__class__:
return NotImplemented # allows other.__eq__
eq = (
# non-Parameter checks: name
self.name == other.name
# check all parameters in 'other' match those in 'self' and 'other'
# has no extra parameters (latter part should never happen b/c same
# class) TODO! element-wise when there are array cosmologies
and set(self.__all_parameters__) == set(other.__all_parameters__)
and all(
np.all(getattr(self, k) == getattr(other, k))
for k in self.__all_parameters__
)
)
return eq
# ---------------------------------------------------------------
def __repr__(self):
namelead = f"{self.__class__.__qualname__}("
if self.name is not None:
namelead += f'name="{self.name}", '
# nicely formatted parameters
fmtps = (f"{k}={getattr(self, k)}" for k in self.__parameters__)
return namelead + ", ".join(fmtps) + ")"
def __astropy_table__(self, cls, copy, **kwargs):
"""Return a `~astropy.table.Table` of type ``cls``.
Parameters
----------
cls : type
Astropy ``Table`` class or subclass.
copy : bool
Ignored.
**kwargs : dict, optional
Additional keyword arguments. Passed to ``self.to_format()``.
See ``Cosmology.to_format.help("astropy.table")`` for allowed kwargs.
Returns
-------
`astropy.table.Table` or subclass instance
Instance of type ``cls``.
"""
return self.to_format("astropy.table", cls=cls, **kwargs)
class FlatCosmologyMixin(metaclass=abc.ABCMeta):
"""
Mixin class for flat cosmologies. Do NOT instantiate directly.
Note that all instances of ``FlatCosmologyMixin`` are flat, but not all
    flat cosmologies are instances of ``FlatCosmologyMixin``. For example,
    ``LambdaCDM`` **may** be flat (for a specific set of parameter values),
but ``FlatLambdaCDM`` **will** be flat.
"""
__all_parameters__: tuple[str, ...]
__parameters__: tuple[str, ...]
def __init_subclass__(cls: type[_FlatCosmoT]) -> None:
super().__init_subclass__()
# Determine the non-flat class.
# This will raise a TypeError if the MRO is inconsistent.
cls.__nonflatclass__ # noqa: B018
# ===============================================================
@classmethod # TODO! make metaclass-method
def _get_nonflat_cls(
cls, kls: type[_CosmoT] | None = None
) -> type[Cosmology] | None:
"""Find the corresponding non-flat class.
The class' bases are searched recursively.
Parameters
----------
kls : :class:`astropy.cosmology.Cosmology` class or None, optional
If `None` (default) this class is searched instead of `kls`.
Raises
------
TypeError
If more than one non-flat class is found at the same level of the
inheritance. This is similar to the error normally raised by Python
for an inconsistent method resolution order.
Returns
-------
type
A :class:`Cosmology` subclass this class inherits from that is not a
:class:`FlatCosmologyMixin` subclass.
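        Examples
        --------
        For instance, the non-flat counterpart of |FlatLambdaCDM| is
        |LambdaCDM|.
        >>> from astropy.cosmology import FlatLambdaCDM, LambdaCDM
        >>> FlatLambdaCDM.__nonflatclass__ is LambdaCDM
        True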
"""
_kls = cls if kls is None else kls
# Find non-flat classes
nonflat: set[type[Cosmology]]
nonflat = {
b
for b in _kls.__bases__
if issubclass(b, Cosmology) and not issubclass(b, FlatCosmologyMixin)
}
if not nonflat: # e.g. subclassing FlatLambdaCDM
nonflat = {
k for b in _kls.__bases__ if (k := cls._get_nonflat_cls(b)) is not None
}
if len(nonflat) > 1:
raise TypeError(
"cannot create a consistent non-flat class resolution order "
f"for {_kls} with bases {nonflat} at the same inheritance level."
)
if not nonflat: # e.g. FlatFLRWMixin(FlatCosmologyMixin)
return None
return nonflat.pop()
__nonflatclass__ = classproperty(
_get_nonflat_cls, lazy=True, doc="Return the corresponding non-flat class."
)
# ===============================================================
@property
def is_flat(self):
"""Return `True`, the cosmology is flat."""
return True
@abc.abstractmethod
def nonflat(self: _FlatCosmoT) -> _CosmoT:
"""Return the equivalent non-flat-class instance of this cosmology."""
def clone(self, *, meta: Mapping | None = None, to_nonflat: bool = False, **kwargs):
"""Returns a copy of this object with updated parameters, as specified.
This cannot be used to change the type of the cosmology, except for
changing to the non-flat version of this cosmology.
Parameters
----------
meta : mapping or None (optional, keyword-only)
Metadata that will update the current metadata.
to_nonflat : bool, optional keyword-only
Whether to change to the non-flat version of this cosmology.
**kwargs
Cosmology parameter (and name) modifications. If any parameter is
changed and a new name is not given, the name will be set to "[old
name] (modified)".
Returns
-------
newcosmo : `~astropy.cosmology.Cosmology` subclass instance
A new instance of this class with updated parameters as specified.
If no arguments are given, then a reference to this object is
returned instead of copy.
Examples
--------
To make a copy of the ``Planck13`` cosmology with a different matter
density (``Om0``), and a new name:
>>> from astropy.cosmology import Planck13
>>> Planck13.clone(name="Modified Planck 2013", Om0=0.35)
FlatLambdaCDM(name="Modified Planck 2013", H0=67.77 km / (Mpc s),
Om0=0.35, ...
If no name is specified, the new name will note the modification.
>>> Planck13.clone(Om0=0.35).name
'Planck13 (modified)'
The keyword 'to_nonflat' can be used to clone on the non-flat equivalent
cosmology.
>>> Planck13.clone(to_nonflat=True)
LambdaCDM(name="Planck13", ...
>>> Planck13.clone(H0=70, to_nonflat=True)
LambdaCDM(name="Planck13 (modified)", H0=70.0 km / (Mpc s), ...
"""
if to_nonflat:
return self.nonflat.clone(meta=meta, **kwargs)
return super().clone(meta=meta, **kwargs)
# ===============================================================
def __equiv__(self, other):
"""flat-|Cosmology| equivalence.
Use `astropy.cosmology.funcs.cosmology_equal` with
``allow_equivalent=True`` for actual checks!
Parameters
----------
other : `~astropy.cosmology.Cosmology` subclass instance
The object to which to compare for equivalence.
Returns
-------
bool or `NotImplemented`
`True` if ``other`` is of the same class / non-flat class (e.g.
|FlatLambdaCDM| and |LambdaCDM|) has matching parameters and
parameter values.
`False` if ``other`` is of the same class but has different
parameters.
`NotImplemented` otherwise.
"""
if isinstance(other, FlatCosmologyMixin):
return super().__equiv__(other) # super gets from Cosmology
        # check if `other` is the non-flat version of this class; this makes the
# assumption that any further subclass of a flat cosmo keeps the same
# physics.
if not issubclass(other.__class__, self.__nonflatclass__):
return NotImplemented
        # Check that all parameters in ``other`` match those in ``self`` and
        # that ``other`` has no extra parameters.
params_eq = (
set(self.__all_parameters__) == set(other.__all_parameters__) # no extra
# equal
and all(
np.all(getattr(self, k) == getattr(other, k))
for k in self.__parameters__
)
# flatness check
and other.is_flat
)
return params_eq
|
0ab925cc7eb7342b29bf06d120689b2ecbf9ff6e53ae70a5b58cbab4b8d11ac4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""``astropy.cosmology`` contains classes and functions for cosmological
distance measures and other cosmology-related calculations.
See the `Astropy documentation
<https://docs.astropy.org/en/latest/cosmology/index.html>`_ for more
detailed usage examples and references.
"""
from . import core, flrw, funcs, parameter, realizations, units, utils
from .core import *
from .flrw import *
from .funcs import *
from .parameter import *
from .realizations import available, default_cosmology
from .utils import *
__all__ = (
core.__all__
+ flrw.__all__ # cosmology classes
+ realizations.__all__ # instances thereof
+ ["units"]
# utils
+ funcs.__all__
+ parameter.__all__
+ utils.__all__
)
def __getattr__(name):
"""Get realizations using lazy import from
`PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_.
Raises
------
AttributeError
If "name" is not in :mod:`astropy.cosmology.realizations`
"""
if name not in available:
raise AttributeError(f"module {__name__!r} has no attribute {name!r}.")
return getattr(realizations, name)
def __dir__():
"""Directory, including lazily-imported objects."""
return __all__
|
be3563a2f7012bf48ac62e33419fd4f2eade1ec892121d3683ca1713cfe78edb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
from numbers import Number
import numpy as np
from astropy.units import Quantity
from . import units as cu
__all__ = [] # nothing is publicly scoped
def vectorize_redshift_method(func=None, nin=1):
"""Vectorize a method of redshift(s).
Parameters
----------
func : callable or None
method to wrap. If `None` returns a :func:`functools.partial`
with ``nin`` loaded.
nin : int
Number of positional redshift arguments.
Returns
-------
wrapper : callable
:func:`functools.wraps` of ``func`` where the first ``nin``
arguments are converted from |Quantity| to :class:`numpy.ndarray`.
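    Examples
    --------
    An illustrative sketch with a hypothetical ``Demo`` class (not part of
    astropy); scalar inputs go straight to ``func`` while array-like inputs
    are routed to the vectorized call.
    >>> class Demo:
    ...     @vectorize_redshift_method
    ...     def square(self, z):
    ...         return z ** 2
    >>> Demo().square(2)
    4
    >>> Demo().square([1, 2, 3])
    array([1, 4, 9])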
"""
# allow for pie-syntax & setting nin
if func is None:
return functools.partial(vectorize_redshift_method, nin=nin)
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
"""
:func:`functools.wraps` of ``func`` where the first ``nin``
arguments are converted from |Quantity| to `numpy.ndarray` or scalar.
"""
# process inputs
# TODO! quantity-aware vectorization can simplify this.
zs = [
z if not isinstance(z, Quantity) else z.to_value(cu.redshift)
for z in args[:nin]
]
# scalar inputs
if all(isinstance(z, (Number, np.generic)) for z in zs):
return func(self, *zs, *args[nin:], **kwargs)
# non-scalar. use vectorized func
return wrapper.__vectorized__(self, *zs, *args[nin:], **kwargs)
wrapper.__vectorized__ = np.vectorize(func) # attach vectorized function
# TODO! use frompyfunc when can solve return type errors
return wrapper
def aszarr(z):
"""
Redshift as a `~numbers.Number` or `~numpy.ndarray` / |Quantity| / |Column|.
Allows for any ndarray ducktype by checking for attribute "shape".
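    Examples
    --------
    A brief sketch: scalars pass through unchanged, while array-like inputs
    are returned as plain arrays or values.
    >>> import numpy as np
    >>> aszarr(2.5)
    2.5
    >>> aszarr(np.array([1.0, 2.0]))
    array([1., 2.])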
"""
if isinstance(z, (Number, np.generic)): # scalars
return z
elif hasattr(z, "shape"): # ducktypes NumPy array
if hasattr(z, "unit"): # Quantity Column
return (z << cu.redshift).value # for speed only use enabled equivs
return z
# not one of the preferred types: Number / array ducktype
return Quantity(z, cu.redshift).value
|
ad728ba9366053fa37a2269fadeae79ae3a43a50cf373143211eaba89a01860f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Cosmological units and equivalencies.
""" # (newline needed for unit summary)
import astropy.units as u
from astropy.units.utils import generate_unit_summary as _generate_unit_summary
__all__ = [
"littleh",
"redshift",
# redshift equivalencies
"dimensionless_redshift",
"with_redshift",
"redshift_distance",
"redshift_hubble",
"redshift_temperature",
# other equivalencies
"with_H0",
]
__doctest_requires__ = {("with_redshift", "redshift_distance"): ["scipy"]}
_ns = globals()
###############################################################################
# Cosmological Units
# This is not formally a unit, but is used in that way in many contexts, and
# an appropriate equivalency is only possible if it's treated as a unit.
redshift = u.def_unit(
["redshift"],
prefixes=False,
namespace=_ns,
doc="Cosmological redshift.",
format={"latex": r""},
)
u.def_physical_type(redshift, "redshift")
# This is not formally a unit, but is used in that way in many contexts, and
# an appropriate equivalency is only possible if it's treated as a unit (see
# https://arxiv.org/pdf/1308.4150.pdf for more)
# Also note that h or h100 or h_100 would be a better name, but they either
# conflict or have numbers in them, which is disallowed
littleh = u.def_unit(
["littleh"],
namespace=_ns,
prefixes=False,
doc='Reduced/"dimensionless" Hubble constant',
format={"latex": r"h_{100}"},
)
###############################################################################
# Equivalencies
def dimensionless_redshift():
"""Allow redshift to be 1-to-1 equivalent to dimensionless.
It is special compared to other equivalency pairs in that it
allows this independent of the power to which the redshift is raised,
and independent of whether it is part of a more complicated unit.
It is similar to u.dimensionless_angles() in this respect.
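    Examples
    --------
    A brief sketch; the equivalency is also enabled by default when this
    module is imported.
    >>> import astropy.units as u
    >>> import astropy.cosmology.units as cu
    >>> (3 * cu.redshift).to(u.dimensionless_unscaled, cu.dimensionless_redshift())
    <Quantity 3.>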
"""
return u.Equivalency([(redshift, None)], "dimensionless_redshift")
def redshift_distance(cosmology=None, kind="comoving", **atzkw):
"""Convert quantities between redshift and distance.
Care should be taken to not misinterpret a relativistic, gravitational, etc
redshift as a cosmological one.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional
A cosmology realization or built-in cosmology's name (e.g. 'Planck18').
If None, will use the default cosmology
(controlled by :class:`~astropy.cosmology.default_cosmology`).
kind : {'comoving', 'lookback', 'luminosity'}, optional
The distance type for the Equivalency.
Note this does NOT include the angular diameter distance as this
distance measure is not monotonic.
**atzkw
keyword arguments for :func:`~astropy.cosmology.z_at_value`, which is used to
convert distance to redshift.
Returns
-------
`~astropy.units.equivalencies.Equivalency`
        Equivalency between redshift and distance.
Raises
------
`~astropy.cosmology.CosmologyError`
If the distance corresponds to a redshift that is larger than ``zmax``.
Exception
See :func:`~astropy.cosmology.z_at_value` for possible exceptions, e.g. if the
distance maps to a redshift that is larger than ``zmax``, the maximum redshift.
Examples
--------
>>> import astropy.units as u
>>> import astropy.cosmology.units as cu
>>> from astropy.cosmology import WMAP9
>>> z = 1100 * cu.redshift
>>> d = z.to(u.Mpc, cu.redshift_distance(WMAP9, kind="comoving"))
>>> d # doctest: +FLOAT_CMP
<Quantity 14004.03157418 Mpc>
The reverse operation is also possible, though not always as simple. To convert a
very large distance to a redshift it might be necessary to specify a large enough
``zmax`` value. See :func:`~astropy.cosmology.z_at_value` for details.
>>> d.to(cu.redshift, cu.redshift_distance(WMAP9, kind="comoving", zmax=1200)) # doctest: +FLOAT_CMP
<Quantity 1100.000 redshift>
"""
from astropy.cosmology import default_cosmology, z_at_value
# get cosmology: None -> default and process str / class
cosmology = cosmology if cosmology is not None else default_cosmology.get()
with default_cosmology.set(cosmology): # if already cosmo, passes through
cosmology = default_cosmology.get()
allowed_kinds = ("comoving", "lookback", "luminosity")
if kind not in allowed_kinds:
raise ValueError(f"`kind` is not one of {allowed_kinds}")
method = getattr(cosmology, kind + "_distance")
def z_to_distance(z):
"""Redshift to distance."""
return method(z)
def distance_to_z(d):
"""Distance to redshift."""
return z_at_value(method, d << u.Mpc, **atzkw)
return u.Equivalency(
[(redshift, u.Mpc, z_to_distance, distance_to_z)],
"redshift_distance",
{"cosmology": cosmology, "distance": kind},
)
def redshift_hubble(cosmology=None, **atzkw):
"""Convert quantities between redshift and Hubble parameter and little-h.
Care should be taken to not misinterpret a relativistic, gravitational, etc
redshift as a cosmological one.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional
A cosmology realization or built-in cosmology's name (e.g. 'Planck18').
If None, will use the default cosmology
(controlled by :class:`~astropy.cosmology.default_cosmology`).
**atzkw
keyword arguments for :func:`~astropy.cosmology.z_at_value`
Returns
-------
`~astropy.units.equivalencies.Equivalency`
Equivalency between redshift and Hubble parameter and little-h unit.
Examples
--------
>>> import astropy.units as u
>>> import astropy.cosmology.units as cu
>>> from astropy.cosmology import WMAP9
>>> z = 1100 * cu.redshift
>>> equivalency = cu.redshift_hubble(WMAP9) # construct equivalency
>>> z.to(u.km / u.s / u.Mpc, equivalency) # doctest: +FLOAT_CMP
<Quantity 1565637.40154275 km / (Mpc s)>
>>> z.to(cu.littleh, equivalency) # doctest: +FLOAT_CMP
<Quantity 15656.37401543 littleh>
"""
from astropy.cosmology import default_cosmology, z_at_value
# get cosmology: None -> default and process str / class
cosmology = cosmology if cosmology is not None else default_cosmology.get()
with default_cosmology.set(cosmology): # if already cosmo, passes through
cosmology = default_cosmology.get()
def z_to_hubble(z):
"""Redshift to Hubble parameter."""
return cosmology.H(z)
def hubble_to_z(H):
"""Hubble parameter to redshift."""
return z_at_value(cosmology.H, H << (u.km / u.s / u.Mpc), **atzkw)
def z_to_littleh(z):
"""Redshift to :math:`h`-unit Quantity."""
return z_to_hubble(z).to_value(u.km / u.s / u.Mpc) / 100 * littleh
def littleh_to_z(h):
""":math:`h`-unit Quantity to redshift."""
return hubble_to_z(h * 100)
return u.Equivalency(
[
(redshift, u.km / u.s / u.Mpc, z_to_hubble, hubble_to_z),
(redshift, littleh, z_to_littleh, littleh_to_z),
],
"redshift_hubble",
{"cosmology": cosmology},
)
def redshift_temperature(cosmology=None, **atzkw):
"""Convert quantities between redshift and CMB temperature.
Care should be taken to not misinterpret a relativistic, gravitational, etc
redshift as a cosmological one.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional
A cosmology realization or built-in cosmology's name (e.g. 'Planck18').
If None, will use the default cosmology
(controlled by :class:`~astropy.cosmology.default_cosmology`).
**atzkw
keyword arguments for :func:`~astropy.cosmology.z_at_value`
Returns
-------
`~astropy.units.equivalencies.Equivalency`
Equivalency between redshift and temperature.
Examples
--------
>>> import astropy.units as u
>>> import astropy.cosmology.units as cu
>>> from astropy.cosmology import WMAP9
>>> z = 1100 * cu.redshift
>>> z.to(u.K, cu.redshift_temperature(WMAP9))
<Quantity 3000.225 K>
"""
from astropy.cosmology import default_cosmology, z_at_value
# get cosmology: None -> default and process str / class
cosmology = cosmology if cosmology is not None else default_cosmology.get()
with default_cosmology.set(cosmology): # if already cosmo, passes through
cosmology = default_cosmology.get()
def z_to_Tcmb(z):
return cosmology.Tcmb(z)
def Tcmb_to_z(T):
return z_at_value(cosmology.Tcmb, T << u.K, **atzkw)
return u.Equivalency(
[(redshift, u.K, z_to_Tcmb, Tcmb_to_z)],
"redshift_temperature",
{"cosmology": cosmology},
)
def with_redshift(
cosmology=None, *, distance="comoving", hubble=True, Tcmb=True, atzkw=None
):
"""Convert quantities between measures of cosmological distance.
Note: by default all equivalencies are on and must be explicitly turned off.
Care should be taken to not misinterpret a relativistic, gravitational, etc
redshift as a cosmological one.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional
A cosmology realization or built-in cosmology's name (e.g. 'Planck18').
If `None`, will use the default cosmology
(controlled by :class:`~astropy.cosmology.default_cosmology`).
distance : {'comoving', 'lookback', 'luminosity'} or None (optional, keyword-only)
The type of distance equivalency to create or `None`.
Default is 'comoving'.
hubble : bool (optional, keyword-only)
Whether to create a Hubble parameter <-> redshift equivalency, using
``Cosmology.H``. Default is `True`.
Tcmb : bool (optional, keyword-only)
Whether to create a CMB temperature <-> redshift equivalency, using
``Cosmology.Tcmb``. Default is `True`.
atzkw : dict or None (optional, keyword-only)
keyword arguments for :func:`~astropy.cosmology.z_at_value`
Returns
-------
`~astropy.units.equivalencies.Equivalency`
With equivalencies between redshift and distance / Hubble / temperature.
Examples
--------
>>> import astropy.units as u
>>> import astropy.cosmology.units as cu
>>> from astropy.cosmology import WMAP9
>>> equivalency = cu.with_redshift(WMAP9)
>>> z = 1100 * cu.redshift
Redshift to (comoving) distance:
>>> z.to(u.Mpc, equivalency) # doctest: +FLOAT_CMP
<Quantity 14004.03157418 Mpc>
Redshift to the Hubble parameter:
>>> z.to(u.km / u.s / u.Mpc, equivalency) # doctest: +FLOAT_CMP
<Quantity 1565637.40154275 km / (Mpc s)>
>>> z.to(cu.littleh, equivalency) # doctest: +FLOAT_CMP
<Quantity 15656.37401543 littleh>
Redshift to CMB temperature:
>>> z.to(u.K, equivalency)
<Quantity 3000.225 K>
"""
from astropy.cosmology import default_cosmology
# get cosmology: None -> default and process str / class
cosmology = cosmology if cosmology is not None else default_cosmology.get()
with default_cosmology.set(cosmology): # if already cosmo, passes through
cosmology = default_cosmology.get()
atzkw = atzkw if atzkw is not None else {}
equivs = [] # will append as built
# Hubble <-> Redshift
if hubble:
equivs.extend(redshift_hubble(cosmology, **atzkw))
# CMB Temperature <-> Redshift
if Tcmb:
equivs.extend(redshift_temperature(cosmology, **atzkw))
# Distance <-> Redshift, but need to choose which distance
if distance is not None:
equivs.extend(redshift_distance(cosmology, kind=distance, **atzkw))
# -----------
return u.Equivalency(
equivs,
"with_redshift",
{"cosmology": cosmology, "distance": distance, "hubble": hubble, "Tcmb": Tcmb},
)
# ===================================================================
def with_H0(H0=None):
"""
Convert between quantities with little-h and the equivalent physical units.
Parameters
----------
H0 : None or `~astropy.units.Quantity` ['frequency']
The value of the Hubble constant to assume. If a
`~astropy.units.Quantity`, will assume the quantity *is* ``H0``. If
`None` (default), use the ``H0`` attribute from
:mod:`~astropy.cosmology.default_cosmology`.
References
----------
For an illuminating discussion on why you may or may not want to use
little-h at all, see https://arxiv.org/pdf/1308.4150.pdf
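    Examples
    --------
    A brief sketch with an explicit value for ``H0`` (an assumed
    70 km/s/Mpc, purely for illustration).
    >>> import astropy.units as u
    >>> import astropy.cosmology.units as cu
    >>> H0 = 70 * (u.km / u.s / u.Mpc)
    >>> (100 * u.Mpc / cu.littleh).to(u.Mpc, cu.with_H0(H0))  # doctest: +FLOAT_CMP
    <Quantity 142.85714286 Mpc>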
"""
if H0 is None:
from .realizations import default_cosmology
H0 = default_cosmology.get().H0
h100_val_unit = u.Unit(100 / (H0.to_value((u.km / u.s) / u.Mpc)) * littleh)
return u.Equivalency([(h100_val_unit, None)], "with_H0", kwargs={"H0": H0})
# ===================================================================
# Enable the set of default equivalencies.
# If the cosmology package is imported, this is added to the list astropy-wide.
u.add_enabled_equivalencies(dimensionless_redshift())
# =============================================================================
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
if __doc__ is not None:
__doc__ += _generate_unit_summary(_ns)
|
46ed7be7bab4a732719137d922f9c4c0e4b7c5f710d4309847042c9e7d71cf95 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# STDLIB
import pathlib
import sys
from typing import Optional, Union
# LOCAL
from astropy.utils.data import get_pkg_data_path
from astropy.utils.decorators import deprecated
from astropy.utils.state import ScienceState
from . import _io # Ensure IO methods are registered, to read realizations # noqa: F401
from .core import Cosmology
_COSMOLOGY_DATA_DIR = pathlib.Path(
get_pkg_data_path("cosmology", "data", package="astropy")
)
available = tuple(sorted(p.stem for p in _COSMOLOGY_DATA_DIR.glob("*.ecsv")))
__all__ = ["available", "default_cosmology"] + list(available)
__doctest_requires__ = {"*": ["scipy"]}
def __getattr__(name):
"""Make specific realizations from data files with lazy import from
`PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_.
Raises
------
AttributeError
If "name" is not in :mod:`astropy.cosmology.realizations`
"""
if name not in available:
raise AttributeError(f"module {__name__!r} has no attribute {name!r}.")
cosmo = Cosmology.read(
str(_COSMOLOGY_DATA_DIR / name) + ".ecsv", format="ascii.ecsv"
)
cosmo.__doc__ = (
f"{name} instance of {cosmo.__class__.__qualname__} "
f"cosmology\n(from {cosmo.meta['reference']})"
)
# Cache in this module so `__getattr__` is only called once per `name`.
setattr(sys.modules[__name__], name, cosmo)
return cosmo
def __dir__():
"""Directory, including lazily-imported objects."""
return __all__
#########################################################################
# The science state below contains the current cosmology.
#########################################################################
class default_cosmology(ScienceState):
"""The default cosmology to use.
To change it::
>>> from astropy.cosmology import default_cosmology, WMAP7
>>> with default_cosmology.set(WMAP7):
... # WMAP7 cosmology in effect
... pass
Or, you may use a string::
>>> with default_cosmology.set('WMAP7'):
... # WMAP7 cosmology in effect
... pass
To get the default cosmology:
>>> default_cosmology.get()
FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, ...
"""
_default_value = "Planck18"
_value = "Planck18"
@deprecated("5.0", alternative="get")
@classmethod
def get_cosmology_from_string(cls, arg):
"""Return a cosmology instance from a string."""
if arg == "no_default":
value = None
else:
value = cls._get_from_registry(arg)
return value
@classmethod
def validate(cls, value: Union[Cosmology, str, None]) -> Optional[Cosmology]:
"""Return a Cosmology given a value.
Parameters
----------
value : None, str, or `~astropy.cosmology.Cosmology`
Returns
-------
`~astropy.cosmology.Cosmology` instance
Raises
------
TypeError
If ``value`` is not a string or |Cosmology|.
"""
# None -> default
if value is None:
value = cls._default_value
# Parse to Cosmology. Error if cannot.
if isinstance(value, str):
# special-case one string
if value == "no_default":
value = None
else:
value = cls._get_from_registry(value)
elif not isinstance(value, Cosmology):
raise TypeError(
"default_cosmology must be a string or Cosmology instance, "
f"not {value}."
)
return value
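# Behavior sketch (editorial addition): ``validate(None)`` resolves to the
# default value ("Planck18"), a registered name resolves to its realization via
# ``_get_from_registry``, the special string "no_default" maps to `None`, and
# anything that is neither a str nor a Cosmology raises TypeError.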
@classmethod
def _get_from_registry(cls, name: str) -> Cosmology:
"""Get a registered Cosmology realization.
Parameters
----------
name : str
The built-in |Cosmology| realization to retrieve.
Returns
-------
`astropy.cosmology.Cosmology`
The cosmology realization of `name`.
Raises
------
ValueError
If ``name`` is a str, but not for a built-in Cosmology.
TypeError
If ``name`` is for a non-Cosmology object.
"""
try:
value = getattr(sys.modules[__name__], name)
except AttributeError:
raise ValueError(
f"Unknown cosmology {name!r}. Valid cosmologies:\n{available}"
)
if not isinstance(value, Cosmology):
raise TypeError(f"cannot find a Cosmology realization called {name}.")
return value
e5c3a49977e90ba98189cde5835905a6530bd2c8bc5e9a2c6e34fd8900c79a50
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy.table.sorted_array import SortedArray
from astropy.table.table import Table
@pytest.fixture
def array():
# composite index
col0 = np.array([x % 2 for x in range(1, 11)])
col1 = np.array(list(range(1, 11)))
t = Table([col0, col1])
t = t[t.argsort()]
return SortedArray(t, t["col1"].copy())
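# Editorial note: the fixture above builds a two-column composite key
# (parity, value) sorted lexicographically, so the rows run (0, 2), (0, 4), ...
# then (1, 1), (1, 3), ... With ``col1`` passed as the row-index array, the
# ``find`` and ``range`` calls in the tests below return matching ``col1`` values.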
@pytest.fixture
def wide_array():
# array with 100 columns
t = Table([[x] * 10 for x in np.arange(100)])
return SortedArray(t, t["col0"].copy())
def test_array_find(array):
for i in range(1, 11):
print(f"Searching for {i}")
assert array.find((i % 2, i)) == [i]
assert array.find((1, 4)) == []
def test_array_range(array):
assert np.all(array.range((0, 8), (1, 3), (True, True)) == [8, 10, 1, 3])
assert np.all(array.range((0, 8), (1, 3), (False, True)) == [10, 1, 3])
assert np.all(array.range((0, 8), (1, 3), (True, False)) == [8, 10, 1])
def test_wide_array(wide_array):
# checks for a previous bug in which the length of a
# sliced SortedArray was set to the number of columns
# instead of the number of elements in each column
first_row = wide_array[:1].data
assert np.all(first_row == Table([[x] for x in np.arange(100)]))
fe23664c9c15cc42b4263a8b5b37daf91ea719de2a276ded6c315a88b5c99b40
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import gc
import os
import pathlib
import pickle
import sys
from collections import OrderedDict
from io import StringIO
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from astropy import table
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.table import (
Column,
MaskedColumn,
QTable,
Table,
TableAttribute,
TableReplaceWarning,
)
from astropy.tests.helper import assert_follows_unicode_guidelines
from astropy.time import Time, TimeDelta
from astropy.utils.compat import NUMPY_LT_1_25
from astropy.utils.compat.optional_deps import HAS_PANDAS
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.tests.test_metadata import MetaBaseTest
from .conftest import MIXIN_COLS, MaskedTable
@pytest.fixture
def home_is_tmpdir(monkeypatch, tmp_path):
"""
Pytest fixture to run a test case with tilde-prefixed paths.
In the tilde-path case, environment variables are temporarily
modified so that '~' resolves to the temp directory.
"""
# For Unix
monkeypatch.setenv("HOME", str(tmp_path))
# For Windows
monkeypatch.setenv("USERPROFILE", str(tmp_path))
class SetupData:
def _setup(self, table_types):
self._table_type = table_types.Table
self._column_type = table_types.Column
@property
def a(self):
if self._column_type is not None:
if not hasattr(self, "_a"):
self._a = self._column_type(
[1, 2, 3], name="a", format="%d", meta={"aa": [0, 1, 2, 3, 4]}
)
return self._a
@property
def b(self):
if self._column_type is not None:
if not hasattr(self, "_b"):
self._b = self._column_type(
[4, 5, 6], name="b", format="%d", meta={"aa": 1}
)
return self._b
@property
def c(self):
if self._column_type is not None:
if not hasattr(self, "_c"):
self._c = self._column_type([7, 8, 9], "c")
return self._c
@property
def d(self):
if self._column_type is not None:
if not hasattr(self, "_d"):
self._d = self._column_type([7, 8, 7], "d")
return self._d
@property
def obj(self):
if self._column_type is not None:
if not hasattr(self, "_obj"):
self._obj = self._column_type([1, "string", 3], "obj", dtype="O")
return self._obj
@property
def t(self):
if self._table_type is not None:
if not hasattr(self, "_t"):
self._t = self._table_type([self.a, self.b])
return self._t
@pytest.mark.usefixtures("table_types")
class TestSetTableColumn(SetupData):
def test_set_row(self, table_types):
"""Set a row from a tuple of values"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t[1] = (20, 21)
assert t["a"][0] == 1
assert t["a"][1] == 20
assert t["a"][2] == 3
assert t["b"][0] == 4
assert t["b"][1] == 21
assert t["b"][2] == 6
def test_set_row_existing(self, table_types):
"""Set a row from another existing row"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t[0] = t[1]
assert t[0][0] == 2
assert t[0][1] == 5
def test_set_row_fail_1(self, table_types):
"""Set a row from an incorrectly-sized or typed set of values"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
with pytest.raises(ValueError):
t[1] = (20, 21, 22)
with pytest.raises(ValueError):
t[1] = 0
def test_set_row_fail_2(self, table_types):
"""Set a row from an incorrectly-typed tuple of values"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
with pytest.raises(ValueError):
t[1] = ("abc", "def")
def test_set_new_col_new_table(self, table_types):
"""Create a new column in empty table using the item access syntax"""
self._setup(table_types)
t = table_types.Table()
t["aa"] = self.a
# Test that the new column name is 'aa' and that the values match
assert np.all(t["aa"] == self.a)
assert t.colnames == ["aa"]
def test_set_new_col_new_table_quantity(self, table_types):
"""Create a new column (from a quantity) in empty table using the item access syntax"""
self._setup(table_types)
t = table_types.Table()
t["aa"] = np.array([1, 2, 3]) * u.m
assert np.all(t["aa"] == np.array([1, 2, 3]))
assert t["aa"].unit == u.m
t["bb"] = 3 * u.m
assert np.all(t["bb"] == 3)
assert t["bb"].unit == u.m
def test_set_new_col_existing_table(self, table_types):
"""Create a new column in an existing table using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a])
# Add a column
t["bb"] = self.b
assert np.all(t["bb"] == self.b)
assert t.colnames == ["a", "bb"]
assert t["bb"].meta == self.b.meta
assert t["bb"].format == self.b.format
# Add another column
t["c"] = t["a"]
assert np.all(t["c"] == t["a"])
assert t.colnames == ["a", "bb", "c"]
assert t["c"].meta == t["a"].meta
assert t["c"].format == t["a"].format
# Add a multi-dimensional column
t["d"] = table_types.Column(np.arange(12).reshape(3, 2, 2))
assert t["d"].shape == (3, 2, 2)
assert t["d"][0, 0, 1] == 1
# Add column from a list
t["e"] = ["hello", "the", "world"]
assert np.all(t["e"] == np.array(["hello", "the", "world"]))
# Make sure setting existing column still works
t["e"] = ["world", "hello", "the"]
assert np.all(t["e"] == np.array(["world", "hello", "the"]))
# Add a column via broadcasting
t["f"] = 10
assert np.all(t["f"] == 10)
# Add a column from a Quantity
t["g"] = np.array([1, 2, 3]) * u.m
assert np.all(t["g"].data == np.array([1, 2, 3]))
assert t["g"].unit == u.m
# Add a column from a (scalar) Quantity
t["g"] = 3 * u.m
assert np.all(t["g"].data == 3)
assert t["g"].unit == u.m
def test_set_new_unmasked_col_existing_table(self, table_types):
"""Create a new column in an existing table using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a]) # masked or unmasked
b = table.Column(name="b", data=[1, 2, 3]) # unmasked
t["b"] = b
assert np.all(t["b"] == b)
def test_set_new_masked_col_existing_table(self, table_types):
"""Create a new column in an existing table using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a]) # masked or unmasked
b = table.MaskedColumn(name="b", data=[1, 2, 3]) # masked
t["b"] = b
assert np.all(t["b"] == b)
def test_set_new_col_existing_table_fail(self, table_types):
"""Generate failure when creating a new column using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a])
# Wrong size
with pytest.raises(ValueError):
t["b"] = [1, 2]
@pytest.mark.usefixtures("table_types")
class TestEmptyData:
def test_1(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name="a", dtype=int, length=100))
assert len(t["a"]) == 100
def test_2(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name="a", dtype=int, shape=(3,), length=100))
assert len(t["a"]) == 100
def test_3(self, table_types):
t = table_types.Table() # length is not given
t.add_column(table_types.Column(name="a", dtype=int))
assert len(t["a"]) == 0
def test_4(self, table_types):
t = table_types.Table() # length is not given
t.add_column(table_types.Column(name="a", dtype=int, shape=(3, 4)))
assert len(t["a"]) == 0
def test_5(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name="a")) # dtype is not specified
assert len(t["a"]) == 0
def test_scalar(self, table_types):
"""Test related to #3811 where setting empty tables to scalar values
should raise an error instead of having an error raised when accessing
the table."""
t = table_types.Table()
with pytest.raises(
TypeError, match="Empty table cannot have column set to scalar value"
):
t.add_column(0)
def test_add_via_setitem_and_slice(self, table_types):
"""Test related to #3023 where a MaskedColumn is created with name=None
and then gets changed to name='a'. After PR #2790 this test fails
without the #3023 fix."""
t = table_types.Table()
t["a"] = table_types.Column([1, 2, 3])
t2 = t[:]
assert t2.colnames == t.colnames
@pytest.mark.usefixtures("table_types")
class TestNewFromColumns:
def test_simple(self, table_types):
cols = [
table_types.Column(name="a", data=[1, 2, 3]),
table_types.Column(name="b", data=[4, 5, 6], dtype=np.float32),
]
t = table_types.Table(cols)
assert np.all(t["a"].data == np.array([1, 2, 3]))
assert np.all(t["b"].data == np.array([4, 5, 6], dtype=np.float32))
assert type(t["b"][1]) is np.float32
def test_from_np_array(self, table_types):
cols = [
table_types.Column(
name="a", data=np.array([1, 2, 3], dtype=np.int64), dtype=np.float64
),
table_types.Column(name="b", data=np.array([4, 5, 6], dtype=np.float32)),
]
t = table_types.Table(cols)
assert np.all(t["a"] == np.array([1, 2, 3], dtype=np.float64))
assert np.all(t["b"] == np.array([4, 5, 6], dtype=np.float32))
assert type(t["a"][1]) is np.float64
assert type(t["b"][1]) is np.float32
def test_size_mismatch(self, table_types):
cols = [
table_types.Column(name="a", data=[1, 2, 3]),
table_types.Column(name="b", data=[4, 5, 6, 7]),
]
with pytest.raises(ValueError):
table_types.Table(cols)
def test_name_none(self, table_types):
"""Column with name=None can init a table whether or not names are supplied"""
c = table_types.Column(data=[1, 2], name="c")
d = table_types.Column(data=[3, 4])
t = table_types.Table([c, d], names=(None, "d"))
assert t.colnames == ["c", "d"]
t = table_types.Table([c, d])
assert t.colnames == ["c", "col1"]
@pytest.mark.usefixtures("table_types")
class TestReverse:
def test_reverse(self, table_types):
t = table_types.Table(
[
[1, 2, 3],
["a", "b", "cc"],
]
)
t.reverse()
assert np.all(t["col0"] == np.array([3, 2, 1]))
assert np.all(t["col1"] == np.array(["cc", "b", "a"]))
t2 = table_types.Table(t, copy=False)
assert np.all(t2["col0"] == np.array([3, 2, 1]))
assert np.all(t2["col1"] == np.array(["cc", "b", "a"]))
t2 = table_types.Table(t, copy=True)
assert np.all(t2["col0"] == np.array([3, 2, 1]))
assert np.all(t2["col1"] == np.array(["cc", "b", "a"]))
t2.sort("col0")
assert np.all(t2["col0"] == np.array([1, 2, 3]))
assert np.all(t2["col1"] == np.array(["a", "b", "cc"]))
def test_reverse_big(self, table_types):
x = np.arange(10000)
y = x + 1
t = table_types.Table([x, y], names=("x", "y"))
t.reverse()
assert np.all(t["x"] == x[::-1])
assert np.all(t["y"] == y[::-1])
def test_reverse_mixin(self):
"""Test reverse for a mixin with no item assignment, fix for #9836"""
sc = SkyCoord([1, 2], [3, 4], unit="deg")
t = Table([[2, 1], sc], names=["a", "sc"])
t.reverse()
assert np.all(t["a"] == [1, 2])
assert np.allclose(t["sc"].ra.to_value("deg"), [2, 1])
@pytest.mark.usefixtures("table_types")
class TestRound:
def test_round_int(self, table_types):
t = table_types.Table(
[
["a", "b", "c"],
[1.11, 2.3, 3.0],
[1.123456, 2.9876, 3.901],
]
)
t.round()
assert np.all(t["col0"] == ["a", "b", "c"])
assert np.all(t["col1"] == [1.0, 2.0, 3.0])
assert np.all(t["col2"] == [1.0, 3.0, 4.0])
def test_round_dict(self, table_types):
t = table_types.Table(
[
["a", "b", "c"],
[1.5, 2.5, 3.0111],
[1.123456, 2.9876, 3.901],
]
)
t.round({"col1": 0, "col2": 3})
assert np.all(t["col0"] == ["a", "b", "c"])
assert np.all(t["col1"] == [2.0, 2.0, 3.0])
assert np.all(t["col2"] == [1.123, 2.988, 3.901])
def test_round_invalid(self, table_types):
t = table_types.Table([[1, 2, 3]])
with pytest.raises(
ValueError, match="'decimals' argument must be an int or a dict"
):
t.round(0.5)
def test_round_kind(self, table_types):
for typecode in "bBhHiIlLqQpPefdgFDG": # AllInteger, AllFloat
arr = np.array([4, 16], dtype=typecode)
t = Table([arr])
col0 = t["col0"]
t.round(decimals=-1) # Round to nearest 10
assert np.all(t["col0"] == [0, 20])
assert t["col0"] is col0
@pytest.mark.usefixtures("table_types")
class TestColumnAccess:
def test_1(self, table_types):
t = table_types.Table()
with pytest.raises(KeyError):
t["a"]
def test_2(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name="a", data=[1, 2, 3]))
assert np.all(t["a"] == np.array([1, 2, 3]))
with pytest.raises(KeyError):
t["b"] # column does not exist
def test_itercols(self, table_types):
names = ["a", "b", "c"]
t = table_types.Table([[1], [2], [3]], names=names)
for name, col in zip(names, t.itercols()):
assert name == col.name
assert isinstance(col, table_types.Column)
@pytest.mark.usefixtures("table_types")
class TestAddLength(SetupData):
def test_right_length(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.add_column(self.b)
def test_too_long(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
with pytest.raises(ValueError):
t.add_column(
table_types.Column(name="b", data=[4, 5, 6, 7])
) # data too long
def test_too_short(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
with pytest.raises(ValueError):
t.add_column(table_types.Column(name="b", data=[4, 5])) # data too short
@pytest.mark.usefixtures("table_types")
class TestAddPosition(SetupData):
def test_1(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a, 0)
def test_2(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a, 1)
def test_3(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a, -1)
def test_5(self, table_types):
self._setup(table_types)
t = table_types.Table()
with pytest.raises(ValueError):
t.index_column("b")
def test_6(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
t.add_column(self.b)
assert t.colnames == ["a", "b"]
def test_7(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.add_column(self.b, t.index_column("a"))
assert t.colnames == ["b", "a"]
def test_8(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.add_column(self.b, t.index_column("a") + 1)
assert t.colnames == ["a", "b"]
def test_9(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
t.add_column(self.b, t.index_column("a") + 1)
t.add_column(self.c, t.index_column("b"))
assert t.colnames == ["a", "c", "b"]
def test_10(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
ia = t.index_column("a")
t.add_column(self.b, ia + 1)
t.add_column(self.c, ia)
assert t.colnames == ["c", "a", "b"]
@pytest.mark.usefixtures("table_types")
class TestAddName(SetupData):
def test_override_name(self, table_types):
self._setup(table_types)
t = table_types.Table()
# Check that we can override the name of the input column in the Table
t.add_column(self.a, name="b")
t.add_column(self.b, name="a")
assert t.colnames == ["b", "a"]
# Check that we did not change the name of the input column
assert self.a.info.name == "a"
assert self.b.info.name == "b"
# Now test with an input column from another table
t2 = table_types.Table()
t2.add_column(t["a"], name="c")
assert t2.colnames == ["c"]
# Check that we did not change the name of the input column
assert t.colnames == ["b", "a"]
# Check that we can give a name if none was present
col = table_types.Column([1, 2, 3])
t.add_column(col, name="c")
assert t.colnames == ["b", "a", "c"]
def test_default_name(self, table_types):
t = table_types.Table()
col = table_types.Column([1, 2, 3])
t.add_column(col)
assert t.colnames == ["col0"]
@pytest.mark.usefixtures("table_types")
class TestInitFromTable(SetupData):
def test_from_table_cols(self, table_types):
"""Ensure that using cols from an existing table gives
a clean copy.
"""
self._setup(table_types)
t = self.t
cols = t.columns
# Construct Table with cols via Table._new_from_cols
t2a = table_types.Table([cols["a"], cols["b"], self.c])
# Construct with add_column
t2b = table_types.Table()
t2b.add_column(cols["a"])
t2b.add_column(cols["b"])
t2b.add_column(self.c)
t["a"][1] = 20
t["b"][1] = 21
for t2 in [t2a, t2b]:
t2["a"][2] = 10
t2["b"][2] = 11
t2["c"][2] = 12
t2.columns["a"].meta["aa"][3] = 10
assert np.all(t["a"] == np.array([1, 20, 3]))
assert np.all(t["b"] == np.array([4, 21, 6]))
assert np.all(t2["a"] == np.array([1, 2, 10]))
assert np.all(t2["b"] == np.array([4, 5, 11]))
assert np.all(t2["c"] == np.array([7, 8, 12]))
assert t2["a"].name == "a"
assert t2.columns["a"].meta["aa"][3] == 10
assert t.columns["a"].meta["aa"][3] == 3
@pytest.mark.usefixtures("table_types")
class TestAddColumns(SetupData):
def test_add_columns1(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_columns([self.a, self.b, self.c])
assert t.colnames == ["a", "b", "c"]
def test_add_columns2(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d])
assert t.colnames == ["a", "b", "c", "d"]
assert np.all(t["c"] == np.array([7, 8, 9]))
def test_add_columns3(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d], indexes=[1, 0])
assert t.colnames == ["d", "a", "c", "b"]
def test_add_columns4(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d], indexes=[0, 0])
assert t.colnames == ["c", "d", "a", "b"]
def test_add_columns5(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d], indexes=[2, 2])
assert t.colnames == ["a", "b", "c", "d"]
def test_add_columns6(self, table_types):
"""Check that we can override column names."""
self._setup(table_types)
t = table_types.Table()
t.add_columns([self.a, self.b, self.c], names=["b", "c", "a"])
assert t.colnames == ["b", "c", "a"]
def test_add_columns7(self, table_types):
"""Check that default names are used when appropriate."""
t = table_types.Table()
col0 = table_types.Column([1, 2, 3])
col1 = table_types.Column([4, 5, 3])
t.add_columns([col0, col1])
assert t.colnames == ["col0", "col1"]
def test_add_duplicate_column(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
with pytest.raises(ValueError):
t.add_column(table_types.Column(name="a", data=[0, 1, 2]))
t.add_column(
table_types.Column(name="a", data=[0, 1, 2]), rename_duplicate=True
)
t.add_column(self.b)
t.add_column(self.c)
assert t.colnames == ["a", "a_1", "b", "c"]
t.add_column(
table_types.Column(name="a", data=[0, 1, 2]), rename_duplicate=True
)
assert t.colnames == ["a", "a_1", "b", "c", "a_2"]
# test adding column from a separate Table
t1 = table_types.Table()
t1.add_column(self.a)
with pytest.raises(ValueError):
t.add_column(t1["a"])
t.add_column(t1["a"], rename_duplicate=True)
t1["a"][0] = 100 # Change original column
assert t.colnames == ["a", "a_1", "b", "c", "a_2", "a_3"]
assert t1.colnames == ["a"]
# Check new column didn't change (since name conflict forced a copy)
assert t["a_3"][0] == self.a[0]
# Check that rename_duplicate=True is ok if there are no duplicates
t.add_column(
table_types.Column(name="q", data=[0, 1, 2]), rename_duplicate=True
)
assert t.colnames == ["a", "a_1", "b", "c", "a_2", "a_3", "q"]
def test_add_duplicate_columns(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b, self.c])
with pytest.raises(ValueError):
t.add_columns(
[
table_types.Column(name="a", data=[0, 1, 2]),
table_types.Column(name="b", data=[0, 1, 2]),
]
)
t.add_columns(
[
table_types.Column(name="a", data=[0, 1, 2]),
table_types.Column(name="b", data=[0, 1, 2]),
],
rename_duplicate=True,
)
t.add_column(self.d)
assert t.colnames == ["a", "b", "c", "a_1", "b_1", "d"]
@pytest.mark.usefixtures("table_types")
class TestAddRow(SetupData):
@property
def b(self):
if self._column_type is not None:
if not hasattr(self, "_b"):
self._b = self._column_type(name="b", data=[4.0, 5.1, 6.2])
return self._b
@property
def c(self):
if self._column_type is not None:
if not hasattr(self, "_c"):
self._c = self._column_type(name="c", data=["7", "8", "9"])
return self._c
@property
def d(self):
if self._column_type is not None:
if not hasattr(self, "_d"):
self._d = self._column_type(name="d", data=[[1, 2], [3, 4], [5, 6]])
return self._d
@property
def t(self):
if self._table_type is not None:
if not hasattr(self, "_t"):
self._t = self._table_type([self.a, self.b, self.c])
return self._t
def test_add_none_to_empty_table(self, table_types):
self._setup(table_types)
t = table_types.Table(names=("a", "b", "c"), dtype=("(2,)i", "S4", "O"))
t.add_row()
assert np.all(t["a"][0] == [0, 0])
assert t["b"][0] == ""
assert t["c"][0] == 0
t.add_row()
assert np.all(t["a"][1] == [0, 0])
assert t["b"][1] == ""
assert t["c"][1] == 0
def test_add_stuff_to_empty_table(self, table_types):
self._setup(table_types)
t = table_types.Table(names=("a", "b", "obj"), dtype=("(2,)i", "S8", "O"))
t.add_row([[1, 2], "hello", "world"])
assert np.all(t["a"][0] == [1, 2])
assert t["b"][0] == "hello"
assert t["obj"][0] == "world"
# Make sure it is not repeating last row but instead
# adding zeros (as documented)
t.add_row()
assert np.all(t["a"][1] == [0, 0])
assert t["b"][1] == ""
assert t["obj"][1] == 0
def test_add_table_row(self, table_types):
self._setup(table_types)
t = self.t
t["d"] = self.d
t2 = table_types.Table([self.a, self.b, self.c, self.d])
t.add_row(t2[0])
assert len(t) == 4
assert np.all(t["a"] == np.array([1, 2, 3, 1]))
assert np.allclose(t["b"], np.array([4.0, 5.1, 6.2, 4.0]))
assert np.all(t["c"] == np.array(["7", "8", "9", "7"]))
assert np.all(t["d"] == np.array([[1, 2], [3, 4], [5, 6], [1, 2]]))
def test_add_table_row_obj(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b, self.obj])
t.add_row([1, 4.0, [10]])
assert len(t) == 4
assert np.all(t["a"] == np.array([1, 2, 3, 1]))
assert np.allclose(t["b"], np.array([4.0, 5.1, 6.2, 4.0]))
assert np.all(t["obj"] == np.array([1, "string", 3, [10]], dtype="O"))
def test_add_qtable_row_multidimensional(self):
q = [[1, 2], [3, 4]] * u.m
qt = table.QTable([q])
qt.add_row(([5, 6] * u.km,))
assert np.all(qt["col0"] == [[1, 2], [3, 4], [5000, 6000]] * u.m)
def test_add_with_tuple(self, table_types):
self._setup(table_types)
t = self.t
t.add_row((4, 7.2, "1"))
assert len(t) == 4
assert np.all(t["a"] == np.array([1, 2, 3, 4]))
assert np.allclose(t["b"], np.array([4.0, 5.1, 6.2, 7.2]))
assert np.all(t["c"] == np.array(["7", "8", "9", "1"]))
def test_add_with_list(self, table_types):
self._setup(table_types)
t = self.t
t.add_row([4, 7.2, "10"])
assert len(t) == 4
assert np.all(t["a"] == np.array([1, 2, 3, 4]))
assert np.allclose(t["b"], np.array([4.0, 5.1, 6.2, 7.2]))
assert np.all(t["c"] == np.array(["7", "8", "9", "10"]))
def test_add_with_dict(self, table_types):
self._setup(table_types)
t = self.t
t.add_row({"a": 4, "b": 7.2})
assert len(t) == 4
assert np.all(t["a"] == np.array([1, 2, 3, 4]))
assert np.allclose(t["b"], np.array([4.0, 5.1, 6.2, 7.2]))
if t.masked:
assert np.all(t["c"] == np.array(["7", "8", "9", "7"]))
else:
assert np.all(t["c"] == np.array(["7", "8", "9", ""]))
def test_add_with_none(self, table_types):
self._setup(table_types)
t = self.t
t.add_row()
assert len(t) == 4
assert np.all(t["a"].data == np.array([1, 2, 3, 0]))
assert np.allclose(t["b"], np.array([4.0, 5.1, 6.2, 0.0]))
assert np.all(t["c"].data == np.array(["7", "8", "9", ""]))
def test_add_missing_column(self, table_types):
self._setup(table_types)
t = self.t
with pytest.raises(ValueError):
t.add_row({"bad_column": 1})
def test_wrong_size_tuple(self, table_types):
self._setup(table_types)
t = self.t
with pytest.raises(ValueError):
t.add_row((1, 2))
def test_wrong_vals_type(self, table_types):
self._setup(table_types)
t = self.t
with pytest.raises(TypeError):
t.add_row(1)
def test_add_row_failures(self, table_types):
self._setup(table_types)
t = self.t
t_copy = table_types.Table(t, copy=True)
# Wrong number of columns
try:
t.add_row([1, 2, 3, 4])
except ValueError:
pass
assert len(t) == 3
assert np.all(t.as_array() == t_copy.as_array())
# Wrong data type
try:
t.add_row(["one", 2, 3])
except ValueError:
pass
assert len(t) == 3
assert np.all(t.as_array() == t_copy.as_array())
def test_insert_table_row(self, table_types):
"""
Light testing of Table.insert_row() method. The deep testing is done via
the add_row() tests, which call insert_row(index=len(self), ...), so
here just test that the added index parameter is handled correctly.
"""
self._setup(table_types)
row = (10, 40.0, "x", [10, 20])
for index in range(-3, 4):
indices = np.insert(np.arange(3), index, 3)
t = table_types.Table([self.a, self.b, self.c, self.d])
t2 = t.copy()
t.add_row(row) # By now we know this works
t2.insert_row(index, row)
for name in t.colnames:
if t[name].dtype.kind == "f":
assert np.allclose(t[name][indices], t2[name])
else:
assert np.all(t[name][indices] == t2[name])
for index in (-4, 4):
t = table_types.Table([self.a, self.b, self.c, self.d])
with pytest.raises(IndexError):
t.insert_row(index, row)
@pytest.mark.usefixtures("table_types")
class TestTableColumn(SetupData):
def test_column_view(self, table_types):
self._setup(table_types)
t = self.t
a = t.columns["a"]
a[2] = 10
assert t["a"][2] == 10
@pytest.mark.usefixtures("table_types")
class TestArrayColumns(SetupData):
def test_1d(self, table_types):
self._setup(table_types)
b = table_types.Column(name="b", dtype=int, shape=(2,), length=3)
t = table_types.Table([self.a])
t.add_column(b)
assert t["b"].shape == (3, 2)
assert t["b"][0].shape == (2,)
def test_2d(self, table_types):
self._setup(table_types)
b = table_types.Column(name="b", dtype=int, shape=(2, 4), length=3)
t = table_types.Table([self.a])
t.add_column(b)
assert t["b"].shape == (3, 2, 4)
assert t["b"][0].shape == (2, 4)
def test_3d(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
b = table_types.Column(name="b", dtype=int, shape=(2, 4, 6), length=3)
t.add_column(b)
assert t["b"].shape == (3, 2, 4, 6)
assert t["b"][0].shape == (2, 4, 6)
@pytest.mark.usefixtures("table_types")
class TestRemove(SetupData):
@property
def t(self):
if self._table_type is not None:
if not hasattr(self, "_t"):
self._t = self._table_type([self.a])
return self._t
@property
def t2(self):
if self._table_type is not None:
if not hasattr(self, "_t2"):
self._t2 = self._table_type([self.a, self.b, self.c])
return self._t2
def test_1(self, table_types):
self._setup(table_types)
self.t.remove_columns("a")
assert self.t.colnames == []
assert self.t.as_array().size == 0
# Regression test for gh-8640
assert not self.t
assert isinstance(self.t == None, np.ndarray)
assert (self.t == None).size == 0
def test_2(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.remove_columns("a")
assert self.t.colnames == ["b"]
assert self.t.dtype.names == ("b",)
assert np.all(self.t["b"] == np.array([4, 5, 6]))
def test_3(self, table_types):
"""Check remove_columns works for a single column with a name of
more than one character. Regression test against #2699"""
self._setup(table_types)
self.t["new_column"] = self.t["a"]
assert "new_column" in self.t.columns.keys()
self.t.remove_columns("new_column")
assert "new_column" not in self.t.columns.keys()
def test_remove_nonexistent_row(self, table_types):
self._setup(table_types)
with pytest.raises(IndexError):
self.t.remove_row(4)
def test_remove_row_0(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_row(0)
assert self.t.colnames == ["a", "b", "c"]
assert np.all(self.t["b"] == np.array([5, 6]))
def test_remove_row_1(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_row(1)
assert self.t.colnames == ["a", "b", "c"]
assert np.all(self.t["a"] == np.array([1, 3]))
def test_remove_row_2(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_row(2)
assert self.t.colnames == ["a", "b", "c"]
assert np.all(self.t["c"] == np.array([7, 8]))
def test_remove_row_slice(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_rows(slice(0, 2, 1))
assert self.t.colnames == ["a", "b", "c"]
assert np.all(self.t["c"] == np.array([9]))
def test_remove_row_list(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_rows([0, 2])
assert self.t.colnames == ["a", "b", "c"]
assert np.all(self.t["c"] == np.array([8]))
def test_remove_row_preserves_meta(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.remove_rows([0, 2])
assert self.t["a"].meta == {"aa": [0, 1, 2, 3, 4]}
assert self.t.dtype == np.dtype([("a", "int"), ("b", "int")])
def test_delitem_row(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
del self.t[1]
assert self.t.colnames == ["a", "b", "c"]
assert np.all(self.t["a"] == np.array([1, 3]))
@pytest.mark.parametrize("idx", [[0, 2], np.array([0, 2])])
def test_delitem_row_list(self, table_types, idx):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
del self.t[idx]
assert self.t.colnames == ["a", "b", "c"]
assert np.all(self.t["c"] == np.array([8]))
def test_delitem_row_slice(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
del self.t[0:2]
assert self.t.colnames == ["a", "b", "c"]
assert np.all(self.t["c"] == np.array([9]))
def test_delitem_row_fail(self, table_types):
self._setup(table_types)
with pytest.raises(IndexError):
del self.t[4]
def test_delitem_row_float(self, table_types):
self._setup(table_types)
with pytest.raises(IndexError):
del self.t[1.0]
def test_delitem1(self, table_types):
self._setup(table_types)
del self.t["a"]
assert self.t.colnames == []
assert self.t.as_array().size == 0
# Regression test for gh-8640
assert not self.t
assert isinstance(self.t == None, np.ndarray)
assert (self.t == None).size == 0
def test_delitem2(self, table_types):
self._setup(table_types)
del self.t2["b"]
assert self.t2.colnames == ["a", "c"]
def test_delitems(self, table_types):
self._setup(table_types)
del self.t2["a", "b"]
assert self.t2.colnames == ["c"]
def test_delitem_fail(self, table_types):
self._setup(table_types)
with pytest.raises(KeyError):
del self.t["d"]
@pytest.mark.usefixtures("table_types")
class TestKeep(SetupData):
def test_1(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.keep_columns([])
assert t.colnames == []
assert t.as_array().size == 0
# Regression test for gh-8640
assert not t
assert isinstance(t == None, np.ndarray)
assert (t == None).size == 0
def test_2(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.keep_columns("b")
assert t.colnames == ["b"]
assert t.dtype.names == ("b",)
assert np.all(t["b"] == np.array([4, 5, 6]))
@pytest.mark.usefixtures("table_types")
class TestRename(SetupData):
def test_1(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.rename_column("a", "b")
assert t.colnames == ["b"]
assert t.dtype.names == ("b",)
assert np.all(t["b"] == np.array([1, 2, 3]))
def test_2(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.rename_column("a", "c")
t.rename_column("b", "a")
assert t.colnames == ["c", "a"]
assert t.dtype.names == ("c", "a")
if t.masked:
assert t.mask.dtype.names == ("c", "a")
assert np.all(t["c"] == np.array([1, 2, 3]))
assert np.all(t["a"] == np.array([4, 5, 6]))
def test_rename_by_attr(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t["a"].name = "c"
t["b"].name = "a"
assert t.colnames == ["c", "a"]
assert t.dtype.names == ("c", "a")
assert np.all(t["c"] == np.array([1, 2, 3]))
assert np.all(t["a"] == np.array([4, 5, 6]))
def test_rename_columns(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b, self.c])
t.rename_columns(("a", "b", "c"), ("aa", "bb", "cc"))
assert t.colnames == ["aa", "bb", "cc"]
t.rename_columns(["bb", "cc"], ["b", "c"])
assert t.colnames == ["aa", "b", "c"]
with pytest.raises(TypeError):
t.rename_columns("aa", ["a"])
with pytest.raises(ValueError):
t.rename_columns(["a"], ["b", "c"])
@pytest.mark.usefixtures("table_types")
class TestSort:
def test_single(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name="a", data=[2, 1, 3]))
t.add_column(table_types.Column(name="b", data=[6, 5, 4]))
t.add_column(
table_types.Column(
name="c",
data=[
(1, 2),
(3, 4),
(4, 5),
],
)
)
assert np.all(t["a"] == np.array([2, 1, 3]))
assert np.all(t["b"] == np.array([6, 5, 4]))
t.sort("a")
assert np.all(t["a"] == np.array([1, 2, 3]))
assert np.all(t["b"] == np.array([5, 6, 4]))
assert np.all(
t["c"]
== np.array(
[
[3, 4],
[1, 2],
[4, 5],
]
)
)
t.sort("b")
assert np.all(t["a"] == np.array([3, 1, 2]))
assert np.all(t["b"] == np.array([4, 5, 6]))
assert np.all(
t["c"]
== np.array(
[
[4, 5],
[3, 4],
[1, 2],
]
)
)
@pytest.mark.parametrize("create_index", [False, True])
def test_single_reverse(self, table_types, create_index):
t = table_types.Table()
t.add_column(table_types.Column(name="a", data=[2, 1, 3]))
t.add_column(table_types.Column(name="b", data=[6, 5, 4]))
t.add_column(table_types.Column(name="c", data=[(1, 2), (3, 4), (4, 5)]))
assert np.all(t["a"] == np.array([2, 1, 3]))
assert np.all(t["b"] == np.array([6, 5, 4]))
t.sort("a", reverse=True)
assert np.all(t["a"] == np.array([3, 2, 1]))
assert np.all(t["b"] == np.array([4, 6, 5]))
assert np.all(t["c"] == np.array([[4, 5], [1, 2], [3, 4]]))
t.sort("b", reverse=True)
assert np.all(t["a"] == np.array([2, 1, 3]))
assert np.all(t["b"] == np.array([6, 5, 4]))
assert np.all(t["c"] == np.array([[1, 2], [3, 4], [4, 5]]))
def test_single_big(self, table_types):
"""Sort a big-ish table with a non-trivial sort order"""
x = np.arange(10000)
y = np.sin(x)
t = table_types.Table([x, y], names=("x", "y"))
t.sort("y")
idx = np.argsort(y)
assert np.all(t["x"] == x[idx])
assert np.all(t["y"] == y[idx])
@pytest.mark.parametrize("reverse", [True, False])
def test_empty_reverse(self, table_types, reverse):
t = table_types.Table([[], []], dtype=["f4", "U1"])
t.sort("col1", reverse=reverse)
def test_multiple(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name="a", data=[2, 1, 3, 2, 3, 1]))
t.add_column(table_types.Column(name="b", data=[6, 5, 4, 3, 5, 4]))
assert np.all(t["a"] == np.array([2, 1, 3, 2, 3, 1]))
assert np.all(t["b"] == np.array([6, 5, 4, 3, 5, 4]))
t.sort(["a", "b"])
assert np.all(t["a"] == np.array([1, 1, 2, 2, 3, 3]))
assert np.all(t["b"] == np.array([4, 5, 3, 6, 4, 5]))
t.sort(["b", "a"])
assert np.all(t["a"] == np.array([2, 1, 3, 1, 3, 2]))
assert np.all(t["b"] == np.array([3, 4, 4, 5, 5, 6]))
t.sort(("a", "b"))
assert np.all(t["a"] == np.array([1, 1, 2, 2, 3, 3]))
assert np.all(t["b"] == np.array([4, 5, 3, 6, 4, 5]))
def test_multiple_reverse(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name="a", data=[2, 1, 3, 2, 3, 1]))
t.add_column(table_types.Column(name="b", data=[6, 5, 4, 3, 5, 4]))
assert np.all(t["a"] == np.array([2, 1, 3, 2, 3, 1]))
assert np.all(t["b"] == np.array([6, 5, 4, 3, 5, 4]))
t.sort(["a", "b"], reverse=True)
assert np.all(t["a"] == np.array([3, 3, 2, 2, 1, 1]))
assert np.all(t["b"] == np.array([5, 4, 6, 3, 5, 4]))
t.sort(["b", "a"], reverse=True)
assert np.all(t["a"] == np.array([2, 3, 1, 3, 1, 2]))
assert np.all(t["b"] == np.array([6, 5, 5, 4, 4, 3]))
t.sort(("a", "b"), reverse=True)
assert np.all(t["a"] == np.array([3, 3, 2, 2, 1, 1]))
assert np.all(t["b"] == np.array([5, 4, 6, 3, 5, 4]))
def test_multiple_with_bytes(self, table_types):
t = table_types.Table()
t.add_column(
table_types.Column(name="firstname", data=[b"Max", b"Jo", b"John"])
)
t.add_column(
table_types.Column(name="name", data=[b"Miller", b"Miller", b"Jackson"])
)
t.add_column(table_types.Column(name="tel", data=[12, 15, 19]))
t.sort(["name", "firstname"])
assert np.all([t["firstname"] == np.array([b"John", b"Jo", b"Max"])])
assert np.all([t["name"] == np.array([b"Jackson", b"Miller", b"Miller"])])
assert np.all([t["tel"] == np.array([19, 15, 12])])
def test_multiple_with_unicode(self, table_types):
# Before Numpy 1.6.2, sorting with multiple column names
# failed when a unicode column was present.
t = table_types.Table()
t.add_column(
table_types.Column(
name="firstname", data=[str(x) for x in ["Max", "Jo", "John"]]
)
)
t.add_column(
table_types.Column(
name="name", data=[str(x) for x in ["Miller", "Miller", "Jackson"]]
)
)
t.add_column(table_types.Column(name="tel", data=[12, 15, 19]))
t.sort(["name", "firstname"])
assert np.all(
[t["firstname"] == np.array([str(x) for x in ["John", "Jo", "Max"]])]
)
assert np.all(
[t["name"] == np.array([str(x) for x in ["Jackson", "Miller", "Miller"]])]
)
assert np.all([t["tel"] == np.array([19, 15, 12])])
def test_argsort(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name="a", data=[2, 1, 3, 2, 3, 1]))
t.add_column(table_types.Column(name="b", data=[6, 5, 4, 3, 5, 4]))
assert np.all(t.argsort() == t.as_array().argsort())
i0 = t.argsort("a")
i1 = t.as_array().argsort(order=["a"])
assert np.all(t["a"][i0] == t["a"][i1])
i0 = t.argsort(["a", "b"])
i1 = t.as_array().argsort(order=["a", "b"])
assert np.all(t["a"][i0] == t["a"][i1])
assert np.all(t["b"][i0] == t["b"][i1])
@pytest.mark.parametrize("add_index", [False, True])
def test_argsort_reverse(self, table_types, add_index):
t = table_types.Table()
t.add_column(table_types.Column(name="a", data=[2, 1, 3, 2, 3, 1]))
t.add_column(table_types.Column(name="b", data=[6, 5, 4, 3, 5, 4]))
if add_index:
t.add_index("a")
assert np.all(t.argsort(reverse=True) == np.array([4, 2, 0, 3, 1, 5]))
i0 = t.argsort("a", reverse=True)
i1 = np.array([4, 2, 3, 0, 5, 1])
assert np.all(t["a"][i0] == t["a"][i1])
i0 = t.argsort(["a", "b"], reverse=True)
i1 = np.array([4, 2, 0, 3, 1, 5])
assert np.all(t["a"][i0] == t["a"][i1])
assert np.all(t["b"][i0] == t["b"][i1])
def test_argsort_bytes(self, table_types):
t = table_types.Table()
t.add_column(
table_types.Column(name="firstname", data=[b"Max", b"Jo", b"John"])
)
t.add_column(
table_types.Column(name="name", data=[b"Miller", b"Miller", b"Jackson"])
)
t.add_column(table_types.Column(name="tel", data=[12, 15, 19]))
assert np.all(t.argsort(["name", "firstname"]) == np.array([2, 1, 0]))
def test_argsort_unicode(self, table_types):
# Before Numpy 1.6.2, sorting with multiple column names
# failed when a unicode column was present.
t = table_types.Table()
t.add_column(
table_types.Column(
name="firstname", data=[str(x) for x in ["Max", "Jo", "John"]]
)
)
t.add_column(
table_types.Column(
name="name", data=[str(x) for x in ["Miller", "Miller", "Jackson"]]
)
)
t.add_column(table_types.Column(name="tel", data=[12, 15, 19]))
assert np.all(t.argsort(["name", "firstname"]) == np.array([2, 1, 0]))
def test_rebuild_column_view_then_rename(self, table_types):
"""
Issue #2039 where renaming fails after any method that calls
_rebuild_table_column_view (this includes sort and add_row).
"""
t = table_types.Table([[1]], names=("a",))
assert t.colnames == ["a"]
assert t.dtype.names == ("a",)
t.add_row((2,))
assert t.colnames == ["a"]
assert t.dtype.names == ("a",)
t.rename_column("a", "b")
assert t.colnames == ["b"]
assert t.dtype.names == ("b",)
t.sort("b")
assert t.colnames == ["b"]
assert t.dtype.names == ("b",)
t.rename_column("b", "c")
assert t.colnames == ["c"]
assert t.dtype.names == ("c",)
@pytest.mark.parametrize("kwargs", [{}, {"kind": "stable"}, {"kind": "quicksort"}])
def test_sort_kind(kwargs):
t = Table()
t["a"] = [2, 1, 3, 2, 3, 1]
t["b"] = [6, 5, 4, 3, 5, 4]
t_struct = t.as_array()
# Since sort calls Table.argsort this covers `kind` for both methods
t.sort(["a", "b"], **kwargs)
assert np.all(t.as_array() == np.sort(t_struct, **kwargs))
@pytest.mark.usefixtures("table_types")
class TestIterator:
def test_iterator(self, table_types):
d = np.array(
[
(2, 1),
(3, 6),
(4, 5),
],
dtype=[("a", "i4"), ("b", "i4")],
)
t = table_types.Table(d)
if t.masked:
with pytest.raises(ValueError):
t[0] == d[0] # noqa: B015
else:
for row, np_row in zip(t, d):
assert np.all(row == np_row)
@pytest.mark.usefixtures("table_types")
class TestSetMeta:
def test_set_meta(self, table_types):
d = table_types.Table(names=("a", "b"))
d.meta["a"] = 1
d.meta["b"] = 1
d.meta["c"] = 1
d.meta["d"] = 1
assert list(d.meta.keys()) == ["a", "b", "c", "d"]
@pytest.mark.usefixtures("table_types")
class TestConvertNumpyArray:
def test_convert_numpy_array(self, table_types):
d = table_types.Table([[1, 2], [3, 4]], names=("a", "b"))
np_data = np.array(d)
if table_types.Table is not MaskedTable:
assert np.all(np_data == d.as_array())
assert np_data is not d.as_array()
assert d.colnames == list(np_data.dtype.names)
np_data = np.array(d, copy=False)
if table_types.Table is not MaskedTable:
assert np.all(np_data == d.as_array())
assert d.colnames == list(np_data.dtype.names)
with pytest.raises(ValueError):
np_data = np.array(d, dtype=[("c", "i8"), ("d", "i8")])
def test_as_array_byteswap(self, table_types):
"""Test for https://github.com/astropy/astropy/pull/4080"""
byte_orders = (">", "<")
native_order = byte_orders[sys.byteorder == "little"]
for order in byte_orders:
col = table_types.Column([1.0, 2.0], name="a", dtype=order + "f8")
t = table_types.Table([col])
arr = t.as_array()
assert arr["a"].dtype.byteorder in (native_order, "=")
arr = t.as_array(keep_byteorder=True)
if order == native_order:
assert arr["a"].dtype.byteorder in (order, "=")
else:
assert arr["a"].dtype.byteorder == order
def test_byteswap_fits_array(self, table_types):
"""
Test for https://github.com/astropy/astropy/pull/4080, demonstrating
that FITS tables are converted to native byte order.
"""
non_native_order = (">", "<")[sys.byteorder != "little"]
filename = get_pkg_data_filename("data/tb.fits", "astropy.io.fits.tests")
t = table_types.Table.read(filename)
arr = t.as_array()
for idx in range(len(arr.dtype)):
assert arr.dtype[idx].byteorder != non_native_order
with fits.open(filename, character_as_bytes=True) as hdul:
data = hdul[1].data
for colname in data.columns.names:
assert np.all(data[colname] == arr[colname])
arr2 = t.as_array(keep_byteorder=True)
for colname in data.columns.names:
assert data[colname].dtype.byteorder == arr2[colname].dtype.byteorder
def test_convert_numpy_object_array(self, table_types):
d = table_types.Table([[1, 2], [3, 4]], names=("a", "b"))
# Single table
np_d = np.array(d, dtype=object)
assert isinstance(np_d, np.ndarray)
assert np_d[()] is d
def test_convert_list_numpy_object_array(self, table_types):
d = table_types.Table([[1, 2], [3, 4]], names=("a", "b"))
ds = [d, d, d]
np_ds = np.array(ds, dtype=object)
assert all(isinstance(t, table_types.Table) for t in np_ds)
assert all(np.array_equal(t, d) for t in np_ds)
def _assert_copies(t, t2, deep=True):
assert t.colnames == t2.colnames
np.testing.assert_array_equal(t.as_array(), t2.as_array())
assert t.meta == t2.meta
for col, col2 in zip(t.columns.values(), t2.columns.values()):
if deep:
assert not np.may_share_memory(col, col2)
else:
assert np.may_share_memory(col, col2)
def test_copy():
t = table.Table([[1, 2, 3], [2, 3, 4]], names=["x", "y"])
t2 = t.copy()
_assert_copies(t, t2)
def test_copy_masked():
t = table.Table(
[[1, 2, 3], [2, 3, 4]], names=["x", "y"], masked=True, meta={"name": "test"}
)
t["x"].mask = [True, False, True]
t2 = t.copy()
_assert_copies(t, t2)
def test_copy_protocol():
t = table.Table([[1, 2, 3], [2, 3, 4]], names=["x", "y"])
t2 = copy.copy(t)
t3 = copy.deepcopy(t)
_assert_copies(t, t2, deep=False)
_assert_copies(t, t3)
def test_disallow_inequality_comparisons():
"""
Regression test for #828 - disallow comparison operators on whole Table
"""
t = table.Table()
with pytest.raises(TypeError):
t > 2 # noqa: B015
with pytest.raises(TypeError):
t < 1.1 # noqa: B015
with pytest.raises(TypeError):
t >= 5.5 # noqa: B015
with pytest.raises(TypeError):
t <= -1.1 # noqa: B015
def test_values_equal_part1():
col1 = [1, 2]
col2 = [1.0, 2.0]
col3 = ["a", "b"]
t1 = table.Table([col1, col2, col3], names=["a", "b", "c"])
t2 = table.Table([col1, col2], names=["a", "b"])
t3 = table.table_helpers.simple_table()
tm = t1.copy()
tm["time"] = Time([1, 2], format="cxcsec")
tm1 = tm.copy()
tm1["time"][0] = np.ma.masked
tq = table.table_helpers.simple_table()
tq["quantity"] = [1.0, 2.0, 3.0] * u.m
tsk = table.table_helpers.simple_table()
tsk["sk"] = SkyCoord(1, 2, unit="deg")
eqsk = tsk.values_equal(tsk)
for col in eqsk.itercols():
assert np.all(col)
with pytest.raises(
ValueError, match="cannot compare tables with different column names"
):
t2.values_equal(t1)
with pytest.raises(ValueError, match="unable to compare column a"):
# Shape mismatch
t3.values_equal(t1)
if NUMPY_LT_1_25:
with pytest.raises(ValueError, match="unable to compare column c"):
# Type mismatch in column c causes FutureWarning
t1.values_equal(2)
with pytest.raises(ValueError, match="unable to compare column c"):
t1.values_equal([1, 2])
else:
eq = t2.values_equal(2)
for col in eq.colnames:
assert np.all(eq[col] == [False, True])
eq = t2.values_equal([1, 2])
for col in eq.colnames:
assert np.all(eq[col] == [True, True])
eq = t2.values_equal(t2)
for col in eq.colnames:
assert np.all(eq[col] == [True, True])
eq1 = tm1.values_equal(tm)
for col in eq1.colnames:
assert np.all(eq1[col] == [True, True])
eq2 = tq.values_equal(tq)
for col in eq2.colnames:
assert np.all(eq2[col] == [True, True, True])
eq3 = t2.values_equal(2)
for col in eq3.colnames:
assert np.all(eq3[col] == [False, True])
eq4 = t2.values_equal([1, 2])
for col in eq4.colnames:
assert np.all(eq4[col] == [True, True])
# Compare table to its first row
t = table.Table(rows=[(1, "a"), (1, "b")])
eq = t.values_equal(t[0])
assert np.all(eq["col0"] == [True, True])
assert np.all(eq["col1"] == [True, False])
def test_rows_equal():
t = table.Table.read(
[
" a b c d",
" 2 c 7.0 0",
" 2 b 5.0 1",
" 2 b 6.0 2",
" 2 a 4.0 3",
" 0 a 0.0 4",
" 1 b 3.0 5",
" 1 a 2.0 6",
" 1 a 1.0 7",
],
format="ascii",
)
# All rows are equal
assert np.all(t == t)
# Assert no rows are different
assert not np.any(t != t)
# Check equality result for a given row
assert np.all((t == t[3]) == np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool))
# Check inequality result for a given row
assert np.all((t != t[3]) == np.array([1, 1, 1, 0, 1, 1, 1, 1], dtype=bool))
t2 = table.Table.read(
[
" a b c d",
" 2 c 7.0 0",
" 2 b 5.0 1",
" 3 b 6.0 2",
" 2 a 4.0 3",
" 0 a 1.0 4",
" 1 b 3.0 5",
" 1 c 2.0 6",
" 1 a 1.0 7",
],
format="ascii",
)
# In the above cases, Row.__eq__ gets called, but now need to make sure
# Table.__eq__ also gets called.
assert np.all((t == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t != t2) == np.array([0, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
# Check that comparing to a structured array works
assert np.all(
(t == t2.as_array()) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool)
)
assert np.all(
(t.as_array() == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool)
)
def test_equality_masked():
t = table.Table.read(
[
" a b c d",
" 2 c 7.0 0",
" 2 b 5.0 1",
" 2 b 6.0 2",
" 2 a 4.0 3",
" 0 a 0.0 4",
" 1 b 3.0 5",
" 1 a 2.0 6",
" 1 a 1.0 7",
],
format="ascii",
)
# Make into masked table
t = table.Table(t, masked=True)
# All rows are equal
assert np.all(t == t)
# Assert no rows are different
assert not np.any(t != t)
# Check equality result for a given row
assert np.all((t == t[3]) == np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool))
# Check inequality result for a given row
assert np.all((t != t[3]) == np.array([1, 1, 1, 0, 1, 1, 1, 1], dtype=bool))
t2 = table.Table.read(
[
" a b c d",
" 2 c 7.0 0",
" 2 b 5.0 1",
" 3 b 6.0 2",
" 2 a 4.0 3",
" 0 a 1.0 4",
" 1 b 3.0 5",
" 1 c 2.0 6",
" 1 a 1.0 7",
],
format="ascii",
)
# In the above cases, Row.__eq__ gets called, but now need to make sure
# Table.__eq__ also gets called.
assert np.all((t == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t != t2) == np.array([0, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
# Check that masking a value causes the row to differ
t.mask["a"][0] = True
assert np.all((t == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t != t2) == np.array([1, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
# Check that comparing to a structured array works
assert np.all(
(t == t2.as_array()) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool)
)
@pytest.mark.xfail
def test_equality_masked_bug():
"""
This highlights a Numpy bug. Once that bug is fixed, this test can be moved into the
test_equality_masked test. Related Numpy bug report:
https://github.com/numpy/numpy/issues/3840
"""
t = table.Table.read(
[
" a b c d",
" 2 c 7.0 0",
" 2 b 5.0 1",
" 2 b 6.0 2",
" 2 a 4.0 3",
" 0 a 0.0 4",
" 1 b 3.0 5",
" 1 a 2.0 6",
" 1 a 1.0 7",
],
format="ascii",
)
t = table.Table(t, masked=True)
t2 = table.Table.read(
[
" a b c d",
" 2 c 7.0 0",
" 2 b 5.0 1",
" 3 b 6.0 2",
" 2 a 4.0 3",
" 0 a 1.0 4",
" 1 b 3.0 5",
" 1 c 2.0 6",
" 1 a 1.0 7",
],
format="ascii",
)
assert np.all(
(t.as_array() == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool)
)
# Check that the meta descriptor is working as expected. The MetaBaseTest class
# takes care of defining all the tests, and we simply have to define the class
# and any minimal set of args to pass.
class TestMetaTable(MetaBaseTest):
test_class = table.Table
args = ()
def test_unicode_content():
# If we don't have unicode literals then return
if isinstance("", bytes):
return
# Define unicode literals
string_a = "астрономическая питона"
string_b = "миллиарды световых лет"
a = table.Table([[string_a, 2], [string_b, 3]], names=("a", "b"))
assert string_a in str(a)
# This only works because the coding of this file is utf-8, which
# matches the default encoding of Table.__str__
assert string_a.encode("utf-8") in bytes(a)
def test_unicode_policy():
t = table.Table.read(
[
" a b c d",
" 2 c 7.0 0",
" 2 b 5.0 1",
" 2 b 6.0 2",
" 2 a 4.0 3",
" 0 a 0.0 4",
" 1 b 3.0 5",
" 1 a 2.0 6",
" 1 a 1.0 7",
],
format="ascii",
)
assert_follows_unicode_guidelines(t)
@pytest.mark.parametrize("uni", ["питона", "ascii"])
def test_unicode_bytestring_conversion(table_types, uni):
"""
Test converting columns to all unicode or all bytestring. This
makes two columns, one which is unicode (str in Py3) and one which
is bytes (UTF-8 encoded). There are two code paths in the conversions,
a faster one where the data are actually ASCII and a slower one where
UTF-8 conversion is required. This tests both via the ``uni`` param.
"""
byt = uni.encode("utf-8")
t = table_types.Table([[byt], [uni], [1]], dtype=("S", "U", "i"))
assert t["col0"].dtype.kind == "S"
assert t["col1"].dtype.kind == "U"
assert t["col2"].dtype.kind == "i"
t["col0"].description = "col0"
t["col1"].description = "col1"
t["col0"].meta["val"] = "val0"
t["col1"].meta["val"] = "val1"
# Unicode to bytestring
t1 = t.copy()
t1.convert_unicode_to_bytestring()
assert t1["col0"].dtype.kind == "S"
assert t1["col1"].dtype.kind == "S"
assert t1["col2"].dtype.kind == "i"
# Meta made it through
assert t1["col0"].description == "col0"
assert t1["col1"].description == "col1"
assert t1["col0"].meta["val"] == "val0"
assert t1["col1"].meta["val"] == "val1"
# Need to de-fang the automatic unicode sandwiching of Table
assert np.array(t1["col0"])[0] == byt
assert np.array(t1["col1"])[0] == byt
assert np.array(t1["col2"])[0] == 1
# Bytestring to unicode
t1 = t.copy()
t1.convert_bytestring_to_unicode()
assert t1["col0"].dtype.kind == "U"
assert t1["col1"].dtype.kind == "U"
assert t1["col2"].dtype.kind == "i"
# Meta made it through
assert t1["col0"].description == "col0"
assert t1["col1"].description == "col1"
assert t1["col0"].meta["val"] == "val0"
assert t1["col1"].meta["val"] == "val1"
# No need to de-fang the automatic unicode sandwiching of Table here, but
# do just for consistency to prove things are working.
assert np.array(t1["col0"])[0] == uni
assert np.array(t1["col1"])[0] == uni
assert np.array(t1["col2"])[0] == 1
def test_table_deletion():
"""
Regression test for the reference cycle discussed in
https://github.com/astropy/astropy/issues/2877
"""
deleted = set()
# A special table subclass which leaves a record when it is finalized
class TestTable(table.Table):
def __del__(self):
deleted.add(id(self))
t = TestTable({"a": [1, 2, 3]})
the_id = id(t)
assert t["a"].parent_table is t
del t
# Cleanup
gc.collect()
assert the_id in deleted
def test_nested_iteration():
"""
Regression test for issue 3358 where nested iteration over a single table fails.
"""
t = table.Table([[0, 1]], names=["a"])
out = []
for r1 in t:
for r2 in t:
out.append((r1["a"], r2["a"]))
assert out == [(0, 0), (0, 1), (1, 0), (1, 1)]
def test_table_init_from_degenerate_arrays(table_types):
t = table_types.Table(np.array([]))
assert len(t.columns) == 0
with pytest.raises(ValueError):
t = table_types.Table(np.array(0))
t = table_types.Table(np.array([1, 2, 3]))
assert len(t.columns) == 3
@pytest.mark.skipif(not HAS_PANDAS, reason="requires pandas")
class TestPandas:
def test_simple(self):
t = table.Table()
for endian in ["<", ">", "="]:
for kind in ["f", "i"]:
for byte in ["2", "4", "8"]:
dtype = np.dtype(endian + kind + byte)
x = np.array([1, 2, 3], dtype=dtype)
t[endian + kind + byte] = x.newbyteorder(endian)
t["u"] = ["a", "b", "c"]
t["s"] = ["a", "b", "c"]
d = t.to_pandas()
for column in t.columns:
if column == "u":
assert np.all(t["u"] == np.array(["a", "b", "c"]))
assert d[column].dtype == np.dtype("O") # upstream feature of pandas
elif column == "s":
assert np.all(t["s"] == np.array(["a", "b", "c"]))
assert d[column].dtype == np.dtype("O") # upstream feature of pandas
else:
# We should be able to compare exact values here
assert np.all(t[column] == d[column])
if t[column].dtype.isnative:
assert d[column].dtype == t[column].dtype
else:
assert d[column].dtype == t[column].byteswap().newbyteorder().dtype
# Regression test for astropy/astropy#1156 - the following code gave a
# ValueError: Big-endian buffer not supported on little-endian
# compiler. We now automatically swap the endian-ness to native order
# upon adding the arrays to the data frame.
# Explicitly testing little/big/native endian separately -
# regression for a case in astropy/astropy#11286 not caught by #3729.
d[["<i4", ">i4"]]
d[["<f4", ">f4"]]
t2 = table.Table.from_pandas(d)
for column in t.columns:
if column in ("u", "s"):
assert np.all(t[column] == t2[column])
else:
assert_allclose(t[column], t2[column])
if t[column].dtype.isnative:
assert t[column].dtype == t2[column].dtype
else:
assert t[column].byteswap().newbyteorder().dtype == t2[column].dtype
@pytest.mark.parametrize("unsigned", ["u", ""])
@pytest.mark.parametrize("bits", [8, 16, 32, 64])
def test_nullable_int(self, unsigned, bits):
np_dtype = f"{unsigned}int{bits}"
c = MaskedColumn([1, 2], mask=[False, True], dtype=np_dtype)
t = Table([c])
df = t.to_pandas()
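        # The expected pandas dtype name is the numpy name with capitalized
        # kind letters, e.g. "int32" -> "Int32" and "uint8" -> "UInt8"
        # (pandas nullable integer dtypes).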
pd_dtype = np_dtype.replace("i", "I").replace("u", "U")
assert str(df["col0"].dtype) == pd_dtype
t2 = Table.from_pandas(df)
assert str(t2["col0"].dtype) == np_dtype
assert np.all(t2["col0"].mask == [False, True])
assert np.all(t2["col0"] == c)
def test_2d(self):
t = table.Table()
t["a"] = [1, 2, 3]
t["b"] = np.ones((3, 2))
with pytest.raises(
ValueError, match="Cannot convert a table with multidimensional columns"
):
t.to_pandas()
def test_mixin_pandas(self):
t = table.QTable()
for name in sorted(MIXIN_COLS):
if not name.startswith("ndarray"):
t[name] = MIXIN_COLS[name]
t["dt"] = TimeDelta([0, 2, 4, 6], format="sec")
tp = t.to_pandas()
t2 = table.Table.from_pandas(tp)
assert np.allclose(t2["quantity"], [0, 1, 2, 3])
assert np.allclose(t2["longitude"], [0.0, 1.0, 5.0, 6.0])
assert np.allclose(t2["latitude"], [5.0, 6.0, 10.0, 11.0])
assert np.allclose(t2["skycoord.ra"], [0, 1, 2, 3])
assert np.allclose(t2["skycoord.dec"], [0, 1, 2, 3])
assert np.allclose(t2["arraywrap"], [0, 1, 2, 3])
assert np.allclose(t2["arrayswap"], [0, 1, 2, 3])
assert np.allclose(
t2["earthlocation.y"], [0, 110708, 547501, 654527], rtol=0, atol=1
)
# For pandas, Time, TimeDelta are the mixins that round-trip the class
assert isinstance(t2["time"], Time)
assert np.allclose(t2["time"].jyear, [2000, 2001, 2002, 2003])
assert np.all(
t2["time"].isot
== [
"2000-01-01T12:00:00.000",
"2000-12-31T18:00:00.000",
"2002-01-01T00:00:00.000",
"2003-01-01T06:00:00.000",
]
)
assert t2["time"].format == "isot"
# TimeDelta
assert isinstance(t2["dt"], TimeDelta)
assert np.allclose(t2["dt"].value, [0, 2, 4, 6])
assert t2["dt"].format == "sec"
@pytest.mark.parametrize("use_IndexedTable", [False, True])
def test_to_pandas_index(self, use_IndexedTable):
"""Test to_pandas() with different indexing options.
This also tests the fix for #12014. The exception seen there is
reproduced here without the fix.
"""
import pandas as pd
class IndexedTable(table.QTable):
"""Always index the first column"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.add_index(self.colnames[0])
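        # Expected pandas indexes: a plain RangeIndex when no table index is
        # used, and a DatetimeIndex derived from the "tm" Time column otherwise.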
row_index = pd.RangeIndex(0, 2, 1)
tm_index = pd.DatetimeIndex(
["1998-01-01", "2002-01-01"], dtype="datetime64[ns]", name="tm", freq=None
)
tm = Time([1998, 2002], format="jyear")
x = [1, 2]
table_cls = IndexedTable if use_IndexedTable else table.QTable
t = table_cls([tm, x], names=["tm", "x"])
tp = t.to_pandas()
if not use_IndexedTable:
assert np.all(tp.index == row_index)
tp = t.to_pandas(index="tm")
assert np.all(tp.index == tm_index)
t.add_index("tm")
tp = t.to_pandas()
assert np.all(tp.index == tm_index)
        # Make sure writing to pandas didn't modify the original table
assert t["tm"].info.indices
tp = t.to_pandas(index=True)
assert np.all(tp.index == tm_index)
tp = t.to_pandas(index=False)
assert np.all(tp.index == row_index)
with pytest.raises(ValueError) as err:
t.to_pandas(index="not a column")
assert "index must be None, False" in str(err.value)
def test_mixin_pandas_masked(self):
tm = Time([1, 2, 3], format="cxcsec")
dt = TimeDelta([1, 2, 3], format="sec")
tm[1] = np.ma.masked
dt[1] = np.ma.masked
t = table.QTable([tm, dt], names=["tm", "dt"])
tp = t.to_pandas()
assert np.all(tp["tm"].isnull() == [False, True, False])
assert np.all(tp["dt"].isnull() == [False, True, False])
t2 = table.Table.from_pandas(tp)
assert np.all(t2["tm"].mask == tm.mask)
assert np.ma.allclose(t2["tm"].jd, tm.jd, rtol=1e-14, atol=1e-14)
assert np.all(t2["dt"].mask == dt.mask)
assert np.ma.allclose(t2["dt"].jd, dt.jd, rtol=1e-14, atol=1e-14)
def test_from_pandas_index(self):
tm = Time([1998, 2002], format="jyear")
x = [1, 2]
t = table.Table([tm, x], names=["tm", "x"])
tp = t.to_pandas(index="tm")
t2 = table.Table.from_pandas(tp)
assert t2.colnames == ["x"]
t2 = table.Table.from_pandas(tp, index=True)
assert t2.colnames == ["tm", "x"]
assert np.allclose(t2["tm"].jyear, tm.jyear)
@pytest.mark.parametrize("use_nullable_int", [True, False])
def test_masking(self, use_nullable_int):
t = table.Table(masked=True)
t["a"] = [1, 2, 3]
t["a"].mask = [True, False, True]
t["b"] = [1.0, 2.0, 3.0]
t["b"].mask = [False, False, True]
t["u"] = ["a", "b", "c"]
t["u"].mask = [False, True, False]
t["s"] = ["a", "b", "c"]
t["s"].mask = [False, True, False]
# https://github.com/astropy/astropy/issues/7741
t["Source"] = [2584290278794471936, 2584290038276303744, 2584288728310999296]
t["Source"].mask = [False, False, False]
if use_nullable_int: # Default
# No warning with the default use_nullable_int=True
d = t.to_pandas(use_nullable_int=use_nullable_int)
else:
import pandas
from packaging.version import Version
PANDAS_LT_2_0 = Version(pandas.__version__) < Version("2.0dev")
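            # pandas < 2.0 allowed casting masked ints to float64 (astropy warns
            # about the conversion); pandas >= 2.0 raises IntCastingNaNError
            # instead, which is what the branches below check.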
if PANDAS_LT_2_0:
with pytest.warns(
TableReplaceWarning,
match=r"converted column 'a' from int(32|64) to float64",
):
d = t.to_pandas(use_nullable_int=use_nullable_int)
else:
from pandas.core.dtypes.cast import IntCastingNaNError
with pytest.raises(
IntCastingNaNError,
match=r"Cannot convert non-finite values \(NA or inf\) to integer",
):
d = t.to_pandas(use_nullable_int=use_nullable_int)
return # Do not continue
t2 = table.Table.from_pandas(d)
for name, column in t.columns.items():
assert np.all(column.data == t2[name].data)
if hasattr(t2[name], "mask"):
assert np.all(column.mask == t2[name].mask)
if column.dtype.kind == "i":
if np.any(column.mask) and not use_nullable_int:
assert t2[name].dtype.kind == "f"
else:
assert t2[name].dtype.kind == "i"
# This warning pops up when use_nullable_int is False
# for pandas 1.5.2.
with np.errstate(invalid="ignore"):
assert_array_equal(column.data, t2[name].data.astype(column.dtype))
else:
if column.dtype.byteorder in ("=", "|"):
assert column.dtype == t2[name].dtype
else:
assert column.byteswap().newbyteorder().dtype == t2[name].dtype
def test_units(self):
import pandas as pd
import astropy.units as u
df = pd.DataFrame({"x": [1, 2, 3], "t": [1.3, 1.2, 1.8]})
t = table.Table.from_pandas(df, units={"x": u.m, "t": u.s})
assert t["x"].unit == u.m
assert t["t"].unit == u.s
# test error if not a mapping
with pytest.raises(TypeError):
table.Table.from_pandas(df, units=[u.m, u.s])
# test warning is raised if additional columns in units dict
with pytest.warns(UserWarning) as record:
table.Table.from_pandas(df, units={"x": u.m, "t": u.s, "y": u.m})
assert len(record) == 1
assert "{'y'}" in record[0].message.args[0]
def test_to_pandas_masked_int_data_with__index(self):
data = {"data": [0, 1, 2], "index": [10, 11, 12]}
t = table.Table(data=data, masked=True)
t.add_index("index")
t["data"].mask = [1, 1, 0]
df = t.to_pandas()
assert df["data"].iloc[-1] == 2
@pytest.mark.usefixtures("table_types")
class TestReplaceColumn(SetupData):
def test_fail_replace_column(self, table_types):
"""Raise exception when trying to replace column via table.columns object"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
with pytest.raises(
ValueError,
match=r"Cannot replace column 'a'. Use " "Table.replace_column.. instead.",
):
t.columns["a"] = [1, 2, 3]
with pytest.raises(
ValueError, match=r"column name not there is not in the table"
):
t.replace_column("not there", [1, 2, 3])
with pytest.raises(
ValueError, match=r"length of new column must match table length"
):
t.replace_column("a", [1, 2])
def test_replace_column(self, table_types):
"""Replace existing column with a new column"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
ta = t["a"]
tb = t["b"]
vals = [1.2, 3.4, 5.6]
for col in (
vals,
table_types.Column(vals),
table_types.Column(vals, name="a"),
table_types.Column(vals, name="b"),
):
t.replace_column("a", col)
assert np.all(t["a"] == vals)
assert t["a"] is not ta # New a column
assert t["b"] is tb # Original b column unchanged
assert t.colnames == ["a", "b"]
assert t["a"].meta == {}
assert t["a"].format is None
# Special case: replacing the only column can resize table
del t["b"]
assert len(t) == 3
t["a"] = [1, 2]
assert len(t) == 2
def test_replace_index_column(self, table_types):
"""Replace index column and generate expected exception"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_index("a")
with pytest.raises(ValueError) as err:
t.replace_column("a", [1, 2, 3])
assert err.value.args[0] == "cannot replace a table index column"
def test_replace_column_no_copy(self):
t = Table([[1, 2], [3, 4]], names=["a", "b"])
a = np.array([1.5, 2.5])
t.replace_column("a", a, copy=False)
assert t["a"][0] == a[0]
t["a"][0] = 10
assert t["a"][0] == a[0]
class TestQTableColumnConversionCornerCases:
def test_replace_with_masked_col_with_units_in_qtable(self):
"""This is a small regression from #8902"""
t = QTable([[1, 2], [3, 4]], names=["a", "b"])
t["a"] = MaskedColumn([5, 6], unit="m")
assert isinstance(t["a"], u.Quantity)
def test_do_not_replace_string_column_with_units_in_qtable(self):
t = QTable([[1 * u.m]])
with pytest.warns(AstropyUserWarning, match="convert it to Quantity failed"):
t["a"] = Column(["a"], unit=u.m)
assert isinstance(t["a"], Column)
class Test__Astropy_Table__:
"""
Test initializing a Table subclass from a table-like object that
implements the __astropy_table__ interface method.
"""
class SimpleTable:
def __init__(self):
self.columns = [[1, 2, 3], [4, 5, 6], [7, 8, 9] * u.m]
self.names = ["a", "b", "c"]
self.meta = OrderedDict([("a", 1), ("b", 2)])
def __astropy_table__(self, cls, copy, **kwargs):
a, b, c = self.columns
c.info.name = "c"
cols = [table.Column(a, name="a"), table.MaskedColumn(b, name="b"), c]
names = [col.info.name for col in cols]
return cls(cols, names=names, copy=copy, meta=kwargs or self.meta)
def test_simple_1(self):
"""Make a SimpleTable and convert to Table, QTable with copy=False, True"""
for table_cls in (table.Table, table.QTable):
col_c_class = u.Quantity if table_cls is table.QTable else table.Column
for cpy in (False, True):
st = self.SimpleTable()
# Test putting in a non-native kwarg `extra_meta` to Table initializer
t = table_cls(st, copy=cpy, extra_meta="extra!")
assert t.colnames == ["a", "b", "c"]
assert t.meta == {"extra_meta": "extra!"}
assert np.all(t["a"] == st.columns[0])
assert np.all(t["b"] == st.columns[1])
vals = t["c"].value if table_cls is table.QTable else t["c"]
assert np.all(st.columns[2].value == vals)
assert isinstance(t["a"], table.Column)
assert isinstance(t["b"], table.MaskedColumn)
assert isinstance(t["c"], col_c_class)
assert t["c"].unit is u.m
assert type(t) is table_cls
# Copy being respected?
t["a"][0] = 10
                assert st.columns[0][0] == (1 if cpy else 10)
def test_simple_2(self):
"""Test converting a SimpleTable and changing column names and types"""
st = self.SimpleTable()
dtypes = [np.int32, np.float32, np.float16]
names = ["a", "b", "c"]
meta = OrderedDict([("c", 3)])
t = table.Table(st, dtype=dtypes, names=names, meta=meta)
assert t.colnames == names
assert all(
col.dtype.type is dtype for col, dtype in zip(t.columns.values(), dtypes)
)
        # The supplied meta overrides the existing meta. Changed in astropy 3.2.
assert t.meta != st.meta
assert t.meta == meta
def test_kwargs_exception(self):
"""If extra kwargs provided but without initializing with a table-like
object, exception is raised"""
with pytest.raises(TypeError) as err:
table.Table([[1]], extra_meta="extra!")
assert "__init__() got unexpected keyword argument" in str(err.value)
class TestUpdate:
def _setup(self):
self.a = Column((1, 2, 3), name="a")
self.b = Column((4, 5, 6), name="b")
self.c = Column((7, 8, 9), name="c")
self.d = Column((10, 11, 12), name="d")
def test_different_lengths(self):
self._setup()
t1 = Table([self.a])
t2 = Table([self.b[:-1]])
msg = "Inconsistent data column lengths"
with pytest.raises(ValueError, match=msg):
t1.update(t2)
# If update didn't succeed then t1 and t2 should not have changed.
assert t1.colnames == ["a"]
assert np.all(t1["a"] == self.a)
assert t2.colnames == ["b"]
assert np.all(t2["b"] == self.b[:-1])
def test_invalid_inputs(self):
# If input is invalid then nothing should be modified.
self._setup()
t = Table([self.a])
d = {"b": self.b, "c": [0]}
msg = "Inconsistent data column lengths: {1, 3}"
with pytest.raises(ValueError, match=msg):
t.update(d)
assert t.colnames == ["a"]
assert np.all(t["a"] == self.a)
assert d == {"b": self.b, "c": [0]}
def test_metadata_conflict(self):
self._setup()
t1 = Table([self.a], meta={"a": 0, "b": [0], "c": True})
t2 = Table([self.b], meta={"a": 1, "b": [1]})
t2meta = copy.deepcopy(t2.meta)
t1.update(t2)
assert t1.meta == {"a": 1, "b": [0, 1], "c": True}
# t2 metadata should not have changed.
assert t2.meta == t2meta
def test_update(self):
self._setup()
t1 = Table([self.a, self.b])
t2 = Table([self.b, self.c])
t2["b"] += 1
t1.update(t2)
assert t1.colnames == ["a", "b", "c"]
assert np.all(t1["a"] == self.a)
assert np.all(t1["b"] == self.b + 1)
assert np.all(t1["c"] == self.c)
# t2 should not have changed.
assert t2.colnames == ["b", "c"]
assert np.all(t2["b"] == self.b + 1)
assert np.all(t2["c"] == self.c)
d = {"b": list(self.b), "d": list(self.d)}
dc = copy.deepcopy(d)
t2.update(d)
assert t2.colnames == ["b", "c", "d"]
assert np.all(t2["b"] == self.b)
assert np.all(t2["c"] == self.c)
assert np.all(t2["d"] == self.d)
# d should not have changed.
assert d == dc
# Columns were copied, so changing t2 shouldn't have affected t1.
assert t1.colnames == ["a", "b", "c"]
assert np.all(t1["a"] == self.a)
assert np.all(t1["b"] == self.b + 1)
assert np.all(t1["c"] == self.c)
def test_update_without_copy(self):
self._setup()
t1 = Table([self.a, self.b])
t2 = Table([self.b, self.c])
t1.update(t2, copy=False)
t2["b"] -= 1
assert t1.colnames == ["a", "b", "c"]
assert np.all(t1["a"] == self.a)
assert np.all(t1["b"] == self.b - 1)
assert np.all(t1["c"] == self.c)
d = {"b": np.array(self.b), "d": np.array(self.d)}
t2.update(d, copy=False)
d["b"] *= 2
assert t2.colnames == ["b", "c", "d"]
assert np.all(t2["b"] == 2 * self.b)
assert np.all(t2["c"] == self.c)
assert np.all(t2["d"] == self.d)
def test_merge_operator(self):
self._setup()
t1 = Table([self.a, self.b])
t2 = Table([self.b, self.c])
with pytest.raises(TypeError):
_ = 1 | t1
with pytest.raises(TypeError):
_ = t1 | 1
t1_copy = t1.copy(True)
t3 = t1 | t2
assert t1.colnames == ["a", "b"] # t1 should remain unchanged
assert np.all(t1["a"] == self.a)
assert np.all(t1["b"] == self.b)
t1_copy.update(t2)
assert t3.colnames == ["a", "b", "c"]
assert np.all(t3["a"] == t1_copy["a"])
assert np.all(t3["b"] == t1_copy["b"])
assert np.all(t3["c"] == t1_copy["c"])
def test_update_operator(self):
self._setup()
t1 = Table([self.a, self.b])
t2 = Table([self.b, self.c])
with pytest.raises(ValueError):
t1 |= 1
t1_copy = t1.copy(True)
t1 |= t2
t1_copy.update(t2)
assert t1.colnames == ["a", "b", "c"]
assert np.all(t1["a"] == t1_copy["a"])
assert np.all(t1["b"] == t1_copy["b"])
assert np.all(t1["c"] == t1_copy["c"])
def test_table_meta_copy():
"""
Test no copy vs light (key) copy vs deep copy of table meta for different
situations. #8404.
"""
t = table.Table([[1]])
meta = {1: [1, 2]}
# Assigning meta directly implies using direct object reference
t.meta = meta
assert t.meta is meta
# Table slice implies key copy, so values are unchanged
t2 = t[:]
assert t2.meta is not t.meta # NOT the same OrderedDict object but equal
assert t2.meta == t.meta
assert t2.meta[1] is t.meta[1] # Value IS the list same object
# Table init with copy=False implies key copy
t2 = table.Table(t, copy=False)
assert t2.meta is not t.meta # NOT the same OrderedDict object but equal
assert t2.meta == t.meta
assert t2.meta[1] is t.meta[1] # Value IS the same list object
# Table init with copy=True implies deep copy
t2 = table.Table(t, copy=True)
assert t2.meta is not t.meta # NOT the same OrderedDict object but equal
assert t2.meta == t.meta
assert t2.meta[1] is not t.meta[1] # Value is NOT the same list object
def test_table_meta_copy_with_meta_arg():
"""
Test no copy vs light (key) copy vs deep copy of table meta when meta is
supplied as a table init argument. #8404.
"""
meta = {1: [1, 2]}
meta2 = {2: [3, 4]}
t = table.Table([[1]], meta=meta, copy=False)
assert t.meta is meta
t = table.Table([[1]], meta=meta) # default copy=True
assert t.meta is not meta
assert t.meta == meta
# Test initializing from existing table with meta with copy=False
t2 = table.Table(t, meta=meta2, copy=False)
assert t2.meta is meta2
assert t2.meta != t.meta # Change behavior in #8404
# Test initializing from existing table with meta with default copy=True
t2 = table.Table(t, meta=meta2)
assert t2.meta is not meta2
assert t2.meta != t.meta # Change behavior in #8404
# Table init with copy=True and empty dict meta gets that empty dict
t2 = table.Table(t, copy=True, meta={})
assert t2.meta == {}
# Table init with copy=True and kwarg meta=None gets the original table dict.
# This is a somewhat ambiguous case because it could be interpreted as the
# user wanting NO meta set on the output. This could be implemented by inspecting
# call args.
t2 = table.Table(t, copy=True, meta=None)
assert t2.meta == t.meta
# Test initializing empty table with meta with copy=False
t = table.Table(meta=meta, copy=False)
assert t.meta is meta
assert t.meta[1] is meta[1]
# Test initializing empty table with meta with default copy=True (deepcopy meta)
t = table.Table(meta=meta)
assert t.meta is not meta
assert t.meta == meta
assert t.meta[1] is not meta[1]
def test_replace_column_qtable():
"""Replace existing Quantity column with a new column in a QTable"""
a = [1, 2, 3] * u.m
b = [4, 5, 6]
t = table.QTable([a, b], names=["a", "b"])
ta = t["a"]
tb = t["b"]
ta.info.meta = {"aa": [0, 1, 2, 3, 4]}
ta.info.format = "%f"
t.replace_column("a", a.to("cm"))
assert np.all(t["a"] == ta)
assert t["a"] is not ta # New a column
assert t["b"] is tb # Original b column unchanged
assert t.colnames == ["a", "b"]
assert t["a"].info.meta is None
assert t["a"].info.format is None
def test_replace_update_column_via_setitem():
"""
Test table update like ``t['a'] = value``. This leverages off the
already well-tested ``replace_column`` and in-place update
``t['a'][:] = value``, so this testing is fairly light.
"""
a = [1, 2] * u.m
b = [3, 4]
t = table.QTable([a, b], names=["a", "b"])
assert isinstance(t["a"], u.Quantity)
# Inplace update
ta = t["a"]
t["a"] = 5 * u.m
assert np.all(t["a"] == [5, 5] * u.m)
assert t["a"] is ta
# Replace
t["a"] = [5, 6]
assert np.all(t["a"] == [5, 6])
assert isinstance(t["a"], table.Column)
assert t["a"] is not ta
def test_replace_update_column_via_setitem_warnings_normal():
"""
Test warnings related to table replace change in #5556:
Normal warning-free replace
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=["a", "b"])
with table.conf.set_temp("replace_warnings", ["refcount", "attributes", "slice"]):
t["a"] = 0 # in-place update
t["a"] = [10, 20, 30] # replace column
def test_replace_update_column_via_setitem_warnings_slice():
"""
Test warnings related to table replace change in #5556:
Replace a slice, one warning.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=["a", "b"])
with table.conf.set_temp("replace_warnings", ["refcount", "attributes", "slice"]):
t2 = t[:2]
t2["a"] = 0 # in-place slice update
assert np.all(t["a"] == [0, 0, 3])
with pytest.warns(
TableReplaceWarning,
match="replaced column 'a' which looks like an array slice",
) as w:
t2["a"] = [10, 20] # replace slice
assert len(w) == 1
def test_replace_update_column_via_setitem_warnings_attributes():
"""
Test warnings related to table replace change in #5556:
Lost attributes.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=["a", "b"])
t["a"].unit = "m"
with pytest.warns(
TableReplaceWarning,
match=r"replaced column 'a' " r"and column attributes \['unit'\]",
) as w:
with table.conf.set_temp(
"replace_warnings", ["refcount", "attributes", "slice"]
):
t["a"] = [10, 20, 30]
assert len(w) == 1
def test_replace_update_column_via_setitem_warnings_refcount():
"""
Test warnings related to table replace change in #5556:
Reference count changes.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=["a", "b"])
ta = t["a"] # Generate an extra reference to original column
with pytest.warns(
TableReplaceWarning, match="replaced column 'a' and the number of references"
) as w:
with table.conf.set_temp(
"replace_warnings", ["refcount", "attributes", "slice"]
):
t["a"] = [10, 20, 30]
assert len(w) == 1
def test_replace_update_column_via_setitem_warnings_always():
"""
Test warnings related to table replace change in #5556:
Test 'always' setting that raises warning for any replace.
"""
from inspect import currentframe, getframeinfo
t = table.Table([[1, 2, 3], [4, 5, 6]], names=["a", "b"])
with table.conf.set_temp("replace_warnings", ["always"]):
t["a"] = 0 # in-place slice update
with pytest.warns(TableReplaceWarning, match="replaced column 'a'") as w:
frameinfo = getframeinfo(currentframe())
t["a"] = [10, 20, 30] # replace column
assert len(w) == 1
# Make sure the warning points back to the user code line
assert w[0].lineno == frameinfo.lineno + 1
assert "test_table" in w[0].filename
def test_replace_update_column_via_setitem_replace_inplace():
"""
Test the replace_inplace config option related to #5556. In this
case no replace is done.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=["a", "b"])
ta = t["a"]
t["a"].unit = "m"
with table.conf.set_temp("replace_inplace", True):
with table.conf.set_temp(
"replace_warnings", ["always", "refcount", "attributes", "slice"]
):
t["a"] = 0 # in-place update
assert ta is t["a"]
t["a"] = [10, 20, 30] # normally replaces column, but not now
assert ta is t["a"]
assert np.all(t["a"] == [10, 20, 30])
def test_primary_key_is_inherited():
"""Test whether a new Table inherits the primary_key attribute from
its parent Table. Issue #4672"""
t = table.Table([(2, 3, 2, 1), (8, 7, 6, 5)], names=("a", "b"))
t.add_index("a")
original_key = t.primary_key
# can't test if tuples are equal, so just check content
assert original_key[0] == "a"
t2 = t[:]
t3 = t.copy()
t4 = table.Table(t)
    # check that the primary key content is preserved in each derived table
assert original_key == t2.primary_key
assert original_key == t3.primary_key
assert original_key == t4.primary_key
# just test one element, assume rest are equal if assert passes
assert t.loc[1] == t2.loc[1]
assert t.loc[1] == t3.loc[1]
assert t.loc[1] == t4.loc[1]
def test_qtable_read_for_ipac_table_with_char_columns():
"""Test that a char column of a QTable is assigned no unit and not
a dimensionless unit, otherwise conversion of reader output to
QTable fails."""
t1 = table.QTable([["A"]], names="B")
out = StringIO()
t1.write(out, format="ascii.ipac")
t2 = table.QTable.read(out.getvalue(), format="ascii.ipac", guess=False)
assert t2["B"].unit is None
def test_create_table_from_final_row():
"""Regression test for issue #8422: passing the last row of a table into
Table should return a new table containing that row."""
t1 = table.Table([(1, 2)], names=["col"])
row = t1[-1]
t2 = table.Table(row)["col"]
assert t2[0] == 2
def test_key_values_in_as_array():
    # Test column selection via the ``names`` argument of Table.as_array()
data_rows = [(1, 2.0, "x"), (4, 5.0, "y"), (5, 8.2, "z")]
# Creating a table with three columns
t1 = table.Table(
rows=data_rows,
names=("a", "b", "c"),
meta={"name": "first table"},
dtype=("i4", "f8", "S1"),
)
    # Values of the sliced columns 'a' and 'b' are stored in a numpy structured array
a = np.array([(1, 2.0), (4, 5.0), (5, 8.2)], dtype=[("a", "<i4"), ("b", "<f8")])
    # Values of the sliced column 'c' are stored in a numpy structured array
b = np.array([(b"x",), (b"y",), (b"z",)], dtype=[("c", "S1")])
# Comparing initialised array with sliced array using Table.as_array()
assert np.array_equal(a, t1.as_array(names=["a", "b"]))
assert np.array_equal(b, t1.as_array(names=["c"]))
def test_tolist():
t = table.Table(
[[1, 2, 3], [1.1, 2.2, 3.3], [b"foo", b"bar", b"hello"]], names=("a", "b", "c")
)
assert t["a"].tolist() == [1, 2, 3]
assert_array_equal(t["b"].tolist(), [1.1, 2.2, 3.3])
assert t["c"].tolist() == ["foo", "bar", "hello"]
assert isinstance(t["a"].tolist()[0], int)
assert isinstance(t["b"].tolist()[0], float)
assert isinstance(t["c"].tolist()[0], str)
t = table.Table(
[[[1, 2], [3, 4]], [[b"foo", b"bar"], [b"hello", b"world"]]], names=("a", "c")
)
assert t["a"].tolist() == [[1, 2], [3, 4]]
assert t["c"].tolist() == [["foo", "bar"], ["hello", "world"]]
assert isinstance(t["a"].tolist()[0][0], int)
assert isinstance(t["c"].tolist()[0][0], str)
class MyTable(Table):
foo = TableAttribute()
bar = TableAttribute(default=[])
baz = TableAttribute(default=1)
def test_table_attribute():
assert repr(MyTable.baz) == "<TableAttribute name=baz default=1>"
t = MyTable([[1, 2]])
# __attributes__ created on the fly on the first access of an attribute
# that has a non-None default.
assert "__attributes__" not in t.meta
assert t.foo is None
assert "__attributes__" not in t.meta
assert t.baz == 1
assert "__attributes__" in t.meta
t.bar.append(2.0)
assert t.bar == [2.0]
assert t.baz == 1
t.baz = "baz"
assert t.baz == "baz"
# Table attributes round-trip through pickle
tp = pickle.loads(pickle.dumps(t))
assert tp.foo is None
assert tp.baz == "baz"
assert tp.bar == [2.0]
# Allow initialization of attributes in table creation, with / without data
for data in None, [[1, 2]]:
t2 = MyTable(data, foo=3, bar="bar", baz="baz")
assert t2.foo == 3
assert t2.bar == "bar"
assert t2.baz == "baz"
# Initializing from an existing MyTable works, with and without kwarg attrs
t3 = MyTable(t2)
assert t3.foo == 3
assert t3.bar == "bar"
assert t3.baz == "baz"
t3 = MyTable(t2, foo=5, bar="fubar")
assert t3.foo == 5
assert t3.bar == "fubar"
assert t3.baz == "baz"
    # Deleting an attribute removes it from __attributes__
del t.baz
assert "baz" not in t.meta["__attributes__"]
del t.bar
assert "__attributes__" not in t.meta
def test_table_attribute_ecsv():
# Table attribute round-trip through ECSV
t = MyTable([[1, 2]], bar=[2.0], baz="baz")
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = MyTable.read(out.getvalue(), format="ascii.ecsv")
assert t2.foo is None
assert t2.bar == [2.0]
assert t2.baz == "baz"
def test_table_attribute_fail():
# Code raises ValueError(f'{attr} not allowed as TableAttribute') but in this
# context it gets re-raised as a RuntimeError during class definition.
with pytest.raises(RuntimeError, match="Error calling __set_name__"):
class MyTable2(Table):
descriptions = TableAttribute() # Conflicts with init arg
with pytest.raises(RuntimeError, match="Error calling __set_name__"):
class MyTable3(Table):
colnames = TableAttribute() # Conflicts with built-in property
def test_set_units_fail():
dat = [[1.0, 2.0], ["aa", "bb"]]
with pytest.raises(
ValueError, match="sequence of unit values must match number of columns"
):
Table(dat, units=[u.m])
with pytest.raises(
ValueError, match="invalid column name c for setting unit attribute"
):
Table(dat, units={"c": u.m})
def test_set_units():
dat = [[1.0, 2.0], ["aa", "bb"], [3, 4]]
exp_units = (u.m, None, None)
for cls in Table, QTable:
for units in ({"a": u.m, "c": ""}, exp_units):
qt = cls(dat, units=units, names=["a", "b", "c"])
if cls is QTable:
assert isinstance(qt["a"], u.Quantity)
assert isinstance(qt["b"], table.Column)
assert isinstance(qt["c"], table.Column)
for col, unit in zip(qt.itercols(), exp_units):
assert col.info.unit is unit
def test_set_descriptions():
dat = [[1.0, 2.0], ["aa", "bb"]]
exp_descriptions = ("my description", None)
for cls in Table, QTable:
for descriptions in ({"a": "my description"}, exp_descriptions):
qt = cls(dat, descriptions=descriptions, names=["a", "b"])
for col, description in zip(qt.itercols(), exp_descriptions):
assert col.info.description == description
def test_set_units_from_row():
text = ["a,b", ",s", "1,2", "3,4"]
units = Table.read(text, format="ascii", data_start=1, data_end=2)[0]
t = Table.read(text, format="ascii", data_start=2, units=units)
assert isinstance(units, table.Row)
assert t["a"].info.unit is None
assert t["b"].info.unit is u.s
def test_set_units_descriptions_read():
"""Test setting units and descriptions via Table.read. The test here
is less comprehensive because the implementation is exactly the same
as for Table.__init__ (calling Table._set_column_attribute)"""
for cls in Table, QTable:
t = cls.read(
["a b", "1 2"],
format="ascii",
units=[u.m, u.s],
descriptions=["hi", "there"],
)
assert t["a"].info.unit is u.m
assert t["b"].info.unit is u.s
assert t["a"].info.description == "hi"
assert t["b"].info.description == "there"
def test_broadcasting_8933():
"""Explicitly check re-work of code related to broadcasting in #8933"""
t = table.Table([[1, 2]]) # Length=2 table
t["a"] = [[3, 4]] # Can broadcast if ndim > 1 and shape[0] == 1
t["b"] = 5
t["c"] = [1] # Treat as broadcastable scalar, not length=1 array (which would fail)
assert np.all(t["a"] == [[3, 4], [3, 4]])
assert np.all(t["b"] == [5, 5])
assert np.all(t["c"] == [1, 1])
# Test that broadcasted column is writeable
t["c"][1] = 10
assert np.all(t["c"] == [1, 10])
def test_custom_masked_column_in_nonmasked_table():
"""Test the refactor and change in column upgrades introduced
in 95902650f. This fixes a regression introduced by #8789
(Change behavior of Table regarding masked columns)."""
class MyMaskedColumn(table.MaskedColumn):
pass
class MySubMaskedColumn(MyMaskedColumn):
pass
class MyColumn(table.Column):
pass
class MySubColumn(MyColumn):
pass
class MyTable(table.Table):
Column = MyColumn
MaskedColumn = MyMaskedColumn
a = table.Column([1])
b = table.MaskedColumn([2], mask=[True])
c = MyMaskedColumn([3], mask=[True])
d = MySubColumn([4])
e = MySubMaskedColumn([5], mask=[True])
# Two different pathways for making table
t1 = MyTable([a, b, c, d, e], names=["a", "b", "c", "d", "e"])
t2 = MyTable()
t2["a"] = a
t2["b"] = b
t2["c"] = c
t2["d"] = d
t2["e"] = e
for t in (t1, t2):
assert type(t["a"]) is MyColumn
assert type(t["b"]) is MyMaskedColumn # upgrade
assert type(t["c"]) is MyMaskedColumn
assert type(t["d"]) is MySubColumn
assert type(t["e"]) is MySubMaskedColumn # sub-class not downgraded
def test_sort_with_mutable_skycoord():
"""Test sorting a table that has a mutable column such as SkyCoord.
In this case the sort is done in-place
"""
t = Table([[2, 1], SkyCoord([4, 3], [6, 5], unit="deg,deg")], names=["a", "sc"])
meta = {"a": [1, 2]}
ta = t["a"]
tsc = t["sc"]
t["sc"].info.meta = meta
t.sort("a")
assert np.all(t["a"] == [1, 2])
assert np.allclose(t["sc"].ra.to_value(u.deg), [3, 4])
assert np.allclose(t["sc"].dec.to_value(u.deg), [5, 6])
assert t["a"] is ta
assert t["sc"] is tsc
# Prior to astropy 4.1 this was a deep copy of SkyCoord column; after 4.1
# it is a reference.
t["sc"].info.meta["a"][0] = 100
assert meta["a"][0] == 100
def test_sort_with_non_mutable():
"""Test sorting a table that has a non-mutable column."""
t = Table([[2, 1], [3, 4]], names=["a", "b"])
ta = t["a"]
tb = t["b"]
t["b"].setflags(write=False)
meta = {"a": [1, 2]}
t["b"].info.meta = meta
t.sort("a")
assert np.all(t["a"] == [1, 2])
assert np.all(t["b"] == [4, 3])
assert ta is t["a"]
assert tb is not t["b"]
    # The non-writeable column is replaced by a copy during the sort, so its
    # meta is deep-copied and the original ``meta`` dict is left unchanged.
t["b"].info.meta["a"][0] = 100
assert meta["a"][0] == 1
def test_init_with_list_of_masked_arrays():
"""Test the fix for #8977"""
m0 = np.ma.array([0, 1, 2], mask=[True, False, True])
m1 = np.ma.array([3, 4, 5], mask=[False, True, False])
mc = [m0, m1]
# Test _init_from_list
t = table.Table([mc], names=["a"])
# Test add_column
t["b"] = [m1, m0]
assert t["a"].shape == (2, 3)
assert np.all(t["a"][0] == m0)
assert np.all(t["a"][1] == m1)
assert np.all(t["a"][0].mask == m0.mask)
assert np.all(t["a"][1].mask == m1.mask)
assert t["b"].shape == (2, 3)
assert np.all(t["b"][0] == m1)
assert np.all(t["b"][1] == m0)
assert np.all(t["b"][0].mask == m1.mask)
assert np.all(t["b"][1].mask == m0.mask)
def test_data_to_col_convert_strategy():
"""Test the update to how data_to_col works (#8972), using the regression
example from #8971.
"""
t = table.Table([[0, 1]])
t["a"] = 1
t["b"] = np.int64(2) # Failed previously
assert np.all(t["a"] == [1, 1])
assert np.all(t["b"] == [2, 2])
def test_structured_masked_column():
"""Test that adding a masked ndarray with a structured dtype works"""
dtype = np.dtype([("z", "f8"), ("x", "f8"), ("y", "i4")])
t = Table()
t["a"] = np.ma.array(
[
(1, 2, 3),
(4, 5, 6),
],
mask=[
(False, False, True),
(False, True, False),
],
dtype=dtype,
)
assert np.all(t["a"]["z"].mask == [False, False])
assert np.all(t["a"]["x"].mask == [False, True])
assert np.all(t["a"]["y"].mask == [True, False])
assert isinstance(t["a"], MaskedColumn)
def test_rows_with_mixins():
"""Test for #9165 to allow adding a list of mixin objects.
Also test for fix to #9357 where group_by() failed due to
mixin object not having info.indices set to [].
"""
tm = Time([1, 2], format="cxcsec")
q = [1, 2] * u.m
mixed1 = [1 * u.m, 2] # Mixed input, fails to convert to Quantity
mixed2 = [2, 1 * u.m] # Mixed input, not detected as potential mixin
rows = [
(1, q[0], tm[0]),
(2, q[1], tm[1]),
]
t = table.QTable(rows=rows)
t["a"] = [q[0], q[1]]
t["b"] = [tm[0], tm[1]]
t["m1"] = mixed1
t["m2"] = mixed2
assert np.all(t["col1"] == q)
assert np.all(t["col2"] == tm)
assert np.all(t["a"] == q)
assert np.all(t["b"] == tm)
assert np.all(t["m1"][ii] == mixed1[ii] for ii in range(2))
assert np.all(t["m2"][ii] == mixed2[ii] for ii in range(2))
assert type(t["m1"]) is table.Column
assert t["m1"].dtype is np.dtype(object)
assert type(t["m2"]) is table.Column
assert t["m2"].dtype is np.dtype(object)
# Ensure group_by() runs without failing for sortable columns.
# The columns 'm1', and 'm2' are object dtype and not sortable.
for name in ["col0", "col1", "col2", "a", "b"]:
t.group_by(name)
# For good measure include exactly the failure in #9357 in which the
# list of Time() objects is in the Table initializer.
mjds = [Time(58000, format="mjd")]
t = Table([mjds, ["gbt"]], names=("mjd", "obs"))
t.group_by("obs")
def test_iterrows():
dat = [
(1, 2, 3),
(4, 5, 6),
(7, 8, 6),
]
t = table.Table(rows=dat, names=("a", "b", "c"))
c_s = []
a_s = []
for c, a in t.iterrows("c", "a"):
a_s.append(a)
c_s.append(c)
assert np.all(t["a"] == a_s)
assert np.all(t["c"] == c_s)
rows = list(t.iterrows())
assert rows == dat
with pytest.raises(ValueError, match="d is not a valid column name"):
t.iterrows("d")
def test_values_and_types():
dat = [
(1, 2, 3),
(4, 5, 6),
(7, 8, 6),
]
t = table.Table(rows=dat, names=("a", "b", "c"))
assert isinstance(t.values(), type(OrderedDict().values()))
assert isinstance(t.columns.values(), type(OrderedDict().values()))
assert isinstance(t.columns.keys(), type(OrderedDict().keys()))
for i in t.values():
assert isinstance(i, table.column.Column)
def test_items():
dat = [
(1, 2, 3),
(4, 5, 6),
(7, 8, 9),
]
t = table.Table(rows=dat, names=("a", "b", "c"))
assert isinstance(t.items(), type(OrderedDict({}).items()))
for i in list(t.items()):
assert isinstance(i, tuple)
def test_read_write_not_replaceable():
t = table.Table()
with pytest.raises(AttributeError):
t.read = "fake_read"
with pytest.raises(AttributeError):
t.write = "fake_write"
def test_keep_columns_with_generator():
# Regression test for #12529
t = table.table_helpers.simple_table(1)
t.keep_columns(col for col in t.colnames if col == "a")
assert t.colnames == ["a"]
def test_remove_columns_with_generator():
# Regression test for #12529
t = table.table_helpers.simple_table(1)
t.remove_columns(col for col in t.colnames if col == "a")
assert t.colnames == ["b", "c"]
def test_keep_columns_invalid_names_messages():
t = table.table_helpers.simple_table(1)
with pytest.raises(KeyError, match='column "d" does not exist'):
t.keep_columns(["c", "d"])
with pytest.raises(KeyError, match="columns {'[de]', '[de]'} do not exist"):
t.keep_columns(["c", "d", "e"])
def test_remove_columns_invalid_names_messages():
t = table.table_helpers.simple_table(1)
with pytest.raises(KeyError, match='column "d" does not exist'):
t.remove_columns(["c", "d"])
with pytest.raises(KeyError, match="columns {'[de]', '[de]'} do not exist"):
t.remove_columns(["c", "d", "e"])
@pytest.mark.parametrize("path_type", ["str", "Path"])
def test_read_write_tilde_path(path_type, home_is_tmpdir):
if path_type == "str":
test_file = os.path.join("~", "test.csv")
else:
test_file = pathlib.Path("~", "test.csv")
t1 = Table()
t1["a"] = [1, 2, 3]
t1.write(test_file)
t2 = Table.read(test_file)
assert np.all(t2["a"] == [1, 2, 3])
# Ensure the data wasn't written to the literal tilde-prefixed path
assert not os.path.exists(test_file)
def test_add_list_order():
t = Table()
names = list(map(str, range(20)))
array = np.empty((20, 1))
t.add_columns(array, names=names)
assert t.colnames == names
|
39e721f19bfe04f42efdfd4dfd6be28ae6ce44c7d26e734729b61d6ab8c4c4db | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import numpy as np
import pytest
from astropy import units as u
from astropy.table import Column, QTable, Row, Table, hstack
from astropy.table.bst import BST
from astropy.table.column import BaseColumn
from astropy.table.index import SlicedIndex, get_index
from astropy.table.soco import SCEngine
from astropy.table.sorted_array import SortedArray
from astropy.time import Time
from astropy.utils.compat.optional_deps import HAS_SORTEDCONTAINERS
from .test_table import SetupData
available_engines = [BST, SortedArray]
if HAS_SORTEDCONTAINERS:
available_engines.append(SCEngine)
@pytest.fixture(params=available_engines)
def engine(request):
return request.param
_col = [1, 2, 3, 4, 5]
@pytest.fixture(
params=[
_col,
u.Quantity(_col),
Time(_col, format="jyear"),
]
)
def main_col(request):
return request.param
def assert_col_equal(col, array):
if isinstance(col, Time):
assert np.all(col == Time(array, format="jyear"))
else:
assert np.all(col == col.__class__(array))
@pytest.mark.usefixtures("table_types")
class TestIndex(SetupData):
def _setup(self, main_col, table_types):
super()._setup(table_types)
self.main_col = main_col
if isinstance(main_col, u.Quantity):
self._table_type = QTable
if not isinstance(main_col, list):
self._column_type = lambda x: x # don't change mixin type
self.mutable = isinstance(main_col, (list, u.Quantity))
def make_col(self, name, lst):
return self._column_type(lst, name=name)
def make_val(self, val):
if isinstance(self.main_col, Time):
return Time(val, format="jyear")
return val
@property
def t(self):
if not hasattr(self, "_t"):
# Note that order of columns is important, and the 'a' column is
# last to ensure that the index column does not need to be the first
# column (as was discovered in #10025). Most testing uses 'a' and
# ('a', 'b') for the columns.
self._t = self._table_type()
self._t["b"] = self._column_type([4.0, 5.1, 6.2, 7.0, 1.1])
self._t["c"] = self._column_type(["7", "8", "9", "10", "11"])
self._t["a"] = self._column_type(self.main_col)
return self._t
@pytest.mark.parametrize("composite", [False, True])
def test_table_index(self, main_col, table_types, composite, engine):
self._setup(main_col, table_types)
t = self.t
t.add_index(("a", "b") if composite else "a", engine=engine)
assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4])
if not self.mutable:
return
# test altering table columns
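        # Note: row tuples follow the table's column order (b, c, a) from the
        # ``t`` fixture, so the last element goes into the indexed 'a' column.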
t["a"][0] = 4
t.add_row((6.0, "7", 6))
t["a"][3] = 10
t.remove_row(2)
t.add_row((5.0, "9", 4))
assert_col_equal(t["a"], np.array([4, 2, 10, 5, 6, 4]))
assert np.allclose(t["b"], np.array([4.0, 5.1, 7.0, 1.1, 6.0, 5.0]))
assert np.all(t["c"].data == np.array(["7", "8", "10", "11", "7", "9"]))
index = t.indices[0]
ll = list(index.data.items())
if composite:
assert np.all(
ll
== [
((2, 5.1), [1]),
((4, 4.0), [0]),
((4, 5.0), [5]),
((5, 1.1), [3]),
((6, 6.0), [4]),
((10, 7.0), [2]),
]
)
else:
assert np.all(
ll
== [((2,), [1]), ((4,), [0, 5]), ((5,), [3]), ((6,), [4]), ((10,), [2])]
)
t.remove_indices("a")
assert len(t.indices) == 0
def test_table_slicing(self, main_col, table_types, engine):
self._setup(main_col, table_types)
t = self.t
t.add_index("a", engine=engine)
assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4])
for slice_ in ([0, 2], np.array([0, 2])):
t2 = t[slice_]
# t2 should retain an index on column 'a'
assert len(t2.indices) == 1
assert_col_equal(t2["a"], [1, 3])
# the index in t2 should reorder row numbers after slicing
assert np.all(t2.indices[0].sorted_data() == [0, 1])
# however, this index should be a deep copy of t1's index
assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4])
def test_remove_rows(self, main_col, table_types, engine):
self._setup(main_col, table_types)
if not self.mutable:
return
t = self.t
t.add_index("a", engine=engine)
# remove individual row
t2 = t.copy()
t2.remove_rows(2)
assert_col_equal(t2["a"], [1, 2, 4, 5])
assert np.all(t2.indices[0].sorted_data() == [0, 1, 2, 3])
# remove by list, ndarray, or slice
for cut in ([0, 2, 4], np.array([0, 2, 4]), slice(0, 5, 2)):
t2 = t.copy()
t2.remove_rows(cut)
assert_col_equal(t2["a"], [2, 4])
assert np.all(t2.indices[0].sorted_data() == [0, 1])
with pytest.raises(ValueError):
t.remove_rows((0, 2, 4))
def test_col_get_slice(self, main_col, table_types, engine):
self._setup(main_col, table_types)
t = self.t
t.add_index("a", engine=engine)
# get slice
t2 = t[1:3] # table slice
assert_col_equal(t2["a"], [2, 3])
assert np.all(t2.indices[0].sorted_data() == [0, 1])
col_slice = t["a"][1:3]
assert_col_equal(col_slice, [2, 3])
# true column slices discard indices
if isinstance(t["a"], BaseColumn):
assert len(col_slice.info.indices) == 0
# take slice of slice
t2 = t[::2]
assert_col_equal(t2["a"], np.array([1, 3, 5]))
t3 = t2[::-1]
assert_col_equal(t3["a"], np.array([5, 3, 1]))
assert np.all(t3.indices[0].sorted_data() == [2, 1, 0])
t3 = t2[:2]
assert_col_equal(t3["a"], np.array([1, 3]))
assert np.all(t3.indices[0].sorted_data() == [0, 1])
# out-of-bound slices
for t_empty in (t2[3:], t2[2:1], t3[2:]):
assert len(t_empty["a"]) == 0
assert np.all(t_empty.indices[0].sorted_data() == [])
if self.mutable:
# get boolean mask
mask = t["a"] % 2 == 1
t2 = t[mask]
assert_col_equal(t2["a"], [1, 3, 5])
assert np.all(t2.indices[0].sorted_data() == [0, 1, 2])
def test_col_set_slice(self, main_col, table_types, engine):
self._setup(main_col, table_types)
if not self.mutable:
return
t = self.t
t.add_index("a", engine=engine)
# set slice
t2 = t.copy()
t2["a"][1:3] = np.array([6, 7])
assert_col_equal(t2["a"], np.array([1, 6, 7, 4, 5]))
assert np.all(t2.indices[0].sorted_data() == [0, 3, 4, 1, 2])
# change original table via slice reference
t2 = t.copy()
t3 = t2[1:3]
assert_col_equal(t3["a"], np.array([2, 3]))
assert np.all(t3.indices[0].sorted_data() == [0, 1])
t3["a"][0] = 5
assert_col_equal(t3["a"], np.array([5, 3]))
assert_col_equal(t2["a"], np.array([1, 5, 3, 4, 5]))
assert np.all(t3.indices[0].sorted_data() == [1, 0])
assert np.all(t2.indices[0].sorted_data() == [0, 2, 3, 1, 4])
# set boolean mask
t2 = t.copy()
mask = t["a"] % 2 == 1
t2["a"][mask] = 0.0
assert_col_equal(t2["a"], [0, 2, 0, 4, 0])
assert np.all(t2.indices[0].sorted_data() == [0, 2, 4, 1, 3])
def test_multiple_slices(self, main_col, table_types, engine):
self._setup(main_col, table_types)
if not self.mutable:
return
t = self.t
t.add_index("a", engine=engine)
for i in range(6, 51):
t.add_row((1.0, "A", i))
assert_col_equal(t["a"], list(range(1, 51)))
assert np.all(t.indices[0].sorted_data() == list(range(50)))
evens = t[::2]
assert np.all(evens.indices[0].sorted_data() == list(range(25)))
reverse = evens[::-1]
index = reverse.indices[0]
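        # Composing [::2] and then [::-1] over 50 rows visits original rows
        # 48, 46, ..., 0, which the sliced index records as start=48, step=-2
        # with an exclusive stop of -2.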
assert (index.start, index.stop, index.step) == (48, -2, -2)
assert np.all(index.sorted_data() == list(range(24, -1, -1)))
# modify slice of slice
reverse[-10:] = 0
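        # reverse[-10:] maps to original rows 18, 16, ..., 0, i.e. the rows
        # holding the odd values 1..19; the expected array below zeroes exactly
        # those entries (expected[:20] is a view, so the boolean-indexed
        # assignment modifies ``expected`` in place).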
expected = np.array(list(range(1, 51)))
expected[:20][expected[:20] % 2 == 1] = 0
assert_col_equal(t["a"], expected)
assert_col_equal(evens["a"], expected[::2])
assert_col_equal(reverse["a"], expected[::2][::-1])
# first ten evens are now zero
assert np.all(
t.indices[0].sorted_data()
== (
[0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19]
+ list(range(20, 50))
)
)
assert np.all(evens.indices[0].sorted_data() == list(range(25)))
assert np.all(reverse.indices[0].sorted_data() == list(range(24, -1, -1)))
# try different step sizes of slice
t2 = t[1:20:2]
assert_col_equal(t2["a"], [2, 4, 6, 8, 10, 12, 14, 16, 18, 20])
assert np.all(t2.indices[0].sorted_data() == list(range(10)))
t3 = t2[::3]
assert_col_equal(t3["a"], [2, 8, 14, 20])
assert np.all(t3.indices[0].sorted_data() == [0, 1, 2, 3])
t4 = t3[2::-1]
assert_col_equal(t4["a"], [14, 8, 2])
assert np.all(t4.indices[0].sorted_data() == [2, 1, 0])
def test_sort(self, main_col, table_types, engine):
self._setup(main_col, table_types)
t = self.t[::-1] # reverse table
assert_col_equal(t["a"], [5, 4, 3, 2, 1])
t.add_index("a", engine=engine)
assert np.all(t.indices[0].sorted_data() == [4, 3, 2, 1, 0])
if not self.mutable:
return
# sort table by column a
t2 = t.copy()
t2.sort("a")
assert_col_equal(t2["a"], [1, 2, 3, 4, 5])
assert np.all(t2.indices[0].sorted_data() == [0, 1, 2, 3, 4])
# sort table by primary key
t2 = t.copy()
t2.sort()
assert_col_equal(t2["a"], [1, 2, 3, 4, 5])
assert np.all(t2.indices[0].sorted_data() == [0, 1, 2, 3, 4])
def test_insert_row(self, main_col, table_types, engine):
self._setup(main_col, table_types)
if not self.mutable:
return
t = self.t
t.add_index("a", engine=engine)
t.insert_row(2, (1.0, "12", 6))
assert_col_equal(t["a"], [1, 2, 6, 3, 4, 5])
assert np.all(t.indices[0].sorted_data() == [0, 1, 3, 4, 5, 2])
t.insert_row(1, (4.0, "13", 0))
assert_col_equal(t["a"], [1, 0, 2, 6, 3, 4, 5])
assert np.all(t.indices[0].sorted_data() == [1, 0, 2, 4, 5, 6, 3])
def test_index_modes(self, main_col, table_types, engine):
self._setup(main_col, table_types)
t = self.t
t.add_index("a", engine=engine)
# first, no special mode
assert len(t[[1, 3]].indices) == 1
assert len(t[::-1].indices) == 1
assert len(self._table_type(t).indices) == 1
assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4])
t2 = t.copy()
# non-copy mode
with t.index_mode("discard_on_copy"):
assert len(t[[1, 3]].indices) == 0
assert len(t[::-1].indices) == 0
assert len(self._table_type(t).indices) == 0
assert len(t2.copy().indices) == 1 # mode should only affect t
# make sure non-copy mode is exited correctly
assert len(t[[1, 3]].indices) == 1
if not self.mutable:
return
# non-modify mode
with t.index_mode("freeze"):
assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4])
t["a"][0] = 6
assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4])
t.add_row((1.5, "12", 2))
assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4])
t.remove_rows([1, 3])
assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4])
assert_col_equal(t["a"], [6, 3, 5, 2])
# mode should only affect t
assert np.all(t2.indices[0].sorted_data() == [0, 1, 2, 3, 4])
t2["a"][0] = 6
assert np.all(t2.indices[0].sorted_data() == [1, 2, 3, 4, 0])
# make sure non-modify mode is exited correctly
assert np.all(t.indices[0].sorted_data() == [3, 1, 2, 0])
if isinstance(t["a"], BaseColumn):
assert len(t["a"][::-1].info.indices) == 0
with t.index_mode("copy_on_getitem"):
assert len(t["a"][[1, 2]].info.indices) == 1
# mode should only affect t
assert len(t2["a"][[1, 2]].info.indices) == 0
assert len(t["a"][::-1].info.indices) == 0
assert len(t2["a"][::-1].info.indices) == 0
def test_index_retrieval(self, main_col, table_types, engine):
self._setup(main_col, table_types)
t = self.t
t.add_index("a", engine=engine)
t.add_index(["a", "c"], engine=engine)
assert len(t.indices) == 2
assert len(t.indices["a"].columns) == 1
assert len(t.indices["a", "c"].columns) == 2
with pytest.raises(IndexError):
t.indices["b"]
def test_col_rename(self, main_col, table_types, engine):
"""
Checks for a previous bug in which copying a Table
with different column names raised an exception.
"""
self._setup(main_col, table_types)
t = self.t
t.add_index("a", engine=engine)
t2 = self._table_type(self.t, names=["d", "e", "f"])
assert len(t2.indices) == 1
def test_table_loc(self, main_col, table_types, engine):
self._setup(main_col, table_types)
t = self.t
t.add_index("a", engine=engine)
t.add_index("b", engine=engine)
t2 = t.loc[self.make_val(3)] # single label, with primary key 'a'
assert_col_equal(t2["a"], [3])
assert isinstance(t2, Row)
# list search
t2 = t.loc[[self.make_val(1), self.make_val(4), self.make_val(2)]]
assert_col_equal(t2["a"], [1, 4, 2]) # same order as input list
if not isinstance(main_col, Time):
# ndarray search
t2 = t.loc[np.array([1, 4, 2])]
assert_col_equal(t2["a"], [1, 4, 2])
assert_col_equal(t2["a"], [1, 4, 2])
t2 = t.loc[self.make_val(3) : self.make_val(5)] # range search
assert_col_equal(t2["a"], [3, 4, 5])
t2 = t.loc["b", 5.0:7.0]
assert_col_equal(t2["b"], [5.1, 6.2, 7.0])
# search by sorted index
t2 = t.iloc[0:2] # two smallest rows by column 'a'
assert_col_equal(t2["a"], [1, 2])
t2 = t.iloc["b", 2:] # exclude two smallest rows in column 'b'
assert_col_equal(t2["b"], [5.1, 6.2, 7.0])
for t2 in (t.loc[:], t.iloc[:]):
assert_col_equal(t2["a"], [1, 2, 3, 4, 5])
def test_table_loc_indices(self, main_col, table_types, engine):
self._setup(main_col, table_types)
t = self.t
t.add_index("a", engine=engine)
t.add_index("b", engine=engine)
t2 = t.loc_indices[self.make_val(3)] # single label, with primary key 'a'
assert t2 == 2
# list search
t2 = t.loc_indices[[self.make_val(1), self.make_val(4), self.make_val(2)]]
for i, p in zip(t2, [1, 4, 2]): # same order as input list
assert i == p - 1
def test_invalid_search(self, main_col, table_types, engine):
# using .loc and .loc_indices with a value not present should raise an exception
self._setup(main_col, table_types)
t = self.t
t.add_index("a")
with pytest.raises(KeyError):
t.loc[self.make_val(6)]
with pytest.raises(KeyError):
t.loc_indices[self.make_val(6)]
def test_copy_index_references(self, main_col, table_types, engine):
# check against a bug in which indices were given an incorrect
# column reference when copied
self._setup(main_col, table_types)
t = self.t
t.add_index("a")
t.add_index("b")
t2 = t.copy()
assert t2.indices["a"].columns[0] is t2["a"]
assert t2.indices["b"].columns[0] is t2["b"]
def test_unique_index(self, main_col, table_types, engine):
self._setup(main_col, table_types)
t = self.t
t.add_index("a", engine=engine, unique=True)
assert np.all(t.indices["a"].sorted_data() == [0, 1, 2, 3, 4])
if self.mutable:
with pytest.raises(ValueError):
t.add_row((5.0, "9", 5))
def test_copy_indexed_table(self, table_types):
self._setup(_col, table_types)
t = self.t
t.add_index("a")
t.add_index(["a", "b"])
for tp in (self._table_type(t), t.copy()):
assert len(t.indices) == len(tp.indices)
for index, indexp in zip(t.indices, tp.indices):
assert np.all(index.data.data == indexp.data.data)
assert index.data.data.colnames == indexp.data.data.colnames
def test_updating_row_byindex(self, main_col, table_types, engine):
self._setup(main_col, table_types)
t = Table(
[["a", "b", "c", "d"], [2, 3, 4, 5], [3, 4, 5, 6]],
names=("a", "b", "c"),
meta={"name": "first table"},
)
t.add_index("a", engine=engine)
t.add_index("b", engine=engine)
t.loc["c"] = ["g", 40, 50] # single label, with primary key 'a'
t2 = t[2]
assert list(t2) == ["g", 40, 50]
# list search
t.loc[["a", "d", "b"]] = [["a", 20, 30], ["d", 50, 60], ["b", 30, 40]]
t2 = [["a", 20, 30], ["d", 50, 60], ["b", 30, 40]]
for i, p in zip(t2, [1, 4, 2]): # same order as input list
assert list(t[p - 1]) == i
def test_invalid_updates(self, main_col, table_types, engine):
# using .loc and .loc_indices with a value not present should raise an exception
self._setup(main_col, table_types)
t = Table(
[[1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 5, 6]],
names=("a", "b", "c"),
meta={"name": "first table"},
)
t.add_index("a")
with pytest.raises(ValueError):
t.loc[3] = [[1, 2, 3]]
with pytest.raises(ValueError):
t.loc[[1, 4, 2]] = [[1, 2, 3], [4, 5, 6]]
with pytest.raises(ValueError):
t.loc[[1, 4, 2]] = [[1, 2, 3], [4, 5, 6], [2, 3]]
with pytest.raises(ValueError):
t.loc[[1, 4, 2]] = [[1, 2, 3], [4, 5], [2, 3]]
def test_get_index():
a = [1, 4, 5, 2, 7, 4, 45]
b = [2.0, 5.0, 8.2, 3.7, 4.3, 6.5, 3.3]
t = Table([a, b], names=("a", "b"), meta={"name": "first table"})
t.add_index(["a"])
# Getting the values of index using names
x1 = get_index(t, names=["a"])
assert isinstance(x1, SlicedIndex)
assert len(x1.columns) == 1
assert len(x1.columns[0]) == 7
assert x1.columns[0].info.name == "a"
    # Getting the values of the index using table_copy
x2 = get_index(t, table_copy=t[["a"]])
assert isinstance(x2, SlicedIndex)
assert len(x2.columns) == 1
assert len(x2.columns[0]) == 7
assert x2.columns[0].info.name == "a"
with pytest.raises(ValueError):
get_index(t, names=["a"], table_copy=t[["a"]])
with pytest.raises(ValueError):
get_index(t, names=None, table_copy=None)
def test_table_index_time_warning(engine):
# Make sure that no ERFA warnings are emitted when indexing a table by
# a Time column with a non-default time scale
tab = Table()
tab["a"] = Time([1, 2, 3], format="jyear", scale="tai")
tab["b"] = [4, 3, 2]
with warnings.catch_warnings(record=True) as wlist:
tab.add_index(("a", "b"), engine=engine)
assert len(wlist) == 0
@pytest.mark.parametrize(
"col",
[
Column(np.arange(50000, 50005)),
np.arange(50000, 50005) * u.m,
Time(np.arange(50000, 50005), format="mjd"),
],
)
def test_table_index_does_not_propagate_to_column_slices(col):
# They lost contact to the parent table, so they should also not have
# information on the indices; this helps prevent large memory usage if,
# e.g., a large time column is turned into an object array; see gh-10688.
tab = QTable()
tab["t"] = col
tab.add_index("t")
t = tab["t"]
assert t.info.indices
tx = t[1:]
assert not tx.info.indices
tabx = tab[1:]
t = tabx["t"]
assert t.info.indices
def test_hstack_qtable_table():
# Check in particular that indices are initialized or copied correctly
# for a Column that is being converted to a Quantity.
qtab = QTable([np.arange(5.0) * u.m], names=["s"])
qtab.add_index("s")
tab = Table([Column(np.arange(5.0), unit=u.s)], names=["t"])
qstack = hstack([qtab, tab])
assert qstack["t"].info.indices == []
assert qstack.indices == []
def test_index_slice_exception():
with pytest.raises(TypeError, match="index_slice must be tuple or slice"):
SlicedIndex(None, None)
|
4e7f614f7afd8f9ba8e813c68d0ceea598068bf29575eb28dcd8360b20f35742 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy import coordinates, time
from astropy import units as u
from astropy.table import Column, NdarrayMixin, QTable, Table, table_helpers, unique
from astropy.time import Time
from astropy.utils.compat import NUMPY_LT_1_22, NUMPY_LT_1_22_1
from astropy.utils.exceptions import AstropyUserWarning
def sort_eq(list1, list2):
return sorted(list1) == sorted(list2)
def test_column_group_by(T1q):
"""Test grouping a Column by various key types."""
# T1q["a"] could be Column or Quantity, so force the object we want to group to be
# Column. Then later we are using the "a" column as a grouping key.
t1a = Column(T1q["a"])
unit = T1q["a"].unit or 1
# Group by a Column (i.e. numpy array)
t1ag = t1a.group_by(T1q["a"])
keys = t1ag.groups.keys
assert np.all(t1ag.groups.indices == np.array([0, 1, 4, 8]))
assert np.all(keys == np.array([0, 1, 2]) * unit)
# Group by a Table and numpy structured array
for t1ag, key_unit in (
(t1a.group_by(T1q["a", "b"]), unit),
(t1a.group_by(T1q["a", "b"].as_array()), 1),
):
assert np.all(t1ag.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8]))
keys = t1ag.groups.keys
assert keys.dtype.names == ("a", "b")
assert np.all(keys["a"] == np.array([0, 1, 1, 2, 2, 2]) * key_unit)
assert np.all(keys["b"] == np.array(["a", "a", "b", "a", "b", "c"]))
def test_column_group_by_no_argsort(T1b):
t1a = T1b["a"]
with pytest.raises(
TypeError, match=r"keys input \(list\) must have an `argsort` method"
):
# Pass a Python list with no argsort method
t1a.group_by(list(range(len(t1a))))
def test_table_group_by(T1):
"""
Test basic table group_by functionality for possible key types and for
masked/unmasked tables.
"""
for masked in (False, True):
t1 = QTable(T1, masked=masked)
# Group by a single column key specified by name
tg = t1.group_by("a")
assert np.all(tg.groups.indices == np.array([0, 1, 4, 8]))
assert str(tg.groups) == "<TableGroups indices=[0 1 4 8]>"
assert str(tg["a"].groups) == "<ColumnGroups indices=[0 1 4 8]>"
# Sorted by 'a' and in original order for rest
assert tg.pformat() == [
" a b c d q ",
" m ",
"--- --- --- --- ---",
" 0 a 0.0 4 4.0",
" 1 b 3.0 5 5.0",
" 1 a 2.0 6 6.0",
" 1 a 1.0 7 7.0",
" 2 c 7.0 0 0.0",
" 2 b 5.0 1 1.0",
" 2 b 6.0 2 2.0",
" 2 a 4.0 3 3.0",
]
assert tg.meta["ta"] == 1
assert tg["c"].meta["a"] == 1
assert tg["c"].description == "column c"
# Group by a table column
tg2 = t1.group_by(t1["a"])
assert tg.pformat() == tg2.pformat()
# Group by two columns spec'd by name
for keys in (["a", "b"], ("a", "b")):
tg = t1.group_by(keys)
assert np.all(tg.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8]))
# Sorted by 'a', 'b' and in original order for rest
assert tg.pformat() == [
" a b c d q ",
" m ",
"--- --- --- --- ---",
" 0 a 0.0 4 4.0",
" 1 a 2.0 6 6.0",
" 1 a 1.0 7 7.0",
" 1 b 3.0 5 5.0",
" 2 a 4.0 3 3.0",
" 2 b 5.0 1 1.0",
" 2 b 6.0 2 2.0",
" 2 c 7.0 0 0.0",
]
# Group by a Table
tg2 = t1.group_by(t1["a", "b"])
assert tg.pformat() == tg2.pformat()
# Group by a structured array
tg2 = t1.group_by(t1["a", "b"].as_array())
assert tg.pformat() == tg2.pformat()
# Group by a simple ndarray
tg = t1.group_by(np.array([0, 1, 0, 1, 2, 1, 0, 0]))
assert np.all(tg.groups.indices == np.array([0, 4, 7, 8]))
assert tg.pformat() == [
" a b c d q ",
" m ",
"--- --- --- --- ---",
" 2 c 7.0 0 0.0",
" 2 b 6.0 2 2.0",
" 1 a 2.0 6 6.0",
" 1 a 1.0 7 7.0",
" 2 b 5.0 1 1.0",
" 2 a 4.0 3 3.0",
" 1 b 3.0 5 5.0",
" 0 a 0.0 4 4.0",
]
def test_groups_keys(T1m: QTable):
tg = T1m.group_by("a")
unit = T1m["a"].unit or 1
keys = tg.groups.keys
assert keys.dtype.names == ("a",)
assert np.all(keys["a"] == np.array([0, 1, 2]) * unit)
tg = T1m.group_by(["a", "b"])
keys = tg.groups.keys
assert keys.dtype.names == ("a", "b")
assert np.all(keys["a"] == np.array([0, 1, 1, 2, 2, 2]) * unit)
assert np.all(keys["b"] == np.array(["a", "a", "b", "a", "b", "c"]))
# Grouping by Column ignores column name
tg = T1m.group_by(T1m["b"])
keys = tg.groups.keys
assert keys.dtype.names is None
def test_groups_keys_time(T1b: QTable):
"""Group a table with a time column using that column as a key."""
T1b = T1b.copy()
T1b["a"] = Time(T1b["a"], format="cxcsec")
tg = T1b.group_by("a")
keys = tg.groups.keys
assert keys.dtype.names == ("a",)
assert np.all(keys["a"] == Time(np.array([0, 1, 2]), format="cxcsec"))
tg = T1b.group_by(["a", "b"])
keys = tg.groups.keys
assert keys.dtype.names == ("a", "b")
assert np.all(keys["a"] == Time(np.array([0, 1, 1, 2, 2, 2]), format="cxcsec"))
assert np.all(keys["b"] == np.array(["a", "a", "b", "a", "b", "c"]))
def test_groups_iterator(T1):
tg = T1.group_by("a")
for ii, group in enumerate(tg.groups):
assert group.pformat() == tg.groups[ii].pformat()
assert group["a"][0] == tg["a"][tg.groups.indices[ii]]
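# Illustrative sketch, not part of the astropy test suite: each group i of a
# grouped table is simply the slice between consecutive entries of
# ``groups.indices``, i.e. tg[indices[i]:indices[i + 1]], which is what the
# iterator test above relies on. The helper name is hypothetical; the leading
# underscore keeps pytest from collecting it as a test.
def _example_groups_indices_slicing():
    from astropy.table import Table

    t = Table({"a": [2, 2, 0, 1, 1], "b": [10, 20, 30, 40, 50]})
    tg = t.group_by("a")
    indices = tg.groups.indices
    for i, group in enumerate(tg.groups):
        # Each group is the contiguous slice bounded by successive indices.
        assert group.pformat() == tg[indices[i] : indices[i + 1]].pformat()
    return indices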
def test_grouped_copy(T1):
"""
Test that copying a table or column copies the groups properly
"""
for masked in (False, True):
t1 = QTable(T1, masked=masked)
tg = t1.group_by("a")
tgc = tg.copy()
assert np.all(tgc.groups.indices == tg.groups.indices)
assert np.all(tgc.groups.keys == tg.groups.keys)
tac = tg["a"].copy()
assert np.all(tac.groups.indices == tg["a"].groups.indices)
c1 = t1["a"].copy()
gc1 = c1.group_by(t1["a"])
gc1c = gc1.copy()
assert np.all(gc1c.groups.indices == np.array([0, 1, 4, 8]))
def test_grouped_slicing(T1):
"""
Test that slicing a table removes previous grouping
"""
for masked in (False, True):
t1 = QTable(T1, masked=masked)
# Regular slice of a table
tg = t1.group_by("a")
tg2 = tg[3:5]
assert np.all(tg2.groups.indices == np.array([0, len(tg2)]))
assert tg2.groups.keys is None
def test_group_column_from_table(T1):
"""
Group a column that is part of a table
"""
cg = T1["c"].group_by(np.array(T1["a"]))
assert np.all(cg.groups.keys == np.array([0, 1, 2]))
assert np.all(cg.groups.indices == np.array([0, 1, 4, 8]))
def test_table_groups_mask_index(T1):
"""
Use boolean mask as item in __getitem__ for groups
"""
for masked in (False, True):
t1 = Table(T1, masked=masked).group_by("a")
t2 = t1.groups[np.array([True, False, True])]
assert len(t2.groups) == 2
assert t2.groups[0].pformat() == t1.groups[0].pformat()
assert t2.groups[1].pformat() == t1.groups[2].pformat()
assert np.all(t2.groups.keys["a"] == np.array([0, 2]))
def test_table_groups_array_index(T1):
"""
Use numpy array as item in __getitem__ for groups
"""
for masked in (False, True):
t1 = Table(T1, masked=masked).group_by("a")
t2 = t1.groups[np.array([0, 2])]
assert len(t2.groups) == 2
assert t2.groups[0].pformat() == t1.groups[0].pformat()
assert t2.groups[1].pformat() == t1.groups[2].pformat()
assert np.all(t2.groups.keys["a"] == np.array([0, 2]))
def test_table_groups_slicing(T1):
"""
Test that slicing table groups works
"""
for masked in (False, True):
t1 = Table(T1, masked=masked).group_by("a")
# slice(0, 2)
t2 = t1.groups[0:2]
assert len(t2.groups) == 2
assert t2.groups[0].pformat() == t1.groups[0].pformat()
assert t2.groups[1].pformat() == t1.groups[1].pformat()
assert np.all(t2.groups.keys["a"] == np.array([0, 1]))
# slice(1, 2)
t2 = t1.groups[1:2]
assert len(t2.groups) == 1
assert t2.groups[0].pformat() == t1.groups[1].pformat()
assert np.all(t2.groups.keys["a"] == np.array([1]))
# slice(0, 3, 2)
t2 = t1.groups[0:3:2]
assert len(t2.groups) == 2
assert t2.groups[0].pformat() == t1.groups[0].pformat()
assert t2.groups[1].pformat() == t1.groups[2].pformat()
assert np.all(t2.groups.keys["a"] == np.array([0, 2]))
def test_grouped_item_access(T1):
"""
Test that column slicing preserves grouping
"""
for masked in (False, True):
t1 = Table(T1, masked=masked)
# Regular slice of a table
tg = t1.group_by("a")
tgs = tg["a", "c", "d"]
assert np.all(tgs.groups.keys == tg.groups.keys)
assert np.all(tgs.groups.indices == tg.groups.indices)
tgsa = tgs.groups.aggregate(np.sum)
assert tgsa.pformat() == [
" a c d ",
"--- ---- ---",
" 0 0.0 4",
" 1 6.0 18",
" 2 22.0 6",
]
tgs = tg["c", "d"]
assert np.all(tgs.groups.keys == tg.groups.keys)
assert np.all(tgs.groups.indices == tg.groups.indices)
tgsa = tgs.groups.aggregate(np.sum)
assert tgsa.pformat() == [
" c d ",
"---- ---",
" 0.0 4",
" 6.0 18",
"22.0 6",
]
def test_mutable_operations(T1):
"""
    Operations like adding or deleting a row should remove grouping,
    but adding, removing, or renaming a column should retain grouping.
"""
for masked in (False, True):
t1 = QTable(T1, masked=masked)
# add row
tg = t1.group_by("a")
tg.add_row((0, "a", 3.0, 4, 4 * u.m))
assert np.all(tg.groups.indices == np.array([0, len(tg)]))
assert tg.groups.keys is None
# remove row
tg = t1.group_by("a")
tg.remove_row(4)
assert np.all(tg.groups.indices == np.array([0, len(tg)]))
assert tg.groups.keys is None
# add column
tg = t1.group_by("a")
indices = tg.groups.indices.copy()
tg.add_column(Column(name="e", data=np.arange(len(tg))))
assert np.all(tg.groups.indices == indices)
assert np.all(tg["e"].groups.indices == indices)
assert np.all(tg["e"].groups.keys == tg.groups.keys)
# remove column (not key column)
tg = t1.group_by("a")
tg.remove_column("b")
assert np.all(tg.groups.indices == indices)
# Still has original key col names
assert tg.groups.keys.dtype.names == ("a",)
assert np.all(tg["a"].groups.indices == indices)
# remove key column
tg = t1.group_by("a")
tg.remove_column("a")
assert np.all(tg.groups.indices == indices)
assert tg.groups.keys.dtype.names == ("a",)
assert np.all(tg["b"].groups.indices == indices)
# rename key column
tg = t1.group_by("a")
tg.rename_column("a", "aa")
assert np.all(tg.groups.indices == indices)
assert tg.groups.keys.dtype.names == ("a",)
assert np.all(tg["aa"].groups.indices == indices)
def test_group_by_masked(T1):
t1m = QTable(T1, masked=True)
t1m["c"].mask[4] = True
t1m["d"].mask[5] = True
assert t1m.group_by("a").pformat() == [
" a b c d q ",
" m ",
"--- --- --- --- ---",
" 0 a -- 4 4.0",
" 1 b 3.0 -- 5.0",
" 1 a 2.0 6 6.0",
" 1 a 1.0 7 7.0",
" 2 c 7.0 0 0.0",
" 2 b 5.0 1 1.0",
" 2 b 6.0 2 2.0",
" 2 a 4.0 3 3.0",
]
def test_group_by_errors(T1):
"""
Appropriate errors get raised.
"""
# Bad column name as string
with pytest.raises(ValueError):
T1.group_by("f")
# Bad column names in list
with pytest.raises(ValueError):
T1.group_by(["f", "g"])
# Wrong length array
with pytest.raises(ValueError):
T1.group_by(np.array([1, 2]))
# Wrong type
with pytest.raises(TypeError):
T1.group_by(None)
# Masked key column
t1 = QTable(T1, masked=True)
t1["a"].mask[4] = True
with pytest.raises(ValueError):
t1.group_by("a")
def test_groups_keys_meta(T1):
"""
    Make sure the keys' meta['grouped_by_table_cols'] flag is set correctly.
"""
# Group by column in this table
tg = T1.group_by("a")
assert tg.groups.keys.meta["grouped_by_table_cols"] is True
assert tg["c"].groups.keys.meta["grouped_by_table_cols"] is True
assert tg.groups[1].groups.keys.meta["grouped_by_table_cols"] is True
assert (
tg["d"]
.groups[np.array([False, True, True])]
.groups.keys.meta["grouped_by_table_cols"]
is True
)
# Group by external Table
tg = T1.group_by(T1["a", "b"])
assert tg.groups.keys.meta["grouped_by_table_cols"] is False
assert tg["c"].groups.keys.meta["grouped_by_table_cols"] is False
assert tg.groups[1].groups.keys.meta["grouped_by_table_cols"] is False
# Group by external numpy array
tg = T1.group_by(T1["a", "b"].as_array())
assert not hasattr(tg.groups.keys, "meta")
assert not hasattr(tg["c"].groups.keys, "meta")
# Group by Column
tg = T1.group_by(T1["a"])
assert "grouped_by_table_cols" not in tg.groups.keys.meta
assert "grouped_by_table_cols" not in tg["c"].groups.keys.meta
def test_table_aggregate(T1):
"""
Aggregate a table
"""
# Table with only summable cols
t1 = T1["a", "c", "d"]
tg = t1.group_by("a")
tga = tg.groups.aggregate(np.sum)
assert tga.pformat() == [
" a c d ",
"--- ---- ---",
" 0 0.0 4",
" 1 6.0 18",
" 2 22.0 6",
]
# Reverts to default groups
assert np.all(tga.groups.indices == np.array([0, 3]))
assert tga.groups.keys is None
# metadata survives
assert tga.meta["ta"] == 1
assert tga["c"].meta["a"] == 1
assert tga["c"].description == "column c"
# Aggregate with np.sum with masked elements. This results
# in one group with no elements, hence a nan result and conversion
# to float for the 'd' column.
t1m = QTable(T1, masked=True)
t1m["c"].mask[4:6] = True
t1m["d"].mask[4:6] = True
tg = t1m.group_by("a")
with pytest.warns(UserWarning, match="converting a masked element to nan"):
tga = tg.groups.aggregate(np.sum)
assert tga.pformat() == [
" a c d q ",
" m ",
"--- ---- ---- ----",
" 0 nan nan 4.0",
" 1 3.0 13.0 18.0",
" 2 22.0 6.0 6.0",
]
# Aggregate with np.sum with masked elements, but where every
# group has at least one remaining (unmasked) element. Then
# the int column stays as an int.
t1m = QTable(t1, masked=True)
t1m["c"].mask[5] = True
t1m["d"].mask[5] = True
tg = t1m.group_by("a")
tga = tg.groups.aggregate(np.sum)
assert tga.pformat() == [
" a c d ",
"--- ---- ---",
" 0 0.0 4",
" 1 3.0 13",
" 2 22.0 6",
]
    # Aggregate with a column type that cannot be supplied to the aggregating
# function. This raises a warning but still works.
tg = T1.group_by("a")
with pytest.warns(AstropyUserWarning, match="Cannot aggregate column"):
tga = tg.groups.aggregate(np.sum)
assert tga.pformat() == [
" a c d q ",
" m ",
"--- ---- --- ----",
" 0 0.0 4 4.0",
" 1 6.0 18 18.0",
" 2 22.0 6 6.0",
]
def test_table_aggregate_reduceat(T1):
"""
Aggregate table with functions which have a reduceat method
"""
# Comparison functions without reduceat
def np_mean(x):
return np.mean(x)
def np_sum(x):
return np.sum(x)
def np_add(x):
return np.add(x)
# Table with only summable cols
t1 = T1["a", "c", "d"]
tg = t1.group_by("a")
# Comparison
tga_r = tg.groups.aggregate(np.sum)
tga_a = tg.groups.aggregate(np.add)
tga_n = tg.groups.aggregate(np_sum)
assert np.all(tga_r == tga_n)
assert np.all(tga_a == tga_n)
assert tga_n.pformat() == [
" a c d ",
"--- ---- ---",
" 0 0.0 4",
" 1 6.0 18",
" 2 22.0 6",
]
tga_r = tg.groups.aggregate(np.mean)
tga_n = tg.groups.aggregate(np_mean)
assert np.all(tga_r == tga_n)
assert tga_n.pformat() == [
" a c d ",
"--- --- ---",
" 0 0.0 4.0",
" 1 2.0 6.0",
" 2 5.5 1.5",
]
# Binary ufunc np_add should raise warning without reduceat
t2 = T1["a", "c"]
tg = t2.group_by("a")
with pytest.warns(AstropyUserWarning, match="Cannot aggregate column"):
tga = tg.groups.aggregate(np_add)
assert tga.pformat() == [" a ", "---", " 0", " 1", " 2"]
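# Illustrative sketch, not part of the astropy test suite: when the aggregation
# function is a ufunc with a ``reduceat`` method (e.g. np.add), the per-group
# results can be computed directly from the group start indices, which is the
# fast path the reduceat comparison above exercises. The helper name is
# hypothetical.
def _example_reduceat_aggregation():
    import numpy as np

    data = np.array([7.0, 5.0, 6.0, 4.0, 0.0, 3.0, 2.0, 1.0])
    # Group boundaries in the style of groups.indices: the groups are
    # data[0:1], data[1:4] and data[4:8].
    indices = np.array([0, 1, 4, 8])
    # reduceat takes the start index of every group (all boundaries but the last).
    sums = np.add.reduceat(data, indices[:-1])
    assert np.allclose(sums, [7.0, 15.0, 6.0])
    return sums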
def test_column_aggregate(T1):
"""
Aggregate a single table column
"""
for masked in (False, True):
tg = QTable(T1, masked=masked).group_by("a")
tga = tg["c"].groups.aggregate(np.sum)
assert tga.pformat() == [" c ", "----", " 0.0", " 6.0", "22.0"]
@pytest.mark.skipif(
not NUMPY_LT_1_22 and NUMPY_LT_1_22_1,
reason="https://github.com/numpy/numpy/issues/20699",
)
def test_column_aggregate_f8():
"""https://github.com/astropy/astropy/issues/12706"""
# Just want to make sure it does not crash again.
for masked in (False, True):
tg = Table({"a": np.arange(2, dtype=">f8")}, masked=masked).group_by("a")
tga = tg["a"].groups.aggregate(np.sum)
assert tga.pformat() == [" a ", "---", "0.0", "1.0"]
def test_table_filter():
"""
Table groups filtering
"""
def all_positive(table, key_colnames):
return all(
np.all(table[colname] >= 0)
for colname in table.colnames
if colname not in key_colnames
)
# Negative value in 'a' column should not filter because it is a key col
t = Table.read(
[
" a c d",
" -2 7.0 0",
" -2 5.0 1",
" 0 0.0 4",
" 1 3.0 5",
" 1 2.0 -6",
" 1 1.0 7",
" 3 3.0 5",
" 3 -2.0 6",
" 3 1.0 7",
],
format="ascii",
)
tg = t.group_by("a")
t2 = tg.groups.filter(all_positive)
assert t2.groups[0].pformat() == [
" a c d ",
"--- --- ---",
" -2 7.0 0",
" -2 5.0 1",
]
assert t2.groups[1].pformat() == [" a c d ", "--- --- ---", " 0 0.0 4"]
def test_column_filter():
"""
    Column groups filtering
"""
def all_positive(column):
if np.any(column < 0):
return False
return True
# Negative value in 'a' column should not filter because it is a key col
t = Table.read(
[
" a c d",
" -2 7.0 0",
" -2 5.0 1",
" 0 0.0 4",
" 1 3.0 5",
" 1 2.0 -6",
" 1 1.0 7",
" 3 3.0 5",
" 3 -2.0 6",
" 3 1.0 7",
],
format="ascii",
)
tg = t.group_by("a")
c2 = tg["c"].groups.filter(all_positive)
assert len(c2.groups) == 3
assert c2.groups[0].pformat() == [" c ", "---", "7.0", "5.0"]
assert c2.groups[1].pformat() == [" c ", "---", "0.0"]
assert c2.groups[2].pformat() == [" c ", "---", "3.0", "2.0", "1.0"]
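# Illustrative sketch, not part of the astropy test suite: the two filter tests
# above use different callback signatures. A TableGroups filter function is
# called with (table, key_colnames) per group, while a ColumnGroups filter
# function receives just the column. The helper name is hypothetical.
def _example_group_filter_signatures():
    import numpy as np
    from astropy.table import Table

    t = Table({"a": [0, 0, 1, 1, 1], "c": [1.0, 2.0, 3.0, -4.0, 5.0]})
    tg = t.group_by("a")
    # Table-level filter: keep groups whose non-key values are all positive.
    t_ok = tg.groups.filter(
        lambda table, key_colnames: bool(np.all(table["c"] > 0))
    )
    # Column-level filter: keep column groups with a positive sum.
    c_ok = tg["c"].groups.filter(lambda column: column.sum() > 0)
    return t_ok, c_ok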
def test_group_mixins():
"""
Test grouping a table with mixin columns
"""
# Setup mixins
idx = np.arange(4)
x = np.array([3.0, 1.0, 2.0, 1.0])
q = x * u.m
lon = coordinates.Longitude(x * u.deg)
lat = coordinates.Latitude(x * u.deg)
# For Time do J2000.0 + few * 0.1 ns (this requires > 64 bit precision)
tm = time.Time(2000, format="jyear") + time.TimeDelta(x * 1e-10, format="sec")
sc = coordinates.SkyCoord(ra=lon, dec=lat)
aw = table_helpers.ArrayWrapper(x)
nd = np.array([(3, "c"), (1, "a"), (2, "b"), (1, "a")], dtype="<i4,|S1").view(
NdarrayMixin
)
qt = QTable(
[idx, x, q, lon, lat, tm, sc, aw, nd],
names=["idx", "x", "q", "lon", "lat", "tm", "sc", "aw", "nd"],
)
# Test group_by with each supported mixin type
mixin_keys = ["x", "q", "lon", "lat", "tm", "sc", "aw", "nd"]
for key in mixin_keys:
qtg = qt.group_by(key)
# Test that it got the sort order correct
assert np.all(qtg["idx"] == [1, 3, 2, 0])
# Test that the groups are right
# Note: skip testing SkyCoord column because that doesn't have equality
for name in ["x", "q", "lon", "lat", "tm", "aw", "nd"]:
assert np.all(qt[name][[1, 3]] == qtg.groups[0][name])
assert np.all(qt[name][[2]] == qtg.groups[1][name])
assert np.all(qt[name][[0]] == qtg.groups[2][name])
# Test that unique also works with mixins since most of the work is
    # done with group_by(). This uses *every* mixin column as a key.
uqt = unique(qt, keys=mixin_keys)
assert len(uqt) == 3
assert np.all(uqt["idx"] == [1, 2, 0])
assert np.all(uqt["x"] == [1.0, 2.0, 3.0])
# Column group_by() with mixins
idxg = qt["idx"].group_by(qt[mixin_keys])
assert np.all(idxg == [1, 3, 2, 0])
@pytest.mark.parametrize(
"col",
[
time.TimeDelta([1, 2], format="sec"),
time.Time([1, 2], format="cxcsec"),
coordinates.SkyCoord([1, 2], [3, 4], unit="deg,deg"),
],
)
def test_group_mixins_unsupported(col):
"""Test that aggregating unsupported mixins produces a warning only"""
t = Table([[1, 1], [3, 4], col], names=["a", "b", "mix"])
tg = t.group_by("a")
with pytest.warns(AstropyUserWarning, match="Cannot aggregate column 'mix'"):
tg.groups.aggregate(np.sum)
@pytest.mark.parametrize("add_index", [False, True])
def test_group_stable_sort(add_index):
"""Test that group_by preserves the order of the table.
    This table has 5 groups with an average of 200 rows per group, so it is
    effectively impossible for the rows within each group to be in order by chance.
This tests explicitly the case where grouping is done via the index sort.
See: https://github.com/astropy/astropy/issues/14882
"""
a = np.random.randint(0, 5, 1000)
b = np.arange(len(a))
t = Table([a, b], names=["a", "b"])
if add_index:
t.add_index("a")
tg = t.group_by("a")
for grp in tg.groups:
assert np.all(grp["b"] == np.sort(grp["b"]))
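# Illustrative sketch, not part of the astropy test suite: the stable-sort
# property checked above means rows inside each group keep their original
# table order, here tracked by a monotonically increasing "b" column. The
# helper name is hypothetical.
def _example_stable_group_order():
    import numpy as np
    from astropy.table import Table

    t = Table({"a": [1, 0, 1, 0, 1], "b": [0, 1, 2, 3, 4]})
    tg = t.group_by("a")
    for grp in tg.groups:
        # Within each group the original row order (column "b") is preserved.
        assert np.all(np.diff(grp["b"]) > 0)
    return tg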
|
313fedf227e503621544005aab354758e2aae96f21f8fa2653130748edd99a44 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
All of the pytest fixtures used by astropy.table are defined here.
`conftest.py` is a "special" module name for pytest that is always
imported but is never itself searched for tests, and it is the
recommended place to put fixtures that are shared between modules.
These fixtures cannot be defined in a module with a different name
and still be shared between modules.
"""
import pickle
from collections import OrderedDict
from copy import deepcopy
import numpy as np
import pytest
from astropy import coordinates, table, time
from astropy import units as u
from astropy.table import QTable, Table, pprint
from astropy.table.table_helpers import ArrayWrapper
@pytest.fixture(params=[table.Column, table.MaskedColumn])
def Column(request):
# Fixture to run all the Column tests for both an unmasked (ndarray)
# and masked (MaskedArray) column.
return request.param
class MaskedTable(table.Table):
def __init__(self, *args, **kwargs):
kwargs["masked"] = True
table.Table.__init__(self, *args, **kwargs)
class MyRow(table.Row):
pass
class MyColumn(table.Column):
pass
class MyMaskedColumn(table.MaskedColumn):
pass
class MyTableColumns(table.TableColumns):
pass
class MyTableFormatter(pprint.TableFormatter):
pass
class MyTable(table.Table):
Row = MyRow
Column = MyColumn
MaskedColumn = MyMaskedColumn
TableColumns = MyTableColumns
TableFormatter = MyTableFormatter
# Fixture to run the Table tests for the unmasked, masked, and subclassed
# Table/Column combinations.
@pytest.fixture(params=["unmasked", "masked", "subclass"])
def table_types(request):
class TableTypes:
def __init__(self, request):
if request.param == "unmasked":
self.Table = table.Table
self.Column = table.Column
elif request.param == "masked":
self.Table = MaskedTable
self.Column = table.MaskedColumn
elif request.param == "subclass":
self.Table = MyTable
self.Column = MyColumn
return TableTypes(request)
# Fixture providing matched Table/Column classes plus sample data for both
# the unmasked (ndarray) and masked (MaskedArray) cases.
@pytest.fixture(params=[False, True])
def table_data(request):
class TableData:
def __init__(self, request):
self.Table = MaskedTable if request.param else table.Table
self.Column = table.MaskedColumn if request.param else table.Column
self.COLS = [
self.Column(
name="a",
data=[1, 2, 3],
description="da",
format="%i",
meta={"ma": 1},
unit="ua",
),
self.Column(
name="b",
data=[4, 5, 6],
description="db",
format="%d",
meta={"mb": 1},
unit="ub",
),
self.Column(
name="c",
data=[7, 8, 9],
description="dc",
format="%f",
meta={"mc": 1},
unit="ub",
),
]
self.DATA = self.Table(self.COLS)
return TableData(request)
class SubclassTable(table.Table):
pass
@pytest.fixture(params=[True, False])
def tableclass(request):
return table.Table if request.param else SubclassTable
@pytest.fixture(params=list(range(0, pickle.HIGHEST_PROTOCOL + 1)))
def protocol(request):
"""
Fixture to run all the tests for all available pickle protocols.
"""
return request.param
# Fixture to run all tests for both an unmasked (Table) and masked
# (MaskedTable) table.
@pytest.fixture(params=[False, True])
def table_type(request):
return MaskedTable if request.param else table.Table
# Stuff for testing mixin columns
MIXIN_COLS = {
"quantity": [0, 1, 2, 3] * u.m,
"longitude": coordinates.Longitude(
[0.0, 1.0, 5.0, 6.0] * u.deg, wrap_angle=180.0 * u.deg
),
"latitude": coordinates.Latitude([5.0, 6.0, 10.0, 11.0] * u.deg),
"time": time.Time([2000, 2001, 2002, 2003], format="jyear"),
"timedelta": time.TimeDelta([1, 2, 3, 4], format="jd"),
"skycoord": coordinates.SkyCoord(ra=[0, 1, 2, 3] * u.deg, dec=[0, 1, 2, 3] * u.deg),
"sphericalrep": coordinates.SphericalRepresentation(
[0, 1, 2, 3] * u.deg, [0, 1, 2, 3] * u.deg, 1 * u.kpc
),
"cartesianrep": coordinates.CartesianRepresentation(
[0, 1, 2, 3] * u.pc, [4, 5, 6, 7] * u.pc, [9, 8, 8, 6] * u.pc
),
"sphericaldiff": coordinates.SphericalCosLatDifferential(
[0, 1, 2, 3] * u.mas / u.yr, [0, 1, 2, 3] * u.mas / u.yr, 10 * u.km / u.s
),
"arraywrap": ArrayWrapper([0, 1, 2, 3]),
"arrayswap": ArrayWrapper(np.arange(4, dtype="i").byteswap().newbyteorder()),
"ndarraylil": np.array(
[(7, "a"), (8, "b"), (9, "c"), (9, "c")], dtype="<i4,|S1"
).view(table.NdarrayMixin),
"ndarraybig": np.array(
[(7, "a"), (8, "b"), (9, "c"), (9, "c")], dtype=">i4,|S1"
).view(table.NdarrayMixin),
"stokescoord": coordinates.StokesCoord(range(1, 5)),
}
MIXIN_COLS["earthlocation"] = coordinates.EarthLocation(
lon=MIXIN_COLS["longitude"],
lat=MIXIN_COLS["latitude"],
height=MIXIN_COLS["quantity"],
)
MIXIN_COLS["sphericalrepdiff"] = coordinates.SphericalRepresentation(
MIXIN_COLS["sphericalrep"], differentials=MIXIN_COLS["sphericaldiff"]
)
@pytest.fixture(params=sorted(MIXIN_COLS))
def mixin_cols(request):
"""
Fixture to return a set of columns for mixin testing which includes
an index column 'i', two string cols 'a', 'b' (for joins etc), and
one of the available mixin column types.
"""
cols = OrderedDict()
mixin_cols = deepcopy(MIXIN_COLS)
cols["i"] = table.Column([0, 1, 2, 3], name="i")
cols["a"] = table.Column(["a", "b", "b", "c"], name="a")
cols["b"] = table.Column(["b", "c", "a", "d"], name="b")
cols["m"] = mixin_cols[request.param]
return cols
def _get_test_table():
T = QTable.read(
[
" a b c d",
" 2 c 7.0 0",
" 2 b 5.0 1",
" 2 b 6.0 2",
" 2 a 4.0 3",
" 0 a 0.0 4",
" 1 b 3.0 5",
" 1 a 2.0 6",
" 1 a 1.0 7",
],
format="ascii",
)
T["q"] = np.arange(len(T)) * u.m
T.meta.update({"ta": 1})
T["c"].meta.update({"a": 1})
T["c"].description = "column c"
return T
@pytest.fixture()
def T1b(request):
"""Basic table"""
T = _get_test_table()
return T
@pytest.fixture(params=[False, True])
def T1(request):
"""Basic table with or without index on integer column a"""
T = _get_test_table()
if request.param:
T.add_index("a")
return T
@pytest.fixture(params=[False, True])
def T1q(request):
"""Basic table where a column is integer or Quantity"""
T = _get_test_table()
if request.param:
T["a"] = T["a"] * u.m
return T
@pytest.fixture(params=[(False, False), (False, True), (True, False), (True, True)])
def T1m(request):
"""Basic table with or without index on column a, where a is integer or Quantity"""
T = _get_test_table()
add_index, is_quantity = request.param
if is_quantity:
T["a"] = T["a"] * u.m
if add_index:
T.add_index("a")
return T
@pytest.fixture(params=[Table, QTable])
def operation_table_type(request):
return request.param
|
cf89d0d7aab7127dc8609bdcf00023f3d619e8d2b59b6e897722065ed173e360 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from collections import OrderedDict
from contextlib import nullcontext
import numpy as np
import pytest
from astropy import table
from astropy import units as u
from astropy.coordinates import (
BaseRepresentationOrDifferential,
CartesianRepresentation,
SkyCoord,
    SphericalRepresentation,
    StokesCoord,
UnitSphericalRepresentation,
search_around_3d,
)
from astropy.coordinates.earth import EarthLocation
from astropy.coordinates.tests.helper import skycoord_equal
from astropy.coordinates.tests.test_representation import representation_equal
from astropy.table import Column, MaskedColumn, QTable, Table, TableMergeError
from astropy.table.operations import _get_out_class, join_distance, join_skycoord
from astropy.time import Time, TimeDelta
from astropy.units.quantity import Quantity
from astropy.utils import metadata
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.metadata import MergeConflictError
def sort_eq(list1, list2):
return sorted(list1) == sorted(list2)
def check_mask(col, exp_mask):
"""Check that col.mask == exp_mask"""
if hasattr(col, "mask"):
# Coerce expected mask into dtype of col.mask. In particular this is
# needed for types like EarthLocation where the mask is a structured
# array.
exp_mask = np.array(exp_mask).astype(col.mask.dtype)
out = np.all(col.mask == exp_mask)
else:
        # With no mask the check is OK if all the expected mask values
        # are False (i.e. no auto-conversion to MaskedQuantity if it was
        # not required by the join). Use np.any so that a plain Python list
        # of expected values is handled correctly.
        out = not np.any(exp_mask)
return out
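# Illustrative sketch, not part of the astropy test suite: check_mask above
# accepts both plain and masked columns. For a plain column (no ``mask``
# attribute) the check passes only if every expected mask value is False; for
# a masked column the mask is compared elementwise. The helper name is
# hypothetical.
def _example_check_mask_usage():
    import numpy as np
    from astropy.table import Column, MaskedColumn

    plain = Column([1, 2, 3])
    masked = MaskedColumn([1, 2, 3], mask=[False, True, False])
    # A plain column has no mask, so the expected mask must be all False.
    assert check_mask(plain, np.zeros(3, dtype=bool))
    # A masked column is compared elementwise against the expected mask.
    assert check_mask(masked, np.array([False, True, False]))
    return True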
class TestJoin:
def _setup(self, t_cls=Table):
lines1 = [
" a b c ",
" 0 foo L1",
" 1 foo L2",
" 1 bar L3",
" 2 bar L4",
]
lines2 = [
" a b d ",
" 1 foo R1",
" 1 foo R2",
" 2 bar R3",
" 4 bar R4",
]
self.t1 = t_cls.read(lines1, format="ascii")
self.t2 = t_cls.read(lines2, format="ascii")
self.t3 = t_cls(self.t2, copy=True)
self.t1.meta.update(OrderedDict([("b", [1, 2]), ("c", {"a": 1}), ("d", 1)]))
self.t2.meta.update(OrderedDict([("b", [3, 4]), ("c", {"b": 1}), ("a", 1)]))
self.t3.meta.update(OrderedDict([("b", 3), ("c", [1, 2]), ("d", 2), ("a", 1)]))
self.meta_merge = OrderedDict(
[
("b", [1, 2, 3, 4]),
("c", {"a": 1, "b": 1}),
("d", 1),
("a", 1),
]
)
def test_table_meta_merge(self, operation_table_type):
self._setup(operation_table_type)
out = table.join(self.t1, self.t2, join_type="inner")
assert out.meta == self.meta_merge
def test_table_meta_merge_conflict(self, operation_table_type):
self._setup(operation_table_type)
with pytest.warns(metadata.MergeConflictWarning) as w:
out = table.join(self.t1, self.t3, join_type="inner")
assert len(w) == 3
assert out.meta == self.t3.meta
with pytest.warns(metadata.MergeConflictWarning) as w:
out = table.join(
self.t1, self.t3, join_type="inner", metadata_conflicts="warn"
)
assert len(w) == 3
assert out.meta == self.t3.meta
out = table.join(
self.t1, self.t3, join_type="inner", metadata_conflicts="silent"
)
assert out.meta == self.t3.meta
with pytest.raises(MergeConflictError):
out = table.join(
self.t1, self.t3, join_type="inner", metadata_conflicts="error"
)
with pytest.raises(ValueError):
out = table.join(
self.t1, self.t3, join_type="inner", metadata_conflicts="nonsense"
)
def test_both_unmasked_inner(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
# Basic join with default parameters (inner join on common keys)
t12 = table.join(t1, t2)
assert type(t12) is operation_table_type
assert type(t12["a"]) is type(t1["a"])
assert type(t12["b"]) is type(t1["b"])
assert type(t12["c"]) is type(t1["c"])
assert type(t12["d"]) is type(t2["d"])
assert t12.masked is False
assert sort_eq(
t12.pformat(),
[
" a b c d ",
"--- --- --- ---",
" 1 foo L2 R1",
" 1 foo L2 R2",
" 2 bar L4 R3",
],
)
# Table meta merged properly
assert t12.meta == self.meta_merge
def test_both_unmasked_left_right_outer(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail("Quantity columns do not support masking.")
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
# Left join
t12 = table.join(t1, t2, join_type="left")
assert t12.has_masked_columns is True
assert t12.masked is False
for name in ("a", "b", "c"):
assert type(t12[name]) is Column
assert type(t12["d"]) is MaskedColumn
assert sort_eq(
t12.pformat(),
[
" a b c d ",
"--- --- --- ---",
" 0 foo L1 --",
" 1 bar L3 --",
" 1 foo L2 R1",
" 1 foo L2 R2",
" 2 bar L4 R3",
],
)
# Right join
t12 = table.join(t1, t2, join_type="right")
assert t12.has_masked_columns is True
assert t12.masked is False
assert sort_eq(
t12.pformat(),
[
" a b c d ",
"--- --- --- ---",
" 1 foo L2 R1",
" 1 foo L2 R2",
" 2 bar L4 R3",
" 4 bar -- R4",
],
)
# Outer join
t12 = table.join(t1, t2, join_type="outer")
assert t12.has_masked_columns is True
assert t12.masked is False
assert sort_eq(
t12.pformat(),
[
" a b c d ",
"--- --- --- ---",
" 0 foo L1 --",
" 1 bar L3 --",
" 1 foo L2 R1",
" 1 foo L2 R2",
" 2 bar L4 R3",
" 4 bar -- R4",
],
)
# Check that the common keys are 'a', 'b'
t12a = table.join(t1, t2, join_type="outer")
t12b = table.join(t1, t2, join_type="outer", keys=["a", "b"])
assert np.all(t12a.as_array() == t12b.as_array())
def test_both_unmasked_single_key_inner(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
# Inner join on 'a' column
t12 = table.join(t1, t2, keys="a")
assert type(t12) is operation_table_type
assert type(t12["a"]) is type(t1["a"])
assert type(t12["b_1"]) is type(t1["b"])
assert type(t12["c"]) is type(t1["c"])
assert type(t12["b_2"]) is type(t2["b"])
assert type(t12["d"]) is type(t2["d"])
assert t12.masked is False
assert sort_eq(
t12.pformat(),
[
" a b_1 c b_2 d ",
"--- --- --- --- ---",
" 1 foo L2 foo R1",
" 1 foo L2 foo R2",
" 1 bar L3 foo R1",
" 1 bar L3 foo R2",
" 2 bar L4 bar R3",
],
)
def test_both_unmasked_single_key_left_right_outer(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail("Quantity columns do not support masking.")
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
# Left join
t12 = table.join(t1, t2, join_type="left", keys="a")
assert t12.has_masked_columns is True
assert sort_eq(
t12.pformat(),
[
" a b_1 c b_2 d ",
"--- --- --- --- ---",
" 0 foo L1 -- --",
" 1 foo L2 foo R1",
" 1 foo L2 foo R2",
" 1 bar L3 foo R1",
" 1 bar L3 foo R2",
" 2 bar L4 bar R3",
],
)
# Right join
t12 = table.join(t1, t2, join_type="right", keys="a")
assert t12.has_masked_columns is True
assert sort_eq(
t12.pformat(),
[
" a b_1 c b_2 d ",
"--- --- --- --- ---",
" 1 foo L2 foo R1",
" 1 foo L2 foo R2",
" 1 bar L3 foo R1",
" 1 bar L3 foo R2",
" 2 bar L4 bar R3",
" 4 -- -- bar R4",
],
)
# Outer join
t12 = table.join(t1, t2, join_type="outer", keys="a")
assert t12.has_masked_columns is True
assert sort_eq(
t12.pformat(),
[
" a b_1 c b_2 d ",
"--- --- --- --- ---",
" 0 foo L1 -- --",
" 1 foo L2 foo R1",
" 1 foo L2 foo R2",
" 1 bar L3 foo R1",
" 1 bar L3 foo R2",
" 2 bar L4 bar R3",
" 4 -- -- bar R4",
],
)
def test_masked_unmasked(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail("Quantity columns do not support masking.")
self._setup(operation_table_type)
t1 = self.t1
t1m = operation_table_type(self.t1, masked=True)
t2 = self.t2
# Result table is never masked
t1m2 = table.join(t1m, t2, join_type="inner")
assert t1m2.masked is False
# Result should match non-masked result
t12 = table.join(t1, t2)
assert np.all(t12.as_array() == np.array(t1m2))
# Mask out some values in left table and make sure they propagate
t1m["b"].mask[1] = True
t1m["c"].mask[2] = True
t1m2 = table.join(t1m, t2, join_type="inner", keys="a")
assert sort_eq(
t1m2.pformat(),
[
" a b_1 c b_2 d ",
"--- --- --- --- ---",
" 1 -- L2 foo R1",
" 1 -- L2 foo R2",
" 1 bar -- foo R1",
" 1 bar -- foo R2",
" 2 bar L4 bar R3",
],
)
t21m = table.join(t2, t1m, join_type="inner", keys="a")
assert sort_eq(
t21m.pformat(),
[
" a b_1 d b_2 c ",
"--- --- --- --- ---",
" 1 foo R2 -- L2",
" 1 foo R2 bar --",
" 1 foo R1 -- L2",
" 1 foo R1 bar --",
" 2 bar R3 bar L4",
],
)
    def test_masked_masked(self, operation_table_type):
        """Two masked tables"""
        self._setup(operation_table_type)
if operation_table_type is QTable:
pytest.xfail("Quantity columns do not support masking.")
t1 = self.t1
t1m = operation_table_type(self.t1, masked=True)
t2 = self.t2
t2m = operation_table_type(self.t2, masked=True)
# Result table is never masked but original column types are preserved
t1m2m = table.join(t1m, t2m, join_type="inner")
assert t1m2m.masked is False
for col in t1m2m.itercols():
assert type(col) is MaskedColumn
# Result should match non-masked result
t12 = table.join(t1, t2)
assert np.all(t12.as_array() == np.array(t1m2m))
# Mask out some values in both tables and make sure they propagate
t1m["b"].mask[1] = True
t1m["c"].mask[2] = True
t2m["d"].mask[2] = True
t1m2m = table.join(t1m, t2m, join_type="inner", keys="a")
assert sort_eq(
t1m2m.pformat(),
[
" a b_1 c b_2 d ",
"--- --- --- --- ---",
" 1 -- L2 foo R1",
" 1 -- L2 foo R2",
" 1 bar -- foo R1",
" 1 bar -- foo R2",
" 2 bar L4 bar --",
],
)
def test_classes(self):
"""Ensure that classes and subclasses get through as expected"""
class MyCol(Column):
pass
class MyMaskedCol(MaskedColumn):
pass
t1 = Table()
t1["a"] = MyCol([1])
t1["b"] = MyCol([2])
t1["c"] = MyMaskedCol([3])
t2 = Table()
t2["a"] = Column([1, 2])
t2["d"] = MyCol([3, 4])
t2["e"] = MyMaskedCol([5, 6])
t12 = table.join(t1, t2, join_type="inner")
for name, exp_type in (
("a", MyCol),
("b", MyCol),
("c", MyMaskedCol),
("d", MyCol),
("e", MyMaskedCol),
):
            assert type(t12[name]) is exp_type
t21 = table.join(t2, t1, join_type="left")
# Note col 'b' gets upgraded from MyCol to MaskedColumn since it needs to be
# masked, but col 'c' stays since MyMaskedCol supports masking.
for name, exp_type in (
("a", MyCol),
("b", MaskedColumn),
("c", MyMaskedCol),
("d", MyCol),
("e", MyMaskedCol),
):
            assert type(t21[name]) is exp_type
    def test_col_rename(self, operation_table_type):
        """
        Test auto col renaming when there is a conflict. Use
        non-default values of uniq_col_name and table_names.
        """
        self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t12 = table.join(
t1,
t2,
uniq_col_name="x_{table_name}_{col_name}_y",
table_names=["L", "R"],
keys="a",
)
assert t12.colnames == ["a", "x_L_b_y", "c", "x_R_b_y", "d"]
    def test_rename_conflict(self, operation_table_type):
        """
        Test that auto-column rename fails because of a conflict
        with an existing column.
        """
        self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t1["b_1"] = 1 # Add a new column b_1 that will conflict with auto-rename
with pytest.raises(TableMergeError):
table.join(t1, t2, keys="a")
    def test_missing_keys(self, operation_table_type):
        """Merge on a key column that doesn't exist"""
        self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
with pytest.raises(TableMergeError):
table.join(t1, t2, keys=["a", "not there"])
    def test_bad_join_type(self, operation_table_type):
        """Bad join_type input"""
        self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
with pytest.raises(ValueError):
table.join(t1, t2, join_type="illegal value")
    def test_no_common_keys(self, operation_table_type):
        """Merge tables with no common keys"""
        self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
del t1["a"]
del t1["b"]
del t2["a"]
del t2["b"]
with pytest.raises(TableMergeError):
table.join(t1, t2)
    def test_masked_key_column(self, operation_table_type):
        """Merge on a key column that has a masked element"""
        self._setup(operation_table_type)
if operation_table_type is QTable:
pytest.xfail("Quantity columns do not support masking.")
t1 = self.t1
t2 = operation_table_type(self.t2, masked=True)
table.join(t1, t2) # OK
t2["a"].mask[0] = True
with pytest.raises(TableMergeError):
table.join(t1, t2)
def test_col_meta_merge(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t2.rename_column("d", "c") # force col conflict and renaming
meta1 = OrderedDict([("b", [1, 2]), ("c", {"a": 1}), ("d", 1)])
meta2 = OrderedDict([("b", [3, 4]), ("c", {"b": 1}), ("a", 1)])
        # Key col 'a': conflicting units; the merged column takes the last value ('m')
t1["a"].unit = "cm"
t2["a"].unit = "m"
# Key col 'b', take first value 't1_b'
t1["b"].info.description = "t1_b"
        # Key col 'b', take first non-empty value '%6s'
t2["b"].info.format = "%6s"
# Key col 'a', should be merged meta
t1["a"].info.meta = meta1
t2["a"].info.meta = meta2
# Key col 'b', should be meta2
t2["b"].info.meta = meta2
# All these should pass through
t1["c"].info.format = "%3s"
t1["c"].info.description = "t1_c"
t2["c"].info.format = "%6s"
t2["c"].info.description = "t2_c"
if operation_table_type is Table:
ctx = pytest.warns(
metadata.MergeConflictWarning,
match=(
r"In merged column 'a' the 'unit' attribute does not match \(cm"
r" != m\)"
),
)
else:
ctx = nullcontext()
with ctx:
t12 = table.join(t1, t2, keys=["a", "b"])
assert t12["a"].unit == "m"
assert t12["b"].info.description == "t1_b"
assert t12["b"].info.format == "%6s"
assert t12["a"].info.meta == self.meta_merge
assert t12["b"].info.meta == meta2
assert t12["c_1"].info.format == "%3s"
assert t12["c_1"].info.description == "t1_c"
assert t12["c_2"].info.format == "%6s"
assert t12["c_2"].info.description == "t2_c"
def test_join_multidimensional(self, operation_table_type):
self._setup(operation_table_type)
# Regression test for #2984, which was an issue where join did not work
# on multi-dimensional columns.
t1 = operation_table_type()
t1["a"] = [1, 2, 3]
t1["b"] = np.ones((3, 4))
t2 = operation_table_type()
t2["a"] = [1, 2, 3]
t2["c"] = [4, 5, 6]
t3 = table.join(t1, t2)
np.testing.assert_allclose(t3["a"], t1["a"])
np.testing.assert_allclose(t3["b"], t1["b"])
np.testing.assert_allclose(t3["c"], t2["c"])
def test_join_multidimensional_masked(self, operation_table_type):
self._setup(operation_table_type)
"""
Test for outer join with multidimensional columns where masking is required.
(Issue #4059).
"""
if operation_table_type is QTable:
pytest.xfail("Quantity columns do not support masking.")
a = table.MaskedColumn([1, 2, 3], name="a")
a2 = table.Column([1, 3, 4], name="a")
b = table.MaskedColumn(
[
[1, 2],
[3, 4],
[5, 6],
],
name="b",
mask=[
[1, 0],
[0, 1],
[0, 0],
],
)
c = table.Column(
[
[1, 1],
[2, 2],
[3, 3],
],
name="c",
)
t1 = operation_table_type([a, b])
t2 = operation_table_type([a2, c])
t12 = table.join(t1, t2, join_type="inner")
assert np.all(
t12["b"].mask
== [
[True, False],
[False, False],
]
)
assert not hasattr(t12["c"], "mask")
t12 = table.join(t1, t2, join_type="outer")
assert np.all(
t12["b"].mask
== [
[True, False],
[False, True],
[False, False],
[True, True],
]
)
assert np.all(
t12["c"].mask
== [
[False, False],
[True, True],
[False, False],
[False, False],
]
)
def test_mixin_functionality(self, mixin_cols):
col = mixin_cols["m"]
cls_name = type(col).__name__
len_col = len(col)
idx = np.arange(len_col)
t1 = table.QTable([idx, col], names=["idx", "m1"])
t2 = table.QTable([idx, col], names=["idx", "m2"])
# Set up join mismatches for different join_type cases
t1 = t1[[0, 1, 3]]
t2 = t2[[0, 2, 3]]
# Test inner join, which works for all mixin_cols
out = table.join(t1, t2, join_type="inner")
assert len(out) == 2
assert out["m2"].__class__ is col.__class__
assert np.all(out["idx"] == [0, 3])
if cls_name == "SkyCoord":
# SkyCoord doesn't support __eq__ so use our own
assert skycoord_equal(out["m1"], col[[0, 3]])
assert skycoord_equal(out["m2"], col[[0, 3]])
elif "Repr" in cls_name or "Diff" in cls_name:
assert np.all(representation_equal(out["m1"], col[[0, 3]]))
assert np.all(representation_equal(out["m2"], col[[0, 3]]))
else:
assert np.all(out["m1"] == col[[0, 3]])
assert np.all(out["m2"] == col[[0, 3]])
# Check for left, right, outer join which requires masking. Works for
# the listed mixins classes.
if isinstance(col, (Quantity, Time, TimeDelta)):
out = table.join(t1, t2, join_type="left")
assert len(out) == 3
assert np.all(out["idx"] == [0, 1, 3])
assert np.all(out["m1"] == t1["m1"])
assert np.all(out["m2"] == t2["m2"])
            assert check_mask(out["m1"], [False, False, False])
            assert check_mask(out["m2"], [False, True, False])
out = table.join(t1, t2, join_type="right")
assert len(out) == 3
assert np.all(out["idx"] == [0, 2, 3])
assert np.all(out["m1"] == t1["m1"])
assert np.all(out["m2"] == t2["m2"])
            assert check_mask(out["m1"], [False, True, False])
            assert check_mask(out["m2"], [False, False, False])
out = table.join(t1, t2, join_type="outer")
assert len(out) == 4
assert np.all(out["idx"] == [0, 1, 2, 3])
assert np.all(out["m1"] == col)
assert np.all(out["m2"] == col)
assert check_mask(out["m1"], [False, False, True, False])
assert check_mask(out["m2"], [False, True, False, False])
else:
# Otherwise make sure it fails with the right exception message
for join_type in ("outer", "left", "right"):
with pytest.raises(NotImplementedError) as err:
table.join(t1, t2, join_type=join_type)
assert "join requires masking" in str(
err.value
) or "join unavailable" in str(err.value)
def test_cartesian_join(self, operation_table_type):
t1 = Table(rows=[(1, "a"), (2, "b")], names=["a", "b"])
t2 = Table(rows=[(3, "c"), (4, "d")], names=["a", "c"])
t12 = table.join(t1, t2, join_type="cartesian")
assert t1.colnames == ["a", "b"]
assert t2.colnames == ["a", "c"]
assert len(t12) == len(t1) * len(t2)
assert str(t12).splitlines() == [
"a_1 b a_2 c ",
"--- --- --- ---",
" 1 a 3 c",
" 1 a 4 d",
" 2 b 3 c",
" 2 b 4 d",
]
with pytest.raises(ValueError, match="cannot supply keys for a cartesian join"):
t12 = table.join(t1, t2, join_type="cartesian", keys="a")
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_join_with_join_skycoord_sky(self):
sc1 = SkyCoord([0, 1, 1.1, 2], [0, 0, 0, 0], unit="deg")
sc2 = SkyCoord([0.5, 1.05, 2.1], [0, 0, 0], unit="deg")
t1 = Table([sc1], names=["sc"])
t2 = Table([sc2], names=["sc"])
t12 = table.join(t1, t2, join_funcs={"sc": join_skycoord(0.2 * u.deg)})
exp = [
"sc_id sc_1 sc_2 ",
" deg,deg deg,deg ",
"----- ------- --------",
" 1 1.0,0.0 1.05,0.0",
" 1 1.1,0.0 1.05,0.0",
" 2 2.0,0.0 2.1,0.0",
]
assert str(t12).splitlines() == exp
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("distance_func", ["search_around_3d", search_around_3d])
def test_join_with_join_skycoord_3d(self, distance_func):
sc1 = SkyCoord([0, 1, 1.1, 2] * u.deg, [0, 0, 0, 0] * u.deg, [1, 1, 2, 1] * u.m)
sc2 = SkyCoord([0.5, 1.05, 2.1] * u.deg, [0, 0, 0] * u.deg, [1, 1, 1] * u.m)
t1 = Table([sc1], names=["sc"])
t2 = Table([sc2], names=["sc"])
join_func = join_skycoord(np.deg2rad(0.2) * u.m, distance_func=distance_func)
t12 = table.join(t1, t2, join_funcs={"sc": join_func})
exp = [
"sc_id sc_1 sc_2 ",
" deg,deg,m deg,deg,m ",
"----- ----------- ------------",
" 1 1.0,0.0,1.0 1.05,0.0,1.0",
" 2 2.0,0.0,1.0 2.1,0.0,1.0",
]
assert str(t12).splitlines() == exp
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_join_with_join_distance_1d(self):
c1 = [0, 1, 1.1, 2]
c2 = [0.5, 1.05, 2.1]
t1 = Table([c1], names=["col"])
t2 = Table([c2], names=["col"])
join_func = join_distance(
0.2, kdtree_args={"leafsize": 32}, query_args={"p": 2}
)
t12 = table.join(t1, t2, join_type="outer", join_funcs={"col": join_func})
exp = [
"col_id col_1 col_2",
"------ ----- -----",
" 1 1.0 1.05",
" 1 1.1 1.05",
" 2 2.0 2.1",
" 3 0.0 --",
" 4 -- 0.5",
]
assert str(t12).splitlines() == exp
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_join_with_join_distance_1d_multikey(self):
from astropy.table.operations import _apply_join_funcs
c1 = [0, 1, 1.1, 1.2, 2]
id1 = [0, 1, 2, 2, 3]
o1 = ["a", "b", "c", "d", "e"]
c2 = [0.5, 1.05, 2.1]
id2 = [0, 2, 4]
o2 = ["z", "y", "x"]
t1 = Table([c1, id1, o1], names=["col", "id", "o1"])
t2 = Table([c2, id2, o2], names=["col", "id", "o2"])
join_func = join_distance(0.2)
join_funcs = {"col": join_func}
t12 = table.join(t1, t2, join_type="outer", join_funcs=join_funcs)
exp = [
"col_id col_1 id o1 col_2 o2",
"------ ----- --- --- ----- ---",
" 1 1.0 1 b -- --",
" 1 1.1 2 c 1.05 y",
" 1 1.2 2 d 1.05 y",
" 2 2.0 3 e -- --",
" 2 -- 4 -- 2.1 x",
" 3 0.0 0 a -- --",
" 4 -- 0 -- 0.5 z",
]
assert str(t12).splitlines() == exp
left, right, keys = _apply_join_funcs(t1, t2, ("col", "id"), join_funcs)
assert keys == ("col_id", "id")
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_join_with_join_distance_1d_quantity(self):
c1 = [0, 1, 1.1, 2] * u.m
c2 = [500, 1050, 2100] * u.mm
t1 = QTable([c1], names=["col"])
t2 = QTable([c2], names=["col"])
join_func = join_distance(20 * u.cm)
t12 = table.join(t1, t2, join_funcs={"col": join_func})
exp = [
"col_id col_1 col_2 ",
" m mm ",
"------ ----- ------",
" 1 1.0 1050.0",
" 1 1.1 1050.0",
" 2 2.0 2100.0",
]
assert str(t12).splitlines() == exp
# Generate column name conflict
t2["col_id"] = [0, 0, 0]
t2["col__id"] = [0, 0, 0]
t12 = table.join(t1, t2, join_funcs={"col": join_func})
exp = [
"col___id col_1 col_2 col_id col__id",
" m mm ",
"-------- ----- ------ ------ -------",
" 1 1.0 1050.0 0 0",
" 1 1.1 1050.0 0 0",
" 2 2.0 2100.0 0 0",
]
assert str(t12).splitlines() == exp
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_join_with_join_distance_2d(self):
c1 = np.array([[0, 1, 1.1, 2], [0, 0, 1, 0]]).transpose()
c2 = np.array([[0.5, 1.05, 2.1], [0, 0, 0]]).transpose()
t1 = Table([c1], names=["col"])
t2 = Table([c2], names=["col"])
join_func = join_distance(
0.2, kdtree_args={"leafsize": 32}, query_args={"p": 2}
)
t12 = table.join(t1, t2, join_type="outer", join_funcs={"col": join_func})
exp = [
"col_id col_1 col_2 ",
f'{t12["col_id"].dtype.name} float64[2] float64[2]', # int32 or int64
"------ ---------- -----------",
" 1 1.0 .. 0.0 1.05 .. 0.0",
" 2 2.0 .. 0.0 2.1 .. 0.0",
" 3 0.0 .. 0.0 -- .. --",
" 4 1.1 .. 1.0 -- .. --",
" 5 -- .. -- 0.5 .. 0.0",
]
assert t12.pformat(show_dtype=True) == exp
def test_keys_left_right_basic(self):
"""Test using the keys_left and keys_right args to specify different
join keys. This takes the standard test case but renames column 'a'
to 'x' and 'y' respectively for tables 1 and 2. Then it compares the
normal join on 'a' to the new join on 'x' and 'y'."""
self._setup()
for join_type in ("inner", "left", "right", "outer"):
t1 = self.t1.copy()
t2 = self.t2.copy()
# Expected is same as joining on 'a' but with names 'x', 'y' instead
t12_exp = table.join(t1, t2, keys="a", join_type=join_type)
t12_exp.add_column(t12_exp["a"], name="x", index=1)
t12_exp.add_column(t12_exp["a"], name="y", index=len(t1.colnames) + 1)
del t12_exp["a"]
# Different key names
t1.rename_column("a", "x")
t2.rename_column("a", "y")
keys_left_list = ["x"] # Test string key name
keys_right_list = [["y"]] # Test list of string key names
if join_type == "outer":
# Just do this for the outer join (others are the same)
keys_left_list.append([t1["x"].tolist()]) # Test list key column
keys_right_list.append([t2["y"]]) # Test Column key column
for keys_left, keys_right in zip(keys_left_list, keys_right_list):
t12 = table.join(
t1,
t2,
keys_left=keys_left,
keys_right=keys_right,
join_type=join_type,
)
assert t12.colnames == t12_exp.colnames
for col in t12.values_equal(t12_exp).itercols():
assert np.all(col)
assert t12_exp.meta == t12.meta
def test_keys_left_right_exceptions(self):
"""Test exceptions using the keys_left and keys_right args to specify
different join keys.
"""
self._setup()
t1 = self.t1
t2 = self.t2
msg = r"left table does not have key column 'z'"
with pytest.raises(ValueError, match=msg):
table.join(t1, t2, keys_left="z", keys_right=["a"])
msg = r"left table has different length from key \[1, 2\]"
with pytest.raises(ValueError, match=msg):
table.join(t1, t2, keys_left=[[1, 2]], keys_right=["a"])
msg = r"keys arg must be None if keys_left and keys_right are supplied"
with pytest.raises(ValueError, match=msg):
table.join(t1, t2, keys_left="z", keys_right=["a"], keys="a")
msg = r"keys_left and keys_right args must have same length"
with pytest.raises(ValueError, match=msg):
table.join(t1, t2, keys_left=["a", "b"], keys_right=["a"])
msg = r"keys_left and keys_right must both be provided"
with pytest.raises(ValueError, match=msg):
table.join(t1, t2, keys_left=["a", "b"])
msg = r"cannot supply join_funcs arg and keys_left / keys_right"
with pytest.raises(ValueError, match=msg):
table.join(t1, t2, keys_left=["a"], keys_right=["a"], join_funcs={})
def test_join_structured_column(self):
"""Regression tests for gh-13271."""
# Two tables with matching names, including a structured column.
t1 = Table(
[
np.array([(1.0, 1), (2.0, 2)], dtype=[("f", "f8"), ("i", "i8")]),
["one", "two"],
],
names=["structured", "string"],
)
t2 = Table(
[
np.array([(2.0, 2), (4.0, 4)], dtype=[("f", "f8"), ("i", "i8")]),
["three", "four"],
],
names=["structured", "string"],
)
t12 = table.join(t1, t2, ["structured"], join_type="outer")
assert t12.pformat() == [
"structured [f, i] string_1 string_2",
"----------------- -------- --------",
" (1., 1) one --",
" (2., 2) two three",
" (4., 4) -- four",
]
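# Illustrative sketch, not part of the astropy test suite: the join_funcs
# mechanism exercised in TestJoin above matches rows by proximity rather than
# exact equality; join_distance builds a fuzzy key column from a KDTree over
# the values (requires scipy). The helper name is hypothetical.
def _example_fuzzy_join_distance():
    from astropy.table import Table, join
    from astropy.table.operations import join_distance

    t1 = Table({"col": [0.0, 1.0, 1.1, 2.0]})
    t2 = Table({"col": [0.5, 1.05, 2.1]})
    # Rows whose "col" values differ by less than 0.2 are treated as matching.
    t12 = join(t1, t2, join_funcs={"col": join_distance(0.2)})
    return t12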
class TestSetdiff:
def _setup(self, t_cls=Table):
lines1 = [" a b ", " 0 foo ", " 1 foo ", " 1 bar ", " 2 bar "]
lines2 = [" a b ", " 0 foo ", " 3 foo ", " 4 bar ", " 2 bar "]
lines3 = [
" a b d ",
" 0 foo R1",
" 8 foo R2",
" 1 bar R3",
" 4 bar R4",
]
self.t1 = t_cls.read(lines1, format="ascii")
self.t2 = t_cls.read(lines2, format="ascii")
self.t3 = t_cls.read(lines3, format="ascii")
def test_default_same_columns(self, operation_table_type):
self._setup(operation_table_type)
out = table.setdiff(self.t1, self.t2)
assert type(out["a"]) is type(self.t1["a"])
assert type(out["b"]) is type(self.t1["b"])
assert out.pformat() == [" a b ", "--- ---", " 1 bar", " 1 foo"]
def test_default_same_tables(self, operation_table_type):
self._setup(operation_table_type)
out = table.setdiff(self.t1, self.t1)
assert type(out["a"]) is type(self.t1["a"])
assert type(out["b"]) is type(self.t1["b"])
assert out.pformat() == [
" a b ",
"--- ---",
]
def test_extra_col_left_table(self, operation_table_type):
self._setup(operation_table_type)
with pytest.raises(ValueError):
table.setdiff(self.t3, self.t1)
def test_extra_col_right_table(self, operation_table_type):
self._setup(operation_table_type)
out = table.setdiff(self.t1, self.t3)
assert type(out["a"]) is type(self.t1["a"])
assert type(out["b"]) is type(self.t1["b"])
assert out.pformat() == [
" a b ",
"--- ---",
" 1 foo",
" 2 bar",
]
def test_keys(self, operation_table_type):
self._setup(operation_table_type)
out = table.setdiff(self.t3, self.t1, keys=["a", "b"])
assert type(out["a"]) is type(self.t1["a"])
assert type(out["b"]) is type(self.t1["b"])
assert out.pformat() == [
" a b d ",
"--- --- ---",
" 4 bar R4",
" 8 foo R2",
]
def test_missing_key(self, operation_table_type):
self._setup(operation_table_type)
with pytest.raises(ValueError):
table.setdiff(self.t3, self.t1, keys=["a", "d"])
class TestVStack:
def _setup(self, t_cls=Table):
self.t1 = t_cls.read(
[
" a b",
" 0. foo",
" 1. bar",
],
format="ascii",
)
self.t2 = t_cls.read(
[
" a b c",
" 2. pez 4",
" 3. sez 5",
],
format="ascii",
)
self.t3 = t_cls.read(
[
" a b",
" 4. 7",
" 5. 8",
" 6. 9",
],
format="ascii",
)
self.t4 = t_cls(self.t1, copy=True, masked=t_cls is Table)
# The following table has meta-data that conflicts with t1
self.t5 = t_cls(self.t1, copy=True)
self.t1.meta.update(OrderedDict([("b", [1, 2]), ("c", {"a": 1}), ("d", 1)]))
self.t2.meta.update(OrderedDict([("b", [3, 4]), ("c", {"b": 1}), ("a", 1)]))
self.t4.meta.update(OrderedDict([("b", [5, 6]), ("c", {"c": 1}), ("e", 1)]))
self.t5.meta.update(OrderedDict([("b", 3), ("c", "k"), ("d", 1)]))
self.meta_merge = OrderedDict(
[
("b", [1, 2, 3, 4, 5, 6]),
("c", {"a": 1, "b": 1, "c": 1}),
("d", 1),
("a", 1),
("e", 1),
]
)
def test_validate_join_type(self):
self._setup()
with pytest.raises(TypeError, match="Did you accidentally call vstack"):
table.vstack(self.t1, self.t2)
def test_stack_rows(self, operation_table_type):
self._setup(operation_table_type)
t2 = self.t1.copy()
t2.meta.clear()
out = table.vstack([self.t1, t2[1]])
assert type(out["a"]) is type(self.t1["a"])
assert type(out["b"]) is type(self.t1["b"])
assert out.pformat() == [
" a b ",
"--- ---",
"0.0 foo",
"1.0 bar",
"1.0 bar",
]
def test_stack_table_column(self, operation_table_type):
self._setup(operation_table_type)
t2 = self.t1.copy()
t2.meta.clear()
out = table.vstack([self.t1, t2["a"]])
assert out.masked is False
assert out.pformat() == [
" a b ",
"--- ---",
"0.0 foo",
"1.0 bar",
"0.0 --",
"1.0 --",
]
def test_table_meta_merge(self, operation_table_type):
self._setup(operation_table_type)
out = table.vstack([self.t1, self.t2, self.t4], join_type="inner")
assert out.meta == self.meta_merge
def test_table_meta_merge_conflict(self, operation_table_type):
self._setup(operation_table_type)
with pytest.warns(metadata.MergeConflictWarning) as w:
out = table.vstack([self.t1, self.t5], join_type="inner")
assert len(w) == 2
assert out.meta == self.t5.meta
with pytest.warns(metadata.MergeConflictWarning) as w:
out = table.vstack(
[self.t1, self.t5], join_type="inner", metadata_conflicts="warn"
)
assert len(w) == 2
assert out.meta == self.t5.meta
out = table.vstack(
[self.t1, self.t5], join_type="inner", metadata_conflicts="silent"
)
assert out.meta == self.t5.meta
with pytest.raises(MergeConflictError):
out = table.vstack(
[self.t1, self.t5], join_type="inner", metadata_conflicts="error"
)
with pytest.raises(ValueError):
out = table.vstack(
[self.t1, self.t5], join_type="inner", metadata_conflicts="nonsense"
)
def test_bad_input_type(self, operation_table_type):
self._setup(operation_table_type)
with pytest.raises(ValueError):
table.vstack([])
with pytest.raises(TypeError):
table.vstack(1)
with pytest.raises(TypeError):
table.vstack([self.t2, 1])
with pytest.raises(ValueError):
table.vstack([self.t1, self.t2], join_type="invalid join type")
def test_stack_basic_inner(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t4 = self.t4
t12 = table.vstack([t1, t2], join_type="inner")
assert t12.masked is False
assert type(t12) is operation_table_type
assert type(t12["a"]) is type(t1["a"])
assert type(t12["b"]) is type(t1["b"])
assert t12.pformat() == [
" a b ",
"--- ---",
"0.0 foo",
"1.0 bar",
"2.0 pez",
"3.0 sez",
]
t124 = table.vstack([t1, t2, t4], join_type="inner")
assert type(t124) is operation_table_type
assert type(t12["a"]) is type(t1["a"])
assert type(t12["b"]) is type(t1["b"])
assert t124.pformat() == [
" a b ",
"--- ---",
"0.0 foo",
"1.0 bar",
"2.0 pez",
"3.0 sez",
"0.0 foo",
"1.0 bar",
]
def test_stack_basic_outer(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail("Quantity columns do not support masking.")
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t4 = self.t4
t12 = table.vstack([t1, t2], join_type="outer")
assert t12.masked is False
assert t12.pformat() == [
" a b c ",
"--- --- ---",
"0.0 foo --",
"1.0 bar --",
"2.0 pez 4",
"3.0 sez 5",
]
t124 = table.vstack([t1, t2, t4], join_type="outer")
assert t124.masked is False
assert t124.pformat() == [
" a b c ",
"--- --- ---",
"0.0 foo --",
"1.0 bar --",
"2.0 pez 4",
"3.0 sez 5",
"0.0 foo --",
"1.0 bar --",
]
def test_stack_incompatible(self, operation_table_type):
self._setup(operation_table_type)
with pytest.raises(TableMergeError) as excinfo:
table.vstack([self.t1, self.t3], join_type="inner")
assert "The 'b' columns have incompatible types: {}".format(
[self.t1["b"].dtype.name, self.t3["b"].dtype.name]
) in str(excinfo.value)
with pytest.raises(TableMergeError) as excinfo:
table.vstack([self.t1, self.t3], join_type="outer")
assert "The 'b' columns have incompatible types:" in str(excinfo.value)
with pytest.raises(TableMergeError):
table.vstack([self.t1, self.t2], join_type="exact")
t1_reshape = self.t1.copy()
t1_reshape["b"].shape = [2, 1]
with pytest.raises(TableMergeError) as excinfo:
table.vstack([self.t1, t1_reshape])
assert "have different shape" in str(excinfo.value)
def test_vstack_one_masked(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail("Quantity columns do not support masking.")
self._setup(operation_table_type)
t1 = self.t1
t4 = self.t4
t4["b"].mask[1] = True
t14 = table.vstack([t1, t4])
assert t14.masked is False
assert t14.pformat() == [
" a b ",
"--- ---",
"0.0 foo",
"1.0 bar",
"0.0 foo",
"1.0 --",
]
def test_col_meta_merge_inner(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t4 = self.t4
        # Key col 'a', should take last value ('km')
t1["a"].info.unit = "cm"
t2["a"].info.unit = "m"
t4["a"].info.unit = "km"
# Key col 'a' format should take last when all match
t1["a"].info.format = "%f"
t2["a"].info.format = "%f"
t4["a"].info.format = "%f"
# Key col 'b', take first value 't1_b'
t1["b"].info.description = "t1_b"
# Key col 'b', take first non-empty value '%6s'
t4["b"].info.format = "%6s"
# Key col 'a', should be merged meta
t1["a"].info.meta.update(
OrderedDict([("b", [1, 2]), ("c", {"a": 1}), ("d", 1)])
)
t2["a"].info.meta.update(
OrderedDict([("b", [3, 4]), ("c", {"b": 1}), ("a", 1)])
)
t4["a"].info.meta.update(
OrderedDict([("b", [5, 6]), ("c", {"c": 1}), ("e", 1)])
)
# Key col 'b', should be meta2
t2["b"].info.meta.update(
OrderedDict([("b", [3, 4]), ("c", {"b": 1}), ("a", 1)])
)
if operation_table_type is Table:
ctx = pytest.warns(metadata.MergeConflictWarning)
else:
ctx = nullcontext()
with ctx as warning_lines:
out = table.vstack([t1, t2, t4], join_type="inner")
if operation_table_type is Table:
assert len(warning_lines) == 2
assert (
"In merged column 'a' the 'unit' attribute does not match (cm != m)"
in str(warning_lines[0].message)
)
assert (
"In merged column 'a' the 'unit' attribute does not match (m != km)"
in str(warning_lines[1].message)
)
# Check units are suitably ignored for a regular Table
assert out.pformat() == [
" a b ",
" km ",
"-------- ------",
"0.000000 foo",
"1.000000 bar",
"2.000000 pez",
"3.000000 sez",
"0.000000 foo",
"1.000000 bar",
]
else:
# Check QTable correctly dealt with units.
assert out.pformat() == [
" a b ",
" km ",
"-------- ------",
"0.000000 foo",
"0.000010 bar",
"0.002000 pez",
"0.003000 sez",
"0.000000 foo",
"1.000000 bar",
]
assert out["a"].info.unit == "km"
assert out["a"].info.format == "%f"
assert out["b"].info.description == "t1_b"
assert out["b"].info.format == "%6s"
assert out["a"].info.meta == self.meta_merge
assert out["b"].info.meta == OrderedDict(
[("b", [3, 4]), ("c", {"b": 1}), ("a", 1)]
)
def test_col_meta_merge_outer(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail("Quantity columns do not support masking.")
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t4 = self.t4
# Key col 'a', should take last value ('km')
t1["a"].unit = "cm"
t2["a"].unit = "m"
t4["a"].unit = "km"
# Key col 'a' format should take last when all match
t1["a"].info.format = "%0d"
t2["a"].info.format = "%0d"
t4["a"].info.format = "%0d"
# Key col 'b', take first value 't1_b'
t1["b"].info.description = "t1_b"
# Key col 'b', take first non-empty value '%6s'
t4["b"].info.format = "%6s"
# Key col 'a', should be merged meta
t1["a"].info.meta.update(
OrderedDict([("b", [1, 2]), ("c", {"a": 1}), ("d", 1)])
)
t2["a"].info.meta.update(
OrderedDict([("b", [3, 4]), ("c", {"b": 1}), ("a", 1)])
)
t4["a"].info.meta.update(
OrderedDict([("b", [5, 6]), ("c", {"c": 1}), ("e", 1)])
)
# Key col 'b', should be meta2
t2["b"].info.meta.update(
OrderedDict([("b", [3, 4]), ("c", {"b": 1}), ("a", 1)])
)
# All these should pass through
t2["c"].unit = "m"
t2["c"].info.format = "%6s"
t2["c"].info.description = "t2_c"
with pytest.warns(metadata.MergeConflictWarning) as warning_lines:
out = table.vstack([t1, t2, t4], join_type="outer")
assert len(warning_lines) == 2
assert (
"In merged column 'a' the 'unit' attribute does not match (cm != m)"
in str(warning_lines[0].message)
)
assert (
"In merged column 'a' the 'unit' attribute does not match (m != km)"
in str(warning_lines[1].message)
)
assert out["a"].unit == "km"
assert out["a"].info.format == "%0d"
assert out["b"].info.description == "t1_b"
assert out["b"].info.format == "%6s"
assert out["a"].info.meta == self.meta_merge
assert out["b"].info.meta == OrderedDict(
[("b", [3, 4]), ("c", {"b": 1}), ("a", 1)]
)
assert out["c"].info.unit == "m"
assert out["c"].info.format == "%6s"
assert out["c"].info.description == "t2_c"
def test_vstack_one_table(self, operation_table_type):
"""Regression test for issue #3313"""
self._setup(operation_table_type)
assert (self.t1 == table.vstack(self.t1)).all()
assert (self.t1 == table.vstack([self.t1])).all()
def test_mixin_functionality(self, mixin_cols):
col = mixin_cols["m"]
len_col = len(col)
t = table.QTable([col], names=["a"])
cls_name = type(col).__name__
# Vstack works for these classes:
if isinstance(
col,
(
u.Quantity,
Time,
TimeDelta,
SkyCoord,
EarthLocation,
BaseRepresentationOrDifferential,
StokesCoord,
),
):
out = table.vstack([t, t])
assert len(out) == len_col * 2
if cls_name == "SkyCoord":
# Argh, SkyCoord needs __eq__!!
assert skycoord_equal(out["a"][len_col:], col)
assert skycoord_equal(out["a"][:len_col], col)
elif "Repr" in cls_name or "Diff" in cls_name:
assert np.all(representation_equal(out["a"][:len_col], col))
assert np.all(representation_equal(out["a"][len_col:], col))
else:
assert np.all(out["a"][:len_col] == col)
assert np.all(out["a"][len_col:] == col)
else:
with pytest.raises(NotImplementedError) as err:
table.vstack([t, t])
assert "vstack unavailable for mixin column type(s): {}".format(
cls_name
) in str(err.value)
# Check for outer stack which requires masking. Only Time, TimeDelta and
# Quantity support this currently.
t2 = table.QTable([col], names=["b"]) # different from col name for t
if isinstance(col, (Time, TimeDelta, Quantity)):
out = table.vstack([t, t2], join_type="outer")
assert len(out) == len_col * 2
assert np.all(out["a"][:len_col] == col)
assert np.all(out["b"][len_col:] == col)
assert check_mask(out["a"], [False] * len_col + [True] * len_col)
assert check_mask(out["b"], [True] * len_col + [False] * len_col)
# check directly stacking mixin columns:
out2 = table.vstack([t, t2["b"]])
assert np.all(out["a"] == out2["a"])
assert np.all(out["b"] == out2["b"])
else:
with pytest.raises(NotImplementedError) as err:
table.vstack([t, t2], join_type="outer")
assert "vstack requires masking" in str(
err.value
) or "vstack unavailable" in str(err.value)
def test_vstack_different_representation(self):
"""Test that representations can be mixed together."""
rep1 = CartesianRepresentation([1, 2] * u.km, [3, 4] * u.km, 1 * u.km)
rep2 = SphericalRepresentation([0] * u.deg, [0] * u.deg, 10 * u.km)
t1 = Table([rep1])
t2 = Table([rep2])
t12 = table.vstack([t1, t2])
expected = CartesianRepresentation(
[1, 2, 10] * u.km, [3, 4, 0] * u.km, [1, 1, 0] * u.km
)
assert np.all(representation_equal(t12["col0"], expected))
rep3 = UnitSphericalRepresentation([0] * u.deg, [0] * u.deg)
t3 = Table([rep3])
with pytest.raises(ValueError, match="representations are inconsistent"):
table.vstack([t1, t3])
def test_vstack_structured_column(self):
"""Regression tests for gh-13271."""
# Two tables with matching names, including a structured column.
t1 = Table(
[
np.array([(1.0, 1), (2.0, 2)], dtype=[("f", "f8"), ("i", "i8")]),
["one", "two"],
],
names=["structured", "string"],
)
t2 = Table(
[
np.array([(3.0, 3), (4.0, 4)], dtype=[("f", "f8"), ("i", "i8")]),
["three", "four"],
],
names=["structured", "string"],
)
t12 = table.vstack([t1, t2])
assert t12.pformat() == [
"structured [f, i] string",
"----------------- ------",
" (1., 1) one",
" (2., 2) two",
" (3., 3) three",
" (4., 4) four",
]
# One table without the structured column.
t3 = t2[("string",)]
t13 = table.vstack([t1, t3])
assert t13.pformat() == [
"structured [f, i] string",
"----------------- ------",
" (1.0, 1) one",
" (2.0, 2) two",
" -- three",
" -- four",
]
class TestDStack:
def _setup(self, t_cls=Table):
self.t1 = t_cls.read(
[
" a b",
" 0. foo",
" 1. bar",
],
format="ascii",
)
self.t2 = t_cls.read(
[
" a b c",
" 2. pez 4",
" 3. sez 5",
],
format="ascii",
)
self.t2["d"] = Time([1, 2], format="cxcsec")
self.t3 = t_cls(
{
"a": [[5.0, 6.0], [4.0, 3.0]],
"b": [["foo", "bar"], ["pez", "sez"]],
},
names=("a", "b"),
)
self.t4 = t_cls(self.t1, copy=True, masked=t_cls is Table)
self.t5 = t_cls(
{
"a": [[4.0, 2.0], [1.0, 6.0]],
"b": [["foo", "pez"], ["bar", "sez"]],
},
names=("a", "b"),
)
self.t6 = t_cls.read(
[
" a b c",
" 7. pez 2",
" 4. sez 6",
" 6. foo 3",
],
format="ascii",
)
def test_validate_join_type(self):
self._setup()
with pytest.raises(TypeError, match="Did you accidentally call dstack"):
table.dstack(self.t1, self.t2)
@staticmethod
def compare_dstack(tables, out):
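# Helper: each input table should appear as the [:, ii] slice of the stacked
# output, with masks propagated (all True where a column is missing).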
for ii, tbl in enumerate(tables):
for name, out_col in out.columns.items():
if name in tbl.colnames:
# Columns always compare equal
assert np.all(tbl[name] == out[name][:, ii])
# If input has a mask then output must have same mask
if hasattr(tbl[name], "mask"):
assert np.all(tbl[name].mask == out[name].mask[:, ii])
# If input has no mask then output might have a mask (if other table
# is missing that column). If so then all mask values should be False.
elif hasattr(out[name], "mask"):
assert not np.any(out[name].mask[:, ii])
else:
# Column missing for this table, out must have a mask with all True.
assert np.all(out[name].mask[:, ii])
def test_dstack_table_column(self, operation_table_type):
"""Stack a table with 3 cols and one column (gets auto-converted to Table)."""
self._setup(operation_table_type)
t2 = self.t1.copy()
out = table.dstack([self.t1, t2["a"]])
self.compare_dstack([self.t1, t2[("a",)]], out)
def test_dstack_basic_outer(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail("Quantity columns do not support masking.")
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t4 = self.t4
t4["a"].mask[0] = True
# Test for non-masked table
t12 = table.dstack([t1, t2], join_type="outer")
assert type(t12) is operation_table_type
assert type(t12["a"]) is type(t1["a"])
assert type(t12["b"]) is type(t1["b"])
self.compare_dstack([t1, t2], t12)
# Test for masked table
t124 = table.dstack([t1, t2, t4], join_type="outer")
assert type(t124) is operation_table_type
assert type(t124["a"]) is type(t4["a"])
assert type(t124["b"]) is type(t4["b"])
self.compare_dstack([t1, t2, t4], t124)
def test_dstack_basic_inner(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t4 = self.t4
# Test for masked table
t124 = table.dstack([t1, t2, t4], join_type="inner")
assert type(t124) is operation_table_type
assert type(t124["a"]) is type(t4["a"])
assert type(t124["b"]) is type(t4["b"])
self.compare_dstack([t1, t2, t4], t124)
def test_dstack_multi_dimension_column(self, operation_table_type):
self._setup(operation_table_type)
t3 = self.t3
t5 = self.t5
t2 = self.t2
t35 = table.dstack([t3, t5])
assert type(t35) is operation_table_type
assert type(t35["a"]) is type(t3["a"])
assert type(t35["b"]) is type(t3["b"])
self.compare_dstack([t3, t5], t35)
with pytest.raises(TableMergeError):
table.dstack([t2, t3])
def test_dstack_different_length_table(self, operation_table_type):
self._setup(operation_table_type)
t2 = self.t2
t6 = self.t6
with pytest.raises(ValueError):
table.dstack([t2, t6])
def test_dstack_single_table(self):
self._setup(Table)
out = table.dstack(self.t1)
assert np.all(out == self.t1)
def test_dstack_representation(self):
rep1 = SphericalRepresentation([1, 2] * u.deg, [3, 4] * u.deg, 1 * u.kpc)
rep2 = SphericalRepresentation([10, 20] * u.deg, [30, 40] * u.deg, 10 * u.kpc)
t1 = Table([rep1])
t2 = Table([rep2])
t12 = table.dstack([t1, t2])
assert np.all(representation_equal(t12["col0"][:, 0], rep1))
assert np.all(representation_equal(t12["col0"][:, 1], rep2))
def test_dstack_skycoord(self):
sc1 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg)
sc2 = SkyCoord([10, 20] * u.deg, [30, 40] * u.deg)
t1 = Table([sc1])
t2 = Table([sc2])
t12 = table.dstack([t1, t2])
assert skycoord_equal(sc1, t12["col0"][:, 0])
assert skycoord_equal(sc2, t12["col0"][:, 1])
def test_dstack_structured_column(self):
"""Regression tests for gh-13271."""
# Two tables with matching names, including a structured column.
t1 = Table(
[
np.array([(1.0, 1), (2.0, 2)], dtype=[("f", "f8"), ("i", "i8")]),
["one", "two"],
],
names=["structured", "string"],
)
t2 = Table(
[
np.array([(3.0, 3), (4.0, 4)], dtype=[("f", "f8"), ("i", "i8")]),
["three", "four"],
],
names=["structured", "string"],
)
t12 = table.dstack([t1, t2])
assert t12.pformat() == [
"structured [f, i] string ",
"------------------ ------------",
"(1., 1) .. (3., 3) one .. three",
"(2., 2) .. (4., 4) two .. four",
]
# One table without the structured column.
t3 = t2[("string",)]
t13 = table.dstack([t1, t3])
assert t13.pformat() == [
"structured [f, i] string ",
"----------------- ------------",
" (1.0, 1) .. -- one .. three",
" (2.0, 2) .. -- two .. four",
]
class TestHStack:
def _setup(self, t_cls=Table):
self.t1 = t_cls.read(
[
" a b",
" 0. foo",
" 1. bar",
],
format="ascii",
)
self.t2 = t_cls.read(
[
" a b c",
" 2. pez 4",
" 3. sez 5",
],
format="ascii",
)
self.t3 = t_cls.read(
[
" d e",
" 4. 7",
" 5. 8",
" 6. 9",
],
format="ascii",
)
self.t4 = t_cls(self.t1, copy=True, masked=True)
self.t4["a"].name = "f"
self.t4["b"].name = "g"
# The following table has meta-data that conflicts with t1
self.t5 = t_cls(self.t1, copy=True)
self.t1.meta.update(OrderedDict([("b", [1, 2]), ("c", {"a": 1}), ("d", 1)]))
self.t2.meta.update(OrderedDict([("b", [3, 4]), ("c", {"b": 1}), ("a", 1)]))
self.t4.meta.update(OrderedDict([("b", [5, 6]), ("c", {"c": 1}), ("e", 1)]))
self.t5.meta.update(OrderedDict([("b", 3), ("c", "k"), ("d", 1)]))
self.meta_merge = OrderedDict(
[
("b", [1, 2, 3, 4, 5, 6]),
("c", {"a": 1, "b": 1, "c": 1}),
("d", 1),
("a", 1),
("e", 1),
]
)
def test_validate_join_type(self):
self._setup()
with pytest.raises(TypeError, match="Did you accidentally call hstack"):
table.hstack(self.t1, self.t2)
def test_stack_same_table(self, operation_table_type):
"""
From #2995, test that hstack'ing references to the same table has the
expected output.
"""
self._setup(operation_table_type)
out = table.hstack([self.t1, self.t1])
assert out.masked is False
assert out.pformat() == [
"a_1 b_1 a_2 b_2",
"--- --- --- ---",
"0.0 foo 0.0 foo",
"1.0 bar 1.0 bar",
]
def test_stack_rows(self, operation_table_type):
self._setup(operation_table_type)
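# hstack also accepts Row objects; stacking two rows yields a single-row table.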
out = table.hstack([self.t1[0], self.t2[1]])
assert out.masked is False
assert out.pformat() == [
"a_1 b_1 a_2 b_2 c ",
"--- --- --- --- ---",
"0.0 foo 3.0 sez 5",
]
def test_stack_columns(self, operation_table_type):
self._setup(operation_table_type)
out = table.hstack([self.t1, self.t2["c"]])
assert type(out["a"]) is type(self.t1["a"])
assert type(out["b"]) is type(self.t1["b"])
assert type(out["c"]) is type(self.t2["c"])
assert out.pformat() == [
" a b c ",
"--- --- ---",
"0.0 foo 4",
"1.0 bar 5",
]
def test_table_meta_merge(self, operation_table_type):
self._setup(operation_table_type)
out = table.hstack([self.t1, self.t2, self.t4], join_type="inner")
assert out.meta == self.meta_merge
def test_table_meta_merge_conflict(self, operation_table_type):
self._setup(operation_table_type)
with pytest.warns(metadata.MergeConflictWarning) as w:
out = table.hstack([self.t1, self.t5], join_type="inner")
assert len(w) == 2
assert out.meta == self.t5.meta
with pytest.warns(metadata.MergeConflictWarning) as w:
out = table.hstack(
[self.t1, self.t5], join_type="inner", metadata_conflicts="warn"
)
assert len(w) == 2
assert out.meta == self.t5.meta
out = table.hstack(
[self.t1, self.t5], join_type="inner", metadata_conflicts="silent"
)
assert out.meta == self.t5.meta
with pytest.raises(MergeConflictError):
out = table.hstack(
[self.t1, self.t5], join_type="inner", metadata_conflicts="error"
)
with pytest.raises(ValueError):
out = table.hstack(
[self.t1, self.t5], join_type="inner", metadata_conflicts="nonsense"
)
def test_bad_input_type(self, operation_table_type):
self._setup(operation_table_type)
with pytest.raises(ValueError):
table.hstack([])
with pytest.raises(TypeError):
table.hstack(1)
with pytest.raises(TypeError):
table.hstack([self.t2, 1])
with pytest.raises(ValueError):
table.hstack([self.t1, self.t2], join_type="invalid join type")
def test_stack_basic(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t3 = self.t3
t4 = self.t4
out = table.hstack([t1, t2], join_type="inner")
assert out.masked is False
assert type(out) is operation_table_type
assert type(out["a_1"]) is type(t1["a"])
assert type(out["b_1"]) is type(t1["b"])
assert type(out["a_2"]) is type(t2["a"])
assert type(out["b_2"]) is type(t2["b"])
assert out.pformat() == [
"a_1 b_1 a_2 b_2 c ",
"--- --- --- --- ---",
"0.0 foo 2.0 pez 4",
"1.0 bar 3.0 sez 5",
]
# stacking as a list gives same result
out_list = table.hstack([t1, t2], join_type="inner")
assert out.pformat() == out_list.pformat()
out = table.hstack([t1, t2], join_type="outer")
assert out.pformat() == out_list.pformat()
out = table.hstack([t1, t2, t3, t4], join_type="outer")
assert out.masked is False
assert out.pformat() == [
"a_1 b_1 a_2 b_2 c d e f g ",
"--- --- --- --- --- --- --- --- ---",
"0.0 foo 2.0 pez 4 4.0 7 0.0 foo",
"1.0 bar 3.0 sez 5 5.0 8 1.0 bar",
" -- -- -- -- -- 6.0 9 -- --",
]
out = table.hstack([t1, t2, t3, t4], join_type="inner")
assert out.masked is False
assert out.pformat() == [
"a_1 b_1 a_2 b_2 c d e f g ",
"--- --- --- --- --- --- --- --- ---",
"0.0 foo 2.0 pez 4 4.0 7 0.0 foo",
"1.0 bar 3.0 sez 5 5.0 8 1.0 bar",
]
def test_stack_incompatible(self, operation_table_type):
self._setup(operation_table_type)
# For join_type exact, which will fail here because n_rows
# does not match
with pytest.raises(TableMergeError):
table.hstack([self.t1, self.t3], join_type="exact")
def test_hstack_one_masked(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail()
self._setup(operation_table_type)
t1 = self.t1
t2 = operation_table_type(t1, copy=True, masked=True)
t2.meta.clear()
t2["b"].mask[1] = True
out = table.hstack([t1, t2])
assert out.pformat() == [
"a_1 b_1 a_2 b_2",
"--- --- --- ---",
"0.0 foo 0.0 foo",
"1.0 bar 1.0 --",
]
def test_table_col_rename(self, operation_table_type):
self._setup(operation_table_type)
out = table.hstack(
[self.t1, self.t2],
join_type="inner",
uniq_col_name="{table_name}_{col_name}",
table_names=("left", "right"),
)
assert out.masked is False
assert out.pformat() == [
"left_a left_b right_a right_b c ",
"------ ------ ------- ------- ---",
" 0.0 foo 2.0 pez 4",
" 1.0 bar 3.0 sez 5",
]
def test_col_meta_merge(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t3 = self.t3[:2]
t4 = self.t4
# Just set a bunch of meta and make sure it is the same in output
meta1 = OrderedDict([("b", [1, 2]), ("c", {"a": 1}), ("d", 1)])
t1["a"].unit = "cm"
t1["b"].info.description = "t1_b"
t4["f"].info.format = "%6s"
t1["b"].info.meta.update(meta1)
t3["d"].info.meta.update(
OrderedDict([("b", [3, 4]), ("c", {"b": 1}), ("a", 1)])
)
t4["g"].info.meta.update(
OrderedDict([("b", [5, 6]), ("c", {"c": 1}), ("e", 1)])
)
t3["e"].info.meta.update(
OrderedDict([("b", [3, 4]), ("c", {"b": 1}), ("a", 1)])
)
t3["d"].unit = "m"
t3["d"].info.format = "%6s"
t3["d"].info.description = "t3_c"
out = table.hstack([t1, t3, t4], join_type="exact")
for t in [t1, t3, t4]:
for name in t.colnames:
for attr in ("meta", "unit", "format", "description"):
assert getattr(out[name].info, attr) == getattr(t[name].info, attr)
# Make sure we got a copy of meta, not ref
t1["b"].info.meta["b"] = None
assert out["b"].info.meta["b"] == [1, 2]
def test_hstack_one_table(self, operation_table_type):
"""Regression test for issue #3313"""
self._setup(operation_table_type)
assert (self.t1 == table.hstack(self.t1)).all()
assert (self.t1 == table.hstack([self.t1])).all()
def test_mixin_functionality(self, mixin_cols):
col1 = mixin_cols["m"]
col2 = col1[2:4] # Shorter version of col1
t1 = table.QTable([col1])
t2 = table.QTable([col2])
cls_name = type(col1).__name__
out = table.hstack([t1, t2], join_type="inner")
assert type(out["col0_1"]) is type(out["col0_2"])
assert len(out) == len(col2)
# Check that columns are as expected.
if cls_name == "SkyCoord":
assert skycoord_equal(out["col0_1"], col1[: len(col2)])
assert skycoord_equal(out["col0_2"], col2)
elif "Repr" in cls_name or "Diff" in cls_name:
assert np.all(representation_equal(out["col0_1"], col1[: len(col2)]))
assert np.all(representation_equal(out["col0_2"], col2))
else:
assert np.all(out["col0_1"] == col1[: len(col2)])
assert np.all(out["col0_2"] == col2)
# Time, TimeDelta and Quantity support masking; all other mixins do not
if isinstance(col1, (Time, TimeDelta, Quantity)):
out = table.hstack([t1, t2], join_type="outer")
assert len(out) == len(t1)
assert np.all(out["col0_1"] == col1)
assert np.all(out["col0_2"][: len(col2)] == col2)
assert check_mask(out["col0_2"], [False, False, True, True])
# check directly stacking mixin columns:
out2 = table.hstack([t1, t2["col0"]], join_type="outer")
assert np.all(out["col0_1"] == out2["col0_1"])
assert np.all(out["col0_2"] == out2["col0_2"])
else:
with pytest.raises(NotImplementedError) as err:
table.hstack([t1, t2], join_type="outer")
assert "hstack requires masking" in str(err.value)
def test_unique(operation_table_type):
t = operation_table_type.read(
[
" a b c d",
" 2 b 7.0 0",
" 1 c 3.0 5",
" 2 b 6.0 2",
" 2 a 4.0 3",
" 1 a 1.0 7",
" 2 b 5.0 1",
" 0 a 0.0 4",
" 1 a 2.0 6",
" 1 c 3.0 5",
],
format="ascii",
)
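# The last row duplicates " 1 c 3.0 5", so the expected unique table is the
# sorted input with that final row dropped.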
tu = operation_table_type(np.sort(t[:-1]))
t_all = table.unique(t)
assert sort_eq(t_all.pformat(), tu.pformat())
t_s = t.copy()
del t_s["b", "c", "d"]
t_all = table.unique(t_s)
assert sort_eq(
t_all.pformat(),
[
" a ",
"---",
" 0",
" 1",
" 2",
],
)
key1 = "a"
t1a = table.unique(t, key1)
assert sort_eq(
t1a.pformat(),
[
" a b c d ",
"--- --- --- ---",
" 0 a 0.0 4",
" 1 c 3.0 5",
" 2 b 7.0 0",
],
)
t1b = table.unique(t, key1, keep="last")
assert sort_eq(
t1b.pformat(),
[
" a b c d ",
"--- --- --- ---",
" 0 a 0.0 4",
" 1 c 3.0 5",
" 2 b 5.0 1",
],
)
t1c = table.unique(t, key1, keep="none")
assert sort_eq(
t1c.pformat(),
[
" a b c d ",
"--- --- --- ---",
" 0 a 0.0 4",
],
)
key2 = ["a", "b"]
t2a = table.unique(t, key2)
assert sort_eq(
t2a.pformat(),
[
" a b c d ",
"--- --- --- ---",
" 0 a 0.0 4",
" 1 a 1.0 7",
" 1 c 3.0 5",
" 2 a 4.0 3",
" 2 b 7.0 0",
],
)
t2b = table.unique(t, key2, keep="last")
assert sort_eq(
t2b.pformat(),
[
" a b c d ",
"--- --- --- ---",
" 0 a 0.0 4",
" 1 a 2.0 6",
" 1 c 3.0 5",
" 2 a 4.0 3",
" 2 b 5.0 1",
],
)
t2c = table.unique(t, key2, keep="none")
assert sort_eq(
t2c.pformat(),
[
" a b c d ",
"--- --- --- ---",
" 0 a 0.0 4",
" 2 a 4.0 3",
],
)
key2 = ["a", "a"]
with pytest.raises(ValueError) as exc:
t2a = table.unique(t, key2)
assert exc.value.args[0] == "duplicate key names"
with pytest.raises(ValueError) as exc:
table.unique(t, key2, keep=True)
assert exc.value.args[0] == "'keep' should be one of 'first', 'last', 'none'"
t1_m = operation_table_type(t1a, masked=True)
t1_m["a"].mask[1] = True
with pytest.raises(ValueError) as exc:
t1_mu = table.unique(t1_m)
assert (
exc.value.args[0] == "cannot use columns with masked values as keys; "
"remove column 'a' from keys and rerun unique()"
)
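# With silent=True, key columns containing masked values are dropped from the
# keys instead of raising.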
t1_mu = table.unique(t1_m, silent=True)
assert t1_mu.masked is False
assert t1_mu.pformat() == [
" a b c d ",
"--- --- --- ---",
" 0 a 0.0 4",
" 2 b 7.0 0",
" -- c 3.0 5",
]
with pytest.raises(ValueError):
t1_mu = table.unique(t1_m, silent=True, keys="a")
t1_m = operation_table_type(t, masked=True)
t1_m["a"].mask[1] = True
t1_m["d"].mask[3] = True
# Test that multiple masked key columns get removed in the correct
# order
t1_mu = table.unique(t1_m, keys=["d", "a", "b"], silent=True)
assert t1_mu.masked is False
assert t1_mu.pformat() == [
" a b c d ",
"--- --- --- ---",
" 2 a 4.0 --",
" 2 b 7.0 0",
" -- c 3.0 5",
]
def test_vstack_bytes(operation_table_type):
"""
Test for issue #5617 when vstack'ing bytes columns in Py3.
This is really an upstream numpy issue numpy/numpy#8403.
"""
t = operation_table_type([[b"a"]], names=["a"])
assert t["a"].itemsize == 1
t2 = table.vstack([t, t])
assert len(t2) == 2
assert t2["a"].itemsize == 1
def test_vstack_unicode():
"""
Test for problem related to issue #5617 when vstack'ing *unicode*
columns. In this case the character size gets multiplied by 4.
"""
t = table.Table([["a"]], names=["a"])
assert t["a"].itemsize == 4 # 4-byte / char for U dtype
t2 = table.vstack([t, t])
assert len(t2) == 2
assert t2["a"].itemsize == 4
def test_join_mixins_time_quantity():
"""
Test for table join using non-ndarray key columns.
"""
tm1 = Time([2, 1, 2], format="cxcsec")
q1 = [2, 1, 1] * u.m
idx1 = [1, 2, 3]
tm2 = Time([2, 3], format="cxcsec")
q2 = [2, 3] * u.m
idx2 = [10, 20]
t1 = Table([tm1, q1, idx1], names=["tm", "q", "idx"])
t2 = Table([tm2, q2, idx2], names=["tm", "q", "idx"])
# Output:
#
# <Table length=4>
# tm q idx_1 idx_2
# m
# object float64 int64 int64
# ------------------ ------- ----- -----
# 0.9999999999969589 1.0 2 --
# 2.00000000000351 1.0 3 --
# 2.00000000000351 2.0 1 10
# 3.000000000000469 3.0 -- 20
t12 = table.join(t1, t2, join_type="outer", keys=["tm", "q"])
# Key cols are lexically sorted
assert np.all(t12["tm"] == Time([1, 2, 2, 3], format="cxcsec"))
assert np.all(t12["q"] == [1, 1, 2, 3] * u.m)
assert np.all(t12["idx_1"] == np.ma.array([2, 3, 1, 0], mask=[0, 0, 0, 1]))
assert np.all(t12["idx_2"] == np.ma.array([0, 0, 10, 20], mask=[1, 1, 0, 0]))
def test_join_mixins_not_sortable():
"""
Test for table join using non-ndarray key columns that are not sortable.
"""
sc = SkyCoord([1, 2], [3, 4], unit="deg,deg")
t1 = Table([sc, [1, 2]], names=["sc", "idx1"])
t2 = Table([sc, [10, 20]], names=["sc", "idx2"])
with pytest.raises(TypeError, match="one or more key columns are not sortable"):
table.join(t1, t2, keys="sc")
def test_join_non_1d_key_column():
c1 = [[1, 2], [3, 4]]
c2 = [1, 2]
t1 = Table([c1, c2], names=["a", "b"])
t2 = t1.copy()
with pytest.raises(ValueError, match="key column 'a' must be 1-d"):
table.join(t1, t2, keys="a")
def test_argsort_time_column():
"""Regression test for #10823."""
times = Time(["2016-01-01", "2018-01-01", "2017-01-01"])
t = Table([times], names=["time"])
i = t.argsort("time")
assert np.all(i == times.argsort())
def test_sort_indexed_table():
"""Test fix for #9473 and #6545 - and another regression test for #10823."""
t = Table([[1, 3, 2], [6, 4, 5]], names=("a", "b"))
t.add_index("a")
t.sort("a")
assert np.all(t["a"] == [1, 2, 3])
assert np.all(t["b"] == [6, 5, 4])
t.sort("b")
assert np.all(t["b"] == [4, 5, 6])
assert np.all(t["a"] == [3, 2, 1])
times = ["2016-01-01", "2018-01-01", "2017-01-01"]
tm = Time(times)
t2 = Table([tm, [3, 2, 1]], names=["time", "flux"])
t2.sort("flux")
assert np.all(t2["flux"] == [1, 2, 3])
t2.sort("time")
assert np.all(t2["flux"] == [3, 1, 2])
assert np.all(t2["time"] == tm[[0, 2, 1]])
# Using the table as a TimeSeries implicitly sets the index, so
# this test is a bit different from the above.
from astropy.timeseries import TimeSeries
ts = TimeSeries(time=times)
ts["flux"] = [3, 2, 1]
ts.sort("flux")
assert np.all(ts["flux"] == [1, 2, 3])
ts.sort("time")
assert np.all(ts["flux"] == [3, 1, 2])
assert np.all(ts["time"] == tm[[0, 2, 1]])
def test_get_out_class():
c = table.Column([1, 2])
mc = table.MaskedColumn([1, 2])
q = [1, 2] * u.m
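# MaskedColumn takes precedence over Column; mixing a Column with a Quantity
# has no common output class and raises ValueError.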
assert _get_out_class([c, mc]) is mc.__class__
assert _get_out_class([mc, c]) is mc.__class__
assert _get_out_class([c, c]) is c.__class__
assert _get_out_class([c]) is c.__class__
with pytest.raises(ValueError):
_get_out_class([c, q])
with pytest.raises(ValueError):
_get_out_class([q, c])
def test_masking_required_exception():
"""
Test that outer join, hstack and vstack fail for a mixin column which
does not support masking.
"""
col = table.NdarrayMixin([0, 1, 2, 3])
t1 = table.QTable([[1, 2, 3, 4], col], names=["a", "b"])
t2 = table.QTable([[1, 2], col[:2]], names=["a", "c"])
with pytest.raises(NotImplementedError) as err:
table.vstack([t1, t2], join_type="outer")
assert "vstack unavailable" in str(err.value)
with pytest.raises(NotImplementedError) as err:
table.hstack([t1, t2], join_type="outer")
assert "hstack requires masking" in str(err.value)
with pytest.raises(NotImplementedError) as err:
table.join(t1, t2, join_type="outer")
assert "join requires masking" in str(err.value)
def test_stack_columns():
c = table.Column([1, 2])
mc = table.MaskedColumn([1, 2])
q = [1, 2] * u.m
time = Time(["2001-01-02T12:34:56", "2001-02-03T00:01:02"])
sc = SkyCoord([1, 2], [3, 4], unit="deg")
cq = table.Column([11, 22], unit=u.m)
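# Stacking bare columns auto-wraps them in a table; a Quantity input makes the
# result a QTable, otherwise a plain Table is returned.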
t = table.hstack([c, q])
assert t.__class__ is table.QTable
assert t.masked is False
t = table.hstack([q, c])
assert t.__class__ is table.QTable
assert t.masked is False
t = table.hstack([mc, q])
assert t.__class__ is table.QTable
assert t.masked is False
t = table.hstack([c, mc])
assert t.__class__ is table.Table
assert t.masked is False
t = table.vstack([q, q])
assert t.__class__ is table.QTable
t = table.vstack([c, c])
assert t.__class__ is table.Table
t = table.hstack([c, time])
assert t.__class__ is table.Table
t = table.hstack([c, sc])
assert t.__class__ is table.Table
t = table.hstack([q, time, sc])
assert t.__class__ is table.QTable
with pytest.raises(ValueError):
table.vstack([c, q])
with pytest.raises(ValueError):
t = table.vstack([q, cq])
def test_mixin_join_regression():
# This used to trigger a ValueError:
# ValueError: NumPy boolean array indexing assignment cannot assign
# 6 input values to the 4 output values where the mask is true
t1 = QTable()
t1["index"] = [1, 2, 3, 4, 5]
t1["flux1"] = [2, 3, 2, 1, 1] * u.Jy
t1["flux2"] = [2, 3, 2, 1, 1] * u.Jy
t2 = QTable()
t2["index"] = [3, 4, 5, 6]
t2["flux1"] = [2, 1, 1, 3] * u.Jy
t2["flux2"] = [2, 1, 1, 3] * u.Jy
t12 = table.join(t1, t2, keys=("index", "flux1", "flux2"), join_type="outer")
assert len(t12) == 6
|
7a0f0a837e049260b54f6ce71345fc11fa0eacd43b42dfca60715fd056239383 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import sys
import numpy as np
import pytest
from astropy import table
from astropy import units as u
from astropy.table import Row
from .conftest import MaskedTable
def test_masked_row_with_object_col():
"""
Numpy < 1.8 has a bug in masked array that prevents accessing a row if there is
a column with object type.
"""
t = table.Table([[1]], dtype=["O"], masked=True)
t["col0"].mask = False
assert t[0]["col0"] == 1
t["col0"].mask = True
assert t[0]["col0"] is np.ma.masked
@pytest.mark.usefixtures("table_types")
class TestRow:
def _setup(self, table_types):
self._table_type = table_types.Table
self._column_type = table_types.Column
@property
def t(self):
# pytest accesses this property once before _setup(table_types) has run to
# set Table and Column. In that case just return None; this would cause any
# downstream test to fail if it happened in any other context.
if self._column_type is None:
return None
if not hasattr(self, "_t"):
a = self._column_type(name="a", data=[1, 2, 3], dtype="i8")
b = self._column_type(name="b", data=[4, 5, 6], dtype="i8")
self._t = self._table_type([a, b])
return self._t
def test_subclass(self, table_types):
"""Row is subclass of ndarray and Row"""
self._setup(table_types)
c = Row(self.t, 2)
assert isinstance(c, Row)
def test_values(self, table_types):
"""Row accurately reflects table values and attributes"""
self._setup(table_types)
table = self.t
row = table[1]
assert row["a"] == 2
assert row["b"] == 5
assert row[0] == 2
assert row[1] == 5
assert row.meta is table.meta
assert row.colnames == table.colnames
assert row.columns is table.columns
with pytest.raises(IndexError):
row[2]
if sys.byteorder == "little":
assert str(row.dtype) == "[('a', '<i8'), ('b', '<i8')]"
else:
assert str(row.dtype) == "[('a', '>i8'), ('b', '>i8')]"
def test_ref(self, table_types):
"""Row is a reference into original table data"""
self._setup(table_types)
table = self.t
row = table[1]
row["a"] = 10
if table_types.Table is not MaskedTable:
assert table["a"][1] == 10
def test_left_equal(self, table_types):
"""Compare a table row to the corresponding structured array row"""
self._setup(table_types)
np_t = self.t.as_array()
if table_types.Table is MaskedTable:
with pytest.raises(ValueError):
self.t[0] == np_t[0] # noqa: B015
else:
for row, np_row in zip(self.t, np_t):
assert np.all(row == np_row)
def test_left_not_equal(self, table_types):
"""Compare a table row to the corresponding structured array row"""
self._setup(table_types)
np_t = self.t.as_array()
np_t["a"] = [0, 0, 0]
if table_types.Table is MaskedTable:
with pytest.raises(ValueError):
self.t[0] == np_t[0] # noqa: B015
else:
for row, np_row in zip(self.t, np_t):
assert np.all(row != np_row)
def test_right_equal(self, table_types):
"""Test right equal"""
self._setup(table_types)
np_t = self.t.as_array()
if table_types.Table is MaskedTable:
with pytest.raises(ValueError):
self.t[0] == np_t[0] # noqa: B015
else:
for row, np_row in zip(self.t, np_t):
assert np.all(np_row == row)
def test_convert_numpy_array(self, table_types):
self._setup(table_types)
d = self.t[1]
np_data = np.array(d)
if table_types.Table is not MaskedTable:
assert np.all(np_data == d.as_void())
assert np_data is not d.as_void()
assert d.colnames == list(np_data.dtype.names)
np_data = np.array(d, copy=False)
if table_types.Table is not MaskedTable:
assert np.all(np_data == d.as_void())
assert np_data is not d.as_void()
assert d.colnames == list(np_data.dtype.names)
with pytest.raises(ValueError):
np_data = np.array(d, dtype=[("c", "i8"), ("d", "i8")])
def test_format_row(self, table_types):
"""Test formatting row"""
self._setup(table_types)
table = self.t
row = table[0]
assert repr(row).splitlines() == [
"<{} {}{}>".format(
row.__class__.__name__,
"index=0",
" masked=True" if table.masked else "",
),
" a b ",
"int64 int64",
"----- -----",
" 1 4",
]
assert str(row).splitlines() == [" a b ", "--- ---", " 1 4"]
assert row._repr_html_().splitlines() == [
"<i>{} {}{}</i>".format(
row.__class__.__name__,
"index=0",
" masked=True" if table.masked else "",
),
f'<table id="table{id(table)}">',
"<thead><tr><th>a</th><th>b</th></tr></thead>",
"<thead><tr><th>int64</th><th>int64</th></tr></thead>",
"<tr><td>1</td><td>4</td></tr>",
"</table>",
]
def test_as_void(self, table_types):
"""Test the as_void() method"""
self._setup(table_types)
table = self.t
row = table[0]
# For a masked table with no masked values, numpy issue numpy/numpy#483
# could come into play. Make sure the as_void() code handles it.
row_void = row.as_void()
if table.masked:
assert isinstance(row_void, np.ma.mvoid)
else:
assert isinstance(row_void, np.void)
assert row_void["a"] == 1
assert row_void["b"] == 4
# Confirm row is a view of table but row_void is not.
table["a"][0] = -100
assert row["a"] == -100
assert row_void["a"] == 1
# Make sure it works for a table that has masked elements
if table.masked:
table["a"].mask = True
# row_void is not a view, need to re-make
assert row_void["a"] == 1
row_void = row.as_void() # but row is a view
assert row["a"] is np.ma.masked
def test_row_and_as_void_with_objects(self, table_types):
"""Test the deprecated data property and as_void() method"""
t = table_types.Table([[{"a": 1}, {"b": 2}]], names=("a",))
assert t[0][0] == {"a": 1}
assert t[0]["a"] == {"a": 1}
assert t[0].as_void()[0] == {"a": 1}
assert t[0].as_void()["a"] == {"a": 1}
def test_bounds_checking(self, table_types):
"""Row gives index error upon creation for out-of-bounds index"""
self._setup(table_types)
for ibad in (-5, -4, 3, 4):
with pytest.raises(IndexError):
self.t[ibad]
def test_create_rows_from_list(self, table_types):
"""https://github.com/astropy/astropy/issues/8976"""
orig_tab = table_types.Table([[1, 2, 3], [4, 5, 6]], names=("a", "b"))
new_tab = type(orig_tab)(rows=list(orig_tab), names=orig_tab.dtype.names)
assert np.all(orig_tab == new_tab)
def test_row_keys_values(self, table_types):
self._setup(table_types)
row = self.t[0]
for row_key, col_key in zip(row.keys(), self.t.columns.keys()):
assert row_key == col_key
for row_value, col in zip(row.values(), self.t.columns.values()):
assert row_value == col[0]
def test_row_as_mapping(self, table_types):
self._setup(table_types)
row = self.t[0]
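# Row supports the mapping protocol (keys/values), so dict(row) and **row work.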
row_dict = dict(row)
for key, value in row_dict.items():
assert row[key] == value
def f(**kwargs):
return kwargs
row_splatted = f(**row)
for key, value in row_splatted.items():
assert row[key] == value
def test_row_as_sequence(self, table_types):
self._setup(table_types)
row = self.t[0]
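# Row iterates over its values, so tuple(row) and *row splatting work.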
row_tuple = tuple(row)
keys = tuple(row.keys())
for key, value in zip(keys, row_tuple):
assert row[key] == value
def f(*args):
return args
row_splatted = f(*row)
for key, value in zip(keys, row_splatted):
assert row[key] == value
def test_row_tuple_column_slice():
"""
Test getting and setting a row using a tuple or list of column names
"""
t = table.QTable(
[
[1, 2, 3] * u.m,
[10.0, 20.0, 30.0],
[100.0, 200.0, 300.0],
["x", "y", "z"],
],
names=["a", "b", "c", "d"],
)
# Get a row for index=1
r1 = t[1]
# Column slice with tuple of col names
r1_abc = r1["a", "b", "c"] # Row object for these cols
r1_abc_repr = [
"<Row index=1>",
" a b c ",
" m ",
"float64 float64 float64",
"------- ------- -------",
" 2.0 20.0 200.0",
]
assert repr(r1_abc).splitlines() == r1_abc_repr
# Column slice with list of col names
r1_abc = r1[["a", "b", "c"]]
assert repr(r1_abc).splitlines() == r1_abc_repr
# Make sure setting on a tuple or slice updates parent table and row
r1["c"] = 1000
r1["a", "b"] = 1000 * u.cm, 100.0
assert r1["a"] == 10 * u.m
assert r1["b"] == 100
assert t["a"][1] == 10 * u.m
assert t["b"][1] == 100.0
assert t["c"][1] == 1000
# Same but using a list of column names instead of tuple
r1[["a", "b"]] = 2000 * u.cm, 200.0
assert r1["a"] == 20 * u.m
assert r1["b"] == 200
assert t["a"][1] == 20 * u.m
assert t["b"][1] == 200.0
# Set column slice of column slice
r1_abc["a", "c"] = -1 * u.m, -10
assert t["a"][1] == -1 * u.m
assert t["b"][1] == 200.0
assert t["c"][1] == -10.0
# Bad column name
with pytest.raises(KeyError) as err:
t[1]["a", "not_there"]
assert "'not_there'" in str(err.value)
# Too many values
with pytest.raises(ValueError) as err:
t[1]["a", "b"] = 1 * u.m, 2, 3
assert "right hand side must be a sequence" in str(err.value)
# Something without a length
with pytest.raises(ValueError) as err:
t[1]["a", "b"] = 1
assert "right hand side must be a sequence" in str(err.value)
def test_row_tuple_column_slice_transaction():
"""
Test that setting a row that fails part way through does not
change the table at all.
"""
t = table.QTable(
[
[10.0, 20.0, 30.0],
[1, 2, 3] * u.m,
],
names=["a", "b"],
)
tc = t.copy()
# First one succeeds but second fails.
with pytest.raises(ValueError) as err:
t[1]["a", "b"] = (-1, -1 * u.s) # Bad unit
assert "'s' (time) and 'm' (length) are not convertible" in str(err.value)
assert t[1] == tc[1]
def test_uint_indexing():
"""
Test that accessing a row with an unsigned integer
works as with a signed integer. Similarly tests
that printing such a row works.
This is non-trivial: adding a signed and unsigned
integer in numpy results in a float, which is an
invalid slice index.
Regression test for gh-7464.
"""
t = table.Table([[1.0, 2.0, 3.0]], names="a")
assert t["a"][1] == 2.0
assert t["a"][np.int_(1)] == 2.0
assert t["a"][np.uint(1)] == 2.0
assert t[np.uint(1)]["a"] == 2.0
trepr = [
"<Row index=1>",
" a ",
"float64",
"-------",
" 2.0",
]
assert repr(t[1]).splitlines() == trepr
assert repr(t[np.int_(1)]).splitlines() == trepr
assert repr(t[np.uint(1)]).splitlines() == trepr
def test_row_get():
row = table.Table({"a": [2, 4], "b": [3, 9]})[0]
assert row.get("a") == 2
assert row.get("x") is None
assert row.get("b", -1) == 3
assert row.get("y", -1) == -1
8c1660df56bf5dac0b900ec1ed1af03bb332c2773a993450513a5bae143f2501 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import pickle
from io import StringIO
import numpy as np
import pytest
from astropy import coordinates, time
from astropy import units as u
from astropy.coordinates import EarthLocation, SkyCoord
from astropy.coordinates.tests.helper import skycoord_equal
from astropy.coordinates.tests.test_representation import representation_equal
from astropy.table import (
Column,
NdarrayMixin,
QTable,
Table,
hstack,
join,
serialize,
table_helpers,
vstack,
)
from astropy.table.column import BaseColumn
from astropy.table.serialize import represent_mixins_as_columns
from astropy.table.table_helpers import ArrayWrapper
from astropy.utils.data_info import ParentDtypeInfo
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.metadata import MergeConflictWarning
from .conftest import MIXIN_COLS
def test_attributes(mixin_cols):
"""
Required attributes for a column can be set.
"""
m = mixin_cols["m"]
m.info.name = "a"
assert m.info.name == "a"
m.info.description = "a"
assert m.info.description == "a"
# Cannot set unit for these classes
if isinstance(
m,
(
u.Quantity,
coordinates.SkyCoord,
time.Time,
time.TimeDelta,
coordinates.BaseRepresentationOrDifferential,
coordinates.StokesCoord,
),
):
with pytest.raises(AttributeError):
m.info.unit = u.m
else:
m.info.unit = u.m
assert m.info.unit is u.m
m.info.format = "a"
assert m.info.format == "a"
m.info.meta = {"a": 1}
assert m.info.meta == {"a": 1}
with pytest.raises(AttributeError):
m.info.bad_attr = 1
with pytest.raises(AttributeError):
m.info.bad_attr
def check_mixin_type(table, table_col, in_col):
# We check for QuantityInfo rather than just isinstance(col, u.Quantity)
# since we want to treat EarthLocation as a mixin, even though it is
# a Quantity subclass.
if (
isinstance(in_col.info, u.QuantityInfo) and type(table) is not QTable
) or isinstance(in_col, Column):
assert type(table_col) is table.ColumnClass
else:
assert type(table_col) is type(in_col)
# Make sure in_col got copied and creating table did not touch it
assert in_col.info.name is None
def test_make_table(table_types, mixin_cols):
"""
Make a table with the columns in mixin_cols, which is an ordered dict of
three cols: 'a' and 'b' are table_types.Column type, and 'm' is a mixin.
"""
t = table_types.Table(mixin_cols)
check_mixin_type(t, t["m"], mixin_cols["m"])
cols = list(mixin_cols.values())
t = table_types.Table(cols, names=("i", "a", "b", "m"))
check_mixin_type(t, t["m"], mixin_cols["m"])
t = table_types.Table(cols)
check_mixin_type(t, t["col3"], mixin_cols["m"])
def test_io_ascii_write():
"""
Test that table with mixin column can be written by io.ascii for
every pure Python writer. No validation of the output is done,
this just confirms no exceptions.
"""
from astropy.io.ascii.connect import _get_connectors_table
t = QTable(MIXIN_COLS)
for fmt in _get_connectors_table():
if fmt["Write"] and ".fast_" not in fmt["Format"]:
out = StringIO()
t.write(out, format=fmt["Format"])
def test_votable_quantity_write(tmp_path):
"""
Test that table with Quantity mixin column can be round-tripped by
io.votable. Note that FITS and HDF5 mixin support are tested (much more
thoroughly) in their respective subpackage tests
(io/fits/tests/test_connect.py and io/misc/tests/test_hdf5.py).
"""
t = QTable()
t["a"] = u.Quantity([1, 2, 4], unit="nm")
filename = tmp_path / "table-tmp"
t.write(filename, format="votable", overwrite=True)
qt = QTable.read(filename, format="votable")
assert isinstance(qt["a"], u.Quantity)
assert qt["a"].unit == "nm"
@pytest.mark.remote_data
@pytest.mark.parametrize("table_types", (Table, QTable))
def test_io_time_write_fits_standard(tmp_path, table_types):
"""
Test that table with Time mixin columns can be written by io.fits.
Validation of the output is done. Test that io.fits writes a table
containing Time mixin columns that can be partially round-tripped
(metadata scale, location).
Note that we postpone checking the "local" scale, since that cannot
be done with format 'cxcsec', as it requires an epoch.
"""
t = table_types([[1, 2], ["string", "column"]])
for scale in time.STANDARD_TIME_SCALES:
t["a" + scale] = time.Time(
[[1, 2], [3, 4]],
format="cxcsec",
scale=scale,
location=EarthLocation(-2446354, 4237210, 4077985, unit="m"),
)
t["b" + scale] = time.Time(
["1999-01-01T00:00:00.123456789", "2010-01-01T00:00:00"], scale=scale
)
t["c"] = [3.0, 4.0]
filename = tmp_path / "table-tmp"
# Show that FITS format succeeds
with pytest.warns(
AstropyUserWarning,
match=(
'Time Column "btai" has no specified location, '
"but global Time Position is present"
),
):
t.write(filename, format="fits", overwrite=True)
with pytest.warns(
AstropyUserWarning,
match='Time column reference position "TRPOSn" is not specified',
):
tm = table_types.read(filename, format="fits", astropy_native=True)
for scale in time.STANDARD_TIME_SCALES:
for ab in ("a", "b"):
name = ab + scale
# Assert that the time columns are read as Time
assert isinstance(tm[name], time.Time)
# Assert that the scales round-trip
assert tm[name].scale == t[name].scale
# Assert that the format is jd
assert tm[name].format == "jd"
# Assert that the location round-trips
assert tm[name].location == t[name].location
# Finally assert that the column data round-trips
assert (tm[name] == t[name]).all()
for name in ("col0", "col1", "c"):
# Assert that the non-time columns are read as Column
assert isinstance(tm[name], Column)
# Assert that the non-time columns' data round-trips
assert (tm[name] == t[name]).all()
# Test for conversion of time data to its value, as defined by its format
for scale in time.STANDARD_TIME_SCALES:
for ab in ("a", "b"):
name = ab + scale
t[name].info.serialize_method["fits"] = "formatted_value"
t.write(filename, format="fits", overwrite=True)
tm = table_types.read(filename, format="fits")
for scale in time.STANDARD_TIME_SCALES:
for ab in ("a", "b"):
name = ab + scale
assert not isinstance(tm[name], time.Time)
assert (tm[name] == t[name].value).all()
@pytest.mark.parametrize("table_types", (Table, QTable))
def test_io_time_write_fits_local(tmp_path, table_types):
"""
Test that table with a Time mixin with scale local can also be written
by io.fits. Like ``test_io_time_write_fits_standard`` above, but avoiding
``cxcsec`` format, which requires an epoch and thus cannot be used for a
local time scale.
"""
t = table_types([[1, 2], ["string", "column"]])
t["a_local"] = time.Time(
[[50001, 50002], [50003, 50004]],
format="mjd",
scale="local",
location=EarthLocation(-2446354, 4237210, 4077985, unit="m"),
)
t["b_local"] = time.Time(
["1999-01-01T00:00:00.123456789", "2010-01-01T00:00:00"], scale="local"
)
t["c"] = [3.0, 4.0]
filename = tmp_path / "table-tmp"
# Show that FITS format succeeds
with pytest.warns(
AstropyUserWarning, match='Time Column "b_local" has no specified location'
):
t.write(filename, format="fits", overwrite=True)
with pytest.warns(
AstropyUserWarning,
match='Time column reference position "TRPOSn" is not specified.',
):
tm = table_types.read(filename, format="fits", astropy_native=True)
for ab in ("a", "b"):
name = ab + "_local"
# Assert that the time columns are read as Time
assert isinstance(tm[name], time.Time)
# Assert that the scales round-trip
assert tm[name].scale == t[name].scale
# Assert that the format is jd
assert tm[name].format == "jd"
# Assert that the location round-trips
assert tm[name].location == t[name].location
# Finally assert that the column data round-trips
assert (tm[name] == t[name]).all()
for name in ("col0", "col1", "c"):
# Assert that the non-time columns are read as Column
assert isinstance(tm[name], Column)
# Assert that the non-time columns' data round-trips
assert (tm[name] == t[name]).all()
# Test for conversion of time data to its value, as defined by its format.
for ab in ("a", "b"):
name = ab + "_local"
t[name].info.serialize_method["fits"] = "formatted_value"
t.write(filename, format="fits", overwrite=True)
tm = table_types.read(filename, format="fits")
for ab in ("a", "b"):
name = ab + "_local"
assert not isinstance(tm[name], time.Time)
assert (tm[name] == t[name].value).all()
def test_votable_mixin_write_fail(mixin_cols):
"""
Test that table with mixin columns (excluding Quantity) cannot be written by
io.votable.
"""
t = QTable(mixin_cols)
# Only do this test if there are unsupported column types (i.e. anything besides
# BaseColumn and Quantity class instances).
unsupported_cols = t.columns.not_isinstance((BaseColumn, u.Quantity))
if not unsupported_cols:
pytest.skip("no unsupported column types")
out = StringIO()
with pytest.raises(ValueError) as err:
t.write(out, format="votable")
assert "cannot write table with mixin column(s)" in str(err.value)
def test_join(table_types):
"""
Join tables with mixin cols. Use column "i" as proxy for what the
result should be for each mixin.
"""
t1 = table_types.Table()
t1["a"] = table_types.Column(["a", "b", "b", "c"])
t1["i"] = table_types.Column([0, 1, 2, 3])
for name, col in MIXIN_COLS.items():
t1[name] = col
t2 = table_types.Table(t1)
t2["a"] = ["b", "c", "a", "d"]
for name, col in MIXIN_COLS.items():
t1[name].info.description = name
t2[name].info.description = name + "2"
for join_type in ("inner", "left"):
t12 = join(t1, t2, keys="a", join_type=join_type)
idx1 = t12["i_1"]
idx2 = t12["i_2"]
for name, col in MIXIN_COLS.items():
name1 = name + "_1"
name2 = name + "_2"
assert_table_name_col_equal(t12, name1, col[idx1])
assert_table_name_col_equal(t12, name2, col[idx2])
assert t12[name1].info.description == name
assert t12[name2].info.description == name + "2"
for join_type in ("outer", "right"):
with pytest.raises(NotImplementedError) as exc:
t12 = join(t1, t2, keys="a", join_type=join_type)
assert "join requires masking column" in str(exc.value)
with pytest.raises(TypeError) as exc:
t12 = join(t1, t2, keys=["a", "skycoord"])
assert "one or more key columns are not sortable" in str(exc.value)
# Join does work for a mixin which is a subclass of np.ndarray
with pytest.warns(
MergeConflictWarning,
match="In merged column 'quantity' the 'description' attribute does not match",
):
t12 = join(t1, t2, keys=["quantity"])
assert np.all(t12["a_1"] == t1["a"])
def test_hstack(table_types):
"""
Hstack tables with mixin cols. Use column "i" as proxy for what the
result should be for each mixin.
"""
t1 = table_types.Table()
t1["i"] = table_types.Column([0, 1, 2, 3])
for name, col in MIXIN_COLS.items():
t1[name] = col
t1[name].info.description = name
t1[name].info.meta = {"a": 1}
for join_type in ("inner", "outer"):
for chop in (True, False):
t2 = table_types.Table(t1)
if chop:
t2 = t2[:-1]
if join_type == "outer":
with pytest.raises(NotImplementedError) as exc:
t12 = hstack([t1, t2], join_type=join_type)
assert "hstack requires masking column" in str(exc.value)
continue
t12 = hstack([t1, t2], join_type=join_type)
idx1 = t12["i_1"]
idx2 = t12["i_2"]
for name, col in MIXIN_COLS.items():
name1 = name + "_1"
name2 = name + "_2"
assert_table_name_col_equal(t12, name1, col[idx1])
assert_table_name_col_equal(t12, name2, col[idx2])
for attr in ("description", "meta"):
assert getattr(t1[name].info, attr) == getattr(
t12[name1].info, attr
)
assert getattr(t2[name].info, attr) == getattr(
t12[name2].info, attr
)
def assert_table_name_col_equal(t, name, col):
"""
Assert all(t[name] == col), with special handling for known mixin cols.
"""
if isinstance(col, coordinates.SkyCoord):
assert np.all(t[name].ra == col.ra)
assert np.all(t[name].dec == col.dec)
elif isinstance(col, coordinates.BaseRepresentationOrDifferential):
assert np.all(representation_equal(t[name], col))
elif isinstance(col, u.Quantity):
if type(t) is QTable:
assert np.all(t[name] == col)
elif isinstance(col, table_helpers.ArrayWrapper):
assert np.all(t[name].data == col.data)
else:
assert np.all(t[name] == col)
def test_get_items(mixin_cols):
"""
Test that slicing / indexing table gives right values and col attrs inherit
"""
attrs = ("name", "unit", "dtype", "format", "description", "meta")
m = mixin_cols["m"]
m.info.name = "m"
m.info.format = "{0}"
m.info.description = "d"
m.info.meta = {"a": 1}
t = QTable([m])
for item in ([1, 3], np.array([0, 2]), slice(1, 3)):
t2 = t[item]
m2 = m[item]
assert_table_name_col_equal(t2, "m", m[item])
for attr in attrs:
assert getattr(t2["m"].info, attr) == getattr(m.info, attr)
assert getattr(m2.info, attr) == getattr(m.info, attr)
def test_info_preserved_pickle_copy_init(mixin_cols):
"""
Test copy, pickle, and init from class roundtrip preserve info. This
tests not only the mixin classes but a regular column as well.
"""
def pickle_roundtrip(c):
return pickle.loads(pickle.dumps(c))
def init_from_class(c):
return c.__class__(c)
attrs = ("name", "unit", "dtype", "format", "description", "meta")
for colname in ("i", "m"):
m = mixin_cols[colname]
m.info.name = colname
m.info.format = "{0}"
m.info.description = "d"
m.info.meta = {"a": 1}
for func in (copy.copy, copy.deepcopy, pickle_roundtrip, init_from_class):
m2 = func(m)
for attr in attrs:
# non-native byteorder not preserved by the last 2 funcs, _except_ for structured dtype
if (
attr != "dtype"
or getattr(m.info.dtype, "isnative", True)
or m.info.dtype.name.startswith("void")
or func in (copy.copy, copy.deepcopy)
):
original = getattr(m.info, attr)
else:
# func does not preserve byteorder, check against (native) type.
original = m.info.dtype.newbyteorder("=")
assert getattr(m2.info, attr) == original
def check_share_memory(col1, col2, copy):
"""Check whether data attributes in col1 and col2 share memory.
If copy=True, this should not be the case for any, while
if copy=False, all should share memory.
"""
if isinstance(col1, SkyCoord):
# For SkyCoord, .info does not access actual data by default,
# but rather attributes like .ra, which are copies.
map1 = col1.data.info._represent_as_dict()
map2 = col2.data.info._represent_as_dict()
else:
map1 = col1.info._represent_as_dict()
map2 = col2.info._represent_as_dict()
# Check array attributes only (in principle, could iterate on, e.g.,
# differentials in representations, but this is enough for table).
shared = [
np.may_share_memory(v1, v2)
for (v1, v2) in zip(map1.values(), map2.values())
if isinstance(v1, np.ndarray) and v1.shape
]
if copy:
assert not any(shared)
else:
assert all(shared)
@pytest.mark.parametrize("copy", [True, False])
def test_add_column(mixin_cols, copy):
"""
Test that adding a column preserves values and attributes.
For copy=True, the data should be independent;
for copy=False, the data should be shared, but the instance independent.
"""
attrs = ("name", "unit", "dtype", "format", "description", "meta")
m = mixin_cols["m"]
assert m.info.name is None
# Make sure adding column in various ways doesn't touch info.
t = QTable([m], names=["a"], copy=copy)
assert m.info.name is None
check_share_memory(m, t["a"], copy=copy)
t["new"] = m
assert m.info.name is None
check_share_memory(m, t["new"], copy=True)
m.info.name = "m"
m.info.format = "{0}"
m.info.description = "d"
m.info.meta = {"a": 1}
t = QTable([m], copy=copy)
assert t.colnames == ["m"]
check_share_memory(m, t["m"], copy=copy)
t = QTable([m], names=["m1"], copy=copy)
assert m.info.name == "m"
assert t.colnames == ["m1"]
check_share_memory(m, t["m1"], copy=copy)
# Add columns m2 and m3 by two different methods and test expected equality
t["m2"] = m
check_share_memory(m, t["m2"], copy=True)
m.info.name = "m3"
t.add_columns([m], copy=copy)
check_share_memory(m, t["m3"], copy=copy)
for name in ("m2", "m3"):
assert_table_name_col_equal(t, name, m)
for attr in attrs:
if attr != "name":
assert getattr(t["m1"].info, attr) == getattr(t[name].info, attr)
# Also check that one can set using a scalar.
s = m[0]
if type(s) is type(m) and "info" in s.__dict__:
# We're not going to worry about testing classes for which scalars
# are a different class than the real array, or where info is not copied.
t["s"] = m[0]
assert_table_name_col_equal(t, "s", m[0])
check_share_memory(m, t["s"], copy=True)
for attr in attrs:
if attr != "name":
assert getattr(t["m1"].info, attr) == getattr(t["s"].info, attr)
# While we're at it, also check a length-1 table.
t = QTable([m[1:2]], names=["m"], copy=copy)
check_share_memory(m, t["m"], copy=copy)
if type(s) is type(m) and "info" in s.__dict__:
t["s"] = m[0]
assert_table_name_col_equal(t, "s", m[0])
for attr in attrs:
if attr != "name":
assert getattr(t["m1"].info, attr) == getattr(t["s"].info, attr)
def test_vstack():
"""
Vstack tables with mixin cols.
"""
t1 = QTable(MIXIN_COLS)
t2 = QTable(MIXIN_COLS)
with pytest.raises(NotImplementedError):
vstack([t1, t2])
def test_insert_row(mixin_cols):
"""
Test inserting a row, which works for Column, Quantity, Time, TimeDelta and SkyCoord.
"""
t = QTable(mixin_cols)
t0 = t.copy()
t["m"].info.description = "d"
idxs = [0, -1, 1, 2, 3]
if isinstance(
t["m"], (u.Quantity, Column, time.Time, time.TimeDelta, coordinates.SkyCoord)
):
t.insert_row(1, t[-1])
for name in t.colnames:
col = t[name]
if isinstance(col, coordinates.SkyCoord):
assert skycoord_equal(col, t0[name][idxs])
else:
assert np.all(col == t0[name][idxs])
assert t["m"].info.description == "d"
else:
with pytest.raises(ValueError) as exc:
t.insert_row(1, t[-1])
assert "Unable to insert row" in str(exc.value)
def test_insert_row_bad_unit():
"""
Insert a row into a QTable with the wrong unit
"""
t = QTable([[1] * u.m])
with pytest.raises(ValueError) as exc:
t.insert_row(0, (2 * u.m / u.s,))
assert "'m / s' (speed/velocity) and 'm' (length) are not convertible" in str(
exc.value
)
def test_convert_np_array(mixin_cols):
"""
Test that converting to a numpy array preserves the mixin column's dtype
kind (falling back to an object dtype for mixins without a dtype).
"""
t = QTable(mixin_cols)
ta = t.as_array()
m = mixin_cols["m"]
dtype_kind = m.dtype.kind if hasattr(m, "dtype") else "O"
assert ta["m"].dtype.kind == dtype_kind
def test_assignment_and_copy():
"""
Test that assignment of an int, slice, and fancy index works.
Along the way test that copying table works.
"""
for name in ("quantity", "arraywrap"):
m = MIXIN_COLS[name]
t0 = QTable([m], names=["m"])
for i0, i1 in (
(1, 2),
(slice(0, 2), slice(1, 3)),
(np.array([1, 2]), np.array([2, 3])),
):
t = t0.copy()
t["m"][i0] = m[i1]
if name == "arraywrap":
assert np.all(t["m"].data[i0] == m.data[i1])
assert np.all(t0["m"].data[i0] == m.data[i0])
assert np.all(t0["m"].data[i0] != t["m"].data[i0])
else:
assert np.all(t["m"][i0] == m[i1])
assert np.all(t0["m"][i0] == m[i0])
assert np.all(t0["m"][i0] != t["m"][i0])
def test_conversion_qtable_table():
"""
Test that a table round trips from QTable => Table => QTable
"""
qt = QTable(MIXIN_COLS)
names = qt.colnames
for name in names:
qt[name].info.description = name
t = Table(qt)
for name in names:
assert t[name].info.description == name
if name == "quantity":
assert np.all(t["quantity"] == qt["quantity"].value)
assert np.all(t["quantity"].unit is qt["quantity"].unit)
assert isinstance(t["quantity"], t.ColumnClass)
else:
assert_table_name_col_equal(t, name, qt[name])
qt2 = QTable(qt)
for name in names:
assert qt2[name].info.description == name
assert_table_name_col_equal(qt2, name, qt[name])
def test_setitem_as_column_name():
"""
Test for mixin-related regression described in #3321.
"""
t = Table()
t["a"] = ["x", "y"]
t["b"] = "b" # Previously was failing with KeyError
assert np.all(t["a"] == ["x", "y"])
assert np.all(t["b"] == ["b", "b"])
def test_quantity_representation():
"""
    Test that the table representation of quantities shows the unit in the
    header row rather than attached to each value.
"""
t = QTable([[1, 2] * u.m])
assert t.pformat() == [
"col0",
" m ",
"----",
" 1.0",
" 2.0",
]
def test_representation_representation():
"""
Test that Representations are represented correctly.
"""
# With no unit we get "None" in the unit row
c = coordinates.CartesianRepresentation([0], [1], [0], unit=u.one)
t = Table([c])
assert t.pformat() == [
" col0 ",
"------------",
"(0., 1., 0.)",
]
c = coordinates.CartesianRepresentation([0], [1], [0], unit="m")
t = Table([c])
assert t.pformat() == [
" col0 ",
" m ",
"------------",
"(0., 1., 0.)",
]
c = coordinates.SphericalRepresentation([10] * u.deg, [20] * u.deg, [1] * u.pc)
t = Table([c])
assert t.pformat() == [
" col0 ",
" deg, deg, pc ",
"--------------",
"(10., 20., 1.)",
]
c = coordinates.UnitSphericalRepresentation([10] * u.deg, [20] * u.deg)
t = Table([c])
assert t.pformat() == [
" col0 ",
" deg ",
"----------",
"(10., 20.)",
]
c = coordinates.SphericalCosLatDifferential(
[10] * u.mas / u.yr, [2] * u.mas / u.yr, [10] * u.km / u.s
)
t = Table([c])
assert t.pformat() == [
" col0 ",
"mas / yr, mas / yr, km / s",
"--------------------------",
" (10., 2., 10.)",
]
def test_skycoord_representation():
"""
Test that skycoord representation works, both in the way that the
values are output and in changing the frame representation.
"""
# With no unit we get "None" in the unit row
c = coordinates.SkyCoord([0], [1], [0], representation_type="cartesian")
t = Table([c])
assert t.pformat() == [
" col0 ",
"None,None,None",
"--------------",
" 0.0,1.0,0.0",
]
# Test that info works with a dynamically changed representation
c = coordinates.SkyCoord([0], [1], [0], unit="m", representation_type="cartesian")
t = Table([c])
assert t.pformat() == [
" col0 ",
" m,m,m ",
"-----------",
"0.0,1.0,0.0",
]
t["col0"].representation_type = "unitspherical"
assert t.pformat() == [
" col0 ",
"deg,deg ",
"--------",
"90.0,0.0",
]
t["col0"].representation_type = "cylindrical"
assert t.pformat() == [
" col0 ",
" m,deg,m ",
"------------",
"1.0,90.0,0.0",
]
@pytest.mark.parametrize("as_ndarray_mixin", [True, False])
def test_ndarray_mixin(as_ndarray_mixin):
"""
Test directly adding various forms of structured ndarray columns to a table.
Adding as NdarrayMixin is expected to be somewhat unusual after #12644
(which provides full support for structured array Column's). This test shows
that the end behavior is the same in both cases.
"""
a = np.array([(1, "a"), (2, "b"), (3, "c"), (4, "d")], dtype="<i4," + "|U1")
b = np.array(
[(10, "aa"), (20, "bb"), (30, "cc"), (40, "dd")],
dtype=[("x", "i4"), ("y", "U2")],
)
c = np.rec.fromrecords(
[(100.0, "raa"), (200.0, "rbb"), (300.0, "rcc"), (400.0, "rdd")],
names=["rx", "ry"],
)
d = np.arange(8, dtype="i8").reshape(4, 2)
if as_ndarray_mixin:
a = a.view(NdarrayMixin)
b = b.view(NdarrayMixin)
c = c.view(NdarrayMixin)
d = d.view(NdarrayMixin)
class_exp = NdarrayMixin
else:
class_exp = Column
    # Add one column during initialization and the rest as new columns.
t = Table([a], names=["a"])
t["b"] = b
t["c"] = c
t["d"] = d
assert isinstance(t["a"], class_exp)
assert t["a"][1][1] == a[1][1]
assert t["a"][2][0] == a[2][0]
assert t[1]["a"][1] == a[1][1]
assert t[2]["a"][0] == a[2][0]
assert isinstance(t["b"], class_exp)
assert t["b"][1]["x"] == b[1]["x"]
assert t["b"][1]["y"] == b[1]["y"]
assert t[1]["b"]["x"] == b[1]["x"]
assert t[1]["b"]["y"] == b[1]["y"]
assert isinstance(t["c"], class_exp)
assert t["c"][1]["rx"] == c[1]["rx"]
assert t["c"][1]["ry"] == c[1]["ry"]
assert t[1]["c"]["rx"] == c[1]["rx"]
assert t[1]["c"]["ry"] == c[1]["ry"]
assert isinstance(t["d"], class_exp)
assert t["d"][1][0] == d[1][0]
assert t["d"][1][1] == d[1][1]
assert t[1]["d"][0] == d[1][0]
assert t[1]["d"][1] == d[1][1]
assert t.pformat(show_dtype=True) == [
" a [f0, f1] b [x, y] c [rx, ry] d ",
"(int32, str1) (int32, str2) (float64, str3) int64[2]",
"------------- ------------- --------------- --------",
" (1, 'a') (10, 'aa') (100., 'raa') 0 .. 1",
" (2, 'b') (20, 'bb') (200., 'rbb') 2 .. 3",
" (3, 'c') (30, 'cc') (300., 'rcc') 4 .. 5",
" (4, 'd') (40, 'dd') (400., 'rdd') 6 .. 7",
]
def test_possible_string_format_functions():
"""
The QuantityInfo info class for Quantity implements a
possible_string_format_functions() method that overrides the
standard pprint._possible_string_format_functions() function.
Test this.
"""
t = QTable([[1, 2] * u.m])
t["col0"].info.format = "%.3f"
assert t.pformat() == [
" col0",
" m ",
"-----",
"1.000",
"2.000",
]
t["col0"].info.format = "hi {:.3f}"
assert t.pformat() == [
" col0 ",
" m ",
"--------",
"hi 1.000",
"hi 2.000",
]
t["col0"].info.format = ".4f"
assert t.pformat() == [
" col0 ",
" m ",
"------",
"1.0000",
"2.0000",
]
def test_rename_mixin_columns(mixin_cols):
"""
Rename a mixin column.
"""
t = QTable(mixin_cols)
tc = t.copy()
t.rename_column("m", "mm")
assert t.colnames == ["i", "a", "b", "mm"]
if isinstance(t["mm"], table_helpers.ArrayWrapper):
assert np.all(t["mm"].data == tc["m"].data)
elif isinstance(t["mm"], coordinates.SkyCoord):
assert np.all(t["mm"].ra == tc["m"].ra)
assert np.all(t["mm"].dec == tc["m"].dec)
elif isinstance(t["mm"], coordinates.BaseRepresentationOrDifferential):
assert np.all(representation_equal(t["mm"], tc["m"]))
else:
assert np.all(t["mm"] == tc["m"])
def test_represent_mixins_as_columns_unit_fix():
"""
    If the unit is invalid for a column that gets serialized, this used to
    cause an exception. Fixed in #7481.
"""
t = Table({"a": [1, 2]}, masked=True)
t["a"].unit = "not a valid unit"
t["a"].mask[1] = True
serialize.represent_mixins_as_columns(t)
def test_primary_data_column_gets_description():
"""
If the mixin defines a primary data column, that should get the
description, format, etc., so no __info__ should be needed.
"""
t = QTable({"a": [1, 2] * u.m})
t["a"].info.description = "parrot"
t["a"].info.format = "7.2f"
tser = serialize.represent_mixins_as_columns(t)
assert "__info__" not in tser.meta["__serialized_columns__"]["a"]
assert tser["a"].format == "7.2f"
assert tser["a"].description == "parrot"
def test_skycoord_with_velocity():
# Regression test for gh-6447
sc = SkyCoord([1], [2], unit="deg", galcen_v_sun=None)
t = Table([sc])
s = StringIO()
t.write(s, format="ascii.ecsv", overwrite=True)
s.seek(0)
t2 = Table.read(s.read(), format="ascii.ecsv")
assert skycoord_equal(t2["col0"], sc)
@pytest.mark.parametrize("copy", [True, False])
@pytest.mark.parametrize("table_cls", [Table, QTable])
def test_ensure_input_info_is_unchanged(table_cls, copy):
"""If a mixin input to a table has no info, it should stay that way.
    This is because having 'info' slows down slicing, etc.
See gh-11066.
"""
q = [1, 2] * u.m
assert "info" not in q.__dict__
t = table_cls([q], names=["q"], copy=copy)
assert "info" not in q.__dict__
t = table_cls([q], copy=copy)
assert "info" not in q.__dict__
t = table_cls({"q": q}, copy=copy)
assert "info" not in q.__dict__
t["q2"] = q
assert "info" not in q.__dict__
sc = SkyCoord([1, 2], [2, 3], unit="deg")
t["sc"] = sc
assert "info" not in sc.__dict__
def test_bad_info_class():
"""Make a mixin column class that does not trigger the machinery to generate
a pure column representation"""
class MyArrayWrapper(ArrayWrapper):
info = ParentDtypeInfo()
t = Table()
t["tm"] = MyArrayWrapper([0, 1, 2])
out = StringIO()
match = (
r"failed to represent column 'tm' \(MyArrayWrapper\) as one or more Column"
r" subclasses"
)
with pytest.raises(TypeError, match=match):
represent_mixins_as_columns(t)
|
22379d278b70453be56f30ac600f7f8097e90443af87def830461613c62e8257 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_almost_equal
from astropy.convolution.convolve import convolve, convolve_fft
from astropy.convolution.kernels import (
AiryDisk2DKernel,
Box1DKernel,
Box2DKernel,
CustomKernel,
Gaussian1DKernel,
Gaussian2DKernel,
Kernel1D,
Kernel2D,
Model1DKernel,
Model2DKernel,
RickerWavelet1DKernel,
RickerWavelet2DKernel,
Ring2DKernel,
Tophat2DKernel,
Trapezoid1DKernel,
TrapezoidDisk2DKernel,
)
from astropy.convolution.utils import KernelArithmeticError, KernelSizeError
from astropy.modeling.models import Box2D, Gaussian1D, Gaussian2D
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.exceptions import AstropyUserWarning
WIDTHS_ODD = [3, 5, 7, 9]
WIDTHS_EVEN = [2, 4, 8, 16]
MODES = ["center", "linear_interp", "oversample", "integrate"]
KERNEL_TYPES = [
Gaussian1DKernel,
Gaussian2DKernel,
Box1DKernel,
Box2DKernel,
Trapezoid1DKernel,
TrapezoidDisk2DKernel,
RickerWavelet1DKernel,
Tophat2DKernel,
AiryDisk2DKernel,
Ring2DKernel,
]
NUMS = [1, 1.0, np.float32(1.0), np.float64(1.0)]
# Test data
delta_pulse_1D = np.zeros(81)
delta_pulse_1D[40] = 1
delta_pulse_2D = np.zeros((81, 81))
delta_pulse_2D[40, 40] = 1
random_data_1D = np.random.rand(61)
random_data_2D = np.random.rand(61, 61)
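# The delta_pulse arrays are unit impulses: convolving them with a kernel
# reproduces the kernel itself centered on the impulse, which is what the
# comparisons between convolve, convolve_fft and the SciPy filters below rely
# on.  The random arrays exercise the same code paths on unstructured data.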
class TestKernels:
"""
Test class for the built-in convolution kernels.
"""
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy")
@pytest.mark.parametrize("width", WIDTHS_ODD)
def test_scipy_filter_gaussian(self, width):
"""
Test GaussianKernel against SciPy ndimage gaussian filter.
"""
from scipy.ndimage import gaussian_filter
gauss_kernel_1D = Gaussian1DKernel(width)
gauss_kernel_1D.normalize()
gauss_kernel_2D = Gaussian2DKernel(width)
gauss_kernel_2D.normalize()
astropy_1D = convolve(delta_pulse_1D, gauss_kernel_1D, boundary="fill")
astropy_2D = convolve(delta_pulse_2D, gauss_kernel_2D, boundary="fill")
scipy_1D = gaussian_filter(delta_pulse_1D, width)
scipy_2D = gaussian_filter(delta_pulse_2D, width)
assert_almost_equal(astropy_1D, scipy_1D, decimal=12)
assert_almost_equal(astropy_2D, scipy_2D, decimal=12)
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy")
@pytest.mark.parametrize("width", WIDTHS_ODD)
def test_scipy_filter_gaussian_laplace(self, width):
"""
Test RickerWavelet kernels against SciPy ndimage gaussian laplace filters.
"""
from scipy.ndimage import gaussian_laplace
ricker_kernel_1D = RickerWavelet1DKernel(width)
ricker_kernel_2D = RickerWavelet2DKernel(width)
astropy_1D = convolve(
delta_pulse_1D, ricker_kernel_1D, boundary="fill", normalize_kernel=False
)
astropy_2D = convolve(
delta_pulse_2D, ricker_kernel_2D, boundary="fill", normalize_kernel=False
)
MESSAGE = r"sum is close to zero"
with pytest.raises(ValueError, match=MESSAGE):
astropy_1D = convolve(
delta_pulse_1D, ricker_kernel_1D, boundary="fill", normalize_kernel=True
)
with pytest.raises(ValueError, match=MESSAGE):
astropy_2D = convolve(
delta_pulse_2D, ricker_kernel_2D, boundary="fill", normalize_kernel=True
)
# The Laplace of Gaussian filter is an inverted Ricker Wavelet filter.
scipy_1D = -gaussian_laplace(delta_pulse_1D, width)
scipy_2D = -gaussian_laplace(delta_pulse_2D, width)
# There is a slight deviation in the normalization. They differ by a
# factor of ~1.0000284132604045. The reason is not known.
assert_almost_equal(astropy_1D, scipy_1D, decimal=5)
assert_almost_equal(astropy_2D, scipy_2D, decimal=5)
@pytest.mark.parametrize(
("kernel_type", "width"), list(itertools.product(KERNEL_TYPES, WIDTHS_ODD))
)
def test_delta_data(self, kernel_type, width):
"""
Test smoothing of an image with a single positive pixel
"""
if kernel_type == AiryDisk2DKernel and not HAS_SCIPY:
pytest.skip("Omitting AiryDisk2DKernel, which requires SciPy")
if not kernel_type == Ring2DKernel:
kernel = kernel_type(width)
else:
kernel = kernel_type(width, width * 0.2)
if kernel.dimension == 1:
c1 = convolve_fft(
delta_pulse_1D, kernel, boundary="fill", normalize_kernel=False
)
c2 = convolve(
delta_pulse_1D, kernel, boundary="fill", normalize_kernel=False
)
assert_almost_equal(c1, c2, decimal=12)
else:
c1 = convolve_fft(
delta_pulse_2D, kernel, boundary="fill", normalize_kernel=False
)
c2 = convolve(
delta_pulse_2D, kernel, boundary="fill", normalize_kernel=False
)
assert_almost_equal(c1, c2, decimal=12)
@pytest.mark.parametrize(
("kernel_type", "width"), list(itertools.product(KERNEL_TYPES, WIDTHS_ODD))
)
def test_random_data(self, kernel_type, width):
"""
Test smoothing of an image made of random noise
"""
if kernel_type == AiryDisk2DKernel and not HAS_SCIPY:
pytest.skip("Omitting AiryDisk2DKernel, which requires SciPy")
if not kernel_type == Ring2DKernel:
kernel = kernel_type(width)
else:
kernel = kernel_type(width, width * 0.2)
if kernel.dimension == 1:
c1 = convolve_fft(
random_data_1D, kernel, boundary="fill", normalize_kernel=False
)
c2 = convolve(
random_data_1D, kernel, boundary="fill", normalize_kernel=False
)
assert_almost_equal(c1, c2, decimal=12)
else:
c1 = convolve_fft(
random_data_2D, kernel, boundary="fill", normalize_kernel=False
)
c2 = convolve(
random_data_2D, kernel, boundary="fill", normalize_kernel=False
)
assert_almost_equal(c1, c2, decimal=12)
@pytest.mark.parametrize("width", WIDTHS_ODD)
def test_uniform_smallkernel(self, width):
"""
        Test smoothing of an image with a single positive pixel,
        using a plain ndarray kernel instead of a Kernel class.
"""
kernel = np.ones([width, width])
c2 = convolve_fft(delta_pulse_2D, kernel, boundary="fill")
c1 = convolve(delta_pulse_2D, kernel, boundary="fill")
assert_almost_equal(c1, c2, decimal=12)
@pytest.mark.parametrize("width", WIDTHS_ODD)
def test_smallkernel_vs_Box2DKernel(self, width):
"""
        Test that a normalized small ndarray kernel gives the same result as Box2DKernel.
"""
kernel1 = np.ones([width, width]) / width**2
kernel2 = Box2DKernel(width)
c2 = convolve_fft(delta_pulse_2D, kernel2, boundary="fill")
c1 = convolve_fft(delta_pulse_2D, kernel1, boundary="fill")
assert_almost_equal(c1, c2, decimal=12)
def test_convolve_1D_kernels(self):
"""
Check if convolving two kernels with each other works correctly.
"""
gauss_1 = Gaussian1DKernel(3)
gauss_2 = Gaussian1DKernel(4)
test_gauss_3 = Gaussian1DKernel(5)
with pytest.warns(
AstropyUserWarning, match=r"Both array and kernel " r"are Kernel instances"
):
gauss_3 = convolve(gauss_1, gauss_2)
assert np.all(np.abs((gauss_3 - test_gauss_3).array) < 0.01)
def test_convolve_2D_kernels(self):
"""
Check if convolving two kernels with each other works correctly.
"""
gauss_1 = Gaussian2DKernel(3)
gauss_2 = Gaussian2DKernel(4)
test_gauss_3 = Gaussian2DKernel(5)
with pytest.warns(
AstropyUserWarning, match=r"Both array and kernel " r"are Kernel instances"
):
gauss_3 = convolve(gauss_1, gauss_2)
assert np.all(np.abs((gauss_3 - test_gauss_3).array) < 0.01)
@pytest.mark.parametrize("number", NUMS)
def test_multiply_scalar(self, number):
"""
Check if multiplying a kernel with a scalar works correctly.
"""
gauss = Gaussian1DKernel(3)
gauss_new = number * gauss
assert_almost_equal(gauss_new.array, gauss.array * number, decimal=12)
@pytest.mark.parametrize("number", NUMS)
def test_multiply_scalar_type(self, number):
"""
Check if multiplying a kernel with a scalar works correctly.
"""
gauss = Gaussian1DKernel(3)
gauss_new = number * gauss
assert type(gauss_new) is Gaussian1DKernel
@pytest.mark.parametrize("number", NUMS)
def test_rmultiply_scalar_type(self, number):
"""
Check if multiplying a kernel with a scalar works correctly.
"""
gauss = Gaussian1DKernel(3)
gauss_new = gauss * number
assert type(gauss_new) is Gaussian1DKernel
def test_multiply_kernel1d(self):
"""Test that multiplying two 1D kernels raises an exception."""
gauss = Gaussian1DKernel(3)
msg = "Kernel operation not supported."
with pytest.raises(KernelArithmeticError, match=msg):
gauss * gauss
def test_multiply_kernel2d(self):
"""Test that multiplying two 2D kernels raises an exception."""
gauss = Gaussian2DKernel(3)
msg = "Kernel operation not supported."
with pytest.raises(KernelArithmeticError, match=msg):
gauss * gauss
def test_multiply_kernel1d_kernel2d(self):
"""
Test that multiplying a 1D kernel with a 2D kernel raises an
exception.
"""
msg = "Kernel operation not supported."
with pytest.raises(KernelArithmeticError, match=msg):
Gaussian1DKernel(3) * Gaussian2DKernel(3)
def test_add_kernel_scalar(self):
"""Test that adding a scalar to a kernel raises an exception."""
msg = "Kernel operation not supported."
with pytest.raises(KernelArithmeticError, match=msg):
Gaussian1DKernel(3) + 1
def test_model_1D_kernel(self):
"""
        Check Model1DKernel against Gaussian1DKernel.
"""
stddev = 5.0
gauss = Gaussian1D(1.0 / np.sqrt(2 * np.pi * stddev**2), 0, stddev)
model_gauss_kernel = Model1DKernel(gauss, x_size=21)
model_gauss_kernel.normalize()
gauss_kernel = Gaussian1DKernel(stddev, x_size=21)
assert_almost_equal(model_gauss_kernel.array, gauss_kernel.array, decimal=12)
def test_model_2D_kernel(self):
"""
        Check Model2DKernel against Gaussian2DKernel.
"""
stddev = 5.0
gauss = Gaussian2D(1.0 / (2 * np.pi * stddev**2), 0, 0, stddev, stddev)
model_gauss_kernel = Model2DKernel(gauss, x_size=21)
model_gauss_kernel.normalize()
gauss_kernel = Gaussian2DKernel(stddev, x_size=21)
assert_almost_equal(model_gauss_kernel.array, gauss_kernel.array, decimal=12)
def test_custom_1D_kernel(self):
"""
Check CustomKernel against Box1DKernel.
"""
# Define one dimensional array:
array = np.ones(5)
custom = CustomKernel(array)
custom.normalize()
box = Box1DKernel(5)
c2 = convolve(delta_pulse_1D, custom, boundary="fill")
c1 = convolve(delta_pulse_1D, box, boundary="fill")
assert_almost_equal(c1, c2, decimal=12)
def test_custom_2D_kernel(self):
"""
Check CustomKernel against Box2DKernel.
"""
        # Define two dimensional array:
array = np.ones((5, 5))
custom = CustomKernel(array)
custom.normalize()
box = Box2DKernel(5)
c2 = convolve(delta_pulse_2D, custom, boundary="fill")
c1 = convolve(delta_pulse_2D, box, boundary="fill")
assert_almost_equal(c1, c2, decimal=12)
def test_custom_1D_kernel_list(self):
"""
Check if CustomKernel works with lists.
"""
custom = CustomKernel([1, 1, 1, 1, 1])
assert custom.is_bool is True
def test_custom_2D_kernel_list(self):
"""
Check if CustomKernel works with lists.
"""
custom = CustomKernel([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
assert custom.is_bool is True
def test_custom_1D_kernel_zerosum(self):
"""
Check if CustomKernel works when the input array/list
sums to zero.
"""
array = [-2, -1, 0, 1, 2]
custom = CustomKernel(array)
with pytest.warns(
AstropyUserWarning,
match=r"kernel cannot be " r"normalized because it sums to zero",
):
custom.normalize()
assert custom.truncation == 1.0
assert custom._kernel_sum == 0.0
def test_custom_2D_kernel_zerosum(self):
"""
Check if CustomKernel works when the input array/list
sums to zero.
"""
array = [[0, -1, 0], [-1, 4, -1], [0, -1, 0]]
custom = CustomKernel(array)
with pytest.warns(
AstropyUserWarning,
match=r"kernel cannot be " r"normalized because it sums to zero",
):
custom.normalize()
assert custom.truncation == 1.0
assert custom._kernel_sum == 0.0
def test_custom_kernel_odd_error(self):
"""
        Check that CustomKernel raises an error if the array size is even.
"""
with pytest.raises(KernelSizeError):
CustomKernel([1, 1, 1, 1])
def test_add_1D_kernels(self):
"""
        Check if adding two 1D kernels works.
"""
box_1 = Box1DKernel(5)
box_2 = Box1DKernel(3)
box_3 = Box1DKernel(1)
box_sum_1 = box_1 + box_2 + box_3
box_sum_2 = box_2 + box_3 + box_1
box_sum_3 = box_3 + box_1 + box_2
ref = [
1 / 5.0,
1 / 5.0 + 1 / 3.0,
1 + 1 / 3.0 + 1 / 5.0,
1 / 5.0 + 1 / 3.0,
1 / 5.0,
]
assert_almost_equal(box_sum_1.array, ref, decimal=12)
assert_almost_equal(box_sum_2.array, ref, decimal=12)
assert_almost_equal(box_sum_3.array, ref, decimal=12)
# Assert that the kernels haven't changed
assert_almost_equal(box_1.array, [0.2, 0.2, 0.2, 0.2, 0.2], decimal=12)
assert_almost_equal(box_2.array, [1 / 3.0, 1 / 3.0, 1 / 3.0], decimal=12)
assert_almost_equal(box_3.array, [1], decimal=12)
def test_add_2D_kernels(self):
"""
        Check if adding two 2D kernels works.
"""
box_1 = Box2DKernel(3)
box_2 = Box2DKernel(1)
box_sum_1 = box_1 + box_2
box_sum_2 = box_2 + box_1
ref = [
[1 / 9.0, 1 / 9.0, 1 / 9.0],
[1 / 9.0, 1 + 1 / 9.0, 1 / 9.0],
[1 / 9.0, 1 / 9.0, 1 / 9.0],
]
ref_1 = [
[1 / 9.0, 1 / 9.0, 1 / 9.0],
[1 / 9.0, 1 / 9.0, 1 / 9.0],
[1 / 9.0, 1 / 9.0, 1 / 9.0],
]
assert_almost_equal(box_2.array, [[1]], decimal=12)
assert_almost_equal(box_1.array, ref_1, decimal=12)
assert_almost_equal(box_sum_1.array, ref, decimal=12)
assert_almost_equal(box_sum_2.array, ref, decimal=12)
def test_Gaussian1DKernel_even_size(self):
"""
Check if even size for GaussianKernel works.
"""
gauss = Gaussian1DKernel(3, x_size=10)
assert gauss.array.size == 10
def test_Gaussian2DKernel_even_size(self):
"""
Check if even size for GaussianKernel works.
"""
gauss = Gaussian2DKernel(3, x_size=10, y_size=10)
assert gauss.array.shape == (10, 10)
# https://github.com/astropy/astropy/issues/3605
def test_Gaussian2DKernel_rotated(self):
gauss = Gaussian2DKernel(
x_stddev=3, y_stddev=1.5, theta=0.7853981633974483, x_size=5, y_size=5
) # rotated 45 deg ccw
ans = [
[0.04087193, 0.04442386, 0.03657381, 0.02280797, 0.01077372],
[0.04442386, 0.05704137, 0.05547869, 0.04087193, 0.02280797],
[0.03657381, 0.05547869, 0.06374482, 0.05547869, 0.03657381],
[0.02280797, 0.04087193, 0.05547869, 0.05704137, 0.04442386],
[0.01077372, 0.02280797, 0.03657381, 0.04442386, 0.04087193],
]
assert_allclose(gauss, ans, rtol=0.001) # Rough comparison at 0.1 %
def test_normalize_peak(self):
"""
Check if normalize works with peak mode.
"""
custom = CustomKernel([1, 2, 3, 2, 1])
custom.normalize(mode="peak")
assert custom.array.max() == 1
def test_check_kernel_attributes(self):
"""
Check if kernel attributes are correct.
"""
box = Box2DKernel(5)
# Check truncation
assert box.truncation == 0
# Check model
assert isinstance(box.model, Box2D)
# Check center
assert box.center == [2, 2]
# Check normalization
box.normalize()
assert_almost_equal(box._kernel_sum, 1.0, decimal=12)
# Check separability
assert box.separable
@pytest.mark.parametrize(
("kernel_type", "mode"), list(itertools.product(KERNEL_TYPES, MODES))
)
def test_discretize_modes(self, kernel_type, mode):
"""
Check if the different modes result in kernels that work with convolve.
Use only small kernel width, to make the test pass quickly.
"""
if kernel_type == AiryDisk2DKernel and not HAS_SCIPY:
pytest.skip("Omitting AiryDisk2DKernel, which requires SciPy")
if not kernel_type == Ring2DKernel:
kernel = kernel_type(3)
else:
kernel = kernel_type(3, 3 * 0.2)
if kernel.dimension == 1:
c1 = convolve_fft(
delta_pulse_1D, kernel, boundary="fill", normalize_kernel=False
)
c2 = convolve(
delta_pulse_1D, kernel, boundary="fill", normalize_kernel=False
)
assert_almost_equal(c1, c2, decimal=12)
else:
c1 = convolve_fft(
delta_pulse_2D, kernel, boundary="fill", normalize_kernel=False
)
c2 = convolve(
delta_pulse_2D, kernel, boundary="fill", normalize_kernel=False
)
assert_almost_equal(c1, c2, decimal=12)
@pytest.mark.parametrize("width", WIDTHS_EVEN)
def test_box_kernels_even_size(self, width):
"""
        Check if Box kernels work properly with even sizes.
"""
kernel_1D = Box1DKernel(width)
assert kernel_1D.shape[0] % 2 != 0
assert kernel_1D.array.sum() == 1.0
kernel_2D = Box2DKernel(width)
assert np.all([_ % 2 != 0 for _ in kernel_2D.shape])
assert kernel_2D.array.sum() == 1.0
def test_kernel_normalization(self):
"""
Test that repeated normalizations do not change the kernel [#3747].
"""
kernel = CustomKernel(np.ones(5))
kernel.normalize()
data = np.copy(kernel.array)
kernel.normalize()
assert_allclose(data, kernel.array)
kernel.normalize()
assert_allclose(data, kernel.array)
def test_kernel_normalization_mode(self):
"""
Test that an error is raised if mode is invalid.
"""
with pytest.raises(ValueError):
kernel = CustomKernel(np.ones(3))
kernel.normalize(mode="invalid")
def test_kernel1d_int_size(self):
"""
Test that an error is raised if ``Kernel1D`` ``x_size`` is not
an integer.
"""
with pytest.raises(TypeError):
Gaussian1DKernel(3, x_size=1.2)
def test_kernel2d_int_xsize(self):
"""
Test that an error is raised if ``Kernel2D`` ``x_size`` is not
an integer.
"""
with pytest.raises(TypeError):
Gaussian2DKernel(3, x_size=1.2)
def test_kernel2d_int_ysize(self):
"""
Test that an error is raised if ``Kernel2D`` ``y_size`` is not
an integer.
"""
with pytest.raises(TypeError):
Gaussian2DKernel(3, x_size=5, y_size=1.2)
def test_kernel1d_initialization(self):
"""
Test that an error is raised if an array or model is not
specified for ``Kernel1D``.
"""
with pytest.raises(TypeError):
Kernel1D()
def test_kernel2d_initialization(self):
"""
Test that an error is raised if an array or model is not
specified for ``Kernel2D``.
"""
with pytest.raises(TypeError):
Kernel2D()
def test_array_keyword_not_allowed(self):
"""
Regression test for issue #10439
"""
x = np.ones([10, 10])
with pytest.raises(TypeError, match=r".* allowed .*"):
AiryDisk2DKernel(2, array=x)
Box1DKernel(2, array=x)
Box2DKernel(2, array=x)
Gaussian1DKernel(2, array=x)
Gaussian2DKernel(2, array=x)
RickerWavelet1DKernel(2, array=x)
RickerWavelet2DKernel(2, array=x)
Model1DKernel(Gaussian1D(1, 0, 2), array=x)
Model2DKernel(Gaussian2D(1, 0, 0, 2, 2), array=x)
Ring2DKernel(9, 8, array=x)
Tophat2DKernel(2, array=x)
Trapezoid1DKernel(2, array=x)
|
aa1a86683b98cfe201ca1d9fca6184e1da5c3bbf83b0115db375f364eb977813 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy.convolution.utils import discretize_model
from astropy.modeling.functional_models import (
Box1D,
Box2D,
Gaussian1D,
Gaussian2D,
RickerWavelet1D,
RickerWavelet2D,
)
from astropy.modeling.tests.example_models import models_1D, models_2D
from astropy.modeling.tests.test_models import create_model
from astropy.utils.compat.optional_deps import HAS_SCIPY
modes = ["center", "linear_interp", "oversample"]
test_models_1D = [Gaussian1D, Box1D, RickerWavelet1D]
test_models_2D = [Gaussian2D, Box2D, RickerWavelet2D]
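# Illustrative sketch of how the modes exercised below differ in practice:
# "center" samples the model at pixel centers, while "oversample" averages
# factor sub-samples per pixel along each axis, which matters for models that
# are narrow compared to a pixel.  The helper is not named test_* so pytest
# does not collect it; the numbers are illustrative only.
def _demo_discretize_modes():
    narrow = Gaussian1D(amplitude=1.0, mean=0.0, stddev=0.4)
    center = discretize_model(narrow, (-5, 6), mode="center")
    oversampled = discretize_model(narrow, (-5, 6), mode="oversample", factor=10)
    # The oversampled sum lies much closer to the analytic integral
    # sqrt(2 * pi) * stddev than plain center sampling does for such a
    # narrow model.
    return center.sum(), oversampled.sum(), np.sqrt(2 * np.pi) * 0.4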
@pytest.mark.parametrize(
("model_class", "mode"), list(itertools.product(test_models_1D, modes))
)
def test_pixel_sum_1D(model_class, mode):
"""
Test if the sum of all pixels corresponds nearly to the integral.
"""
if model_class == Box1D and mode == "center":
pytest.skip("Non integrating mode. Skip integral test.")
parameters = models_1D[model_class]
model = create_model(model_class, parameters)
values = discretize_model(model, models_1D[model_class]["x_lim"], mode=mode)
assert_allclose(values.sum(), models_1D[model_class]["integral"], atol=0.0001)
@pytest.mark.parametrize("mode", modes)
def test_gaussian_eval_1D(mode):
"""
Discretize Gaussian with different modes and check
if result is at least similar to Gaussian1D.eval().
"""
model = Gaussian1D(1, 0, 20)
x = np.arange(-100, 101)
values = model(x)
disc_values = discretize_model(model, (-100, 101), mode=mode)
assert_allclose(values, disc_values, atol=0.001)
@pytest.mark.parametrize(
("model_class", "mode"), list(itertools.product(test_models_2D, modes))
)
def test_pixel_sum_2D(model_class, mode):
"""
Test if the sum of all pixels corresponds nearly to the integral.
"""
if model_class == Box2D and mode == "center":
pytest.skip("Non integrating mode. Skip integral test.")
parameters = models_2D[model_class]
model = create_model(model_class, parameters)
values = discretize_model(
model,
models_2D[model_class]["x_lim"],
models_2D[model_class]["y_lim"],
mode=mode,
)
assert_allclose(values.sum(), models_2D[model_class]["integral"], atol=0.0001)
@pytest.mark.parametrize(
("model_class", "mode"), list(itertools.product(test_models_2D, modes))
)
def test_pixel_sum_compound_2D(model_class, mode):
"""
Test if the sum of all pixels of a compound model corresponds nearly to the integral.
"""
if model_class == Box2D and mode == "center":
pytest.skip("Non integrating mode. Skip integral test.")
parameters = models_2D[model_class]
model = create_model(model_class, parameters)
values = discretize_model(
model + model,
models_2D[model_class]["x_lim"],
models_2D[model_class]["y_lim"],
mode=mode,
)
model_integral = 2 * models_2D[model_class]["integral"]
assert_allclose(values.sum(), model_integral, atol=0.0001)
@pytest.mark.parametrize("mode", modes)
def test_gaussian_eval_2D(mode):
"""
Discretize Gaussian with different modes and check
if result is at least similar to Gaussian2D.eval()
"""
model = Gaussian2D(0.01, 0, 0, 1, 1)
x = np.arange(-2, 3)
y = np.arange(-2, 3)
x, y = np.meshgrid(x, y)
values = model(x, y)
disc_values = discretize_model(model, (-2, 3), (-2, 3), mode=mode)
assert_allclose(values, disc_values, atol=1e-2)
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy")
@pytest.mark.slow
def test_gaussian_eval_2D_integrate_mode():
"""
Discretize Gaussian with integrate mode
"""
model_list = [
Gaussian2D(0.01, 0, 0, 2, 2),
Gaussian2D(0.01, 0, 0, 1, 2),
Gaussian2D(0.01, 0, 0, 2, 1),
]
x = np.arange(-2, 3)
y = np.arange(-2, 3)
x, y = np.meshgrid(x, y)
for model in model_list:
values = model(x, y)
disc_values = discretize_model(model, (-2, 3), (-2, 3), mode="integrate")
assert_allclose(values, disc_values, atol=1e-2)
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy")
def test_subpixel_gauss_1D():
"""
Test subpixel accuracy of the integrate mode with gaussian 1D model.
"""
gauss_1D = Gaussian1D(1, 0, 0.1)
values = discretize_model(gauss_1D, (-1, 2), mode="integrate", factor=100)
assert_allclose(values.sum(), np.sqrt(2 * np.pi) * 0.1, atol=0.00001)
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy")
def test_subpixel_gauss_2D():
"""
Test subpixel accuracy of the integrate mode with gaussian 2D model.
"""
gauss_2D = Gaussian2D(1, 0, 0, 0.1, 0.1)
values = discretize_model(gauss_2D, (-1, 2), (-1, 2), mode="integrate", factor=100)
assert_allclose(values.sum(), 2 * np.pi * 0.01, atol=0.00001)
def test_discretize_callable_1d():
"""
Test discretize when a 1d function is passed.
"""
def f(x):
return x**2
y = discretize_model(f, (-5, 6))
assert_allclose(y, np.arange(-5, 6) ** 2)
def test_discretize_callable_2d():
"""
Test discretize when a 2d function is passed.
"""
def f(x, y):
return x**2 + y**2
actual = discretize_model(f, (-5, 6), (-5, 6))
y, x = np.indices((11, 11)) - 5
desired = x**2 + y**2
assert_allclose(actual, desired)
def test_type_exception():
"""
Test type exception.
"""
with pytest.raises(TypeError, match=r"Model must be callable\."):
discretize_model(float(0), (-10, 11))
def test_dim_exception_1d():
"""
Test dimension exception 1d.
"""
def f(x):
return x**2
with pytest.raises(ValueError, match=r"y_range should not be input for a 1D model"):
discretize_model(f, (-10, 11), (-10, 11))
def test_dim_exception_2d():
"""
Test dimension exception 2d.
"""
def f(x, y):
return x**2 + y**2
with pytest.raises(ValueError, match=r"y_range must be specified for a 2D model"):
discretize_model(f, (-10, 11))
def test_float_x_range_exception():
def f(x, y):
return x**2 + y**2
with pytest.raises(
ValueError,
match=(
r"The difference between the upper and lower limit of 'x_range' must be a"
r" whole number\."
),
):
discretize_model(f, (-10.002, 11.23))
def test_float_y_range_exception():
def f(x, y):
return x**2 + y**2
with pytest.raises(
ValueError,
match=(
r"The difference between the upper and lower limit of 'y_range' must be a"
r" whole number\."
),
):
discretize_model(f, (-10, 11), (-10.002, 11.23))
def test_discretize_oversample():
gauss_2D = Gaussian2D(
amplitude=1.0, x_mean=5.0, y_mean=125.0, x_stddev=0.75, y_stddev=3
)
values = discretize_model(
gauss_2D, x_range=[0, 10], y_range=[100, 135], mode="oversample", factor=10
)
vmax = np.max(values)
vmax_yx = np.unravel_index(values.argmax(), values.shape)
values_osf1 = discretize_model(
gauss_2D, x_range=[0, 10], y_range=[100, 135], mode="oversample", factor=1
)
values_center = discretize_model(
gauss_2D, x_range=[0, 10], y_range=[100, 135], mode="center"
)
assert values.shape == (35, 10)
assert_allclose(vmax, 0.927, atol=1e-3)
assert vmax_yx == (25, 5)
assert_allclose(values_center, values_osf1)
def test_oversample_factor():
gauss_1D = Gaussian1D(1, 0, 0.1)
msg = "factor must have an integer value"
with pytest.raises(ValueError, match=msg):
discretize_model(gauss_1D, (-1, 2), mode="oversample", factor=1.2)
|
6ae212d7f6d6f3fa964fece2817dfe8cd0b22492e5eec1ee82ddf0f977b162c3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import os
import subprocess
import sys
import pytest
from astropy.config import configuration, create_config_file, paths, set_temp_config
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyDeprecationWarning
OLD_CONFIG = {}
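# setup_module/teardown_module snapshot and restore the registry of live
# config objects, so the throwaway "testpkg" entries created by the tests
# below do not leak into other test modules.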
def setup_module():
OLD_CONFIG.clear()
OLD_CONFIG.update(configuration._cfgobjs)
def teardown_module():
configuration._cfgobjs.clear()
configuration._cfgobjs.update(OLD_CONFIG)
def test_paths():
assert "astropy" in paths.get_config_dir()
assert "astropy" in paths.get_cache_dir()
assert "testpkg" in paths.get_config_dir(rootname="testpkg")
assert "testpkg" in paths.get_cache_dir(rootname="testpkg")
def test_set_temp_config(tmp_path, monkeypatch):
# Check that we start in an understood state.
assert configuration._cfgobjs == OLD_CONFIG
# Temporarily remove any temporary overrides of the configuration dir.
monkeypatch.setattr(paths.set_temp_config, "_temp_path", None)
orig_config_dir = paths.get_config_dir(rootname="astropy")
(temp_config_dir := tmp_path / "config").mkdir()
temp_astropy_config = temp_config_dir / "astropy"
# Test decorator mode
@paths.set_temp_config(temp_config_dir)
def test_func():
assert paths.get_config_dir(rootname="astropy") == str(temp_astropy_config)
# Test temporary restoration of original default
with paths.set_temp_config() as d:
assert d == orig_config_dir == paths.get_config_dir(rootname="astropy")
test_func()
# Test context manager mode (with cleanup)
with paths.set_temp_config(temp_config_dir, delete=True):
assert paths.get_config_dir(rootname="astropy") == str(temp_astropy_config)
assert not temp_config_dir.exists()
# Check that we have returned to our old configuration.
assert configuration._cfgobjs == OLD_CONFIG
def test_set_temp_cache(tmp_path, monkeypatch):
monkeypatch.setattr(paths.set_temp_cache, "_temp_path", None)
orig_cache_dir = paths.get_cache_dir(rootname="astropy")
(temp_cache_dir := tmp_path / "cache").mkdir()
temp_astropy_cache = temp_cache_dir / "astropy"
# Test decorator mode
@paths.set_temp_cache(temp_cache_dir)
def test_func():
assert paths.get_cache_dir(rootname="astropy") == str(temp_astropy_cache)
# Test temporary restoration of original default
with paths.set_temp_cache() as d:
assert d == orig_cache_dir == paths.get_cache_dir(rootname="astropy")
test_func()
# Test context manager mode (with cleanup)
with paths.set_temp_cache(temp_cache_dir, delete=True):
assert paths.get_cache_dir(rootname="astropy") == str(temp_astropy_cache)
assert not temp_cache_dir.exists()
def test_set_temp_cache_resets_on_exception(tmp_path):
"""Test for regression of bug #9704"""
t = paths.get_cache_dir()
(a := tmp_path / "a").write_text("not a good cache\n")
with pytest.raises(OSError), paths.set_temp_cache(a):
pass
assert t == paths.get_cache_dir()
def test_config_file():
from astropy.config.configuration import get_config, reload_config
apycfg = get_config("astropy")
assert apycfg.filename.endswith("astropy.cfg")
cfgsec = get_config("astropy.config")
assert cfgsec.depth == 1
assert cfgsec.name == "config"
assert cfgsec.parent.filename.endswith("astropy.cfg")
# try with a different package name, still inside astropy config dir:
testcfg = get_config("testpkg", rootname="astropy")
parts = os.path.normpath(testcfg.filename).split(os.sep)
assert ".astropy" in parts or "astropy" in parts
assert parts[-1] == "testpkg.cfg"
configuration._cfgobjs["testpkg"] = None # HACK
# try with a different package name, no specified root name (should
# default to astropy):
testcfg = get_config("testpkg")
parts = os.path.normpath(testcfg.filename).split(os.sep)
assert ".astropy" in parts or "astropy" in parts
assert parts[-1] == "testpkg.cfg"
configuration._cfgobjs["testpkg"] = None # HACK
# try with a different package name, specified root name:
testcfg = get_config("testpkg", rootname="testpkg")
parts = os.path.normpath(testcfg.filename).split(os.sep)
assert ".testpkg" in parts or "testpkg" in parts
assert parts[-1] == "testpkg.cfg"
configuration._cfgobjs["testpkg"] = None # HACK
# try with a subpackage with specified root name:
testcfg_sec = get_config("testpkg.somemodule", rootname="testpkg")
parts = os.path.normpath(testcfg_sec.parent.filename).split(os.sep)
assert ".testpkg" in parts or "testpkg" in parts
assert parts[-1] == "testpkg.cfg"
configuration._cfgobjs["testpkg"] = None # HACK
reload_config("astropy")
def check_config(conf):
# test that the output contains some lines that we expect
assert "# unicode_output = False" in conf
assert "[io.fits]" in conf
assert "[table]" in conf
assert "# replace_warnings = ," in conf
assert "[table.jsviewer]" in conf
assert "# css_urls = https://cdn.datatables.net/1.10.12/css/jquery.dataTables.css," in conf # fmt: skip
assert "[visualization.wcsaxes]" in conf
assert "## Whether to log exceptions before raising them." in conf
assert "# log_exceptions = False" in conf
def test_generate_config(tmp_path):
from astropy.config.configuration import generate_config
out = io.StringIO()
generate_config("astropy", out)
conf = out.getvalue()
outfile = tmp_path / "astropy.cfg"
generate_config("astropy", outfile)
with open(outfile) as fp:
conf2 = fp.read()
for c in (conf, conf2):
check_config(c)
def test_generate_config2(tmp_path):
"""Test that generate_config works with the default filename."""
with set_temp_config(tmp_path):
from astropy.config.configuration import generate_config
generate_config("astropy")
assert os.path.exists(tmp_path / "astropy" / "astropy.cfg")
with open(tmp_path / "astropy" / "astropy.cfg") as fp:
conf = fp.read()
check_config(conf)
def test_create_config_file(tmp_path, caplog):
with set_temp_config(tmp_path):
create_config_file("astropy")
# check that the config file has been created
assert (
"The configuration file has been successfully written"
in caplog.records[0].message
)
assert os.path.exists(tmp_path / "astropy" / "astropy.cfg")
with open(tmp_path / "astropy" / "astropy.cfg") as fp:
conf = fp.read()
check_config(conf)
caplog.clear()
# now modify the config file
conf = conf.replace("# unicode_output = False", "unicode_output = True")
with open(tmp_path / "astropy" / "astropy.cfg", mode="w") as fp:
fp.write(conf)
with set_temp_config(tmp_path):
create_config_file("astropy")
# check that the config file has not been overwritten since it was modified
assert (
"The configuration file already exists and seems to have been customized"
in caplog.records[0].message
)
caplog.clear()
with set_temp_config(tmp_path):
create_config_file("astropy", overwrite=True)
# check that the config file has been overwritten
assert (
"The configuration file has been successfully written"
in caplog.records[0].message
)
def test_configitem():
from astropy.config.configuration import ConfigItem, ConfigNamespace, get_config
ci = ConfigItem(34, "this is a Description")
class Conf(ConfigNamespace):
tstnm = ci
conf = Conf()
assert ci.module == "astropy.config.tests.test_configs"
assert ci() == 34
assert ci.description == "this is a Description"
assert conf.tstnm == 34
sec = get_config(ci.module)
assert sec["tstnm"] == 34
ci.description = "updated Descr"
ci.set(32)
assert ci() == 32
# It's useful to go back to the default to allow other test functions to
# call this one and still be in the default configuration.
ci.description = "this is a Description"
ci.set(34)
assert ci() == 34
# Test iterator for one-item namespace
result = list(conf)
assert result == ["tstnm"]
result = list(conf.keys())
assert result == ["tstnm"]
result = list(conf.values())
assert result == [ci]
result = list(conf.items())
assert result == [("tstnm", ci)]
def test_configitem_types():
from astropy.config.configuration import ConfigItem, ConfigNamespace
ci1 = ConfigItem(34)
ci2 = ConfigItem(34.3)
ci3 = ConfigItem(True)
ci4 = ConfigItem("astring")
class Conf(ConfigNamespace):
tstnm1 = ci1
tstnm2 = ci2
tstnm3 = ci3
tstnm4 = ci4
conf = Conf()
assert isinstance(conf.tstnm1, int)
assert isinstance(conf.tstnm2, float)
assert isinstance(conf.tstnm3, bool)
assert isinstance(conf.tstnm4, str)
with pytest.raises(TypeError):
conf.tstnm1 = 34.3
    conf.tstnm2 = 12  # this should succeed as up-casting
with pytest.raises(TypeError):
conf.tstnm3 = "fasd"
with pytest.raises(TypeError):
conf.tstnm4 = 546.245
# Test iterator for multi-item namespace. Assume ordered by insertion order.
item_names = list(conf)
assert item_names == ["tstnm1", "tstnm2", "tstnm3", "tstnm4"]
result = list(conf.keys())
assert result == item_names
result = list(conf.values())
assert result == [ci1, ci2, ci3, ci4]
result = list(conf.items())
assert result == [
("tstnm1", ci1),
("tstnm2", ci2),
("tstnm3", ci3),
("tstnm4", ci4),
]
def test_configitem_options(tmp_path):
from astropy.config.configuration import ConfigItem, ConfigNamespace, get_config
cio = ConfigItem(["op1", "op2", "op3"])
class Conf(ConfigNamespace):
tstnmo = cio
sec = get_config(cio.module)
assert isinstance(cio(), str)
assert cio() == "op1"
assert sec["tstnmo"] == "op1"
cio.set("op2")
with pytest.raises(TypeError):
cio.set("op5")
assert sec["tstnmo"] == "op2"
# now try saving
apycfg = sec
while apycfg.parent is not apycfg:
apycfg = apycfg.parent
f = tmp_path / "astropy.cfg"
with open(f, "wb") as fd:
apycfg.write(fd)
with open(f, encoding="utf-8") as fd:
lns = [x.strip() for x in fd.readlines()]
assert "tstnmo = op2" in lns
def test_config_noastropy_fallback(monkeypatch):
"""
Tests to make sure configuration items fall back to their defaults when
there's a problem accessing the astropy directory
"""
# make sure the config directory is not searched
monkeypatch.setenv("XDG_CONFIG_HOME", "foo")
monkeypatch.delenv("XDG_CONFIG_HOME")
monkeypatch.setattr(paths.set_temp_config, "_temp_path", None)
# make sure the _find_or_create_root_dir function fails as though the
# astropy dir could not be accessed
def osraiser(dirnm, linkto, pkgname=None):
raise OSError
monkeypatch.setattr(paths, "_find_or_create_root_dir", osraiser)
# also have to make sure the stored configuration objects are cleared
monkeypatch.setattr(configuration, "_cfgobjs", {})
with pytest.raises(OSError):
# make sure the config dir search fails
paths.get_config_dir(rootname="astropy")
# now run the basic tests, and make sure the warning about no astropy
# is present
test_configitem()
def test_configitem_setters():
from astropy.config.configuration import ConfigItem, ConfigNamespace
class Conf(ConfigNamespace):
tstnm12 = ConfigItem(42, "this is another Description")
conf = Conf()
assert conf.tstnm12 == 42
with conf.set_temp("tstnm12", 45):
assert conf.tstnm12 == 45
assert conf.tstnm12 == 42
conf.tstnm12 = 43
assert conf.tstnm12 == 43
with conf.set_temp("tstnm12", 46):
assert conf.tstnm12 == 46
# Make sure it is reset even with Exception
try:
with conf.set_temp("tstnm12", 47):
raise Exception
except Exception:
pass
assert conf.tstnm12 == 43
def test_empty_config_file():
from astropy.config.configuration import is_unedited_config_file
def get_content(fn):
with open(get_pkg_data_filename(fn), encoding="latin-1") as fd:
return fd.read()
content = get_content("data/empty.cfg")
assert is_unedited_config_file(content)
content = get_content("data/not_empty.cfg")
assert not is_unedited_config_file(content)
class TestAliasRead:
def setup_class(self):
configuration._override_config_file = get_pkg_data_filename("data/alias.cfg")
def test_alias_read(self):
from astropy.utils.data import conf
with pytest.warns(
AstropyDeprecationWarning,
match=r"Config parameter 'name_resolve_timeout' in section "
r"\[coordinates.name_resolve\].*",
) as w:
conf.reload()
assert conf.remote_timeout == 42
assert len(w) == 1
def teardown_class(self):
from astropy.utils.data import conf
configuration._override_config_file = None
conf.reload()
def test_configitem_unicode():
from astropy.config.configuration import ConfigItem, ConfigNamespace, get_config
cio = ConfigItem("ასტრონომიის")
class Conf(ConfigNamespace):
tstunicode = cio
sec = get_config(cio.module)
assert isinstance(cio(), str)
assert cio() == "ასტრონომიის"
assert sec["tstunicode"] == "ასტრონომიის"
def test_warning_move_to_top_level():
    # Check that the warning about deprecated config items in the
    # file works. See #2514
from astropy import conf
configuration._override_config_file = get_pkg_data_filename("data/deprecated.cfg")
try:
with pytest.warns(AstropyDeprecationWarning) as w:
conf.reload()
conf.max_lines
assert len(w) == 1
finally:
configuration._override_config_file = None
conf.reload()
def test_no_home():
# "import astropy" fails when neither $HOME or $XDG_CONFIG_HOME
# are set. To test, we unset those environment variables for a
# subprocess and try to import astropy.
test_path = os.path.dirname(__file__)
astropy_path = os.path.abspath(os.path.join(test_path, "..", "..", ".."))
env = os.environ.copy()
paths = [astropy_path]
if env.get("PYTHONPATH"):
paths.append(env.get("PYTHONPATH"))
env["PYTHONPATH"] = os.pathsep.join(paths)
for val in ["HOME", "XDG_CONFIG_HOME"]:
if val in env:
del env[val]
retcode = subprocess.check_call([sys.executable, "-c", "import astropy"], env=env)
assert retcode == 0
|
9e30c0a107468f5cca30317054464600844ffee4e98184087b3a724d036a5b8f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import operator
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.coordinates import Angle
from astropy.tests.helper import assert_quantity_allclose
from astropy.uncertainty import distributions as ds
from astropy.uncertainty.core import Distribution
from astropy.utils import NumpyRNGContext
from astropy.utils.compat.optional_deps import HAS_SCIPY
if HAS_SCIPY:
from scipy.stats import norm # pylint: disable=W0611
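    # For a normal distribution the median absolute deviation equals
    # sigma * norm.ppf(0.75), so multiplying a MAD by this factor (~1.4826)
    # gives a consistent estimate of the standard deviation.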
SMAD_FACTOR = 1 / norm.ppf(0.75)
class TestInit:
@classmethod
def setup_class(self):
self.rates = np.array([1, 5, 30, 400])[:, np.newaxis]
self.parr = np.random.poisson(self.rates, (4, 1000))
self.parr_t = np.random.poisson(self.rates.squeeze(), (1000, 4))
def test_numpy_init(self):
# Test that we can initialize directly from a Numpy array
Distribution(self.parr)
def test_numpy_init_T(self):
Distribution(self.parr_t.T)
def test_quantity_init(self):
# Test that we can initialize directly from a Quantity
pq = self.parr << u.ct
pqd = Distribution(pq)
assert isinstance(pqd, u.Quantity)
assert isinstance(pqd, Distribution)
assert isinstance(pqd.value, Distribution)
assert_array_equal(pqd.value.distribution, self.parr)
def test_quantity_init_T(self):
# Test that we can initialize directly from a Quantity
pq = self.parr_t << u.ct
Distribution(pq.T)
def test_quantity_init_with_distribution(self):
# Test that we can initialize a Quantity from a Distribution.
pd = Distribution(self.parr)
qpd = pd << u.ct
assert isinstance(qpd, u.Quantity)
assert isinstance(qpd, Distribution)
assert qpd.unit == u.ct
assert_array_equal(qpd.value.distribution, pd.distribution.astype(float))
def test_init_scalar():
parr = np.random.poisson(np.array([1, 5, 30, 400])[:, np.newaxis], (4, 1000))
with pytest.raises(
TypeError, match=r"Attempted to initialize a Distribution with a scalar"
):
Distribution(parr.ravel()[0])
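# Illustrative sketch of the usage pattern exercised by
# TestDistributionStatistics below: a Distribution wraps an array whose
# trailing axis holds Monte-Carlo samples, and the pdf_* methods reduce over
# that axis to plain Quantity summary statistics.  The helper is not named
# test_* so pytest does not collect it; the numbers are illustrative only.
def _demo_distribution_summary_stats():
    with NumpyRNGContext(12345):
        samples = np.random.normal(loc=[[1.0], [2.0]], scale=0.1, size=(2, 1000))
    distr = Distribution(samples * u.kpc)
    # distr.shape == (2,) and distr.n_samples == 1000
    return distr.pdf_mean(), distr.pdf_std(), distr.pdf_percentiles([16, 50, 84])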
class TestDistributionStatistics:
def setup_class(self):
with NumpyRNGContext(12345):
self.data = np.random.normal(
np.array([1, 2, 3, 4])[:, np.newaxis],
np.array([3, 2, 4, 5])[:, np.newaxis],
(4, 10000),
)
self.distr = Distribution(self.data * u.kpc)
def test_shape(self):
# Distribution shape
assert self.distr.shape == (4,)
assert self.distr.distribution.shape == (4, 10000)
def test_size(self):
# Total number of values
assert self.distr.size == 4
assert self.distr.distribution.size == 40000
def test_n_samples(self):
# Number of samples
assert self.distr.n_samples == 10000
def test_n_distr(self):
assert self.distr.shape == (4,)
def test_pdf_mean(self):
# Mean of each PDF
expected = np.mean(self.data, axis=-1) * self.distr.unit
pdf_mean = self.distr.pdf_mean()
assert_quantity_allclose(pdf_mean, expected)
assert_quantity_allclose(pdf_mean, [1, 2, 3, 4] * self.distr.unit, rtol=0.05)
# make sure the right type comes out - should be a Quantity because it's
# now a summary statistic
assert not isinstance(pdf_mean, Distribution)
assert isinstance(pdf_mean, u.Quantity)
# Check with out argument.
out = pdf_mean * 0.0
pdf_mean2 = self.distr.pdf_mean(out=out)
assert pdf_mean2 is out
assert np.all(pdf_mean2 == pdf_mean)
def test_pdf_std(self):
# Standard deviation of each PDF
expected = np.std(self.data, axis=-1) * self.distr.unit
pdf_std = self.distr.pdf_std()
assert_quantity_allclose(pdf_std, expected)
assert_quantity_allclose(pdf_std, [3, 2, 4, 5] * self.distr.unit, rtol=0.05)
# make sure the right type comes out - should be a Quantity because it's
# now a summary statistic
assert not isinstance(pdf_std, Distribution)
assert isinstance(pdf_std, u.Quantity)
# Check with proper ddof, using out argument.
out = pdf_std * 0.0
expected = np.std(self.data, axis=-1, ddof=1) * self.distr.unit
pdf_std2 = self.distr.pdf_std(ddof=1, out=out)
assert pdf_std2 is out
assert np.all(pdf_std2 == expected)
def test_pdf_var(self):
# Variance of each PDF
expected = np.var(self.data, axis=-1) * self.distr.unit**2
pdf_var = self.distr.pdf_var()
assert_quantity_allclose(pdf_var, expected)
assert_quantity_allclose(
pdf_var, [9, 4, 16, 25] * self.distr.unit**2, rtol=0.1
)
# make sure the right type comes out - should be a Quantity because it's
# now a summary statistic
assert not isinstance(pdf_var, Distribution)
assert isinstance(pdf_var, u.Quantity)
# Check with proper ddof, using out argument.
out = pdf_var * 0.0
expected = np.var(self.data, axis=-1, ddof=1) * self.distr.unit**2
pdf_var2 = self.distr.pdf_var(ddof=1, out=out)
assert pdf_var2 is out
assert np.all(pdf_var2 == expected)
def test_pdf_median(self):
# Median of each PDF
expected = np.median(self.data, axis=-1) * self.distr.unit
pdf_median = self.distr.pdf_median()
assert_quantity_allclose(pdf_median, expected)
assert_quantity_allclose(pdf_median, [1, 2, 3, 4] * self.distr.unit, rtol=0.1)
# make sure the right type comes out - should be a Quantity because it's
# now a summary statistic
assert not isinstance(pdf_median, Distribution)
assert isinstance(pdf_median, u.Quantity)
# Check with out argument.
out = pdf_median * 0.0
pdf_median2 = self.distr.pdf_median(out=out)
assert pdf_median2 is out
assert np.all(pdf_median2 == expected)
@pytest.mark.skipif(not HAS_SCIPY, reason="no scipy")
def test_pdf_mad_smad(self):
# Median absolute deviation of each PDF
median = np.median(self.data, axis=-1, keepdims=True)
expected = np.median(np.abs(self.data - median), axis=-1) * self.distr.unit
pdf_mad = self.distr.pdf_mad()
assert_quantity_allclose(pdf_mad, expected)
pdf_smad = self.distr.pdf_smad()
assert_quantity_allclose(pdf_smad, pdf_mad * SMAD_FACTOR, rtol=1e-5)
assert_quantity_allclose(pdf_smad, [3, 2, 4, 5] * self.distr.unit, rtol=0.05)
# make sure the right type comes out - should be a Quantity because it's
# now a summary statistic
assert not isinstance(pdf_mad, Distribution)
assert isinstance(pdf_mad, u.Quantity)
assert not isinstance(pdf_smad, Distribution)
assert isinstance(pdf_smad, u.Quantity)
# Check out argument for smad (which checks mad too).
out = pdf_smad * 0.0
pdf_smad2 = self.distr.pdf_smad(out=out)
assert pdf_smad2 is out
assert np.all(pdf_smad2 == pdf_smad)
def test_percentile(self):
expected = np.percentile(self.data, [10, 50, 90], axis=-1) * self.distr.unit
percs = self.distr.pdf_percentiles([10, 50, 90])
assert_quantity_allclose(percs, expected)
assert percs.shape == (3, 4)
# make sure the right type comes out - should be a Quantity because it's
# now a summary statistic
assert not isinstance(percs, Distribution)
assert isinstance(percs, u.Quantity)
def test_add_quantity(self):
distrplus = self.distr + [2000, 0, 0, 500] * u.pc
expected = (
np.median(self.data, axis=-1) + np.array([2, 0, 0, 0.5])
) * self.distr.unit
assert_quantity_allclose(distrplus.pdf_median(), expected)
expected = np.var(self.data, axis=-1) * self.distr.unit**2
assert_quantity_allclose(distrplus.pdf_var(), expected)
def test_add_distribution(self):
another_data = (
np.random.randn(4, 10000) * np.array([1000, 0.01, 80, 10])[:, np.newaxis]
+ np.array([2000, 0, 0, 500])[:, np.newaxis]
)
# another_data is in pc, but main distr is in kpc
another_distr = Distribution(another_data * u.pc)
combined_distr = self.distr + another_distr
expected = np.median(self.data + another_data / 1000, axis=-1) * self.distr.unit
assert_quantity_allclose(combined_distr.pdf_median(), expected)
expected = (
np.var(self.data + another_data / 1000, axis=-1) * self.distr.unit**2
)
assert_quantity_allclose(combined_distr.pdf_var(), expected)
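# The ds.normal / ds.poisson / ds.uniform helpers tested below construct
# Distribution objects by drawing n_samples Monte-Carlo samples for each
# element of the input, so the result keeps the input shape and adds a
# trailing sample axis of length n_samples.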
def test_helper_normal_samples():
centerq = [1, 5, 30, 400] * u.kpc
with NumpyRNGContext(12345):
n_dist = ds.normal(centerq, std=[0.2, 1.5, 4, 1] * u.kpc, n_samples=100)
assert n_dist.distribution.shape == (4, 100)
assert n_dist.shape == (4,)
assert n_dist.unit == u.kpc
assert np.all(n_dist.pdf_std() > 100 * u.pc)
n_dist2 = ds.normal(centerq, std=[0.2, 1.5, 4, 1] * u.pc, n_samples=20000)
assert n_dist2.distribution.shape == (4, 20000)
assert n_dist2.shape == (4,)
assert n_dist2.unit == u.kpc
assert np.all(n_dist2.pdf_std() < 100 * u.pc)
def test_helper_poisson_samples():
centerqcounts = [1, 5, 30, 400] * u.count
with NumpyRNGContext(12345):
p_dist = ds.poisson(centerqcounts, n_samples=100)
assert p_dist.shape == (4,)
assert p_dist.distribution.shape == (4, 100)
assert p_dist.unit == u.count
p_min = np.min(p_dist)
assert isinstance(p_min, Distribution)
assert p_min.shape == ()
assert np.all(p_min >= 0)
assert np.all(np.abs(p_dist.pdf_mean() - centerqcounts) < centerqcounts)
def test_helper_uniform_samples():
udist = ds.uniform(lower=[1, 2] * u.kpc, upper=[3, 4] * u.kpc, n_samples=1000)
assert udist.shape == (2,)
assert udist.distribution.shape == (2, 1000)
assert np.all(np.min(udist.distribution, axis=-1) > [1, 2] * u.kpc)
assert np.all(np.max(udist.distribution, axis=-1) < [3, 4] * u.kpc)
# try the alternative creator
udist = ds.uniform(center=[1, 3, 2] * u.pc, width=[5, 4, 3] * u.pc, n_samples=1000)
assert udist.shape == (3,)
assert udist.distribution.shape == (3, 1000)
assert np.all(np.min(udist.distribution, axis=-1) > [-1.5, 1, 0.5] * u.pc)
assert np.all(np.max(udist.distribution, axis=-1) < [3.5, 5, 3.5] * u.pc)
def test_helper_normal_exact():
pytest.skip("distribution stretch goal not yet implemented")
centerq = [1, 5, 30, 400] * u.kpc
ds.normal(centerq, std=[0.2, 1.5, 4, 1] * u.kpc)
ds.normal(centerq, var=[0.04, 2.25, 16, 1] * u.kpc**2)
ds.normal(centerq, ivar=[25, 0.44444444, 0.625, 1] * u.kpc**-2)
def test_helper_poisson_exact():
pytest.skip("distribution stretch goal not yet implemented")
centerq = [1, 5, 30, 400] * u.one
ds.poisson(centerq, n_samples=1000)
with pytest.raises(
u.UnitsError,
match=r"Poisson distribution can only be computed for dimensionless quantities",
):
centerq = [1, 5, 30, 400] * u.kpc
ds.poisson(centerq, n_samples=1000)
def test_reprs():
darr = np.arange(30).reshape(3, 10)
distr = Distribution(darr * u.kpc)
assert "n_samples=10" in repr(distr)
assert "n_samples=10" in str(distr)
assert r"n_{\rm samp}=10" in distr._repr_latex_()
@pytest.mark.parametrize(
"func, kws",
[
(ds.normal, {"center": 0, "std": 2}),
(ds.uniform, {"lower": 0, "upper": 2}),
(ds.poisson, {"center": 2}),
(ds.normal, {"center": 0 * u.count, "std": 2 * u.count}),
(ds.uniform, {"lower": 0 * u.count, "upper": 2 * u.count}),
(ds.poisson, {"center": 2 * u.count}),
],
)
def test_wrong_kw_fails(func, kws):
with pytest.raises(TypeError, match="missing 1 required"):
kw_temp = kws.copy()
kw_temp["n_sample"] = 100 # note the missing "s"
assert func(**kw_temp).n_samples == 100
kw_temp = kws.copy()
kw_temp["n_samples"] = 100
assert func(**kw_temp).n_samples == 100
def test_index_assignment_quantity():
arr = np.random.randn(2, 1000)
distr = Distribution(arr * u.kpc)
d1q, d2q = distr
assert isinstance(d1q, Distribution)
assert isinstance(d2q, Distribution)
ndistr = ds.normal(center=[1, 2] * u.kpc, std=[3, 4] * u.kpc, n_samples=1000)
n1, n2 = ndistr
assert isinstance(n1, ds.Distribution)
assert isinstance(n2, ds.Distribution)
def test_index_assignment_array():
arr = np.random.randn(2, 1000)
distr = Distribution(arr)
d1a, d2a = distr
assert isinstance(d1a, Distribution)
assert isinstance(d2a, Distribution)
ndistr = ds.normal(center=[1, 2], std=[3, 4], n_samples=1000)
n1, n2 = ndistr
assert isinstance(n1, ds.Distribution)
assert isinstance(n2, ds.Distribution)
def test_histogram():
arr = np.random.randn(2, 3, 1000)
distr = Distribution(arr)
hist, bins = distr.pdf_histogram(bins=10)
assert hist.shape == (2, 3, 10)
assert bins.shape == (2, 3, 11)
def test_array_repr_latex():
# as of this writing ndarray does not have a _repr_latex_, and this test
# ensures distributions account for that. However, if in the future ndarray
# gets a _repr_latex_, we can skip this.
arr = np.random.randn(4, 1000)
if hasattr(arr, "_repr_latex_"):
pytest.skip("in this version of numpy, ndarray has a _repr_latex_")
distr = Distribution(arr)
assert distr._repr_latex_() is None
def test_distr_to():
distr = ds.normal(10 * u.cm, n_samples=100, std=1 * u.cm)
todistr = distr.to(u.m)
assert_quantity_allclose(distr.pdf_mean().to(u.m), todistr.pdf_mean())
def test_distr_noq_to():
# this is an array distribution not a quantity
distr = ds.normal(10, n_samples=100, std=1)
with pytest.raises(AttributeError):
distr.to(u.m)
def test_distr_to_value():
distr = ds.normal(10 * u.cm, n_samples=100, std=1 * u.cm)
tovdistr = distr.to_value(u.m)
assert np.allclose(distr.pdf_mean().to_value(u.m), tovdistr.pdf_mean())
def test_distr_noq_to_value():
distr = ds.normal(10, n_samples=100, std=1)
with pytest.raises(AttributeError):
distr.to_value(u.m)
def test_distr_angle():
# Check that Quantity subclasses decay to Quantity appropriately.
distr = Distribution([2.0, 3.0, 4.0])
ad = Angle(distr, "deg")
ad_plus_ad = ad + ad
assert isinstance(ad_plus_ad, Angle)
assert isinstance(ad_plus_ad, Distribution)
ad_times_ad = ad * ad
assert not isinstance(ad_times_ad, Angle)
assert isinstance(ad_times_ad, u.Quantity)
assert isinstance(ad_times_ad, Distribution)
ad += ad
assert isinstance(ad, Angle)
assert isinstance(ad, Distribution)
assert_array_equal(ad.distribution, ad_plus_ad.distribution)
with pytest.raises(u.UnitTypeError):
ad *= ad
def test_distr_angle_view_as_quantity():
# Check that Quantity subclasses decay to Quantity appropriately.
distr = Distribution([2.0, 3.0, 4.0])
ad = Angle(distr, "deg")
qd = ad.view(u.Quantity)
assert not isinstance(qd, Angle)
assert isinstance(qd, u.Quantity)
assert isinstance(qd, Distribution)
# View directly as DistributionQuantity class.
qd2 = ad.view(qd.__class__)
assert not isinstance(qd2, Angle)
assert isinstance(qd2, u.Quantity)
assert isinstance(qd2, Distribution)
assert_array_equal(qd2.distribution, qd.distribution)
qd3 = ad.view(qd.dtype, qd.__class__)
assert not isinstance(qd3, Angle)
assert isinstance(qd3, u.Quantity)
assert isinstance(qd3, Distribution)
assert_array_equal(qd3.distribution, qd.distribution)
def test_distr_cannot_view_new_dtype():
# A Distribution has a very specific structured dtype with just one
# element that holds the array of samples. As it is not clear what
# to do with a view as a new dtype, we just error on it.
# TODO: with a lot of thought, this restriction can likely be relaxed.
distr = Distribution([2.0, 3.0, 4.0])
with pytest.raises(ValueError, match="with a new dtype"):
distr.view(np.dtype("f8"))
# Check subclass just in case.
ad = Angle(distr, "deg")
with pytest.raises(ValueError, match="with a new dtype"):
ad.view(np.dtype("f8"))
with pytest.raises(ValueError, match="with a new dtype"):
ad.view(np.dtype("f8"), Distribution)
def test_scalar_quantity_distribution():
# Regression test for gh-12336
angles = Distribution([90.0, 30.0, 0.0] * u.deg)
sin_angles = np.sin(angles) # This failed in 4.3.
assert isinstance(sin_angles, Distribution)
assert isinstance(sin_angles, u.Quantity)
assert_array_equal(sin_angles, Distribution(np.sin([90.0, 30.0, 0.0] * u.deg)))
@pytest.mark.parametrize("op", [operator.eq, operator.ne, operator.gt])
class TestComparison:
@classmethod
def setup_class(cls):
cls.d = Distribution([90.0, 30.0, 0.0])
class Override:
__array_ufunc__ = None
def __eq__(self, other):
return "eq"
def __ne__(self, other):
return "ne"
def __lt__(self, other):
return "gt" # Since it is called for the reverse of gt
cls.override = Override()
def test_distribution_can_be_compared_to_non_distribution(self, op):
result = op(self.d, 0.0)
assert_array_equal(result, Distribution(op(self.d.distribution, 0.0)))
def test_distribution_comparison_defers_correctly(self, op):
result = op(self.d, self.override)
assert result == op.__name__
class TestSetItemWithSelection:
def test_setitem(self):
d = Distribution([90.0, 30.0, 0.0])
d[d > 50] = 0.0
assert_array_equal(d, Distribution([0.0, 30.0, 0.0]))
def test_inplace_operation(self):
d = Distribution([90.0, 30.0, 0.0])
d[d > 50] *= -1.0
assert_array_equal(d, Distribution([-90.0, 30.0, 0.0]))
|
ac1cdf68309729cbe51164367fc68d94c90039d6b123587568b03ae111fa42f9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Module to test fitting routines
"""
# pylint: disable=invalid-name
import os.path
import unittest.mock as mk
from importlib.metadata import EntryPoint
from itertools import combinations
from unittest import mock
import numpy as np
import pytest
from numpy import linalg
from numpy.testing import assert_allclose, assert_almost_equal, assert_equal
from astropy.modeling import models
from astropy.modeling.core import Fittable2DModel, Parameter
from astropy.modeling.fitting import (
DogBoxLSQFitter,
Fitter,
FittingWithOutlierRemoval,
JointFitter,
LevMarLSQFitter,
LinearLSQFitter,
LMLSQFitter,
NonFiniteValueError,
SimplexLSQFitter,
SLSQPLSQFitter,
TRFLSQFitter,
_NLLSQFitter,
populate_entry_points,
)
from astropy.modeling.optimizers import Optimization
from astropy.stats import sigma_clip
from astropy.utils import NumpyRNGContext
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyUserWarning
from . import irafutil
if HAS_SCIPY:
from scipy import optimize
fitters = [SimplexLSQFitter, SLSQPLSQFitter]
non_linear_fitters = [LevMarLSQFitter, TRFLSQFitter, LMLSQFitter, DogBoxLSQFitter]
_RANDOM_SEED = 0x1337
class TestPolynomial2D:
"""Tests for 2D polynomial fitting."""
def setup_class(self):
self.model = models.Polynomial2D(2)
self.y, self.x = np.mgrid[:5, :5]
def poly2(x, y):
return 1 + 2 * x + 3 * x**2 + 4 * y + 5 * y**2 + 6 * x * y
self.z = poly2(self.x, self.y)
def test_poly2D_fitting(self):
fitter = LinearLSQFitter()
v = self.model.fit_deriv(x=self.x, y=self.y)
p = linalg.lstsq(v, self.z.flatten(), rcond=-1)[0]
new_model = fitter(self.model, self.x, self.y, self.z)
assert_allclose(new_model.parameters, p)
def test_eval(self):
fitter = LinearLSQFitter()
new_model = fitter(self.model, self.x, self.y, self.z)
assert_allclose(new_model(self.x, self.y), self.z)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_nonlinear_fitting(self, fitter):
fitter = fitter()
self.model.parameters = [0.6, 1.8, 2.9, 3.7, 4.9, 6.7]
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
new_model = fitter(self.model, self.x, self.y, self.z)
assert_allclose(new_model.parameters, [1, 2, 3, 4, 5, 6])
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_compare_nonlinear_fitting(self):
self.model.parameters = [0.6, 1.8, 2.9, 3.7, 4.9, 6.7]
fit_models = []
for fitter in non_linear_fitters:
fitter = fitter()
with pytest.warns(
AstropyUserWarning, match=r"Model is linear in parameters"
):
fit_models.append(fitter(self.model, self.x, self.y, self.z))
for pair in combinations(fit_models, 2):
assert_allclose(pair[0].parameters, pair[1].parameters)
class TestICheb2D:
"""
Tests 2D Chebyshev polynomial fitting
Create a 2D polynomial (z) using Polynomial2DModel and default coefficients
Fit z using an ICheb2D (Chebyshev2D) model
Evaluate the ICheb2D polynomial and compare with the initial z
"""
def setup_class(self):
self.pmodel = models.Polynomial2D(2)
self.y, self.x = np.mgrid[:5, :5]
self.z = self.pmodel(self.x, self.y)
self.cheb2 = models.Chebyshev2D(2, 2)
self.fitter = LinearLSQFitter()
def test_default_params(self):
self.cheb2.parameters = np.arange(9)
p = np.array(
[1344.0, 1772.0, 400.0, 1860.0, 2448.0, 552.0, 432.0, 568.0, 128.0]
)
z = self.cheb2(self.x, self.y)
model = self.fitter(self.cheb2, self.x, self.y, z)
assert_almost_equal(model.parameters, p)
def test_poly2D_cheb2D(self):
model = self.fitter(self.cheb2, self.x, self.y, self.z)
z1 = model(self.x, self.y)
assert_almost_equal(self.z, z1)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_chebyshev2D_nonlinear_fitting(self, fitter):
fitter = fitter()
cheb2d = models.Chebyshev2D(2, 2)
cheb2d.parameters = np.arange(9)
z = cheb2d(self.x, self.y)
cheb2d.parameters = [0.1, 0.6, 1.8, 2.9, 3.7, 4.9, 6.7, 7.5, 8.9]
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
model = fitter(cheb2d, self.x, self.y, z)
assert_allclose(model.parameters, [0, 1, 2, 3, 4, 5, 6, 7, 8], atol=10**-9)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_chebyshev2D_nonlinear_fitting_with_weights(self, fitter):
fitter = fitter()
cheb2d = models.Chebyshev2D(2, 2)
cheb2d.parameters = np.arange(9)
z = cheb2d(self.x, self.y)
cheb2d.parameters = [0.1, 0.6, 1.8, 2.9, 3.7, 4.9, 6.7, 7.5, 8.9]
weights = np.ones_like(self.y)
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
model = fitter(cheb2d, self.x, self.y, z, weights=weights)
assert_allclose(model.parameters, [0, 1, 2, 3, 4, 5, 6, 7, 8], atol=10**-9)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
class TestJointFitter:
"""
Tests the joint fitting routine using two Gaussian models
"""
def setup_class(self):
"""
Create two Gaussian models and some data with noise.
Create a fitter for the two models keeping the amplitude parameter
common for the two models.
"""
self.g1 = models.Gaussian1D(10, mean=14.9, stddev=0.3)
self.g2 = models.Gaussian1D(10, mean=13, stddev=0.4)
self.jf = JointFitter(
[self.g1, self.g2], {self.g1: ["amplitude"], self.g2: ["amplitude"]}, [9.8]
)
self.x = np.arange(10, 20, 0.1)
y1 = self.g1(self.x)
y2 = self.g2(self.x)
with NumpyRNGContext(_RANDOM_SEED):
n = np.random.randn(100)
self.ny1 = y1 + 2 * n
self.ny2 = y2 + 2 * n
self.jf(self.x, self.ny1, self.x, self.ny2)
def test_joint_parameter(self):
"""
Tests that the amplitude of the two models is the same
"""
assert_allclose(self.jf.fitparams[0], self.g1.parameters[0])
assert_allclose(self.jf.fitparams[0], self.g2.parameters[0])
def test_joint_fitter(self):
"""
Tests the fitting routine against the equivalent scipy.optimize.leastsq
procedure and compares the fitted parameters.
"""
p1 = [14.9, 0.3]
p2 = [13, 0.4]
A = 9.8
p = np.r_[A, p1, p2]
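# stacked parameter vector: [shared amplitude, mean1, stddev1, mean2, stddev2]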
def model(A, p, x):
return A * np.exp(-0.5 / p[1] ** 2 * (x - p[0]) ** 2)
def errfunc(p, x1, y1, x2, y2):
return np.ravel(
np.r_[model(p[0], p[1:3], x1) - y1, model(p[0], p[3:], x2) - y2]
)
coeff, _ = optimize.leastsq(
errfunc, p, args=(self.x, self.ny1, self.x, self.ny2)
)
assert_allclose(coeff, self.jf.fitparams, rtol=10 ** (-2))
class TestLinearLSQFitter:
def test_compound_model_raises_error(self):
"""Test that if an user tries to use a compound model, raises an error"""
MESSAGE = r"Model must be simple, not compound"
with pytest.raises(ValueError, match=MESSAGE):
init_model1 = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
init_model2 = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
init_model_comp = init_model1 + init_model2
x = np.arange(10)
y = init_model_comp(x, model_set_axis=False)
fitter = LinearLSQFitter()
fitter(init_model_comp, x, y)
def test_chebyshev1D(self):
"""Tests fitting a 1D Chebyshev polynomial to some real world data."""
test_file = get_pkg_data_filename(os.path.join("data", "idcompspec.fits"))
with open(test_file) as f:
lines = f.read()
reclist = lines.split("begin")
record = irafutil.IdentifyRecord(reclist[1])
coeffs = record.coeff
order = int(record.fields["order"])
initial_model = models.Chebyshev1D(order - 1, domain=record.get_range())
fitter = LinearLSQFitter()
fitted_model = fitter(initial_model, record.x, record.z)
assert_allclose(fitted_model.parameters, np.array(coeffs), rtol=10e-2)
def test_linear_fit_model_set(self):
"""Tests fitting multiple models simultaneously."""
init_model = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
x = np.arange(10)
y_expected = init_model(x, model_set_axis=False)
assert y_expected.shape == (2, 10)
# Add a bit of random noise
with NumpyRNGContext(_RANDOM_SEED):
y = y_expected + np.random.normal(0, 0.01, size=y_expected.shape)
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model(x, model_set_axis=False), y_expected, rtol=1e-1)
def test_linear_fit_2d_model_set(self):
"""Tests fitted multiple 2-D models simultaneously."""
init_model = models.Polynomial2D(degree=2, c0_0=[1, 1], n_models=2)
x = np.arange(10)
y = np.arange(10)
z_expected = init_model(x, y, model_set_axis=False)
assert z_expected.shape == (2, 10)
# Add a bit of random noise
with NumpyRNGContext(_RANDOM_SEED):
z = z_expected + np.random.normal(0, 0.01, size=z_expected.shape)
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, z)
assert_allclose(fitted_model(x, y, model_set_axis=False), z_expected, rtol=1e-1)
def test_linear_fit_fixed_parameter(self):
"""
Tests fitting a polynomial model with a fixed parameter (issue #6135).
"""
init_model = models.Polynomial1D(degree=2, c1=1)
init_model.c1.fixed = True
x = np.arange(10)
y = 2 + x + 0.5 * x * x
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.parameters, [2.0, 1.0, 0.5], atol=1e-14)
def test_linear_fit_model_set_fixed_parameter(self):
"""
Tests fitting a polynomial model set with a fixed parameter (#6135).
"""
init_model = models.Polynomial1D(degree=2, c1=[1, -2], n_models=2)
init_model.c1.fixed = True
x = np.arange(10)
yy = np.array([2 + x + 0.5 * x * x, -2 * x])
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, yy)
assert_allclose(fitted_model.c0, [2.0, 0.0], atol=1e-14)
assert_allclose(fitted_model.c1, [1.0, -2.0], atol=1e-14)
assert_allclose(fitted_model.c2, [0.5, 0.0], atol=1e-14)
def test_linear_fit_2d_model_set_fixed_parameters(self):
"""
Tests fitting a 2d polynomial model set with fixed parameters (#6135).
"""
init_model = models.Polynomial2D(
degree=2,
c1_0=[1, 2],
c0_1=[-0.5, 1],
n_models=2,
fixed={"c1_0": True, "c0_1": True},
)
x, y = np.mgrid[0:5, 0:5]
zz = np.array([1 + x - 0.5 * y + 0.1 * x * x, 2 * x + y - 0.2 * y * y])
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, zz)
assert_allclose(fitted_model(x, y, model_set_axis=False), zz, atol=1e-14)
def test_linear_fit_model_set_masked_values(self):
"""
Tests model set fitting with masked value(s) (#4824, #6819).
"""
# NB. For single models, there is an equivalent doctest.
init_model = models.Polynomial1D(degree=1, n_models=2)
x = np.arange(10)
y = np.ma.masked_array([2 * x + 1, x - 2], mask=np.zeros_like([x, x]))
y[0, 7] = 100.0 # throw off fit coefficients if unmasked
y.mask[0, 7] = True
y[1, 1:3] = -100.0
y.mask[1, 1:3] = True
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.c0, [1.0, -2.0], atol=1e-14)
assert_allclose(fitted_model.c1, [2.0, 1.0], atol=1e-14)
def test_linear_fit_2d_model_set_masked_values(self):
"""
Tests 2D model set fitting with masked value(s) (#4824, #6819).
"""
init_model = models.Polynomial2D(1, n_models=2)
x, y = np.mgrid[0:5, 0:5]
z = np.ma.masked_array(
[2 * x + 3 * y + 1, x - 0.5 * y - 2], mask=np.zeros_like([x, x])
)
z[0, 3, 1] = -1000.0 # throw off fit coefficients if unmasked
z.mask[0, 3, 1] = True
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, z)
assert_allclose(fitted_model.c0_0, [1.0, -2.0], atol=1e-14)
assert_allclose(fitted_model.c1_0, [2.0, 1.0], atol=1e-14)
assert_allclose(fitted_model.c0_1, [3.0, -0.5], atol=1e-14)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
class TestNonLinearFitters:
"""Tests non-linear least squares fitting and the SLSQP algorithm."""
def setup_class(self):
self.initial_values = [100, 5, 1]
self.xdata = np.arange(0, 10, 0.1)
sigma = 4.0 * np.ones_like(self.xdata)
with NumpyRNGContext(_RANDOM_SEED):
yerror = np.random.normal(0, sigma)
def func(p, x):
return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)
self.ydata = func(self.initial_values, self.xdata) + yerror
self.gauss = models.Gaussian1D(100, 5, stddev=1)
@pytest.mark.parametrize("fitter0", non_linear_fitters)
@pytest.mark.parametrize("fitter1", non_linear_fitters)
def test_estimated_vs_analytic_deriv(self, fitter0, fitter1):
"""
Runs `LevMarLSQFitter` and `TRFLSQFitter` with estimated and
analytic derivatives of a `Gaussian1D`.
"""
fitter0 = fitter0()
model = fitter0(self.gauss, self.xdata, self.ydata)
g1e = models.Gaussian1D(100, 5.0, stddev=1)
fitter1 = fitter1()
emodel = fitter1(g1e, self.xdata, self.ydata, estimate_jacobian=True)
assert_allclose(model.parameters, emodel.parameters, rtol=10 ** (-3))
@pytest.mark.parametrize("fitter0", non_linear_fitters)
@pytest.mark.parametrize("fitter1", non_linear_fitters)
def test_estimated_vs_analytic_deriv_with_weights(self, fitter0, fitter1):
"""
Runs `LevMarLSQFitter` and `TRFLSQFitter` with estimated and
analytic derivatives of a `Gaussian1D`.
"""
weights = 1.0 / (self.ydata / 10.0)
fitter0 = fitter0()
model = fitter0(self.gauss, self.xdata, self.ydata, weights=weights)
g1e = models.Gaussian1D(100, 5.0, stddev=1)
fitter1 = fitter1()
emodel = fitter1(
g1e, self.xdata, self.ydata, weights=weights, estimate_jacobian=True
)
assert_allclose(model.parameters, emodel.parameters, rtol=10 ** (-3))
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_with_optimize(self, fitter):
"""
Tests results from `LevMarLSQFitter` and `TRFLSQFitter` against
`scipy.optimize.leastsq`.
"""
fitter = fitter()
model = fitter(self.gauss, self.xdata, self.ydata, estimate_jacobian=True)
def func(p, x):
return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)
def errfunc(p, x, y):
return func(p, x) - y
result = optimize.leastsq(
errfunc, self.initial_values, args=(self.xdata, self.ydata)
)
assert_allclose(model.parameters, result[0], rtol=10 ** (-3))
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_with_weights(self, fitter):
"""
Tests results from `LevMarLSQFitter` and `TRFLSQFitter` with weights.
"""
fitter = fitter()
# part 1: weights are equal to 1
model = fitter(self.gauss, self.xdata, self.ydata, estimate_jacobian=True)
withw = fitter(
self.gauss,
self.xdata,
self.ydata,
estimate_jacobian=True,
weights=np.ones_like(self.xdata),
)
assert_allclose(model.parameters, withw.parameters, rtol=10 ** (-4))
# part 2: weights are 0 or 1 (effectively, they are a mask)
weights = np.zeros_like(self.xdata)
weights[::2] = 1.0
mask = weights >= 1.0
model = fitter(
self.gauss, self.xdata[mask], self.ydata[mask], estimate_jacobian=True
)
withw = fitter(
self.gauss, self.xdata, self.ydata, estimate_jacobian=True, weights=weights
)
assert_allclose(model.parameters, withw.parameters, rtol=10 ** (-4))
@pytest.mark.filterwarnings(r"ignore:.* Maximum number of iterations reached")
@pytest.mark.filterwarnings(
r"ignore:Values in x were outside bounds during a minimize step, "
r"clipping to bounds"
)
@pytest.mark.parametrize("fitter_class", fitters)
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_fitter_against_LevMar(self, fitter_class, fitter):
"""
Tests results from non-linear fitters against `LevMarLSQFitter`
and `TRFLSQFitter`
"""
fitter = fitter()
fitter_cls = fitter_class()
# This emits a warning from fitter that we need to ignore with
# pytest.mark.filterwarnings above.
new_model = fitter_cls(self.gauss, self.xdata, self.ydata)
model = fitter(self.gauss, self.xdata, self.ydata)
assert_allclose(model.parameters, new_model.parameters, rtol=10 ** (-4))
@pytest.mark.filterwarnings(
r"ignore:Values in x were outside bounds during a minimize step, "
r"clipping to bounds"
)
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_LSQ_SLSQP_with_constraints(self, fitter):
"""
Runs `LevMarLSQFitter`/`TRFLSQFitter` and `SLSQPLSQFitter` on a
model with constraints.
"""
fitter = fitter()
g1 = models.Gaussian1D(100, 5, stddev=1)
g1.mean.fixed = True
fslsqp = SLSQPLSQFitter()
slsqp_model = fslsqp(g1, self.xdata, self.ydata)
model = fitter(g1, self.xdata, self.ydata)
assert_allclose(model.parameters, slsqp_model.parameters, rtol=10 ** (-4))
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_non_linear_lsq_fitter_with_weights(self, fitter):
"""
Tests that issue #11581 has been solved.
"""
fitter = fitter()
np.random.seed(42)
norder = 2
fitter2 = LinearLSQFitter()
model = models.Polynomial1D(norder)
npts = 10000
c = [2.0, -10.0, 7.0]
tw = np.random.uniform(0.0, 10.0, npts)
tx = np.random.uniform(0.0, 10.0, npts)
ty = c[0] + c[1] * tx + c[2] * (tx**2)
ty += np.random.normal(0.0, 1.5, npts)
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
tf1 = fitter(model, tx, ty, weights=tw)
tf2 = fitter2(model, tx, ty, weights=tw)
assert_allclose(tf1.parameters, tf2.parameters, atol=10 ** (-16))
assert_allclose(tf1.parameters, c, rtol=10 ** (-2), atol=10 ** (-2))
model = models.Gaussian1D()
if isinstance(fitter, (TRFLSQFitter, LMLSQFitter)):
with pytest.warns(
AstropyUserWarning, match=r"The fit may be unsuccessful; *."
):
fitter(model, tx, ty, weights=tw)
else:
fitter(model, tx, ty, weights=tw)
model = models.Polynomial2D(norder)
nxpts = 100
nypts = 150
npts = nxpts * nypts
c = [1.0, 4.0, 7.0, -8.0, -9.0, -3.0]
tw = np.random.uniform(0.0, 10.0, npts).reshape(nxpts, nypts)
tx = np.random.uniform(0.0, 10.0, npts).reshape(nxpts, nypts)
ty = np.random.uniform(0.0, 10.0, npts).reshape(nxpts, nypts)
tz = (
c[0]
+ c[1] * tx
+ c[2] * (tx**2)
+ c[3] * ty
+ c[4] * (ty**2)
+ c[5] * tx * ty
)
tz += np.random.normal(0.0, 1.5, npts).reshape(nxpts, nypts)
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
tf1 = fitter(model, tx, ty, tz, weights=tw)
tf2 = fitter2(model, tx, ty, tz, weights=tw)
assert_allclose(tf1.parameters, tf2.parameters, atol=10 ** (-16))
assert_allclose(tf1.parameters, c, rtol=10 ** (-2), atol=10 ** (-2))
def test_simplex_lsq_fitter(self):
"""A basic test for the `SimplexLSQ` fitter."""
class Rosenbrock(Fittable2DModel):
a = Parameter()
b = Parameter()
@staticmethod
def evaluate(x, y, a, b):
return (a - x) ** 2 + b * (y - x**2) ** 2
x = y = np.linspace(-3.0, 3.0, 100)
with NumpyRNGContext(_RANDOM_SEED):
z = Rosenbrock.evaluate(x, y, 1.0, 100.0)
z += np.random.normal(0.0, 0.1, size=z.shape)
fitter = SimplexLSQFitter()
r_i = Rosenbrock(1, 100)
r_f = fitter(r_i, x, y, z)
assert_allclose(r_f.parameters, [1.0, 100.0], rtol=1e-2)
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_param_cov(self, fitter):
"""
Tests that the 'param_cov' fit_info entry gets the right answer for
*linear* least squares, where the answer is exact
"""
fitter = fitter()
a = 2
b = 100
with NumpyRNGContext(_RANDOM_SEED):
x = np.linspace(0, 1, 100)
# y scatter is amplitude ~1 to make sure covariance is
# non-negligible
y = x * a + b + np.random.randn(len(x))
# first compute the ordinary least squares covariance matrix
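# (closed-form reference: beta = (X^T X)^-1 X^T y and
# Cov(beta) = s^2 (X^T X)^-1, with s^2 the residual variance)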
X = np.vstack([x, np.ones(len(x))]).T
beta = np.matmul(np.matmul(np.linalg.inv(np.matmul(X.T, X)), X.T), y.T)
s2 = np.sum((y - np.matmul(X, beta).ravel()) ** 2) / (len(y) - len(beta))
olscov = np.linalg.inv(np.matmul(X.T, X)) * s2
# now do the non-linear least squares fit
mod = models.Linear1D(a, b)
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
fmod = fitter(mod, x, y)
assert_allclose(fmod.parameters, beta.ravel())
assert_allclose(olscov, fitter.fit_info["param_cov"])
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_param_cov_with_uncertainties(self, fitter):
"""
Tests that the 'param_cov' fit_info entry gets the right answer for
*linear* least squares, where the answer is exact
"""
fitter = fitter()
a = 2
b = 100
with NumpyRNGContext(_RANDOM_SEED):
x = np.linspace(0, 1, 100)
# y scatter is amplitude ~1 to make sure covariance is
# non-negligible
y = x * a + b + np.random.normal(size=len(x))
sigma = np.random.normal(loc=1, scale=0.1, size=len(x))
# compute the ordinary least squares covariance matrix
# accounting for measurement uncertainties `sigma`
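# (reference formulas: Cov = (X^T N^-1 X)^-1 and beta = Cov X^T N^-1 y,
# with N = diag(sigma)^2; the fit below passes weights = 1/sigma)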
X = np.vstack([x, np.ones(len(x))]).T
inv_N = np.linalg.inv(np.diag(sigma) ** 2)
cov = np.linalg.inv(X.T @ inv_N @ X)
beta = cov @ X.T @ inv_N @ y.T
# now do the non-linear least squares fit
mod = models.Linear1D(a, b)
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
fmod = fitter(mod, x, y, weights=sigma**-1)
assert_allclose(fmod.parameters, beta.ravel())
assert_allclose(cov, fitter.fit_info["param_cov"])
class TestEntryPoint:
"""Tests population of fitting with entry point fitters"""
def successfulimport(self):
# This should work
class goodclass(Fitter):
__name__ = "GoodClass"
return goodclass
def raiseimporterror(self):
# This should fail as it raises an ImportError
raise ImportError
def returnbadfunc(self):
def badfunc():
# This should import but it should fail type check
pass
return badfunc
def returnbadclass(self):
# This should import but it should fail the subclass type check
class badclass:
pass
return badclass
def test_working(self):
"""This should work fine"""
mock_entry_working = mock.create_autospec(EntryPoint)
mock_entry_working.name = "Working"
mock_entry_working.load = self.successfulimport
populate_entry_points([mock_entry_working])
def test_import_error(self):
"""This raises an import error on load to test that it is handled correctly"""
mock_entry_importerror = mock.create_autospec(EntryPoint)
mock_entry_importerror.name = "IErr"
mock_entry_importerror.load = self.raiseimporterror
with pytest.warns(AstropyUserWarning, match=r".*ImportError.*"):
populate_entry_points([mock_entry_importerror])
def test_bad_func(self):
"""This returns a function which fails the type check"""
mock_entry_badfunc = mock.create_autospec(EntryPoint)
mock_entry_badfunc.name = "BadFunc"
mock_entry_badfunc.load = self.returnbadfunc
with pytest.warns(AstropyUserWarning, match=r".*Class.*"):
populate_entry_points([mock_entry_badfunc])
def test_bad_class(self):
"""This returns a class which doesn't inherient from fitter"""
mock_entry_badclass = mock.create_autospec(EntryPoint)
mock_entry_badclass.name = "BadClass"
mock_entry_badclass.load = self.returnbadclass
with pytest.warns(AstropyUserWarning, match=r".*BadClass.*"):
populate_entry_points([mock_entry_badclass])
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
class Test1DFittingWithOutlierRemoval:
def setup_class(self):
self.x = np.linspace(-5.0, 5.0, 200)
self.model_params = (3.0, 1.3, 0.8)
def func(p, x):
return p[0] * np.exp(-0.5 * (x - p[1]) ** 2 / p[2] ** 2)
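# an unnormalized Gaussian with (amplitude, mean, stddev) = self.model_params,
# recovered below with a Gaussian1D fit after outlier removal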
self.y = func(self.model_params, self.x)
@pytest.mark.filterwarnings("ignore:The fit may be unsuccessful")
@pytest.mark.filterwarnings(
r"ignore:Values in x were outside bounds during a minimize step, "
r"clipping to bounds"
)
@pytest.mark.parametrize("fitter", non_linear_fitters + fitters)
def test_with_fitters_and_sigma_clip(self, fitter):
import scipy.stats as stats
fitter = fitter()
np.random.seed(0)
c = stats.bernoulli.rvs(0.25, size=self.x.shape)
y = self.y + (
np.random.normal(0.0, 0.2, self.x.shape)
+ c * np.random.normal(3.0, 5.0, self.x.shape)
)
g_init = models.Gaussian1D(amplitude=1.0, mean=0, stddev=1.0)
fit = FittingWithOutlierRemoval(fitter, sigma_clip, niter=3, sigma=3.0)
fitted_model, _ = fit(g_init, self.x, y)
assert_allclose(fitted_model.parameters, self.model_params, rtol=1e-1)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
class Test2DFittingWithOutlierRemoval:
def setup_class(self):
self.y, self.x = np.mgrid[-3:3:128j, -3:3:128j]
self.model_params = (3.0, 1.0, 0.0, 0.8, 0.8)
def Gaussian_2D(p, pos):
return p[0] * np.exp(
-0.5 * (pos[0] - p[2]) ** 2 / p[4] ** 2
- 0.5 * (pos[1] - p[1]) ** 2 / p[3] ** 2
)
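# Gaussian_2D mirrors the Gaussian2D parameter ordering
# (amplitude, x_mean, y_mean, x_stddev, y_stddev), with pos = [y, x]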
self.z = Gaussian_2D(self.model_params, np.array([self.y, self.x]))
def initial_guess(self, data, pos):
"""Computes the centroid of the data as the initial guess for the
center position."""
y = pos[0]
x = pos[1]
wx = x * data
wy = y * data
total_intensity = np.sum(data)
x_mean = np.sum(wx) / total_intensity
y_mean = np.sum(wy) / total_intensity
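# convert the centroid from grid coordinates to approximate pixel indices
# (scale by pixels per coordinate unit and offset to the array centre)
# so the peak amplitude can be read off the data array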
x_to_pixel = x[0].size / (x[x[0].size - 1][x[0].size - 1] - x[0][0])
y_to_pixel = y[0].size / (y[y[0].size - 1][y[0].size - 1] - y[0][0])
x_pos = np.around(x_mean * x_to_pixel + x[0].size / 2.0).astype(int)
y_pos = np.around(y_mean * y_to_pixel + y[0].size / 2.0).astype(int)
amplitude = data[y_pos][x_pos]
return amplitude, x_mean, y_mean
@pytest.mark.filterwarnings("ignore:The fit may be unsuccessful")
@pytest.mark.filterwarnings(
r"ignore:Values in x were outside bounds during a minimize step, "
r"clipping to bounds"
)
@pytest.mark.parametrize("fitter", non_linear_fitters + fitters)
def test_with_fitters_and_sigma_clip(self, fitter):
import scipy.stats as stats
fitter = fitter()
np.random.seed(0)
c = stats.bernoulli.rvs(0.25, size=self.z.shape)
z = self.z + (
np.random.normal(0.0, 0.2, self.z.shape)
+ c * np.random.normal(self.z, 2.0, self.z.shape)
)
guess = self.initial_guess(self.z, np.array([self.y, self.x]))
g2_init = models.Gaussian2D(
amplitude=guess[0],
x_mean=guess[1],
y_mean=guess[2],
x_stddev=0.75,
y_stddev=1.25,
)
fit = FittingWithOutlierRemoval(fitter, sigma_clip, niter=3, sigma=3.0)
fitted_model, _ = fit(g2_init, self.x, self.y, z)
assert_allclose(fitted_model.parameters[0:5], self.model_params, atol=1e-1)
def test_1d_set_fitting_with_outlier_removal():
"""Test model set fitting with outlier removal (issue #6819)"""
poly_set = models.Polynomial1D(2, n_models=2)
fitter = FittingWithOutlierRemoval(
LinearLSQFitter(),
sigma_clip,
sigma=2.5,
niter=3,
cenfunc=np.ma.mean,
stdfunc=np.ma.std,
)
x = np.arange(10)
y = np.array([2.5 * x - 4, 2 * x * x + x + 10])
y[1, 5] = -1000 # outlier
poly_set, filt_y = fitter(poly_set, x, y)
assert_allclose(poly_set.c0, [-4.0, 10.0], atol=1e-14)
assert_allclose(poly_set.c1, [2.5, 1.0], atol=1e-14)
assert_allclose(poly_set.c2, [0.0, 2.0], atol=1e-14)
def test_2d_set_axis_2_fitting_with_outlier_removal():
"""Test fitting 2D model set (axis 2) with outlier removal (issue #6819)"""
poly_set = models.Polynomial2D(1, n_models=2, model_set_axis=2)
fitter = FittingWithOutlierRemoval(
LinearLSQFitter(),
sigma_clip,
sigma=2.5,
niter=3,
cenfunc=np.ma.mean,
stdfunc=np.ma.std,
)
y, x = np.mgrid[0:5, 0:5]
z = np.rollaxis(np.array([x + y, 1 - 0.1 * x + 0.2 * y]), 0, 3)
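# np.rollaxis moves the model axis last, so z has shape (5, 5, 2) to match
# model_set_axis=2; the outliers below are placed in the first model's plane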
z[3, 3:5, 0] = 100.0 # outliers
poly_set, filt_z = fitter(poly_set, x, y, z)
assert_allclose(poly_set.c0_0, [[[0.0, 1.0]]], atol=1e-14)
assert_allclose(poly_set.c1_0, [[[1.0, -0.1]]], atol=1e-14)
assert_allclose(poly_set.c0_1, [[[1.0, 0.2]]], atol=1e-14)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
class TestWeightedFittingWithOutlierRemoval:
"""Issue #7020"""
def setup_class(self):
# values of x,y not important as we fit a constant z(x,y) = p0 model here
self.y, self.x = np.mgrid[0:20, 0:20]
self.z = np.mod(self.x + self.y, 2) * 2 - 1 # -1,1 chessboard
self.weights = np.mod(self.x + self.y, 2) * 2 + 1 # 1,3 chessboard
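# the +1 cells carry weight 3 and the -1 cells weight 1, so a weighted constant
# fit is pulled above zero; with weights acting as 1/sigma the clean chessboard
# effectively averages to (9 - 1) / 10 = 0.8, consistent with the model-set
# tests below once the two outliers are clipped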
self.z[0, 0] = 1000.0 # outlier
self.z[0, 1] = 1000.0 # outlier
self.x1d = self.x.flatten()
self.z1d = self.z.flatten()
self.weights1d = self.weights.flatten()
def test_1d_without_weights_without_sigma_clip(self):
model = models.Polynomial1D(0)
fitter = LinearLSQFitter()
fit = fitter(model, self.x1d, self.z1d)
assert_allclose(fit.parameters[0], self.z1d.mean(), atol=10 ** (-2))
def test_1d_without_weights_with_sigma_clip(self):
model = models.Polynomial1D(0)
fitter = FittingWithOutlierRemoval(
LinearLSQFitter(), sigma_clip, niter=3, sigma=3.0
)
fit, mask = fitter(model, self.x1d, self.z1d)
assert (~mask).sum() == self.z1d.size - 2
assert mask[0] and mask[1]
assert_allclose(
fit.parameters[0], 0.0, atol=10 ** (-2)
) # with removed outliers mean is 0.0
def test_1d_with_weights_without_sigma_clip(self):
model = models.Polynomial1D(0)
fitter = LinearLSQFitter()
fit = fitter(model, self.x1d, self.z1d, weights=self.weights1d)
assert fit.parameters[0] > 1.0 # outliers pulled it high
def test_1d_with_weights_with_sigma_clip(self):
"""
smoke test for #7020 - fails without the fitting.py
patch because weights do not propagate
"""
model = models.Polynomial1D(0)
fitter = FittingWithOutlierRemoval(
LinearLSQFitter(), sigma_clip, niter=3, sigma=3.0
)
fit, filtered = fitter(model, self.x1d, self.z1d, weights=self.weights1d)
assert fit.parameters[0] > 10 ** (-2) # weights pulled it > 0
# outliers didn't pull it out of [-1:1] because they had been removed
assert fit.parameters[0] < 1.0
def test_1d_set_with_common_weights_with_sigma_clip(self):
"""added for #6819 (1D model set with weights in common)"""
model = models.Polynomial1D(0, n_models=2)
fitter = FittingWithOutlierRemoval(
LinearLSQFitter(), sigma_clip, niter=3, sigma=3.0
)
z1d = np.array([self.z1d, self.z1d])
fit, filtered = fitter(model, self.x1d, z1d, weights=self.weights1d)
assert_allclose(fit.parameters, [0.8, 0.8], atol=1e-14)
def test_1d_set_with_weights_with_sigma_clip(self):
"""1D model set with separate weights"""
model = models.Polynomial1D(0, n_models=2)
fitter = FittingWithOutlierRemoval(
LinearLSQFitter(), sigma_clip, niter=3, sigma=3.0
)
z1d = np.array([self.z1d, self.z1d])
weights = np.array([self.weights1d, self.weights1d])
fit, filtered = fitter(model, self.x1d, z1d, weights=weights)
assert_allclose(fit.parameters, [0.8, 0.8], atol=1e-14)
def test_2d_without_weights_without_sigma_clip(self):
model = models.Polynomial2D(0)
fitter = LinearLSQFitter()
fit = fitter(model, self.x, self.y, self.z)
assert_allclose(fit.parameters[0], self.z.mean(), atol=10 ** (-2))
def test_2d_without_weights_with_sigma_clip(self):
model = models.Polynomial2D(0)
fitter = FittingWithOutlierRemoval(
LinearLSQFitter(), sigma_clip, niter=3, sigma=3.0
)
fit, mask = fitter(model, self.x, self.y, self.z)
assert (~mask).sum() == self.z.size - 2
assert mask[0, 0] and mask[0, 1]
assert_allclose(fit.parameters[0], 0.0, atol=10 ** (-2))
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_2d_with_weights_without_sigma_clip(self, fitter):
fitter = fitter()
model = models.Polynomial2D(0)
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
fit = fitter(model, self.x, self.y, self.z, weights=self.weights)
assert fit.parameters[0] > 1.0 # outliers pulled it high
def test_2d_linear_with_weights_without_sigma_clip(self):
model = models.Polynomial2D(0)
# LinearLSQFitter doesn't handle weights properly in 2D
fitter = LinearLSQFitter()
fit = fitter(model, self.x, self.y, self.z, weights=self.weights)
assert fit.parameters[0] > 1.0 # outliers pulled it high
@pytest.mark.parametrize("base_fitter", non_linear_fitters)
def test_2d_with_weights_with_sigma_clip(self, base_fitter):
"""smoke test for #7020 - fails without fitting.py patch because
weights does not propagate"""
base_fitter = base_fitter()
model = models.Polynomial2D(0)
fitter = FittingWithOutlierRemoval(base_fitter, sigma_clip, niter=3, sigma=3.0)
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
fit, _ = fitter(model, self.x, self.y, self.z, weights=self.weights)
assert fit.parameters[0] > 10 ** (-2) # weights pulled it > 0
# outliers didn't pull it out of [-1:1] because they had been removed
assert fit.parameters[0] < 1.0
def test_2d_linear_with_weights_with_sigma_clip(self):
"""same as test above with a linear fitter."""
model = models.Polynomial2D(0)
fitter = FittingWithOutlierRemoval(
LinearLSQFitter(), sigma_clip, niter=3, sigma=3.0
)
fit, _ = fitter(model, self.x, self.y, self.z, weights=self.weights)
assert fit.parameters[0] > 10 ** (-2) # weights pulled it > 0
# outliers didn't pull it out of [-1:1] because they had been removed
assert fit.parameters[0] < 1.0
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_fitters_with_weights(fitter):
"""Issue #5737"""
fitter = fitter()
if isinstance(fitter, _NLLSQFitter):
pytest.xfail(
"This test is poorly designed and causes issues for "
"scipy.optimize.least_squares based fitters"
)
Xin, Yin = np.mgrid[0:21, 0:21]
with NumpyRNGContext(_RANDOM_SEED):
zsig = np.random.normal(0, 0.01, size=Xin.shape)
# Non-linear model
g2 = models.Gaussian2D(10, 10, 9, 2, 3)
z = g2(Xin, Yin)
gmod = fitter(models.Gaussian2D(15, 7, 8, 1.3, 1.2), Xin, Yin, z + zsig)
assert_allclose(gmod.parameters, g2.parameters, atol=10 ** (-2))
# Linear model
p2 = models.Polynomial2D(3)
p2.parameters = np.arange(10) / 1.2
z = p2(Xin, Yin)
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig)
assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))
def test_linear_fitter_with_weights():
"""Regression test for #7035"""
Xin, Yin = np.mgrid[0:21, 0:21]
fitter = LinearLSQFitter()
with NumpyRNGContext(_RANDOM_SEED):
zsig = np.random.normal(0, 0.01, size=Xin.shape)
p2 = models.Polynomial2D(3)
p2.parameters = np.arange(10) / 1.2
z = p2(Xin, Yin)
pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig, weights=zsig ** (-2))
assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))
@pytest.mark.parametrize(
"fixed, warns",
[
({}, True),  # tests that fitting models with no fixed parameters produces warnings
(
{"c1_0": True},
True,
),  # tests that fitting models with fixed parameters produces warnings - #14037
(
{"c0_1": True},
False,
), # https://github.com/astropy/astropy/pull/14037#pullrequestreview-1191726872
],
)
def test_polynomial_poorly_conditioned(fixed, warns):
p0 = models.Polynomial2D(degree=1, c0_0=3, c1_0=5, c0_1=0, fixed=fixed)
fitter = LinearLSQFitter()
x = [1, 2, 3, 4, 5]
y = [1, 1, 1, 1, 1]
values = p0(x, y)
if warns:
with pytest.warns(
AstropyUserWarning, match="The fit may be poorly conditioned"
):
p = fitter(p0, x, y, values)
else:
p = fitter(p0, x, y, values)
assert np.allclose(p0.parameters, p.parameters, rtol=0, atol=1e-14)
def test_linear_fitter_with_weights_flat():
"""Same as the above #7035 test but with flattened inputs"""
Xin, Yin = np.mgrid[0:21, 0:21]
Xin, Yin = Xin.flatten(), Yin.flatten()
fitter = LinearLSQFitter()
with NumpyRNGContext(_RANDOM_SEED):
zsig = np.random.normal(0, 0.01, size=Xin.shape)
p2 = models.Polynomial2D(3)
p2.parameters = np.arange(10) / 1.2
z = p2(Xin, Yin)
pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig, weights=zsig ** (-2))
assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.filterwarnings("ignore:The fit may be unsuccessful")
@pytest.mark.parametrize("fitter", non_linear_fitters + fitters)
def test_fitters_interface(fitter):
"""
Test that ``**kwargs`` work with all optimizers.
This is a basic smoke test.
"""
fitter = fitter()
model = models.Gaussian1D(10, 4, 0.3)
x = np.arange(21)
y = model(x)
if isinstance(fitter, SimplexLSQFitter):
kwargs = {"maxiter": 79, "verblevel": 1, "acc": 1e-6}
else:
kwargs = {"maxiter": 77, "verblevel": 1, "epsilon": 1e-2, "acc": 1e-6}
if isinstance(fitter, (LevMarLSQFitter, _NLLSQFitter)):
kwargs.pop("verblevel")
_ = fitter(model, x, y, **kwargs)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter_class", [SLSQPLSQFitter, SimplexLSQFitter])
def test_optimizers(fitter_class):
fitter = fitter_class()
# Test maxiter
assert fitter._opt_method.maxiter == 100
fitter._opt_method.maxiter = 1000
assert fitter._opt_method.maxiter == 1000
# Test eps
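# (the default is machine epsilon ** 0.5, roughly 1.5e-8 for float64)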
assert fitter._opt_method.eps == np.sqrt(np.finfo(float).eps)
fitter._opt_method.eps = 1e-16
assert fitter._opt_method.eps == 1e-16
# Test acc
assert fitter._opt_method.acc == 1e-7
fitter._opt_method.acc = 1e-16
assert fitter._opt_method.acc == 1e-16
# Test repr
assert repr(fitter._opt_method) == f"{fitter._opt_method.__class__.__name__}()"
fitparams = mk.MagicMock()
final_func_val = mk.MagicMock()
numiter = mk.MagicMock()
funcalls = mk.MagicMock()
exit_mode = 1
mess = mk.MagicMock()
xtol = mk.MagicMock()
if fitter_class == SLSQPLSQFitter:
return_value = (fitparams, final_func_val, numiter, exit_mode, mess)
fit_info = {
"final_func_val": final_func_val,
"numiter": numiter,
"exit_mode": exit_mode,
"message": mess,
}
else:
return_value = (fitparams, final_func_val, numiter, funcalls, exit_mode)
fit_info = {
"final_func_val": final_func_val,
"numiter": numiter,
"exit_mode": exit_mode,
"num_function_calls": funcalls,
}
with mk.patch.object(
fitter._opt_method.__class__, "opt_method", return_value=return_value
):
with pytest.warns(AstropyUserWarning, match=r"The fit may be unsuccessful; .*"):
assert (fitparams, fit_info) == fitter._opt_method(
mk.MagicMock(), mk.MagicMock(), mk.MagicMock(), xtol=xtol
)
assert fit_info == fitter._opt_method.fit_info
if isinstance(fitter, SLSQPLSQFitter):
assert fitter._opt_method.acc == 1e-16
else:
assert fitter._opt_method.acc == xtol
@mk.patch.multiple(Optimization, __abstractmethods__=set())
def test_Optimization_abstract_call():
optimization = Optimization(mk.MagicMock())
MESSAGE = r"Subclasses should implement this method"
with pytest.raises(NotImplementedError, match=MESSAGE):
optimization()
def test_fitting_with_outlier_removal_niter():
"""
Test that FittingWithOutlierRemoval stops prior to reaching niter if the
set of masked points has converged and correctly reports the actual number
of iterations performed.
"""
# 2 rows with some noise around a constant level and 1 deviant point:
x = np.arange(25)
with NumpyRNGContext(_RANDOM_SEED):
y = np.random.normal(loc=10.0, scale=1.0, size=(2, 25))
y[0, 14] = 100.0
# Fit 2 models with up to 5 iterations (should only take 2):
fitter = FittingWithOutlierRemoval(
fitter=LinearLSQFitter(),
outlier_func=sigma_clip,
niter=5,
sigma_lower=3.0,
sigma_upper=3.0,
maxiters=1,
)
model, mask = fitter(models.Chebyshev1D(2, n_models=2), x, y)
# Confirm that only the deviant point was rejected, in 2 iterations:
assert_equal(np.where(mask), [[0], [14]])
assert fitter.fit_info["niter"] == 2
# Refit just the first row without any rejection iterations, to ensure
# there are no regressions for that special case:
fitter = FittingWithOutlierRemoval(
fitter=LinearLSQFitter(),
outlier_func=sigma_clip,
niter=0,
sigma_lower=3.0,
sigma_upper=3.0,
maxiters=1,
)
model, mask = fitter(models.Chebyshev1D(2), x, y[0])
# Confirm that there were no iterations or rejected points:
assert mask.sum() == 0
assert fitter.fit_info["niter"] == 0
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
class TestFittingUncertanties:
"""
Test that parameter covariance is calculated correctly for the fitters
that do so (currently LevMarLSQFitter, LinearLSQFitter).
"""
example_1D_models = [models.Polynomial1D(2), models.Linear1D()]
example_1D_sets = [
models.Polynomial1D(2, n_models=2, model_set_axis=False),
models.Linear1D(n_models=2, slope=[1.0, 1.0], intercept=[0, 0]),
]
def setup_class(self):
np.random.seed(619)
self.x = np.arange(10)
self.x_grid = np.random.randint(0, 100, size=100).reshape(10, 10)
self.y_grid = np.random.randint(0, 100, size=100).reshape(10, 10)
self.rand_grid = np.random.random(100).reshape(10, 10)
self.rand = self.rand_grid[0]
@pytest.mark.parametrize(
("single_model", "model_set"), list(zip(example_1D_models, example_1D_sets))
)
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_1d_models(self, single_model, model_set, fitter):
"""Test that fitting uncertainties are computed correctly for 1D models
and 1D model sets. Use covariance/stds given by LevMarLSQFitter as
a benchmark since they are returned by the numpy fitter.
"""
fitter = fitter(calc_uncertainties=True)
linlsq_fitter = LinearLSQFitter(calc_uncertainties=True)
# test 1D single models
# fit single model w/ nonlinear fitter
y = single_model(self.x) + self.rand
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
fit_model = fitter(single_model, self.x, y)
cov_model = fit_model.cov_matrix.cov_matrix
# fit single model w/ linlsq fitter
fit_model_linlsq = linlsq_fitter(single_model, self.x, y)
cov_model_linlsq = fit_model_linlsq.cov_matrix.cov_matrix
# check that covariance and stds are computed correctly
assert_allclose(cov_model_linlsq, cov_model)
assert_allclose(np.sqrt(np.diag(cov_model_linlsq)), fit_model_linlsq.stds.stds)
# now test 1D model sets
# fit set of models w/ linear fitter
y = model_set(self.x, model_set_axis=False) + np.array([self.rand, self.rand])
fit_1d_set_linlsq = linlsq_fitter(model_set, self.x, y)
cov_1d_set_linlsq = [j.cov_matrix for j in fit_1d_set_linlsq.cov_matrix]
# make sure cov matrix from single model fit w/ levmar fitter matches
# the cov matrix of first model in the set
assert_allclose(cov_1d_set_linlsq[0], cov_model)
assert_allclose(
np.sqrt(np.diag(cov_1d_set_linlsq[0])), fit_1d_set_linlsq.stds[0].stds
)
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_2d_models(self, fitter):
"""
Test that fitting uncertainties are computed correctly for 2D models
and 2D model sets. Use covariance/stds given by LevMarLSQFitter as
a benchmark since they are returned by the numpy fitter.
"""
fitter = fitter(calc_uncertainties=True)
linlsq_fitter = LinearLSQFitter(calc_uncertainties=True)
single_model = models.Polynomial2D(2, c0_0=2)
model_set = models.Polynomial2D(
degree=2, n_models=2, c0_0=[2, 3], model_set_axis=False
)
# fit single model w/ nonlinear fitter
z_grid = single_model(self.x_grid, self.y_grid) + self.rand_grid
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
fit_model = fitter(single_model, self.x_grid, self.y_grid, z_grid)
cov_model = fit_model.cov_matrix.cov_matrix
# fit single model w/ linlsq fitter
fit_model_linlsq = linlsq_fitter(single_model, self.x_grid, self.y_grid, z_grid)
cov_model_linlsq = fit_model_linlsq.cov_matrix.cov_matrix
assert_allclose(cov_model, cov_model_linlsq)
assert_allclose(np.sqrt(np.diag(cov_model_linlsq)), fit_model_linlsq.stds.stds)
# fit 2d model set
z_grid = model_set(self.x_grid, self.y_grid) + np.array(
(self.rand_grid, self.rand_grid)
)
fit_2d_set_linlsq = linlsq_fitter(model_set, self.x_grid, self.y_grid, z_grid)
cov_2d_set_linlsq = [j.cov_matrix for j in fit_2d_set_linlsq.cov_matrix]
# make sure cov matrix from single model fit w/ levmar fitter matches
# the cov matrix of first model in the set
assert_allclose(cov_2d_set_linlsq[0], cov_model)
assert_allclose(
np.sqrt(np.diag(cov_2d_set_linlsq[0])), fit_2d_set_linlsq.stds[0].stds
)
def test_covariance_std_printing_indexing(self, capsys):
"""
Test printing methods and indexing.
"""
# test str representation for Covariance/stds
fitter = LinearLSQFitter(calc_uncertainties=True)
mod = models.Linear1D()
fit_mod = fitter(mod, self.x, mod(self.x) + self.rand)
print(fit_mod.cov_matrix)
captured = capsys.readouterr()
assert "slope | 0.001" in captured.out
assert "intercept| -0.005, 0.03" in captured.out
print(fit_mod.stds)
captured = capsys.readouterr()
assert "slope | 0.032" in captured.out
assert "intercept| 0.173" in captured.out
# test 'pprint' for Covariance/stds
print(fit_mod.cov_matrix.pprint(round_val=5, max_lines=1))
captured = capsys.readouterr()
assert "slope | 0.00105" in captured.out
assert "intercept" not in captured.out
print(fit_mod.stds.pprint(max_lines=1, round_val=5))
captured = capsys.readouterr()
assert "slope | 0.03241" in captured.out
assert "intercept" not in captured.out
# test indexing for Covariance class.
assert fit_mod.cov_matrix[0, 0] == fit_mod.cov_matrix["slope", "slope"]
# test indexing for stds class.
assert fit_mod.stds[1] == fit_mod.stds["intercept"]
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", non_linear_fitters)
@pytest.mark.parametrize("weights", [np.ones(8), None])
def test_non_finite_error(fitter, weights):
"""Regression test error introduced to solve issues #3575 and #12809"""
x = np.array([1, 2, 3, 4, 5, np.nan, 7, np.inf])
y = np.array([9, np.nan, 11, np.nan, 13, np.nan, 15, 16])
m_init = models.Gaussian1D()
fit = fitter()
# The fit raises an error because the inputs contain non-finite values
with pytest.raises(
NonFiniteValueError, match=r"Objective function has encountered.*"
):
fit(m_init, x, y, weights=weights)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", non_linear_fitters)
@pytest.mark.parametrize("weights", [np.ones(8), None])
def test_non_finite_filter_1D(fitter, weights):
"""Regression test filter introduced to remove non-finte values from data"""
x = np.array([1, 2, 3, 4, 5, 6, 7, 8])
y = np.array([9, np.nan, 11, np.nan, 13, np.nan, 15, np.inf])
m_init = models.Gaussian1D()
fit = fitter()
with pytest.warns(
AstropyUserWarning,
match=r"Non-Finite input data has been removed by the fitter",
):
fit(m_init, x, y, filter_non_finite=True, weights=weights)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", non_linear_fitters)
@pytest.mark.parametrize("weights", [np.ones((10, 10)), None])
def test_non_finite_filter_2D(fitter, weights):
"""Regression test filter introduced to remove non-finte values from data"""
x, y = np.mgrid[0:10, 0:10]
m_true = models.Gaussian2D(amplitude=1, x_mean=5, y_mean=5, x_stddev=2, y_stddev=2)
with NumpyRNGContext(_RANDOM_SEED):
z = m_true(x, y) + np.random.rand(*x.shape)
z[0, 0] = np.nan
z[3, 3] = np.inf
z[7, 5] = -np.inf
m_init = models.Gaussian2D()
fit = fitter()
with pytest.warns(
AstropyUserWarning,
match=r"Non-Finite input data has been removed by the fitter",
):
fit(m_init, x, y, z, filter_non_finite=True, weights=weights)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.filterwarnings(r"ignore:Model is linear in parameters*")
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_non_linear_fit_zero_degree_polynomial_with_weights(fitter):
"""
Regression test for issue #13617
Issue:
Weighted non-linear fits of 0-degree polynomials cause an error
to be raised by scipy.
Fix:
There should be no error raised in this circumstance
"""
model = models.Polynomial1D(0, c0=0)
fitter = fitter()
x = np.arange(10, dtype=float)
y = np.ones((10,))
weights = np.ones((10,))
fit = fitter(model, x, y)
assert_almost_equal(fit.c0, 1.0)
fit = fitter(model, x, y, weights=weights)
assert_almost_equal(fit.c0, 1.0)
|
044ff4e69518a6505938e9eeb798f54618a7983addffe5971af175e01cea362d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for model evaluation.
Compare the results of some models with other programs.
"""
import unittest.mock as mk
import numpy as np
# pylint: disable=invalid-name, no-member
import pytest
from numpy.testing import assert_allclose, assert_equal
import astropy.modeling.tabular as tabular_models
from astropy import units as u
from astropy.modeling import fitting, models
from astropy.modeling.bounding_box import ModelBoundingBox
from astropy.modeling.core import FittableModel, Model, _ModelMeta
from astropy.modeling.models import Gaussian2D
from astropy.modeling.parameters import InputParameterError, Parameter
from astropy.modeling.polynomial import PolynomialBase
from astropy.modeling.powerlaws import (
BrokenPowerLaw1D,
ExponentialCutoffPowerLaw1D,
LogParabola1D,
PowerLaw1D,
SmoothlyBrokenPowerLaw1D,
)
from astropy.modeling.separable import separability_matrix
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils import NumpyRNGContext, minversion
from astropy.utils.compat.optional_deps import HAS_SCIPY
from .example_models import models_1D, models_2D
fitters = [
fitting.LevMarLSQFitter,
fitting.TRFLSQFitter,
fitting.LMLSQFitter,
fitting.DogBoxLSQFitter,
]
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_custom_model(fitter, amplitude=4, frequency=1):
fitter = fitter()
def sine_model(x, amplitude=4, frequency=1):
"""
Model function
"""
return amplitude * np.sin(2 * np.pi * frequency * x)
def sine_deriv(x, amplitude=4, frequency=1):
"""
Jacobian of model function, i.e. the derivative of the function with
respect to the *parameters*
"""
da = np.sin(2 * np.pi * frequency * x)
df = 2 * np.pi * x * amplitude * np.cos(2 * np.pi * frequency * x)
return np.vstack((da, df))
SineModel = models.custom_model(sine_model, fit_deriv=sine_deriv)
x = np.linspace(0, 4, 50)
sin_model = SineModel()
sin_model.evaluate(x, 5.0, 2.0)
sin_model.fit_deriv(x, 5.0, 2.0)
np.random.seed(0)
data = sin_model(x) + np.random.rand(len(x)) - 0.5
model = fitter(sin_model, x, data)
assert np.all(
(
np.array([model.amplitude.value, model.frequency.value])
- np.array([amplitude, frequency])
)
< 0.001
)
def test_custom_model_init():
@models.custom_model
def SineModel(x, amplitude=4, frequency=1):
"""Model function"""
return amplitude * np.sin(2 * np.pi * frequency * x)
sin_model = SineModel(amplitude=2.0, frequency=0.5)
assert sin_model.amplitude == 2.0
assert sin_model.frequency == 0.5
def test_custom_model_defaults():
@models.custom_model
def SineModel(x, amplitude=4, frequency=1):
"""Model function"""
return amplitude * np.sin(2 * np.pi * frequency * x)
sin_model = SineModel()
assert SineModel.amplitude.default == 4
assert SineModel.frequency.default == 1
assert sin_model.amplitude == 4
assert sin_model.frequency == 1
def test_inconsistent_input_shapes():
g = Gaussian2D()
x = np.arange(-1.0, 1, 0.2)
y = x.copy()
# check scalar input broadcasting works
assert np.abs(g(x, 0) - g(x, 0 * x)).sum() == 0
# inputs with different but broadcastable shapes are also broadcast together
x.shape = (10, 1)
y.shape = (1, 10)
result = g(x, y)
assert result.shape == (10, 10)
def test_custom_model_bounding_box():
"""Test bounding box evaluation for a 3D model"""
def ellipsoid(x, y, z, x0=13, y0=10, z0=8, a=4, b=3, c=2, amp=1):
rsq = ((x - x0) / a) ** 2 + ((y - y0) / b) ** 2 + ((z - z0) / c) ** 2
val = (rsq < 1) * amp
return val
class Ellipsoid3D(models.custom_model(ellipsoid)):
@property
def bounding_box(self):
return (
(self.z0 - self.c, self.z0 + self.c),
(self.y0 - self.b, self.y0 + self.b),
(self.x0 - self.a, self.x0 + self.a),
)
model = Ellipsoid3D()
bbox = model.bounding_box
zlim, ylim, xlim = bbox.bounding_box()
dz, dy, dx = (np.diff(bbox) / 2).ravel()
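# half-widths of the bounding box, used below to build a grid that extends
# beyond the box so the with_bounding_box evaluation can be compared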
z1, y1, x1 = np.mgrid[
slice(zlim[0], zlim[1] + 1),
slice(ylim[0], ylim[1] + 1),
slice(xlim[0], xlim[1] + 1),
]
z2, y2, x2 = np.mgrid[
slice(zlim[0] - dz, zlim[1] + dz + 1),
slice(ylim[0] - dy, ylim[1] + dy + 1),
slice(xlim[0] - dx, xlim[1] + dx + 1),
]
arr = model(x2, y2, z2, with_bounding_box=True)
sub_arr = model(x1, y1, z1, with_bounding_box=True)
# check for flux agreement
assert abs(np.nansum(arr) - np.nansum(sub_arr)) < np.nansum(arr) * 1e-7
class Fittable2DModelTester:
"""
Test class for all two dimensional parametric models.
Test values have to be defined in example_models.py. It currently tests the
model with different input types, evaluates the model at different
positions and checks that it gives the correct values, and tests whether the
model works with non-linear fitters.
This can be used as a base class for user defined model testing.
"""
def setup_class(self):
self.N = 100
self.M = 100
self.eval_error = 0.0001
self.fit_error = 0.1
self.x = 5.3
self.y = 6.7
self.x1 = np.arange(1, 10, 0.1)
self.y1 = np.arange(1, 10, 0.1)
self.y2, self.x2 = np.mgrid[:10, :8]
def test_input2D(self, model_class, test_parameters):
"""Test model with different input types."""
model = create_model(model_class, test_parameters)
model(self.x, self.y)
model(self.x1, self.y1)
model(self.x2, self.y2)
def test_eval2D(self, model_class, test_parameters):
"""Test model values add certain given points"""
model = create_model(model_class, test_parameters)
x = test_parameters["x_values"]
y = test_parameters["y_values"]
z = test_parameters["z_values"]
assert np.all(np.abs(model(x, y) - z) < self.eval_error)
def test_bounding_box2D(self, model_class, test_parameters):
"""Test bounding box evaluation"""
model = create_model(model_class, test_parameters)
# testing setter
model.bounding_box = ((-5, 5), (-5, 5))
assert model.bounding_box == ((-5, 5), (-5, 5))
model.bounding_box = None
MESSAGE = r"No bounding box is defined for this model .*"
with pytest.raises(NotImplementedError, match=MESSAGE):
model.bounding_box
# test the exception when dimensions don't match
MESSAGE = r"An interval must be some sort of sequence of length 2"
with pytest.raises(ValueError, match=MESSAGE):
model.bounding_box = (-5, 5)
del model.bounding_box
try:
bbox = model.bounding_box
except NotImplementedError:
return
ddx = 0.01
ylim, xlim = bbox
x1 = np.arange(xlim[0], xlim[1], ddx)
y1 = np.arange(ylim[0], ylim[1], ddx)
x2 = np.concatenate(
(
[xlim[0] - idx * ddx for idx in range(10, 0, -1)],
x1,
[xlim[1] + idx * ddx for idx in range(1, 10)],
)
)
y2 = np.concatenate(
(
[ylim[0] - idx * ddx for idx in range(10, 0, -1)],
y1,
[ylim[1] + idx * ddx for idx in range(1, 10)],
)
)
inside_bbox = model(x1, y1)
outside_bbox = model(x2, y2, with_bounding_box=True)
outside_bbox = outside_bbox[~np.isnan(outside_bbox)]
assert np.all(inside_bbox == outside_bbox)
def test_bounding_box2D_peak(self, model_class, test_parameters):
if not test_parameters.pop("bbox_peak", False):
return
model = create_model(model_class, test_parameters)
bbox = model.bounding_box
ylim, xlim = bbox
dy, dx = (np.diff(bbox) / 2).ravel()
y1, x1 = np.mgrid[slice(ylim[0], ylim[1] + 1), slice(xlim[0], xlim[1] + 1)]
y2, x2 = np.mgrid[
slice(ylim[0] - dy, ylim[1] + dy + 1), slice(xlim[0] - dx, xlim[1] + dx + 1)
]
arr = model(x2, y2)
sub_arr = model(x1, y1)
# check for flux agreement
assert abs(arr.sum() - sub_arr.sum()) < arr.sum() * 1e-7
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_fitter2D(self, model_class, test_parameters, fitter):
"""Test if the parametric model works with the fitter."""
fitter = fitter()
x_lim = test_parameters["x_lim"]
y_lim = test_parameters["y_lim"]
parameters = test_parameters["parameters"]
model = create_model(model_class, test_parameters)
if isinstance(parameters, dict):
parameters = [parameters[name] for name in model.param_names]
if "log_fit" in test_parameters:
if test_parameters["log_fit"]:
x = np.logspace(x_lim[0], x_lim[1], self.N)
y = np.logspace(y_lim[0], y_lim[1], self.N)
else:
x = np.linspace(x_lim[0], x_lim[1], self.N)
y = np.linspace(y_lim[0], y_lim[1], self.N)
xv, yv = np.meshgrid(x, y)
np.random.seed(0)
# add 10% noise to the amplitude
noise = np.random.rand(self.N, self.N) - 0.5
data = model(xv, yv) + 0.1 * parameters[0] * noise
new_model = fitter(model, xv, yv, data)
params = [getattr(new_model, name) for name in new_model.param_names]
fixed = [param.fixed for param in params]
expected = np.array([val for val, fixed in zip(parameters, fixed) if not fixed])
fitted = np.array([param.value for param in params if not param.fixed])
assert_allclose(fitted, expected, atol=self.fit_error)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_deriv_2D(self, model_class, test_parameters, fitter):
"""
Test the derivative of a model by fitting with an estimated and
analytical derivative.
"""
fitter = fitter()
x_lim = test_parameters["x_lim"]
y_lim = test_parameters["y_lim"]
if model_class.fit_deriv is None or issubclass(model_class, PolynomialBase):
return
if "log_fit" in test_parameters:
if test_parameters["log_fit"]:
x = np.logspace(x_lim[0], x_lim[1], self.N)
y = np.logspace(y_lim[0], y_lim[1], self.M)
x_test = np.logspace(x_lim[0], x_lim[1], self.N * 10)
y_test = np.logspace(y_lim[0], y_lim[1], self.M * 10)
else:
x = np.linspace(x_lim[0], x_lim[1], self.N)
y = np.linspace(y_lim[0], y_lim[1], self.M)
x_test = np.linspace(x_lim[0], x_lim[1], self.N * 10)
y_test = np.linspace(y_lim[0], y_lim[1], self.M * 10)
xv, yv = np.meshgrid(x, y)
xv_test, yv_test = np.meshgrid(x_test, y_test)
try:
model_with_deriv = create_model(
model_class,
test_parameters,
use_constraints=False,
parameter_key="deriv_initial",
)
model_no_deriv = create_model(
model_class,
test_parameters,
use_constraints=False,
parameter_key="deriv_initial",
)
model = create_model(
model_class,
test_parameters,
use_constraints=False,
parameter_key="deriv_initial",
)
except KeyError:
model_with_deriv = create_model(
model_class, test_parameters, use_constraints=False
)
model_no_deriv = create_model(
model_class, test_parameters, use_constraints=False
)
model = create_model(model_class, test_parameters, use_constraints=False)
# add 10% noise to the amplitude
rsn = np.random.default_rng(0)
amplitude = test_parameters["parameters"][0]
n = 0.1 * amplitude * (rsn.random((self.M, self.N)) - 0.5)
data = model(xv, yv) + n
fitter_with_deriv = fitter
new_model_with_deriv = fitter_with_deriv(model_with_deriv, xv, yv, data)
fitter_no_deriv = fitter
new_model_no_deriv = fitter_no_deriv(
model_no_deriv, xv, yv, data, estimate_jacobian=True
)
assert_allclose(
new_model_with_deriv(xv_test, yv_test),
new_model_no_deriv(xv_test, yv_test),
rtol=1e-2,
)
if model_class != Gaussian2D:
assert_allclose(
new_model_with_deriv.parameters, new_model_no_deriv.parameters, rtol=0.1
)
@pytest.mark.filterwarnings(r"ignore:humlicek2 has been deprecated since .*")
class Fittable1DModelTester:
"""
Test class for all one dimensional parametric models.
    Test values have to be defined in example_models.py. It currently tests the
    model with different input types, evaluates the model at different
    positions, checks that it gives the correct values, and tests whether the
    model works with non-linear fitters.
This can be used as a base class for user defined model testing.
"""
    # These models will fail the fitting test because the built-in fitting data
    # will produce non-finite values
_non_finite_models = [
BrokenPowerLaw1D,
ExponentialCutoffPowerLaw1D,
LogParabola1D,
PowerLaw1D,
SmoothlyBrokenPowerLaw1D,
]
def setup_class(self):
self.N = 100
self.M = 100
self.eval_error = 0.0001
self.fit_error = 0.11
self.x = 5.3
self.y = 6.7
self.x1 = np.arange(1, 10, 0.1)
self.y1 = np.arange(1, 10, 0.1)
self.y2, self.x2 = np.mgrid[:10, :8]
@pytest.mark.filterwarnings(r"ignore:.*:RuntimeWarning")
def test_input1D(self, model_class, test_parameters):
"""Test model with different input types."""
model = create_model(model_class, test_parameters)
model(self.x)
model(self.x1)
model(self.x2)
def test_eval1D(self, model_class, test_parameters):
"""
Test model values at certain given points
"""
model = create_model(model_class, test_parameters)
x = test_parameters["x_values"]
y = test_parameters["y_values"]
assert_allclose(model(x), y, atol=self.eval_error)
def test_bounding_box1D(self, model_class, test_parameters):
"""Test bounding box evaluation"""
model = create_model(model_class, test_parameters)
# testing setter
model.bounding_box = (-5, 5)
model.bounding_box = None
MESSAGE = r"No bounding box is defined for this model .*"
with pytest.raises(NotImplementedError, match=MESSAGE):
model.bounding_box
del model.bounding_box
# test exception if dimensions don't match
MESSAGE = r"An interval must be some sort of sequence of length 2"
with pytest.raises(ValueError, match=MESSAGE):
model.bounding_box = 5
try:
bbox = model.bounding_box.bounding_box()
except NotImplementedError:
return
ddx = 0.01
x1 = np.arange(bbox[0], bbox[1], ddx)
x2 = np.concatenate(
(
[bbox[0] - idx * ddx for idx in range(10, 0, -1)],
x1,
[bbox[1] + idx * ddx for idx in range(1, 10)],
)
)
inside_bbox = model(x1)
outside_bbox = model(x2, with_bounding_box=True)
outside_bbox = outside_bbox[~np.isnan(outside_bbox)]
assert np.all(inside_bbox == outside_bbox)
def test_bounding_box1D_peak(self, model_class, test_parameters):
if not test_parameters.pop("bbox_peak", False):
return
model = create_model(model_class, test_parameters)
bbox = model.bounding_box
if isinstance(model, (models.Lorentz1D, models.Drude1D)):
rtol = 0.01 # 1% agreement is enough due to very extended wings
ddx = 0.1 # Finer sampling to "integrate" flux for narrow peak
else:
rtol = 1e-7
ddx = 1
if isinstance(bbox, ModelBoundingBox):
bbox = bbox.bounding_box()
dx = (np.diff(bbox) / 2)[0]
x1 = np.mgrid[slice(bbox[0], bbox[1] + 1, ddx)]
x2 = np.mgrid[slice(bbox[0] - dx, bbox[1] + dx + 1, ddx)]
arr = model(x2)
sub_arr = model(x1)
# check for flux agreement
assert abs(arr.sum() - sub_arr.sum()) < arr.sum() * rtol
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_fitter1D(self, model_class, test_parameters, fitter):
"""
Test if the parametric model works with the fitter.
"""
SCIPY_LT_1_6 = not minversion("scipy", "1.6")
if (
model_class == models.BrokenPowerLaw1D
and fitter == fitting.TRFLSQFitter
and SCIPY_LT_1_6
):
pytest.xfail(reason="TRF fitter fails for BrokenPowerLaw1D in scipy < 1.6")
fitter = fitter()
x_lim = test_parameters["x_lim"]
parameters = test_parameters["parameters"]
model = create_model(model_class, test_parameters)
if isinstance(parameters, dict):
parameters = [parameters[name] for name in model.param_names]
if "log_fit" in test_parameters:
if test_parameters["log_fit"]:
x = np.logspace(x_lim[0], x_lim[1], self.N)
else:
x = np.linspace(x_lim[0], x_lim[1], self.N)
np.random.seed(0)
        # add 1% relative noise to the data
relative_noise_amplitude = 0.01
data = (1 + relative_noise_amplitude * np.random.randn(len(x))) * model(x)
new_model = fitter(model, x, data)
# Only check parameters that were free in the fit
params = [getattr(new_model, name) for name in new_model.param_names]
fixed = [param.fixed for param in params]
expected = np.array([val for val, fixed in zip(parameters, fixed) if not fixed])
fitted = np.array([param.value for param in params if not param.fixed])
assert_allclose(fitted, expected, atol=self.fit_error)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.filterwarnings(r"ignore:.*:RuntimeWarning")
@pytest.mark.parametrize("fitter", fitters)
def test_deriv_1D(self, model_class, test_parameters, fitter):
"""
Test the derivative of a model by comparing results with an estimated
derivative.
"""
fitter = fitter()
if model_class in self._non_finite_models:
return
x_lim = test_parameters["x_lim"]
if model_class.fit_deriv is None or issubclass(model_class, PolynomialBase):
return
if "log_fit" in test_parameters:
if test_parameters["log_fit"]:
x = np.logspace(x_lim[0], x_lim[1], self.N)
else:
x = np.linspace(x_lim[0], x_lim[1], self.N)
parameters = test_parameters["parameters"]
model_with_deriv = create_model(
model_class, test_parameters, use_constraints=False
)
model_no_deriv = create_model(
model_class, test_parameters, use_constraints=False
)
# NOTE: PR 10644 replaced deprecated usage of RandomState but could not
# find a new seed that did not cause test failure, resorted to hardcoding.
# add 10% noise to the amplitude
# fmt: off
rsn_rand_1234567890 = np.array(
[
0.61879477, 0.59162363, 0.88868359, 0.89165480, 0.45756748,
0.77818808, 0.26706377, 0.99610621, 0.54009489, 0.53752161,
0.40099938, 0.70540579, 0.40518559, 0.94999075, 0.03075388,
0.13602495, 0.08297726, 0.42352224, 0.23449723, 0.74743526,
0.65177865, 0.68998682, 0.16413419, 0.87642114, 0.44733314,
0.57871104, 0.52377835, 0.62689056, 0.34869427, 0.26209748,
0.07498055, 0.17940570, 0.82999425, 0.98759822, 0.11326099,
0.63846415, 0.73056694, 0.88321124, 0.52721004, 0.66487673,
0.74209309, 0.94083846, 0.70123128, 0.29534353, 0.76134369,
0.77593881, 0.36985514, 0.89519067, 0.33082813, 0.86108824,
0.76897859, 0.61343376, 0.43870907, 0.91913538, 0.76958966,
0.51063556, 0.04443249, 0.57463611, 0.31382006, 0.41221713,
0.21531811, 0.03237521, 0.04166386, 0.73109303, 0.74556052,
0.64716325, 0.77575353, 0.64599254, 0.16885816, 0.48485480,
0.53844248, 0.99690349, 0.23657074, 0.04119088, 0.46501519,
0.35739006, 0.23002665, 0.53420791, 0.71639475, 0.81857486,
0.73994342, 0.07948837, 0.75688276, 0.13240193, 0.48465576,
0.20624753, 0.02298276, 0.54257873, 0.68123230, 0.35887468,
0.36296147, 0.67368397, 0.29505730, 0.66558885, 0.93652252,
0.36755130, 0.91787687, 0.75922703, 0.48668067, 0.45967890
]
)
# fmt: on
n = 0.1 * parameters[0] * (rsn_rand_1234567890 - 0.5)
data = model_with_deriv(x) + n
fitter_with_deriv = fitter
new_model_with_deriv = fitter_with_deriv(model_with_deriv, x, data)
fitter_no_deriv = fitter
new_model_no_deriv = fitter_no_deriv(
model_no_deriv, x, data, estimate_jacobian=True
)
assert_allclose(
new_model_with_deriv.parameters, new_model_no_deriv.parameters, atol=0.15
)
def create_model(
model_class, test_parameters, use_constraints=True, parameter_key="parameters"
):
"""Create instance of model class."""
constraints = {}
if issubclass(model_class, PolynomialBase):
return model_class(**test_parameters[parameter_key])
elif issubclass(model_class, FittableModel):
if "requires_scipy" in test_parameters and not HAS_SCIPY:
pytest.skip("SciPy not found")
if use_constraints:
if "constraints" in test_parameters:
constraints = test_parameters["constraints"]
return model_class(*test_parameters[parameter_key], **constraints)
@pytest.mark.filterwarnings(r"ignore:Model is linear in parameters.*")
@pytest.mark.filterwarnings(r"ignore:The fit may be unsuccessful.*")
@pytest.mark.parametrize(
("model_class", "test_parameters"),
sorted(models_1D.items(), key=lambda x: str(x[0])),
)
class TestFittable1DModels(Fittable1DModelTester):
pass
@pytest.mark.filterwarnings(r"ignore:Model is linear in parameters.*")
@pytest.mark.parametrize(
("model_class", "test_parameters"),
sorted(models_2D.items(), key=lambda x: str(x[0])),
)
class TestFittable2DModels(Fittable2DModelTester):
pass
def test_ShiftModel():
# Shift by a scalar
m = models.Shift(42)
assert m(0) == 42
assert_equal(m([1, 2]), [43, 44])
# Shift by a list
m = models.Shift([42, 43], n_models=2)
assert_equal(m(0), [42, 43])
assert_equal(m([1, 2], model_set_axis=False), [[43, 44], [44, 45]])
def test_ScaleModel():
# Scale by a scalar
m = models.Scale(42)
assert m(0) == 0
assert_equal(m([1, 2]), [42, 84])
# Scale by a list
m = models.Scale([42, 43], n_models=2)
assert_equal(m(0), [0, 0])
assert_equal(m([1, 2], model_set_axis=False), [[42, 84], [43, 86]])
@pytest.mark.filterwarnings(r"ignore:humlicek2 has been deprecated since .*")
def test_voigt_model():
"""
    Currently just tests that the model peaks at its center, x_0.
Regression test for https://github.com/astropy/astropy/issues/3942
"""
m = models.Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9)
x = np.arange(0, 10, 0.01)
y = m(x)
assert y[500] == y.max() # y[500] is right at the center
def test_model_instance_repr():
m = models.Gaussian1D(1.5, 2.5, 3.5)
assert repr(m) == "<Gaussian1D(amplitude=1.5, mean=2.5, stddev=3.5)>"
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_interp_1d():
"""
Test Tabular1D model.
"""
points = np.arange(0, 5)
values = [1.0, 10, 2, 45, -3]
LookupTable = models.tabular_model(1)
model = LookupTable(points=points, lookup_table=values)
xnew = [0.0, 0.7, 1.4, 2.1, 3.9]
ans1 = [1.0, 7.3, 6.8, 6.3, 1.8]
assert_allclose(model(xnew), ans1)
# Test evaluate without passing `points`.
model = LookupTable(lookup_table=values)
assert_allclose(model(xnew), ans1)
# Test bounds error.
xextrap = [0.0, 0.7, 1.4, 2.1, 3.9, 4.1]
MESSAGE = r"One of the requested xi is out of bounds in dimension 0"
with pytest.raises(ValueError, match=MESSAGE):
model(xextrap)
# test extrapolation and fill value
model = LookupTable(lookup_table=values, bounds_error=False, fill_value=None)
assert_allclose(model(xextrap), [1.0, 7.3, 6.8, 6.3, 1.8, -7.8])
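    # (Informal note) bounds_error=False combined with fill_value=None asks the
    # underlying interpolator to extrapolate, which is where the -7.8 value
    # beyond the last tabulated point comes from.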
# Test unit support
xnew = xnew * u.nm
ans1 = ans1 * u.nJy
model = LookupTable(points=points * u.nm, lookup_table=values * u.nJy)
assert_quantity_allclose(model(xnew), ans1)
assert_quantity_allclose(model(xnew.to(u.nm)), ans1)
assert model.bounding_box == (0 * u.nm, 4 * u.nm)
# Test fill value unit conversion and unitless input on table with unit
model = LookupTable(
[1, 2, 3],
[10, 20, 30] * u.nJy,
bounds_error=False,
fill_value=1e-33 * (u.W / (u.m * u.m * u.Hz)),
)
assert_quantity_allclose(model(np.arange(5)), [100, 10, 20, 30, 100] * u.nJy)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_interp_2d():
table = np.array(
[
[-0.04614432, -0.02512547, -0.00619557, 0.0144165, 0.0297525],
[-0.04510594, -0.03183369, -0.01118008, 0.01201388, 0.02496205],
[-0.05464094, -0.02804499, -0.00960086, 0.01134333, 0.02284104],
[-0.04879338, -0.02539565, -0.00440462, 0.01795145, 0.02122417],
[-0.03637372, -0.01630025, -0.00157902, 0.01649774, 0.01952131],
]
)
points = np.arange(0, 5)
points = (points, points)
xnew = np.array([0.0, 0.7, 1.4, 2.1, 3.9])
LookupTable = models.tabular_model(2)
model = LookupTable(points, table)
znew = model(xnew, xnew)
result = np.array([-0.04614432, -0.03450009, -0.02241028, -0.0069727, 0.01938675])
assert_allclose(znew, result, atol=1e-7)
# test 2D arrays as input
a = np.arange(12).reshape((3, 4))
y, x = np.mgrid[:3, :4]
t = models.Tabular2D(lookup_table=a)
r = t(y, x)
assert_allclose(a, r)
MESSAGE = r"Only n_models=1 is supported"
with pytest.raises(NotImplementedError, match=MESSAGE):
model = LookupTable(n_models=2)
MESSAGE = r"Must provide a lookup table"
with pytest.raises(ValueError, match=MESSAGE):
model = LookupTable(points=([1.2, 2.3], [1.2, 6.7], [3, 4]))
MESSAGE = r"lookup_table should be an array with 2 dimensions"
with pytest.raises(ValueError, match=MESSAGE):
model = LookupTable(lookup_table=[1, 2, 3])
MESSAGE = r"lookup_table should be an array with 2 dimensions"
with pytest.raises(ValueError, match=MESSAGE):
model = LookupTable(([1, 2], [3, 4]), [5, 6])
MESSAGE = r"points must all have the same unit"
with pytest.raises(ValueError, match=MESSAGE):
model = LookupTable(([1, 2] * u.m, [3, 4]), [[5, 6], [7, 8]])
MESSAGE = r"fill value is in Jy but expected to be unitless"
with pytest.raises(ValueError, match=MESSAGE):
model = LookupTable(points, table, bounds_error=False, fill_value=1 * u.Jy)
# Test unit support
points = points[0] * u.nm
points = (points, points)
xnew = xnew * u.nm
model = LookupTable(points, table * u.nJy)
result = result * u.nJy
assert_quantity_allclose(model(xnew, xnew), result, atol=1e-7 * u.nJy)
xnew = xnew.to(u.m)
assert_quantity_allclose(model(xnew, xnew), result, atol=1e-7 * u.nJy)
bbox = (0 * u.nm, 4 * u.nm)
bbox = (bbox, bbox)
assert model.bounding_box == bbox
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_nd():
a = np.arange(24).reshape((2, 3, 4))
x, y, z = np.mgrid[:2, :3, :4]
tab = models.tabular_model(3)
t = tab(lookup_table=a)
result = t(x, y, z)
assert_allclose(a, result)
MESSAGE = r"Lookup table must have at least one dimension"
with pytest.raises(ValueError, match=MESSAGE):
models.tabular_model(0)
def test_with_bounding_box():
"""
Test the option to evaluate a model respecting
    its bounding_box.
"""
p = models.Polynomial2D(2) & models.Polynomial2D(2)
m = models.Mapping((0, 1, 0, 1)) | p
with NumpyRNGContext(1234567):
m.parameters = np.random.rand(12)
m.bounding_box = ((3, 9), (1, 8))
x, y = np.mgrid[:10, :10]
a, b = m(x, y)
aw, bw = m(x, y, with_bounding_box=True)
ind = (~np.isnan(aw)).nonzero()
assert_allclose(a[ind], aw[ind])
assert_allclose(b[ind], bw[ind])
aw, bw = m(x, y, with_bounding_box=True, fill_value=1000)
ind = (aw != 1000).nonzero()
assert_allclose(a[ind], aw[ind])
assert_allclose(b[ind], bw[ind])
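    # (Informal summary of the behaviour checked above) with_bounding_box=True
    # only evaluates the model inside its bounding_box; outputs for points
    # outside the box are set to NaN by default, or to the supplied fill_value
    # (1000 in the second check).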
# test the order of bbox is not reversed for 1D models
p = models.Polynomial1D(1, c0=12, c1=2.3)
p.bounding_box = (0, 5)
assert p(1) == p(1, with_bounding_box=True)
t3 = models.Shift(10) & models.Scale(2) & models.Shift(-1)
t3.bounding_box = ((4.3, 6.9), (6, 15), (-1, 10))
assert_allclose(
t3([1, 1], [7, 7], [3, 5], with_bounding_box=True),
[[np.nan, 11], [np.nan, 14], [np.nan, 4]],
)
trans3 = models.Shift(10) & models.Scale(2) & models.Shift(-1)
trans3.bounding_box = ((4.3, 6.9), (6, 15), (-1, 10))
assert_allclose(trans3(1, 7, 5, with_bounding_box=True), [11, 14, 4])
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_with_bounding_box():
points = np.arange(5)
values = np.array([1.5, 3.4, 6.7, 7, 32])
t = models.Tabular1D(points, values)
result = t(1, with_bounding_box=True)
assert result == 3.4
assert t.inverse(result, with_bounding_box=True) == 1.0
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_bounding_box_with_units():
points = np.arange(5) * u.pix
lt = np.arange(5) * u.AA
t = models.Tabular1D(points, lt)
result = t(1 * u.pix, with_bounding_box=True)
assert result == 1.0 * u.AA
assert t.inverse(result, with_bounding_box=True) == 1 * u.pix
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular1d_inverse():
"""Test that the Tabular1D inverse is defined"""
points = np.arange(5)
values = np.array([1.5, 3.4, 6.7, 7, 32])
t = models.Tabular1D(points, values)
result = t.inverse((3.4, 6.7))
assert_allclose(result, np.array((1.0, 2.0)))
# Check that it works for descending values in lookup_table
t2 = models.Tabular1D(points, values[::-1])
assert_allclose(t2.inverse.points[0], t2.lookup_table[::-1])
result2 = t2.inverse((7, 6.7))
assert_allclose(result2, np.array((1.0, 2.0)))
# Check that it errors on double-valued lookup_table
points = np.arange(5)
values = np.array([1.5, 3.4, 3.4, 32, 25])
t = models.Tabular1D(points, values)
with pytest.raises(NotImplementedError, match=r""):
t.inverse((3.4, 7.0))
# Check that Tabular2D.inverse raises an error
table = np.arange(5 * 5).reshape(5, 5)
points = np.arange(0, 5)
points = (points, points)
t3 = models.Tabular2D(points=points, lookup_table=table)
with pytest.raises(NotImplementedError, match=r""):
t3.inverse((3, 3))
# Check that it uses the same kwargs as the original model
points = np.arange(5)
values = np.array([1.5, 3.4, 6.7, 7, 32])
t = models.Tabular1D(points, values)
MESSAGE = r"One of the requested xi is out of bounds in dimension 0"
with pytest.raises(ValueError, match=MESSAGE):
t.inverse(100)
t = models.Tabular1D(points, values, bounds_error=False, fill_value=None)
result = t.inverse(100)
assert_allclose(t(result), 100)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_grid_shape_mismatch_error():
points = np.arange(5)
lt = np.mgrid[0:5, 0:5][0]
MESSAGE = r"Expected grid points in 2 directions, got 5."
with pytest.raises(ValueError, match=MESSAGE):
models.Tabular2D(points, lt)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_repr():
points = np.arange(5)
lt = np.arange(5)
t = models.Tabular1D(points, lt)
assert (
repr(t)
== "<Tabular1D(points=(array([0, 1, 2, 3, 4]),), lookup_table=[0 1 2 3 4])>"
)
table = np.arange(5 * 5).reshape(5, 5)
points = np.arange(0, 5)
points = (points, points)
t = models.Tabular2D(points=points, lookup_table=table)
assert (
repr(t)
== "<Tabular2D(points=(array([0, 1, 2, 3, 4]), array([0, 1, 2, 3, 4])), "
"lookup_table=[[ 0 1 2 3 4]\n"
" [ 5 6 7 8 9]\n"
" [10 11 12 13 14]\n"
" [15 16 17 18 19]\n"
" [20 21 22 23 24]])>"
)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_str():
points = np.arange(5)
lt = np.arange(5)
t = models.Tabular1D(points, lt)
assert (
str(t) == "Model: Tabular1D\n"
"N_inputs: 1\n"
"N_outputs: 1\n"
"Parameters: \n"
" points: (array([0, 1, 2, 3, 4]),)\n"
" lookup_table: [0 1 2 3 4]\n"
" method: linear\n"
" fill_value: nan\n"
" bounds_error: True"
)
table = np.arange(5 * 5).reshape(5, 5)
points = np.arange(0, 5)
points = (points, points)
t = models.Tabular2D(points=points, lookup_table=table)
assert (
str(t) == "Model: Tabular2D\n"
"N_inputs: 2\n"
"N_outputs: 1\n"
"Parameters: \n"
" points: (array([0, 1, 2, 3, 4]), array([0, 1, 2, 3, 4]))\n"
" lookup_table: [[ 0 1 2 3 4]\n"
" [ 5 6 7 8 9]\n"
" [10 11 12 13 14]\n"
" [15 16 17 18 19]\n"
" [20 21 22 23 24]]\n"
" method: linear\n"
" fill_value: nan\n"
" bounds_error: True"
)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_evaluate():
points = np.arange(5)
lt = np.arange(5)[::-1]
t = models.Tabular1D(points, lt)
assert (t.evaluate([1, 2, 3]) == [3, 2, 1]).all()
assert (t.evaluate(np.array([1, 2, 3]) * u.m) == [3, 2, 1]).all()
t.n_outputs = 2
value = [np.array([3, 2, 1]), np.array([1, 2, 3])]
with mk.patch.object(
tabular_models, "interpn", autospec=True, return_value=value
) as mkInterpn:
outputs = t.evaluate([1, 2, 3])
for index, output in enumerate(outputs):
assert np.all(value[index] == output)
assert mkInterpn.call_count == 1
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_module_name():
"""
The module name must be set manually because
these classes are created dynamically.
"""
for model in [models.Tabular1D, models.Tabular2D]:
assert model.__module__ == "astropy.modeling.tabular"
class classmodel(FittableModel):
f = Parameter(default=1)
x = Parameter(default=0)
y = Parameter(default=2)
def __init__(self, f=f.default, x=x.default, y=y.default):
super().__init__(f, x, y)
def evaluate(self):
pass
class subclassmodel(classmodel):
f = Parameter(default=3, fixed=True)
x = Parameter(default=10)
y = Parameter(default=12)
h = Parameter(default=5)
def __init__(self, f=f.default, x=x.default, y=y.default, h=h.default):
super().__init__(f, x, y)
def evaluate(self):
pass
def test_parameter_inheritance():
b = subclassmodel()
assert b.param_names == ("f", "x", "y", "h")
assert b.h == 5
assert b.f == 3
assert b.f.fixed == True # noqa: E712
@pytest.mark.filterwarnings(r"ignore:humlicek2 has been deprecated since .*")
def test_parameter_description():
model = models.Gaussian1D(1.5, 2.5, 3.5)
assert model.amplitude._description == "Amplitude (peak value) of the Gaussian"
assert model.mean._description == "Position of peak (Gaussian)"
model = models.Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9)
assert model.amplitude_L._description == "The Lorentzian amplitude"
assert model.fwhm_L._description == "The Lorentzian full width at half maximum"
assert model.fwhm_G._description == "The Gaussian full width at half maximum"
def test_SmoothlyBrokenPowerLaw1D_validators():
MESSAGE = r"amplitude parameter must be > 0"
with pytest.raises(InputParameterError, match=MESSAGE):
SmoothlyBrokenPowerLaw1D(amplitude=-1)
MESSAGE = r"delta parameter must be >= 0.001"
with pytest.raises(InputParameterError, match=MESSAGE):
SmoothlyBrokenPowerLaw1D(delta=0)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.filterwarnings(r"ignore:.*:RuntimeWarning")
@pytest.mark.filterwarnings(r"ignore:The fit may be unsuccessful.*")
def test_SmoothlyBrokenPowerLaw1D_fit_deriv():
x_lim = [0.01, 100]
x = np.logspace(x_lim[0], x_lim[1], 100)
parameters = {
"parameters": [1, 10, -2, 2, 0.5],
"constraints": {"fixed": {"x_break": True, "delta": True}},
}
model_with_deriv = create_model(
SmoothlyBrokenPowerLaw1D, parameters, use_constraints=False
)
model_no_deriv = create_model(
SmoothlyBrokenPowerLaw1D, parameters, use_constraints=False
)
# NOTE: PR 10644 replaced deprecated usage of RandomState but could not
# find a new seed that did not cause test failure, resorted to hardcoding.
# add 10% noise to the amplitude
# fmt: off
rsn_rand_1234567890 = np.array(
[
0.61879477, 0.59162363, 0.88868359, 0.89165480, 0.45756748,
0.77818808, 0.26706377, 0.99610621, 0.54009489, 0.53752161,
0.40099938, 0.70540579, 0.40518559, 0.94999075, 0.03075388,
0.13602495, 0.08297726, 0.42352224, 0.23449723, 0.74743526,
0.65177865, 0.68998682, 0.16413419, 0.87642114, 0.44733314,
0.57871104, 0.52377835, 0.62689056, 0.34869427, 0.26209748,
0.07498055, 0.17940570, 0.82999425, 0.98759822, 0.11326099,
0.63846415, 0.73056694, 0.88321124, 0.52721004, 0.66487673,
0.74209309, 0.94083846, 0.70123128, 0.29534353, 0.76134369,
0.77593881, 0.36985514, 0.89519067, 0.33082813, 0.86108824,
0.76897859, 0.61343376, 0.43870907, 0.91913538, 0.76958966,
0.51063556, 0.04443249, 0.57463611, 0.31382006, 0.41221713,
0.21531811, 0.03237521, 0.04166386, 0.73109303, 0.74556052,
0.64716325, 0.77575353, 0.64599254, 0.16885816, 0.48485480,
0.53844248, 0.99690349, 0.23657074, 0.04119088, 0.46501519,
0.35739006, 0.23002665, 0.53420791, 0.71639475, 0.81857486,
0.73994342, 0.07948837, 0.75688276, 0.13240193, 0.48465576,
0.20624753, 0.02298276, 0.54257873, 0.68123230, 0.35887468,
0.36296147, 0.67368397, 0.29505730, 0.66558885, 0.93652252,
0.36755130, 0.91787687, 0.75922703, 0.48668067, 0.45967890
]
)
# fmt: on
n = 0.1 * parameters["parameters"][0] * (rsn_rand_1234567890 - 0.5)
data = model_with_deriv(x) + n
fitter_with_deriv = fitting.LevMarLSQFitter()
new_model_with_deriv = fitter_with_deriv(model_with_deriv, x, data)
fitter_no_deriv = fitting.LevMarLSQFitter()
new_model_no_deriv = fitter_no_deriv(
model_no_deriv, x, data, estimate_jacobian=True
)
assert_allclose(
new_model_with_deriv.parameters, new_model_no_deriv.parameters, atol=0.5
)
class _ExtendedModelMeta(_ModelMeta):
@classmethod
def __prepare__(mcls, name, bases, **kwds):
# this shows the parent class machinery still applies
namespace = super().__prepare__(name, bases, **kwds)
# the custom bit
namespace.update(kwds)
return namespace
model = models.Gaussian1D(1.5, 2.5, 3.5)
assert model.amplitude._description == "Amplitude (peak value) of the Gaussian"
assert model.mean._description == "Position of peak (Gaussian)"
def test_metaclass_kwargs():
"""Test can pass kwargs to Models"""
class ClassModel(FittableModel, flag="flag"):
def evaluate(self):
pass
    # Nothing further to test; just creating the class is enough.
def test_submetaclass_kwargs():
"""Test can pass kwargs to Model subclasses."""
class ClassModel(FittableModel, metaclass=_ExtendedModelMeta, flag="flag"):
def evaluate(self):
pass
assert ClassModel.flag == "flag"
class ModelDefault(Model):
slope = Parameter()
intercept = Parameter()
_separable = False
@staticmethod
def evaluate(x, slope, intercept):
return slope * x + intercept
class ModelCustom(ModelDefault):
def _calculate_separability_matrix(self):
return np.array([[0]])
def test_custom_separability_matrix():
original = separability_matrix(ModelDefault(slope=1, intercept=2))
assert original.all()
custom = separability_matrix(ModelCustom(slope=1, intercept=2))
assert not custom.any()
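# (Informal note on the expected matrices for a one-input, one-output model)
# separability_matrix marks which outputs depend on which inputs, so the
# default model above yields array([[True]]) while the overridden
# _calculate_separability_matrix forces array([[False]]), which is exactly
# what the two assertions check.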
|
0f52182a58503670a7bb5d34d512a0bc910a5cad62149c726d03470cf3471b0d | """Tests that models are picklable."""
from pickle import dumps, loads
import numpy as np
from numpy.testing import assert_allclose
import pytest
from astropy import units as u
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.modeling import functional_models
from astropy.modeling import mappings
from astropy.modeling import math_functions
from astropy.modeling import physical_models
from astropy.modeling import polynomial
from astropy.modeling import powerlaws
from astropy.modeling import projections
from astropy.modeling import rotations
from astropy.modeling import spline
from astropy.modeling import tabular
from astropy.modeling.math_functions import ArctanhUfunc
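# The pattern exercised throughout this module, as a minimal sketch (names are
# placeholders): build a model, round-trip it through pickle, and check that
# the copy evaluates identically, e.g.
#
#     m = SomeModel(...)
#     mp = loads(dumps(m))
#     assert_allclose(m(x), mp(x))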
MATH_FUNCTIONS = (func for func in math_functions.__all__ if func != "ArctanhUfunc")
PROJ_TO_REMOVE = (
[
"Projection",
"Pix2SkyProjection",
"Sky2PixProjection",
"Zenithal",
"Conic",
"Cylindrical",
"PseudoCylindrical",
"PseudoConic",
"QuadCube",
"HEALPix",
"AffineTransformation2D",
"projcodes",
"Pix2Sky_ZenithalPerspective",
]
+ [f"Pix2Sky_{code}" for code in projections.projcodes]
+ [f"Sky2Pix_{code}" for code in projections.projcodes]
)
PROJECTIONS = (func for func in projections.__all__ if func not in PROJ_TO_REMOVE)
OTHER_MODELS = [
mappings.Mapping((1, 0)),
mappings.Identity(2),
ArctanhUfunc(),
rotations.Rotation2D(23),
tabular.Tabular1D(lookup_table=[1, 2, 3, 4]),
tabular.Tabular2D(lookup_table=[[1, 2, 3, 4], [5, 6, 7, 8]]),
]
POLYNOMIALS_1D = ["Chebyshev1D", "Hermite1D", "Legendre1D", "Polynomial1D"]
POLYNOMIALS_2D = ["Chebyshev2D", "Hermite2D", "Legendre2D", "InverseSIP"]
ROTATIONS = [
rotations.RotateCelestial2Native(12, 23, 34),
rotations.RotateNative2Celestial(12, 23, 34),
rotations.EulerAngleRotation(12, 23, 34, "xyz"),
rotations.RotationSequence3D([12, 23, 34], axes_order="xyz"),
rotations.SphericalRotationSequence([12, 23, 34], "xyz"),
rotations.Rotation2D(12),
]
@pytest.fixture()
def inputs():
return 0.3, 0.4
@pytest.fixture()
def inputs_math():
return 1, -0.5
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("model", functional_models.__all__)
def test_pickle_functional(inputs, model):
m = getattr(functional_models, model)()
mp = loads(dumps(m))
if m.n_inputs == 1:
assert_allclose(m(inputs[0]), mp(inputs[0]))
else:
assert_allclose(m(*inputs), mp(*inputs))
@pytest.mark.parametrize("model", MATH_FUNCTIONS)
def test_pickle_math_functions(inputs_math, model):
m = getattr(math_functions, model)()
mp = loads(dumps(m))
if m.n_inputs == 1:
assert_allclose(m(inputs_math[0]), mp(inputs_math[0]))
else:
assert_allclose(m(*inputs_math), mp(*inputs_math))
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("m", OTHER_MODELS)
def test_pickle_other(inputs, m):
mp = loads(dumps(m))
if m.n_inputs == 1:
assert_allclose(m(inputs[0]), mp(inputs[0]))
else:
assert_allclose(m(*inputs), mp(*inputs))
def test_pickle_units_mapping(inputs):
m = mappings.UnitsMapping(((u.m, None),))
mp = loads(dumps(m))
assert_allclose(m(inputs[0] * u.km), mp(inputs[0] * u.km))
def test_pickle_affine_transformation_2D(inputs):
m = projections.AffineTransformation2D(matrix=[[1, 1], [1, 1]], translation=[1, 1])
m.matrix.fixed = True
mp = loads(dumps(m))
assert_allclose(m(*inputs), mp(*inputs))
assert m.matrix.fixed is True
@pytest.mark.parametrize("model", physical_models.__all__)
def test_pickle_physical_models(inputs, model):
m = getattr(physical_models, model)()
m1 = loads(dumps(m))
if m.n_inputs == 1:
assert_allclose(m(inputs[0]), m1(inputs[0]))
else:
assert_allclose(m(*inputs), m1(*inputs))
@pytest.mark.parametrize("model", POLYNOMIALS_1D)
def test_pickle_1D_polynomials(inputs, model):
m = getattr(polynomial, model)
m = m(2)
m1 = loads(dumps(m))
    assert_allclose(m(inputs[0]), m1(inputs[0]))
@pytest.mark.parametrize("model", POLYNOMIALS_2D)
def test_pickle_2D_polynomials(inputs, model):
m = getattr(polynomial, model)
m = m(2, 3)
m1 = loads(dumps(m))
assert_allclose(m(*inputs), m1(*inputs))
def test_pickle_polynomial_2D(inputs):
    # Polynomial2D is initialized with a single degree argument but
    # takes 2 inputs
m = polynomial.Polynomial2D
m = m(2)
m1 = loads(dumps(m))
assert_allclose(m(*inputs), m1(*inputs))
def test_pickle_sip(inputs):
m = polynomial.SIP
m = m((21, 23), 2, 3)
m1 = loads(dumps(m))
assert_allclose(m(*inputs), m1(*inputs))
@pytest.mark.parametrize("model", powerlaws.__all__)
def test_pickle_powerlaws(inputs, model):
m = getattr(powerlaws, model)()
m1 = loads(dumps(m))
if m.n_inputs == 1:
assert_allclose(m(inputs[0]), m1(inputs[0]))
else:
assert_allclose(m(*inputs), m1(*inputs))
@pytest.mark.parametrize("model", PROJECTIONS)
def test_pickle_projections(inputs, model):
m = getattr(projections, model)()
m1 = loads(dumps(m))
assert_allclose(m(*inputs), m1(*inputs))
@pytest.mark.parametrize("m", ROTATIONS)
def test_pickle_rotations(inputs, m):
mp = loads(dumps(m))
if m.n_inputs == 2:
assert_allclose(m(*inputs), mp(*inputs))
else:
assert_allclose(m(inputs[0], *inputs), mp(inputs[0], *inputs))
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_pickle_spline(inputs):
def func(x, noise):
return np.exp(-(x**2)) + 0.1 * noise
noise = np.random.randn(50)
x = np.linspace(-3, 3, 50)
y = func(x, noise)
fitter = spline.SplineInterpolateFitter()
spl = spline.Spline1D(degree=3)
m = fitter(spl, x, y)
mp = loads(dumps(m))
assert_allclose(m(inputs[0]), mp(inputs[0]))
|
d9197ad30044948f61d8d330598bd336975fce08a43daa5fc3ffc7152474b59c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests that relate to using quantities/units on parameters of models.
"""
import numpy as np
import pytest
from astropy import coordinates as coord
from astropy import units as u
from astropy.modeling.core import Fittable1DModel, InputParameterError
from astropy.modeling.models import (
Const1D,
Gaussian1D,
Pix2Sky_TAN,
RotateNative2Celestial,
Rotation2D,
BlackBody,
)
from astropy.modeling.parameters import Parameter, ParameterDefinitionError
from astropy.tests.helper import assert_quantity_allclose
from astropy.units import UnitsError
class BaseTestModel(Fittable1DModel):
@staticmethod
def evaluate(x, a):
return x
def test_parameter_quantity():
"""
Basic tests for initializing general models (that do not require units)
with parameters that have units attached.
"""
g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)
assert g.amplitude.value == 1.0
assert g.amplitude.unit is u.J
assert g.mean.value == 1.0
assert g.mean.unit is u.m
assert g.stddev.value == 0.1
assert g.stddev.unit is u.m
def test_parameter_set_quantity():
"""
Make sure that parameters that start off as quantities can be set to any
other quantity, regardless of whether the units of the new quantity are
compatible with the original ones.
We basically leave it up to the evaluate method to raise errors if there
are issues with incompatible units, and we don't check for consistency
at the parameter level.
"""
g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)
# Try equivalent units
g.amplitude = 4 * u.kJ
assert_quantity_allclose(g.amplitude, 4 * u.kJ)
g.mean = 3 * u.km
assert_quantity_allclose(g.mean, 3 * u.km)
g.stddev = 2 * u.mm
assert_quantity_allclose(g.stddev, 2 * u.mm)
# Try different units
g.amplitude = 2 * u.s
assert_quantity_allclose(g.amplitude, 2 * u.s)
g.mean = 2 * u.Jy
assert_quantity_allclose(g.mean, 2 * u.Jy)
def test_parameter_lose_units():
"""
Check that parameters that have been set to a quantity that are then set to
a value with no units raise an exception. We do this because setting a
parameter to a value with no units is ambiguous if units were set before:
    if a parameter is 1 * u.Jy and the parameter is then set to 2, does this mean
2 without units, or 2 * u.Jy?
"""
g = Gaussian1D(1 * u.Jy, 3, 0.1)
MESSAGE = (
r"The .* parameter should be given as a .* because it was originally"
r" initialized as a .*"
)
with pytest.raises(UnitsError, match=MESSAGE):
g.amplitude = 2
def test_parameter_add_units():
"""
On the other hand, if starting from a parameter with no units, we should be
able to add units since this is unambiguous.
"""
g = Gaussian1D(1, 3, 0.1)
g.amplitude = 2 * u.Jy
assert_quantity_allclose(g.amplitude, 2 * u.Jy)
def test_parameter_change_unit():
"""
Test that changing the unit on a parameter does not work. This is an
ambiguous operation because it's not clear if it means that the value should
be converted or if the unit should be changed without conversion.
"""
g = Gaussian1D(1, 1 * u.m, 0.1 * u.m)
# Setting a unit on a unitless parameter should not work
MESSAGE = (
r"Cannot attach units to parameters that were not initially specified with"
r" units"
)
with pytest.raises(ValueError, match=MESSAGE):
g.amplitude.unit = u.Jy
    # But changing to another unit should not work either, even if it is an
    # equivalent unit
MESSAGE = (
r"Cannot change the unit attribute directly, instead change the parameter to a"
r" new quantity"
)
with pytest.raises(ValueError, match=MESSAGE):
g.mean.unit = u.cm
def test_parameter_set_value():
"""
Test that changing the value on a parameter works as expected.
"""
g = Gaussian1D(1 * u.Jy, 1 * u.m, 0.1 * u.m)
# To set a parameter to a quantity, we simply do
g.amplitude = 2 * u.Jy
# If we try setting the value, we need to pass a non-quantity value
# TODO: determine whether this is the desired behavior?
g.amplitude.value = 4
assert_quantity_allclose(g.amplitude, 4 * u.Jy)
assert g.amplitude.value == 4
assert g.amplitude.unit is u.Jy
# If we try setting it to a Quantity, we raise an error
MESSAGE = (
r"The .value property on parameters should be set to unitless values, not"
r" Quantity objects.*"
)
with pytest.raises(TypeError, match=MESSAGE):
g.amplitude.value = 3 * u.Jy
def test_parameter_quantity_property():
"""
Test that the quantity property of Parameters behaves as expected
"""
    # Since parameters have .value and .unit attributes that return just the
    # value and unit respectively, there is also a .quantity attribute that
    # returns a Quantity instance.
g = Gaussian1D(1 * u.Jy, 1 * u.m, 0.1 * u.m)
assert_quantity_allclose(g.amplitude.quantity, 1 * u.Jy)
    # Setting a parameter to a quantity changes both the value and the unit
g.amplitude.quantity = 5 * u.mJy
assert g.amplitude.value == 5
assert g.amplitude.unit is u.mJy
# And we can also set the parameter to a value with different units
g.amplitude.quantity = 4 * u.s
assert g.amplitude.value == 4
assert g.amplitude.unit is u.s
# But not to a value without units
MESSAGE = r"The .quantity attribute should be set to a Quantity object"
with pytest.raises(TypeError, match=MESSAGE):
g.amplitude.quantity = 3
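# (Informal recap of the accessors exercised above) for a parameter p carrying
# units, p.value is the plain number, p.unit is the astropy unit, and
# p.quantity combines the two as a Quantity; assigning a Quantity to
# p.quantity replaces both the value and the unit at once.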
def test_parameter_default_units_match():
# If the unit and default quantity units are different, raise an error
MESSAGE = (
r"parameter default 1.0 m does not have units equivalent to the required"
r" unit Jy"
)
with pytest.raises(ParameterDefinitionError, match=MESSAGE):
class TestC(Fittable1DModel):
a = Parameter(default=1.0 * u.m, unit=u.Jy)
@pytest.mark.parametrize(("unit", "default"), ((u.m, 1.0), (None, 1 * u.m)))
def test_parameter_defaults(unit, default):
"""
Test that default quantities are correctly taken into account
"""
class TestModel(BaseTestModel):
a = Parameter(default=default, unit=unit)
# TODO: decide whether the default property should return a value or
# a quantity?
# The default unit and value should be set on the class
assert TestModel.a.unit == u.m
assert TestModel.a.default == 1.0
# Check that the default unit and value are also set on a class instance
m = TestModel()
assert m.a.unit == u.m
assert m.a.default == m.a.value == 1.0
# If the parameter is set to a different value, the default is still the
# internal default
m = TestModel(2.0 * u.m)
assert m.a.unit == u.m
assert m.a.value == 2.0
assert m.a.default == 1.0
# Instantiate with a different, but compatible unit
m = TestModel(2.0 * u.pc)
assert m.a.unit == u.pc
assert m.a.value == 2.0
# The default is still in the original units
# TODO: but how do we know what those units are if we don't return a
# quantity?
assert m.a.default == 1.0
# Initialize with a completely different unit
m = TestModel(2.0 * u.Jy)
assert m.a.unit == u.Jy
assert m.a.value == 2.0
# TODO: this illustrates why the default doesn't make sense anymore
assert m.a.default == 1.0
    # Instantiating with different units works and just replaces the original unit,
    # but instantiating without any units raises an error because the parameter
    # requires a Quantity
MESSAGE = r".* requires a Quantity for parameter .*"
with pytest.raises(InputParameterError, match=MESSAGE):
TestModel(1.0)
def test_parameter_quantity_arithmetic():
"""
Test that arithmetic operations with properties that have units return the
appropriate Quantities.
"""
g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)
# Addition should work if units are compatible
assert g.mean + (1 * u.m) == 2 * u.m
assert (1 * u.m) + g.mean == 2 * u.m
# Multiplication by a scalar should also preserve the quantity-ness
assert g.mean * 2 == (2 * u.m)
assert 2 * g.mean == (2 * u.m)
# Multiplication by a quantity should result in units being multiplied
assert g.mean * (2 * u.m) == (2 * (u.m**2))
assert (2 * u.m) * g.mean == (2 * (u.m**2))
# Negation should work properly too
assert -g.mean == (-1 * u.m)
assert abs(-g.mean) == g.mean
# However, addition of a quantity + scalar should not work
MESSAGE = (
r"Can only apply 'add' function to dimensionless quantities when other"
r" argument .*"
)
with pytest.raises(UnitsError, match=MESSAGE):
g.mean + 1
with pytest.raises(UnitsError, match=MESSAGE):
1 + g.mean
def test_parameter_quantity_comparison():
"""
Basic test of comparison operations on properties with units.
"""
g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)
# Essentially here we are checking that parameters behave like Quantity
assert g.mean == 1 * u.m
assert 1 * u.m == g.mean
assert g.mean != 1
assert 1 != g.mean
assert g.mean < 2 * u.m
assert 2 * u.m > g.mean
MESSAGE = (
r"Can only apply 'less' function to dimensionless quantities when other"
r" argument .*"
)
with pytest.raises(UnitsError, match=MESSAGE):
g.mean < 2 # noqa: B015
with pytest.raises(UnitsError, match=MESSAGE):
2 > g.mean # noqa: B015
g = Gaussian1D([1, 2] * u.J, [1, 2] * u.m, [0.1, 0.2] * u.m)
assert np.all(g.mean == [1, 2] * u.m)
assert np.all([1, 2] * u.m == g.mean)
assert np.all(g.mean != [1, 2])
assert np.all([1, 2] != g.mean)
with pytest.raises(UnitsError, match=MESSAGE):
g.mean < [3, 4] # noqa: B015
with pytest.raises(UnitsError, match=MESSAGE):
[3, 4] > g.mean # noqa: B015
def test_parameters_compound_models():
Pix2Sky_TAN()
sky_coords = coord.SkyCoord(ra=5.6, dec=-72, unit=u.deg)
lon_pole = 180 * u.deg
n2c = RotateNative2Celestial(sky_coords.ra, sky_coords.dec, lon_pole)
rot = Rotation2D(23)
rot | n2c
def test_magunit_parameter():
"""Regression test for bug reproducer in issue #13133"""
unit = u.ABmag
c = -20.0 * unit
model = Const1D(c)
assert model(-23.0 * unit) == c
def test_log_getter():
"""Regression test for issue #14511"""
x = 6000 * u.AA
mdl_base = BlackBody(temperature=5000 * u.K, scale=u.Quantity(1))
class CustomBlackBody(BlackBody):
scale = Parameter(
"scale",
default=1,
bounds=(0, None),
getter=np.log,
setter=np.exp,
unit=u.dimensionless_unscaled,
)
mdl = CustomBlackBody(temperature=5000 * u.K, scale=u.Quantity(np.log(1)))
assert mdl.scale == np.log(1)
assert_quantity_allclose(mdl(x), mdl_base(x))
def test_sqrt_getter():
"""Regression test for issue #14511"""
x = 1 * u.m
mdl_base = Gaussian1D(mean=32 * u.m, stddev=3 * u.m)
class CustomGaussian1D(Gaussian1D):
mean = Parameter(
"mean",
default=1 * u.m,
bounds=(0, None),
getter=np.sqrt,
setter=np.square,
unit=u.m,
)
stddev = Parameter(
"stddev",
default=1 * u.m,
bounds=(0, None),
getter=np.sqrt,
setter=np.square,
unit=u.m,
)
mdl = CustomGaussian1D(mean=np.sqrt(32 * u.m), stddev=np.sqrt(3 * u.m))
assert mdl.mean == np.sqrt(32 * u.m)
assert (
mdl.mean._internal_value == np.sqrt(32) ** 2
) # numerical inaccuracy results in 32.00000000000001
assert mdl.mean._internal_unit == u.m
assert mdl.stddev == np.sqrt(3 * u.m)
assert (
mdl.stddev._internal_value == np.sqrt(3) ** 2
) # numerical inaccuracy results in 3.0000000000000004
assert mdl.stddev._internal_unit == u.m
assert_quantity_allclose(mdl(x), mdl_base(x))
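# (Informal note on the getter/setter mechanics assumed by the two tests above)
# the value stored internally is setter(public_value), and the public value is
# getter(internal_value); with getter=np.sqrt and setter=np.square a public
# value of sqrt(32) m is therefore stored internally as roughly 32 m, which is
# what the _internal_value assertions verify up to floating-point round-off.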
|
bb3358b3d1377ffce68356bbce1e06ca31e2b4aa2144b48727255392f3ed40d9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Tests for polynomial models."""
# pylint: disable=invalid-name
import os
import unittest.mock as mk
import warnings
from itertools import product
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy import conf, wcs
from astropy.io import fits
from astropy.modeling import fitting
from astropy.modeling.functional_models import Linear1D
from astropy.modeling.mappings import Identity
from astropy.modeling.polynomial import (
SIP,
Chebyshev1D,
Chebyshev2D,
Hermite1D,
Hermite2D,
Legendre1D,
Legendre2D,
OrthoPolynomialBase,
Polynomial1D,
Polynomial2D,
PolynomialBase,
)
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyUserWarning
linear1d = {
Chebyshev1D: {
"args": (3,),
"kwargs": {"domain": [1, 10]},
"parameters": {"c0": 1.2, "c1": 2, "c2": 2.3, "c3": 0.2},
"constraints": {"fixed": {"c0": True}},
},
Hermite1D: {
"args": (3,),
"kwargs": {"domain": [1, 10]},
"parameters": {"c0": 1.2, "c1": 2, "c2": 2.3, "c3": 0.2},
"constraints": {"fixed": {"c0": True}},
},
Legendre1D: {
"args": (3,),
"kwargs": {"domain": [1, 10]},
"parameters": {"c0": 1.2, "c1": 2, "c2": 2.3, "c3": 0.2},
"constraints": {"fixed": {"c0": True}},
},
Polynomial1D: {
"args": (3,),
"kwargs": {"domain": [1, 10]},
"parameters": {"c0": 1.2, "c1": 2, "c2": 2.3, "c3": 0.2},
"constraints": {"fixed": {"c0": True}},
},
Linear1D: {
"args": (),
"kwargs": {},
"parameters": {"intercept": 1.2, "slope": 23.1},
"constraints": {"fixed": {"intercept": True}},
},
}
linear2d = {
Chebyshev2D: {
"args": (1, 1),
"kwargs": {"x_domain": [0, 99], "y_domain": [0, 82]},
"parameters": {"c0_0": 1.2, "c1_0": 2, "c0_1": 2.3, "c1_1": 0.2},
"constraints": {"fixed": {"c0_0": True}},
},
Hermite2D: {
"args": (1, 1),
"kwargs": {"x_domain": [0, 99], "y_domain": [0, 82]},
"parameters": {"c0_0": 1.2, "c1_0": 2, "c0_1": 2.3, "c1_1": 0.2},
"constraints": {"fixed": {"c0_0": True}},
},
Legendre2D: {
"args": (1, 1),
"kwargs": {"x_domain": [0, 99], "y_domain": [0, 82]},
"parameters": {"c0_0": 1.2, "c1_0": 2, "c0_1": 2.3, "c1_1": 0.2},
"constraints": {"fixed": {"c0_0": True}},
},
Polynomial2D: {
"args": (1,),
"kwargs": {},
"parameters": {"c0_0": 1.2, "c1_0": 2, "c0_1": 2.3},
"constraints": {"fixed": {"c0_0": True}},
},
}
fitters = [
fitting.LevMarLSQFitter,
fitting.TRFLSQFitter,
fitting.LMLSQFitter,
fitting.DogBoxLSQFitter,
]
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
class TestFitting:
"""Test linear fitter with polynomial models."""
def setup_class(self):
self.N = 100
self.M = 100
self.x1 = np.linspace(1, 10, 100)
self.y2, self.x2 = np.mgrid[:100, :83]
rsn = np.random.default_rng(0)
self.n1 = rsn.standard_normal(self.x1.size) * 0.1
self.n2 = rsn.standard_normal(self.x2.size)
self.n2.shape = self.x2.shape
self.linear_fitter = fitting.LinearLSQFitter()
# TODO: Most of these test cases have some pretty repetitive setup that we
# could probably factor out
@pytest.mark.parametrize(
("model_class", "constraints"),
list(product(sorted(linear1d, key=str), (False, True))),
)
def test_linear_fitter_1D(self, model_class, constraints):
"""Test fitting with LinearLSQFitter"""
model_args = linear1d[model_class]
kwargs = {}
kwargs.update(model_args["kwargs"])
kwargs.update(model_args["parameters"])
if constraints:
kwargs.update(model_args["constraints"])
model = model_class(*model_args["args"], **kwargs)
y1 = model(self.x1)
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message=r"The fit may be poorly conditioned",
category=AstropyUserWarning,
)
model_lin = self.linear_fitter(model, self.x1, y1 + self.n1)
if constraints:
# For the constraints tests we're not checking the overall fit,
# just that the constraint was maintained
fixed = model_args["constraints"].get("fixed", None)
if fixed:
for param, value in fixed.items():
expected = model_args["parameters"][param]
assert getattr(model_lin, param).value == expected
else:
assert_allclose(model_lin.parameters, model.parameters, atol=0.2)
@pytest.mark.parametrize(
("model_class", "constraints"),
list(product(sorted(linear1d, key=str), (False, True))),
)
@pytest.mark.parametrize("fitter", fitters)
def test_non_linear_fitter_1D(self, model_class, constraints, fitter):
"""Test fitting with non-linear LevMarLSQFitter"""
fitter = fitter()
model_args = linear1d[model_class]
kwargs = {}
kwargs.update(model_args["kwargs"])
kwargs.update(model_args["parameters"])
if constraints:
kwargs.update(model_args["constraints"])
model = model_class(*model_args["args"], **kwargs)
y1 = model(self.x1)
with pytest.warns(AstropyUserWarning, match="Model is linear in parameters"):
model_nlin = fitter(model, self.x1, y1 + self.n1)
if constraints:
fixed = model_args["constraints"].get("fixed", None)
if fixed:
for param, value in fixed.items():
expected = model_args["parameters"][param]
assert getattr(model_nlin, param).value == expected
else:
assert_allclose(model_nlin.parameters, model.parameters, atol=0.2)
@pytest.mark.parametrize(
("model_class", "constraints"),
list(product(sorted(linear2d, key=str), (False, True))),
)
def test_linear_fitter_2D(self, model_class, constraints):
"""Test fitting with LinearLSQFitter"""
model_args = linear2d[model_class]
kwargs = {}
kwargs.update(model_args["kwargs"])
kwargs.update(model_args["parameters"])
if constraints:
kwargs.update(model_args["constraints"])
model = model_class(*model_args["args"], **kwargs)
z = model(self.x2, self.y2)
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message=r"The fit may be poorly conditioned",
category=AstropyUserWarning,
)
model_lin = self.linear_fitter(model, self.x2, self.y2, z + self.n2)
if constraints:
fixed = model_args["constraints"].get("fixed", None)
if fixed:
for param, value in fixed.items():
expected = model_args["parameters"][param]
assert getattr(model_lin, param).value == expected
else:
assert_allclose(model_lin.parameters, model.parameters, atol=0.2)
@pytest.mark.parametrize(
("model_class", "constraints"),
list(product(sorted(linear2d, key=str), (False, True))),
)
@pytest.mark.parametrize("fitter", fitters)
def test_non_linear_fitter_2D(self, model_class, constraints, fitter):
"""Test fitting with non-linear LevMarLSQFitter"""
fitter = fitter()
model_args = linear2d[model_class]
kwargs = {}
kwargs.update(model_args["kwargs"])
kwargs.update(model_args["parameters"])
if constraints:
kwargs.update(model_args["constraints"])
model = model_class(*model_args["args"], **kwargs)
z = model(self.x2, self.y2)
with pytest.warns(AstropyUserWarning, match="Model is linear in parameters"):
model_nlin = fitter(model, self.x2, self.y2, z + self.n2)
if constraints:
fixed = model_args["constraints"].get("fixed", None)
if fixed:
for param, value in fixed.items():
expected = model_args["parameters"][param]
assert getattr(model_nlin, param).value == expected
else:
assert_allclose(model_nlin.parameters, model.parameters, atol=0.2)
@pytest.mark.parametrize("model_class", list(list(linear1d) + list(linear2d)))
def test_polynomial_init_with_constraints(model_class):
"""
Test that polynomial models can be instantiated with constraints, but no
parameters specified.
Regression test for https://github.com/astropy/astropy/issues/3606
"""
# Just determine which parameter to place a constraint on; it doesn't
# matter which parameter it is to exhibit the problem so long as it's a
# valid parameter for the model
if "1D" in model_class.__name__:
param = "c0"
else:
param = "c0_0"
if issubclass(model_class, Linear1D):
param = "intercept"
if issubclass(model_class, OrthoPolynomialBase):
degree = (2, 2)
else:
degree = (2,)
m = model_class(*degree, fixed={param: True})
assert m.fixed[param] is True
assert getattr(m, param).fixed is True
if issubclass(model_class, OrthoPolynomialBase):
assert (
repr(m)
== f"<{model_class.__name__}(2, 2, c0_0=0., c1_0=0., c2_0=0., c0_1=0., "
"c1_1=0., c2_1=0., c0_2=0., c1_2=0., c2_2=0.)>"
)
assert (
str(m) == f"Model: {model_class.__name__}\n"
"Inputs: ('x', 'y')\n"
"Outputs: ('z',)\n"
"Model set size: 1\n"
"X_Degree: 2\n"
"Y_Degree: 2\n"
"Parameters:\n"
" c0_0 c1_0 c2_0 c0_1 c1_1 c2_1 c0_2 c1_2 c2_2\n"
" ---- ---- ---- ---- ---- ---- ---- ---- ----\n"
" 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0"
)
else:
if model_class.__name__ == "Polynomial2D":
assert (
repr(m) == "<Polynomial2D(2, c0_0=0., c1_0=0., c2_0=0., "
"c0_1=0., c0_2=0., c1_1=0.)>"
)
assert (
str(m) == "Model: Polynomial2D\n"
"Inputs: ('x', 'y')\n"
"Outputs: ('z',)\n"
"Model set size: 1\n"
"Degree: 2\n"
"Parameters:\n"
" c0_0 c1_0 c2_0 c0_1 c0_2 c1_1\n"
" ---- ---- ---- ---- ---- ----\n"
" 0.0 0.0 0.0 0.0 0.0 0.0"
)
elif model_class.__name__ == "Linear1D":
assert repr(m) == "<Linear1D(slope=2., intercept=0.)>"
assert (
str(m) == "Model: Linear1D\n"
"Inputs: ('x',)\n"
"Outputs: ('y',)\n"
"Model set size: 1\n"
"Parameters:\n"
" slope intercept\n"
" ----- ---------\n"
" 2.0 0.0"
)
else:
assert repr(m) == f"<{model_class.__name__}(2, c0=0., c1=0., c2=0.)>"
assert (
str(m) == f"Model: {model_class.__name__}\n"
"Inputs: ('x',)\n"
"Outputs: ('y',)\n"
"Model set size: 1\n"
"Degree: 2\n"
"Parameters:\n"
" c0 c1 c2\n"
" --- --- ---\n"
" 0.0 0.0 0.0"
)
def test_sip_hst():
"""Test SIP against astropy.wcs"""
test_file = get_pkg_data_filename(os.path.join("data", "hst_sip.hdr"))
hdr = fits.Header.fromtextfile(test_file)
crpix1 = hdr["CRPIX1"]
crpix2 = hdr["CRPIX2"]
wobj = wcs.WCS(hdr)
a_pars = dict(**hdr["A_*"])
b_pars = dict(**hdr["B_*"])
a_order = a_pars.pop("A_ORDER")
b_order = b_pars.pop("B_ORDER")
sip = SIP([crpix1, crpix2], a_order, b_order, a_pars, b_pars)
coords = [1, 1]
rel_coords = [1 - crpix1, 1 - crpix2]
astwcs_result = wobj.sip_pix2foc([coords], 1)[0] - rel_coords
assert_allclose(sip(1, 1), astwcs_result)
    # Test changing the inputs and calling the model with keyword arguments.
sip.inputs = ("r", "t")
assert_allclose(sip(r=1, t=1), astwcs_result)
assert_allclose(sip(1, t=1), astwcs_result)
# Test representations
assert (
repr(sip) == "<SIP([<Shift(offset=-2048.)>, <Shift(offset=-1024.)>, "
"<_SIP1D(4, 'A', A_2_0=0.00000855, A_3_0=-0., A_4_0=0., A_0_2=0.00000217, "
"A_0_3=0., A_0_4=0., A_1_1=-0.0000052, A_1_2=-0., A_1_3=-0., "
"A_2_1=-0., A_2_2=0., A_3_1=0.)>, "
"<_SIP1D(4, 'B', B_2_0=-0.00000175, B_3_0=0., B_4_0=-0., B_0_2=-0.00000722, "
"B_0_3=-0., B_0_4=-0., B_1_1=0.00000618, B_1_2=-0., B_1_3=0., "
"B_2_1=-0., B_2_2=-0., B_3_1=-0.)>])>"
)
with conf.set_temp("max_width", 80):
# fmt: off
assert str(sip) == (
"Model: SIP\n"
" Model: Shift\n"
" Inputs: ('x',)\n"
" Outputs: ('y',)\n"
" Model set size: 1\n"
" Parameters:\n"
" offset\n"
" -------\n"
" -2048.0\n"
"\n"
" Model: Shift\n"
" Inputs: ('x',)\n"
" Outputs: ('y',)\n"
" Model set size: 1\n"
" Parameters:\n"
" offset\n"
" -------\n"
" -1024.0\n"
"\n"
" Model: _SIP1D\n"
" Inputs: ('x', 'y')\n"
" Outputs: ('z',)\n"
" Model set size: 1\n"
" Order: 4\n"
" Coeff. Prefix: A\n"
" Parameters:\n"
" A_2_0 A_3_0 ... A_3_1 \n"
" --------------------- ---------------------- ... ---------------------\n"
" 8.551277582556502e-06 -4.730444829222791e-10 ... 1.971022971660309e-15\n"
"\n"
" Model: _SIP1D\n"
" Inputs: ('x', 'y')\n"
" Outputs: ('z',)\n"
" Model set size: 1\n"
" Order: 4\n"
" Coeff. Prefix: B\n"
" Parameters:\n"
" B_2_0 B_3_0 ... B_3_1 \n"
" ---------------------- --------------------- ... ----------------------\n"
" -1.746491877058669e-06 8.567635427816317e-11 ... -3.779506805487476e-15\n"
)
# fmt: on
# Test get num of coeffs
assert sip.sip1d_a.get_num_coeff(1) == 6
# Test error
MESSAGE = "Degree of polynomial must be 2< deg < 9"
sip.sip1d_a.order = 1
with pytest.raises(ValueError, match=MESSAGE):
sip.sip1d_a.get_num_coeff(1)
sip.sip1d_a.order = 10
with pytest.raises(ValueError, match=MESSAGE):
sip.sip1d_a.get_num_coeff(1)
def test_sip_irac():
"""Test forward and inverse SIP against astropy.wcs"""
test_file = get_pkg_data_filename(os.path.join("data", "irac_sip.hdr"))
hdr = fits.Header.fromtextfile(test_file)
crpix1 = hdr["CRPIX1"]
crpix2 = hdr["CRPIX2"]
wobj = wcs.WCS(hdr)
a_pars = dict(**hdr["A_*"])
b_pars = dict(**hdr["B_*"])
ap_pars = dict(**hdr["AP_*"])
bp_pars = dict(**hdr["BP_*"])
a_order = a_pars.pop("A_ORDER")
b_order = b_pars.pop("B_ORDER")
ap_order = ap_pars.pop("AP_ORDER")
bp_order = bp_pars.pop("BP_ORDER")
del a_pars["A_DMAX"]
del b_pars["B_DMAX"]
pix = [200, 200]
rel_pix = [200 - crpix1, 200 - crpix2]
sip = SIP(
[crpix1, crpix2],
a_order,
b_order,
a_pars,
b_pars,
ap_order=ap_order,
ap_coeff=ap_pars,
bp_order=bp_order,
bp_coeff=bp_pars,
)
foc = wobj.sip_pix2foc([pix], 1)
newpix = wobj.sip_foc2pix(foc, 1)[0]
assert_allclose(sip(*pix), foc[0] - rel_pix)
assert_allclose(sip.inverse(*foc[0]) + foc[0] - rel_pix, newpix - pix)
# Test inverse representations
assert (
repr(sip.inverse)
== "<InverseSIP([<Polynomial2D(2, c0_0=0., c1_0=0.0000114, c2_0=0.00002353, "
"c0_1=-0.00000546, c0_2=-0.00000667, c1_1=-0.00001801)>, "
"<Polynomial2D(2, c0_0=0., c1_0=-0.00001495, c2_0=0.00000122, c0_1=0.00001975, "
"c0_2=-0.00002601, c1_1=0.00002944)>])>"
)
assert (
str(sip.inverse) == "Model: InverseSIP\n"
" Model: Polynomial2D\n"
" Inputs: ('x', 'y')\n"
" Outputs: ('z',)\n"
" Model set size: 1\n"
" Degree: 2\n"
" Parameters:\n"
" c0_0 c1_0 c2_0 c0_1 c0_2 c1_1 \n"
" ---- -------- --------- ---------- ---------- ----------\n"
" 0.0 1.14e-05 2.353e-05 -5.463e-06 -6.666e-06 -1.801e-05\n"
"\n"
" Model: Polynomial2D\n"
" Inputs: ('x', 'y')\n"
" Outputs: ('z',)\n"
" Model set size: 1\n"
" Degree: 2\n"
" Parameters:\n"
" c0_0 c1_0 c2_0 c0_1 c0_2 c1_1 \n"
" ---- ---------- --------- --------- ---------- ---------\n"
" 0.0 -1.495e-05 1.225e-06 1.975e-05 -2.601e-05 2.944e-05\n"
)
def test_sip_no_coeff():
sip = SIP([10, 12], 2, 2)
assert_allclose(sip.sip1d_a.parameters, [0.0, 0.0, 0])
assert_allclose(sip.sip1d_b.parameters, [0.0, 0.0, 0])
MESSAGE = r"SIP inverse coefficients are not available"
with pytest.raises(NotImplementedError, match=MESSAGE):
sip.inverse
# Test model set
sip = SIP([10, 12], 2, 2, n_models=2)
assert sip.sip1d_a.model_set_axis == 0
assert sip.sip1d_b.model_set_axis == 0
@pytest.mark.parametrize(
"cls",
(Polynomial1D, Chebyshev1D, Legendre1D, Polynomial2D, Chebyshev2D, Legendre2D),
)
def test_zero_degree_polynomial(cls):
"""
A few tests that degree=0 polynomials are correctly evaluated and
fitted.
Regression test for https://github.com/astropy/astropy/pull/3589
"""
MESSAGE = "Degree of polynomial must be positive or null"
if cls.n_inputs == 1: # Test 1D polynomials
p1 = cls(degree=0, c0=1)
assert p1(0) == 1
assert np.all(p1(np.zeros(5)) == np.ones(5))
x = np.linspace(0, 1, 100)
# Add a little noise along a straight line
y = 1 + np.random.uniform(0, 0.1, len(x))
p1_init = cls(degree=0)
fitter = fitting.LinearLSQFitter()
p1_fit = fitter(p1_init, x, y)
        # The fit won't be exact of course, but it should get reasonably close
        # (atol=0.10)
assert_allclose(p1_fit.c0, 1, atol=0.10)
# Error from negative degree
with pytest.raises(ValueError, match=MESSAGE):
cls(degree=-1)
elif cls.n_inputs == 2: # Test 2D polynomials
if issubclass(cls, OrthoPolynomialBase):
p2 = cls(x_degree=0, y_degree=0, c0_0=1)
# different shaped x and y inputs
a = np.array([1, 2, 3])
b = np.array([1, 2])
with mk.patch.object(
PolynomialBase,
"prepare_inputs",
autospec=True,
return_value=((a, b), mk.MagicMock()),
):
with pytest.raises(
ValueError, match=r"Expected input arrays to have the same shape"
):
p2.prepare_inputs(mk.MagicMock(), mk.MagicMock())
# Error from negative degree
with pytest.raises(ValueError, match=MESSAGE):
cls(x_degree=-1, y_degree=0)
with pytest.raises(ValueError, match=MESSAGE):
cls(x_degree=0, y_degree=-1)
else:
p2 = cls(degree=0, c0_0=1)
# Error from negative degree
with pytest.raises(ValueError, match=MESSAGE):
cls(degree=-1)
assert p2(0, 0) == 1
assert np.all(p2(np.zeros(5), np.zeros(5)) == np.ones(5))
y, x = np.mgrid[0:1:100j, 0:1:100j]
z = (1 + np.random.uniform(0, 0.1, x.size)).reshape(100, 100)
if issubclass(cls, OrthoPolynomialBase):
p2_init = cls(x_degree=0, y_degree=0)
else:
p2_init = cls(degree=0)
fitter = fitting.LinearLSQFitter()
p2_fit = fitter(p2_init, x, y, z)
assert_allclose(p2_fit.c0_0, 1, atol=0.10)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_2d_orthopolynomial_in_compound_model(fitter):
"""
    Ensure that OrthoPolynomialBase (i.e. Chebyshev2D & Legendre2D) models get
evaluated & fitted correctly when part of a compound model.
Regression test for https://github.com/astropy/astropy/pull/6085.
"""
fitter = fitter()
y, x = np.mgrid[0:5, 0:5]
z = x + y
simple_model = Chebyshev2D(2, 2)
with pytest.warns(AstropyUserWarning, match="Model is linear in parameters"):
simple_fit = fitter(simple_model, x, y, z)
compound_model = Identity(2) | Chebyshev2D(2, 2)
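    # The flags below are set by hand so the fitters treat the compound model
    # as fittable and linear (compound models may not propagate these flags
    # from their components automatically).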
compound_model.fittable = True
compound_model.linear = True
with pytest.warns(AstropyUserWarning, match="Model is linear in parameters"):
compound_fit = fitter(compound_model, x, y, z)
assert_allclose(simple_fit(x, y), compound_fit(x, y), atol=1e-11)
def test_Hermite1D_clenshaw():
model = Hermite1D(degree=2)
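    # clenshaw(x, coeffs) evaluates sum_k coeffs[k] * H_k(x).  At x = 1 the
    # physicists' Hermite polynomials give H0=1, H1=2, H2=2, H3=-4, so e.g.
    # 3*1 + 4*2 = 11 and 3*1 + 4*2 + 5*2 + 6*(-4) = -3.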
assert model.clenshaw(1, [3]) == 3
assert model.clenshaw(1, [3, 4]) == 11
assert model.clenshaw(1, [3, 4, 5]) == 21
assert model.clenshaw(1, [3, 4, 5, 6]) == -3
def test__fcache():
model = OrthoPolynomialBase(x_degree=2, y_degree=2)
MESSAGE = r"Subclasses should implement this"
with pytest.raises(NotImplementedError, match=MESSAGE):
model._fcache(np.asanyarray(1), np.asanyarray(1))
model = Hermite2D(x_degree=2, y_degree=2)
assert model._fcache(np.asanyarray(1), np.asanyarray(1)) == {
0: np.asanyarray(1),
1: 2,
3: np.asanyarray(1),
4: 2,
2: 2.0,
5: -4.0,
}
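    # Legendre and Chebyshev polynomials satisfy P_k(1) = T_k(1) = 1 for all k,
    # so at x = y = 1 every cached basis term below evaluates to 1.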
model = Legendre2D(x_degree=2, y_degree=2)
assert model._fcache(np.asanyarray(1), np.asanyarray(1)) == {
0: np.asanyarray(1),
1: np.asanyarray(1),
2: 1.0,
3: np.asanyarray(1),
4: np.asanyarray(1),
5: 1.0,
}
model = Chebyshev2D(x_degree=2, y_degree=2)
assert model._fcache(np.asanyarray(1), np.asanyarray(1)) == {
0: np.asanyarray(1),
1: np.asanyarray(1),
2: 1.0,
3: np.asanyarray(1),
4: np.asanyarray(1),
5: 1.0,
}
def test_fit_deriv_shape_error():
model = Hermite2D(x_degree=2, y_degree=2)
MESSAGE = r"x and y must have the same shape"
with pytest.raises(ValueError, match=MESSAGE):
model.fit_deriv(np.array([1, 2]), np.array([3, 4, 5]))
model = Chebyshev2D(x_degree=2, y_degree=2)
with pytest.raises(ValueError, match=MESSAGE):
model.fit_deriv(np.array([1, 2]), np.array([3, 4, 5]))
model = Legendre2D(x_degree=2, y_degree=2)
with pytest.raises(ValueError, match=MESSAGE):
model.fit_deriv(np.array([1, 2]), np.array([3, 4, 5]))
model = Polynomial2D(degree=2)
MESSAGE = r"Expected x and y to be of equal size"
with pytest.raises(ValueError, match=MESSAGE):
model.fit_deriv(np.array([1, 2]), np.array([3, 4, 5]))
|
cc1f7e3fd671b279335e43fad1d642c5581560ae359cbbd3a9a1337479fdb51e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import unittest.mock as mk
import numpy as np
import pytest
import astropy.units as u
from astropy.coordinates import SpectralCoord
from astropy.modeling.bounding_box import (
CompoundBoundingBox,
ModelBoundingBox,
_BaseInterval,
_BaseSelectorArgument,
_BoundingDomain,
_ignored_interval,
_Interval,
_SelectorArgument,
_SelectorArguments,
)
from astropy.modeling.core import Model, fix_inputs
from astropy.modeling.models import (
Gaussian1D,
Gaussian2D,
Identity,
Polynomial2D,
Scale,
Shift,
)
class Test_Interval:
def test_create(self):
lower = mk.MagicMock()
upper = mk.MagicMock()
interval = _Interval(lower, upper)
assert isinstance(interval, _BaseInterval)
assert interval.lower == lower
assert interval.upper == upper
assert interval == (lower, upper)
assert interval.__repr__() == f"Interval(lower={lower}, upper={upper})"
def test_copy(self):
interval = _Interval(0.5, 1.5)
copy = interval.copy()
assert interval == copy
assert id(interval) != id(copy)
        # Same float values will have the same id
assert interval.lower == copy.lower
assert id(interval.lower) == id(copy.lower)
        # Same float values will have the same id
assert interval.upper == copy.upper
assert id(interval.upper) == id(copy.upper)
def test__validate_shape(self):
MESSAGE = r"An interval must be some sort of sequence of length 2"
lower = mk.MagicMock()
upper = mk.MagicMock()
interval = _Interval(lower, upper)
# Passes (2,)
interval._validate_shape((1, 2))
interval._validate_shape([1, 2])
interval._validate_shape((1 * u.m, 2 * u.m))
interval._validate_shape([1 * u.m, 2 * u.m])
# Passes (1, 2)
interval._validate_shape(((1, 2),))
interval._validate_shape(([1, 2],))
interval._validate_shape([(1, 2)])
interval._validate_shape([[1, 2]])
interval._validate_shape(((1 * u.m, 2 * u.m),))
interval._validate_shape(([1 * u.m, 2 * u.m],))
interval._validate_shape([(1 * u.m, 2 * u.m)])
interval._validate_shape([[1 * u.m, 2 * u.m]])
# Passes (2, 0)
interval._validate_shape((mk.MagicMock(), mk.MagicMock()))
interval._validate_shape([mk.MagicMock(), mk.MagicMock()])
# Passes with array inputs:
interval._validate_shape((np.array([-2.5, -3.5]), np.array([2.5, 3.5])))
interval._validate_shape(
(np.array([-2.5, -3.5, -4.5]), np.array([2.5, 3.5, 4.5]))
)
# Fails shape (no units)
with pytest.raises(ValueError, match=MESSAGE):
interval._validate_shape((1, 2, 3))
with pytest.raises(ValueError, match=MESSAGE):
interval._validate_shape([1, 2, 3])
with pytest.raises(ValueError, match=MESSAGE):
interval._validate_shape([[1, 2, 3], [4, 5, 6]])
with pytest.raises(ValueError, match=MESSAGE):
interval._validate_shape(1)
# Fails shape (units)
with pytest.raises(ValueError, match=MESSAGE):
interval._validate_shape((1 * u.m, 2 * u.m, 3 * u.m))
with pytest.raises(ValueError, match=MESSAGE):
interval._validate_shape([1 * u.m, 2 * u.m, 3 * u.m])
with pytest.raises(ValueError, match=MESSAGE):
interval._validate_shape(
[[1 * u.m, 2 * u.m, 3 * u.m], [4 * u.m, 5 * u.m, 6 * u.m]]
)
with pytest.raises(ValueError, match=MESSAGE):
interval._validate_shape(1 * u.m)
# Fails shape (arrays):
with pytest.raises(ValueError, match=MESSAGE):
interval._validate_shape(
(np.array([-2.5, -3.5]), np.array([2.5, 3.5]), np.array([3, 4]))
)
with pytest.raises(ValueError, match=MESSAGE):
interval._validate_shape((np.array([-2.5, -3.5]), [2.5, 3.5]))
def test__validate_bounds(self):
# Passes
assert _Interval._validate_bounds(1, 2) == (1, 2)
assert _Interval._validate_bounds(1 * u.m, 2 * u.m) == (1 * u.m, 2 * u.m)
interval = _Interval._validate_bounds(
np.array([-2.5, -3.5]), np.array([2.5, 3.5])
)
assert (interval.lower == np.array([-2.5, -3.5])).all()
assert (interval.upper == np.array([2.5, 3.5])).all()
# Fails
with pytest.warns(
RuntimeWarning,
match=r"Invalid interval: upper bound 1 is strictly "
r"less than lower bound 2\.",
):
_Interval._validate_bounds(2, 1)
with pytest.warns(
RuntimeWarning,
match=r"Invalid interval: upper bound 1\.0 m is strictly "
r"less than lower bound 2\.0 m\.",
):
_Interval._validate_bounds(2 * u.m, 1 * u.m)
def test_validate(self):
# Passes
assert _Interval.validate((1, 2)) == (1, 2)
assert _Interval.validate([1, 2]) == (1, 2)
assert _Interval.validate((1 * u.m, 2 * u.m)) == (1 * u.m, 2 * u.m)
assert _Interval.validate([1 * u.m, 2 * u.m]) == (1 * u.m, 2 * u.m)
assert _Interval.validate(((1, 2),)) == (1, 2)
assert _Interval.validate(([1, 2],)) == (1, 2)
assert _Interval.validate([(1, 2)]) == (1, 2)
assert _Interval.validate([[1, 2]]) == (1, 2)
assert _Interval.validate(((1 * u.m, 2 * u.m),)) == (1 * u.m, 2 * u.m)
assert _Interval.validate(([1 * u.m, 2 * u.m],)) == (1 * u.m, 2 * u.m)
assert _Interval.validate([(1 * u.m, 2 * u.m)]) == (1 * u.m, 2 * u.m)
assert _Interval.validate([[1 * u.m, 2 * u.m]]) == (1 * u.m, 2 * u.m)
interval = _Interval.validate((np.array([-2.5, -3.5]), np.array([2.5, 3.5])))
assert (interval.lower == np.array([-2.5, -3.5])).all()
assert (interval.upper == np.array([2.5, 3.5])).all()
interval = _Interval.validate(
(np.array([-2.5, -3.5, -4.5]), np.array([2.5, 3.5, 4.5]))
)
assert (interval.lower == np.array([-2.5, -3.5, -4.5])).all()
assert (interval.upper == np.array([2.5, 3.5, 4.5])).all()
# Fail shape
MESSAGE = r"An interval must be some sort of sequence of length 2"
with pytest.raises(ValueError, match=MESSAGE):
_Interval.validate((1, 2, 3))
# Fail bounds
with pytest.warns(RuntimeWarning):
_Interval.validate((2, 1))
def test_outside(self):
interval = _Interval.validate((0, 1))
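        # np.linspace(-1, 2, 13) steps by 0.25: the four points below 0 and the
        # four points above 1 fall outside the closed interval [0, 1].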
# fmt: off
assert (
interval.outside(np.linspace(-1, 2, 13))
== [
True, True, True, True,
False, False, False, False, False,
True, True, True, True
]
).all()
# fmt: on
def test_domain(self):
interval = _Interval.validate((0, 1))
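        # domain(resolution) samples the interval at the given step, so a step
        # of 0.25 over [0, 1] yields the five points 0, 0.25, 0.5, 0.75, 1.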
assert (interval.domain(0.25) == np.linspace(0, 1, 5)).all()
def test__ignored_interval(self):
assert _ignored_interval.lower == -np.inf
assert _ignored_interval.upper == np.inf
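        # Every finite value, however large, lies strictly inside (-inf, inf),
        # so nothing is ever flagged as outside the ignored interval.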
for num in [0, -1, -100, 3.14, 10**100, -(10**100)]:
assert not num < _ignored_interval[0]
assert num > _ignored_interval[0]
assert not num > _ignored_interval[1]
assert num < _ignored_interval[1]
assert not (_ignored_interval.outside(np.array([num]))).all()
def test_validate_with_SpectralCoord(self):
"""Regression test for issue #12439"""
lower = SpectralCoord(1, u.um)
upper = SpectralCoord(10, u.um)
interval = _Interval.validate((lower, upper))
assert interval.lower == lower
assert interval.upper == upper
class Test_BoundingDomain:
def setup_method(self):
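        # _BoundingDomain is abstract; this minimal concrete subclass simply
        # forwards to the base-class implementations so the tests below can
        # exercise them directly.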
class BoundingDomain(_BoundingDomain):
def fix_inputs(self, model, fix_inputs):
super().fix_inputs(model, fixed_inputs=fix_inputs)
def prepare_inputs(self, input_shape, inputs):
super().prepare_inputs(input_shape, inputs)
self.BoundingDomain = BoundingDomain
def test_create(self):
model = mk.MagicMock()
bounding_box = self.BoundingDomain(model)
assert bounding_box._model == model
assert bounding_box._ignored == []
assert bounding_box._order == "C"
bounding_box = self.BoundingDomain(model, order="F")
assert bounding_box._model == model
assert bounding_box._ignored == []
assert bounding_box._order == "F"
bounding_box = self.BoundingDomain(Gaussian2D(), ["x"])
assert bounding_box._ignored == [0]
assert bounding_box._order == "C"
# Error
MESSAGE = r"order must be either 'C' .* or 'F' .*, got: .*"
with pytest.raises(ValueError, match=MESSAGE):
self.BoundingDomain(model, order=mk.MagicMock())
def test_model(self):
model = mk.MagicMock()
bounding_box = self.BoundingDomain(model)
assert bounding_box._model == model
assert bounding_box.model == model
def test_order(self):
bounding_box = self.BoundingDomain(mk.MagicMock(), order="C")
assert bounding_box._order == "C"
assert bounding_box.order == "C"
bounding_box = self.BoundingDomain(mk.MagicMock(), order="F")
assert bounding_box._order == "F"
assert bounding_box.order == "F"
bounding_box._order = "test"
assert bounding_box.order == "test"
def test_ignored(self):
ignored = [0]
model = mk.MagicMock()
model.n_inputs = 1
model.inputs = ["x"]
bounding_box = self.BoundingDomain(model, ignored=ignored)
assert bounding_box._ignored == ignored
assert bounding_box.ignored == ignored
def test__get_order(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
# Success (default 'C')
assert bounding_box._order == "C"
assert bounding_box._get_order() == "C"
assert bounding_box._get_order("C") == "C"
assert bounding_box._get_order("F") == "F"
# Success (default 'F')
bounding_box._order = "F"
assert bounding_box._order == "F"
assert bounding_box._get_order() == "F"
assert bounding_box._get_order("C") == "C"
assert bounding_box._get_order("F") == "F"
# Error
MESSAGE = r"order must be either 'C' .* or 'F' .*, got: .*"
with pytest.raises(ValueError, match=MESSAGE):
bounding_box._get_order(mk.MagicMock())
def test__get_index(self):
bounding_box = self.BoundingDomain(Gaussian2D())
# Pass input name
assert bounding_box._get_index("x") == 0
assert bounding_box._get_index("y") == 1
# Pass invalid input name
MESSAGE = r"'z' is not one of the inputs: .*"
with pytest.raises(ValueError, match=MESSAGE):
bounding_box._get_index("z")
# Pass valid index
assert bounding_box._get_index(0) == 0
assert bounding_box._get_index(1) == 1
assert bounding_box._get_index(np.int32(0)) == 0
assert bounding_box._get_index(np.int32(1)) == 1
assert bounding_box._get_index(np.int64(0)) == 0
assert bounding_box._get_index(np.int64(1)) == 1
# Pass invalid index
MESSAGE = r"Integer key: .* must be non-negative and < 2"
with pytest.raises(IndexError, match=MESSAGE):
bounding_box._get_index(2)
with pytest.raises(IndexError, match=MESSAGE):
bounding_box._get_index(np.int32(2))
with pytest.raises(IndexError, match=MESSAGE):
bounding_box._get_index(np.int64(2))
with pytest.raises(IndexError, match=MESSAGE):
bounding_box._get_index(-1)
# Pass invalid key
MESSAGE = r"Key value: .* must be string or integer"
with pytest.raises(ValueError, match=MESSAGE):
bounding_box._get_index(mk.MagicMock())
def test__get_name(self):
model = mk.MagicMock()
model.n_inputs = 1
model.inputs = ["x"]
bounding_box = self.BoundingDomain(model)
index = mk.MagicMock()
name = mk.MagicMock()
model.inputs = mk.MagicMock()
model.inputs.__getitem__.return_value = name
assert bounding_box._get_name(index) == name
assert model.inputs.__getitem__.call_args_list == [mk.call(index)]
def test_ignored_inputs(self):
model = mk.MagicMock()
ignored = list(range(4, 8))
model.n_inputs = 8
model.inputs = [mk.MagicMock() for _ in range(8)]
bounding_box = self.BoundingDomain(model, ignored=ignored)
inputs = bounding_box.ignored_inputs
assert isinstance(inputs, list)
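        # Ignoring indices 4..7 should surface exactly the corresponding model
        # inputs, in order, as the ignored_inputs list; the remaining inputs
        # (indices 0..3) must not appear in it.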
for index, _input in enumerate(inputs):
assert _input in model.inputs
assert model.inputs[index + 4] == _input
for index, _input in enumerate(model.inputs):
if _input in inputs:
assert inputs[index - 4] == _input
else:
assert index < 4
def test__validate_ignored(self):
bounding_box = self.BoundingDomain(Gaussian2D())
# Pass
assert bounding_box._validate_ignored(None) == []
assert bounding_box._validate_ignored(["x", "y"]) == [0, 1]
assert bounding_box._validate_ignored([0, 1]) == [0, 1]
assert bounding_box._validate_ignored([np.int32(0), np.int64(1)]) == [0, 1]
# Fail
with pytest.raises(
ValueError, match=r"Key value: .* must be string or integer"
):
bounding_box._validate_ignored([mk.MagicMock()])
with pytest.raises(ValueError, match=r"'.*' is not one of the inputs: .*"):
bounding_box._validate_ignored(["z"])
MESSAGE = r"Integer key: 3 must be non-negative and < 2"
with pytest.raises(IndexError, match=MESSAGE):
bounding_box._validate_ignored([3])
with pytest.raises(IndexError, match=MESSAGE):
bounding_box._validate_ignored([np.int32(3)])
with pytest.raises(IndexError, match=MESSAGE):
bounding_box._validate_ignored([np.int64(3)])
def test___call__(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
args = tuple(mk.MagicMock() for _ in range(3))
kwargs = {f"test{idx}": mk.MagicMock() for idx in range(3)}
MESSAGE = (
r"This bounding box is fixed by the model and does not have adjustable"
r" parameters"
)
with pytest.raises(RuntimeError, match=MESSAGE):
bounding_box(*args, **kwargs)
def test_fix_inputs(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
model = mk.MagicMock()
fixed_inputs = mk.MagicMock()
with pytest.raises(
NotImplementedError, match=r"This should be implemented by a child class"
):
bounding_box.fix_inputs(model, fixed_inputs)
def test__prepare_inputs(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
with pytest.raises(
NotImplementedError,
match=r"This has not been implemented for BoundingDomain",
):
bounding_box.prepare_inputs(mk.MagicMock(), mk.MagicMock())
    def test__base_output(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
# Simple shape
input_shape = (13,)
output = bounding_box._base_output(input_shape, 0)
assert (output == 0).all()
assert output.shape == input_shape
output = bounding_box._base_output(input_shape, np.nan)
assert (np.isnan(output)).all()
assert output.shape == input_shape
output = bounding_box._base_output(input_shape, 14)
assert (output == 14).all()
assert output.shape == input_shape
# Complex shape
input_shape = (13, 7)
output = bounding_box._base_output(input_shape, 0)
assert (output == 0).all()
assert output.shape == input_shape
output = bounding_box._base_output(input_shape, np.nan)
assert (np.isnan(output)).all()
assert output.shape == input_shape
output = bounding_box._base_output(input_shape, 14)
assert (output == 14).all()
assert output.shape == input_shape
def test__all_out_output(self):
model = mk.MagicMock()
bounding_box = self.BoundingDomain(model)
# Simple shape
model.n_outputs = 1
input_shape = (13,)
output, output_unit = bounding_box._all_out_output(input_shape, 0)
assert (np.array(output) == 0).all()
assert np.array(output).shape == (1, 13)
assert output_unit is None
# Complex shape
model.n_outputs = 6
input_shape = (13, 7)
output, output_unit = bounding_box._all_out_output(input_shape, 0)
assert (np.array(output) == 0).all()
assert np.array(output).shape == (6, 13, 7)
assert output_unit is None
def test__modify_output(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
valid_index = mk.MagicMock()
input_shape = mk.MagicMock()
fill_value = mk.MagicMock()
# Simple shape
with mk.patch.object(
_BoundingDomain,
"_base_output",
autospec=True,
return_value=np.asanyarray(0),
) as mkBase:
assert (
np.array([1, 2, 3])
== bounding_box._modify_output(
[1, 2, 3], valid_index, input_shape, fill_value
)
).all()
assert mkBase.call_args_list == [mk.call(input_shape, fill_value)]
# Replacement
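        # valid_index [[0, 2, 4]] marks positions 0, 2 and 4 of the base output
        # [1, 2, 3, 4, 5, 6] for replacement by the valid outputs [7, 8, 9].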
with mk.patch.object(
_BoundingDomain,
"_base_output",
autospec=True,
return_value=np.array([1, 2, 3, 4, 5, 6]),
) as mkBase:
assert (
np.array([7, 2, 8, 4, 9, 6])
== bounding_box._modify_output(
[7, 8, 9], np.array([[0, 2, 4]]), input_shape, fill_value
)
).all()
assert mkBase.call_args_list == [mk.call(input_shape, fill_value)]
def test__prepare_outputs(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
valid_index = mk.MagicMock()
input_shape = mk.MagicMock()
fill_value = mk.MagicMock()
valid_outputs = [mk.MagicMock() for _ in range(3)]
effects = [mk.MagicMock() for _ in range(3)]
with mk.patch.object(
_BoundingDomain, "_modify_output", autospec=True, side_effect=effects
) as mkModify:
assert effects == bounding_box._prepare_outputs(
valid_outputs, valid_index, input_shape, fill_value
)
assert mkModify.call_args_list == [
mk.call(
bounding_box,
valid_outputs[idx],
valid_index,
input_shape,
fill_value,
)
for idx in range(3)
]
def test_prepare_outputs(self):
model = mk.MagicMock()
bounding_box = self.BoundingDomain(model)
valid_outputs = mk.MagicMock()
valid_index = mk.MagicMock()
input_shape = mk.MagicMock()
fill_value = mk.MagicMock()
with mk.patch.object(
_BoundingDomain, "_prepare_outputs", autospec=True
) as mkPrepare:
# Reshape valid_outputs
model.n_outputs = 1
assert mkPrepare.return_value == bounding_box.prepare_outputs(
valid_outputs, valid_index, input_shape, fill_value
)
assert mkPrepare.call_args_list == [
mk.call(
bounding_box, [valid_outputs], valid_index, input_shape, fill_value
)
]
mkPrepare.reset_mock()
# No reshape valid_outputs
model.n_outputs = 2
assert mkPrepare.return_value == bounding_box.prepare_outputs(
valid_outputs, valid_index, input_shape, fill_value
)
assert mkPrepare.call_args_list == [
mk.call(
bounding_box, valid_outputs, valid_index, input_shape, fill_value
)
]
def test__get_valid_outputs_unit(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
# Don't get unit
assert bounding_box._get_valid_outputs_unit(mk.MagicMock(), False) is None
# Get unit from unitless
assert bounding_box._get_valid_outputs_unit(7, True) is None
# Get unit
assert bounding_box._get_valid_outputs_unit(25 * u.m, True) == u.m
def test__evaluate_model(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
evaluate = mk.MagicMock()
valid_inputs = mk.MagicMock()
input_shape = mk.MagicMock()
valid_index = mk.MagicMock()
fill_value = mk.MagicMock()
with_units = mk.MagicMock()
with mk.patch.object(
_BoundingDomain, "_get_valid_outputs_unit", autospec=True
) as mkGet:
with mk.patch.object(
_BoundingDomain, "prepare_outputs", autospec=True
) as mkPrepare:
assert bounding_box._evaluate_model(
evaluate,
valid_inputs,
valid_index,
input_shape,
fill_value,
with_units,
) == (mkPrepare.return_value, mkGet.return_value)
assert mkPrepare.call_args_list == [
mk.call(
bounding_box,
evaluate.return_value,
valid_index,
input_shape,
fill_value,
)
]
assert mkGet.call_args_list == [
mk.call(evaluate.return_value, with_units)
]
assert evaluate.call_args_list == [mk.call(valid_inputs)]
def test__evaluate(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
evaluate = mk.MagicMock()
inputs = mk.MagicMock()
input_shape = mk.MagicMock()
fill_value = mk.MagicMock()
with_units = mk.MagicMock()
valid_inputs = mk.MagicMock()
valid_index = mk.MagicMock()
effects = [
(valid_inputs, valid_index, True),
(valid_inputs, valid_index, False),
]
with mk.patch.object(
self.BoundingDomain, "prepare_inputs", autospec=True, side_effect=effects
) as mkPrepare:
with mk.patch.object(
_BoundingDomain, "_all_out_output", autospec=True
) as mkAll:
with mk.patch.object(
_BoundingDomain, "_evaluate_model", autospec=True
) as mkEvaluate:
# all_out
assert (
bounding_box._evaluate(
evaluate, inputs, input_shape, fill_value, with_units
)
== mkAll.return_value
)
assert mkAll.call_args_list == [
mk.call(bounding_box, input_shape, fill_value)
]
assert mkEvaluate.call_args_list == []
assert mkPrepare.call_args_list == [
mk.call(bounding_box, input_shape, inputs)
]
mkAll.reset_mock()
mkPrepare.reset_mock()
# not all_out
assert (
bounding_box._evaluate(
evaluate, inputs, input_shape, fill_value, with_units
)
== mkEvaluate.return_value
)
assert mkAll.call_args_list == []
assert mkEvaluate.call_args_list == [
mk.call(
bounding_box,
evaluate,
valid_inputs,
valid_index,
input_shape,
fill_value,
with_units,
)
]
assert mkPrepare.call_args_list == [
mk.call(bounding_box, input_shape, inputs)
]
def test__set_outputs_unit(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
# set no unit
assert bounding_box._set_outputs_unit(27, None) == 27
# set unit
assert bounding_box._set_outputs_unit(27, u.m) == 27 * u.m
def test_evaluate(self):
bounding_box = self.BoundingDomain(Gaussian2D())
evaluate = mk.MagicMock()
inputs = mk.MagicMock()
fill_value = mk.MagicMock()
outputs = mk.MagicMock()
valid_outputs_unit = mk.MagicMock()
value = (outputs, valid_outputs_unit)
with mk.patch.object(
_BoundingDomain, "_evaluate", autospec=True, return_value=value
) as mkEvaluate:
with mk.patch.object(
_BoundingDomain, "_set_outputs_unit", autospec=True
) as mkSet:
with mk.patch.object(Model, "input_shape", autospec=True) as mkShape:
with mk.patch.object(
Model, "bbox_with_units", new_callable=mk.PropertyMock
) as mkUnits:
assert tuple(mkSet.return_value) == bounding_box.evaluate(
evaluate, inputs, fill_value
)
assert mkSet.call_args_list == [
mk.call(outputs, valid_outputs_unit)
]
assert mkEvaluate.call_args_list == [
mk.call(
bounding_box,
evaluate,
inputs,
mkShape.return_value,
fill_value,
mkUnits.return_value,
)
]
assert mkShape.call_args_list == [
mk.call(bounding_box._model, inputs)
]
assert mkUnits.call_args_list == [mk.call()]
class TestModelBoundingBox:
def test_create(self):
intervals = ()
model = mk.MagicMock()
bounding_box = ModelBoundingBox(intervals, model)
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {}
assert bounding_box._model == model
assert bounding_box._ignored == []
assert bounding_box._order == "C"
# Set optional
intervals = {}
model = mk.MagicMock()
bounding_box = ModelBoundingBox(intervals, model, order="F")
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {}
assert bounding_box._model == model
assert bounding_box._ignored == []
assert bounding_box._order == "F"
# Set interval
intervals = (1, 2)
model = mk.MagicMock()
model.n_inputs = 1
model.inputs = ["x"]
bounding_box = ModelBoundingBox(intervals, model)
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {0: (1, 2)}
assert bounding_box._model == model
# Set ignored
intervals = (1, 2)
model = mk.MagicMock()
model.n_inputs = 2
model.inputs = ["x", "y"]
bounding_box = ModelBoundingBox(intervals, model, ignored=[1])
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {0: (1, 2)}
assert bounding_box._model == model
assert bounding_box._ignored == [1]
intervals = ((1, 2), (3, 4))
model = mk.MagicMock()
model.n_inputs = 3
model.inputs = ["x", "y", "z"]
bounding_box = ModelBoundingBox(intervals, model, ignored=[2], order="F")
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {0: (1, 2), 1: (3, 4)}
assert bounding_box._model == model
assert bounding_box._ignored == [2]
assert bounding_box._order == "F"
def test_copy(self):
bounding_box = ModelBoundingBox.validate(
Gaussian2D(), ((-4.5, 4.5), (-1.4, 1.4))
)
copy = bounding_box.copy()
assert bounding_box == copy
assert id(bounding_box) != id(copy)
assert bounding_box.ignored == copy.ignored
assert id(bounding_box.ignored) != id(copy.ignored)
# model is not copied to prevent infinite recursion
assert bounding_box._model == copy._model
assert id(bounding_box._model) == id(copy._model)
        # Same string values will have the same id
assert bounding_box._order == copy._order
assert id(bounding_box._order) == id(copy._order)
# Check interval objects
for index, interval in bounding_box.intervals.items():
assert interval == copy.intervals[index]
assert id(interval) != id(copy.intervals[index])
            # Same float values will have the same id
assert interval.lower == copy.intervals[index].lower
assert id(interval.lower) == id(copy.intervals[index].lower)
            # Same float values will have the same id
assert interval.upper == copy.intervals[index].upper
assert id(interval.upper) == id(copy.intervals[index].upper)
assert len(bounding_box.intervals) == len(copy.intervals)
assert bounding_box.intervals.keys() == copy.intervals.keys()
def test_intervals(self):
intervals = {0: _Interval(1, 2)}
model = mk.MagicMock()
model.n_inputs = 1
model.inputs = ["x"]
bounding_box = ModelBoundingBox(intervals, model)
assert bounding_box._intervals == intervals
assert bounding_box.intervals == intervals
def test_named_intervals(self):
intervals = {idx: _Interval(idx, idx + 1) for idx in range(4)}
model = mk.MagicMock()
model.n_inputs = 4
model.inputs = [mk.MagicMock() for _ in range(4)]
bounding_box = ModelBoundingBox(intervals, model)
named = bounding_box.named_intervals
assert isinstance(named, dict)
for name, interval in named.items():
assert name in model.inputs
assert intervals[model.inputs.index(name)] == interval
for index, name in enumerate(model.inputs):
assert index in intervals
assert name in named
assert intervals[index] == named[name]
def test___repr__(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert (
bounding_box.__repr__() == "ModelBoundingBox(\n"
" intervals={\n"
" x: Interval(lower=-1, upper=1)\n"
" y: Interval(lower=-4, upper=4)\n"
" }\n"
" model=Gaussian2D(inputs=('x', 'y'))\n"
" order='C'\n"
")"
)
intervals = {0: _Interval(-1, 1)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals, ignored=["y"])
assert (
bounding_box.__repr__() == "ModelBoundingBox(\n"
" intervals={\n"
" x: Interval(lower=-1, upper=1)\n"
" }\n"
" ignored=['y']\n"
" model=Gaussian2D(inputs=('x', 'y'))\n"
" order='C'\n"
")"
)
def test___len__(self):
intervals = {0: _Interval(-1, 1)}
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert len(bounding_box) == 1 == len(bounding_box._intervals)
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert len(bounding_box) == 2 == len(bounding_box._intervals)
bounding_box._intervals = {}
assert len(bounding_box) == 0 == len(bounding_box._intervals)
def test___contains__(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Contains with keys
assert "x" in bounding_box
assert "y" in bounding_box
assert "z" not in bounding_box
# Contains with index
assert 0 in bounding_box
assert 1 in bounding_box
assert 2 not in bounding_box
# General not in
assert mk.MagicMock() not in bounding_box
# Contains with ignored
del bounding_box["y"]
# Contains with keys
assert "x" in bounding_box
assert "y" in bounding_box
assert "z" not in bounding_box
# Contains with index
assert 0 in bounding_box
assert 1 in bounding_box
assert 2 not in bounding_box
def test___getitem__(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Get using input key
assert bounding_box["x"] == (-1, 1)
assert bounding_box["y"] == (-4, 4)
# Fail with input key
with pytest.raises(ValueError, match=r"'.*' is not one of the inputs: .*"):
bounding_box["z"]
# Get using index
assert bounding_box[0] == (-1, 1)
assert bounding_box[1] == (-4, 4)
assert bounding_box[np.int32(0)] == (-1, 1)
assert bounding_box[np.int32(1)] == (-4, 4)
assert bounding_box[np.int64(0)] == (-1, 1)
assert bounding_box[np.int64(1)] == (-4, 4)
# Fail with index
MESSAGE = r"Integer key: 2 must be non-negative and < 2"
with pytest.raises(IndexError, match=MESSAGE):
bounding_box[2]
with pytest.raises(IndexError, match=MESSAGE):
bounding_box[np.int32(2)]
with pytest.raises(IndexError, match=MESSAGE):
bounding_box[np.int64(2)]
# get ignored interval
del bounding_box[0]
assert bounding_box[0] == _ignored_interval
assert bounding_box[1] == (-4, 4)
del bounding_box[1]
assert bounding_box[0] == _ignored_interval
assert bounding_box[1] == _ignored_interval
def test_bounding_box(self):
# 0D
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model, {}, ignored=["x"])
assert bounding_box.bounding_box() == (-np.inf, np.inf)
assert bounding_box.bounding_box("C") == (-np.inf, np.inf)
assert bounding_box.bounding_box("F") == (-np.inf, np.inf)
# 1D
intervals = {0: _Interval(-1, 1)}
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box.bounding_box() == (-1, 1)
assert bounding_box.bounding_box(mk.MagicMock()) == (-1, 1)
# > 1D
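        # With two inputs, 'C' order lists the last input's interval first
        # (y then x), while 'F' order follows the input order (x then y).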
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box.bounding_box() == ((-4, 4), (-1, 1))
assert bounding_box.bounding_box("C") == ((-4, 4), (-1, 1))
assert bounding_box.bounding_box("F") == ((-1, 1), (-4, 4))
def test___eq__(self):
intervals = {0: _Interval(-1, 1)}
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model.copy(), intervals.copy())
assert bounding_box == bounding_box
assert bounding_box == ModelBoundingBox.validate(model.copy(), intervals.copy())
assert bounding_box == (-1, 1)
assert not (bounding_box == mk.MagicMock())
assert not (bounding_box == (-2, 2))
assert not (
bounding_box == ModelBoundingBox.validate(model, {0: _Interval(-2, 2)})
)
# Respect ordering
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box_1 = ModelBoundingBox.validate(model, intervals)
bounding_box_2 = ModelBoundingBox.validate(model, intervals, order="F")
assert bounding_box_1._order == "C"
assert bounding_box_1 == ((-4, 4), (-1, 1))
assert not (bounding_box_1 == ((-1, 1), (-4, 4)))
assert bounding_box_2._order == "F"
assert not (bounding_box_2 == ((-4, 4), (-1, 1)))
assert bounding_box_2 == ((-1, 1), (-4, 4))
assert bounding_box_1 == bounding_box_2
# Respect ignored
model = Gaussian2D()
bounding_box_1._ignored = [mk.MagicMock()]
bounding_box_2._ignored = [mk.MagicMock()]
assert bounding_box_1._ignored != bounding_box_2._ignored
assert not (bounding_box_1 == bounding_box_2)
def test__setitem__(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, {}, ignored=[0, 1])
assert bounding_box._ignored == [0, 1]
# USING Intervals directly
# Set interval using key
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
bounding_box["x"] = _Interval(-1, 1)
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert isinstance(bounding_box["x"], _Interval)
assert bounding_box["x"] == (-1, 1)
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
bounding_box["y"] = _Interval(-4, 4)
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert isinstance(bounding_box["y"], _Interval)
assert bounding_box["y"] == (-4, 4)
del bounding_box["x"]
del bounding_box["y"]
# Set interval using index
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
bounding_box[0] = _Interval(-1, 1)
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert isinstance(bounding_box[0], _Interval)
assert bounding_box[0] == (-1, 1)
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
bounding_box[1] = _Interval(-4, 4)
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert isinstance(bounding_box[1], _Interval)
assert bounding_box[1] == (-4, 4)
del bounding_box[0]
del bounding_box[1]
# USING tuples
# Set interval using key
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
bounding_box["x"] = (-1, 1)
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert isinstance(bounding_box["x"], _Interval)
assert bounding_box["x"] == (-1, 1)
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
bounding_box["y"] = (-4, 4)
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert isinstance(bounding_box["y"], _Interval)
assert bounding_box["y"] == (-4, 4)
del bounding_box["x"]
del bounding_box["y"]
# Set interval using index
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
bounding_box[0] = (-1, 1)
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert isinstance(bounding_box[0], _Interval)
assert bounding_box[0] == (-1, 1)
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
bounding_box[1] = (-4, 4)
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert isinstance(bounding_box[1], _Interval)
assert bounding_box[1] == (-4, 4)
# Model set support
model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2)
bounding_box = ModelBoundingBox({}, model)
# USING Intervals directly
# Set interval using key
assert "x" not in bounding_box
bounding_box["x"] = _Interval(np.array([-1, -2]), np.array([1, 2]))
assert "x" in bounding_box
assert isinstance(bounding_box["x"], _Interval)
assert (bounding_box["x"].lower == np.array([-1, -2])).all()
assert (bounding_box["x"].upper == np.array([1, 2])).all()
# Set interval using index
bounding_box._intervals = {}
assert 0 not in bounding_box
bounding_box[0] = _Interval(np.array([-1, -2]), np.array([1, 2]))
assert 0 in bounding_box
assert isinstance(bounding_box[0], _Interval)
assert (bounding_box[0].lower == np.array([-1, -2])).all()
assert (bounding_box[0].upper == np.array([1, 2])).all()
# USING tuples
# Set interval using key
bounding_box._intervals = {}
assert "x" not in bounding_box
bounding_box["x"] = (np.array([-1, -2]), np.array([1, 2]))
assert "x" in bounding_box
assert isinstance(bounding_box["x"], _Interval)
assert (bounding_box["x"].lower == np.array([-1, -2])).all()
assert (bounding_box["x"].upper == np.array([1, 2])).all()
# Set interval using index
bounding_box._intervals = {}
assert 0 not in bounding_box
bounding_box[0] = (np.array([-1, -2]), np.array([1, 2]))
assert 0 in bounding_box
assert isinstance(bounding_box[0], _Interval)
assert (bounding_box[0].lower == np.array([-1, -2])).all()
assert (bounding_box[0].upper == np.array([1, 2])).all()
def test___delitem__(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Using index
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert 0 in bounding_box
assert "x" in bounding_box
del bounding_box[0]
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
assert 0 in bounding_box
assert "x" in bounding_box
# Delete an ignored item
with pytest.raises(RuntimeError, match=r"Cannot delete ignored input: 0!"):
del bounding_box[0]
# Using key
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert 0 in bounding_box
assert "y" in bounding_box
del bounding_box["y"]
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
assert 0 in bounding_box
assert "y" in bounding_box
# Delete an ignored item
with pytest.raises(RuntimeError, match=r"Cannot delete ignored input: y!"):
del bounding_box["y"]
def test__validate_dict(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox({}, model)
# Input name keys
intervals = {"x": _Interval(-1, 1), "y": _Interval(-4, 4)}
assert "x" not in bounding_box
assert "y" not in bounding_box
bounding_box._validate_dict(intervals)
assert "x" in bounding_box
assert bounding_box["x"] == (-1, 1)
assert "y" in bounding_box
assert bounding_box["y"] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Input index
bounding_box._intervals = {}
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
assert 0 not in bounding_box
assert 1 not in bounding_box
bounding_box._validate_dict(intervals)
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Model set support
model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2)
bounding_box = ModelBoundingBox({}, model)
# name keys
intervals = {"x": _Interval(np.array([-1, -2]), np.array([1, 2]))}
assert "x" not in bounding_box
bounding_box._validate_dict(intervals)
assert "x" in bounding_box
assert (bounding_box["x"].lower == np.array([-1, -2])).all()
assert (bounding_box["x"].upper == np.array([1, 2])).all()
# input index
bounding_box._intervals = {}
intervals = {0: _Interval(np.array([-1, -2]), np.array([1, 2]))}
assert 0 not in bounding_box
bounding_box._validate_dict(intervals)
assert 0 in bounding_box
assert (bounding_box[0].lower == np.array([-1, -2])).all()
assert (bounding_box[0].upper == np.array([1, 2])).all()
def test__validate_sequence(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox({}, model)
# Default order
assert "x" not in bounding_box
assert "y" not in bounding_box
bounding_box._validate_sequence(((-4, 4), (-1, 1)))
assert "x" in bounding_box
assert bounding_box["x"] == (-1, 1)
assert "y" in bounding_box
assert bounding_box["y"] == (-4, 4)
assert len(bounding_box.intervals) == 2
# C order
bounding_box._intervals = {}
assert "x" not in bounding_box
assert "y" not in bounding_box
bounding_box._validate_sequence(((-4, 4), (-1, 1)), order="C")
assert "x" in bounding_box
assert bounding_box["x"] == (-1, 1)
assert "y" in bounding_box
assert bounding_box["y"] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Fortran order
bounding_box._intervals = {}
assert "x" not in bounding_box
assert "y" not in bounding_box
bounding_box._validate_sequence(((-4, 4), (-1, 1)), order="F")
assert "x" in bounding_box
assert bounding_box["x"] == (-4, 4)
assert "y" in bounding_box
assert bounding_box["y"] == (-1, 1)
assert len(bounding_box.intervals) == 2
# Invalid order
bounding_box._intervals = {}
assert "x" not in bounding_box
assert "y" not in bounding_box
MESSAGE = r"order must be either 'C' .* or 'F' .*, got: .*"
with pytest.raises(ValueError, match=MESSAGE):
bounding_box._validate_sequence(((-4, 4), (-1, 1)), order=mk.MagicMock())
assert "x" not in bounding_box
assert "y" not in bounding_box
assert len(bounding_box.intervals) == 0
def test__n_inputs(self):
model = Gaussian2D()
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box._n_inputs == 2
intervals = {0: _Interval(-1, 1)}
bounding_box = ModelBoundingBox.validate(model, intervals, ignored=["y"])
assert bounding_box._n_inputs == 1
bounding_box = ModelBoundingBox.validate(model, {}, ignored=["x", "y"])
assert bounding_box._n_inputs == 0
bounding_box._ignored = ["x", "y", "z"]
assert bounding_box._n_inputs == 0
def test__validate_iterable(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox({}, model)
# Pass sequence Default order
assert "x" not in bounding_box
assert "y" not in bounding_box
bounding_box._validate_iterable(((-4, 4), (-1, 1)))
assert "x" in bounding_box
assert bounding_box["x"] == (-1, 1)
assert "y" in bounding_box
assert bounding_box["y"] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Pass sequence
bounding_box._intervals = {}
assert "x" not in bounding_box
assert "y" not in bounding_box
bounding_box._validate_iterable(((-4, 4), (-1, 1)), order="F")
assert "x" in bounding_box
assert bounding_box["x"] == (-4, 4)
assert "y" in bounding_box
assert bounding_box["y"] == (-1, 1)
assert len(bounding_box.intervals) == 2
# Pass Dict
bounding_box._intervals = {}
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
assert 0 not in bounding_box
assert 1 not in bounding_box
bounding_box._validate_iterable(intervals)
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Pass with ignored
bounding_box._intervals = {}
bounding_box._ignored = [1]
intervals = {0: _Interval(-1, 1)}
assert 0 not in bounding_box.intervals
bounding_box._validate_iterable(intervals)
assert 0 in bounding_box.intervals
assert bounding_box[0] == (-1, 1)
# Invalid iterable
MESSAGE = "Found {} intervals, but must have exactly {}"
bounding_box._intervals = {}
bounding_box._ignored = []
assert "x" not in bounding_box
assert "y" not in bounding_box
with pytest.raises(ValueError, match=MESSAGE.format(3, 2)):
bounding_box._validate_iterable(((-4, 4), (-1, 1), (-3, 3)))
assert len(bounding_box.intervals) == 0
assert "x" not in bounding_box
assert "y" not in bounding_box
bounding_box._ignored = [1]
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
with pytest.raises(ValueError, match=MESSAGE.format(2, 1)):
bounding_box._validate_iterable(intervals)
assert len(bounding_box.intervals) == 0
bounding_box._ignored = []
intervals = {0: _Interval(-1, 1)}
with pytest.raises(ValueError, match=MESSAGE.format(1, 2)):
bounding_box._validate_iterable(intervals)
assert "x" not in bounding_box
assert "y" not in bounding_box
assert len(bounding_box.intervals) == 0
def test__validate(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox({}, model)
# Pass sequence Default order
assert "x" not in bounding_box
assert "y" not in bounding_box
bounding_box._validate(((-4, 4), (-1, 1)))
assert "x" in bounding_box
assert bounding_box["x"] == (-1, 1)
assert "y" in bounding_box
assert bounding_box["y"] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Pass sequence
bounding_box._intervals = {}
assert "x" not in bounding_box
assert "y" not in bounding_box
bounding_box._validate(((-4, 4), (-1, 1)), order="F")
assert "x" in bounding_box
assert bounding_box["x"] == (-4, 4)
assert "y" in bounding_box
assert bounding_box["y"] == (-1, 1)
assert len(bounding_box.intervals) == 2
# Pass Dict
bounding_box._intervals = {}
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
assert "x" not in bounding_box
assert "y" not in bounding_box
bounding_box._validate(intervals)
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Pass single with ignored
intervals = {0: _Interval(-1, 1)}
bounding_box = ModelBoundingBox({}, model, ignored=[1])
assert 0 not in bounding_box.intervals
assert 1 not in bounding_box.intervals
bounding_box._validate(intervals)
assert 0 in bounding_box.intervals
assert bounding_box[0] == (-1, 1)
assert 1 not in bounding_box.intervals
assert len(bounding_box.intervals) == 1
# Pass single
model = Gaussian1D()
bounding_box = ModelBoundingBox({}, model)
assert "x" not in bounding_box
bounding_box._validate((-1, 1))
assert "x" in bounding_box
assert bounding_box["x"] == (-1, 1)
assert len(bounding_box.intervals) == 1
# Model set support
model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2)
bounding_box = ModelBoundingBox({}, model)
sequence = (np.array([-1, -2]), np.array([1, 2]))
assert "x" not in bounding_box
bounding_box._validate(sequence)
assert "x" in bounding_box
assert (bounding_box["x"].lower == np.array([-1, -2])).all()
assert (bounding_box["x"].upper == np.array([1, 2])).all()
def test_validate(self):
model = Gaussian2D()
kwargs = {"test": mk.MagicMock()}
# Pass sequence Default order
bounding_box = ModelBoundingBox.validate(model, ((-4, 4), (-1, 1)), **kwargs)
assert (bounding_box._model.parameters == model.parameters).all()
assert "x" in bounding_box
assert bounding_box["x"] == (-1, 1)
assert "y" in bounding_box
assert bounding_box["y"] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Pass sequence
bounding_box = ModelBoundingBox.validate(
model, ((-4, 4), (-1, 1)), order="F", **kwargs
)
assert (bounding_box._model.parameters == model.parameters).all()
assert "x" in bounding_box
assert bounding_box["x"] == (-4, 4)
assert "y" in bounding_box
assert bounding_box["y"] == (-1, 1)
assert len(bounding_box.intervals) == 2
# Pass Dict
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
bounding_box = ModelBoundingBox.validate(model, intervals, order="F", **kwargs)
assert (bounding_box._model.parameters == model.parameters).all()
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
assert bounding_box.order == "F"
# Pass ModelBoundingBox
bbox = bounding_box
bounding_box = ModelBoundingBox.validate(model, bbox, **kwargs)
assert (bounding_box._model.parameters == model.parameters).all()
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
assert bounding_box.order == "F"
# Pass single ignored
intervals = {0: _Interval(-1, 1)}
bounding_box = ModelBoundingBox.validate(
model, intervals, ignored=["y"], **kwargs
)
assert (bounding_box._model.parameters == model.parameters).all()
assert "x" in bounding_box
assert bounding_box["x"] == (-1, 1)
assert "y" in bounding_box
assert bounding_box["y"] == _ignored_interval
assert len(bounding_box.intervals) == 1
# Pass single
bounding_box = ModelBoundingBox.validate(Gaussian1D(), (-1, 1), **kwargs)
assert (bounding_box._model.parameters == Gaussian1D().parameters).all()
assert "x" in bounding_box
assert bounding_box["x"] == (-1, 1)
assert len(bounding_box.intervals) == 1
# Model set support
model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2)
sequence = (np.array([-1, -2]), np.array([1, 2]))
bounding_box = ModelBoundingBox.validate(model, sequence, **kwargs)
assert "x" in bounding_box
assert (bounding_box["x"].lower == np.array([-1, -2])).all()
assert (bounding_box["x"].upper == np.array([1, 2])).all()
def test_fix_inputs(self):
bounding_box = ModelBoundingBox.validate(Gaussian2D(), ((-4, 4), (-1, 1)))
# keep_ignored = False (default)
new_bounding_box = bounding_box.fix_inputs(Gaussian1D(), {1: mk.MagicMock()})
assert not (bounding_box == new_bounding_box)
assert (new_bounding_box._model.parameters == Gaussian1D().parameters).all()
assert "x" in new_bounding_box
assert new_bounding_box["x"] == (-1, 1)
assert "y" not in new_bounding_box
assert len(new_bounding_box.intervals) == 1
assert new_bounding_box.ignored == []
# keep_ignored = True
new_bounding_box = bounding_box.fix_inputs(
Gaussian2D(), {1: mk.MagicMock()}, _keep_ignored=True
)
assert not (bounding_box == new_bounding_box)
assert (new_bounding_box._model.parameters == Gaussian2D().parameters).all()
assert "x" in new_bounding_box
assert new_bounding_box["x"] == (-1, 1)
assert "y" in new_bounding_box
assert "y" in new_bounding_box.ignored_inputs
assert len(new_bounding_box.intervals) == 1
assert new_bounding_box.ignored == [1]
def test_dimension(self):
intervals = {0: _Interval(-1, 1)}
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box.dimension == 1 == len(bounding_box._intervals)
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box.dimension == 2 == len(bounding_box._intervals)
bounding_box._intervals = {}
assert bounding_box.dimension == 0 == len(bounding_box._intervals)
def test_domain(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
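        # domain(resolution, order) samples each interval at the given step;
        # 'C' order puts the y samples first, 'F' order puts the x samples first.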
# test defaults
assert (
np.array(bounding_box.domain(0.25))
== np.array([np.linspace(0, 2, 9), np.linspace(-1, 1, 9)])
).all()
# test C order
assert (
np.array(bounding_box.domain(0.25, "C"))
== np.array([np.linspace(0, 2, 9), np.linspace(-1, 1, 9)])
).all()
# test Fortran order
assert (
np.array(bounding_box.domain(0.25, "F"))
== np.array([np.linspace(-1, 1, 9), np.linspace(0, 2, 9)])
).all()
# test error order
MESSAGE = r"order must be either 'C' .* or 'F' .*, got: .*"
with pytest.raises(ValueError, match=MESSAGE):
bounding_box.domain(0.25, mk.MagicMock())
def test__outside(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Normal array input, all inside
x = np.linspace(-1, 1, 13)
y = np.linspace(0, 2, 13)
input_shape = x.shape
inputs = (x, y)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
assert (outside_index == [False for _ in range(13)]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, some inside and some outside
x = np.linspace(-2, 1, 13)
y = np.linspace(0, 3, 13)
input_shape = x.shape
inputs = (x, y)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
# fmt: off
assert (
outside_index
== [
True, True, True, True,
False, False, False, False, False,
True, True, True, True
]
).all()
# fmt: on
assert not all_out and isinstance(all_out, bool)
# Normal array input, all outside
x = np.linspace(2, 3, 13)
y = np.linspace(-2, -1, 13)
input_shape = x.shape
inputs = (x, y)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
assert (outside_index == [True for _ in range(13)]).all()
assert all_out and isinstance(all_out, bool)
# Scalar input inside bounding_box
inputs = (0.5, 0.5)
input_shape = (1,)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
assert (outside_index == [False]).all()
assert not all_out and isinstance(all_out, bool)
# Scalar input outside bounding_box
inputs = (2, -1)
input_shape = (1,)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
assert (outside_index == [True]).all()
assert all_out and isinstance(all_out, bool)
def test__valid_index(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Normal array input, all inside
x = np.linspace(-1, 1, 13)
y = np.linspace(0, 2, 13)
input_shape = x.shape
inputs = (x, y)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
assert (valid_index[0] == list(range(13))).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, some inside and some outside
x = np.linspace(-2, 1, 13)
y = np.linspace(0, 3, 13)
input_shape = x.shape
inputs = (x, y)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
assert (valid_index[0] == [4, 5, 6, 7, 8]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, all outside
x = np.linspace(2, 3, 13)
y = np.linspace(-2, -1, 13)
input_shape = x.shape
inputs = (x, y)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
assert (valid_index[0] == []).all()
assert all_out and isinstance(all_out, bool)
# Scalar input inside bounding_box
inputs = (0.5, 0.5)
input_shape = (1,)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
assert (valid_index[0] == [0]).all()
assert not all_out and isinstance(all_out, bool)
# Scalar input outside bounding_box
inputs = (2, -1)
input_shape = (1,)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
assert (valid_index[0] == []).all()
assert all_out and isinstance(all_out, bool)
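    # Illustrative note, hedged: the assertions above are consistent with
    # ``_valid_index`` being the complement of ``_outside`` for the same
    # inputs (the actual implementation may differ), roughly:
    #
    #     outside_index, all_out = bounding_box._outside(input_shape, inputs)
    #     valid_index = np.where(np.logical_not(outside_index))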
def test_prepare_inputs(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Normal array input, all inside
x = np.linspace(-1, 1, 13)
y = np.linspace(0, 2, 13)
input_shape = x.shape
inputs = (x, y)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(
input_shape, inputs
)
assert (np.array(new_inputs) == np.array(inputs)).all()
assert len(valid_index) == 1
assert (valid_index[0] == list(range(13))).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, some inside and some outside
x = np.linspace(-2, 1, 13)
y = np.linspace(0, 3, 13)
input_shape = x.shape
inputs = (x, y)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(
input_shape, inputs
)
assert (
np.array(new_inputs)
== np.array(
[
[x[4], x[5], x[6], x[7], x[8]],
[y[4], y[5], y[6], y[7], y[8]],
]
)
).all()
assert len(valid_index) == 1
assert (valid_index[0] == [4, 5, 6, 7, 8]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, all outside
x = np.linspace(2, 3, 13)
y = np.linspace(-2, -1, 13)
input_shape = x.shape
inputs = (x, y)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(
input_shape, inputs
)
assert new_inputs == ()
assert len(valid_index) == 1
assert (valid_index[0] == []).all()
assert all_out and isinstance(all_out, bool)
# Scalar input inside bounding_box
inputs = (0.5, 0.5)
input_shape = (1,)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(
input_shape, inputs
)
assert (np.array(new_inputs) == np.array([[0.5], [0.5]])).all()
assert len(valid_index) == 1
assert (valid_index[0] == [0]).all()
assert not all_out and isinstance(all_out, bool)
# Scalar input outside bounding_box
inputs = (2, -1)
input_shape = (1,)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(
input_shape, inputs
)
assert new_inputs == ()
assert len(valid_index) == 1
assert (valid_index[0] == []).all()
assert all_out and isinstance(all_out, bool)
def test_bounding_box_ignore(self):
"""Regression test for #13028"""
bbox_x = ModelBoundingBox((9, 10), Polynomial2D(1), ignored=["x"])
assert bbox_x.ignored_inputs == ["x"]
bbox_y = ModelBoundingBox((11, 12), Polynomial2D(1), ignored=["y"])
assert bbox_y.ignored_inputs == ["y"]
class Test_SelectorArgument:
def test_create(self):
index = mk.MagicMock()
ignore = mk.MagicMock()
argument = _SelectorArgument(index, ignore)
assert isinstance(argument, _BaseSelectorArgument)
assert argument.index == index
assert argument.ignore == ignore
assert argument == (index, ignore)
def test_validate(self):
model = Gaussian2D()
# default integer
assert _SelectorArgument.validate(model, 0) == (0, True)
assert _SelectorArgument.validate(model, 1) == (1, True)
# default string
assert _SelectorArgument.validate(model, "x") == (0, True)
assert _SelectorArgument.validate(model, "y") == (1, True)
ignore = mk.MagicMock()
# non-default integer
assert _SelectorArgument.validate(model, 0, ignore) == (0, ignore)
assert _SelectorArgument.validate(model, 1, ignore) == (1, ignore)
# non-default string
assert _SelectorArgument.validate(model, "x", ignore) == (0, ignore)
assert _SelectorArgument.validate(model, "y", ignore) == (1, ignore)
# Fail
with pytest.raises(ValueError, match=r"'.*' is not one of the inputs: .*"):
_SelectorArgument.validate(model, "z")
with pytest.raises(
ValueError, match=r"Key value: .* must be string or integer."
):
_SelectorArgument.validate(model, mk.MagicMock())
with pytest.raises(
IndexError, match=r"Integer key: .* must be non-negative and < .*"
):
_SelectorArgument.validate(model, 2)
def test_get_selector(self):
# single inputs
inputs = [idx + 17 for idx in range(3)]
for index in range(3):
assert (
_SelectorArgument(index, mk.MagicMock()).get_selector(*inputs)
== inputs[index]
)
# numpy array of single inputs
inputs = [np.array([idx + 11]) for idx in range(3)]
for index in range(3):
assert (
_SelectorArgument(index, mk.MagicMock()).get_selector(*inputs)
== inputs[index]
)
inputs = [np.asanyarray(idx + 13) for idx in range(3)]
for index in range(3):
assert (
_SelectorArgument(index, mk.MagicMock()).get_selector(*inputs)
== inputs[index]
)
# multi entry numpy array
inputs = [np.array([idx + 27, idx - 31]) for idx in range(3)]
for index in range(3):
assert _SelectorArgument(index, mk.MagicMock()).get_selector(
*inputs
) == tuple(inputs[index])
def test_name(self):
model = Gaussian2D()
for index in range(model.n_inputs):
assert (
_SelectorArgument(index, mk.MagicMock()).name(model)
== model.inputs[index]
)
def test_pretty_repr(self):
model = Gaussian2D()
assert (
_SelectorArgument(0, False).pretty_repr(model)
== "Argument(name='x', ignore=False)"
)
assert (
_SelectorArgument(0, True).pretty_repr(model)
== "Argument(name='x', ignore=True)"
)
assert (
_SelectorArgument(1, False).pretty_repr(model)
== "Argument(name='y', ignore=False)"
)
assert (
_SelectorArgument(1, True).pretty_repr(model)
== "Argument(name='y', ignore=True)"
)
def test_get_fixed_value(self):
model = Gaussian2D()
values = {0: 5, "y": 7}
# Get index value
assert _SelectorArgument(0, mk.MagicMock()).get_fixed_value(model, values) == 5
# Get name value
assert _SelectorArgument(1, mk.MagicMock()).get_fixed_value(model, values) == 7
# Fail
MESSAGE = r".* was not found in .*"
        with pytest.raises(RuntimeError, match=MESSAGE):
_SelectorArgument(1, True).get_fixed_value(model, {0: 5})
def test_is_argument(self):
model = Gaussian2D()
argument = _SelectorArgument.validate(model, 0)
# Is true
assert argument.is_argument(model, 0) is True
assert argument.is_argument(model, "x") is True
# Is false
assert argument.is_argument(model, 1) is False
assert argument.is_argument(model, "y") is False
# Fail
with pytest.raises(ValueError, match=r"'.*' is not one of the inputs: .*"):
argument.is_argument(model, "z")
with pytest.raises(
ValueError, match=r"Key value: .* must be string or integer"
):
argument.is_argument(model, mk.MagicMock())
with pytest.raises(
IndexError, match=r"Integer key: .* must be non-negative and < .*"
):
argument.is_argument(model, 2)
def test_named_tuple(self):
model = Gaussian2D()
for index in range(model.n_inputs):
ignore = mk.MagicMock()
assert _SelectorArgument(index, ignore).named_tuple(model) == (
model.inputs[index],
ignore,
)
class Test_SelectorArguments:
def test_create(self):
arguments = _SelectorArguments(
(_SelectorArgument(0, True), _SelectorArgument(1, False))
)
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, False))
assert arguments._kept_ignore == []
kept_ignore = mk.MagicMock()
arguments = _SelectorArguments(
(_SelectorArgument(0, True), _SelectorArgument(1, False)), kept_ignore
)
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, False))
assert arguments._kept_ignore == kept_ignore
def test_pretty_repr(self):
model = Gaussian2D()
arguments = _SelectorArguments(
(_SelectorArgument(0, True), _SelectorArgument(1, False))
)
assert (
arguments.pretty_repr(model) == "SelectorArguments(\n"
" Argument(name='x', ignore=True)\n"
" Argument(name='y', ignore=False)\n"
")"
)
def test_ignore(self):
assert _SelectorArguments(
(_SelectorArgument(0, True), _SelectorArgument(1, True))
).ignore == [0, 1]
assert _SelectorArguments(
(_SelectorArgument(0, True), _SelectorArgument(1, True)), [13, 4]
).ignore == [0, 1, 13, 4]
assert _SelectorArguments(
(_SelectorArgument(0, True), _SelectorArgument(1, False))
).ignore == [0]
assert _SelectorArguments(
(_SelectorArgument(0, False), _SelectorArgument(1, True))
).ignore == [1]
assert (
_SelectorArguments(
(_SelectorArgument(0, False), _SelectorArgument(1, False))
).ignore
== []
)
assert _SelectorArguments(
(_SelectorArgument(0, False), _SelectorArgument(1, False)), [17, 14]
).ignore == [17, 14]
def test_validate(self):
# Integer key and passed ignore
arguments = _SelectorArguments.validate(Gaussian2D(), ((0, True), (1, False)))
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, False))
assert arguments.kept_ignore == []
# Default ignore
arguments = _SelectorArguments.validate(Gaussian2D(), ((0,), (1,)))
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, True))
assert arguments.kept_ignore == []
# String key and passed ignore
arguments = _SelectorArguments.validate(
Gaussian2D(), (("x", True), ("y", False))
)
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, False))
assert arguments.kept_ignore == []
# Test kept_ignore option
new_arguments = _SelectorArguments.validate(Gaussian2D(), arguments, [11, 5, 8])
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((0, True), (1, False))
assert new_arguments.kept_ignore == [11, 5, 8]
arguments._kept_ignore = [13, 17, 14]
new_arguments = _SelectorArguments.validate(Gaussian2D(), arguments)
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((0, True), (1, False))
assert new_arguments.kept_ignore == [13, 17, 14]
# Invalid, bad argument
with pytest.raises(ValueError, match=r"'.*' is not one of the inputs: .*"):
_SelectorArguments.validate(Gaussian2D(), ((0, True), ("z", False)))
with pytest.raises(
ValueError, match=r"Key value: .* must be string or integer"
):
_SelectorArguments.validate(
Gaussian2D(), ((mk.MagicMock(), True), (1, False))
)
with pytest.raises(
IndexError, match=r"Integer key: .* must be non-negative and < .*"
):
_SelectorArguments.validate(Gaussian2D(), ((0, True), (2, False)))
# Invalid, repeated argument
with pytest.raises(ValueError, match=r"Input: 'x' has been repeated"):
_SelectorArguments.validate(Gaussian2D(), ((0, True), (0, False)))
# Invalid, no arguments
with pytest.raises(
ValueError, match=r"There must be at least one selector argument"
):
_SelectorArguments.validate(Gaussian2D(), ())
def test_get_selector(self):
inputs = [idx + 19 for idx in range(4)]
assert _SelectorArguments.validate(
Gaussian2D(), ((0, True), (1, False))
).get_selector(*inputs) == tuple(inputs[:2])
assert _SelectorArguments.validate(
Gaussian2D(), ((1, True), (0, False))
).get_selector(*inputs) == tuple(inputs[:2][::-1])
assert _SelectorArguments.validate(Gaussian2D(), ((1, False),)).get_selector(
*inputs
) == (inputs[1],)
assert _SelectorArguments.validate(Gaussian2D(), ((0, True),)).get_selector(
*inputs
) == (inputs[0],)
def test_is_selector(self):
# Is Selector
assert _SelectorArguments.validate(
Gaussian2D(), ((0, True), (1, False))
).is_selector((0.5, 2.5))
assert _SelectorArguments.validate(Gaussian2D(), ((0, True),)).is_selector(
(0.5,)
)
# Is not selector
assert not _SelectorArguments.validate(
Gaussian2D(), ((0, True), (1, False))
).is_selector((0.5, 2.5, 3.5))
assert not _SelectorArguments.validate(
Gaussian2D(), ((0, True), (1, False))
).is_selector((0.5,))
assert not _SelectorArguments.validate(
Gaussian2D(), ((0, True), (1, False))
).is_selector(0.5)
assert not _SelectorArguments.validate(Gaussian2D(), ((0, True),)).is_selector(
(0.5, 2.5)
)
assert not _SelectorArguments.validate(Gaussian2D(), ((0, True),)).is_selector(
2.5
)
def test_get_fixed_values(self):
model = Gaussian2D()
assert _SelectorArguments.validate(
model, ((0, True), (1, False))
).get_fixed_values(model, {0: 11, 1: 7}) == (11, 7)
assert _SelectorArguments.validate(
model, ((0, True), (1, False))
).get_fixed_values(model, {0: 5, "y": 47}) == (5, 47)
assert _SelectorArguments.validate(
model, ((0, True), (1, False))
).get_fixed_values(model, {"x": 2, "y": 9}) == (2, 9)
assert _SelectorArguments.validate(
model, ((0, True), (1, False))
).get_fixed_values(model, {"x": 12, 1: 19}) == (12, 19)
def test_is_argument(self):
model = Gaussian2D()
# Is true
arguments = _SelectorArguments.validate(model, ((0, True), (1, False)))
assert arguments.is_argument(model, 0) is True
assert arguments.is_argument(model, "x") is True
assert arguments.is_argument(model, 1) is True
assert arguments.is_argument(model, "y") is True
# Is true and false
arguments = _SelectorArguments.validate(model, ((0, True),))
assert arguments.is_argument(model, 0) is True
assert arguments.is_argument(model, "x") is True
assert arguments.is_argument(model, 1) is False
assert arguments.is_argument(model, "y") is False
arguments = _SelectorArguments.validate(model, ((1, False),))
assert arguments.is_argument(model, 0) is False
assert arguments.is_argument(model, "x") is False
assert arguments.is_argument(model, 1) is True
assert arguments.is_argument(model, "y") is True
def test_selector_index(self):
model = Gaussian2D()
arguments = _SelectorArguments.validate(model, ((0, True), (1, False)))
assert arguments.selector_index(model, 0) == 0
assert arguments.selector_index(model, "x") == 0
assert arguments.selector_index(model, 1) == 1
assert arguments.selector_index(model, "y") == 1
arguments = _SelectorArguments.validate(model, ((1, True), (0, False)))
assert arguments.selector_index(model, 0) == 1
assert arguments.selector_index(model, "x") == 1
assert arguments.selector_index(model, 1) == 0
assert arguments.selector_index(model, "y") == 0
# Error
arguments = _SelectorArguments.validate(model, ((0, True),))
with pytest.raises(
ValueError, match=r"y does not correspond to any selector argument"
):
arguments.selector_index(model, "y")
def test_add_ignore(self):
model = Gaussian2D()
arguments = _SelectorArguments.validate(model, ((0, True),))
assert arguments == ((0, True),)
assert arguments._kept_ignore == []
new_arguments0 = arguments.add_ignore(model, 1)
assert new_arguments0 == arguments
assert new_arguments0._kept_ignore == [1]
assert arguments._kept_ignore == []
new_arguments1 = new_arguments0.add_ignore(model, "y")
assert new_arguments1 == arguments == new_arguments0
assert new_arguments0._kept_ignore == [1]
assert new_arguments1._kept_ignore == [1, 1]
assert arguments._kept_ignore == []
# Error
with pytest.raises(
ValueError, match=r"0: is a selector argument and cannot be ignored"
):
arguments.add_ignore(model, 0)
def test_reduce(self):
model = Gaussian2D()
arguments = _SelectorArguments.validate(model, ((0, True), (1, False)))
new_arguments = arguments.reduce(model, 0)
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((1, False),)
assert new_arguments._kept_ignore == [0]
assert arguments._kept_ignore == []
new_arguments = arguments.reduce(model, "x")
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((1, False),)
assert new_arguments._kept_ignore == [0]
assert arguments._kept_ignore == []
new_arguments = arguments.reduce(model, 1)
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((0, True),)
assert new_arguments._kept_ignore == [1]
assert arguments._kept_ignore == []
new_arguments = arguments.reduce(model, "y")
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((0, True),)
assert new_arguments._kept_ignore == [1]
assert arguments._kept_ignore == []
def test_named_tuple(self):
model = Gaussian2D()
arguments = _SelectorArguments.validate(model, ((0, True), (1, False)))
assert arguments.named_tuple(model) == (("x", True), ("y", False))
class TestCompoundBoundingBox:
def test_create(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
create_selector = mk.MagicMock()
bounding_box = CompoundBoundingBox(
bounding_boxes, model, selector_args, create_selector, order="F"
)
assert (bounding_box._model.parameters == model.parameters).all()
assert bounding_box._selector_args == selector_args
for _selector, bbox in bounding_boxes.items():
assert _selector in bounding_box._bounding_boxes
assert bounding_box._bounding_boxes[_selector] == bbox
for _selector, bbox in bounding_box._bounding_boxes.items():
assert _selector in bounding_boxes
assert bounding_boxes[_selector] == bbox
assert isinstance(bbox, ModelBoundingBox)
assert bounding_box._bounding_boxes == bounding_boxes
assert bounding_box._create_selector == create_selector
assert bounding_box._order == "F"
def test_copy(self):
bounding_box = CompoundBoundingBox.validate(
Gaussian2D(),
{(1,): (-1.5, 1.3), (2,): (-2.7, 2.4)},
((0, True),),
mk.MagicMock(),
)
copy = bounding_box.copy()
assert bounding_box == copy
assert id(bounding_box) != id(copy)
# model is not copied to prevent infinite recursion
assert bounding_box._model == copy._model
assert id(bounding_box._model) == id(copy._model)
        # Same string values will have the same id
assert bounding_box._order == copy._order
assert id(bounding_box._order) == id(copy._order)
assert bounding_box._create_selector == copy._create_selector
assert id(bounding_box._create_selector) != id(copy._create_selector)
# Check selector_args
for index, argument in enumerate(bounding_box.selector_args):
assert argument == copy.selector_args[index]
assert id(argument) != id(copy.selector_args[index])
            # Same integer values will have the same id
assert argument.index == copy.selector_args[index].index
assert id(argument.index) == id(copy.selector_args[index].index)
            # Same boolean values will have the same id
assert argument.ignore == copy.selector_args[index].ignore
assert id(argument.ignore) == id(copy.selector_args[index].ignore)
assert len(bounding_box.selector_args) == len(copy.selector_args)
# Check bounding_boxes
for selector, bbox in bounding_box.bounding_boxes.items():
assert bbox == copy.bounding_boxes[selector]
assert id(bbox) != id(copy.bounding_boxes[selector])
assert bbox.ignored == copy.bounding_boxes[selector].ignored
assert id(bbox.ignored) != id(copy.bounding_boxes[selector].ignored)
# model is not copied to prevent infinite recursion
assert bbox._model == copy.bounding_boxes[selector]._model
assert id(bbox._model) == id(copy.bounding_boxes[selector]._model)
            # Same string values will have the same id
assert bbox._order == copy.bounding_boxes[selector]._order
assert id(bbox._order) == id(copy.bounding_boxes[selector]._order)
# Check interval objects
for index, interval in bbox.intervals.items():
assert interval == copy.bounding_boxes[selector].intervals[index]
assert id(interval) != id(
copy.bounding_boxes[selector].intervals[index]
)
                # Same float values will have the same id
assert (
interval.lower
== copy.bounding_boxes[selector].intervals[index].lower
)
assert id(interval.lower) == id(
copy.bounding_boxes[selector].intervals[index].lower
)
                # Same float values will have the same id
assert (
interval.upper
== copy.bounding_boxes[selector].intervals[index].upper
)
assert id(interval.upper) == id(
copy.bounding_boxes[selector].intervals[index].upper
)
assert len(bbox.intervals) == len(copy.bounding_boxes[selector].intervals)
assert (
bbox.intervals.keys() == copy.bounding_boxes[selector].intervals.keys()
)
assert len(bounding_box.bounding_boxes) == len(copy.bounding_boxes)
assert bounding_box.bounding_boxes.keys() == copy.bounding_boxes.keys()
def test___repr__(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
assert (
bounding_box.__repr__() == "CompoundBoundingBox(\n"
" bounding_boxes={\n"
" (1,) = ModelBoundingBox(\n"
" intervals={\n"
" y: Interval(lower=-1, upper=1)\n"
" }\n"
" ignored=['x']\n"
" model=Gaussian2D(inputs=('x', 'y'))\n"
" order='C'\n"
" )\n"
" (2,) = ModelBoundingBox(\n"
" intervals={\n"
" y: Interval(lower=-2, upper=2)\n"
" }\n"
" ignored=['x']\n"
" model=Gaussian2D(inputs=('x', 'y'))\n"
" order='C'\n"
" )\n"
" }\n"
" selector_args = SelectorArguments(\n"
" Argument(name='x', ignore=True)\n"
" )\n"
")"
)
def test_bounding_boxes(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
assert bounding_box._bounding_boxes == bounding_boxes
assert bounding_box.bounding_boxes == bounding_boxes
def test_selector_args(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_box = CompoundBoundingBox({}, model, selector_args)
# Get
assert bounding_box._selector_args == selector_args
assert bounding_box.selector_args == selector_args
# Set
selector_args = ((1, False),)
with pytest.warns(RuntimeWarning, match=r"Overriding selector_args.*"):
bounding_box.selector_args = selector_args
assert bounding_box._selector_args == selector_args
assert bounding_box.selector_args == selector_args
def test_create_selector(self):
model = Gaussian2D()
create_selector = mk.MagicMock()
bounding_box = CompoundBoundingBox({}, model, ((1,),), create_selector)
assert bounding_box._create_selector == create_selector
assert bounding_box.create_selector == create_selector
def test__get_selector_key(self):
bounding_box = CompoundBoundingBox({}, Gaussian2D(), ((1, True),))
assert len(bounding_box.bounding_boxes) == 0
# Singular
assert bounding_box._get_selector_key(5) == (5,)
assert bounding_box._get_selector_key((5,)) == (5,)
assert bounding_box._get_selector_key([5]) == (5,)
assert bounding_box._get_selector_key(np.asanyarray(5)) == (5,)
assert bounding_box._get_selector_key(np.array([5])) == (5,)
# multiple
assert bounding_box._get_selector_key((5, 19)) == (5, 19)
assert bounding_box._get_selector_key([5, 19]) == (5, 19)
assert bounding_box._get_selector_key(np.array([5, 19])) == (5, 19)
def test___setitem__(self):
model = Gaussian2D()
# Ignored argument
bounding_box = CompoundBoundingBox({}, model, ((1, True),), order="F")
assert len(bounding_box.bounding_boxes) == 0
# Valid
bounding_box[(15,)] = (-15, 15)
assert len(bounding_box.bounding_boxes) == 1
assert (15,) in bounding_box._bounding_boxes
assert isinstance(bounding_box._bounding_boxes[(15,)], ModelBoundingBox)
assert bounding_box._bounding_boxes[(15,)] == (-15, 15)
assert bounding_box._bounding_boxes[(15,)].order == "F"
# Invalid key
assert (7, 13) not in bounding_box._bounding_boxes
with pytest.raises(ValueError, match=".* is not a selector!"):
bounding_box[(7, 13)] = (-7, 7)
assert (7, 13) not in bounding_box._bounding_boxes
assert len(bounding_box.bounding_boxes) == 1
# Invalid bounding box
assert 13 not in bounding_box._bounding_boxes
with pytest.raises(
ValueError, match="An interval must be some sort of sequence of length 2"
):
bounding_box[(13,)] = ((-13, 13), (-3, 3))
assert 13 not in bounding_box._bounding_boxes
assert len(bounding_box.bounding_boxes) == 1
# No ignored argument
bounding_box = CompoundBoundingBox({}, model, ((1, False),), order="F")
assert len(bounding_box.bounding_boxes) == 0
# Valid
bounding_box[(15,)] = ((-15, 15), (-6, 6))
assert len(bounding_box.bounding_boxes) == 1
assert (15,) in bounding_box._bounding_boxes
assert isinstance(bounding_box._bounding_boxes[(15,)], ModelBoundingBox)
assert bounding_box._bounding_boxes[(15,)] == ((-15, 15), (-6, 6))
assert bounding_box._bounding_boxes[(15,)].order == "F"
# Invalid key
assert (14, 11) not in bounding_box._bounding_boxes
with pytest.raises(ValueError, match=".* is not a selector!"):
bounding_box[(14, 11)] = ((-7, 7), (-12, 12))
assert (14, 11) not in bounding_box._bounding_boxes
assert len(bounding_box.bounding_boxes) == 1
# Invalid bounding box
assert 13 not in bounding_box._bounding_boxes
with pytest.raises(
ValueError, match="An interval must be some sort of sequence of length 2"
):
bounding_box[(13,)] = (-13, 13)
assert 13 not in bounding_box._bounding_boxes
assert len(bounding_box.bounding_boxes) == 1
def test__validate(self):
model = Gaussian2D()
selector_args = ((0, True),)
# Tuple selector_args
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox({}, model, selector_args)
bounding_box._validate(bounding_boxes)
for _selector, bbox in bounding_boxes.items():
assert _selector in bounding_box._bounding_boxes
assert bounding_box._bounding_boxes[_selector] == bbox
for _selector, bbox in bounding_box._bounding_boxes.items():
assert _selector in bounding_boxes
assert bounding_boxes[_selector] == bbox
assert isinstance(bbox, ModelBoundingBox)
assert bounding_box._bounding_boxes == bounding_boxes
def test___eq__(self):
bounding_box_1 = CompoundBoundingBox(
{(1,): (-1, 1), (2,): (-2, 2)}, Gaussian2D(), ((0, True),)
)
bounding_box_2 = CompoundBoundingBox(
{(1,): (-1, 1), (2,): (-2, 2)}, Gaussian2D(), ((0, True),)
)
# Equal
assert bounding_box_1 == bounding_box_2
# Not equal to non-compound bounding_box
assert not bounding_box_1 == mk.MagicMock()
assert not bounding_box_2 == mk.MagicMock()
# Not equal bounding_boxes
bounding_box_2[(15,)] = (-15, 15)
assert not bounding_box_1 == bounding_box_2
del bounding_box_2._bounding_boxes[(15,)]
assert bounding_box_1 == bounding_box_2
# Not equal selector_args
bounding_box_2._selector_args = _SelectorArguments.validate(
Gaussian2D(), ((0, False),)
)
assert not bounding_box_1 == bounding_box_2
bounding_box_2._selector_args = _SelectorArguments.validate(
Gaussian2D(), ((0, True),)
)
assert bounding_box_1 == bounding_box_2
# Not equal create_selector
bounding_box_2._create_selector = mk.MagicMock()
assert not bounding_box_1 == bounding_box_2
def test_validate(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
create_selector = mk.MagicMock()
# Fail selector_args
MESSAGE = r"Selector arguments must be provided .*"
with pytest.raises(ValueError, match=MESSAGE):
CompoundBoundingBox.validate(model, bounding_boxes)
# Normal validate
bounding_box = CompoundBoundingBox.validate(
model, bounding_boxes, selector_args, create_selector, order="F"
)
assert (bounding_box._model.parameters == model.parameters).all()
assert bounding_box._selector_args == selector_args
assert bounding_box._bounding_boxes == bounding_boxes
assert bounding_box._create_selector == create_selector
assert bounding_box._order == "F"
# Re-validate
new_bounding_box = CompoundBoundingBox.validate(model, bounding_box)
assert bounding_box == new_bounding_box
assert new_bounding_box._order == "F"
# Default order
bounding_box = CompoundBoundingBox.validate(
model, bounding_boxes, selector_args, create_selector
)
assert (bounding_box._model.parameters == model.parameters).all()
assert bounding_box._selector_args == selector_args
assert bounding_box._bounding_boxes == bounding_boxes
assert bounding_box._create_selector == create_selector
assert bounding_box._order == "C"
def test___contains__(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
assert (1,) in bounding_box
assert (2,) in bounding_box
assert (3,) not in bounding_box
assert 1 not in bounding_box
assert 2 not in bounding_box
def test__create_bounding_box(self):
model = Gaussian2D()
create_selector = mk.MagicMock()
bounding_box = CompoundBoundingBox({}, model, ((1, False),), create_selector)
# Create is successful
create_selector.return_value = ((-15, 15), (-23, 23))
assert len(bounding_box._bounding_boxes) == 0
bbox = bounding_box._create_bounding_box((7,))
assert isinstance(bbox, ModelBoundingBox)
assert bbox == ((-15, 15), (-23, 23))
assert len(bounding_box._bounding_boxes) == 1
assert (7,) in bounding_box
assert isinstance(bounding_box[(7,)], ModelBoundingBox)
assert bounding_box[(7,)] == bbox
# Create is unsuccessful
create_selector.return_value = (-42, 42)
with pytest.raises(
ValueError, match="An interval must be some sort of sequence of length 2"
):
bounding_box._create_bounding_box((27,))
def test___getitem__(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
# already exists
assert isinstance(bounding_box[1], ModelBoundingBox)
assert bounding_box[1] == (-1, 1)
assert isinstance(bounding_box[(2,)], ModelBoundingBox)
assert bounding_box[2] == (-2, 2)
assert isinstance(bounding_box[(1,)], ModelBoundingBox)
assert bounding_box[(1,)] == (-1, 1)
assert isinstance(bounding_box[(2,)], ModelBoundingBox)
assert bounding_box[(2,)] == (-2, 2)
# no selector
with pytest.raises(
RuntimeError, match="No bounding box is defined for selector: .*"
):
bounding_box[(3,)]
# Create a selector
bounding_box._create_selector = mk.MagicMock()
with mk.patch.object(
CompoundBoundingBox, "_create_bounding_box", autospec=True
) as mkCreate:
assert bounding_box[(3,)] == mkCreate.return_value
assert mkCreate.call_args_list == [mk.call(bounding_box, (3,))]
def test__select_bounding_box(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
inputs = [mk.MagicMock() for _ in range(3)]
with mk.patch.object(
_SelectorArguments, "get_selector", autospec=True
) as mkSelector:
with mk.patch.object(
CompoundBoundingBox, "__getitem__", autospec=True
) as mkGet:
assert bounding_box._select_bounding_box(inputs) == mkGet.return_value
assert mkGet.call_args_list == [
mk.call(bounding_box, mkSelector.return_value)
]
assert mkSelector.call_args_list == [
mk.call(bounding_box.selector_args, *inputs)
]
def test_prepare_inputs(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
input_shape = mk.MagicMock()
with mk.patch.object(
ModelBoundingBox, "prepare_inputs", autospec=True
) as mkPrepare:
assert (
bounding_box.prepare_inputs(input_shape, [1, 2, 3])
== mkPrepare.return_value
)
assert mkPrepare.call_args_list == [
mk.call(bounding_box[(1,)], input_shape, [1, 2, 3])
]
mkPrepare.reset_mock()
assert (
bounding_box.prepare_inputs(input_shape, [2, 2, 3])
== mkPrepare.return_value
)
assert mkPrepare.call_args_list == [
mk.call(bounding_box[(2,)], input_shape, [2, 2, 3])
]
mkPrepare.reset_mock()
def test__matching_bounding_boxes(self):
# Single selector index
selector_args = ((0, False),)
bounding_boxes = {
(1,): ((-1, 1), (-2, 2)),
(2,): ((-2, 2), (-3, 3)),
(3,): ((-3, 3), (-4, 4)),
}
bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args)
for value in [1, 2, 3]:
matching = bounding_box._matching_bounding_boxes("x", value)
assert isinstance(matching, dict)
assert () in matching
bbox = matching[()]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert "x" in bbox
assert "x" in bbox.ignored_inputs
assert "y" in bbox
assert bbox["y"] == (-value, value)
assert len(bbox.intervals) == 1
assert bbox.ignored == [0]
# Multiple selector index
selector_args = ((0, False), (1, False))
bounding_boxes = {
(1, 3): ((-1, 1), (-2, 2)),
(2, 2): ((-2, 2), (-3, 3)),
(3, 1): ((-3, 3), (-4, 4)),
}
bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args)
for value in [1, 2, 3]:
matching = bounding_box._matching_bounding_boxes("x", value)
assert isinstance(matching, dict)
assert (4 - value,) in matching
bbox = matching[(4 - value,)]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert "x" in bbox
assert "x" in bbox.ignored_inputs
assert "y" in bbox
assert bbox["y"] == (-value, value)
assert len(bbox.intervals) == 1
assert bbox.ignored == [0]
matching = bounding_box._matching_bounding_boxes("y", value)
assert isinstance(matching, dict)
assert (4 - value,) in matching
bbox = matching[(4 - value,)]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert "y" in bbox
assert "y" in bbox.ignored_inputs
assert "x" in bbox
assert bbox["x"] == (-(5 - value), (5 - value))
assert len(bbox.intervals) == 1
assert bbox.ignored == [1]
# Real fix input of slicing input
model = Shift(1) & Scale(2) & Identity(1)
model.inputs = ("x", "y", "slit_id")
bounding_boxes = {
(0,): ((-0.5, 1047.5), (-0.5, 2047.5)),
(1,): ((-0.5, 3047.5), (-0.5, 4047.5)),
}
bounding_box = CompoundBoundingBox.validate(
model, bounding_boxes, selector_args=[("slit_id", True)], order="F"
)
matching = bounding_box._matching_bounding_boxes("slit_id", 0)
assert isinstance(matching, dict)
assert () in matching
bbox = matching[()]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.ignored_inputs == ["slit_id"]
assert bbox.named_intervals == {"x": (-0.5, 1047.5), "y": (-0.5, 2047.5)}
assert bbox.order == "F"
matching = bounding_box._matching_bounding_boxes("slit_id", 1)
assert isinstance(matching, dict)
assert () in matching
bbox = matching[()]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.ignored_inputs == ["slit_id"]
assert bbox.named_intervals == {"x": (-0.5, 3047.5), "y": (-0.5, 4047.5)}
assert bbox.order == "F"
# Errors
MESSAGE = (
r"Attempting to fix input .*, but there are no bounding boxes for argument"
r" value .*"
)
with pytest.raises(ValueError, match=MESSAGE):
bounding_box._matching_bounding_boxes("slit_id", 2)
def test__fix_input_selector_arg(self):
# Single selector index
selector_args = ((0, False),)
bounding_boxes = {
(1,): ((-1, 1), (-2, 2)),
(2,): ((-2, 2), (-3, 3)),
(3,): ((-3, 3), (-4, 4)),
}
bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args)
for value in [1, 2, 3]:
bbox = bounding_box._fix_input_selector_arg("x", value)
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert "x" in bbox
assert "x" in bbox.ignored_inputs
assert "y" in bbox
assert bbox["y"] == (-value, value)
assert len(bbox.intervals) == 1
assert bbox.ignored == [0]
# Multiple selector index
selector_args = ((0, False), (1, False))
bounding_boxes = {
(1, 3): ((-1, 1), (-2, 2)),
(2, 2): ((-2, 2), (-3, 3)),
(3, 1): ((-3, 3), (-4, 4)),
}
bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args)
for value in [1, 2, 3]:
bbox = bounding_box._fix_input_selector_arg("x", value)
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert bbox.selector_args == ((1, False),)
assert (4 - value,) in bbox
bbox_selector = bbox[(4 - value,)]
assert isinstance(bbox_selector, ModelBoundingBox)
assert (bbox_selector._model.parameters == Gaussian2D().parameters).all()
assert "x" in bbox_selector
assert "x" in bbox_selector.ignored_inputs
assert "y" in bbox_selector
assert bbox_selector["y"] == (-value, value)
assert len(bbox_selector.intervals) == 1
assert bbox_selector.ignored == [0]
bbox = bounding_box._fix_input_selector_arg("y", value)
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert bbox.selector_args == ((0, False),)
assert (4 - value,) in bbox
bbox_selector = bbox[(4 - value,)]
assert isinstance(bbox_selector, ModelBoundingBox)
assert (bbox_selector._model.parameters == Gaussian2D().parameters).all()
assert "y" in bbox_selector
assert "y" in bbox_selector.ignored_inputs
assert "x" in bbox_selector
assert bbox_selector["x"] == (-(5 - value), (5 - value))
assert len(bbox_selector.intervals) == 1
assert bbox_selector.ignored == [1]
# Real fix input of slicing input
model = Shift(1) & Scale(2) & Identity(1)
model.inputs = ("x", "y", "slit_id")
bounding_boxes = {
(0,): ((-0.5, 1047.5), (-0.5, 2047.5)),
(1,): ((-0.5, 3047.5), (-0.5, 4047.5)),
}
bounding_box = CompoundBoundingBox.validate(
model, bounding_boxes, selector_args=[("slit_id", True)], order="F"
)
bbox = bounding_box._fix_input_selector_arg("slit_id", 0)
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.ignored_inputs == ["slit_id"]
assert bbox.named_intervals == {"x": (-0.5, 1047.5), "y": (-0.5, 2047.5)}
assert bbox.order == "F"
bbox = bounding_box._fix_input_selector_arg("slit_id", 1)
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.ignored_inputs == ["slit_id"]
assert bbox.named_intervals == {"x": (-0.5, 3047.5), "y": (-0.5, 4047.5)}
assert bbox.order == "F"
def test__fix_input_bbox_arg(self):
model = Shift(1) & Scale(2) & Identity(1)
model.inputs = ("x", "y", "slit_id")
bounding_boxes = {
(0,): ((-0.5, 1047.5), (-0.5, 2047.5)),
(1,): ((-0.5, 3047.5), (-0.5, 4047.5)),
}
bounding_box = CompoundBoundingBox.validate(
model, bounding_boxes, selector_args=[("slit_id", True)], order="F"
)
bbox = bounding_box._fix_input_bbox_arg("x", 5)
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.selector_args == ((2, True),)
assert bbox.selector_args._kept_ignore == [0]
assert bbox._bounding_boxes[(0,)] == (-0.5, 2047.5)
assert bbox._bounding_boxes[(1,)] == (-0.5, 4047.5)
assert len(bbox._bounding_boxes) == 2
bbox = bounding_box._fix_input_bbox_arg("y", 5)
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.selector_args == ((2, True),)
assert bbox.selector_args._kept_ignore == [1]
assert bbox._bounding_boxes[(0,)] == (-0.5, 1047.5)
assert bbox._bounding_boxes[(1,)] == (-0.5, 3047.5)
assert len(bbox._bounding_boxes) == 2
def test_fix_inputs(self):
model = Shift(1) & Scale(2) & Identity(1)
model.inputs = ("x", "y", "slit_id")
bounding_boxes = {
(0,): ((-0.5, 1047.5), (-0.5, 2047.5)),
(1,): ((-0.5, 3047.5), (-0.5, 4047.5)),
}
bounding_box = CompoundBoundingBox.validate(
model, bounding_boxes, selector_args=[("slit_id", True)], order="F"
)
model.bounding_box = bounding_box
# Fix selector argument
new_model = fix_inputs(model, {"slit_id": 0})
bbox = new_model.bounding_box
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == new_model.parameters).all()
assert bbox.ignored_inputs == []
assert bbox.named_intervals == {"x": (-0.5, 1047.5), "y": (-0.5, 2047.5)}
assert bbox.order == "F"
# Fix a bounding_box field
new_model = fix_inputs(model, {"x": 5})
bbox = new_model.bounding_box
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.selector_args == ((1, True),)
assert bbox.selector_args._kept_ignore == []
assert bbox._bounding_boxes[(0,)] == (-0.5, 2047.5)
assert bbox._bounding_boxes[(0,)].order == "F"
assert bbox._bounding_boxes[(1,)] == (-0.5, 4047.5)
assert bbox._bounding_boxes[(1,)].order == "F"
assert len(bbox._bounding_boxes) == 2
new_model = fix_inputs(model, {"y": 5})
bbox = new_model.bounding_box
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.selector_args == ((1, True),)
assert bbox.selector_args._kept_ignore == []
assert bbox._bounding_boxes[(0,)] == (-0.5, 1047.5)
assert bbox._bounding_boxes[(0,)].order == "F"
assert bbox._bounding_boxes[(1,)] == (-0.5, 3047.5)
assert bbox._bounding_boxes[(1,)].order == "F"
assert len(bbox._bounding_boxes) == 2
# Fix selector argument and a bounding_box field
new_model = fix_inputs(model, {"slit_id": 0, "x": 5})
bbox = new_model.bounding_box
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == new_model.parameters).all()
assert bbox.ignored_inputs == []
assert bbox.named_intervals == {"y": (-0.5, 2047.5)}
assert bbox.order == "F"
new_model = fix_inputs(model, {"y": 5, "slit_id": 1})
bbox = new_model.bounding_box
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == new_model.parameters).all()
assert bbox.ignored_inputs == []
assert bbox.named_intervals == {"x": (-0.5, 3047.5)}
assert bbox.order == "F"
# Fix two bounding_box fields
new_model = fix_inputs(model, {"x": 5, "y": 7})
bbox = new_model.bounding_box
assert isinstance(bbox, CompoundBoundingBox)
assert bbox.selector_args == ((0, True),)
assert bbox.selector_args._kept_ignore == []
assert bbox._bounding_boxes[(0,)] == (-np.inf, np.inf)
assert bbox._bounding_boxes[(0,)].order == "F"
assert bbox._bounding_boxes[(1,)] == (-np.inf, np.inf)
assert bbox._bounding_boxes[(1,)].order == "F"
assert len(bbox._bounding_boxes) == 2
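    # Hedged summary of the interplay checked above: fixing the selector input
    # ("slit_id") collapses the compound box to the single ModelBoundingBox
    # stored under that key, while fixing a non-selector input keeps a
    # CompoundBoundingBox and only removes that input's interval from each
    # member box, e.g.:
    #
    #     >>> isinstance(fix_inputs(model, {"slit_id": 0}).bounding_box,
    #     ...            ModelBoundingBox)
    #     True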
def test_complex_compound_bounding_box(self):
model = Identity(4)
bounding_boxes = {
(2.5, 1.3): ((-1, 1), (-3, 3)),
(2.5, 2.71): ((-3, 3), (-1, 1)),
}
selector_args = (("x0", True), ("x1", True))
bbox = CompoundBoundingBox.validate(model, bounding_boxes, selector_args)
assert bbox[(2.5, 1.3)] == ModelBoundingBox(
((-1, 1), (-3, 3)), model, ignored=["x0", "x1"]
)
assert bbox[(2.5, 2.71)] == ModelBoundingBox(
((-3, 3), (-1, 1)), model, ignored=["x0", "x1"]
)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name, pointless-statement
import pickle
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
import astropy.units as u
from astropy.modeling.core import CompoundModel, Model, ModelDefinitionError
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.modeling.models import (
Chebyshev1D,
Chebyshev2D,
Const1D,
Gaussian1D,
Gaussian2D,
Identity,
Legendre1D,
Legendre2D,
Linear1D,
Mapping,
Polynomial1D,
Polynomial2D,
Rotation2D,
Scale,
Shift,
Tabular1D,
fix_inputs,
)
from astropy.modeling.parameters import Parameter
from astropy.utils.compat.optional_deps import HAS_SCIPY
@pytest.mark.parametrize(
("expr", "result"),
[
(lambda x, y: x + y, [5.0, 5.0]),
(lambda x, y: x - y, [-1.0, -1.0]),
(lambda x, y: x * y, [6.0, 6.0]),
(lambda x, y: x / y, [2.0 / 3.0, 2.0 / 3.0]),
(lambda x, y: x**y, [8.0, 8.0]),
],
)
def test_model_set(expr, result):
s = expr(Const1D((2, 2), n_models=2), Const1D((3, 3), n_models=2))
out = s(0, model_set_axis=False)
assert_array_equal(out, result)
@pytest.mark.parametrize(
("expr", "result"),
[
(lambda x, y: x + y, [5.0, 5.0]),
(lambda x, y: x - y, [-1.0, -1.0]),
(lambda x, y: x * y, [6.0, 6.0]),
(lambda x, y: x / y, [2.0 / 3.0, 2.0 / 3.0]),
(lambda x, y: x**y, [8.0, 8.0]),
],
)
def test_model_set_raises_value_error(expr, result):
"""Check that creating model sets with components whose _n_models are
different raise a value error
"""
MESSAGE = r"Both operands must have equal values for .*"
with pytest.raises(ValueError, match=MESSAGE):
expr(Const1D((2, 2), n_models=2), Const1D(3, n_models=1))
@pytest.mark.parametrize(
("expr", "result"),
[
(lambda x, y: x + y, 5.0),
(lambda x, y: x - y, -1.0),
(lambda x, y: x * y, 6.0),
(lambda x, y: x / y, 2.0 / 3.0),
(lambda x, y: x**y, 8.0),
],
)
def test_two_model_instance_arithmetic_1d(expr, result):
"""
Like test_two_model_class_arithmetic_1d, but creates a new model from two
model *instances* with fixed parameters.
"""
s = expr(Const1D(2), Const1D(3))
assert isinstance(s, CompoundModel)
assert s.n_inputs == 1
assert s.n_outputs == 1
out = s(0)
assert out == result
assert isinstance(out, float)
def test_simple_two_model_compose_1d():
"""
Shift and Scale are two of the simplest models to test model composition
with.
"""
S1 = Shift(2) | Scale(3) # First shift then scale
assert isinstance(S1, CompoundModel)
assert S1.n_inputs == 1
assert S1.n_outputs == 1
assert S1(1) == 9.0
S2 = Scale(2) | Shift(3) # First scale then shift
assert isinstance(S2, CompoundModel)
assert S2.n_inputs == 1
assert S2.n_outputs == 1
assert S2(1) == 5.0
# Test with array inputs
assert_array_equal(S2([1, 2, 3]), [5.0, 7.0, 9.0])
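# A minimal sketch of the composition semantics exercised above (informal
# restatement, not an extra assertion): for one-input models ``(m1 | m2)(x)``
# is ``m2(m1(x))``, so operand order matters:
#
#     >>> (Shift(2) | Scale(3))(1) == Scale(3)(Shift(2)(1)) == 9.0
#     True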
def test_simple_two_model_compose_2d():
"""
A simple example consisting of two rotations.
"""
r1 = Rotation2D(45) | Rotation2D(45)
assert isinstance(r1, CompoundModel)
assert r1.n_inputs == 2
assert r1.n_outputs == 2
assert_allclose(r1(0, 1), (-1, 0), atol=1e-10)
r2 = Rotation2D(90) | Rotation2D(90) # Rotate twice by 90 degrees
assert_allclose(r2(0, 1), (0, -1), atol=1e-10)
# Compose R with itself to produce 4 rotations
r3 = r1 | r1
assert_allclose(r3(0, 1), (0, -1), atol=1e-10)
def test_n_submodels():
"""
Test that CompoundModel.n_submodels properly returns the number
of components.
"""
g2 = Gaussian1D() + Gaussian1D()
assert g2.n_submodels == 2
g3 = g2 + Gaussian1D()
assert g3.n_submodels == 3
g5 = g3 | g2
assert g5.n_submodels == 5
g7 = g5 / g2
assert g7.n_submodels == 7
def test_expression_formatting():
"""
Test that the expression strings from compound models are formatted
correctly.
"""
    # For the purposes of this test it doesn't matter a great deal which
    # model(s) are used in the expression
G = Gaussian1D(1, 1, 1)
G2 = Gaussian2D(1, 2, 3, 4, 5, 6)
M = G + G
assert M._format_expression() == "[0] + [1]"
M = G + G + G
assert M._format_expression() == "[0] + [1] + [2]"
M = G + G * G
assert M._format_expression() == "[0] + [1] * [2]"
M = G * G + G
assert M._format_expression() == "[0] * [1] + [2]"
M = G + G * G + G
assert M._format_expression() == "[0] + [1] * [2] + [3]"
M = (G + G) * (G + G)
assert M._format_expression() == "([0] + [1]) * ([2] + [3])"
# This example uses parentheses in the expression, but those won't be
# preserved in the expression formatting since they technically aren't
# necessary, and there's no way to know that they were originally
# parenthesized (short of some deep, and probably not worthwhile
# introspection)
M = (G * G) + (G * G)
assert M._format_expression() == "[0] * [1] + [2] * [3]"
M = G**G
assert M._format_expression() == "[0] ** [1]"
M = G + G**G
assert M._format_expression() == "[0] + [1] ** [2]"
M = (G + G) ** G
assert M._format_expression() == "([0] + [1]) ** [2]"
M = G + G | G
assert M._format_expression() == "[0] + [1] | [2]"
M = G + (G | G)
assert M._format_expression() == "[0] + ([1] | [2])"
M = G & G | G2
assert M._format_expression() == "[0] & [1] | [2]"
M = G & (G | G)
assert M._format_expression() == "[0] & ([1] | [2])"
def test_basic_compound_inverse():
"""
Test basic inversion of compound models in the limited sense supported for
models made from compositions and joins only.
"""
t = (Shift(2) & Shift(3)) | (Scale(2) & Scale(3)) | Rotation2D(90)
assert_allclose(t.inverse(*t(0, 1)), (0, 1))
@pytest.mark.parametrize(
"model",
[
Shift(0) + Shift(0) | Shift(0),
Shift(0) - Shift(0) | Shift(0),
Shift(0) * Shift(0) | Shift(0),
Shift(0) / Shift(0) | Shift(0),
Shift(0) ** Shift(0) | Shift(0),
Gaussian1D(1, 2, 3) | Gaussian1D(4, 5, 6),
],
)
def test_compound_unsupported_inverse(model):
"""
Ensure inverses aren't supported in cases where it shouldn't be.
"""
MESSAGE = r"No analytical or user-supplied inverse transform .*"
with pytest.raises(NotImplementedError, match=MESSAGE):
model.inverse
def test_mapping_basic_permutations():
"""
Tests a couple basic examples of the Mapping model--specifically examples
that merely permute the outputs.
"""
x, y = Rotation2D(90)(1, 2)
rs = Rotation2D(90) | Mapping((1, 0))
x_prime, y_prime = rs(1, 2)
assert_allclose((x, y), (y_prime, x_prime))
# A more complicated permutation
m = Rotation2D(90) & Scale(2)
x, y, z = m(1, 2, 3)
ms = m | Mapping((2, 0, 1))
x_prime, y_prime, z_prime = ms(1, 2, 3)
assert_allclose((x, y, z), (y_prime, z_prime, x_prime))
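# Hedged reading of the Mapping examples above: ``Mapping(indices)`` only
# reorders (or duplicates/drops) the inputs it receives, so ``Mapping((1, 0))``
# swaps a pair and ``Mapping((2, 0, 1))`` rotates a triple:
#
#     >>> Mapping((1, 0))(3.0, 4.0)
#     (4.0, 3.0)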
def test_mapping_inverse():
"""Tests inverting a compound model that includes a `Mapping`."""
rs1 = Rotation2D(12.1) & Scale(13.2)
rs2 = Rotation2D(14.3) & Scale(15.4)
    # Rotates 2 of the coordinates and scales the third--then rotates on a
    # different axis and scales on the axis of rotation. No physical meaning
    # here, just a simple test
m = rs1 | Mapping([2, 0, 1]) | rs2
assert_allclose((0, 1, 2), m.inverse(*m(0, 1, 2)), atol=1e-08)
def test_identity_input():
"""
Test a case where an Identity (or Mapping) model is the first in a chain
of composite models and thus is responsible for handling input broadcasting
properly.
Regression test for https://github.com/astropy/astropy/pull/3362
"""
ident1 = Identity(1)
shift = Shift(1)
rotation = Rotation2D(angle=90)
model = ident1 & shift | rotation
assert_allclose(model(1, 2), [-3.0, 1.0])
def test_invalid_operands():
"""
Test that certain operators do not work with models whose inputs/outputs do
not match up correctly.
"""
MESSAGE = r"Unsupported operands for |:.*"
with pytest.raises(ModelDefinitionError, match=MESSAGE):
Rotation2D(90) | Gaussian1D(1, 0, 0.1)
MESSAGE = r"Both operands must match numbers of inputs and outputs"
with pytest.raises(ModelDefinitionError, match=MESSAGE):
Rotation2D(90) + Gaussian1D(1, 0, 0.1)
@pytest.mark.parametrize("poly", [Chebyshev2D(1, 2), Polynomial2D(2), Legendre2D(1, 2)])
def test_compound_with_polynomials_2d(poly):
"""
Tests that polynomials are scaled when used in compound models.
Issue #3699
"""
poly.parameters = [1, 2, 3, 4, 1, 2]
shift = Shift(3)
model = poly | shift
x, y = np.mgrid[:20, :37]
result_compound = model(x, y)
result = shift(poly(x, y))
assert_allclose(result, result_compound)
def test_fix_inputs():
g1 = Gaussian2D(1, 0, 0, 1, 2)
g2 = Gaussian2D(1.5, 0.5, -0.2, 0.5, 0.3)
sg1_1 = fix_inputs(g1, {1: 0})
assert_allclose(sg1_1(0), g1(0, 0))
assert_allclose(sg1_1([0, 1, 3]), g1([0, 1, 3], [0, 0, 0]))
sg1_2 = fix_inputs(g1, {"x": 1})
assert_allclose(sg1_2(1.5), g1(1, 1.5))
gg1 = g1 & g2
sgg1_1 = fix_inputs(gg1, {1: 0.1, 3: 0.2})
assert_allclose(sgg1_1(0, 0), gg1(0, 0.1, 0, 0.2))
sgg1_2 = fix_inputs(gg1, {"x0": -0.1, 2: 0.1})
assert_allclose(sgg1_2(1, 1), gg1(-0.1, 1, 0.1, 1))
assert_allclose(sgg1_2(y0=1, y1=1), gg1(-0.1, 1, 0.1, 1))
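# Minimal sketch of the key convention used above (an informal restatement,
# assuming names and positional indices are interchangeable, as the mixed
# usage suggests): for Gaussian2D with inputs ("x", "y") these freeze the
# same input:
#
#     >>> sg_by_index = fix_inputs(g1, {1: 0})
#     >>> sg_by_name = fix_inputs(g1, {"y": 0})
#     >>> np.isclose(sg_by_index(0.3), sg_by_name(0.3))
#     True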
def test_fix_inputs_invalid():
g1 = Gaussian2D(1, 0, 0, 1, 2)
MESSAGE = r"Substitution key .* not among possible input choices"
with pytest.raises(ValueError, match=MESSAGE):
fix_inputs(g1, {"x0": 0, 0: 0})
with pytest.raises(ValueError, match=MESSAGE):
fix_inputs(g1, {3: 2})
with pytest.raises(ValueError, match=MESSAGE):
fix_inputs(g1, {np.int32(3): 2})
with pytest.raises(ValueError, match=MESSAGE):
fix_inputs(g1, {np.int64(3): 2})
with pytest.raises(ValueError, match=MESSAGE):
fix_inputs(g1, {"w": 2})
MESSAGE = r'Expected a dictionary for second argument of "fix_inputs"'
with pytest.raises(ValueError, match=MESSAGE):
fix_inputs(g1, (0, 1))
MESSAGE = r".*Illegal operator: ', '#'.*"
with pytest.raises(ModelDefinitionError, match=MESSAGE):
CompoundModel("#", g1, g1)
MESSAGE = r"Too many input arguments - expected 1, got 2"
with pytest.raises(ValueError, match=MESSAGE):
gg1 = fix_inputs(g1, {0: 1})
gg1(2, y=2)
with pytest.raises(ValueError, match=MESSAGE):
gg1 = fix_inputs(g1, {np.int32(0): 1})
gg1(2, y=2)
with pytest.raises(ValueError, match=MESSAGE):
gg1 = fix_inputs(g1, {np.int64(0): 1})
gg1(2, y=2)
def test_fix_inputs_with_bounding_box():
g1 = Gaussian2D(1, 0, 0, 1, 1)
g2 = Gaussian2D(1, 0, 0, 1, 1)
assert_allclose(g1.bounding_box, ((-5.5, 5.5), (-5.5, 5.5)))
gg1 = g1 & g2
gg1.bounding_box = ((-5.5, 5.5), (-5.4, 5.4), (-5.3, 5.3), (-5.2, 5.2))
assert gg1.bounding_box == ((-5.5, 5.5), (-5.4, 5.4), (-5.3, 5.3), (-5.2, 5.2))
sg = fix_inputs(gg1, {0: 0, 2: 0})
assert sg.bounding_box == ((-5.5, 5.5), (-5.3, 5.3))
g1 = Gaussian1D(10, 3, 1)
g = g1 & g1
g.bounding_box = ((1, 4), (6, 8))
gf = fix_inputs(g, {0: 1})
assert gf.bounding_box == (1, 4)
def test_indexing_on_instance():
"""Test indexing on compound model instances."""
m = Gaussian1D(1, 0, 0.1) + Const1D(2)
assert isinstance(m[0], Gaussian1D)
assert isinstance(m[1], Const1D)
assert m.param_names == ("amplitude_0", "mean_0", "stddev_0", "amplitude_1")
# Test parameter equivalence
assert m[0].amplitude == 1 == m.amplitude_0
assert m[0].mean == 0 == m.mean_0
assert m[0].stddev == 0.1 == m.stddev_0
assert m[1].amplitude == 2 == m.amplitude_1
# Test that parameter value updates are symmetric between the compound
# model and the submodel returned by indexing
const = m[1]
m.amplitude_1 = 42
assert const.amplitude == 42
const.amplitude = 137
assert m.amplitude_1 == 137
# Similar couple of tests, but now where the compound model was created
# from model instances
g = Gaussian1D(1, 2, 3, name="g")
p = Polynomial1D(2, name="p")
m = g + p
assert m[0].name == "g"
assert m[1].name == "p"
assert m["g"].name == "g"
assert m["p"].name == "p"
poly = m[1]
m.c0_1 = 12345
assert poly.c0 == 12345
poly.c1 = 6789
assert m.c1_1 == 6789
# Test negative indexing
assert isinstance(m[-1], Polynomial1D)
assert isinstance(m[-2], Gaussian1D)
MESSAGE = r"list index out of range"
with pytest.raises(IndexError, match=MESSAGE):
m[42]
MESSAGE = r"No component with name 'foobar' found"
with pytest.raises(IndexError, match=MESSAGE):
m["foobar"]
# Confirm index-by-name works with fix_inputs
g = Gaussian2D(1, 2, 3, 4, 5, name="g")
m = fix_inputs(g, {0: 1})
assert m["g"].name == "g"
# Test string slicing
A = Const1D(1.1, name="A")
B = Const1D(2.1, name="B")
C = Const1D(3.1, name="C")
M = A + B * C
assert_allclose(M["B":"C"](1), 6.510000000000001)
class _ConstraintsTestA(Model):
stddev = Parameter(default=0, min=0, max=0.3)
mean = Parameter(default=0, fixed=True)
@staticmethod
def evaluate(stddev, mean):
return stddev, mean
class _ConstraintsTestB(Model):
mean = Parameter(default=0, fixed=True)
@staticmethod
def evaluate(mean):
return mean
def test_inherit_constraints():
"""
Various tests for copying of constraint values between compound models and
their members.
Regression test for https://github.com/astropy/astropy/issues/3481
"""
model = Gaussian1D(bounds={"stddev": (0, 0.3)}, fixed={"mean": True}) + Gaussian1D(
fixed={"mean": True}
)
# Lots of assertions in this test as there are multiple interfaces to
# parameter constraints
assert "stddev_0" in model.bounds
assert model.bounds["stddev_0"] == (0, 0.3)
assert model.stddev_0.bounds == (0, 0.3)
assert "mean_0" in model.fixed
assert model.fixed["mean_0"] is True
assert model.mean_0.fixed is True
assert "mean_1" in model.fixed
assert model.fixed["mean_1"] is True
assert model.mean_1.fixed is True
assert model.stddev_0 is model[0].stddev
# Great, all the constraints were inherited properly
# Now what about if we update them through the sub-models?
model.stddev_0.bounds = (0, 0.4)
assert model[0].stddev.bounds == (0, 0.4)
assert model[0].bounds["stddev"] == (0, 0.4)
model.stddev_0.bounds = (0.1, 0.5)
assert model[0].stddev.bounds == (0.1, 0.5)
assert model[0].bounds["stddev"] == (0.1, 0.5)
model[1].mean.fixed = False
assert model.mean_1.fixed is False
assert model[1].mean.fixed is False
# Now turn off syncing of constraints
assert model.bounds["stddev_0"] == (0.1, 0.5)
model.sync_constraints = False
model[0].stddev.bounds = (0, 0.2)
assert model.bounds["stddev_0"] == (0.1, 0.5)
model.sync_constraints = True
assert model.bounds["stddev_0"] == (0, 0.2)
def test_compound_custom_inverse():
"""
Test that a compound model with a custom inverse has that inverse applied
when the inverse of another model, of which it is a component, is computed.
Regression test for https://github.com/astropy/astropy/issues/3542
"""
poly = Polynomial1D(1, c0=1, c1=2)
scale = Scale(1)
shift = Shift(1)
model1 = poly | scale
model1.inverse = poly
# model1 now has a custom inverse (the polynomial itself, ignoring the
# trivial scale factor)
model2 = shift | model1
assert_allclose(model2.inverse(1), (poly | shift.inverse)(1))
# Make sure an inverse is not allowed if the models were combined with the
# wrong operator, or if one of the models doesn't have an inverse defined
MESSAGE = (
r"No analytical or user-supplied inverse transform has been implemented for"
r" this model"
)
with pytest.raises(NotImplementedError, match=MESSAGE):
(shift + model1).inverse
with pytest.raises(NotImplementedError, match=MESSAGE):
(model1 & poly).inverse
def test_pickle_compound():
"""
Regression test for
https://github.com/astropy/astropy/issues/3867#issuecomment-114547228
"""
# Test pickling a compound model instance
g1 = Gaussian1D(1.0, 0.0, 0.1)
g2 = Gaussian1D([2.0, 3.0], [0.0, 0.0], [0.2, 0.3])
m = g1 + g2
m2 = pickle.loads(pickle.dumps(m))
assert m.param_names == m2.param_names
assert m.__class__.__name__ == m2.__class__.__name__
assert np.all(m.parameters == m2.parameters)
assert np.all(m(0) == m2(0))
def test_update_parameters():
offx = Shift(1)
scl = Scale(2)
m = offx | scl
assert m(1) == 4
offx.offset = 42
assert m(1) == 86
m.factor_1 = 100
assert m(1) == 4300
m2 = m | offx
assert m2(1) == 4342
def test_name():
offx = Shift(1)
scl = Scale(2)
m = offx | scl
scl.name = "scale"
assert m.submodel_names == ("None_0", "scale")
assert m.name is None
m.name = "M"
assert m.name == "M"
m1 = m.rename("M1")
assert m.name == "M1"
assert m1.name == "M1"
def test_name_index():
g1 = Gaussian1D(1, 1, 1)
g2 = Gaussian1D(1, 2, 1)
g = g1 + g2
MESSAGE = r"No component with name 'bozo' found"
with pytest.raises(IndexError, match=MESSAGE):
g["bozo"]
g1.name = "bozo"
assert g["bozo"].mean == 1
g2.name = "bozo"
MESSAGE = r"Multiple components found using 'bozo' as name.*"
with pytest.raises(IndexError, match=MESSAGE):
g["bozo"]
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_in_compound():
"""
Issue #7411 - evaluate should not change the shape of the output.
"""
t = Tabular1D(points=([1, 5, 7],), lookup_table=[12, 15, 19], bounds_error=False)
rot = Rotation2D(2)
p = Polynomial1D(1)
x = np.arange(12).reshape((3, 4))
# Create a compound model that goes through Tabular.evaluate rather than
# Tabular.__call__, and follow it with a Rotation2D, which checks the
# exact shapes.
model = p & t | rot
x1, y1 = model(x, x)
assert x1.ndim == 2
assert y1.ndim == 2
def test_bounding_box():
g = Gaussian2D() + Gaussian2D(2, 0.5, 0.1, 2, 3, 0)
g.bounding_box = ((0, 1), (0, 0.5))
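# The box is given as ((y_low, y_high), (x_low, x_high)); with
# with_bounding_box=True, points outside it evaluate to NaN (see `compare` below).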
y, x = np.mgrid[0:10, 0:10]
y = y / 3.0
x = x / 3.0
val = g(x, y, with_bounding_box=True)
# fmt: off
compare = np.array(
[
[2.93738984, 2.93792011, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[2.87857153, 2.88188761, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[2.70492922, 2.71529265, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[2.45969972, 2.47912103, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan]
]
)
# fmt: on
mask = ~np.isnan(val)
assert_allclose(val[mask], compare[mask])
val2 = g(x + 2, y + 2, with_bounding_box=True)
assert np.isnan(val2).sum() == 100
# val3 = g(.1, .1, with_bounding_box=True)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_bounding_box_with_units():
points = np.arange(5) * u.pix
lt = np.arange(5) * u.AA
t = Tabular1D(points, lt)
assert t(1 * u.pix, with_bounding_box=True) == 1.0 * u.AA
@pytest.mark.parametrize("poly", [Chebyshev1D(5), Legendre1D(5), Polynomial1D(5)])
def test_compound_with_polynomials_1d(poly):
"""
Tests that polynomials are offset when used in compound models.
Issue #3699
"""
poly.parameters = [1, 2, 3, 4, 1, 2]
shift = Shift(3)
model = poly | shift
x = np.linspace(-5, 5, 10)
result_compound = model(x)
result = shift(poly(x))
assert_allclose(result, result_compound)
assert model.param_names == (
"c0_0",
"c1_0",
"c2_0",
"c3_0",
"c4_0",
"c5_0",
"offset_1",
)
def test_replace_submodel():
"""
Replace a model in a Compound model
"""
S1 = Shift(2, name="shift2") | Scale(3, name="scale3") # First shift then scale
S2 = Scale(2, name="scale2") | Shift(3, name="shift3") # First scale then shift
m = S1 & S2
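# S1(1) = (1 + 2) * 3 = 9 and S2(2) = 2 * 2 + 3 = 7, hence (9, 7) below.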
assert m(1, 2) == (9, 7)
m2 = m.replace_submodel("scale3", Scale(4, name="scale4"))
assert m2(1, 2) == (12, 7)
assert m(1, 2) == (9, 7)
# Check the inverse has been updated
assert m2.inverse(12, 7) == (1, 2)
# Produce the same result by replacing a single model with a compound
m3 = m.replace_submodel("shift2", Shift(2) | Scale(2))
assert m(1, 2) == (9, 7)
assert m3(1, 2) == (18, 7)
# Check the inverse has been updated
assert m3.inverse(18, 7) == (1, 2)
# Test with an arithmetic model compounding operator
m = S1 + S2
assert m(1) == 14
m2 = m.replace_submodel("scale2", Scale(4, name="scale4"))
assert m2(1) == 16
# Test with fix_inputs()
R = fix_inputs(Rotation2D(angle=90, name="rotate"), {0: 1})
m4 = S1 | R
assert_allclose(m4(0), (-6, 1))
m5 = m4.replace_submodel("rotate", Rotation2D(180))
assert_allclose(m5(0), (-1, -6))
# Check we get a ValueError when the model name doesn't exist
MESSAGE = r"No submodels found named not_there"
with pytest.raises(ValueError, match=MESSAGE):
m2 = m.replace_submodel("not_there", Scale(2))
# And now a model set
P = Polynomial1D(degree=1, n_models=2, name="poly")
S = Shift([1, 2], n_models=2)
m = P | S
assert_array_equal(m([0, 1]), (1, 2))
MESSAGE = r"New and old models must have equal values for n_models"
with pytest.raises(ValueError, match=MESSAGE):
m2 = m.replace_submodel("poly", Polynomial1D(degree=1, c0=1))
m2 = m.replace_submodel("poly", Polynomial1D(degree=1, c0=[1, 2], n_models=2))
assert_array_equal(m2([0, 1]), (2, 4))
# Ensure previous _user_inverse doesn't stick around
S1 = Shift(1)
S2 = Shift(2)
S3 = Shift(3, name="S3")
S23 = S2 | S3
S23.inverse = Shift(-4.9)
m = S1 & S23
# This should delete the S23._user_inverse
m2 = m.replace_submodel("S3", Shift(4))
assert m2(1, 2) == (2, 8)
assert m2.inverse(2, 8) == (1, 2)
@pytest.mark.parametrize(
"expr",
[
lambda m1, m2: m1 + m2,
lambda m1, m2: m1 - m2,
lambda m1, m2: m1 * m2,
lambda m1, m2: m1 / m2,
],
)
def test_compound_evaluate(expr):
"""
Tests that compound evaluate function produces the same
result as the models with the operator applied
"""
x = np.linspace(-5, 5, 10)
# Some evaluate functions (e.g. Const1D's) assume that inputs are numpy arrays or quantities
p1 = np.array([1, 2, 3, 4, 1, 2])
p2 = np.array([1, 0, 0.5])
model1 = Polynomial1D(5)
model2 = Gaussian1D(2, 1, 5)
compound = expr(model1, model2)
assert_array_equal(
compound.evaluate(x, *p1, *p2),
expr(model1.evaluate(x, *p1), model2.evaluate(x, *p2)),
)
def test_compound_evaluate_power():
"""
Tests that compound evaluate function produces the same
result as the models with the power operator applied
"""
x = np.linspace(-5, 5, 10)
p1 = np.array([1, 0, 0.2])
p2 = np.array([3])
model1 = Gaussian1D(2, 1, 5)
model2 = Const1D(2)
compound = model1**model2
assert_array_equal(
compound.evaluate(x, *p1, *p2),
model1.evaluate(x, *p1) ** model2.evaluate(x, *p2),
)
def test_compound_evaluate_double_shift():
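# For an '&' composition, evaluate takes all inputs first (one set per submodel)
# followed by all parameter values in submodel order.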
x = np.linspace(-5, 5, 10)
y = np.linspace(-5, 5, 10)
m1 = Gaussian2D(1, 0, 0, 1, 1, 1)
m2 = Shift(1)
m3 = Shift(2)
m = Gaussian2D(1, 0, 0, 1, 1, 1) & Shift(1) & Shift(2)
assert_array_equal(
m.evaluate(x, y, x - 10, y + 20, 1, 0, 0, 1, 1, 1, 1, 2),
[
m1.evaluate(x, y, 1, 0, 0, 1, 1, 1),
m2.evaluate(x - 10, 1),
m3.evaluate(y + 20, 2),
],
)
@pytest.mark.parametrize(
"expr",
[
lambda m1, m2: m1 + m2,
lambda m1, m2: m1 - m2,
lambda m1, m2: m1 * m2,
lambda m1, m2: m1 / m2,
],
)
def test_compound_evaluate_named_param(expr):
"""
Tests that compound evaluate function produces the same
result as the models with the operator applied
"""
x = np.linspace(-5, 5, 10)
p1 = np.array([1, 0, 0.2])
p2 = np.array([3, 0.5, 0.5])
model1 = Gaussian1D(2, 1, 5)
model2 = Gaussian1D(2, 1, 5)
compound = expr(model1, model2)
assert_array_equal(
compound.evaluate(x, *p2, amplitude_0=p1[0], mean_0=p1[1], stddev_0=p1[2]),
expr(model1.evaluate(x, *p1), model2.evaluate(x, *p2)),
)
def test_compound_evaluate_name_param_power():
"""
Tests that compound evaluate function produces the same
result as the models with the power operator applied
"""
x = np.linspace(-5, 5, 10)
p1 = np.array([1, 0, 0.2])
p2 = np.array([3])
model1 = Gaussian1D(2, 1, 5)
model2 = Const1D(2)
compound = model1**model2
assert_array_equal(
compound.evaluate(x, *p2, amplitude_0=p1[0], mean_0=p1[1], stddev_0=p1[2]),
model1.evaluate(x, *p1) ** model2.evaluate(x, *p2),
)
def test_compound_evaluate_and():
"""
Tests that compound evaluate function produces the same
result as the models with the operator applied
"""
x = np.linspace(-5, 5, 10)
p1 = np.array([1, 0.1, 0.5])
p2 = np.array([3])
model1 = Gaussian1D()
model2 = Shift()
compound = model1 & model2
assert_array_equal(
compound.evaluate(x, x, *p1, p2),
[model1.evaluate(x, *p1), model2.evaluate(x, p2)],
)
def test_compound_evaluate_or():
"""
Tests that compound evaluate function produces the same
result as the models with the operator applied
"""
x = np.linspace(-5, 5, 10)
p1 = np.array([0.5])
p2_amplitude = np.array([3])
p2_mean = np.array([0])
p2_std = np.array([0.1])
model1 = Shift(0.5)
model2 = Gaussian1D(1, 0, 0.5)
compound = model1 | model2
assert_array_equal(
compound.evaluate(x, p1, p2_amplitude, p2_mean, p2_std),
model2.evaluate(model1.evaluate(x, p1), p2_amplitude, p2_mean, p2_std),
)
def test_compound_evaluate_fix_inputs_by_keyword():
"""
Tests that the compound evaluate function produces the same
result as the model with the fix_inputs operator applied, when the
fixed input is specified by keyword (input name)
"""
y, x = np.mgrid[:10, :10]
model_params = [3, 0, 0.1, 1, 0.5, 0]
model = Gaussian2D(1, 2, 0, 0.5)
compound = fix_inputs(model, {"x": x + 5})
assert_array_equal(
compound.evaluate(x, y, *model_params),
model.evaluate(x + 5, y, *model_params),
)
def test_compound_evaluate_fix_inputs_by_position():
"""
Tests that the compound evaluate function produces the same
result as the model with the fix_inputs operator applied, when the
fixed input is specified by positional index
"""
y, x = np.mgrid[:10, :10]
model_params = [3, 0, 0.1, 1, 0.5, 0]
model = Gaussian2D(1, 2, 0, 0.5)
compound = fix_inputs(model, {0: x + 5})
assert_array_equal(
compound.evaluate(x, y, *model_params),
model.evaluate(x + 5, y, *model_params),
)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_fit_multiplied_compound_model_with_mixed_units():
"""
Regression test for issue #12320
"""
fitter = LevMarLSQFitter()
x = np.linspace(0, 1, 101) * u.s
y = np.linspace(5, 10, 101) * u.m * u.kg / u.s
m1 = Linear1D(slope=5 * u.m / u.s / u.s, intercept=1.0 * u.m / u.s)
m2 = Linear1D(slope=0.0 * u.kg / u.s, intercept=10.0 * u.kg)
truth = m1 * m2
fit = fitter(truth, x, y)
unfit_output = truth(x)
fit_output = fit(x)
assert unfit_output.unit == fit_output.unit == (u.kg * u.m / u.s)
assert_allclose(unfit_output, fit_output)
for name in truth.param_names:
assert getattr(truth, name) == getattr(fit, name)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_fit_multiplied_recursive_compound_model_with_mixed_units():
"""
Regression test for issue #12320
"""
fitter = LevMarLSQFitter()
x = np.linspace(0, 1, 101) * u.s
y = np.linspace(5, 10, 101) * u.m * u.m * u.kg / u.s
m1 = Linear1D(slope=5 * u.m / u.s / u.s, intercept=1.0 * u.m / u.s)
m2 = Linear1D(slope=0.0 * u.kg / u.s, intercept=10.0 * u.kg)
m3 = Linear1D(slope=0.0 * u.m / u.s, intercept=10.0 * u.m)
truth = m1 * m2 * m3
fit = fitter(truth, x, y)
unfit_output = truth(x)
fit_output = fit(x)
assert unfit_output.unit == fit_output.unit == (u.kg * u.m * u.m / u.s)
assert_allclose(unfit_output, fit_output)
for name in truth.param_names:
assert getattr(truth, name) == getattr(fit, name)
x = np.linspace(0, 1, 101) * u.s
y = np.linspace(5, 10, 101) * u.m * u.m * u.kg * u.kg / u.s
m1 = Linear1D(slope=5 * u.m / u.s / u.s, intercept=1.0 * u.m / u.s)
m2 = Linear1D(slope=0.0 * u.kg / u.s, intercept=10.0 * u.kg)
m3 = Linear1D(slope=0.0 * u.m / u.s, intercept=10.0 * u.m)
m4 = Linear1D(slope=0.0 * u.kg / u.s, intercept=10.0 * u.kg)
m11 = m1 * m2
m22 = m3 * m4
truth = m11 * m22
fit = fitter(truth, x, y)
unfit_output = truth(x)
fit_output = fit(x)
assert unfit_output.unit == fit_output.unit == (u.kg * u.kg * u.m * u.m / u.s)
assert_allclose(unfit_output, fit_output)
for name in truth.param_names:
assert getattr(truth, name) == getattr(fit, name)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_fit_divided_compound_model_with_mixed_units():
"""
Regression test for issue #12320
"""
fitter = LevMarLSQFitter()
x = np.linspace(0, 1, 101) * u.s
y = np.linspace(5, 10, 101) * u.kg * u.m / u.s
m1 = Linear1D(slope=5 * u.kg * u.m / u.s, intercept=1.0 * u.kg * u.m)
m2 = Linear1D(slope=0.0 * u.s / u.s, intercept=10.0 * u.s)
truth = m1 / m2
fit = fitter(truth, x, y)
unfit_output = truth(x)
fit_output = fit(x)
assert unfit_output.unit == fit_output.unit == (u.kg * u.m / u.s)
assert_allclose(unfit_output, fit_output)
for name in truth.param_names:
assert getattr(truth, name) == getattr(fit, name)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_fit_mixed_recursive_compound_model_with_mixed_units():
"""
Regression test for issue #12320
"""
fitter = LevMarLSQFitter()
x = np.linspace(0, 1, 101) * u.s
y = np.linspace(5, 10, 101) * u.kg * u.m * u.m / u.s
m1 = Linear1D(slope=5 * u.kg * u.m / u.s, intercept=1.0 * u.kg * u.m)
m2 = Linear1D(slope=0.0 * u.s / u.s, intercept=10.0 * u.s)
m3 = Linear1D(slope=0.0 * u.m / u.s, intercept=10.0 * u.m)
truth = m1 / m2 * m3
fit = fitter(truth, x, y)
unfit_output = truth(x)
fit_output = fit(x)
assert unfit_output.unit == fit_output.unit == (u.kg * u.m * u.m / u.s)
assert_allclose(unfit_output, fit_output)
for name in truth.param_names:
assert getattr(truth, name) == getattr(fit, name)
x = np.linspace(0, 1, 101) * u.s
y = np.linspace(5, 10, 101) * u.kg * u.kg * u.m * u.m / u.s
m1 = Linear1D(slope=5 * u.kg * u.m / u.s, intercept=1.0 * u.kg * u.m)
m2 = Linear1D(slope=0.0 * u.s / u.s, intercept=10.0 * u.s)
m3 = Linear1D(slope=0.0 * u.m / u.s, intercept=10.0 * u.m)
m4 = Linear1D(slope=0.0 * u.kg / u.s, intercept=10.0 * u.kg)
m11 = m1 / m2
m22 = m3 * m4
truth = m11 * m22
fit = fitter(truth, x, y)
unfit_output = truth(x)
fit_output = fit(x)
assert unfit_output.unit == fit_output.unit == (u.kg * u.kg * u.m * u.m / u.s)
assert_allclose(unfit_output, fit_output)
for name in truth.param_names:
assert getattr(truth, name) == getattr(fit, name)
|
4adeff667950936a4d7da836d2ba0951aac9bc9f5c0f4a622d89f624d5a859f1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
import os
import subprocess
import sys
import unittest.mock as mk
from inspect import signature
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_equal
import astropy
import astropy.modeling.core as core
import astropy.units as u
from astropy.convolution import convolve_models
from astropy.modeling import models
from astropy.modeling.bounding_box import CompoundBoundingBox, ModelBoundingBox
from astropy.modeling.core import (
SPECIAL_OPERATORS,
CompoundModel,
Model,
_add_special_operator,
bind_bounding_box,
bind_compound_bounding_box,
custom_model,
fix_inputs,
)
from astropy.modeling.parameters import Parameter
from astropy.modeling.separable import separability_matrix
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY
class NonFittableModel(Model):
"""An example class directly subclassing Model for testing."""
a = Parameter()
def __init__(self, a, model_set_axis=None):
super().__init__(a, model_set_axis=model_set_axis)
@staticmethod
def evaluate():
pass
def test_Model_instance_repr_and_str():
m = NonFittableModel(42.5)
assert repr(m) == "<NonFittableModel(a=42.5)>"
assert (
str(m) == "Model: NonFittableModel\n"
"Inputs: ()\n"
"Outputs: ()\n"
"Model set size: 1\n"
"Parameters:\n"
" a \n"
" ----\n"
" 42.5"
)
assert len(m) == 1
def test_Model_array_parameter():
model = models.Gaussian1D(4, 2, 1)
assert_allclose(model.param_sets, [[4], [2], [1]])
def test_inputless_model():
"""
Regression test for
https://github.com/astropy/astropy/pull/3772#issuecomment-101821641
"""
class TestModel(Model):
n_outputs = 1
a = Parameter()
@staticmethod
def evaluate(a):
return a
m = TestModel(1)
assert m.a == 1
assert m() == 1
# Test array-like output
m = TestModel([1, 2, 3], model_set_axis=False)
assert len(m) == 1
assert np.all(m() == [1, 2, 3])
# Test a model set
m = TestModel(a=[1, 2, 3], model_set_axis=0)
assert len(m) == 3
assert np.all(m() == [1, 2, 3])
# Test a model set
m = TestModel(a=[[1, 2, 3], [4, 5, 6]], model_set_axis=0)
assert len(m) == 2
assert np.all(m() == [[1, 2, 3], [4, 5, 6]])
# Test a model set
m = TestModel(a=[[1, 2, 3], [4, 5, 6]], model_set_axis=np.int64(0))
assert len(m) == 2
assert np.all(m() == [[1, 2, 3], [4, 5, 6]])
def test_ParametricModel():
MESSAGE = r"Gaussian1D.__init__.* got an unrecognized parameter 'wrong'"
with pytest.raises(TypeError, match=MESSAGE):
models.Gaussian1D(1, 2, 3, wrong=4)
def test_custom_model_signature():
"""
Tests that the signatures for the __init__ and __call__
methods of custom models are useful.
"""
@custom_model
def model_a(x):
return x
assert model_a.param_names == ()
assert model_a.n_inputs == 1
sig = signature(model_a.__init__)
assert list(sig.parameters.keys()) == ["self", "args", "meta", "name", "kwargs"]
sig = signature(model_a.__call__)
assert list(sig.parameters.keys()) == [
"self",
"inputs",
"model_set_axis",
"with_bounding_box",
"fill_value",
"equivalencies",
"inputs_map",
"new_inputs",
]
@custom_model
def model_b(x, a=1, b=2):
return x + a + b
assert model_b.param_names == ("a", "b")
assert model_b.n_inputs == 1
sig = signature(model_b.__init__)
assert list(sig.parameters.keys()) == ["self", "a", "b", "kwargs"]
assert [x.default for x in sig.parameters.values()] == [sig.empty, 1, 2, sig.empty]
sig = signature(model_b.__call__)
assert list(sig.parameters.keys()) == [
"self",
"inputs",
"model_set_axis",
"with_bounding_box",
"fill_value",
"equivalencies",
"inputs_map",
"new_inputs",
]
@custom_model
def model_c(x, y, a=1, b=2):
return x + y + a + b
assert model_c.param_names == ("a", "b")
assert model_c.n_inputs == 2
sig = signature(model_c.__init__)
assert list(sig.parameters.keys()) == ["self", "a", "b", "kwargs"]
assert [x.default for x in sig.parameters.values()] == [sig.empty, 1, 2, sig.empty]
sig = signature(model_c.__call__)
assert list(sig.parameters.keys()) == [
"self",
"inputs",
"model_set_axis",
"with_bounding_box",
"fill_value",
"equivalencies",
"inputs_map",
"new_inputs",
]
def test_custom_model_subclass():
"""Test that custom models can be subclassed."""
@custom_model
def model_a(x, a=1):
return x * a
class model_b(model_a):
# Override the evaluate from model_a
@classmethod
def evaluate(cls, x, a):
return -super().evaluate(x, a)
b = model_b()
assert b.param_names == ("a",)
assert b.a == 1
assert b(1) == -1
sig = signature(model_b.__init__)
assert list(sig.parameters.keys()) == ["self", "a", "kwargs"]
sig = signature(model_b.__call__)
assert list(sig.parameters.keys()) == [
"self",
"inputs",
"model_set_axis",
"with_bounding_box",
"fill_value",
"equivalencies",
"inputs_map",
"new_inputs",
]
def test_custom_model_parametrized_decorator():
"""Tests using custom_model as a decorator with parameters."""
def cosine(x, amplitude=1):
return [amplitude * np.cos(x)]
@custom_model(fit_deriv=cosine)
def sine(x, amplitude=1):
return amplitude * np.sin(x)
assert issubclass(sine, Model)
s = sine(2)
assert_allclose(s(np.pi / 2), 2)
assert_allclose(s.fit_deriv(0, 2), 2)
def test_custom_model_n_outputs():
"""
Test creating a custom_model which has more than one output, which
requires special handling.
Demonstrates that issue #11791's ``n_outputs`` error has been solved
"""
@custom_model
def model(x, y, n_outputs=2):
return x + 1, y + 1
m = model()
assert not isinstance(m.n_outputs, Parameter)
assert isinstance(m.n_outputs, int)
assert m.n_outputs == 2
assert m.outputs == ("x0", "x1")
assert (
separability_matrix(m)
== [
[True, True],
[True, True],
]
).all()
@custom_model
def model(x, y, z, n_outputs=3):
return x + 1, y + 1, z + 1
m = model()
assert not isinstance(m.n_outputs, Parameter)
assert isinstance(m.n_outputs, int)
assert m.n_outputs == 3
assert m.outputs == ("x0", "x1", "x2")
assert (
separability_matrix(m)
== [
[True, True, True],
[True, True, True],
[True, True, True],
]
).all()
def test_custom_model_settable_parameters():
"""
Test creating a custom_model which specifically sets adjustable model
parameters.
Demonstrates part of issue #11791's notes about what passed parameters
should/shouldn't be allowed. In this case, settable parameters
should be allowed to have defaults set.
"""
@custom_model
def model(x, y, n_outputs=2, bounding_box=((1, 2), (3, 4))):
return x + 1, y + 1
m = model()
assert m.n_outputs == 2
assert m.bounding_box == ((1, 2), (3, 4))
m.bounding_box = ((9, 10), (11, 12))
assert m.bounding_box == ((9, 10), (11, 12))
m = model(bounding_box=((5, 6), (7, 8)))
assert m.n_outputs == 2
assert m.bounding_box == ((5, 6), (7, 8))
m.bounding_box = ((9, 10), (11, 12))
assert m.bounding_box == ((9, 10), (11, 12))
@custom_model
def model(x, y, n_outputs=2, outputs=("z0", "z1")):
return x + 1, y + 1
m = model()
assert m.n_outputs == 2
assert m.outputs == ("z0", "z1")
m.outputs = ("a0", "a1")
assert m.outputs == ("a0", "a1")
m = model(outputs=("w0", "w1"))
assert m.n_outputs == 2
assert m.outputs == ("w0", "w1")
m.outputs = ("a0", "a1")
assert m.outputs == ("a0", "a1")
def test_custom_model_rejected_parameters():
"""
Test creating a custom_model which attempts to override non-overridable
parameters.
Demonstrates part of issue #11791's notes about what passed parameters
should/shouldn't be allowed. In this case, non-settable parameters
should raise an error (otherwise unexpected behavior may occur).
"""
with pytest.raises(
ValueError, match=r"Parameter 'n_inputs' cannot be a model property: *"
):
@custom_model
def model1(x, y, n_outputs=2, n_inputs=3):
return x + 1, y + 1
with pytest.raises(
ValueError, match=r"Parameter 'uses_quantity' cannot be a model property: *"
):
@custom_model
def model2(x, y, n_outputs=2, uses_quantity=True):
return x + 1, y + 1
def test_custom_inverse():
"""Test setting a custom inverse on a model."""
p = models.Polynomial1D(1, c0=-2, c1=3)
# A trivial inverse for a trivial polynomial
inv = models.Polynomial1D(1, c0=(2.0 / 3.0), c1=(1.0 / 3.0))
MESSAGE = (
r"No analytical or user-supplied inverse transform has been implemented for"
r" this model"
)
with pytest.raises(NotImplementedError, match=MESSAGE):
p.inverse
p.inverse = inv
x = np.arange(100)
assert_allclose(x, p(p.inverse(x)))
assert_allclose(x, p.inverse(p(x)))
p.inverse = None
with pytest.raises(NotImplementedError, match=MESSAGE):
p.inverse
def test_custom_inverse_reset():
"""Test resetting a custom inverse to the model's default inverse."""
class TestModel(Model):
n_inputs = 0
outputs = ("y",)
@property
def inverse(self):
return models.Shift()
@staticmethod
def evaluate():
return 0
# The above test model has no meaning, nor does its inverse--this just
# tests that setting an inverse and resetting to the default inverse works
m = TestModel()
assert isinstance(m.inverse, models.Shift)
m.inverse = models.Scale()
assert isinstance(m.inverse, models.Scale)
del m.inverse
assert isinstance(m.inverse, models.Shift)
def test_render_model_2d():
imshape = (71, 141)
image = np.zeros(imshape)
coords = y, x = np.indices(imshape)
model = models.Gaussian2D(x_stddev=6.1, y_stddev=3.9, theta=np.pi / 3)
# test points for edges
ye, xe = [0, 35, 70], [0, 70, 140]
# test points for floating point positions
yf, xf = [35.1, 35.5, 35.9], [70.1, 70.5, 70.9]
test_pts = [(a, b) for a in xe for b in ye]
test_pts += [(a, b) for a in xf for b in yf]
for x0, y0 in test_pts:
model.x_mean = x0
model.y_mean = y0
expected = model(x, y)
for xy in [coords, None]:
for im in [image.copy(), None]:
if (im is None) & (xy is None):
# this case is tested in Fittable2DModelTester
continue
actual = model.render(out=im, coords=xy)
if im is None:
assert_allclose(actual, model.render(coords=xy))
# assert images match
assert_allclose(expected, actual, atol=3e-7)
# assert model fully captured
if (x0, y0) == (70, 35):
boxed = model.render()
flux = np.sum(expected)
assert ((flux - np.sum(boxed)) / flux) < 1e-7
# test an error is raised when the bounding box is larger than the input array
with pytest.raises(ValueError):
    model.render(out=np.zeros((1, 1)))
def test_render_model_1d():
npix = 101
image = np.zeros(npix)
coords = np.arange(npix)
model = models.Gaussian1D()
# test points
test_pts = [0, 49.1, 49.5, 49.9, 100]
# test widths
test_stdv = np.arange(5.5, 6.7, 0.2)
for x0, stdv in [(p, s) for p in test_pts for s in test_stdv]:
model.mean = x0
model.stddev = stdv
expected = model(coords)
for x in [coords, None]:
for im in [image.copy(), None]:
if (im is None) & (x is None):
# this case is tested in Fittable1DModelTester
continue
actual = model.render(out=im, coords=x)
# assert images match
assert_allclose(expected, actual, atol=3e-7)
# assert model fully captured
if (x0, stdv) == (49.5, 5.5):
boxed = model.render()
flux = np.sum(expected)
assert ((flux - np.sum(boxed)) / flux) < 1e-7
@pytest.mark.filterwarnings("ignore:invalid value encountered in less")
def test_render_model_3d():
imshape = (17, 21, 27)
image = np.zeros(imshape)
coords = np.indices(imshape)
def ellipsoid(x, y, z, x0=13.0, y0=10.0, z0=8.0, a=4.0, b=3.0, c=2.0, amp=1.0):
rsq = ((x - x0) / a) ** 2 + ((y - y0) / b) ** 2 + ((z - z0) / c) ** 2
val = (rsq < 1) * amp
return val
class Ellipsoid3D(custom_model(ellipsoid)):
@property
def bounding_box(self):
return (
(self.z0 - self.c, self.z0 + self.c),
(self.y0 - self.b, self.y0 + self.b),
(self.x0 - self.a, self.x0 + self.a),
)
model = Ellipsoid3D()
# test points for edges
ze, ye, xe = [0, 8, 16], [0, 10, 20], [0, 13, 26]
# test points for floating point positions
zf, yf, xf = [8.1, 8.5, 8.9], [10.1, 10.5, 10.9], [13.1, 13.5, 13.9]
test_pts = [(x, y, z) for x in xe for y in ye for z in ze]
test_pts += [(x, y, z) for x in xf for y in yf for z in zf]
for x0, y0, z0 in test_pts:
model.x0 = x0
model.y0 = y0
model.z0 = z0
expected = model(*coords[::-1])
for c in [coords, None]:
for im in [image.copy(), None]:
if (im is None) & (c is None):
continue
actual = model.render(out=im, coords=c)
boxed = model.render()
# assert images match
assert_allclose(expected, actual)
# assert model fully captured
if (z0, y0, x0) == (8, 10, 13):
boxed = model.render()
assert (np.sum(expected) - np.sum(boxed)) == 0
def test_render_model_out_dtype():
"""Test different out.dtype for model.render."""
MESSAGE = (
r"Cannot cast ufunc 'add' output from .* to .* with casting rule 'same_kind"
)
for model in [models.Gaussian2D(), models.Gaussian2D() + models.Planar2D()]:
for dtype in [np.float64, np.float32, np.complex64]:
im = np.zeros((40, 40), dtype=dtype)
imout = model.render(out=im)
assert imout is im
assert imout.sum() != 0
with pytest.raises(TypeError, match=MESSAGE):
im = np.zeros((40, 40), dtype=np.int32)
imout = model.render(out=im)
def test_custom_bounding_box_1d():
"""
Tests that the bounding_box setter works.
"""
# 1D models
g1 = models.Gaussian1D()
bb = g1.bounding_box
expected = g1.render()
# assign the same bounding_box, now through the bounding_box setter
g1.bounding_box = bb
assert_allclose(g1.render(), expected)
# 2D models
g2 = models.Gaussian2D()
bb = g2.bounding_box
expected = g2.render()
# assign the same bounding_box, now through the bounding_box setter
g2.bounding_box = bb
assert_allclose(g2.render(), expected)
def test_n_submodels_in_single_models():
assert models.Gaussian1D().n_submodels == 1
assert models.Gaussian2D().n_submodels == 1
def test_compound_deepcopy():
model = (models.Gaussian1D(10, 2, 3) | models.Shift(2)) & models.Rotation2D(21.3)
new_model = model.deepcopy()
assert id(model) != id(new_model)
assert id(model._leaflist) != id(new_model._leaflist)
assert id(model[0]) != id(new_model[0])
assert id(model[1]) != id(new_model[1])
assert id(model[2]) != id(new_model[2])
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_units_with_bounding_box():
points = np.arange(10, 20)
table = np.arange(10) * u.Angstrom
t = models.Tabular1D(points, lookup_table=table)
assert isinstance(t(10), u.Quantity)
assert isinstance(t(10, with_bounding_box=True), u.Quantity)
assert_quantity_allclose(t(10), t(10, with_bounding_box=True))
RENAMED_MODEL = models.Gaussian1D.rename("CustomGaussian")
MODEL_RENAME_CODE = """
from astropy.modeling.models import Gaussian1D
print(repr(Gaussian1D))
print(repr(Gaussian1D.rename('CustomGaussian')))
""".strip()
MODEL_RENAME_EXPECTED = b"""
<class 'astropy.modeling.functional_models.Gaussian1D'>
Name: Gaussian1D
N_inputs: 1
N_outputs: 1
Fittable parameters: ('amplitude', 'mean', 'stddev')
<class '__main__.CustomGaussian'>
Name: CustomGaussian (Gaussian1D)
N_inputs: 1
N_outputs: 1
Fittable parameters: ('amplitude', 'mean', 'stddev')
""".strip()
def test_rename_path(tmp_path):
# Regression test for a bug that caused the path to the class to be
# incorrect in a renamed model's __repr__.
assert (
repr(RENAMED_MODEL).splitlines()[0]
== "<class 'astropy.modeling.tests.test_core.CustomGaussian'>"
)
# Make sure that when called from a user script, the class name includes
# __main__.
env = os.environ.copy()
paths = [os.path.dirname(astropy.__path__[0])] + sys.path
env["PYTHONPATH"] = os.pathsep.join(paths)
script = tmp_path / "rename.py"
with open(script, "w") as f:
f.write(MODEL_RENAME_CODE)
output = subprocess.check_output([sys.executable, script], env=env)
assert output.splitlines() == MODEL_RENAME_EXPECTED.splitlines()
@pytest.mark.parametrize(
"model_class",
[models.Gaussian1D, models.Polynomial1D, models.Shift, models.Tabular1D],
)
def test_rename_1d(model_class):
new_model = model_class.rename(name="Test1D")
assert new_model.name == "Test1D"
@pytest.mark.parametrize(
"model_class", [models.Gaussian2D, models.Polynomial2D, models.Tabular2D]
)
def test_rename_2d(model_class):
new_model = model_class.rename(name="Test2D")
assert new_model.name == "Test2D"
def test_fix_inputs_integer():
"""
Tests that numpy integers can be passed as dictionary keys to fix_inputs
Issue #11358
"""
m = models.Identity(2)
mf = models.fix_inputs(m, {1: 22})
assert mf(1) == (1, 22)
mf_int32 = models.fix_inputs(m, {np.int32(1): 33})
assert mf_int32(1) == (1, 33)
mf_int64 = models.fix_inputs(m, {np.int64(1): 44})
assert mf_int64(1) == (1, 44)
def test_fix_inputs_empty_dict():
"""
Tests that empty dictionary can be passed to fix_inputs
Issue #11355
"""
m = models.Identity(2)
mf = models.fix_inputs(m, {})
assert mf(1, 2) == (1, 2)
def test_rename_inputs_outputs():
g2 = models.Gaussian2D(10, 2, 3, 1, 2)
assert g2.inputs == ("x", "y")
assert g2.outputs == ("z",)
MESSAGE = r"Expected .* number of .*, got .*"
with pytest.raises(ValueError, match=MESSAGE):
g2.inputs = ("w",)
with pytest.raises(ValueError, match=MESSAGE):
g2.outputs = ("w", "e")
def test__prepare_output_single_model():
model = models.Gaussian1D()
# No broadcast
assert (
np.array([1, 2]) == model._prepare_output_single_model(np.array([1, 2]), None)
).all()
# Broadcast to scalar
assert model._prepare_output_single_model(np.array([1]), ()) == 1
assert model._prepare_output_single_model(np.asanyarray(2), ()) == 2
# Broadcast reshape
output = np.array([[1, 2, 3], [4, 5, 6]])
reshape = np.array([[1, 2], [3, 4], [5, 6]])
assert (output == model._prepare_output_single_model(output, (2, 3))).all()
assert (reshape == model._prepare_output_single_model(output, (3, 2))).all()
# Broadcast reshape scalar
assert model._prepare_output_single_model(np.array([1]), (1, 2)) == 1
assert model._prepare_output_single_model(np.asanyarray(2), (3, 4)) == 2
# Fail to broadcast
assert (output == model._prepare_output_single_model(output, (1, 2))).all()
assert (output == model._prepare_output_single_model(output, (3, 4))).all()
def test_prepare_outputs_mixed_broadcast():
"""
Tests that _prepare_outputs_single_model does not fail when a smaller
array is passed as first input, but output is broadcast to larger
array.
Issue #10170
"""
model = models.Gaussian2D(1, 2, 3, 4, 5)
output = model([1, 2], 3)
assert output.shape == (2,)
np.testing.assert_array_equal(output, [0.9692332344763441, 1.0])
output = model(4, [5, 6])
assert output.shape == (2,)
np.testing.assert_allclose(output, [0.8146473164114145, 0.7371233743916278])
def test_prepare_outputs_complex_reshape():
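# fix_inputs pins the third input of the Identity|Mapping chain to 22, so `mf`
# takes two inputs; the trailing Mapping then selects two of the three outputs.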
x = np.array(
[
[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15],
]
)
y = np.array(
[
[16, 17, 18, 19, 20],
[21, 22, 23, 24, 25],
[26, 27, 28, 29, 30],
]
)
m = models.Identity(3) | models.Mapping((2, 1, 0))
m.bounding_box = ((0, 100), (0, 200), (0, 50))
mf = models.fix_inputs(m, {2: 22})
t = mf | models.Mapping((2, 1), n_inputs=3)
output = mf(1, 2)
assert output == (22, 2, 1)
output = t(1, 2)
assert output == (1, 2)
output = t(x, y)
assert len(output) == 2
np.testing.assert_array_equal(output[0], x)
np.testing.assert_array_equal(output[1], y)
m = models.Identity(3) | models.Mapping((0, 1, 2))
m.bounding_box = ((0, 100), (0, 200), (0, 50))
mf = models.fix_inputs(m, {2: 22})
t = mf | models.Mapping((0, 1), n_inputs=3)
output = mf(1, 2)
assert output == (1, 2, 22)
output = t(1, 2)
assert output == (1, 2)
output = t(x, y)
assert len(output) == 2
np.testing.assert_array_equal(output[0], x)
np.testing.assert_array_equal(output[1], y)
def test_prepare_outputs_single_entry_vector():
"""
jwst and gwcs both require that single entry vectors produce single
entry output vectors, not scalars. This tests for that behavior.
"""
model = models.Gaussian2D(1, 2, 3, 4, 5)
output = model(np.array([1]), np.array([2]))
assert output.shape == (1,)
np.testing.assert_allclose(output, [0.9500411305585278])
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.filterwarnings("ignore: Using a non-tuple")
def test_prepare_outputs_sparse_grid():
"""
Test to show that #11060 has been solved.
"""
shape = (3, 3)
data = np.arange(np.prod(shape)).reshape(shape) * u.m / u.s
points_unit = u.pix
points = [np.arange(size) * points_unit for size in shape]
kwargs = {
"bounds_error": False,
"fill_value": np.nan,
"method": "nearest",
}
transform = models.Tabular2D(points, data, **kwargs)
truth = (
np.array(
[
[0.0, 1.0, 2.0],
[3.0, 4.0, 5.0],
[6.0, 7.0, 8.0],
]
)
* u.m
/ u.s
)
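# With sparse=True, np.meshgrid returns broadcastable arrays of shape (3, 1) and
# (1, 3) rather than full (3, 3) grids; Tabular2D should handle both forms.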
points = np.meshgrid(np.arange(3), np.arange(3), indexing="ij", sparse=True)
x = points[0] * u.pix
y = points[1] * u.pix
value = transform(x, y)
assert (value == truth).all()
points = (
np.meshgrid(np.arange(3), np.arange(3), indexing="ij", sparse=False) * u.pix
)
value = transform(*points)
assert (value == truth).all()
def test_coerce_units():
model = models.Polynomial1D(1, c0=1, c1=2)
MESSAGE = r"Can only apply 'add' function to dimensionless quantities when other .*"
with pytest.raises(u.UnitsError, match=MESSAGE):
model(u.Quantity(10, u.m))
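# coerce_units wraps the model so that Quantity inputs in the specified units are
# accepted by the otherwise unitless model, and/or attaches the given return units.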
with_input_units = model.coerce_units({"x": u.m})
result = with_input_units(u.Quantity(10, u.m))
assert np.isclose(result, 21.0)
with_input_units_tuple = model.coerce_units((u.m,))
result = with_input_units_tuple(u.Quantity(10, u.m))
assert np.isclose(result, 21.0)
with_return_units = model.coerce_units(return_units={"y": u.s})
result = with_return_units(10)
assert np.isclose(result.value, 21.0)
assert result.unit == u.s
with_return_units_tuple = model.coerce_units(return_units=(u.s,))
result = with_return_units_tuple(10)
assert np.isclose(result.value, 21.0)
assert result.unit == u.s
with_both = model.coerce_units({"x": u.m}, {"y": u.s})
result = with_both(u.Quantity(10, u.m))
assert np.isclose(result.value, 21.0)
assert result.unit == u.s
with pytest.raises(
ValueError, match=r"input_units keys.*do not match model inputs"
):
model.coerce_units({"q": u.m})
with pytest.raises(ValueError, match=r"input_units length does not match n_inputs"):
model.coerce_units((u.m, u.s))
model_with_existing_input_units = models.BlackBody()
with pytest.raises(
ValueError,
match=r"Cannot specify input_units for model with existing input units",
):
model_with_existing_input_units.coerce_units({"x": u.m})
with pytest.raises(
ValueError, match=r"return_units keys.*do not match model outputs"
):
model.coerce_units(return_units={"q": u.m})
with pytest.raises(
ValueError, match=r"return_units length does not match n_outputs"
):
model.coerce_units(return_units=(u.m, u.s))
def test_bounding_box_general_inverse():
model = NonFittableModel(42.5)
MESSAGE = r"No bounding box is defined for this model"
with pytest.raises(NotImplementedError, match=MESSAGE):
model.bounding_box
model.bounding_box = ()
assert model.bounding_box.bounding_box() == ()
model.inverse = NonFittableModel(3.14)
inverse_model = model.inverse
with pytest.raises(NotImplementedError, match=MESSAGE):
inverse_model.bounding_box
def test__add_special_operator():
sop_name = "name"
sop = "value"
key = _add_special_operator(sop_name, "value")
assert key[0] == sop_name
assert key[1] == SPECIAL_OPERATORS._unique_id
assert key in SPECIAL_OPERATORS
assert SPECIAL_OPERATORS[key] == sop
def test_print_special_operator_CompoundModel(capsys):
"""
Test that issue #11310 has been fixed
"""
model = convolve_models(models.Sersic2D(), models.Gaussian2D())
with astropy.conf.set_temp("max_width", 80):
# fmt: off
assert str(model) == (
"Model: CompoundModel\n"
"Inputs: ('x', 'y')\n"
"Outputs: ('z',)\n"
"Model set size: 1\n"
"Expression: convolve_fft (([0]), ([1]))\n"
"Components: \n"
" [0]: <Sersic2D(amplitude=1., r_eff=1., n=4., "
"x_0=0., y_0=0., ellip=0., theta=0.)>\n"
"\n"
" [1]: <Gaussian2D(amplitude=1., x_mean=0., y_mean=0., "
"x_stddev=1., y_stddev=1., theta=0.)>\n"
"Parameters:\n"
" amplitude_0 r_eff_0 n_0 x_0_0 y_0_0 ... y_mean_1 x_stddev_1 y_stddev_1 theta_1\n"
" ----------- ------- --- ----- ----- ... -------- ---------- ---------- -------\n"
" 1.0 1.0 4.0 0.0 0.0 ... 0.0 1.0 1.0 0.0"
)
# fmt: on
def test__validate_input_shape():
model = models.Gaussian1D()
model._n_models = 2
_input = np.array(
[
[1, 2, 3],
[4, 5, 6],
]
)
# Successful validation
assert model._validate_input_shape(_input, 0, model.inputs, 1, False) == (2, 3)
# Fail number of axes
MESSAGE = r"For model_set_axis=2, all inputs must be at least 3-dimensional"
with pytest.raises(ValueError, match=MESSAGE):
model._validate_input_shape(_input, 0, model.inputs, 2, True)
# Fail number of models (has argname)
MESSAGE = r"Input argument '.*' does not have the correct dimensions in .*"
with pytest.raises(ValueError, match=MESSAGE):
model._validate_input_shape(_input, 0, model.inputs, 1, True)
# Fail number of models (no argname)
with pytest.raises(ValueError, match=MESSAGE):
model._validate_input_shape(_input, 0, [], 1, True)
def test__validate_input_shapes():
model = models.Gaussian1D()
model._n_models = 2
inputs = [mk.MagicMock() for _ in range(3)]
argnames = mk.MagicMock()
model_set_axis = mk.MagicMock()
all_shapes = [mk.MagicMock() for _ in inputs]
# Successful validation
with mk.patch.object(
Model, "_validate_input_shape", autospec=True, side_effect=all_shapes
) as mkValidate:
with mk.patch.object(core, "check_broadcast", autospec=True) as mkCheck:
assert mkCheck.return_value == model._validate_input_shapes(
inputs, argnames, model_set_axis
)
assert mkCheck.call_args_list == [mk.call(*all_shapes)]
assert mkValidate.call_args_list == [
mk.call(model, _input, idx, argnames, model_set_axis, True)
for idx, _input in enumerate(inputs)
]
# Fail check_broadcast
MESSAGE = r"All inputs must have identical shapes or must be scalars"
with mk.patch.object(
Model, "_validate_input_shape", autospec=True, side_effect=all_shapes
) as mkValidate:
with mk.patch.object(
core, "check_broadcast", autospec=True, return_value=None
) as mkCheck:
with pytest.raises(ValueError, match=MESSAGE):
model._validate_input_shapes(inputs, argnames, model_set_axis)
assert mkCheck.call_args_list == [mk.call(*all_shapes)]
assert mkValidate.call_args_list == [
mk.call(model, _input, idx, argnames, model_set_axis, True)
for idx, _input in enumerate(inputs)
]
def test__remove_axes_from_shape():
model = models.Gaussian1D()
# len(shape) == 0
assert model._remove_axes_from_shape((), mk.MagicMock()) == ()
# axis < 0
assert model._remove_axes_from_shape((1, 2, 3), -1) == (1, 2)
assert model._remove_axes_from_shape((1, 2, 3), -2) == (1, 3)
assert model._remove_axes_from_shape((1, 2, 3), -3) == (2, 3)
# axis >= len(shape)
assert model._remove_axes_from_shape((1, 2, 3), 3) == ()
assert model._remove_axes_from_shape((1, 2, 3), 4) == ()
# 0 <= axis < len(shape)
assert model._remove_axes_from_shape((1, 2, 3), 0) == (2, 3)
assert model._remove_axes_from_shape((1, 2, 3), 1) == (3,)
assert model._remove_axes_from_shape((1, 2, 3), 2) == ()
def test_get_bounding_box():
model = models.Const2D(2)
# No with_bbox
assert model.get_bounding_box(False) is None
# No bounding_box
MESSAGE = r"No bounding box is defined for this model"
with pytest.raises(NotImplementedError, match=MESSAGE):
model.bounding_box
assert model.get_bounding_box(True) is None
# Normal bounding_box
model.bounding_box = ((0, 1), (0, 1))
assert not isinstance(model.bounding_box, CompoundBoundingBox)
assert model.get_bounding_box(True) == ((0, 1), (0, 1))
# CompoundBoundingBox with no removal
bbox = CompoundBoundingBox.validate(
model,
{(1,): ((-1, 0), (-1, 0)), (2,): ((0, 1), (0, 1))},
selector_args=[("y", False)],
)
model.bounding_box = bbox
assert isinstance(model.bounding_box, CompoundBoundingBox)
# Get using argument not with_bbox
assert model.get_bounding_box(True) == bbox
# Get using with_bbox not argument
assert model.get_bounding_box((1,)) == ((-1, 0), (-1, 0))
assert model.get_bounding_box((2,)) == ((0, 1), (0, 1))
def test_compound_bounding_box():
model = models.Gaussian1D()
truth = models.Gaussian1D()
bbox1 = CompoundBoundingBox.validate(
model, {(1,): (-1, 0), (2,): (0, 1)}, selector_args=[("x", False)]
)
bbox2 = CompoundBoundingBox.validate(
model, {(-0.5,): (-1, 0), (0.5,): (0, 1)}, selector_args=[("x", False)]
)
# Using with_bounding_box to pass a selector
model.bounding_box = bbox1
assert model(-0.5) == truth(-0.5)
assert model(-0.5, with_bounding_box=(1,)) == truth(-0.5)
assert np.isnan(model(-0.5, with_bounding_box=(2,)))
assert model(0.5) == truth(0.5)
assert model(0.5, with_bounding_box=(2,)) == truth(0.5)
assert np.isnan(model(0.5, with_bounding_box=(1,)))
# Using argument value to pass bounding_box
model.bounding_box = bbox2
assert model(-0.5) == truth(-0.5)
assert model(-0.5, with_bounding_box=True) == truth(-0.5)
assert model(0.5) == truth(0.5)
assert model(0.5, with_bounding_box=True) == truth(0.5)
MESSAGE = r"No bounding box is defined for selector: .*"
with pytest.raises(RuntimeError, match=MESSAGE):
model(0, with_bounding_box=True)
model1 = models.Gaussian1D()
truth1 = models.Gaussian1D()
model2 = models.Const1D(2)
truth2 = models.Const1D(2)
model = model1 + model2
truth = truth1 + truth2
assert isinstance(model, CompoundModel)
model.bounding_box = bbox1
assert model(-0.5) == truth(-0.5)
assert model(-0.5, with_bounding_box=1) == truth(-0.5)
assert np.isnan(model(-0.5, with_bounding_box=(2,)))
assert model(0.5) == truth(0.5)
assert model(0.5, with_bounding_box=2) == truth(0.5)
assert np.isnan(model(0.5, with_bounding_box=(1,)))
model.bounding_box = bbox2
assert model(-0.5) == truth(-0.5)
assert model(-0.5, with_bounding_box=True) == truth(-0.5)
assert model(0.5) == truth(0.5)
assert model(0.5, with_bounding_box=True) == truth(0.5)
with pytest.raises(RuntimeError, match=MESSAGE):
model(0, with_bounding_box=True)
def test_bind_bounding_box():
model = models.Polynomial2D(3)
bbox = ((-1, 1), (-2, 2))
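# With the default 'C' order the tuples are interpreted in reverse input order
# (last input first), whereas order='F' follows the input order (x first, then y).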
bind_bounding_box(model, bbox)
assert model.get_bounding_box() is not None
assert model.bounding_box == bbox
assert model.bounding_box["x"] == (-2, 2)
assert model.bounding_box["y"] == (-1, 1)
bind_bounding_box(model, bbox, order="F")
assert model.get_bounding_box() is not None
assert model.bounding_box == bbox
assert model.bounding_box["x"] == (-1, 1)
assert model.bounding_box["y"] == (-2, 2)
def test_bind_compound_bounding_box_using_with_bounding_box_select():
"""
This demonstrates how to bind multiple bounding_boxes that are
selectable using `with_bounding_box`; note that there must be a
fall-back to the implicit selector.
"""
model = models.Gaussian1D()
truth = models.Gaussian1D()
bbox = (0, 1)
MESSAGE = r"'tuple' object has no attribute 'items"
with pytest.raises(AttributeError, match=MESSAGE):
bind_compound_bounding_box(model, bbox, "x")
bbox = {0: (-1, 0), 1: (0, 1)}
bind_compound_bounding_box(model, bbox, [("x", False)])
# No bounding box
assert model(-0.5) == truth(-0.5)
assert model(0.5) == truth(0.5)
assert model(0) == truth(0)
assert model(1) == truth(1)
# `with_bounding_box` selects the box explicitly, since `-0.5` is not a key
assert model(-0.5, with_bounding_box=0) == truth(-0.5)
assert np.isnan(model(-0.5, with_bounding_box=1))
# `with_bounding_box` selects the box explicitly, since `0.5` is not a key
assert model(0.5, with_bounding_box=1) == truth(0.5)
assert np.isnan(model(0.5, with_bounding_box=(0,)))
# Fall back onto implicit selector
assert model(0, with_bounding_box=True) == truth(0)
assert model(1, with_bounding_box=True) == truth(1)
# Attempt to fall-back on implicit selector, but no bounding_box
MESSAGE = r"No bounding box is defined for selector: .*"
with pytest.raises(RuntimeError, match=MESSAGE):
model(0.5, with_bounding_box=True)
# Override implicit selector
assert np.isnan(model(1, with_bounding_box=0))
def test_fix_inputs_compound_bounding_box():
base_model = models.Gaussian2D(1, 2, 3, 4, 5)
bbox = {2.5: (-1, 1), 3.14: (-7, 3)}
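# The dictionary is keyed on the fixed input value, so fixing an input at 2.5
# selects the (-1, 1) interval for the remaining free input.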
model = fix_inputs(base_model, {"y": 2.5}, bounding_boxes=bbox)
assert model.bounding_box == (-1, 1)
model = fix_inputs(base_model, {"x": 2.5}, bounding_boxes=bbox)
assert model.bounding_box == (-1, 1)
model = fix_inputs(
base_model, {"y": 2.5}, bounding_boxes=bbox, selector_args=(("y", True),)
)
assert model.bounding_box == (-1, 1)
model = fix_inputs(
base_model, {"x": 2.5}, bounding_boxes=bbox, selector_args=(("x", True),)
)
assert model.bounding_box == (-1, 1)
model = fix_inputs(
base_model, {"x": 2.5}, bounding_boxes=bbox, selector_args=((0, True),)
)
assert model.bounding_box == (-1, 1)
base_model = models.Identity(4)
bbox = {(2.5, 1.3): ((-1, 1), (-3, 3)), (2.5, 2.71): ((-3, 3), (-1, 1))}
model = fix_inputs(base_model, {"x0": 2.5, "x1": 1.3}, bounding_boxes=bbox)
assert model.bounding_box == ((-1, 1), (-3, 3))
model = fix_inputs(
base_model,
{"x0": 2.5, "x1": 1.3},
bounding_boxes=bbox,
selector_args=(("x0", True), ("x1", True)),
)
assert model.bounding_box == ((-1, 1), (-3, 3))
model = fix_inputs(
base_model,
{"x0": 2.5, "x1": 1.3},
bounding_boxes=bbox,
selector_args=((0, True), (1, True)),
)
assert model.bounding_box == ((-1, 1), (-3, 3))
def test_model_copy_with_bounding_box():
model = models.Polynomial2D(2)
bbox = ModelBoundingBox.validate(model, ((-0.5, 1047.5), (-0.5, 2047.5)), order="F")
# No bbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert model_copy.get_bounding_box() is None
assert model.get_bounding_box() is None
# with bbox
model.bounding_box = bbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert id(model_copy.bounding_box) != id(model.bounding_box)
for index, interval in model.bounding_box.intervals.items():
interval_copy = model_copy.bounding_box.intervals[index]
assert interval == interval_copy
assert id(interval) != id(interval_copy)
# add model to compound model
model1 = model | models.Identity(1)
model_copy = model1.copy()
assert id(model_copy) != id(model1)
assert model_copy.get_bounding_box() is None
assert model1.get_bounding_box() is None
def test_compound_model_copy_with_bounding_box():
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ("x", "y", "slit_id")
bbox = ModelBoundingBox.validate(
model, ((-0.5, 1047.5), (-0.5, 2047.5), (-np.inf, np.inf)), order="F"
)
# No bbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert model_copy.get_bounding_box() is None
assert model.get_bounding_box() is None
# with bbox
model.bounding_box = bbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert id(model_copy.bounding_box) != id(model.bounding_box)
for index, interval in model.bounding_box.intervals.items():
interval_copy = model_copy.bounding_box.intervals[index]
assert interval == interval_copy
assert id(interval) != id(interval_copy)
# add model to compound model
model1 = model | models.Identity(3)
model_copy = model1.copy()
assert id(model_copy) != id(model1)
assert model_copy.get_bounding_box() is None
assert model1.get_bounding_box() is None
def test_model_copy_with_compound_bounding_box():
model = models.Polynomial2D(2)
bbox = {(0,): (-0.5, 1047.5), (1,): (-0.5, 3047.5)}
cbbox = CompoundBoundingBox.validate(
model, bbox, selector_args=[("x", True)], order="F"
)
# No cbbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert model_copy.get_bounding_box() is None
assert model.get_bounding_box() is None
# with cbbox
model.bounding_box = cbbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert id(model_copy.bounding_box) != id(model.bounding_box)
assert model_copy.bounding_box.selector_args == model.bounding_box.selector_args
assert id(model_copy.bounding_box.selector_args) != id(
model.bounding_box.selector_args
)
for selector, bbox in model.bounding_box.bounding_boxes.items():
for index, interval in bbox.intervals.items():
interval_copy = model_copy.bounding_box.bounding_boxes[selector].intervals[
index
]
assert interval == interval_copy
assert id(interval) != id(interval_copy)
# add model to compound model
model1 = model | models.Identity(1)
model_copy = model1.copy()
assert id(model_copy) != id(model1)
assert model_copy.get_bounding_box() is None
assert model1.get_bounding_box() is None
def test_compound_model_copy_with_compound_bounding_box():
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ("x", "y", "slit_id")
bbox = {
(0,): ((-0.5, 1047.5), (-0.5, 2047.5)),
(1,): ((-0.5, 3047.5), (-0.5, 4047.5)),
}
cbbox = CompoundBoundingBox.validate(
model, bbox, selector_args=[("slit_id", True)], order="F"
)
# No cbbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert model_copy.get_bounding_box() is None
assert model.get_bounding_box() is None
# with cbbox
model.bounding_box = cbbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert id(model_copy.bounding_box) != id(model.bounding_box)
assert model_copy.bounding_box.selector_args == model.bounding_box.selector_args
assert id(model_copy.bounding_box.selector_args) != id(
model.bounding_box.selector_args
)
for selector, bbox in model.bounding_box.bounding_boxes.items():
for index, interval in bbox.intervals.items():
interval_copy = model_copy.bounding_box.bounding_boxes[selector].intervals[
index
]
assert interval == interval_copy
assert id(interval) != id(interval_copy)
# add model to compound model
model1 = model | models.Identity(3)
model_copy = model1.copy()
assert id(model_copy) != id(model1)
assert model_copy.get_bounding_box() is None
assert model1.get_bounding_box() is None
def test_compound_model_copy_user_attribute():
"""Regression test for issue #12370"""
model = models.Gaussian2D(100, 25, 25, 5, 5) | models.Identity(1)
model.xname = "x_mean" # user-defined attribute
assert hasattr(model, "xname")
assert model.xname == "x_mean"
model_copy = model.copy()
model_copy.xname  # attribute access must not raise
assert hasattr(model_copy, "xname")
assert model_copy.xname == "x_mean"
def test_model_mixed_array_scalar_bounding_box():
"""Regression test for issue #12319"""
model = models.Gaussian2D()
bbox = ModelBoundingBox.validate(model, ((-1, 1), (-np.inf, np.inf)), order="F")
model.bounding_box = bbox
x = np.array([-0.5, 0.5])
y = 0
# Everything works when it's all inside the bounding box
assert (model(x, y) == (model(x, y, with_bounding_box=True))).all()
def test_compound_model_mixed_array_scalar_bounding_box():
"""Regression test for issue #12319"""
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ("x", "y", "slit_id")
bbox = ModelBoundingBox.validate(
model, ((-0.5, 1047.5), (-0.5, 2047.5), (-np.inf, np.inf)), order="F"
)
model.bounding_box = bbox
x = np.array([1000, 1001])
y = np.array([2000, 2001])
slit_id = 0
# Everything works when it's all inside the bounding box
value0 = model(x, y, slit_id)
value1 = model(x, y, slit_id, with_bounding_box=True)
assert_equal(value0, value1)
def test_model_with_bounding_box_true_and_single_output():
"""Regression test for issue #12373"""
model = models.Mapping((1,))
x = [1, 2]
y = [3, 4]
# Check baseline
assert_equal(model(x, y), [3, 4])
# Check with_bounding_box=True should be the same
assert_equal(model(x, y, with_bounding_box=True), [3, 4])
model.bounding_box = ((-np.inf, np.inf), (-np.inf, np.inf))
# Check baseline
assert_equal(model(x, y), [3, 4])
# Check with_bounding_box=True should be the same
assert_equal(model(x, y, with_bounding_box=True), [3, 4])
def test_compound_model_with_bounding_box_true_and_single_output():
"""Regression test for issue #12373"""
model = models.Mapping((1,)) | models.Shift(1)
x = [1, 2]
y = [3, 4]
# Check baseline
assert_equal(model(x, y), [4, 5])
# Check with_bounding_box=True should be the same
assert_equal(model(x, y, with_bounding_box=True), [4, 5])
model.bounding_box = ((-np.inf, np.inf), (-np.inf, np.inf))
# Check baseline
assert_equal(model(x, y), [4, 5])
# Check with_bounding_box=True should be the same
assert_equal(model(x, y, with_bounding_box=True), [4, 5])
def test_bounding_box_pass_with_ignored():
"""Test the possibility of setting ignored variables in bounding box"""
model = models.Polynomial2D(2)
bbox = ModelBoundingBox.validate(model, (-1, 1), ignored=["y"])
model.bounding_box = bbox
assert model.bounding_box.bounding_box() == (-1, 1)
assert model.bounding_box == bbox
model = models.Polynomial2D(2)
bind_bounding_box(model, (-1, 1), ignored=["y"])
assert model.bounding_box.bounding_box() == (-1, 1)
assert model.bounding_box == bbox
def test_compound_bounding_box_pass_with_ignored():
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ("x", "y", "slit_id")
bbox = {
(0,): (-0.5, 1047.5),
(1,): (-0.5, 2047.5),
}
cbbox = CompoundBoundingBox.validate(
model, bbox, selector_args=[("slit_id", True)], ignored=["y"], order="F"
)
model.bounding_box = cbbox
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ("x", "y", "slit_id")
bind_compound_bounding_box(
model, bbox, selector_args=[("slit_id", True)], ignored=["y"], order="F"
)
assert model.bounding_box == cbbox
@pytest.mark.parametrize("int_type", [int, np.int32, np.int64, np.uint32, np.uint64])
def test_model_integer_indexing(int_type):
"""Regression for PR 12561; verify that compound model components
can be accessed by integer index"""
gauss = models.Gaussian2D()
airy = models.AiryDisk2D()
compound = gauss + airy
assert compound[int_type(0)] == gauss
assert compound[int_type(1)] == airy
def test_model_string_indexing():
"""Regression for PR 12561; verify that compound model components
can be accessed by indexing with model name"""
gauss = models.Gaussian2D()
gauss.name = "Model1"
airy = models.AiryDisk2D()
airy.name = "Model2"
compound = gauss + airy
assert compound["Model1"] == gauss
assert compound["Model2"] == airy
|
e985d4f8ee3a200c540e03ed389f862f5cecb185ee32f827c5731674c47da6c6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests models.parameters
"""
# pylint: disable=invalid-name
import functools
import itertools
import unittest.mock as mk
import numpy as np
import pytest
from astropy import units as u
from astropy.modeling import fitting, models
from astropy.modeling.core import FittableModel, Model
from astropy.modeling.parameters import (
InputParameterError,
Parameter,
_tofloat,
param_repr_oneline,
)
from astropy.utils.data import get_pkg_data_filename
from . import irafutil
def setter1(val):
return val
def setter2(val, model):
model.do_something(val)
return val * model.p
def getter1(val):
return val
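# A Parameter setter transforms the value on assignment and a getter on access;
# when the callable accepts a second argument it is also passed the model
# instance (setter2 above uses model.p and calls model.do_something).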
class SetterModel(FittableModel):
n_inputs = 2
n_outputs = 1
xc = Parameter(default=1, setter=setter1, getter=getter1)
yc = Parameter(default=1, setter=setter2, getter=getter1)
def do_something(self, v):
pass
def __init__(self, xc, yc, p):
self.p = p # p is a value intended to be used by the setter
super().__init__()
self.xc = xc
self.yc = yc
def evaluate(self, x, y, xc, yc):
return (x - xc) ** 2 + (y - yc) ** 2
class TParModel(Model):
"""
A toy model to test parameters machinery
"""
coeff = Parameter()
e = Parameter()
def __init__(self, coeff, e, **kwargs):
super().__init__(coeff=coeff, e=e, **kwargs)
@staticmethod
def evaluate(coeff, e):
pass
class MockModel(FittableModel):
alpha = Parameter(name="alpha", default=42)
@staticmethod
def evaluate(*args):
pass
def test__tofloat():
# iterable
value = _tofloat([1, 2, 3])
assert isinstance(value, np.ndarray)
assert (value == np.array([1, 2, 3])).all()
assert np.all([isinstance(val, float) for val in value])
value = _tofloat(np.array([1, 2, 3]))
assert isinstance(value, np.ndarray)
assert (value == np.array([1, 2, 3])).all()
assert np.all([isinstance(val, float) for val in value])
MESSAGE = r"Parameter of .* could not be converted to float"
with pytest.raises(InputParameterError, match=MESSAGE):
_tofloat("test")
# quantity
assert _tofloat(1 * u.m) == 1 * u.m
# 0-dimensional (scalar) array
value = _tofloat(np.asanyarray(3))
assert isinstance(value, float)
assert value == 3
# A regular number
value = _tofloat(3)
assert isinstance(value, float)
assert value == 3
value = _tofloat(3.0)
assert isinstance(value, float)
assert value == 3
value = _tofloat(np.float32(3))
assert isinstance(value, float)
assert value == 3
value = _tofloat(np.float64(3))
assert isinstance(value, float)
assert value == 3
value = _tofloat(np.int32(3))
assert isinstance(value, float)
assert value == 3
value = _tofloat(np.int64(3))
assert isinstance(value, float)
assert value == 3
# boolean
MESSAGE = r"Expected parameter to be of numerical type, not boolean"
with pytest.raises(InputParameterError, match=MESSAGE):
_tofloat(True)
with pytest.raises(InputParameterError, match=MESSAGE):
_tofloat(False)
# other
class Value:
pass
MESSAGE = r"Don't know how to convert parameter of .* to float"
with pytest.raises(InputParameterError, match=MESSAGE):
_tofloat(Value)
def test_parameter_properties():
"""Test if getting / setting of Parameter properties works."""
p = Parameter("alpha", default=1)
assert p.name == "alpha"
# Parameter names are immutable
with pytest.raises(AttributeError):
p.name = "beta"
assert p.fixed is False
p.fixed = True
assert p.fixed is True
assert p.tied is False
p.tied = lambda _: 0
p.tied = False
assert p.tied is False
assert p.min is None
p.min = 42
assert p.min == 42
p.min = None
assert p.min is None
assert p.max is None
p.max = 41
assert p.max == 41
def test_parameter_operators():
"""Test if the parameter arithmetic operators work."""
par = Parameter("alpha", default=42)
num = 42.0
val = 3
assert par - val == num - val
assert val - par == val - num
assert par / val == num / val
assert val / par == val / num
assert par**val == num**val
assert val**par == val**num
assert par < 45
assert par > 41
assert par <= par
assert par >= par
assert par == par
assert -par == -num
assert abs(par) == abs(num)
# Test inherited models
class M1(Model):
m1a = Parameter(default=1.0)
m1b = Parameter(default=5.0)
def evaluate():
pass
class M2(M1):
m2c = Parameter(default=11.0)
class M3(M2):
m3d = Parameter(default=20.0)
def test_parameter_inheritance():
mod = M3()
assert mod.m1a == 1.0
assert mod.m1b == 5.0
assert mod.m2c == 11.0
assert mod.m3d == 20.0
for key in ["m1a", "m1b", "m2c", "m3d"]:
assert key in mod.__dict__
assert mod.param_names == ("m1a", "m1b", "m2c", "m3d")
def test_param_metric():
mod = M3()
assert mod._param_metrics["m1a"]["slice"] == slice(0, 1)
assert mod._param_metrics["m1b"]["slice"] == slice(1, 2)
assert mod._param_metrics["m2c"]["slice"] == slice(2, 3)
assert mod._param_metrics["m3d"]["slice"] == slice(3, 4)
mod._parameters_to_array()
assert (mod._parameters == np.array([1.0, 5.0, 11.0, 20], dtype=np.float64)).all()
class TestParameters:
def setup_class(self):
"""
Unit tests for parameters
        Read an IRAF database file created by onedspec.identify. Use the
        information to create a 1D Chebyshev model and perform the same fit.
        Also create a Gaussian model.
"""
test_file = get_pkg_data_filename("data/idcompspec.fits")
        with open(test_file) as f:
            lines = f.read()
        reclist = lines.split("begin")
record = irafutil.IdentifyRecord(reclist[1])
self.icoeff = record.coeff
order = int(record.fields["order"])
self.model = models.Chebyshev1D(order - 1)
self.gmodel = models.Gaussian1D(2, mean=3, stddev=4)
self.linear_fitter = fitting.LinearLSQFitter()
self.x = record.x
self.y = record.z
self.yy = np.array([record.z, record.z])
def test_set_parameters_as_list(self):
"""Tests updating parameters using a list."""
self.model.parameters = [30, 40, 50, 60, 70]
assert (self.model.parameters == [30.0, 40.0, 50.0, 60, 70]).all()
def test_set_parameters_as_array(self):
"""Tests updating parameters using an array."""
self.model.parameters = np.array([3, 4, 5, 6, 7])
assert (self.model.parameters == [3.0, 4.0, 5.0, 6.0, 7.0]).all()
def test_set_as_tuple(self):
"""Tests updating parameters using a tuple."""
self.model.parameters = (1, 2, 3, 4, 5)
assert (self.model.parameters == [1, 2, 3, 4, 5]).all()
def test_set_model_attr_seq(self):
"""
        Tests updating the parameters attribute when a model's
        parameter (in this case c0) is updated.
"""
self.model.parameters = [0, 0.0, 0.0, 0, 0]
self.model.c0 = 7
assert (self.model.parameters == [7, 0.0, 0.0, 0, 0]).all()
def test_set_model_attr_num(self):
"""Update the parameter list when a model's parameter is updated."""
self.gmodel.amplitude = 7
assert (self.gmodel.parameters == [7, 3, 4]).all()
def test_set_item(self):
"""Update the parameters using indexing."""
self.model.parameters = [1, 2, 3, 4, 5]
tpar = self.model.parameters
tpar[0] = 10.0
self.model.parameters = tpar
assert (self.model.parameters == [10, 2, 3, 4, 5]).all()
assert self.model.c0 == 10
def test_wrong_size1(self):
"""
Tests raising an error when attempting to reset the parameters
using a list of a different size.
"""
MESSAGE = (
r"Input parameter values not compatible with the model parameters array: .*"
)
with pytest.raises(InputParameterError, match=MESSAGE):
self.model.parameters = [1, 2, 3]
def test_wrong_size2(self):
"""
        Tests raising an exception when attempting to update a model's
        parameter (in this case c0) with a sequence of the wrong size.
"""
MESSAGE = (
r"Value for parameter c0 does not match shape or size\nexpected by model .*"
r" vs .*"
)
with pytest.raises(InputParameterError, match=MESSAGE):
self.model.c0 = [1, 2, 3]
def test_wrong_shape(self):
"""
Tests raising an exception when attempting to update a model's
parameter and the new value has the wrong shape.
"""
MESSAGE = (
r"Value for parameter amplitude does not match shape or size\nexpected by"
r" model .* vs .*"
)
with pytest.raises(InputParameterError, match=MESSAGE):
self.gmodel.amplitude = [1, 2]
def test_par_against_iraf(self):
"""
        Test that the fitter modifies model.parameters.
        Uses an IRAF example.
"""
new_model = self.linear_fitter(self.model, self.x, self.y)
np.testing.assert_allclose(
new_model.parameters,
np.array(
[
4826.1066602783685,
952.8943813407858,
12.641236013982386,
-1.7910672553339604,
0.90252884366711317,
]
),
rtol=10 ** (-2),
)
def testPolynomial1D(self):
d = {"c0": 11, "c1": 12, "c2": 13, "c3": 14}
p1 = models.Polynomial1D(3, **d)
np.testing.assert_equal(p1.parameters, [11, 12, 13, 14])
def test_poly1d_multiple_sets(self):
p1 = models.Polynomial1D(3, n_models=3)
np.testing.assert_equal(
p1.parameters, [0.0, 0.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
)
np.testing.assert_array_equal(p1.c0, [0, 0, 0])
p1.c0 = [10, 10, 10]
np.testing.assert_equal(
p1.parameters, [10.0, 10.0, 10.0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
)
def test_par_slicing(self):
"""
Test assigning to a parameter slice
"""
p1 = models.Polynomial1D(3, n_models=3)
p1.c0[:2] = [10, 10]
np.testing.assert_equal(
p1.parameters, [10.0, 10.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
)
def test_poly2d(self):
p2 = models.Polynomial2D(degree=3)
p2.c0_0 = 5
np.testing.assert_equal(p2.parameters, [5, 0, 0, 0, 0, 0, 0, 0, 0, 0])
def test_poly2d_multiple_sets(self):
kw = {
"c0_0": [2, 3],
"c1_0": [1, 2],
"c2_0": [4, 5],
"c0_1": [1, 1],
"c0_2": [2, 2],
"c1_1": [5, 5],
}
p2 = models.Polynomial2D(2, **kw)
np.testing.assert_equal(p2.parameters, [2, 3, 1, 2, 4, 5, 1, 1, 2, 2, 5, 5])
def test_shift_model_parameters1d(self):
sh1 = models.Shift(2)
sh1.offset = 3
assert sh1.offset == 3
assert sh1.offset.value == 3
def test_scale_model_parametersnd(self):
sc1 = models.Scale([2, 2])
sc1.factor = [3, 3]
assert np.all(sc1.factor == [3, 3])
np.testing.assert_array_equal(sc1.factor.value, [3, 3])
def test_bounds(self):
# Valid __init__
param = Parameter(bounds=(1, 2))
assert param.bounds == (1, 2)
param = Parameter(min=1, max=2)
assert param.bounds == (1, 2)
# Errors __init__
MESSAGE = r"bounds may not be specified simultaneously with min or max .*"
with pytest.raises(ValueError, match=MESSAGE):
Parameter(bounds=(1, 2), min=1, name="test")
with pytest.raises(ValueError, match=MESSAGE):
Parameter(bounds=(1, 2), max=2, name="test")
with pytest.raises(ValueError, match=MESSAGE):
Parameter(bounds=(1, 2), min=1, max=2, name="test")
# Setters
param = Parameter(name="test", default=[1, 2, 3, 4])
assert param.bounds == (None, None) == param._bounds
# Set errors
MESSAGE = "{} value must be a number or a Quantity"
with pytest.raises(TypeError, match=MESSAGE.format("Min")):
param.bounds = ("test", None)
with pytest.raises(TypeError, match=MESSAGE.format("Max")):
param.bounds = (None, "test")
# Set number
param.bounds = (1, 2)
assert param.bounds == (1, 2) == param._bounds
# Set Quantity
param.bounds = (1 * u.m, 2 * u.m)
assert param.bounds == (1, 2) == param._bounds
def test_modify_value(self):
param = Parameter(name="test", default=[1, 2, 3])
assert (param.value == [1, 2, 3]).all()
# Errors
MESSAGE = r"Slice assignment outside the parameter dimensions for 'test'"
with pytest.raises(InputParameterError, match=MESSAGE):
param[slice(0, 0)] = 2
MESSAGE = r"Input dimension 3 invalid for 'test' parameter with dimension 1"
with pytest.raises(InputParameterError, match=MESSAGE):
param[3] = np.array([5])
# assignment of a slice
param[slice(0, 2)] = [4, 5]
assert (param.value == [4, 5, 3]).all()
# assignment of a value
param[2] = 6
assert (param.value == [4, 5, 6]).all()
def test__set_unit(self):
param = Parameter(name="test", default=[1, 2, 3])
assert param.unit is None
# No force Error (no existing unit)
MESSAGE = r"Cannot attach units to parameters that were .*"
with pytest.raises(ValueError, match=MESSAGE):
param._set_unit(u.m)
# Force
param._set_unit(u.m, True)
assert param.unit == u.m
# Force magnitude unit (mag=False)
MESSAGE = r"This parameter does not support the magnitude units such as .*"
with pytest.raises(ValueError, match=MESSAGE):
param._set_unit(u.ABmag, True)
# Force magnitude unit (mag=True)
param._mag = True
param._set_unit(u.ABmag, True)
assert param._unit == u.ABmag
# No force Error (existing unit)
MESSAGE = r"Cannot change the unit attribute directly, instead change the .*"
with pytest.raises(ValueError, match=MESSAGE):
param._set_unit(u.K)
def test_quantity(self):
param = Parameter(name="test", default=[1, 2, 3])
assert param.unit is None
assert param.quantity is None
param = Parameter(name="test", default=[1, 2, 3], unit=u.m)
assert param.unit == u.m
assert (param.quantity == np.array([1, 2, 3]) * u.m).all()
def test_shape(self):
# Array like
param = Parameter(name="test", default=[1, 2, 3, 4])
assert param.shape == (4,)
# Reshape error
MESSAGE = r"cannot reshape array of size 4 into shape .*"
with pytest.raises(ValueError, match=MESSAGE):
param.shape = (5,)
# Reshape success
param.shape = (2, 2)
assert param.shape == (2, 2)
assert (param.value == [[1, 2], [3, 4]]).all()
# Scalar
param = Parameter(name="test", default=1)
assert param.shape == ()
# Reshape error
MESSAGE = r"Cannot assign this shape to a scalar quantity"
with pytest.raises(ValueError, match=MESSAGE):
param.shape = (5,)
param.shape = (1,)
# single value
param = Parameter(name="test", default=np.array([1]))
assert param.shape == (1,)
# Reshape error
with pytest.raises(ValueError, match=MESSAGE):
param.shape = (5,)
param.shape = ()
def test_size(self):
param = Parameter(name="test", default=[1, 2, 3, 4])
assert param.size == 4
param = Parameter(name="test", default=[1])
assert param.size == 1
param = Parameter(name="test", default=1)
assert param.size == 1
def test_std(self):
param = Parameter(name="test", default=[1, 2, 3, 4])
assert param.std is None
assert param._std is None
param.std = 5
assert param.std == 5 == param._std
def test_fixed(self):
param = Parameter(name="test", default=[1, 2, 3, 4])
assert param.fixed is False
assert param._fixed is False
# Set error
MESSAGE = r"Value must be boolean"
with pytest.raises(ValueError, match=MESSAGE):
param.fixed = 3
# Set
param.fixed = True
assert param.fixed is True
assert param._fixed is True
def test_tied(self):
param = Parameter(name="test", default=[1, 2, 3, 4])
assert param.tied is False
assert param._tied is False
# Set error
MESSAGE = r"Tied must be a callable or set to False or None"
with pytest.raises(TypeError, match=MESSAGE):
param.tied = mk.NonCallableMagicMock()
# Set None
param.tied = None
assert param.tied is None
assert param._tied is None
# Set False
param.tied = False
assert param.tied is False
assert param._tied is False
# Set other
tied = mk.MagicMock()
param.tied = tied
assert param.tied == tied == param._tied
def test_validator(self):
param = Parameter(name="test", default=[1, 2, 3, 4])
assert param._validator is None
valid = mk.MagicMock()
param.validator(valid)
assert param._validator == valid
MESSAGE = r"This decorator method expects a callable.*"
with pytest.raises(ValueError, match=MESSAGE):
param.validator(mk.NonCallableMagicMock())
def test_validate(self):
param = Parameter(name="test", default=[1, 2, 3, 4])
assert param._validator is None
assert param.model is None
# Run without validator
param.validate(mk.MagicMock())
# Run with validator but no Model
validator = mk.MagicMock()
param.validator(validator)
assert param._validator == validator
param.validate(mk.MagicMock())
assert validator.call_args_list == []
# Full validate
param._model = mk.MagicMock()
value = mk.MagicMock()
param.validate(value)
assert validator.call_args_list == [mk.call(param._model, value)]
def test_copy(self):
param = Parameter(name="test", default=[1, 2, 3, 4])
copy_param = param.copy()
assert (param == copy_param).all()
assert id(param) != id(copy_param)
def test_model(self):
param = Parameter(name="test", default=[1, 2, 3, 4])
assert param.model is None
assert param._model is None
assert param._model_required is False
assert (param._value == [1, 2, 3, 4]).all()
setter = mk.MagicMock()
getter = mk.MagicMock()
param._setter = setter
param._getter = getter
# No Model Required
param._value = [5, 6, 7, 8]
model0 = mk.MagicMock()
setter0 = mk.MagicMock()
getter0 = mk.MagicMock()
with mk.patch.object(
Parameter, "_create_value_wrapper", side_effect=[setter0, getter0]
) as mkCreate:
param.model = model0
assert param.model == model0 == param._model
assert param._setter == setter0
assert param._getter == getter0
assert mkCreate.call_args_list == [
mk.call(setter, model0),
mk.call(getter, model0),
]
assert param._value == [5, 6, 7, 8]
param._setter = setter
param._getter = getter
# Model required
param._model_required = True
model1 = mk.MagicMock()
setter1 = mk.MagicMock()
getter1 = mk.MagicMock()
setter1.return_value = np.array([9, 10, 11, 12])
getter1.return_value = np.array([9, 10, 11, 12])
with mk.patch.object(
Parameter, "_create_value_wrapper", side_effect=[setter1, getter1]
) as mkCreate:
param.model = model1
assert param.model == model1 == param._model
assert param._setter == setter1
assert param._getter == getter1
assert mkCreate.call_args_list == [
mk.call(setter, model1),
mk.call(getter, model1),
]
assert (param.value == [9, 10, 11, 12]).all()
param._setter = setter
param._getter = getter
param._default = None
with mk.patch.object(
Parameter, "_create_value_wrapper", side_effect=[setter1, getter1]
) as mkCreate:
param.model = model1
assert param.model == model1 == param._model
assert param._setter == setter1
assert param._getter == getter1
assert mkCreate.call_args_list == [
mk.call(setter, model1),
mk.call(getter, model1),
]
assert param._value is None
def test_value(self):
param = Parameter(name="test", default=1)
assert not isinstance(param.value, np.ndarray)
assert param.value == 1
param = Parameter(name="test", default=[1])
assert not isinstance(param.value, np.ndarray)
assert param.value == 1
param = Parameter(name="test", default=[[1]])
assert not isinstance(param.value, np.ndarray)
assert param.value == 1
param = Parameter(name="test", default=np.array([1]))
assert not isinstance(param.value, np.ndarray)
assert param.value == 1
param = Parameter(name="test", default=[1, 2, 3])
assert isinstance(param.value, np.ndarray)
assert (param.value == [1, 2, 3]).all()
param = Parameter(name="test", default=[1], setter=setter1, getter=getter1)
assert not isinstance(param.value, np.ndarray)
assert param.value == 1
param = Parameter(name="test", default=[[1]], setter=setter1, getter=getter1)
assert not isinstance(param.value, np.ndarray)
assert param.value == 1
param = Parameter(
name="test", default=np.array([1]), setter=setter1, getter=getter1
)
assert not isinstance(param.value, np.ndarray)
assert param.value == 1
def test_raw_value(self):
param = Parameter(name="test", default=[1, 2, 3, 4])
# Normal case
assert (param._raw_value == param.value).all()
# Bad setter
param._setter = True
param._internal_value = 4
assert param._raw_value == 4
def test__create_value_wrapper(self):
param = Parameter(name="test", default=[1, 2, 3, 4])
# Bad ufunc
MESSAGE = r"A numpy.ufunc used for Parameter getter/setter .*"
with pytest.raises(TypeError, match=MESSAGE):
param._create_value_wrapper(np.add, mk.MagicMock())
# Good ufunc
with mk.patch(
"astropy.modeling.parameters._wrap_ufunc", autospec=True
) as mkWrap:
assert (
param._create_value_wrapper(np.negative, mk.MagicMock())
== mkWrap.return_value
)
assert mkWrap.call_args_list == [mk.call(np.negative)]
# None
assert param._create_value_wrapper(None, mk.MagicMock()) is None
# wrapper with one argument
def wrapper1(a):
pass
assert param._create_value_wrapper(wrapper1, mk.MagicMock()) == wrapper1
        # wrapper with two arguments
def wrapper2(a, b):
pass
# model is None
assert param._model_required is False
assert param._create_value_wrapper(wrapper2, None) == wrapper2
assert param._model_required is True
# model is not None
param._model_required = False
model = mk.MagicMock()
with mk.patch.object(functools, "partial", autospec=True) as mkPartial:
assert (
param._create_value_wrapper(wrapper2, model) == mkPartial.return_value
)
# wrapper with more than 2 arguments
def wrapper3(a, b, c):
pass
MESSAGE = r"Parameter getter/setter must be a function .*"
with pytest.raises(TypeError, match=MESSAGE):
param._create_value_wrapper(wrapper3, mk.MagicMock())
def test_bool(self):
# single value is true
param = Parameter(name="test", default=1)
assert param.value == 1
assert np.all(param)
if param:
assert True
else:
assert False
# single value is false
param = Parameter(name="test", default=0)
assert param.value == 0
assert not np.all(param)
if param:
assert False
else:
assert True
# vector value all true
param = Parameter(name="test", default=[1, 2, 3, 4])
assert np.all(param.value == [1, 2, 3, 4])
assert np.all(param)
if param:
assert True
else:
assert False
# vector value at least one false
param = Parameter(name="test", default=[1, 2, 0, 3, 4])
assert np.all(param.value == [1, 2, 0, 3, 4])
assert not np.all(param)
if param:
assert False
else:
assert True
def test_param_repr_oneline(self):
# Single value no units
param = Parameter(name="test", default=1)
assert param_repr_oneline(param) == "1."
# Vector value no units
param = Parameter(name="test", default=[1, 2, 3, 4])
assert param_repr_oneline(param) == "[1., 2., 3., 4.]"
# Single value units
param = Parameter(name="test", default=1 * u.m)
assert param_repr_oneline(param) == "1. m"
# Vector value units
param = Parameter(name="test", default=[1, 2, 3, 4] * u.m)
assert param_repr_oneline(param) == "[1., 2., 3., 4.] m"
def test_getter_setter(self):
msg = "setter and getter must both be input"
with pytest.raises(ValueError, match=msg):
Parameter(name="test", default=1, getter=getter1)
with pytest.raises(ValueError, match=msg):
Parameter(name="test", default=1, setter=setter1)
class TestMultipleParameterSets:
def setup_class(self):
self.x1 = np.arange(1, 10, 0.1)
self.y, self.x = np.mgrid[:10, :7]
self.x11 = np.array([self.x1, self.x1]).T
self.gmodel = models.Gaussian1D(
[12, 10], [3.5, 5.2], stddev=[0.4, 0.7], n_models=2
)
def test_change_par(self):
"""
        Test that changing a parameter for the whole model set propagates
        to param_sets.
"""
self.gmodel.amplitude = [1, 10]
np.testing.assert_almost_equal(
self.gmodel.param_sets,
np.array(
[
[1.0, 10],
[3.5, 5.2],
[0.4, 0.7],
]
),
)
        assert np.all(self.gmodel.parameters == [1.0, 10.0, 3.5, 5.2, 0.4, 0.7])
def test_change_par2(self):
"""
        Test that a change to a single parameter in one model of the set
        propagates to param_sets.
"""
self.gmodel.amplitude[0] = 11
np.testing.assert_almost_equal(
self.gmodel.param_sets,
np.array(
[
[11.0, 10],
[3.5, 5.2],
[0.4, 0.7],
]
),
)
        assert np.all(self.gmodel.parameters == [11.0, 10.0, 3.5, 5.2, 0.4, 0.7])
def test_change_parameters(self):
self.gmodel.parameters = [13, 10, 9, 5.2, 0.4, 0.7]
np.testing.assert_almost_equal(self.gmodel.amplitude.value, [13.0, 10.0])
np.testing.assert_almost_equal(self.gmodel.mean.value, [9.0, 5.2])
class TestParameterInitialization:
"""
    This suite of tests checks most if not all cases of instantiating a model
with parameters of different shapes/sizes and with different numbers of
parameter sets.
"""
def test_single_model_scalar_parameters(self):
t = TParModel(10, 1)
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(t.param_sets == [[10], [1]])
assert np.all(t.parameters == [10, 1])
assert t.coeff.shape == ()
assert t.e.shape == ()
def test_single_model_scalar_and_array_parameters(self):
t = TParModel(10, [1, 2])
assert len(t) == 1
assert t.model_set_axis is False
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert len(t.param_sets) == 2
assert np.all(t.param_sets[0] == [10])
assert np.all(t.param_sets[1] == [[1, 2]])
assert np.all(t.parameters == [10, 1, 2])
assert t.coeff.shape == ()
assert t.e.shape == (2,)
def test_single_model_1d_array_parameters(self):
t = TParModel([10, 20], [1, 2])
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(t.param_sets == [[[10, 20]], [[1, 2]]])
assert np.all(t.parameters == [10, 20, 1, 2])
assert t.coeff.shape == (2,)
assert t.e.shape == (2,)
def test_single_model_1d_array_different_length_parameters(self):
MESSAGE = (
r"Parameter .* of shape .* cannot be broadcast with parameter .* of"
r" shape .*"
)
with pytest.raises(InputParameterError, match=MESSAGE):
# Not broadcastable
TParModel([1, 2], [3, 4, 5])
def test_single_model_2d_array_parameters(self):
t = TParModel([[10, 20], [30, 40]], [[1, 2], [3, 4]])
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(
t.param_sets
== [
[[[10, 20], [30, 40]]],
[[[1, 2], [3, 4]]],
]
)
assert np.all(t.parameters == [10, 20, 30, 40, 1, 2, 3, 4])
assert t.coeff.shape == (2, 2)
assert t.e.shape == (2, 2)
def test_single_model_2d_non_square_parameters(self):
coeff = np.array(
[
[10, 20],
[30, 40],
[50, 60],
]
)
e = np.array([[1, 2], [3, 4], [5, 6]])
t = TParModel(coeff, e)
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(
t.param_sets
== [
[[[10, 20], [30, 40], [50, 60]]],
[[[1, 2], [3, 4], [5, 6]]],
]
)
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 1, 2, 3, 4, 5, 6])
assert t.coeff.shape == (3, 2)
assert t.e.shape == (3, 2)
t2 = TParModel(coeff.T, e.T)
assert len(t2) == 1
assert t2.model_set_axis is False
assert np.all(
t2.param_sets
== [
[[[10, 30, 50], [20, 40, 60]]],
[[[1, 3, 5], [2, 4, 6]]],
]
)
assert np.all(t2.parameters == [10, 30, 50, 20, 40, 60, 1, 3, 5, 2, 4, 6])
assert t2.coeff.shape == (2, 3)
assert t2.e.shape == (2, 3)
# Not broadcastable
MESSAGE = (
r"Parameter .* of shape .* cannot be broadcast with parameter .* of"
r" shape .*"
)
with pytest.raises(InputParameterError, match=MESSAGE):
TParModel(coeff, e.T)
with pytest.raises(InputParameterError, match=MESSAGE):
TParModel(coeff.T, e)
def test_single_model_2d_broadcastable_parameters(self):
t = TParModel([[10, 20, 30], [40, 50, 60]], [1, 2, 3])
assert len(t) == 1
assert t.model_set_axis is False
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(
t.param_sets[0]
== [
[[10, 20, 30], [40, 50, 60]],
]
)
assert np.all(t.param_sets[1] == [[1, 2, 3]])
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 1, 2, 3])
@pytest.mark.parametrize(
("p1", "p2"),
[
(1, 2),
(1, [2, 3]),
([1, 2], 3),
([1, 2, 3], [4, 5]),
([1, 2], [3, 4, 5]),
],
)
def test_two_model_incorrect_scalar_parameters(self, p1, p2):
with pytest.raises(InputParameterError, match=r".*"):
TParModel(p1, p2, n_models=2)
@pytest.mark.parametrize(
"kwargs",
[
{"n_models": 2},
{"model_set_axis": 0},
{"n_models": 2, "model_set_axis": 0},
],
)
def test_two_model_scalar_parameters(self, kwargs):
t = TParModel([10, 20], [1, 2], **kwargs)
assert len(t) == 2
assert t.model_set_axis == 0
assert np.all(t.param_sets == [[10, 20], [1, 2]])
assert np.all(t.parameters == [10, 20, 1, 2])
assert t.coeff.shape == (2,)
assert t.e.shape == (2,)
@pytest.mark.parametrize(
"kwargs",
[
{"n_models": 2},
{"model_set_axis": 0},
{"n_models": 2, "model_set_axis": 0},
],
)
def test_two_model_scalar_and_array_parameters(self, kwargs):
t = TParModel([10, 20], [[1, 2], [3, 4]], **kwargs)
assert len(t) == 2
assert t.model_set_axis == 0
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(t.param_sets[0] == [[10], [20]])
assert np.all(t.param_sets[1] == [[1, 2], [3, 4]])
assert np.all(t.parameters == [10, 20, 1, 2, 3, 4])
assert t.coeff.shape == (2,)
assert t.e.shape == (2, 2)
def test_two_model_1d_array_parameters(self):
t = TParModel([[10, 20], [30, 40]], [[1, 2], [3, 4]], n_models=2)
assert len(t) == 2
assert t.model_set_axis == 0
assert np.all(
t.param_sets
== [
[[10, 20], [30, 40]],
[[1, 2], [3, 4]],
]
)
assert np.all(t.parameters == [10, 20, 30, 40, 1, 2, 3, 4])
assert t.coeff.shape == (2, 2)
assert t.e.shape == (2, 2)
t2 = TParModel([[10, 20, 30], [40, 50, 60]], [[1, 2, 3], [4, 5, 6]], n_models=2)
assert len(t2) == 2
assert t2.model_set_axis == 0
assert np.all(
t2.param_sets
== [
[[10, 20, 30], [40, 50, 60]],
[[1, 2, 3], [4, 5, 6]],
]
)
assert np.all(t2.parameters == [10, 20, 30, 40, 50, 60, 1, 2, 3, 4, 5, 6])
assert t2.coeff.shape == (2, 3)
assert t2.e.shape == (2, 3)
def test_two_model_mixed_dimension_array_parameters(self):
MESSAGE = (
r"Parameter .* of shape .* cannot be broadcast with parameter .* of"
r" shape .*"
)
with pytest.raises(InputParameterError, match=MESSAGE):
# Can't broadcast different array shapes
TParModel(
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[9, 10, 11], [12, 13, 14]],
n_models=2,
)
t = TParModel(
[[[10, 20], [30, 40]], [[50, 60], [70, 80]]], [[1, 2], [3, 4]], n_models=2
)
assert len(t) == 2
assert t.model_set_axis == 0
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(t.param_sets[0] == [[[10, 20], [30, 40]], [[50, 60], [70, 80]]])
assert np.all(t.param_sets[1] == [[[1, 2]], [[3, 4]]])
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 70, 80, 1, 2, 3, 4])
assert t.coeff.shape == (2, 2, 2)
assert t.e.shape == (2, 2)
def test_two_model_2d_array_parameters(self):
t = TParModel(
[[[10, 20], [30, 40]], [[50, 60], [70, 80]]],
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
n_models=2,
)
assert len(t) == 2
assert t.model_set_axis == 0
assert np.all(
t.param_sets
== [
[[[10, 20], [30, 40]], [[50, 60], [70, 80]]],
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
]
)
assert np.all(
t.parameters == [10, 20, 30, 40, 50, 60, 70, 80, 1, 2, 3, 4, 5, 6, 7, 8]
)
assert t.coeff.shape == (2, 2, 2)
assert t.e.shape == (2, 2, 2)
def test_two_model_nonzero_model_set_axis(self):
# An example where the model set axis is the *last* axis of the
# parameter arrays
coeff = np.array([[[10, 20, 30], [30, 40, 50]], [[50, 60, 70], [70, 80, 90]]])
coeff = np.rollaxis(coeff, 0, 3)
e = np.array([[1, 2, 3], [3, 4, 5]])
e = np.rollaxis(e, 0, 2)
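        # After rolling, ``coeff`` has shape (2, 3, 2) and ``e`` has shape
        # (3, 2): the model-set axis (length 2) is now the last axis of each.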
t = TParModel(coeff, e, n_models=2, model_set_axis=-1)
assert len(t) == 2
assert t.model_set_axis == -1
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(
t.param_sets[0]
== [
[[10, 50], [20, 60], [30, 70]],
[[30, 70], [40, 80], [50, 90]],
]
)
assert np.all(t.param_sets[1] == [[[1, 3], [2, 4], [3, 5]]])
assert np.all(
t.parameters
== [10, 50, 20, 60, 30, 70, 30, 70, 40, 80, 50, 90, 1, 3, 2, 4, 3, 5]
)
assert t.coeff.shape == (2, 3, 2) # note change in api
assert t.e.shape == (3, 2) # note change in api
def test_wrong_number_of_params(self):
MESSAGE = r"Inconsistent dimensions for parameter .* for 2 model sets.*"
with pytest.raises(InputParameterError, match=MESSAGE):
TParModel(coeff=[[1, 2], [3, 4]], e=(2, 3, 4), n_models=2)
with pytest.raises(InputParameterError, match=MESSAGE):
TParModel(coeff=[[1, 2], [3, 4]], e=(2, 3, 4), model_set_axis=0)
def test_wrong_number_of_params2(self):
MESSAGE = r"All parameter values must be arrays of dimension at .*"
with pytest.raises(InputParameterError, match=MESSAGE):
TParModel(coeff=[[1, 2], [3, 4]], e=4, n_models=2)
with pytest.raises(InputParameterError, match=MESSAGE):
TParModel(coeff=[[1, 2], [3, 4]], e=4, model_set_axis=0)
def test_array_parameter1(self):
MESSAGE = r"All parameter values must be arrays of dimension at .*"
with pytest.raises(InputParameterError, match=MESSAGE):
TParModel(np.array([[1, 2], [3, 4]]), 1, model_set_axis=0)
def test_array_parameter2(self):
MESSAGE = r"Inconsistent dimensions for parameter .* for 2 model sets.*"
with pytest.raises(InputParameterError, match=MESSAGE):
TParModel(np.array([[1, 2], [3, 4]]), (1, 1, 11), model_set_axis=0)
def test_array_parameter4(self):
"""
Test multiple parameter model with array-valued parameters of the same
size as the number of parameter sets.
"""
t4 = TParModel([[1, 2], [3, 4]], [5, 6], model_set_axis=False)
assert len(t4) == 1
assert t4.coeff.shape == (2, 2)
assert t4.e.shape == (2,)
assert np.issubdtype(t4.param_sets.dtype, np.object_)
assert np.all(t4.param_sets[0] == [[1, 2], [3, 4]])
assert np.all(t4.param_sets[1] == [5, 6])
def test_non_broadcasting_parameters():
"""
Tests that in a model with 3 parameters that do not all mutually broadcast,
this is determined correctly regardless of what order the parameters are
in.
"""
a = 3
b = np.array([[1, 2, 3], [4, 5, 6]])
c = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
class TestModel(Model):
p1 = Parameter()
p2 = Parameter()
p3 = Parameter()
def evaluate(self, *args):
return
# a broadcasts with both b and c, but b does not broadcast with c
MESSAGE = (
r"Parameter '.*' of shape .* cannot be broadcast with parameter '.*' of"
r" shape .*"
)
for args in itertools.permutations((a, b, c)):
with pytest.raises(InputParameterError, match=MESSAGE):
TestModel(*args)
def test_setter():
pars = np.random.rand(20).reshape((10, 2))
model = SetterModel(xc=-1, yc=3, p=np.pi)
for x, y in pars:
np.testing.assert_almost_equal(model(x, y), (x + 1) ** 2 + (y - np.pi * 3) ** 2)
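def test_setter_scaled_internal_value():
    """Minimal illustrative sketch of the behaviour exercised above: because
    ``setter2`` multiplies the assigned value by ``model.p``, ``evaluate``
    sees ``yc * p`` rather than the value that was assigned.
    """
    model = SetterModel(xc=0, yc=1, p=2.0)
    # evaluate computes (x - xc)**2 + (y - yc_internal)**2, where the stored
    # yc_internal is 1 * 2.0, so the point (0, 2) lands exactly on the minimum
    np.testing.assert_almost_equal(model(0.0, 2.0), 0.0)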
|
658d5b3785ae577ca62908d64dcaef5eef2208e17cc9e840793bf00f035f8f08 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import json
import locale
import os
import urllib.error
from datetime import datetime
import numpy as np
import pytest
from astropy.io import fits
from astropy.utils import data, misc
from astropy.utils.exceptions import AstropyDeprecationWarning
def test_isiterable():
assert misc.isiterable(2) is False
assert misc.isiterable([2]) is True
assert misc.isiterable([1, 2, 3]) is True
assert misc.isiterable(np.array(2)) is False
assert misc.isiterable(np.array([1, 2, 3])) is True
def test_signal_number_to_name_no_failure():
    # Regression test for #5340: ensure signal_number_to_name throws no
    # AttributeError (it used ".iteritems()", which was removed in Python 3).
misc.signal_number_to_name(0)
@pytest.mark.remote_data
def test_api_lookup():
try:
strurl = misc.find_api_page("astropy.utils.misc", "dev", False, timeout=5)
objurl = misc.find_api_page(misc, "dev", False, timeout=5)
except urllib.error.URLError:
if os.environ.get("CI", False):
pytest.xfail("Timed out in CI")
else:
raise
assert strurl == objurl
assert (
strurl
== "http://devdocs.astropy.org/utils/index.html#module-astropy.utils.misc"
)
# Try a non-dev version
objurl = misc.find_api_page(misc, "v3.2.1", False, timeout=3)
assert (
objurl
== "https://docs.astropy.org/en/v3.2.1/utils/index.html#module-astropy.utils.misc"
)
def test_is_path_hidden_deprecation():
with pytest.warns(
AstropyDeprecationWarning, match="^The is_path_hidden function is deprecated"
):
misc.is_path_hidden("data")
# This is the only test that uses astropy/utils/tests/data/.hidden_file.txt
def test_skip_hidden():
path = data.get_pkg_data_path("data")
for root, dirs, files in os.walk(path):
assert ".hidden_file.txt" in files
assert "local.dat" in files
# break after the first level since the data dir contains some other
# subdirectories that don't have these files
break
with pytest.warns(
AstropyDeprecationWarning, match="^The walk_skip_hidden function is deprecated"
):
for root, dirs, files in misc.walk_skip_hidden(path):
assert ".hidden_file.txt" not in files
assert "local.dat" in files
break
def test_JsonCustomEncoder():
from astropy import units as u
assert json.dumps(np.arange(3), cls=misc.JsonCustomEncoder) == "[0, 1, 2]"
assert json.dumps(1 + 2j, cls=misc.JsonCustomEncoder) == "[1.0, 2.0]"
assert json.dumps({1, 2, 1}, cls=misc.JsonCustomEncoder) == "[1, 2]"
assert (
json.dumps(b"hello world \xc3\x85", cls=misc.JsonCustomEncoder)
== '"hello world \\u00c5"'
)
assert json.dumps({1: 2}, cls=misc.JsonCustomEncoder) == '{"1": 2}' # default
assert json.dumps({1: u.m}, cls=misc.JsonCustomEncoder) == '{"1": "m"}'
# Quantities
tmp = json.dumps({"a": 5 * u.cm}, cls=misc.JsonCustomEncoder)
newd = json.loads(tmp)
tmpd = {"a": {"unit": "cm", "value": 5.0}}
assert newd == tmpd
tmp2 = json.dumps({"a": np.arange(2) * u.cm}, cls=misc.JsonCustomEncoder)
newd = json.loads(tmp2)
tmpd = {"a": {"unit": "cm", "value": [0.0, 1.0]}}
assert newd == tmpd
tmp3 = json.dumps({"a": np.arange(2) * u.erg / u.s}, cls=misc.JsonCustomEncoder)
newd = json.loads(tmp3)
tmpd = {"a": {"unit": "erg / s", "value": [0.0, 1.0]}}
assert newd == tmpd
def test_JsonCustomEncoder_FITS_rec_from_files():
with fits.open(
fits.util.get_testdata_filepath("variable_length_table.fits")
) as hdul:
assert (
json.dumps(hdul[1].data, cls=misc.JsonCustomEncoder)
== "[[[45, 56], [11, 3]], [[11, 12, 13], [12, 4]]]"
)
with fits.open(fits.util.get_testdata_filepath("btable.fits")) as hdul:
assert (
json.dumps(hdul[1].data, cls=misc.JsonCustomEncoder)
== '[[1, "Sirius", -1.4500000476837158, "A1V"], '
'[2, "Canopus", -0.7300000190734863, "F0Ib"], '
'[3, "Rigil Kent", -0.10000000149011612, "G2V"]]'
)
with fits.open(fits.util.get_testdata_filepath("table.fits")) as hdul:
assert (
json.dumps(hdul[1].data, cls=misc.JsonCustomEncoder)
== '[["NGC1001", 11.100000381469727], '
'["NGC1002", 12.300000190734863], '
'["NGC1003", 15.199999809265137]]'
)
def test_set_locale():
# First, test if the required locales are available
current = locale.setlocale(locale.LC_ALL)
try:
locale.setlocale(locale.LC_ALL, "en_US.utf8")
locale.setlocale(locale.LC_ALL, "fr_FR.utf8")
except locale.Error as e:
pytest.skip(f"Locale error: {e}")
finally:
locale.setlocale(locale.LC_ALL, current)
date = datetime(2000, 10, 1, 0, 0, 0)
day_mon = date.strftime("%a, %b")
with misc._set_locale("en_US.utf8"):
assert date.strftime("%a, %b") == "Sun, Oct"
with misc._set_locale("fr_FR.utf8"):
assert date.strftime("%a, %b") == "dim., oct."
# Back to original
assert date.strftime("%a, %b") == day_mon
with misc._set_locale(current):
assert date.strftime("%a, %b") == day_mon
def test_dtype_bytes_or_chars():
assert misc.dtype_bytes_or_chars(np.dtype(np.float64)) == 8
assert misc.dtype_bytes_or_chars(np.dtype(object)) is None
assert misc.dtype_bytes_or_chars(np.dtype(np.int32)) == 4
assert misc.dtype_bytes_or_chars(np.array(b"12345").dtype) == 5
assert misc.dtype_bytes_or_chars(np.array("12345").dtype) == 5
|
527506d43d0af781f2c202b3882a95676513d9546bc327634d14954ee65a3541 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import base64
import contextlib
import errno
import hashlib
import io
import itertools
import os
import pathlib
import platform
import random
import shutil
import stat
import sys
import tempfile
import urllib.error
import urllib.parse
import urllib.request
import warnings
from concurrent.futures import ThreadPoolExecutor
from itertools import islice
from tempfile import NamedTemporaryFile, TemporaryDirectory
import py.path
import pytest
import astropy.utils.data
from astropy import units as _u # u is taken
from astropy.config import paths
from astropy.utils.data import (
CacheDamaged,
CacheMissingWarning,
_deltemps,
_get_download_cache_loc,
_tempfilestodel,
cache_contents,
cache_total_size,
check_download_cache,
check_free_space_in_dir,
clear_download_cache,
compute_hash,
conf,
download_file,
download_files_in_parallel,
export_download_cache,
get_cached_urls,
get_file_contents,
get_free_space_in_dir,
get_pkg_data_contents,
get_pkg_data_filename,
get_pkg_data_fileobj,
get_pkg_data_path,
get_readable_fileobj,
import_download_cache,
import_file_to_cache,
is_url,
is_url_in_cache,
)
from astropy.utils.exceptions import AstropyWarning
CI = os.environ.get("CI", "false") == "true"
TESTURL = "http://www.astropy.org"
TESTURL2 = "http://www.astropy.org/about.html"
TESTURL_SSL = "https://www.astropy.org"
TESTLOCAL = get_pkg_data_filename(os.path.join("data", "local.dat"))
# NOTE: Python can be built without bz2 or lzma.
from astropy.utils.compat.optional_deps import HAS_BZ2, HAS_LZMA
# For when we need "some" test URLs
FEW = 5
# For stress testing the locking system using multiprocessing
N_PARALLEL_HAMMER = 5 # as high as 500 to replicate a bug
# For stress testing the locking system using threads
# (cheaper, works with coverage)
N_THREAD_HAMMER = 10 # as high as 1000 to replicate a bug
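# Probe whether this platform can rename a directory while a file inside it is
# still open (historically not possible on Windows); later tests can use
# CAN_RENAME_DIRECTORY_IN_USE to adjust their expectations accordingly.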
def can_rename_directory_in_use():
with TemporaryDirectory() as d:
d1 = os.path.join(d, "a")
d2 = os.path.join(d, "b")
f1 = os.path.join(d1, "file")
os.mkdir(d1)
with open(f1, "w") as f:
f.write("some contents\n")
try:
with open(f1):
os.rename(d1, d2)
except PermissionError:
return False
else:
return True
CAN_RENAME_DIRECTORY_IN_USE = can_rename_directory_in_use()
def url_to(path):
return pathlib.Path(path).resolve().as_uri()
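# The fixtures below provide endless streams of local file:// URLs:
# ``valid_urls`` yields (url, expected_contents) pairs backed by freshly
# written files under ``tmp_path``, while ``invalid_urls`` yields URLs that
# point at paths which do not exist, so download tests run without network
# access.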
@pytest.fixture
def valid_urls(tmp_path):
def _valid_urls(tmp_path):
for i in itertools.count():
c = os.urandom(16).hex()
fn = tmp_path / f"valid_{i}"
with open(fn, "w") as f:
f.write(c)
u = url_to(fn)
yield u, c
return _valid_urls(tmp_path)
@pytest.fixture
def invalid_urls(tmp_path):
def _invalid_urls(tmp_path):
for i in itertools.count():
fn = tmp_path / f"invalid_{i}"
if not os.path.exists(fn):
yield url_to(fn)
return _invalid_urls(tmp_path)
@pytest.fixture
def temp_cache(tmp_path):
with paths.set_temp_cache(tmp_path):
yield None
check_download_cache()
def change_tree_permission(d, writable=False):
if writable:
dirperm = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
fileperm = stat.S_IRUSR | stat.S_IWUSR
else:
dirperm = stat.S_IRUSR | stat.S_IXUSR
fileperm = stat.S_IRUSR
for dirpath, dirnames, filenames in os.walk(d):
os.chmod(dirpath, dirperm)
for f in filenames:
os.chmod(os.path.join(dirpath, f), fileperm)
def is_dir_readonly(d):
try:
with NamedTemporaryFile(dir=d):
return False
except PermissionError:
return True
@contextlib.contextmanager
def readonly_dir(d):
try:
change_tree_permission(d, writable=False)
yield
finally:
change_tree_permission(d, writable=True)
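# Pre-populate a fresh cache with FEW downloaded files, then mark the whole
# tree read-only so tests can verify that cache lookups still succeed without
# writes (and skip when the platform cannot actually make the tree read-only).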
@pytest.fixture
def readonly_cache(tmp_path, valid_urls):
with TemporaryDirectory(dir=tmp_path) as d:
# other fixtures use the same tmp_path so we need a subdirectory
# to make into the cache
d = pathlib.Path(d)
with paths.set_temp_cache(d):
us = {u for u, c in islice(valid_urls, FEW)}
urls = {u: download_file(u, cache=True) for u in us}
files = set(d.iterdir())
with readonly_dir(d):
if not is_dir_readonly(d):
pytest.skip("Unable to make directory readonly")
yield urls
assert set(d.iterdir()) == files
check_download_cache()
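# Like ``readonly_cache``, but simulated: monkeypatch out every way the cache
# code could create a directory, so it behaves as if the filesystem were
# read-only even on platforms where real permission changes are unreliable.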
@pytest.fixture
def fake_readonly_cache(tmp_path, valid_urls, monkeypatch):
def no_mkdir(path, mode=None):
raise OSError(errno.EPERM, "os.mkdir monkeypatched out")
def no_mkdtemp(*args, **kwargs):
"""On Windows, mkdtemp uses mkdir in a loop and therefore hangs
with it monkeypatched out.
"""
raise OSError(errno.EPERM, "os.mkdtemp monkeypatched out")
def no_TemporaryDirectory(*args, **kwargs):
raise OSError(errno.EPERM, "_SafeTemporaryDirectory monkeypatched out")
with TemporaryDirectory(dir=tmp_path) as d:
# other fixtures use the same tmp_path so we need a subdirectory
# to make into the cache
d = pathlib.Path(d)
with paths.set_temp_cache(d):
us = {u for u, c in islice(valid_urls, FEW)}
urls = {u: download_file(u, cache=True) for u in us}
files = set(d.iterdir())
monkeypatch.setattr(os, "mkdir", no_mkdir)
monkeypatch.setattr(tempfile, "mkdtemp", no_mkdtemp)
monkeypatch.setattr(
astropy.utils.data, "_SafeTemporaryDirectory", no_TemporaryDirectory
)
yield urls
assert set(d.iterdir()) == files
check_download_cache()
def test_download_file_basic(valid_urls, temp_cache):
u, c = next(valid_urls)
assert get_file_contents(download_file(u, cache=False)) == c
assert not is_url_in_cache(u)
assert get_file_contents(download_file(u, cache=True)) == c # Cache miss
assert is_url_in_cache(u)
assert get_file_contents(download_file(u, cache=True)) == c # Cache hit
assert get_file_contents(download_file(u, cache=True, sources=[])) == c
def test_download_file_absolute_path(valid_urls, temp_cache):
def is_abs(p):
return p == os.path.abspath(p)
u, c = next(valid_urls)
assert is_abs(download_file(u, cache=False)) # no cache
assert is_abs(download_file(u, cache=True)) # not in cache
assert is_abs(download_file(u, cache=True)) # in cache
for k, v in cache_contents().items():
assert is_abs(v)
def test_unicode_url(valid_urls, temp_cache):
u, c = next(valid_urls)
unicode_url = "http://é—☃—è.com"
download_file(unicode_url, cache=False, sources=[u])
download_file(unicode_url, cache=True, sources=[u])
download_file(unicode_url, cache=True, sources=[])
assert is_url_in_cache(unicode_url)
assert unicode_url in cache_contents()
def test_too_long_url(valid_urls, temp_cache):
u, c = next(valid_urls)
long_url = "http://" + "a" * 256 + ".com"
download_file(long_url, cache=False, sources=[u])
download_file(long_url, cache=True, sources=[u])
download_file(long_url, cache=True, sources=[])
def test_case_collision(valid_urls, temp_cache):
u, c = next(valid_urls)
u2, c2 = next(valid_urls)
f1 = download_file("http://example.com/thing", cache=True, sources=[u])
f2 = download_file("http://example.com/THING", cache=True, sources=[u2])
assert f1 != f2
assert get_file_contents(f1) != get_file_contents(f2)
def test_domain_name_case(valid_urls, temp_cache):
u, c = next(valid_urls)
download_file("http://Example.com/thing", cache=True, sources=[u])
assert is_url_in_cache("http://EXAMPLE.com/thing")
download_file("http://EXAMPLE.com/thing", cache=True, sources=[])
assert is_url_in_cache("Http://example.com/thing")
download_file("Http://example.com/thing", cache=True, sources=[])
@pytest.mark.remote_data(source="astropy")
def test_download_nocache_from_internet():
fnout = download_file(TESTURL, cache=False)
assert os.path.isfile(fnout)
@pytest.fixture
def a_binary_file(tmp_path):
fn = tmp_path / "file"
b_contents = b"\xde\xad\xbe\xef"
with open(fn, "wb") as f:
f.write(b_contents)
yield fn, b_contents
@pytest.fixture
def a_file(tmp_path):
fn = tmp_path / "file.txt"
contents = "contents\n"
with open(fn, "w") as f:
f.write(contents)
yield fn, contents
def test_temp_cache(tmp_path):
dldir0 = _get_download_cache_loc()
check_download_cache()
with paths.set_temp_cache(tmp_path):
dldir1 = _get_download_cache_loc()
check_download_cache()
assert dldir1 != dldir0
dldir2 = _get_download_cache_loc()
check_download_cache()
assert dldir2 != dldir1
assert dldir2 == dldir0
# Check that things are okay even if we exit via an exception
class Special(Exception):
pass
try:
with paths.set_temp_cache(tmp_path):
dldir3 = _get_download_cache_loc()
check_download_cache()
assert dldir3 == dldir1
raise Special
except Special:
pass
dldir4 = _get_download_cache_loc()
check_download_cache()
assert dldir4 != dldir3
assert dldir4 == dldir0
@pytest.mark.parametrize("parallel", [False, True])
def test_download_with_sources_and_bogus_original(
valid_urls, invalid_urls, temp_cache, parallel
):
# This is a combined test because the parallel version triggered a nasty
# bug and I was trying to track it down by comparing with the non-parallel
# version. I think the bug was that the parallel downloader didn't respect
# temporary cache settings.
# Make a big list of test URLs
u, c = next(valid_urls)
# as tuples (URL, right_content, wrong_content)
urls = [(u, c, None)]
# where to download the contents
sources = {}
# Set up some URLs to download where the "true" URL is not in the sources
# list; make the true URL valid with different contents so we can tell if
# it was loaded by mistake.
for i, (um, c_bad) in enumerate(islice(valid_urls, FEW)):
assert not is_url_in_cache(um)
sources[um] = []
# For many of them the sources list starts with invalid URLs
for iu in islice(invalid_urls, i):
sources[um].append(iu)
u, c = next(valid_urls)
sources[um].append(u)
urls.append((um, c, c_bad))
# Now fetch them all
if parallel:
rs = download_files_in_parallel(
[u for (u, c, c_bad) in urls], cache=True, sources=sources
)
else:
rs = [
download_file(u, cache=True, sources=sources.get(u, None))
for (u, c, c_bad) in urls
]
assert len(rs) == len(urls)
for r, (u, c, c_bad) in zip(rs, urls):
assert get_file_contents(r) == c
assert get_file_contents(r) != c_bad
assert is_url_in_cache(u)
@pytest.mark.skipif(
(sys.platform.startswith("win") and CI), reason="flaky cache error on Windows CI"
)
def test_download_file_threaded_many(temp_cache, valid_urls):
"""Hammer download_file with multiple threaded requests.
The goal is to stress-test the locking system. Normal parallel downloading
also does this but coverage tools lose track of which paths are explored.
"""
urls = list(islice(valid_urls, N_THREAD_HAMMER))
with ThreadPoolExecutor(max_workers=len(urls)) as P:
r = list(P.map(lambda u: download_file(u, cache=True), [u for (u, c) in urls]))
check_download_cache()
assert len(r) == len(urls)
for r_, (u, c) in zip(r, urls):
assert get_file_contents(r_) == c
@pytest.mark.skipif(
(sys.platform.startswith("win") and CI), reason="flaky cache error on Windows CI"
)
def test_threaded_segfault(valid_urls):
"""Demonstrate urllib's segfault."""
def slurp_url(u):
with urllib.request.urlopen(u) as remote:
block = True
while block:
block = remote.read(1024)
urls = list(islice(valid_urls, N_THREAD_HAMMER))
with ThreadPoolExecutor(max_workers=len(urls)) as P:
list(P.map(lambda u: slurp_url(u), [u for (u, c) in urls]))
@pytest.mark.skipif(
(sys.platform.startswith("win") and CI), reason="flaky cache error on Windows CI"
)
def test_download_file_threaded_many_partial_success(
temp_cache, valid_urls, invalid_urls
):
"""Hammer download_file with multiple threaded requests.
Because some of these requests fail, the locking context manager is
exercised with exceptions as well as success returns. I do not expect many
surprises from the threaded version, but the process version gave trouble
here.
"""
urls = []
contents = {}
for (u, c), i in islice(zip(valid_urls, invalid_urls), N_THREAD_HAMMER):
urls.append(u)
contents[u] = c
urls.append(i)
def get(u):
try:
return download_file(u, cache=True)
except OSError:
return None
with ThreadPoolExecutor(max_workers=len(urls)) as P:
r = list(P.map(get, urls))
check_download_cache()
assert len(r) == len(urls)
for r_, u in zip(r, urls):
if u in contents:
assert get_file_contents(r_) == contents[u]
else:
assert r_ is None
def test_clear_download_cache(valid_urls):
u1, c1 = next(valid_urls)
download_file(u1, cache=True)
u2, c2 = next(valid_urls)
download_file(u2, cache=True)
assert is_url_in_cache(u2)
clear_download_cache(u2)
assert not is_url_in_cache(u2)
assert is_url_in_cache(u1)
u3, c3 = next(valid_urls)
f3 = download_file(u3, cache=True)
assert is_url_in_cache(u3)
clear_download_cache(f3)
assert not is_url_in_cache(u3)
assert is_url_in_cache(u1)
u4, c4 = next(valid_urls)
f4 = download_file(u4, cache=True)
assert is_url_in_cache(u4)
clear_download_cache(compute_hash(f4))
assert not is_url_in_cache(u4)
assert is_url_in_cache(u1)
def test_clear_download_multiple_references_doesnt_corrupt_storage(
temp_cache, tmp_path
):
"""Check that files with the same hash don't confuse the storage."""
content = "Test data; doesn't matter much.\n"
def make_url():
with NamedTemporaryFile("w", dir=tmp_path, delete=False) as f:
f.write(content)
url = url_to(f.name)
clear_download_cache(url)
filename = download_file(url, cache=True)
return url, filename
a_url, a_filename = make_url()
clear_download_cache(a_filename)
assert not is_url_in_cache(a_url)
f_url, f_filename = make_url()
g_url, g_filename = make_url()
assert f_url != g_url
assert is_url_in_cache(f_url)
assert is_url_in_cache(g_url)
clear_download_cache(f_url)
assert not is_url_in_cache(f_url)
assert is_url_in_cache(g_url)
assert os.path.exists(
g_filename
), "Contents should not be deleted while a reference exists"
clear_download_cache(g_url)
assert not os.path.exists(
g_filename
), "No reference exists any more, file should be deleted"
@pytest.mark.parametrize("use_cache", [False, True])
def test_download_file_local_cache_survives(tmp_path, temp_cache, use_cache):
"""Confirm that downloading a local file does not delete it.
When implemented with urlretrieve (rather than urlopen) local files are
not copied to create temporaries, so importing them to the cache deleted
the original from wherever it was in the filesystem. I lost some built-in
astropy data.
"""
fn = tmp_path / "file"
contents = "some text"
with open(fn, "w") as f:
f.write(contents)
u = url_to(fn)
f = download_file(u, cache=use_cache)
assert fn not in _tempfilestodel, "File should not be deleted!"
assert os.path.isfile(fn), "File should not be deleted!"
assert get_file_contents(f) == contents
def test_sources_normal(temp_cache, valid_urls, invalid_urls):
primary, contents = next(valid_urls)
fallback1 = next(invalid_urls)
f = download_file(primary, cache=True, sources=[primary, fallback1])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
def test_sources_fallback(temp_cache, valid_urls, invalid_urls):
primary = next(invalid_urls)
fallback1, contents = next(valid_urls)
f = download_file(primary, cache=True, sources=[primary, fallback1])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
def test_sources_ignore_primary(temp_cache, valid_urls, invalid_urls):
primary, bogus = next(valid_urls)
fallback1, contents = next(valid_urls)
f = download_file(primary, cache=True, sources=[fallback1])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
def test_sources_multiple(temp_cache, valid_urls, invalid_urls):
primary = next(invalid_urls)
fallback1 = next(invalid_urls)
fallback2, contents = next(valid_urls)
f = download_file(primary, cache=True, sources=[primary, fallback1, fallback2])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
assert not is_url_in_cache(fallback2)
def test_sources_multiple_missing(temp_cache, valid_urls, invalid_urls):
primary = next(invalid_urls)
fallback1 = next(invalid_urls)
fallback2 = next(invalid_urls)
with pytest.raises(urllib.error.URLError):
download_file(primary, cache=True, sources=[primary, fallback1, fallback2])
assert not is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
assert not is_url_in_cache(fallback2)
def test_update_url(tmp_path, temp_cache):
with TemporaryDirectory(dir=tmp_path) as d:
f_name = os.path.join(d, "f")
with open(f_name, "w") as f:
f.write("old")
f_url = url_to(f.name)
assert get_file_contents(download_file(f_url, cache=True)) == "old"
with open(f_name, "w") as f:
f.write("new")
assert get_file_contents(download_file(f_url, cache=True)) == "old"
assert get_file_contents(download_file(f_url, cache="update")) == "new"
# Now the URL doesn't exist any more.
assert not os.path.exists(f_name)
with pytest.raises(urllib.error.URLError):
# Direct download should fail
download_file(f_url, cache=False)
assert (
get_file_contents(download_file(f_url, cache=True)) == "new"
), "Cached version should still exist"
with pytest.raises(urllib.error.URLError):
# cannot download new version to check for updates
download_file(f_url, cache="update")
assert (
get_file_contents(download_file(f_url, cache=True)) == "new"
), "Failed update should not remove the current version"
@pytest.mark.remote_data(source="astropy")
def test_download_noprogress():
fnout = download_file(TESTURL, cache=False, show_progress=False)
assert os.path.isfile(fnout)
@pytest.mark.remote_data(source="astropy")
def test_download_cache():
download_dir = _get_download_cache_loc()
# Download the test URL and make sure it exists, then clear just that
# URL and make sure it got deleted.
fnout = download_file(TESTURL, cache=True)
assert os.path.isdir(download_dir)
assert os.path.isfile(fnout)
clear_download_cache(TESTURL)
assert not os.path.exists(fnout)
# Clearing download cache succeeds even if the URL does not exist.
clear_download_cache("http://this_was_never_downloaded_before.com")
# Make sure lockdir was released
lockdir = os.path.join(download_dir, "lock")
assert not os.path.isdir(lockdir), "Cache dir lock was not released!"
@pytest.mark.remote_data(source="astropy")
def test_download_certificate_verification_failed():
"""Tests for https://github.com/astropy/astropy/pull/10434"""
# First test the expected exception when download fails due to a
# certificate verification error; we simulate this by passing a bogus
# CA directory to the ssl_context argument
ssl_context = {"cafile": None, "capath": "/does/not/exist"}
msg = f"Verification of TLS/SSL certificate at {TESTURL_SSL} failed"
with pytest.raises(urllib.error.URLError, match=msg):
download_file(TESTURL_SSL, cache=False, ssl_context=ssl_context)
with pytest.warns(AstropyWarning, match=msg) as warning_lines:
fnout = download_file(
TESTURL_SSL, cache=False, ssl_context=ssl_context, allow_insecure=True
)
assert len(warning_lines) == 1
assert os.path.isfile(fnout)
def test_download_cache_after_clear(tmp_path, temp_cache, valid_urls):
testurl, contents = next(valid_urls)
# Test issues raised in #4427 with clear_download_cache() without a URL,
# followed by subsequent download.
download_dir = _get_download_cache_loc()
fnout = download_file(testurl, cache=True)
assert os.path.isfile(fnout)
clear_download_cache()
assert not os.path.exists(fnout)
assert not os.path.exists(download_dir)
fnout = download_file(testurl, cache=True)
assert os.path.isfile(fnout)
@pytest.mark.remote_data(source="astropy")
def test_download_parallel_from_internet_works(temp_cache):
main_url = conf.dataurl
mirror_url = conf.dataurl_mirror
fileloc = "intersphinx/README"
urls = []
sources = {}
for s in ["", fileloc]:
urls.append(main_url + s)
sources[urls[-1]] = [urls[-1], mirror_url + s]
fnout = download_files_in_parallel(urls, sources=sources)
assert all(os.path.isfile(f) for f in fnout), fnout
@pytest.mark.parametrize("method", [None, "spawn"])
def test_download_parallel_fills_cache(tmp_path, valid_urls, method):
urls = []
# tmp_path is shared between many tests, and that can cause weird
# interactions if we set the temporary cache too directly
with paths.set_temp_cache(tmp_path):
for um, c in islice(valid_urls, FEW):
assert not is_url_in_cache(um)
urls.append((um, c))
rs = download_files_in_parallel(
[u for (u, c) in urls], multiprocessing_start_method=method
)
assert len(rs) == len(urls)
url_set = {u for (u, c) in urls}
assert url_set <= set(get_cached_urls())
for r, (u, c) in zip(rs, urls):
assert get_file_contents(r) == c
check_download_cache()
assert not url_set.intersection(get_cached_urls())
check_download_cache()
def test_download_parallel_with_empty_sources(valid_urls, temp_cache):
urls = []
sources = {}
for um, c in islice(valid_urls, FEW):
assert not is_url_in_cache(um)
urls.append((um, c))
rs = download_files_in_parallel([u for (u, c) in urls], sources=sources)
assert len(rs) == len(urls)
# u = set(u for (u, c) in urls)
# assert u <= set(get_cached_urls())
check_download_cache()
for r, (u, c) in zip(rs, urls):
assert get_file_contents(r) == c
def test_download_parallel_with_sources_and_bogus_original(
valid_urls, invalid_urls, temp_cache
):
u, c = next(valid_urls)
urls = [(u, c, None)]
sources = {}
for i, (um, c_bad) in enumerate(islice(valid_urls, FEW)):
assert not is_url_in_cache(um)
sources[um] = []
for iu in islice(invalid_urls, i):
sources[um].append(iu)
u, c = next(valid_urls)
sources[um].append(u)
urls.append((um, c, c_bad))
rs = download_files_in_parallel([u for (u, c, c_bad) in urls], sources=sources)
assert len(rs) == len(urls)
# u = set(u for (u, c, c_bad) in urls)
# assert u <= set(get_cached_urls())
for r, (u, c, c_bad) in zip(rs, urls):
assert get_file_contents(r) == c
assert get_file_contents(r) != c_bad
def test_download_parallel_many(temp_cache, valid_urls):
td = list(islice(valid_urls, N_PARALLEL_HAMMER))
r = download_files_in_parallel([u for (u, c) in td])
assert len(r) == len(td)
for r_, (_, c) in zip(r, td):
assert get_file_contents(r_) == c
def test_download_parallel_partial_success(temp_cache, valid_urls, invalid_urls):
"""Check that a partially successful download works.
Even in the presence of many requested URLs, presumably hitting all the
parallelism this system can manage, a download failure leads to a tidy
shutdown.
"""
td = list(islice(valid_urls, N_PARALLEL_HAMMER))
u_bad = next(invalid_urls)
with pytest.raises(urllib.request.URLError):
download_files_in_parallel([u_bad] + [u for (u, c) in td])
# Actually some files may get downloaded, others not.
# Is this good? Should we stubbornly keep trying?
# assert not any([is_url_in_cache(u) for (u, c) in td])
@pytest.mark.slow
def test_download_parallel_partial_success_lock_safe(
temp_cache, valid_urls, invalid_urls
):
"""Check that a partially successful parallel download leaves the cache unlocked.
This needs to be repeated many times because race conditions are what cause
this sort of thing, especially situations where a process might be forcibly
shut down while it holds the lock.
"""
s = random.getstate()
try:
random.seed(0)
for _ in range(N_PARALLEL_HAMMER):
td = list(islice(valid_urls, FEW))
u_bad = next(invalid_urls)
urls = [u_bad] + [u for (u, c) in td]
random.shuffle(urls)
with pytest.raises(urllib.request.URLError):
download_files_in_parallel(urls)
finally:
random.setstate(s)
def test_download_parallel_update(temp_cache, tmp_path):
td = []
for i in range(N_PARALLEL_HAMMER):
c = f"{i:04d}"
fn = tmp_path / c
with open(fn, "w") as f:
f.write(c)
u = url_to(fn)
clear_download_cache(u)
td.append((fn, u, c))
r1 = download_files_in_parallel([u for (fn, u, c) in td])
assert len(r1) == len(td)
for r_1, (fn, u, c) in zip(r1, td):
assert get_file_contents(r_1) == c
td2 = []
for fn, u, c in td:
c_plus = f"{c} updated"
fn = tmp_path / c
with open(fn, "w") as f:
f.write(c_plus)
td2.append((fn, u, c, c_plus))
r2 = download_files_in_parallel([u for (fn, u, c) in td], cache=True)
assert len(r2) == len(td)
for r_2, (fn, u, c, c_plus) in zip(r2, td2):
assert get_file_contents(r_2) == c
assert c != c_plus
r3 = download_files_in_parallel([u for (fn, u, c) in td], cache="update")
assert len(r3) == len(td)
for r_3, (fn, u, c, c_plus) in zip(r3, td2):
assert get_file_contents(r_3) != c
assert get_file_contents(r_3) == c_plus
@pytest.mark.skipif(
(sys.platform.startswith("win") and CI), reason="flaky cache error on Windows CI"
)
def test_update_parallel(temp_cache, valid_urls):
u, c = next(valid_urls)
u2, c2 = next(valid_urls)
f = download_file(u, cache=True)
assert get_file_contents(f) == c
def update(i):
return download_file(u, cache="update", sources=[u2])
with ThreadPoolExecutor(max_workers=N_THREAD_HAMMER) as P:
r = set(P.map(update, range(N_THREAD_HAMMER)))
check_download_cache()
for f in r:
assert get_file_contents(f) == c2
@pytest.mark.skipif(
(sys.platform.startswith("win") and CI), reason="flaky cache error on Windows CI"
)
def test_update_parallel_multi(temp_cache, valid_urls):
u, c = next(valid_urls)
iucs = list(islice(valid_urls, N_THREAD_HAMMER))
f = download_file(u, cache=True)
assert get_file_contents(f) == c
def update(uc):
u2, c2 = uc
return download_file(u, cache="update", sources=[u2]), c2
with ThreadPoolExecutor(max_workers=len(iucs)) as P:
r = list(P.map(update, iucs))
check_download_cache()
assert any(get_file_contents(f) == c for (f, c) in r)
@pytest.mark.remote_data(source="astropy")
def test_url_nocache():
with get_readable_fileobj(TESTURL, cache=False, encoding="utf-8") as page:
assert page.read().find("Astropy") > -1
def test_find_by_hash(valid_urls, temp_cache):
testurl, contents = next(valid_urls)
p = download_file(testurl, cache=True)
hash = compute_hash(p)
hashstr = "hash/" + hash
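# Cached files can be looked up by content hash via a "hash/<md5>" name.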
fnout = get_pkg_data_filename(hashstr)
assert os.path.isfile(fnout)
clear_download_cache(fnout)
assert not os.path.isfile(fnout)
@pytest.mark.remote_data(source="astropy")
def test_find_invalid():
# this is of course not a real data file and not on any remote server, but
# it should *try* to go to the remote server
with pytest.raises(urllib.error.URLError):
get_pkg_data_filename(
"kjfrhgjklahgiulrhgiuraehgiurhgiuhreglhurieghruelighiuerahiulruli"
)
@pytest.mark.parametrize("package", [None, "astropy", "numpy"])
def test_get_invalid(package):
"""Test can create a file path to an invalid file."""
path = get_pkg_data_path("kjfrhgjkla", "hgiulrhgiu", package=package)
assert not os.path.isfile(path)
assert not os.path.isdir(path)
# Package data functions
@pytest.mark.parametrize(
"filename", ["local.dat", "local.dat.gz", "local.dat.bz2", "local.dat.xz"]
)
def test_local_data_obj(filename):
if (not HAS_BZ2 and "bz2" in filename) or (not HAS_LZMA and "xz" in filename):
with pytest.raises(ValueError, match=r" format files are not supported"):
with get_pkg_data_fileobj(
os.path.join("data", filename), encoding="binary"
) as f:
f.readline()
# assert f.read().rstrip() == b'CONTENT'
else:
with get_pkg_data_fileobj(
os.path.join("data", filename), encoding="binary"
) as f:
f.readline()
assert f.read().rstrip() == b"CONTENT"
@pytest.fixture(params=["invalid.dat.bz2", "invalid.dat.gz"])
def bad_compressed(request, tmp_path):
# These contents have valid headers for their respective file formats, but
# are otherwise malformed and invalid.
bz_content = b"BZhinvalid"
gz_content = b"\x1f\x8b\x08invalid"
datafile = tmp_path / request.param
filename = str(datafile)
if filename.endswith(".bz2"):
contents = bz_content
elif filename.endswith(".gz"):
contents = gz_content
else:
contents = "invalid"
datafile.write_bytes(contents)
return filename
def test_local_data_obj_invalid(bad_compressed):
is_bz2 = bad_compressed.endswith(".bz2")
is_xz = bad_compressed.endswith(".xz")
# Note, since these invalid files are created on the fly in order to avoid
# problems with detection by antivirus software
# (see https://github.com/astropy/astropy/issues/6520), it is no longer
# possible to use ``get_pkg_data_fileobj`` to read the files. Technically,
# they're not local anymore: they just live in a temporary directory
# created by pytest. However, we can still use get_readable_fileobj for the
# test.
if (not HAS_BZ2 and is_bz2) or (not HAS_LZMA and is_xz):
with pytest.raises(
ModuleNotFoundError, match=r"does not provide the [lb]z[2m]a? module\."
):
with get_readable_fileobj(bad_compressed, encoding="binary") as f:
f.read()
else:
with get_readable_fileobj(bad_compressed, encoding="binary") as f:
assert f.read().rstrip().endswith(b"invalid")
def test_local_data_name():
assert os.path.isfile(TESTLOCAL) and TESTLOCAL.endswith("local.dat")
# TODO: if in the future, the root data/ directory is added in, the below
# test should be uncommented and the README.rst should be replaced with
# whatever file is there
# get something in the astropy root
# fnout2 = get_pkg_data_filename('../../data/README.rst')
# assert os.path.isfile(fnout2) and fnout2.endswith('README.rst')
def test_data_name_third_party_package():
"""Regression test for issue #1256
Tests that `get_pkg_data_filename` works in a third-party package that
doesn't make any relative imports from the module it's used from.
Uses a test package under ``data/test_package``.
"""
# Get the actual data dir:
data_dir = os.path.join(os.path.dirname(__file__), "data")
sys.path.insert(0, data_dir)
try:
import test_package
filename = test_package.get_data_filename()
assert os.path.normcase(filename) == (
os.path.normcase(os.path.join(data_dir, "test_package", "data", "foo.txt"))
)
finally:
sys.path.pop(0)
def test_local_data_nonlocalfail():
# this would go *outside* the astropy tree
with pytest.raises(RuntimeError):
get_pkg_data_filename("../../../data/README.rst")
def test_compute_hash(tmp_path):
rands = b"1234567890abcdefghijklmnopqrstuvwxyz"
filename = tmp_path / "tmp.dat"
with open(filename, "wb") as ntf:
ntf.write(rands)
ntf.flush()
chhash = compute_hash(filename)
shash = hashlib.md5(rands).hexdigest()
assert chhash == shash
def test_get_pkg_data_contents():
with get_pkg_data_fileobj("data/local.dat") as f:
contents1 = f.read()
contents2 = get_pkg_data_contents("data/local.dat")
assert contents1 == contents2
@pytest.mark.remote_data(source="astropy")
def test_data_noastropy_fallback(monkeypatch):
"""
Tests to make sure the default behavior when the cache directory can't
be located is correct
"""
# better yet, set the configuration to make sure the temp files are deleted
conf.delete_temporary_downloads_at_exit = True
# make sure the config and cache directories are not searched
monkeypatch.setenv("XDG_CONFIG_HOME", "foo")
monkeypatch.delenv("XDG_CONFIG_HOME")
monkeypatch.setenv("XDG_CACHE_HOME", "bar")
monkeypatch.delenv("XDG_CACHE_HOME")
monkeypatch.setattr(paths.set_temp_config, "_temp_path", None)
monkeypatch.setattr(paths.set_temp_cache, "_temp_path", None)
# make sure _find_or_create_root_dir fails as though the astropy dir
# could not be accessed
def osraiser(dirnm, linkto, pkgname=None):
raise OSError()
monkeypatch.setattr(paths, "_find_or_create_root_dir", osraiser)
with pytest.raises(OSError):
# make sure the config dir search fails
paths.get_cache_dir(rootname="astropy")
with pytest.warns(CacheMissingWarning) as warning_lines:
fnout = download_file(TESTURL, cache=True)
n_warns = len(warning_lines)
partial_warn_msgs = ["remote data cache could not be accessed", "temporary file"]
if n_warns == 4:
partial_warn_msgs.extend(["socket", "socket"])
for wl in warning_lines:
cur_w = str(wl).lower()
for i, partial_msg in enumerate(partial_warn_msgs):
if partial_msg in cur_w:
del partial_warn_msgs[i]
break
assert (
len(partial_warn_msgs) == 0
), f"Got some unexpected warnings: {partial_warn_msgs}"
assert n_warns in (2, 4), f"Expected 2 or 4 warnings, got {n_warns}"
assert os.path.isfile(fnout)
# clearing the cache should be a no-op that doesn't affect fnout
with pytest.warns(
CacheMissingWarning, match=r".*Not clearing data cache - cache inaccessible.*"
):
clear_download_cache(TESTURL)
assert os.path.isfile(fnout)
# now remove it so tests don't clutter up the temp dir; this should get
# called at exit, anyway, but we do it here just to make sure it's working
# correctly
_deltemps()
assert not os.path.isfile(fnout)
# now try with no cache
fnnocache = download_file(TESTURL, cache=False)
with open(fnnocache, "rb") as page:
assert page.read().decode("utf-8").find("Astropy") > -1
# no warnings should be raised in fileobj because the cache is unnecessary
@pytest.mark.parametrize(
"filename",
[
"unicode.txt",
"unicode.txt.gz",
pytest.param(
"unicode.txt.bz2",
marks=pytest.mark.xfail(not HAS_BZ2, reason="no bz2 support"),
),
pytest.param(
"unicode.txt.xz",
marks=pytest.mark.xfail(not HAS_LZMA, reason="no lzma support"),
),
],
)
def test_read_unicode(filename):
contents = get_pkg_data_contents(os.path.join("data", filename), encoding="utf-8")
assert isinstance(contents, str)
contents = contents.splitlines()[1]
assert contents == "האסטרונומי פייתון"
contents = get_pkg_data_contents(os.path.join("data", filename), encoding="binary")
assert isinstance(contents, bytes)
x = contents.splitlines()[1]
# fmt: off
assert x == (
b"\xff\xd7\x94\xd7\x90\xd7\xa1\xd7\x98\xd7\xa8\xd7\x95\xd7\xa0\xd7\x95"
b"\xd7\x9e\xd7\x99 \xd7\xa4\xd7\x99\xd7\x99\xd7\xaa\xd7\x95\xd7\x9f"[1:]
)
# fmt: on
def test_compressed_stream():
gzipped_data = (
b"H4sICIxwG1AAA2xvY2FsLmRhdAALycgsVkjLzElVANKlxakpCpl5CiUZqQ"
b"olqcUl8Tn5yYk58SmJJYnxWmCRzLx0hbTSvOSSzPy8Yi5nf78QV78QLgAlLytnRQAAAA=="
)
gzipped_data = base64.b64decode(gzipped_data)
assert isinstance(gzipped_data, bytes)
class FakeStream:
"""
A fake stream that has `read`, but no `seek`.
"""
def __init__(self, data):
self.data = data
def read(self, nbytes=None):
if nbytes is None:
result = self.data
self.data = b""
else:
result = self.data[:nbytes]
self.data = self.data[nbytes:]
return result
stream = FakeStream(gzipped_data)
with get_readable_fileobj(stream, encoding="binary") as f:
f.readline()
assert f.read().rstrip() == b"CONTENT"
@pytest.mark.remote_data(source="astropy")
def test_invalid_location_download_raises_urlerror():
"""
checks that download_file gives a URLError and not an AttributeError,
as its code pathway involves some fiddling with the exception.
"""
with pytest.raises(urllib.error.URLError):
download_file("http://www.astropy.org/nonexistentfile")
def test_invalid_location_download_noconnect():
"""
checks that download_file gives an OSError if the socket is blocked
"""
# This should invoke socket's monkeypatched failure
with pytest.raises(OSError):
download_file("http://astropy.org/nonexistentfile")
@pytest.mark.remote_data(source="astropy")
def test_is_url_in_cache_remote():
assert not is_url_in_cache("http://astropy.org/nonexistentfile")
download_file(TESTURL, cache=True, show_progress=False)
assert is_url_in_cache(TESTURL)
def test_is_url_in_cache_local(temp_cache, valid_urls, invalid_urls):
testurl, contents = next(valid_urls)
nonexistent = next(invalid_urls)
assert not is_url_in_cache(testurl)
assert not is_url_in_cache(nonexistent)
download_file(testurl, cache=True, show_progress=False)
assert is_url_in_cache(testurl)
assert not is_url_in_cache(nonexistent)
# If a non-deterministic failure happens, see
# https://github.com/astropy/astropy/issues/9765
def test_check_download_cache(tmp_path, temp_cache, valid_urls, invalid_urls):
testurl, testurl_contents = next(valid_urls)
testurl2, testurl2_contents = next(valid_urls)
zip_file_name = tmp_path / "the.zip"
clear_download_cache()
assert not check_download_cache()
download_file(testurl, cache=True)
check_download_cache()
download_file(testurl2, cache=True)
check_download_cache()
export_download_cache(zip_file_name, [testurl, testurl2])
check_download_cache()
clear_download_cache(testurl2)
check_download_cache()
import_download_cache(zip_file_name, [testurl])
check_download_cache()
def test_export_import_roundtrip_one(tmp_path, temp_cache, valid_urls):
testurl, contents = next(valid_urls)
f = download_file(testurl, cache=True, show_progress=False)
assert get_file_contents(f) == contents
initial_urls_in_cache = set(get_cached_urls())
zip_file_name = tmp_path / "the.zip"
export_download_cache(zip_file_name, [testurl])
clear_download_cache(testurl)
import_download_cache(zip_file_name)
assert is_url_in_cache(testurl)
assert set(get_cached_urls()) == initial_urls_in_cache
assert (
get_file_contents(download_file(testurl, cache=True, show_progress=False))
== contents
)
def test_export_url_not_present(temp_cache, valid_urls):
testurl, contents = next(valid_urls)
with NamedTemporaryFile("wb") as zip_file:
assert not is_url_in_cache(testurl)
with pytest.raises(KeyError):
export_download_cache(zip_file, [testurl])
def test_import_one(tmp_path, temp_cache, valid_urls):
testurl, testurl_contents = next(valid_urls)
testurl2, testurl2_contents = next(valid_urls)
zip_file_name = tmp_path / "the.zip"
download_file(testurl, cache=True)
download_file(testurl2, cache=True)
assert is_url_in_cache(testurl2)
export_download_cache(zip_file_name, [testurl, testurl2])
clear_download_cache(testurl)
clear_download_cache(testurl2)
import_download_cache(zip_file_name, [testurl])
assert is_url_in_cache(testurl)
assert not is_url_in_cache(testurl2)
def test_export_import_roundtrip(tmp_path, temp_cache, valid_urls):
zip_file_name = tmp_path / "the.zip"
for u, c in islice(valid_urls, FEW):
download_file(u, cache=True)
initial_urls_in_cache = set(get_cached_urls())
export_download_cache(zip_file_name)
clear_download_cache()
import_download_cache(zip_file_name)
assert set(get_cached_urls()) == initial_urls_in_cache
def test_export_import_roundtrip_stream(temp_cache, valid_urls):
for u, c in islice(valid_urls, FEW):
download_file(u, cache=True)
initial_urls_in_cache = set(get_cached_urls())
with io.BytesIO() as f:
export_download_cache(f)
b = f.getvalue()
clear_download_cache()
with io.BytesIO(b) as f:
import_download_cache(f)
assert set(get_cached_urls()) == initial_urls_in_cache
def test_export_overwrite_flag_works(temp_cache, valid_urls, tmp_path):
fn = tmp_path / "f.zip"
c = b"Some contents\nto check later"
with open(fn, "wb") as f:
f.write(c)
for u, _ in islice(valid_urls, FEW):
download_file(u, cache=True)
with pytest.raises(FileExistsError):
export_download_cache(fn)
assert get_file_contents(fn, encoding="binary") == c
export_download_cache(fn, overwrite=True)
assert get_file_contents(fn, encoding="binary") != c
def test_export_import_roundtrip_different_location(tmp_path, valid_urls):
original_cache = tmp_path / "original"
original_cache.mkdir()
zip_file_name = tmp_path / "the.zip"
urls = list(islice(valid_urls, FEW))
initial_urls_in_cache = {u for (u, c) in urls}
with paths.set_temp_cache(original_cache):
for u, c in urls:
download_file(u, cache=True)
assert set(get_cached_urls()) == initial_urls_in_cache
export_download_cache(zip_file_name)
new_cache = tmp_path / "new"
new_cache.mkdir()
with paths.set_temp_cache(new_cache):
import_download_cache(zip_file_name)
check_download_cache()
assert set(get_cached_urls()) == initial_urls_in_cache
for u, c in urls:
assert get_file_contents(download_file(u, cache=True)) == c
def test_cache_size_is_zero_when_empty(temp_cache):
assert not get_cached_urls()
assert cache_total_size() == 0
def test_cache_size_changes_correctly_when_files_are_added_and_removed(
temp_cache, valid_urls
):
u, c = next(valid_urls)
clear_download_cache(u)
s_i = cache_total_size()
download_file(u, cache=True)
assert cache_total_size() == s_i + len(c) + len(u.encode("utf-8"))
clear_download_cache(u)
assert cache_total_size() == s_i
def test_cache_contents_agrees_with_get_urls(temp_cache, valid_urls):
r = []
for a, a_c in islice(valid_urls, FEW):
a_f = download_file(a, cache=True)
r.append((a, a_c, a_f))
assert set(cache_contents().keys()) == set(get_cached_urls())
for u, c, h in r:
assert cache_contents()[u] == h
@pytest.mark.parametrize("desired_size", [1_000_000_000_000_000_000, 1 * _u.Ebyte])
def test_free_space_checker_huge(tmp_path, desired_size):
with pytest.raises(OSError):
check_free_space_in_dir(tmp_path, desired_size)
def test_get_free_space_file_directory(tmp_path):
fn = tmp_path / "file"
with open(fn, "w"):
pass
with pytest.raises(OSError):
get_free_space_in_dir(fn)
free_space = get_free_space_in_dir(tmp_path)
assert free_space > 0 and not hasattr(free_space, "unit")
# TODO: If unit=True starts to auto-guess prefix, this needs updating.
free_space = get_free_space_in_dir(tmp_path, unit=True)
assert free_space > 0 and free_space.unit == _u.byte
free_space = get_free_space_in_dir(tmp_path, unit=_u.Mbit)
assert free_space > 0 and free_space.unit == _u.Mbit
def test_download_file_bogus_settings(invalid_urls, temp_cache):
u = next(invalid_urls)
with pytest.raises(KeyError):
download_file(u, sources=[])
def test_download_file_local_directory(tmp_path):
"""Make sure we get a URLError rather than OSError even if it's a
local directory."""
with pytest.raises(urllib.request.URLError):
download_file(url_to(tmp_path))
def test_download_file_schedules_deletion(valid_urls):
u, c = next(valid_urls)
f = download_file(u)
assert f in _tempfilestodel
# how to test deletion actually occurs?
def test_clear_download_cache_refuses_to_delete_outside_the_cache(tmp_path):
fn = str(tmp_path / "file")
with open(fn, "w") as f:
f.write("content")
assert os.path.exists(fn)
with pytest.raises(RuntimeError):
clear_download_cache(fn)
assert os.path.exists(fn)
def test_check_download_cache_finds_bogus_entries(temp_cache, valid_urls):
u, c = next(valid_urls)
download_file(u, cache=True)
dldir = _get_download_cache_loc()
bf = os.path.abspath(os.path.join(dldir, "bogus"))
with open(bf, "w") as f:
f.write("bogus file that exists")
with pytest.raises(CacheDamaged) as e:
check_download_cache()
assert bf in e.value.bad_files
clear_download_cache()
def test_check_download_cache_finds_bogus_subentries(temp_cache, valid_urls):
u, c = next(valid_urls)
f = download_file(u, cache=True)
bf = os.path.abspath(os.path.join(os.path.dirname(f), "bogus"))
with open(bf, "w") as f:
f.write("bogus file that exists")
with pytest.raises(CacheDamaged) as e:
check_download_cache()
assert bf in e.value.bad_files
clear_download_cache()
def test_check_download_cache_cleanup(temp_cache, valid_urls):
u, c = next(valid_urls)
fn = download_file(u, cache=True)
dldir = _get_download_cache_loc()
bf1 = os.path.abspath(os.path.join(dldir, "bogus1"))
with open(bf1, "w") as f:
f.write("bogus file that exists")
bf2 = os.path.abspath(os.path.join(os.path.dirname(fn), "bogus2"))
with open(bf2, "w") as f:
f.write("other bogus file that exists")
bf3 = os.path.abspath(os.path.join(dldir, "contents"))
with open(bf3, "w") as f:
f.write("awkwardly-named bogus file that exists")
u2, c2 = next(valid_urls)
f2 = download_file(u, cache=True)
os.unlink(f2)
bf4 = os.path.dirname(f2)
with pytest.raises(CacheDamaged) as e:
check_download_cache()
assert set(e.value.bad_files) == {bf1, bf2, bf3, bf4}
for bf in e.value.bad_files:
clear_download_cache(bf)
# download cache will be checked on exit
def test_download_cache_update_doesnt_damage_cache(temp_cache, valid_urls):
u, _ = next(valid_urls)
download_file(u, cache=True)
download_file(u, cache="update")
def test_cache_dir_is_actually_a_file(tmp_path, valid_urls):
"""Ensure that bogus cache settings are handled sensibly.
Because the user can specify the cache location in a config file, and
because they might try to deduce the location by looking around at what's
in their directory tree, and because the cache directory is actually several
tree levels down from the directory set in the config file, it's important
to check what happens if each of the steps in the path is wrong somehow.
"""
def check_quietly_ignores_bogus_cache():
"""We want a broken cache to produce a warning but then astropy should
act like there isn't a cache.
"""
with pytest.warns(CacheMissingWarning):
assert not get_cached_urls()
with pytest.warns(CacheMissingWarning):
assert not is_url_in_cache("http://www.example.com/")
with pytest.warns(CacheMissingWarning):
assert not cache_contents()
with pytest.warns(CacheMissingWarning):
u, c = next(valid_urls)
r = download_file(u, cache=True)
assert get_file_contents(r) == c
# check the filename r appears in a warning message?
# check r is added to the delete_at_exit list?
# in fact should there be testing of the delete_at_exit mechanism,
# as far as that is possible?
with pytest.warns(CacheMissingWarning):
assert not is_url_in_cache(u)
with pytest.warns(CacheMissingWarning):
with pytest.raises(OSError):
check_download_cache()
dldir = _get_download_cache_loc()
# set_temp_cache acts weird if it is pointed at a file (see below)
# but we want to see what happens when the cache is pointed
# at a file instead of a directory, so make a directory we can
# replace later.
fn = tmp_path / "file"
ct = "contents\n"
os.mkdir(fn)
with paths.set_temp_cache(fn):
shutil.rmtree(fn)
with open(fn, "w") as f:
f.write(ct)
with pytest.raises(OSError):
paths.get_cache_dir()
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(fn) == ct, "File should not be harmed."
# See what happens when set_temp_cache is pointed at a file
with pytest.raises(OSError):
with paths.set_temp_cache(fn):
pass
assert dldir == _get_download_cache_loc()
assert get_file_contents(str(fn)) == ct
# Now the cache directory is normal but the subdirectory it wants
# to make is a file
cd = tmp_path / "astropy"
with open(cd, "w") as f:
f.write(ct)
with paths.set_temp_cache(tmp_path):
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(cd) == ct
os.remove(cd)
# Ditto one level deeper
os.makedirs(cd)
cd = tmp_path / "astropy" / "download"
with open(cd, "w") as f:
f.write(ct)
with paths.set_temp_cache(tmp_path):
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(cd) == ct
os.remove(cd)
# Ditto another level deeper
os.makedirs(cd)
cd = tmp_path / "astropy" / "download" / "url"
with open(cd, "w") as f:
f.write(ct)
with paths.set_temp_cache(tmp_path):
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(cd) == ct
os.remove(cd)
def test_get_fileobj_str(a_file):
fn, c = a_file
with get_readable_fileobj(str(fn)) as rf:
assert rf.read() == c
def test_get_fileobj_localpath(a_file):
fn, c = a_file
with get_readable_fileobj(py.path.local(fn)) as rf:
assert rf.read() == c
def test_get_fileobj_pathlib(a_file):
fn, c = a_file
with get_readable_fileobj(pathlib.Path(fn)) as rf:
assert rf.read() == c
def test_get_fileobj_binary(a_binary_file):
fn, c = a_binary_file
with get_readable_fileobj(fn, encoding="binary") as rf:
assert rf.read() == c
def test_get_fileobj_already_open_text(a_file):
fn, c = a_file
with open(fn) as f:
with get_readable_fileobj(f) as rf:
with pytest.raises(TypeError):
rf.read()
def test_get_fileobj_already_open_binary(a_file):
fn, c = a_file
with open(fn, "rb") as f:
with get_readable_fileobj(f) as rf:
assert rf.read() == c
def test_get_fileobj_binary_already_open_binary(a_binary_file):
fn, c = a_binary_file
with open(fn, "rb") as f:
with get_readable_fileobj(f, encoding="binary") as rf:
assert rf.read() == c
def test_cache_contents_not_writable(temp_cache, valid_urls):
c = cache_contents()
with pytest.raises(TypeError):
c["foo"] = 7
u, _ = next(valid_urls)
download_file(u, cache=True)
c = cache_contents()
assert u in c
with pytest.raises(TypeError):
c["foo"] = 7
def test_cache_relocatable(tmp_path, valid_urls):
u, c = next(valid_urls)
d1 = tmp_path / "1"
d2 = tmp_path / "2"
os.mkdir(d1)
with paths.set_temp_cache(d1):
p1 = download_file(u, cache=True)
assert is_url_in_cache(u)
assert get_file_contents(p1) == c
shutil.copytree(d1, d2)
clear_download_cache()
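# A verbatim copy of the cache directory should be usable as the cache
# location even after the original has been cleared.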
with paths.set_temp_cache(d2):
assert is_url_in_cache(u)
p2 = download_file(u, cache=True)
assert p1 != p2
assert os.path.exists(p2)
clear_download_cache(p2)
check_download_cache()
def test_get_readable_fileobj_cleans_up_temporary_files(tmp_path, monkeypatch):
"""checks that get_readable_fileobj leaves no temporary files behind"""
# Create a 'file://' URL pointing to a path on the local filesystem
url = url_to(TESTLOCAL)
# Save temporary files to a known location
monkeypatch.setattr(tempfile, "tempdir", str(tmp_path))
# Call get_readable_fileobj() as a context manager
with get_readable_fileobj(url) as f:
f.read()
# Get listing of files in temporary directory
tempdir_listing = list(tmp_path.iterdir())
# Assert that no temporary files remain after the get_readable_fileobj()
# context manager has finished running
assert len(tempdir_listing) == 0
def test_path_objects_get_readable_fileobj():
fpath = pathlib.Path(TESTLOCAL)
with get_readable_fileobj(fpath) as f:
assert (
f.read().rstrip()
== "This file is used in the test_local_data_* testing functions\nCONTENT"
)
def test_nested_get_readable_fileobj():
"""Ensure fileobj state is as expected when get_readable_fileobj()
is called inside another get_readable_fileobj().
"""
with get_readable_fileobj(TESTLOCAL, encoding="binary") as fileobj:
with get_readable_fileobj(fileobj, encoding="UTF-8") as fileobj2:
fileobj2.seek(1)
fileobj.seek(1)
# Theoretically, fileobj2 should be closed already here but it is not.
# See https://github.com/astropy/astropy/pull/8675.
# UNCOMMENT THIS WHEN PYTHON FINALLY LETS IT HAPPEN.
# assert fileobj2.closed
assert fileobj.closed and fileobj2.closed
def test_download_file_wrong_size(monkeypatch):
@contextlib.contextmanager
def mockurl(remote_url, timeout=None):
yield MockURL()
def mockurl_builder(*args, tlscontext=None, **kwargs):
mock_opener = type("MockOpener", (object,), {})()
mock_opener.open = mockurl
return mock_opener
class MockURL:
def __init__(self):
self.reader = io.BytesIO(b"a" * real_length)
def info(self):
return {"Content-Length": str(report_length)}
def read(self, length=None):
return self.reader.read(length)
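# Make the opener return a MockURL that reports Content-Length
# "report_length" while actually serving "real_length" bytes.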
monkeypatch.setattr(astropy.utils.data, "_build_urlopener", mockurl_builder)
with pytest.raises(urllib.error.ContentTooShortError):
report_length = 1024
real_length = 1023
download_file(TESTURL, cache=False)
with pytest.raises(urllib.error.URLError):
report_length = 1023
real_length = 1024
download_file(TESTURL, cache=False)
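# When the reported and actual lengths agree, the download should succeed.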
report_length = 1023
real_length = 1023
fn = download_file(TESTURL, cache=False)
with open(fn, "rb") as f:
assert f.read() == b"a" * real_length
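# A missing Content-Length header should not trigger the size check.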
report_length = None
real_length = 1023
fn = download_file(TESTURL, cache=False)
with open(fn, "rb") as f:
assert f.read() == b"a" * real_length
def test_can_make_directories_readonly(tmp_path):
try:
with readonly_dir(tmp_path):
assert is_dir_readonly(tmp_path)
except AssertionError:
if hasattr(os, "geteuid") and os.geteuid() == 0:
pytest.skip(
"We are root, we can't make a directory un-writable with chmod."
)
elif platform.system() == "Windows":
pytest.skip(
"It seems we can't make a driectory un-writable under Windows "
"with chmod, in spite of the documentation."
)
else:
raise
def test_can_make_files_readonly(tmp_path):
fn = tmp_path / "test"
c = "contents\n"
with open(fn, "w") as f:
f.write(c)
with readonly_dir(tmp_path):
try:
with open(fn, "w+") as f:
f.write("more contents\n")
except PermissionError:
pass
else:
if hasattr(os, "geteuid") and os.geteuid() == 0:
pytest.skip("We are root, we can't make a file un-writable with chmod.")
assert get_file_contents(fn) == c
def test_read_cache_readonly(readonly_cache):
assert cache_contents() == readonly_cache
def test_download_file_cache_readonly(readonly_cache):
for u in readonly_cache:
f = download_file(u, cache=True)
assert f == readonly_cache[u]
def test_import_file_cache_readonly(readonly_cache, tmp_path):
filename = tmp_path / "test-file"
content = "Some text or other"
url = "http://example.com/"
with open(filename, "w") as f:
f.write(content)
with pytest.raises(OSError):
import_file_to_cache(url, filename, remove_original=True)
assert not is_url_in_cache(url)
def test_import_file_cache_invalid_cross_device_link(tmp_path, monkeypatch):
def no_rename(path, mode=None):
if os.path.exists(path):
raise OSError(errno.EXDEV, "os.rename monkeypatched out")
else:
raise FileNotFoundError(f"File {path} does not exist.")
monkeypatch.setattr(os, "rename", no_rename)
filename = tmp_path / "test-file"
content = "Some text or other"
url = "http://example.com/"
with open(filename, "w") as f:
f.write(content)
with pytest.warns(AstropyWarning, match="os.rename monkeypatched out"):
import_file_to_cache(url, filename, remove_original=True, replace=True)
assert is_url_in_cache(url)
def test_download_file_cache_readonly_cache_miss(readonly_cache, valid_urls):
u, c = next(valid_urls)
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache=True)
assert get_file_contents(f) == c
assert not is_url_in_cache(u)
def test_download_file_cache_readonly_update(readonly_cache):
for u in readonly_cache:
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache="update")
assert f != readonly_cache[u]
assert compute_hash(f) == compute_hash(readonly_cache[u])
def test_check_download_cache_works_if_readonly(readonly_cache):
check_download_cache()
# On Windows I can't make directories readonly. On CircleCI I can't make
# anything readonly because the test suite runs as root. So on those platforms
# none of the "real" tests above can be run. I can use monkeypatch to trigger
# the readonly code paths, see the "fake" versions of the tests below, but I
# don't totally trust those to completely explore what happens either, so we
# have both. I couldn't see an easy way to parameterize over fixtures and share
# tests.
def test_read_cache_fake_readonly(fake_readonly_cache):
assert cache_contents() == fake_readonly_cache
def test_download_file_cache_fake_readonly(fake_readonly_cache):
for u in fake_readonly_cache:
f = download_file(u, cache=True)
assert f == fake_readonly_cache[u]
def test_mkdtemp_cache_fake_readonly(fake_readonly_cache):
with pytest.raises(OSError):
tempfile.mkdtemp()
def test_TD_cache_fake_readonly(fake_readonly_cache):
with pytest.raises(OSError):
with TemporaryDirectory():
pass
def test_import_file_cache_fake_readonly(fake_readonly_cache, tmp_path):
filename = tmp_path / "test-file"
content = "Some text or other"
url = "http://example.com/"
with open(filename, "w") as f:
f.write(content)
with pytest.raises(OSError):
import_file_to_cache(url, filename, remove_original=True)
assert not is_url_in_cache(url)
def test_download_file_cache_fake_readonly_cache_miss(fake_readonly_cache, valid_urls):
u, c = next(valid_urls)
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache=True)
assert not is_url_in_cache(u)
assert get_file_contents(f) == c
def test_download_file_cache_fake_readonly_update(fake_readonly_cache):
for u in fake_readonly_cache:
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache="update")
assert f != fake_readonly_cache[u]
assert compute_hash(f) == compute_hash(fake_readonly_cache[u])
def test_check_download_cache_works_if_fake_readonly(fake_readonly_cache):
check_download_cache()
def test_pkgname_isolation(temp_cache, valid_urls):
a = "bogus_cache_name"
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
for u, _ in islice(valid_urls, FEW):
download_file(u, cache=True, pkgname=a)
assert not get_cached_urls()
assert len(get_cached_urls(pkgname=a)) == FEW
assert cache_total_size() < cache_total_size(pkgname=a)
for u, _ in islice(valid_urls, FEW + 1):
download_file(u, cache=True)
assert len(get_cached_urls()) == FEW + 1
assert len(get_cached_urls(pkgname=a)) == FEW
assert cache_total_size() > cache_total_size(pkgname=a)
assert set(get_cached_urls()) == set(cache_contents().keys())
assert set(get_cached_urls(pkgname=a)) == set(cache_contents(pkgname=a).keys())
for i in get_cached_urls():
assert is_url_in_cache(i)
assert not is_url_in_cache(i, pkgname=a)
for i in get_cached_urls(pkgname=a):
assert not is_url_in_cache(i)
assert is_url_in_cache(i, pkgname=a)
# FIXME: need to break a cache to test whether we check the right one
check_download_cache()
check_download_cache(pkgname=a)
# FIXME: check that cache='update' works
u = get_cached_urls()[0]
with pytest.raises(KeyError):
download_file(u, cache=True, sources=[], pkgname=a)
clear_download_cache(u, pkgname=a)
assert len(get_cached_urls()) == FEW + 1, "wrong pkgname should do nothing"
assert len(get_cached_urls(pkgname=a)) == FEW, "wrong pkgname should do nothing"
f = download_file(u, sources=[], cache=True)
with pytest.raises(RuntimeError):
clear_download_cache(f, pkgname=a)
ua = get_cached_urls(pkgname=a)[0]
with pytest.raises(KeyError):
download_file(ua, cache=True, sources=[])
fa = download_file(ua, sources=[], cache=True, pkgname=a)
with pytest.raises(RuntimeError):
clear_download_cache(fa)
clear_download_cache(ua, pkgname=a)
assert len(get_cached_urls()) == FEW + 1
assert len(get_cached_urls(pkgname=a)) == FEW - 1
clear_download_cache(u)
assert len(get_cached_urls()) == FEW
assert len(get_cached_urls(pkgname=a)) == FEW - 1
clear_download_cache(pkgname=a)
assert len(get_cached_urls()) == FEW
assert not get_cached_urls(pkgname=a)
clear_download_cache()
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
def test_transport_cache_via_zip(temp_cache, valid_urls):
a = "bogus_cache_name"
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
for u, _ in islice(valid_urls, FEW):
download_file(u, cache=True)
with io.BytesIO() as f:
export_download_cache(f)
b = f.getvalue()
with io.BytesIO(b) as f:
import_download_cache(f, pkgname=a)
check_download_cache()
check_download_cache(pkgname=a)
assert set(get_cached_urls()) == set(get_cached_urls(pkgname=a))
cca = cache_contents(pkgname=a)
for k, v in cache_contents().items():
assert v != cca[k]
assert get_file_contents(v) == get_file_contents(cca[k])
clear_download_cache()
with io.BytesIO() as f:
export_download_cache(f, pkgname=a)
b = f.getvalue()
with io.BytesIO(b) as f:
import_download_cache(f)
assert set(get_cached_urls()) == set(get_cached_urls(pkgname=a))
def test_download_parallel_respects_pkgname(temp_cache, valid_urls):
a = "bogus_cache_name"
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
download_files_in_parallel([u for (u, c) in islice(valid_urls, FEW)], pkgname=a)
assert not get_cached_urls()
assert len(get_cached_urls(pkgname=a)) == FEW
@pytest.mark.skipif(
not CAN_RENAME_DIRECTORY_IN_USE,
reason="This platform is unable to rename directories that are in use.",
)
def test_removal_of_open_files(temp_cache, valid_urls):
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
clear_download_cache(u)
assert not is_url_in_cache(u)
check_download_cache()
@pytest.mark.skipif(
not CAN_RENAME_DIRECTORY_IN_USE,
reason="This platform is unable to rename directories that are in use.",
)
def test_update_of_open_files(temp_cache, valid_urls):
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
u2, c2 = next(valid_urls)
f = download_file(u, cache="update", sources=[u2])
check_download_cache()
assert is_url_in_cache(u)
assert get_file_contents(f) == c2
assert is_url_in_cache(u)
def test_removal_of_open_files_windows(temp_cache, valid_urls, monkeypatch):
def no_rmtree(*args, **kwargs):
warnings.warn(CacheMissingWarning("in use"))
raise PermissionError
if CAN_RENAME_DIRECTORY_IN_USE:
# This platform can remove files while they are in use, so fake the
# Windows-style failure by monkeypatching _rmtree.
monkeypatch.setattr(astropy.utils.data, "_rmtree", no_rmtree)
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
with pytest.warns(CacheMissingWarning, match=r".*in use.*"):
clear_download_cache(u)
def test_update_of_open_files_windows(temp_cache, valid_urls, monkeypatch):
def no_rmtree(*args, **kwargs):
warnings.warn(CacheMissingWarning("in use"))
raise PermissionError
if CAN_RENAME_DIRECTORY_IN_USE:
# This platform can remove files while they are in use, so fake the
# Windows-style failure by monkeypatching _rmtree.
monkeypatch.setattr(astropy.utils.data, "_rmtree", no_rmtree)
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
u2, c2 = next(valid_urls)
with pytest.warns(CacheMissingWarning, match=r".*in use.*"):
f = download_file(u, cache="update", sources=[u2])
check_download_cache()
assert is_url_in_cache(u)
assert get_file_contents(f) == c2
assert get_file_contents(download_file(u, cache=True, sources=[])) == c
def test_no_allow_internet(temp_cache, valid_urls):
u, c = next(valid_urls)
with conf.set_temp("allow_internet", False):
with pytest.raises(urllib.error.URLError):
download_file(u)
assert not is_url_in_cache(u)
with pytest.raises(urllib.error.URLError):
# This will trigger the remote data error if it's allowed to touch the internet
download_file(TESTURL)
def test_clear_download_cache_not_too_aggressive(temp_cache, valid_urls):
u, c = next(valid_urls)
download_file(u, cache=True)
dldir = _get_download_cache_loc()
bad_filename = os.path.join(dldir, "contents")
assert is_url_in_cache(u)
clear_download_cache(bad_filename)
assert is_url_in_cache(u)
def test_clear_download_cache_variants(temp_cache, valid_urls):
# deletion by contents filename
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(f)
assert not is_url_in_cache(u)
# deletion by url filename
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(os.path.join(os.path.dirname(f), "url"))
assert not is_url_in_cache(u)
# deletion by hash directory name
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(os.path.dirname(f))
assert not is_url_in_cache(u)
# deletion by directory name with trailing slash
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(os.path.dirname(f) + "/")
assert not is_url_in_cache(u)
# deletion by hash of file contents
u, c = next(valid_urls)
f = download_file(u, cache=True)
h = compute_hash(f)
clear_download_cache(h)
assert not is_url_in_cache(u)
def test_clear_download_cache_invalid_cross_device_link(
temp_cache, valid_urls, monkeypatch
):
def no_rename(path, mode=None):
raise OSError(errno.EXDEV, "os.rename monkeypatched out")
u, c = next(valid_urls)
download_file(u, cache=True)
monkeypatch.setattr(os, "rename", no_rename)
assert is_url_in_cache(u)
with pytest.warns(AstropyWarning, match="os.rename monkeypatched out"):
clear_download_cache(u)
assert not is_url_in_cache(u)
def test_clear_download_cache_raises_os_error(temp_cache, valid_urls, monkeypatch):
def no_rename(path, mode=None):
raise OSError(errno.EBUSY, "os.rename monkeypatched out")
u, c = next(valid_urls)
download_file(u, cache=True)
monkeypatch.setattr(os, "rename", no_rename)
assert is_url_in_cache(u)
with pytest.warns(CacheMissingWarning, match="os.rename monkeypatched out"):
clear_download_cache(u)
@pytest.mark.skipif(
CI and os.environ.get("IS_CRON", "false") == "false",
reason="Flaky/too much external traffic for regular CI",
)
@pytest.mark.remote_data
def test_ftp_tls_auto(temp_cache):
"""Test that download automatically enables TLS/SSL when required"""
url = "ftp://anonymous:mail%[email protected]/pub/products/iers/finals2000A.daily"
download_file(url)
@pytest.mark.parametrize("base", ["http://example.com", "https://example.com"])
def test_url_trailing_slash(temp_cache, valid_urls, base):
slash = base + "/"
no_slash = base
u, c = next(valid_urls)
download_file(slash, cache=True, sources=[u])
assert is_url_in_cache(no_slash)
download_file(no_slash, cache=True, sources=[])
clear_download_cache(no_slash)
assert not is_url_in_cache(no_slash)
assert not is_url_in_cache(slash)
download_file(no_slash, cache=True, sources=[u])
# see if implicit check_download_cache squawks
def test_empty_url(temp_cache, valid_urls):
u, c = next(valid_urls)
download_file("file://", cache=True, sources=[u])
assert not is_url_in_cache("file:///")
@pytest.mark.remote_data
def test_download_ftp_file_properly_handles_socket_error():
faulty_url = "ftp://anonymous:mail%40astropy.org@nonexisting/pub/products/iers/finals2000A.all"
with pytest.raises(urllib.error.URLError) as excinfo:
download_file(faulty_url)
errmsg = excinfo.exconly()
found_msg = False
possible_msgs = [
"Name or service not known",
"nodename nor servname provided, or not known",
"getaddrinfo failed",
"Temporary failure in name resolution",
"No address associated with hostname",
]
for cur_msg in possible_msgs:
if cur_msg in errmsg:
found_msg = True
break
assert found_msg, f'Got {errmsg}, expected one of these: {",".join(possible_msgs)}'
@pytest.mark.parametrize(
("s", "ans"),
[
("http://googlecom", True),
("https://google.com", True),
("ftp://google.com", True),
("sftp://google.com", True),
("ssh://google.com", True),
("file:///c:/path/to/the%20file.txt", True),
("google.com", False),
("C:\\\\path\\\\file.docx", False),
("data://file", False),
],
)
def test_string_is_url_check(s, ans):
assert is_url(s) is ans
|
3a257b7e1494a5f3e13bfdf08f882ff4529248bb43197f683ece59f56f93c169 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import concurrent.futures
import inspect
import pickle
import pytest
from astropy.utils.decorators import (
classproperty,
deprecated,
deprecated_attribute,
deprecated_renamed_argument,
format_doc,
lazyproperty,
sharedmethod,
)
from astropy.utils.exceptions import (
AstropyDeprecationWarning,
AstropyPendingDeprecationWarning,
AstropyUserWarning,
)
class NewDeprecationWarning(AstropyDeprecationWarning):
"""
New Warning subclass to be used to test the deprecated decorator's
``warning_type`` parameter.
"""
def test_deprecated_attribute():
class DummyClass:
def __init__(self):
self.other = [42]
self._foo = 42
self._bar = 4242
self._message = "42"
self._pending = {42}
foo = deprecated_attribute("foo", "0.2")
bar = deprecated_attribute("bar", "0.2", warning_type=NewDeprecationWarning)
alternative = deprecated_attribute("alternative", "0.2", alternative="other")
message = deprecated_attribute("message", "0.2", message="MSG")
pending = deprecated_attribute("pending", "0.2", pending=True)
dummy = DummyClass()
default_msg = (
r"^The {} attribute is deprecated and may be removed in a future version\.$"
)
# Test getters and setters.
msg = default_msg.format("foo")
with pytest.warns(AstropyDeprecationWarning, match=msg) as w:
assert dummy.foo == 42
assert len(w) == 1
with pytest.warns(AstropyDeprecationWarning, match=msg):
dummy.foo = 24
# Handling ``_foo`` should not cause deprecation warnings.
assert dummy._foo == 24
dummy._foo = 13
assert dummy._foo == 13
msg = default_msg.format("bar")
with pytest.warns(NewDeprecationWarning, match=msg) as w:
assert dummy.bar == 4242
assert len(w) == 1
with pytest.warns(NewDeprecationWarning, match=msg):
dummy.bar = 2424
with pytest.warns(AstropyDeprecationWarning, match="^MSG$"):
assert dummy.message == "42"
with pytest.warns(AstropyDeprecationWarning, match="^MSG$"):
dummy.message = "24"
msg = default_msg.format("alternative")[:-1] + r"\n Use other instead\.$"
with pytest.warns(AstropyDeprecationWarning, match=msg):
assert dummy.alternative == [42]
with pytest.warns(AstropyDeprecationWarning, match=msg):
dummy.alternative = [24]
# ``other`` is not deprecated.
assert dummy.other == [24]
dummy.other = [31]
msg = r"^The pending attribute will be deprecated in a future version\.$"
with pytest.warns(AstropyPendingDeprecationWarning, match=msg):
assert dummy.pending == {42}
with pytest.warns(AstropyPendingDeprecationWarning, match=msg):
dummy.pending = {24}
# This needs to be defined outside of the test function, because we
# want to try to pickle it.
@deprecated("100.0")
class TA:
"""
This is the class docstring.
"""
def __init__(self):
"""
This is the __init__ docstring
"""
pass
class TMeta(type):
metaclass_attr = 1
@deprecated("100.0")
class TB(metaclass=TMeta):
pass
@deprecated("100.0", warning_type=NewDeprecationWarning)
class TC:
"""
This class has the custom warning.
"""
pass
def test_deprecated_class():
orig_A = TA.__bases__[0]
# The only things that should be different about the new class
# are __doc__, __init__, __bases__, __dict__, __subclasshook__,
# and __init_subclass__.
for x in dir(orig_A):
if x not in (
"__doc__",
"__init__",
"__bases__",
"__dict__",
"__subclasshook__",
"__init_subclass__",
):
assert getattr(TA, x) == getattr(orig_A, x)
with pytest.warns(AstropyDeprecationWarning) as w:
TA()
assert len(w) == 1
if TA.__doc__ is not None:
assert "function" not in TA.__doc__
assert "deprecated" in TA.__doc__
assert "function" not in TA.__init__.__doc__
assert "deprecated" in TA.__init__.__doc__
# Make sure the object is picklable
pickle.dumps(TA)
with pytest.warns(NewDeprecationWarning) as w:
TC()
assert len(w) == 1
def test_deprecated_class_with_new_method():
"""
Test that a class with __new__ method still works even if it accepts
additional arguments.
This previously failed because the deprecated decorator would wrap the
object's ``__init__``, which takes no arguments.
"""
@deprecated("1.0")
class A:
def __new__(cls, a):
return super().__new__(cls)
# Creating an instance should work but raise a DeprecationWarning
with pytest.warns(AstropyDeprecationWarning) as w:
A(1)
assert len(w) == 1
@deprecated("1.0")
class B:
def __new__(cls, a):
return super().__new__(cls)
def __init__(self, a):
pass
# Creating an instance should work but raise a DeprecationWarning
with pytest.warns(AstropyDeprecationWarning) as w:
B(1)
assert len(w) == 1
def test_deprecated_class_with_super():
"""
Regression test for an issue where classes that used ``super()`` in their
``__init__`` did not actually call the correct class's ``__init__`` in the
MRO.
"""
@deprecated("100.0")
class TB:
def __init__(self, a, b):
super().__init__()
with pytest.warns(AstropyDeprecationWarning) as w:
TB(1, 2)
assert len(w) == 1
if TB.__doc__ is not None:
assert "function" not in TB.__doc__
assert "deprecated" in TB.__doc__
assert "function" not in TB.__init__.__doc__
assert "deprecated" in TB.__init__.__doc__
def test_deprecated_class_with_custom_metaclass():
"""
Regression test for an issue where deprecating a class with a metaclass
other than type did not restore the metaclass properly.
"""
with pytest.warns(AstropyDeprecationWarning) as w:
TB()
assert len(w) == 1
assert type(TB) is TMeta
assert TB.metaclass_attr == 1
def test_deprecated_static_and_classmethod():
"""
Regression test for issue introduced by
https://github.com/astropy/astropy/pull/2811 and mentioned also here:
https://github.com/astropy/astropy/pull/2580#issuecomment-51049969
where it appears that deprecated staticmethods didn't work on Python 2.6.
"""
class A:
"""Docstring"""
@deprecated("1.0")
@staticmethod
def B():
pass
@deprecated("1.0")
@classmethod
def C(cls):
pass
with pytest.warns(AstropyDeprecationWarning) as w:
A.B()
assert len(w) == 1
if A.__doc__ is not None:
assert "deprecated" in A.B.__doc__
with pytest.warns(AstropyDeprecationWarning) as w:
A.C()
assert len(w) == 1
if A.__doc__ is not None:
assert "deprecated" in A.C.__doc__
def test_deprecated_argument():
# Tests the decorator with function, method, staticmethod and classmethod.
class Test:
@classmethod
@deprecated_renamed_argument("clobber", "overwrite", "1.3")
def test1(cls, overwrite):
return overwrite
@staticmethod
@deprecated_renamed_argument("clobber", "overwrite", "1.3")
def test2(overwrite):
return overwrite
@deprecated_renamed_argument("clobber", "overwrite", "1.3")
def test3(self, overwrite):
return overwrite
@deprecated_renamed_argument(
"clobber", "overwrite", "1.3", warning_type=NewDeprecationWarning
)
def test4(self, overwrite):
return overwrite
@deprecated_renamed_argument("clobber", "overwrite", "1.3", relax=False)
def test1(overwrite):
return overwrite
for method in [Test().test1, Test().test2, Test().test3, Test().test4, test1]:
# As positional argument only
assert method(1) == 1
# As new keyword argument
assert method(overwrite=1) == 1
# Using the deprecated name
with pytest.warns(AstropyDeprecationWarning, match=r"1\.3") as w:
assert method(clobber=1) == 1
assert len(w) == 1
assert "test_decorators.py" in str(w[0].filename)
if method.__name__ == "test4":
assert issubclass(w[0].category, NewDeprecationWarning)
# Using both. Both keyword
with pytest.raises(TypeError), pytest.warns(AstropyDeprecationWarning):
method(clobber=2, overwrite=1)
# One positional, one keyword
with pytest.raises(TypeError), pytest.warns(AstropyDeprecationWarning):
method(1, clobber=2)
def test_deprecated_argument_custom_message():
@deprecated_renamed_argument("foo", "bar", "4.0", message="Custom msg")
def test(bar=0):
pass
with pytest.warns(AstropyDeprecationWarning, match="Custom msg"):
test(foo=0)
def test_deprecated_argument_in_kwargs():
# To rename an argument that is consumed by "kwargs" the "arg_in_kwargs"
# parameter is used.
@deprecated_renamed_argument("clobber", "overwrite", "1.3", arg_in_kwargs=True)
def test(**kwargs):
return kwargs["overwrite"]
# As positional argument only
with pytest.raises(TypeError):
test(1)
# As new keyword argument
assert test(overwrite=1) == 1
# Using the deprecated name
with pytest.warns(AstropyDeprecationWarning, match=r"1\.3") as w:
assert test(clobber=1) == 1
assert len(w) == 1
assert "test_decorators.py" in str(w[0].filename)
# Using both. Both keyword
with pytest.raises(TypeError), pytest.warns(AstropyDeprecationWarning):
test(clobber=2, overwrite=1)
# One positional, one keyword
with pytest.raises(TypeError), pytest.warns(AstropyDeprecationWarning):
test(1, clobber=2)
def test_deprecated_argument_relaxed():
# Relax turns the TypeError if both old and new keyword are used into
# a warning.
@deprecated_renamed_argument("clobber", "overwrite", "1.3", relax=True)
def test(overwrite):
return overwrite
# As positional argument only
assert test(1) == 1
# As new keyword argument
assert test(overwrite=1) == 1
# Using the deprecated name
with pytest.warns(AstropyDeprecationWarning, match=r"1\.3") as w:
assert test(clobber=1) == 1
assert len(w) == 1
# Using both. Both keyword
with pytest.warns(AstropyUserWarning) as w:
assert test(clobber=2, overwrite=1) == 1
assert len(w) == 2
assert '"clobber" was deprecated' in str(w[0].message)
assert '"clobber" and "overwrite" keywords were set' in str(w[1].message)
# One positional, one keyword
with pytest.warns(AstropyUserWarning) as w:
assert test(1, clobber=2) == 1
assert len(w) == 2
assert '"clobber" was deprecated' in str(w[0].message)
assert '"clobber" and "overwrite" keywords were set' in str(w[1].message)
def test_deprecated_argument_pending():
# With pending=True no warning is raised, and using both the old and the
# new keyword together does not raise a TypeError either.
@deprecated_renamed_argument("clobber", "overwrite", "1.3", pending=True)
def test(overwrite):
return overwrite
# As positional argument only
assert test(1) == 1
# As new keyword argument
assert test(overwrite=1) == 1
# Using the deprecated name
assert test(clobber=1) == 1
# Using both. Both keyword
assert test(clobber=2, overwrite=1) == 1
# One positional, one keyword
assert test(1, clobber=2) == 1
def test_deprecated_argument_multi_deprecation():
@deprecated_renamed_argument(
["x", "y", "z"], ["a", "b", "c"], [1.3, 1.2, 1.3], relax=True
)
def test(a, b, c):
return a, b, c
with pytest.warns(AstropyDeprecationWarning) as w:
assert test(x=1, y=2, z=3) == (1, 2, 3)
assert len(w) == 3
# Make sure relax is valid for all arguments
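# Each call emits three renamed-argument warnings (for x, y and z) plus one
# warning about both the old and the new name being passed.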
with pytest.warns(AstropyUserWarning) as w:
assert test(x=1, y=2, z=3, b=3) == (1, 3, 3)
assert len(w) == 4
with pytest.warns(AstropyUserWarning) as w:
assert test(x=1, y=2, z=3, a=3) == (3, 2, 3)
assert len(w) == 4
with pytest.warns(AstropyUserWarning) as w:
assert test(x=1, y=2, z=3, c=5) == (1, 2, 5)
assert len(w) == 4
def test_deprecated_argument_multi_deprecation_2():
@deprecated_renamed_argument(
["x", "y", "z"], ["a", "b", "c"], [1.3, 1.2, 1.3], relax=[True, True, False]
)
def test(a, b, c):
return a, b, c
with pytest.warns(AstropyUserWarning) as w:
assert test(x=1, y=2, z=3, b=3) == (1, 3, 3)
assert len(w) == 4
with pytest.warns(AstropyUserWarning) as w:
assert test(x=1, y=2, z=3, a=3) == (3, 2, 3)
assert len(w) == 4
with pytest.raises(TypeError), pytest.warns(AstropyUserWarning):
assert test(x=1, y=2, z=3, c=5) == (1, 2, 5)
def test_deprecated_argument_not_allowed_use():
# If the argument is supposed to be consumed by **kwargs, the arg_in_kwargs
# parameter needs to be set. Without it, a TypeError is raised.
with pytest.raises(TypeError):
@deprecated_renamed_argument("clobber", "overwrite", "1.3")
def test1(**kwargs):
return kwargs["overwrite"]
# Cannot replace "*args".
with pytest.raises(TypeError):
@deprecated_renamed_argument("overwrite", "args", "1.3")
def test2(*args):
return args
# Cannot replace "**kwargs".
with pytest.raises(TypeError):
@deprecated_renamed_argument("overwrite", "kwargs", "1.3")
def test3(**kwargs):
return kwargs
def test_deprecated_argument_remove():
@deprecated_renamed_argument("x", None, "2.0", alternative="astropy.y")
def test(dummy=11, x=3):
return dummy, x
with pytest.warns(AstropyDeprecationWarning, match=r"Use astropy\.y instead") as w:
assert test(x=1) == (11, 1)
assert len(w) == 1
with pytest.warns(AstropyDeprecationWarning) as w:
assert test(x=1, dummy=10) == (10, 1)
assert len(w) == 1
with pytest.warns(AstropyDeprecationWarning, match=r"Use astropy\.y instead"):
test(121, 1) == (121, 1) # noqa: B015
assert test() == (11, 3)
assert test(121) == (121, 3)
assert test(dummy=121) == (121, 3)
def test_sharedmethod_reuse_on_subclasses():
"""
Regression test for an issue where sharedmethod would bind to one class
for all time, causing the same method not to work properly on other
subclasses of that class.
It has the same problem when the same sharedmethod is called on different
instances of some class as well.
"""
class AMeta(type):
def foo(cls):
return cls.x
class A:
x = 3
def __init__(self, x):
self.x = x
@sharedmethod
def foo(self):
return self.x
a1 = A(1)
a2 = A(2)
assert a1.foo() == 1
assert a2.foo() == 2
# Similar test now, but for multiple subclasses using the same sharedmethod
# as a classmethod
assert A.foo() == 3
class B(A):
x = 5
assert B.foo() == 5
def test_classproperty_docstring():
"""
Tests that the docstring is set correctly on classproperties.
This failed previously due to a bug in Python that didn't always
set __doc__ properly on instances of property subclasses.
"""
class A:
# Inherits docstring from getter
@classproperty
def foo(cls):
"""The foo."""
return 1
assert A.__dict__["foo"].__doc__ == "The foo."
class B:
# Use doc passed to classproperty constructor
def _get_foo(cls):
return 1
foo = classproperty(_get_foo, doc="The foo.")
assert B.__dict__["foo"].__doc__ == "The foo."
@pytest.mark.slow
def test_classproperty_lazy_threadsafe(fast_thread_switching):
"""
Test that a class property with lazy=True is thread-safe.
"""
workers = 8
with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
# This is testing for race conditions, so try many times in the
# hope that we'll get the timing right.
for p in range(10000):
class A:
@classproperty(lazy=True)
def foo(cls):
nonlocal calls
calls += 1
return object()
# Have all worker threads query in parallel
calls = 0
futures = [executor.submit(lambda: A.foo) for i in range(workers)]
# Check that only one call happened and they all received it
values = [future.result() for future in futures]
assert calls == 1
assert values[0] is not None
assert values == [values[0]] * workers
@pytest.mark.slow
def test_lazyproperty_threadsafe(fast_thread_switching):
"""
Test thread safety of lazyproperty.
"""
# This test is generally similar to test_classproperty_lazy_threadsafe
# above. See there for comments.
class A:
def __init__(self):
self.calls = 0
@lazyproperty
def foo(self):
self.calls += 1
return object()
workers = 8
with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
for p in range(10000):
a = A()
futures = [executor.submit(lambda: a.foo) for i in range(workers)]
values = [future.result() for future in futures]
assert a.calls == 1
assert a.foo is not None
assert values == [a.foo] * workers
def test_format_doc_stringInput_simple():
# Simple tests with string input
docstring_fail = ""
# Raises a ValueError if the input is empty
with pytest.raises(ValueError):
@format_doc(docstring_fail)
def testfunc_fail():
pass
docstring = "test"
# A first test that replaces an empty docstring
@format_doc(docstring)
def testfunc_1():
pass
assert inspect.getdoc(testfunc_1) == docstring
# Test that it replaces an existing docstring
@format_doc(docstring)
def testfunc_2():
"""not test"""
pass
assert inspect.getdoc(testfunc_2) == docstring
def test_format_doc_stringInput_format():
# Tests with string input and formatting
docstring = "yes {0} no {opt}"
# Raises an indexerror if not given the formatted args and kwargs
with pytest.raises(IndexError):
@format_doc(docstring)
def testfunc1():
pass
# Test that the formatting is done right
@format_doc(docstring, "/", opt="= life")
def testfunc2():
pass
assert inspect.getdoc(testfunc2) == "yes / no = life"
# Test that we can include the original docstring
docstring2 = "yes {0} no {__doc__}"
@format_doc(docstring2, "/")
def testfunc3():
"""= 2 / 2 * life"""
pass
assert inspect.getdoc(testfunc3) == "yes / no = 2 / 2 * life"
def test_format_doc_objectInput_simple():
# Simple tests with object input
def docstring_fail():
pass
# Self input while the function has no docstring raises an error
with pytest.raises(ValueError):
@format_doc(docstring_fail)
def testfunc_fail():
pass
def docstring0():
"""test"""
pass
# A first test that replaces an empty docstring
@format_doc(docstring0)
def testfunc_1():
pass
assert inspect.getdoc(testfunc_1) == inspect.getdoc(docstring0)
# Test that it replaces an existing docstring
@format_doc(docstring0)
def testfunc_2():
"""not test"""
pass
assert inspect.getdoc(testfunc_2) == inspect.getdoc(docstring0)
def test_format_doc_objectInput_format():
# Tests with object input and formatting
def docstring():
"""test {0} test {opt}"""
pass
# Raises an indexerror if not given the formatted args and kwargs
with pytest.raises(IndexError):
@format_doc(docstring)
def testfunc_fail():
pass
# Test that the formatting is done right
@format_doc(docstring, "+", opt="= 2 * test")
def testfunc2():
pass
assert inspect.getdoc(testfunc2) == "test + test = 2 * test"
# Test that we can include the original docstring
def docstring2():
"""test {0} test {__doc__}"""
pass
@format_doc(docstring2, "+")
def testfunc3():
"""= 4 / 2 * test"""
pass
assert inspect.getdoc(testfunc3) == "test + test = 4 / 2 * test"
def test_format_doc_selfInput_simple():
# Simple tests with self input
# Self input while the function has no docstring raises an error
with pytest.raises(ValueError):
@format_doc(None)
def testfunc_fail():
pass
# Test that it keeps an existing docstring
@format_doc(None)
def testfunc_1():
"""not test"""
pass
assert inspect.getdoc(testfunc_1) == "not test"
def test_format_doc_selfInput_format():
# Tests with string input which is '__doc__' (special case) and formatting
# Raises an indexerror if not given the formatted args and kwargs
with pytest.raises(IndexError):
@format_doc(None)
def testfunc_fail():
"""dum {0} dum {opt}"""
pass
# Test that the formatting is done right
@format_doc(None, "di", opt="da dum")
def testfunc1():
"""dum {0} dum {opt}"""
pass
assert inspect.getdoc(testfunc1) == "dum di dum da dum"
# Test that we cannot recursively insert the original documentation
@format_doc(None, "di")
def testfunc2():
"""dum {0} dum {__doc__}"""
pass
assert inspect.getdoc(testfunc2) == "dum di dum "
def test_format_doc_onMethod():
# Check if the decorator works on methods too; to spice it up we try a
# double decorator
docstring = "what we do {__doc__}"
class TestClass:
@format_doc(docstring)
@format_doc(None, "strange.")
def test_method(self):
"""is {0}"""
pass
assert inspect.getdoc(TestClass.test_method) == "what we do is strange."
def test_format_doc_onClass():
# Check if the decorator works on classes too
docstring = "what we do {__doc__} {0}{opt}"
@format_doc(docstring, "strange", opt=".")
class TestClass:
"""is"""
pass
assert inspect.getdoc(TestClass) == "what we do is strange."
|
8ecb93b205bcb16e6e222fc1470a09abdc047f4ec4cb2f2804e7b0b0c9907173 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Built-in mask mixin class.
The design uses `Masked` as a factory class which automatically
generates new subclasses for any data class that is itself a
subclass of a predefined masked class, with `MaskedNDArray`
providing such a predefined class for `~numpy.ndarray`.
Generally, any new predefined class should override the
``from_unmasked(data, mask, copy=False)`` class method that
creates an instance from unmasked data and a mask, as well as
the ``unmasked`` property that returns just the data.
The `Masked` class itself provides a base ``mask`` property,
which can also be overridden if needed.
"""
import builtins
import numpy as np
from astropy.utils.compat import NUMPY_LT_1_22
from astropy.utils.data_info import ParentDtypeInfo
from astropy.utils.shapes import NDArrayShapeMethods
from .function_helpers import (
APPLY_TO_BOTH_FUNCTIONS,
DISPATCHED_FUNCTIONS,
MASKED_SAFE_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
)
__all__ = ["Masked", "MaskedNDArray"]
get__doc__ = """Masked version of {0.__name__}.
Except for the ability to pass in a ``mask``, parameters are
as for `{0.__module__}.{0.__name__}`.
""".format
class Masked(NDArrayShapeMethods):
"""A scalar value or array of values with associated mask.
The resulting instance will take its exact type from whatever the
contents are, with the type generated on the fly as needed.
Parameters
----------
data : array-like
The data for which a mask is to be added. The result will be
a subclass of the type of ``data``.
mask : array-like of bool, optional
The initial mask to assign. If not given, taken from the data.
copy : bool
Whether the data and mask should be copied. Default: `False`.
"""
_base_classes = {}
"""Explicitly defined masked classes keyed by their unmasked counterparts.
For subclasses of these unmasked classes, masked counterparts can be generated.
"""
_masked_classes = {}
"""Masked classes keyed by their unmasked data counterparts."""
def __new__(cls, *args, **kwargs):
if cls is Masked:
# Initializing with Masked itself means we're in "factory mode".
if not kwargs and len(args) == 1 and isinstance(args[0], type):
# Create a new masked class.
return cls._get_masked_cls(args[0])
else:
return cls._get_masked_instance(*args, **kwargs)
else:
# Otherwise we're a subclass and should just pass information on.
return super().__new__(cls, *args, **kwargs)
def __init_subclass__(cls, base_cls=None, data_cls=None, **kwargs):
"""Register a Masked subclass.
Parameters
----------
base_cls : type, optional
If given, it is taken to mean that ``cls`` can be used as
a base for masked versions of all subclasses of ``base_cls``,
so it is registered as such in ``_base_classes``.
data_cls : type, optional
If given, ``cls`` will be registered as the masked version of
``data_cls``. Will set the private ``cls._data_cls`` attribute,
and auto-generate a docstring if not present already.
**kwargs
Passed on for possible further initialization by superclasses.
"""
if base_cls is not None:
Masked._base_classes[base_cls] = cls
if data_cls is not None:
cls._data_cls = data_cls
cls._masked_classes[data_cls] = cls
if cls.__doc__ is None:
cls.__doc__ = get__doc__(data_cls)
super().__init_subclass__(**kwargs)
# This base implementation just uses the class initializer.
# Subclasses can override this in case the class does not work
# with this signature, or to provide a faster implementation.
@classmethod
def from_unmasked(cls, data, mask=None, copy=False):
"""Create an instance from unmasked data and a mask."""
return cls(data, mask=mask, copy=copy)
@classmethod
def _get_masked_instance(cls, data, mask=None, copy=False):
data, data_mask = cls._get_data_and_mask(data)
if mask is None:
mask = False if data_mask is None else data_mask
masked_cls = cls._get_masked_cls(data.__class__)
return masked_cls.from_unmasked(data, mask, copy)
@classmethod
def _get_masked_cls(cls, data_cls):
"""Get the masked wrapper for a given data class.
If the masked class does not exist yet but the data class is a subclass
of any of the registered base data classes, it is automatically generated
(except we skip `~numpy.ma.MaskedArray` subclasses, since then the
masking mechanisms would interfere).
"""
if issubclass(data_cls, (Masked, np.ma.MaskedArray)):
return data_cls
masked_cls = cls._masked_classes.get(data_cls)
if masked_cls is None:
# Walk through MRO and find closest base data class.
# Note: right now, will basically always be ndarray, but
# one could imagine needing some special care for one subclass,
# which would then get its own entry. E.g., if MaskedAngle
# defined something special, then MaskedLongitude should depend
# on it.
for mro_item in data_cls.__mro__:
base_cls = cls._base_classes.get(mro_item)
if base_cls is not None:
break
else:
# Just hope that MaskedNDArray can handle it.
# TODO: this covers the case where a user puts in a list or so,
# but for those one could just explicitly do something like
# _masked_classes[list] = MaskedNDArray.
return MaskedNDArray
# Create (and therefore register) new Masked subclass for the
# given data_cls.
masked_cls = type(
"Masked" + data_cls.__name__,
(data_cls, base_cls),
{},
data_cls=data_cls,
)
return masked_cls
@classmethod
def _get_data_and_mask(cls, data, allow_ma_masked=False):
"""Split data into unmasked and mask, if present.
Parameters
----------
data : array-like
Possibly masked item, judged by whether it has a ``mask`` attribute.
If so, checks for being an instance of `~astropy.utils.masked.Masked`
or `~numpy.ma.MaskedArray`, and gets unmasked data appropriately.
allow_ma_masked : bool, optional
Whether or not to process `~numpy.ma.masked`, i.e., an item that
implies no data but the presence of a mask.
Returns
-------
unmasked, mask : array-like
Unmasked will be `None` for `~numpy.ma.masked`.
Raises
------
ValueError
If `~numpy.ma.masked` is passed in and ``allow_ma_masked`` is not set.
"""
mask = getattr(data, "mask", None)
if mask is not None:
try:
data = data.unmasked
except AttributeError:
if not isinstance(data, np.ma.MaskedArray):
raise
if data is np.ma.masked:
if allow_ma_masked:
data = None
else:
raise ValueError("cannot handle np.ma.masked here.") from None
else:
data = data.data
return data, mask
@classmethod
def _get_data_and_masks(cls, *args):
data_masks = [cls._get_data_and_mask(arg) for arg in args]
return (
tuple(data for data, _ in data_masks),
tuple(mask for _, mask in data_masks),
)
def _get_mask(self):
"""The mask.
If set, replace the original mask with whatever it is set with,
using a view if no broadcasting or type conversion is required.
"""
return self._mask
def _set_mask(self, mask, copy=False):
self_dtype = getattr(self, "dtype", None)
mask_dtype = (
np.ma.make_mask_descr(self_dtype)
if self_dtype and self_dtype.names
else np.dtype("?")
)
ma = np.asanyarray(mask, dtype=mask_dtype)
if ma.shape != self.shape:
# This will fail (correctly) if not broadcastable.
self._mask = np.empty(self.shape, dtype=mask_dtype)
self._mask[...] = ma
elif ma is mask:
# Even if not copying use a view so that shape setting
# does not propagate.
self._mask = mask.copy() if copy else mask.view()
else:
self._mask = ma
mask = property(_get_mask, _set_mask)
# Note: subclass should generally override the unmasked property.
# This one assumes the unmasked data is stored in a private attribute.
@property
def unmasked(self):
"""The unmasked values.
See Also
--------
astropy.utils.masked.Masked.filled
"""
return self._unmasked
def filled(self, fill_value):
"""Get a copy of the underlying data, with masked values filled in.
Parameters
----------
fill_value : object
Value to replace masked values with.
See Also
--------
astropy.utils.masked.Masked.unmasked
"""
unmasked = self.unmasked.copy()
if self.mask.dtype.names:
np.ma.core._recursive_filled(unmasked, self.mask, fill_value)
else:
unmasked[self.mask] = fill_value
return unmasked
def _apply(self, method, *args, **kwargs):
# Required method for NDArrayShapeMethods, to help provide __getitem__
# and shape-changing methods.
if callable(method):
data = method(self.unmasked, *args, **kwargs)
mask = method(self.mask, *args, **kwargs)
else:
data = getattr(self.unmasked, method)(*args, **kwargs)
mask = getattr(self.mask, method)(*args, **kwargs)
result = self.from_unmasked(data, mask, copy=False)
if "info" in self.__dict__:
result.info = self.info
return result
def __setitem__(self, item, value):
value, mask = self._get_data_and_mask(value, allow_ma_masked=True)
if value is not None:
self.unmasked[item] = value
self.mask[item] = mask
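# Hedged usage sketch for the basic instance API defined above (``unmasked``,
# ``mask``, ``filled``); kept as a doctest-style comment so it is not executed
# at import time.
#
#     >>> import numpy as np
#     >>> ma = Masked(np.array([1.0, 2.0, 3.0]), mask=[False, True, False])
#     >>> ma.unmasked
#     array([1., 2., 3.])
#     >>> ma.mask
#     array([False,  True, False])
#     >>> ma.filled(0.0)
#     array([1., 0., 3.])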
class MaskedInfoBase:
mask_val = np.ma.masked
def __init__(self, bound=False):
super().__init__(bound)
# If bound to a data object instance then create the dict of attributes
# which stores the info attribute values.
if bound:
# Specify how to serialize this object depending on context.
self.serialize_method = {
"fits": "null_value",
"ecsv": "null_value",
"hdf5": "data_mask",
"parquet": "data_mask",
None: "null_value",
}
class MaskedNDArrayInfo(MaskedInfoBase, ParentDtypeInfo):
"""
Container for meta information like name, description, format.
"""
# Add `serialize_method` attribute to the attrs that MaskedNDArrayInfo knows
# about. This allows customization of the way that MaskedColumn objects
# get written to file depending on format. The default is to use whatever
# the writer would normally do, which in the case of FITS or ECSV is to use
# a NULL value within the data itself. If serialize_method is 'data_mask'
# then the mask is explicitly written out as a separate column if there
# are any masked values. This is the same as for MaskedColumn.
attr_names = ParentDtypeInfo.attr_names | {"serialize_method"}
# When `serialize_method` is 'data_mask', and data and mask are being written
# as separate columns, use column names <name> and <name>.mask (instead
# of default encoding as <name>.data and <name>.mask).
_represent_as_dict_primary_data = "data"
def _represent_as_dict(self):
out = super()._represent_as_dict()
masked_array = self._parent
# If the serialize method for this context (e.g. 'fits' or 'ecsv') is
# 'data_mask', that means to serialize using an explicit mask column.
method = self.serialize_method[self._serialize_context]
if method == "data_mask":
out["data"] = masked_array.unmasked
if np.any(masked_array.mask):
# Only if there are actually masked elements do we add the ``mask`` column
out["mask"] = masked_array.mask
elif method == "null_value":
out["data"] = np.ma.MaskedArray(
masked_array.unmasked, mask=masked_array.mask
)
else:
raise ValueError(
'serialize method must be either "data_mask" or "null_value"'
)
return out
def _construct_from_dict(self, map):
# Override usual handling, since MaskedNDArray takes shape and buffer
# as input, which is less useful here.
# The map can contain either a MaskedColumn or a Column and a mask.
# Extract the mask for the former case.
map.setdefault("mask", getattr(map["data"], "mask", False))
return self._parent_cls.from_unmasked(**map)
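# Hedged sketch of how ``serialize_method`` is typically used; the table ``t``
# and the column name "a" below are assumptions for illustration only.
#
#     t["a"].info.serialize_method["ecsv"] = "data_mask"
#     t.write("example.ecsv")  # data and mask then written as separate columns
#
# The default, "null_value", instead encodes masked entries as null values in
# the data itself.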
class MaskedArraySubclassInfo(MaskedInfoBase):
"""Mixin class to create a subclasses such as MaskedQuantityInfo."""
# This is used below in __init_subclass__, which also inserts a
# 'serialize_method' attribute in attr_names.
def _represent_as_dict(self):
# Use the data_cls as the class name for serialization,
# so that we do not have to store all possible masked classes
# in astropy.table.serialize.__construct_mixin_classes.
out = super()._represent_as_dict()
data_cls = self._parent._data_cls
out.setdefault("__class__", data_cls.__module__ + "." + data_cls.__name__)
return out
def _comparison_method(op):
"""
Create a comparison operator for MaskedNDArray.
Needed since for string dtypes the base operators bypass __array_ufunc__
and hence return unmasked results.
"""
def _compare(self, other):
other_data, other_mask = self._get_data_and_mask(other)
result = getattr(self.unmasked, op)(other_data)
if result is NotImplemented:
return NotImplemented
mask = self.mask | (other_mask if other_mask is not None else False)
return self._masked_result(result, mask, None)
return _compare
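# Hedged sketch of the behaviour provided by the comparison methods above
# (doctest-style comment, not executed at import time): the mask of the
# operands propagates into the comparison result.
#
#     >>> ms = Masked(np.array(["a", "b"]), mask=[False, True])
#     >>> eq = ms == "a"
#     >>> bool(eq.unmasked[0]), bool(eq.mask[1])
#     (True, True)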
class MaskedIterator:
"""
Flat iterator object to iterate over Masked Arrays.
A `~astropy.utils.masked.MaskedIterator` iterator is returned by ``m.flat``
for any masked array ``m``. It allows iterating over the array as if it
were a 1-D array, either in a for-loop or by calling its `next` method.
Iteration is done in C-contiguous style, with the last index varying the
fastest. The iterator can also be indexed using basic slicing or
advanced indexing.
Notes
-----
The design of `~astropy.utils.masked.MaskedIterator` follows that of
`~numpy.ma.core.MaskedIterator`. It is not exported by the
`~astropy.utils.masked` module. Instead of instantiating directly,
use the ``flat`` method in the masked array instance.
"""
def __init__(self, m):
self._masked = m
self._dataiter = m.unmasked.flat
self._maskiter = m.mask.flat
def __iter__(self):
return self
def __getitem__(self, indx):
out = self._dataiter.__getitem__(indx)
mask = self._maskiter.__getitem__(indx)
# For single elements, ndarray.flat.__getitem__ returns scalars; these
# need a new view as a Masked array.
if not isinstance(out, np.ndarray):
out = out[...]
mask = mask[...]
return self._masked.from_unmasked(out, mask, copy=False)
def __setitem__(self, index, value):
data, mask = self._masked._get_data_and_mask(value, allow_ma_masked=True)
if data is not None:
self._dataiter[index] = data
self._maskiter[index] = mask
def __next__(self):
"""
Return the next value, or raise StopIteration.
"""
out = next(self._dataiter)[...]
mask = next(self._maskiter)[...]
return self._masked.from_unmasked(out, mask, copy=False)
next = __next__
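# Hedged sketch of iterating with ``.flat`` (doctest-style comment, not
# executed at import time): single items keep their class and mask, and
# assignment writes through to the parent array.
#
#     >>> ma = Masked(np.arange(4.0).reshape(2, 2),
#     ...             mask=[[False, True], [False, False]])
#     >>> bool(ma.flat[1].mask)
#     True
#     >>> ma.flat[3] = 10.0
#     >>> float(ma.unmasked[1, 1])
#     10.0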
class MaskedNDArray(Masked, np.ndarray, base_cls=np.ndarray, data_cls=np.ndarray):
_mask = None
info = MaskedNDArrayInfo()
def __new__(cls, *args, mask=None, **kwargs):
"""Get data class instance from arguments and then set mask."""
self = super().__new__(cls, *args, **kwargs)
if mask is not None:
self.mask = mask
elif self._mask is None:
self.mask = False
return self
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
# For all subclasses we should set a default __new__ that passes on
# arguments other than mask to the data class, and then sets the mask.
if "__new__" not in cls.__dict__:
def __new__(newcls, *args, mask=None, **kwargs):
"""Get data class instance from arguments and then set mask."""
# Need to explicitly mention classes outside of class definition.
self = super(cls, newcls).__new__(newcls, *args, **kwargs)
if mask is not None:
self.mask = mask
elif self._mask is None:
self.mask = False
return self
cls.__new__ = __new__
if "info" not in cls.__dict__ and hasattr(cls._data_cls, "info"):
data_info = cls._data_cls.info
attr_names = data_info.attr_names | {"serialize_method"}
new_info = type(
cls.__name__ + "Info",
(MaskedArraySubclassInfo, data_info.__class__),
dict(attr_names=attr_names),
)
cls.info = new_info()
# The two pieces typically overridden.
@classmethod
def from_unmasked(cls, data, mask=None, copy=False):
# Note: have to override since __new__ would use ndarray.__new__
# which expects the shape as its first argument, not an array.
data = np.array(data, subok=True, copy=copy)
self = data.view(cls)
self._set_mask(mask, copy=copy)
return self
@property
def unmasked(self):
return super().view(self._data_cls)
@classmethod
def _get_masked_cls(cls, data_cls):
# Short-cuts
if data_cls is np.ndarray:
return MaskedNDArray
elif data_cls is None: # for .view()
return cls
return super()._get_masked_cls(data_cls)
@property
def flat(self):
"""A 1-D iterator over the Masked array.
This returns a ``MaskedIterator`` instance, which behaves the same
as the `~numpy.flatiter` instance returned by `~numpy.ndarray.flat`,
and is similar to Python's built-in iterator, except that it also
allows assignment.
"""
return MaskedIterator(self)
@property
def _baseclass(self):
"""Work-around for MaskedArray initialization.
Allows the base class to be inferred correctly when a masked instance
is used to initialize (or viewed as) a `~numpy.ma.MaskedArray`.
"""
return self._data_cls
def view(self, dtype=None, type=None):
"""New view of the masked array.
Like `numpy.ndarray.view`, but always returning a masked array subclass.
"""
if type is None and (
isinstance(dtype, builtins.type) and issubclass(dtype, np.ndarray)
):
return super().view(self._get_masked_cls(dtype))
if dtype is None:
return super().view(self._get_masked_cls(type))
dtype = np.dtype(dtype)
if not (
dtype.itemsize == self.dtype.itemsize
and (dtype.names is None or len(dtype.names) == len(self.dtype.names))
):
raise NotImplementedError(
f"{self.__class__} cannot be viewed with a dtype with a "
"with a different number of fields or size."
)
return super().view(dtype, self._get_masked_cls(type))
def __array_finalize__(self, obj):
# If we're a new object or viewing an ndarray, nothing has to be done.
if obj is None or obj.__class__ is np.ndarray:
return
# Logically, this should come from ndarray and hence be None, but
# just in case someone creates a new mixin, we check.
super_array_finalize = super().__array_finalize__
if super_array_finalize: # pragma: no cover
super_array_finalize(obj)
if self._mask is None:
# Got here after, e.g., a view of another masked class.
# Get its mask, or initialize ours.
self._set_mask(getattr(obj, "_mask", False))
if "info" in obj.__dict__:
self.info = obj.info
@property
def shape(self):
"""The shape of the data and the mask.
Usually used to get the current shape of an array, but may also be
used to reshape the array in-place by assigning a tuple of array
dimensions to it. As with `numpy.reshape`, one of the new shape
dimensions can be -1, in which case its value is inferred from the
size of the array and the remaining dimensions.
Raises
------
AttributeError
If a copy is required, of either the data or the mask.
"""
# Redefinition to allow defining a setter and add a docstring.
return super().shape
@shape.setter
def shape(self, shape):
old_shape = self.shape
self._mask.shape = shape
# Reshape array proper in try/except just in case some broadcasting
# or so causes it to fail.
try:
super(MaskedNDArray, type(self)).shape.__set__(self, shape)
except Exception as exc:
self._mask.shape = old_shape
# Given that the mask reshaping succeeded, the only logical
# reason for an exception is something like a broadcast error in
# __array_finalize__, or a different memory ordering between
# mask and data. For those, give a more useful error message;
# otherwise just raise the error.
if "could not broadcast" in exc.args[0]:
raise AttributeError(
"Incompatible shape for in-place modification. "
"Use `.reshape()` to make a copy with the desired "
"shape."
) from None
else: # pragma: no cover
raise
_eq_simple = _comparison_method("__eq__")
_ne_simple = _comparison_method("__ne__")
__lt__ = _comparison_method("__lt__")
__le__ = _comparison_method("__le__")
__gt__ = _comparison_method("__gt__")
__ge__ = _comparison_method("__ge__")
def __eq__(self, other):
if not self.dtype.names:
return self._eq_simple(other)
# For structured arrays, we treat this as a reduction over the fields,
# where masked fields are skipped and thus do not influence the result.
other = np.asanyarray(other, dtype=self.dtype)
result = np.stack(
[self[field] == other[field] for field in self.dtype.names], axis=-1
)
return result.all(axis=-1)
def __ne__(self, other):
if not self.dtype.names:
return self._ne_simple(other)
# For structured arrays, we treat this as a reduction over the fields,
# where masked fields are skipped and thus do not influence the result.
other = np.asanyarray(other, dtype=self.dtype)
result = np.stack(
[self[field] != other[field] for field in self.dtype.names], axis=-1
)
return result.any(axis=-1)
def _combine_masks(self, masks, out=None, where=True, copy=True):
"""Combine masks, possibly storing it in some output.
Parameters
----------
masks : tuple of array of bool or None
Input masks. Any that are `None` or `False` are ignored.
Should broadcast to each other.
out : output mask array, optional
Possible output array to hold the result.
where : array of bool, optional
Which elements of the output array to fill.
copy : bool, optional
Whether to ensure a copy is made. Only relevant if a single
input mask is not `None`, and ``out`` is not given.
"""
masks = [m for m in masks if m is not None and m is not False]
if not masks:
return False
if len(masks) == 1:
if out is None:
return masks[0].copy() if copy else masks[0]
else:
np.copyto(out, masks[0], where=where)
return out
out = np.logical_or(masks[0], masks[1], out=out, where=where)
for mask in masks[2:]:
np.logical_or(out, mask, out=out, where=where)
return out
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
out = kwargs.pop("out", None)
out_unmasked = None
out_mask = None
if out is not None:
out_unmasked, out_masks = self._get_data_and_masks(*out)
for d, m in zip(out_unmasked, out_masks):
if m is None:
# TODO: allow writing to unmasked output if nothing is masked?
if d is not None:
raise TypeError("cannot write to unmasked output")
elif out_mask is None:
out_mask = m
# TODO: where is only needed for __call__ and reduce;
# this is very fast, but still worth separating out?
where = kwargs.pop("where", True)
if where is True:
where_unmasked = True
where_mask = None
else:
where_unmasked, where_mask = self._get_data_and_mask(where)
unmasked, masks = self._get_data_and_masks(*inputs)
if ufunc.signature:
# We're dealing with a gufunc. For now, only deal with
# np.matmul and gufuncs for which the mask of any output always
# depends on all core dimension values of all inputs.
# Also ignore axes keyword for now...
# TODO: in principle, it should be possible to generate the mask
# purely based on the signature.
if "axes" in kwargs:
raise NotImplementedError(
"Masked does not yet support gufunc calls with 'axes'."
)
if ufunc is np.matmul:
# np.matmul is tricky and its signature cannot be parsed by
# _parse_gufunc_signature.
unmasked = np.atleast_1d(*unmasked)
mask0, mask1 = masks
masks = []
is_mat1 = unmasked[1].ndim >= 2
if mask0 is not None:
masks.append(np.logical_or.reduce(mask0, axis=-1, keepdims=is_mat1))
if mask1 is not None:
masks.append(
np.logical_or.reduce(mask1, axis=-2, keepdims=True)
if is_mat1
else np.logical_or.reduce(mask1)
)
mask = self._combine_masks(masks, out=out_mask, copy=False)
else:
# Parse signature with private numpy function. Note it
# cannot handle spaces in tuples, so remove those.
in_sig, out_sig = np.lib.function_base._parse_gufunc_signature(
ufunc.signature.replace(" ", "")
)
axis = kwargs.get("axis", -1)
keepdims = kwargs.get("keepdims", False)
in_masks = []
for sig, mask in zip(in_sig, masks):
if mask is not None:
if sig:
# Input has core dimensions. Assume that if any
# value in those is masked, the output will be
# masked too (TODO: for multiple core dimensions
# this may be too strong).
mask = np.logical_or.reduce(
mask, axis=axis, keepdims=keepdims
)
in_masks.append(mask)
mask = self._combine_masks(in_masks)
result_masks = []
for os in out_sig:
if os:
# Output has core dimensions. Assume all those
# get the same mask.
result_mask = np.expand_dims(mask, axis)
else:
result_mask = mask
result_masks.append(result_mask)
mask = result_masks if len(result_masks) > 1 else result_masks[0]
elif method == "__call__":
# Regular ufunc call.
# Combine the masks from the input, possibly selecting elements.
mask = self._combine_masks(masks, out=out_mask, where=where_unmasked)
# If relevant, also mask output elements for which where was masked.
if where_mask is not None:
mask |= where_mask
elif method == "outer":
# Must have two arguments; adjust masks as will be done for data.
m0, m1 = masks
if m0 is not None and m0.ndim > 0:
m0 = m0[(...,) + (np.newaxis,) * np.ndim(unmasked[1])]
mask = self._combine_masks((m0, m1), out=out_mask)
elif method in {"reduce", "accumulate"}:
# Reductions like np.add.reduce (sum).
# Treat any masked where as if the input element was masked.
mask = self._combine_masks((masks[0], where_mask), copy=False)
if mask is not False:
# By default, we simply propagate masks, since for
# things like np.sum, it makes no sense to do otherwise.
# Individual methods need to override as needed.
if method == "reduce":
axis = kwargs.get("axis", None)
keepdims = kwargs.get("keepdims", False)
mask = np.logical_or.reduce(
mask,
where=where_unmasked,
axis=axis,
keepdims=keepdims,
out=out_mask,
)
if where_unmasked is not True:
# Mask also whole rows in which no elements were selected;
# those will have been left as unmasked above.
mask |= ~np.logical_or.reduce(
where_unmasked, axis=axis, keepdims=keepdims
)
else:
# Accumulate
axis = kwargs.get("axis", 0)
mask = np.logical_or.accumulate(mask, axis=axis, out=out_mask)
elif out is None:
# Can only get here if neither input nor output was masked, but
# perhaps where was masked (possible in "not NUMPY_LT_1_25" and
# in NUMPY_LT_1_21; the latter also allowed axis).
# We don't support this.
return NotImplemented
elif method in {"reduceat", "at"}: # pragma: no cover
raise NotImplementedError(
"masked instances cannot yet deal with 'reduceat' or 'at'."
)
if out_unmasked is not None:
kwargs["out"] = out_unmasked
if where_unmasked is not True:
kwargs["where"] = where_unmasked
result = getattr(ufunc, method)(*unmasked, **kwargs)
if result is None: # pragma: no cover
# This happens for the "at" method.
return result
if out is not None and len(out) == 1:
out = out[0]
return self._masked_result(result, mask, out)
def __array_function__(self, function, types, args, kwargs):
# TODO: go through functions systematically to see which ones
# work and/or can be supported.
if function in MASKED_SAFE_FUNCTIONS:
return super().__array_function__(function, types, args, kwargs)
elif function in APPLY_TO_BOTH_FUNCTIONS:
helper = APPLY_TO_BOTH_FUNCTIONS[function]
try:
helper_result = helper(*args, **kwargs)
except NotImplementedError:
return self._not_implemented_or_raise(function, types)
data_args, mask_args, kwargs, out = helper_result
if out is not None:
if not isinstance(out, Masked):
return self._not_implemented_or_raise(function, types)
function(*mask_args, out=out.mask, **kwargs)
function(*data_args, out=out.unmasked, **kwargs)
return out
mask = function(*mask_args, **kwargs)
result = function(*data_args, **kwargs)
elif function in DISPATCHED_FUNCTIONS:
dispatched_function = DISPATCHED_FUNCTIONS[function]
try:
dispatched_result = dispatched_function(*args, **kwargs)
except NotImplementedError:
return self._not_implemented_or_raise(function, types)
if not isinstance(dispatched_result, tuple):
return dispatched_result
result, mask, out = dispatched_result
elif function in UNSUPPORTED_FUNCTIONS:
return NotImplemented
else: # pragma: no cover
# By default, just pass it through for now.
return super().__array_function__(function, types, args, kwargs)
if mask is None:
return result
else:
return self._masked_result(result, mask, out)
def _not_implemented_or_raise(self, function, types):
# Our function helper or dispatcher found that the function does not
# work with Masked. In principle, there may be another class that
# knows what to do with us, for which we should return NotImplemented.
# But if there is ndarray (or a non-Masked subclass of it) around,
# it quite likely coerces, so we should just break.
if any(issubclass(t, np.ndarray) and not issubclass(t, Masked) for t in types):
raise TypeError(
"the MaskedNDArray implementation cannot handle {} "
"with the given arguments.".format(function)
) from None
else:
return NotImplemented
def _masked_result(self, result, mask, out):
if isinstance(result, tuple):
if out is None:
out = (None,) * len(result)
if not isinstance(mask, (list, tuple)):
mask = (mask,) * len(result)
return tuple(
self._masked_result(result_, mask_, out_)
for (result_, mask_, out_) in zip(result, mask, out)
)
if out is None:
# Note that we cannot count on result being the same class as
# 'self' (e.g., comparison of quantity results in an ndarray, most
# operations on Longitude and Latitude result in Angle or
# Quantity), so use Masked to determine the appropriate class.
return Masked(result, mask)
# TODO: remove this sanity check once test cases are more complete.
assert isinstance(out, Masked)
# If we have an output, the result was written in-place, so we should
# also write the mask in-place (if not done already in the code).
if out._mask is not mask:
out._mask[...] = mask
return out
# Below are ndarray methods that need to be overridden as masked elements
# need to be skipped and/or an initial value needs to be set.
def _reduce_defaults(self, kwargs, initial_func=None):
"""Get default where and initial for masked reductions.
Generally, the default should be to skip all masked elements. For
reductions such as np.minimum.reduce, we also need an initial value,
which can be determined using ``initial_func``.
"""
if "where" not in kwargs:
kwargs["where"] = ~self.mask
if initial_func is not None and "initial" not in kwargs:
kwargs["initial"] = initial_func(self.unmasked)
return kwargs
def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
# Unfortunately, cannot override the call to diagonal inside trace, so
# duplicate implementation in numpy/core/src/multiarray/calculation.c.
diagonal = self.diagonal(offset=offset, axis1=axis1, axis2=axis2)
return diagonal.sum(-1, dtype=dtype, out=out)
def min(self, axis=None, out=None, **kwargs):
return super().min(
axis=axis, out=out, **self._reduce_defaults(kwargs, np.nanmax)
)
def max(self, axis=None, out=None, **kwargs):
return super().max(
axis=axis, out=out, **self._reduce_defaults(kwargs, np.nanmin)
)
def nonzero(self):
unmasked_nonzero = self.unmasked.nonzero()
if self.ndim >= 1:
not_masked = ~self.mask[unmasked_nonzero]
return tuple(u[not_masked] for u in unmasked_nonzero)
else:
return unmasked_nonzero if not self.mask else np.nonzero(0)
def compress(self, condition, axis=None, out=None):
if out is not None:
raise NotImplementedError("cannot yet give output")
return self._apply("compress", condition, axis=axis)
def repeat(self, repeats, axis=None):
return self._apply("repeat", repeats, axis=axis)
def choose(self, choices, out=None, mode="raise"):
# Let __array_function__ take care since choices can be masked too.
return np.choose(self, choices, out=out, mode=mode)
if NUMPY_LT_1_22:
def argmin(self, axis=None, out=None):
# TODO: should this return a masked integer array, with masks
# if all elements were masked?
at_min = self == self.min(axis=axis, keepdims=True)
return at_min.filled(False).argmax(axis=axis, out=out)
def argmax(self, axis=None, out=None):
at_max = self == self.max(axis=axis, keepdims=True)
return at_max.filled(False).argmax(axis=axis, out=out)
else:
def argmin(self, axis=None, out=None, *, keepdims=False):
# TODO: should this return a masked integer array, with masks
# if all elements were masked?
at_min = self == self.min(axis=axis, keepdims=True)
return at_min.filled(False).argmax(axis=axis, out=out, keepdims=keepdims)
def argmax(self, axis=None, out=None, *, keepdims=False):
at_max = self == self.max(axis=axis, keepdims=True)
return at_max.filled(False).argmax(axis=axis, out=out, keepdims=keepdims)
def argsort(self, axis=-1, kind=None, order=None):
"""Returns the indices that would sort an array.
Perform an indirect sort along the given axis on both the array
and the mask, with masked items being sorted to the end.
Parameters
----------
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis).
If None, the flattened array is used.
kind : str or None, ignored.
The kind of sort. Present only to allow subclasses to work.
order : str or list of str.
For an array with fields defined, the fields to compare first,
second, etc. A single field can be specified as a string, and not
all fields need be specified, but unspecified fields will still be
used, in dtype order, to break ties.
Returns
-------
index_array : ndarray, int
Array of indices that sorts along the specified ``axis``. Use
``np.take_along_axis(self, index_array, axis=axis)`` to obtain
the sorted array.
"""
if axis is None:
data = self.ravel()
axis = -1
else:
data = self
if self.dtype.names:
# As done inside the argsort implementation in multiarray/methods.c.
if order is None:
order = self.dtype.names
else:
order = np.core._internal._newnames(self.dtype, order)
keys = tuple(data[name] for name in order[::-1])
elif order is not None:
raise ValueError("Cannot specify order when the array has no fields.")
else:
keys = (data,)
return np.lexsort(keys, axis=axis)
def sort(self, axis=-1, kind=None, order=None):
"""Sort an array in-place. Refer to `numpy.sort` for full documentation."""
# TODO: probably possible to do this faster than going through argsort!
indices = self.argsort(axis, kind=kind, order=order)
self[:] = np.take_along_axis(self, indices, axis=axis)
def argpartition(self, kth, axis=-1, kind="introselect", order=None):
# TODO: should be possible to do this faster than with a full argsort!
return self.argsort(axis=axis, order=order)
def partition(self, kth, axis=-1, kind="introselect", order=None):
# TODO: should be possible to do this faster than with a full argsort!
return self.sort(axis=axis, order=order)
def cumsum(self, axis=None, dtype=None, out=None):
if axis is None:
self = self.ravel()
axis = 0
return np.add.accumulate(self, axis=axis, dtype=dtype, out=out)
def cumprod(self, axis=None, dtype=None, out=None):
if axis is None:
self = self.ravel()
axis = 0
return np.multiply.accumulate(self, axis=axis, dtype=dtype, out=out)
def clip(self, min=None, max=None, out=None, **kwargs):
"""Return an array whose values are limited to ``[min, max]``.
Like `~numpy.clip`, but any masked values in ``min`` and ``max``
are ignored for clipping. The mask of the input array is propagated.
"""
# TODO: implement this at the ufunc level.
dmin, mmin = self._get_data_and_mask(min)
dmax, mmax = self._get_data_and_mask(max)
if mmin is None and mmax is None:
# Fast path for unmasked max, min.
return super().clip(min, max, out=out, **kwargs)
masked_out = np.positive(self, out=out)
out = masked_out.unmasked
if dmin is not None:
np.maximum(out, dmin, out=out, where=True if mmin is None else ~mmin)
if dmax is not None:
np.minimum(out, dmax, out=out, where=True if mmax is None else ~mmax)
return masked_out
def mean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
# Implementation based on that in numpy/core/_methods.py
# Cast bool, unsigned int, and int to float64 by default,
# and do float16 at higher precision.
is_float16_result = False
if dtype is None:
if issubclass(self.dtype.type, (np.integer, np.bool_)):
dtype = np.dtype("f8")
elif issubclass(self.dtype.type, np.float16):
dtype = np.dtype("f4")
is_float16_result = out is None
where = ~self.mask & where
result = self.sum(
axis=axis, dtype=dtype, out=out, keepdims=keepdims, where=where
)
n = np.add.reduce(where, axis=axis, keepdims=keepdims)
# catch the case when an axis is fully masked to prevent div by zero:
neq0 = n == 0
n += neq0
result /= n
# correct fully-masked slice results to what is expected for 0/0 division
result.unmasked[neq0] = np.nan
if is_float16_result:
result = result.astype(self.dtype)
return result
def var(
self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True
):
where_final = ~self.mask & where
# Simplified implementation based on that in numpy/core/_methods.py
n = np.add.reduce(where_final, axis=axis, keepdims=keepdims)[...]
# Cast bool, unsigned int, and int to float64 by default.
if dtype is None and issubclass(self.dtype.type, (np.integer, np.bool_)):
dtype = np.dtype("f8")
mean = self.mean(axis=axis, dtype=dtype, keepdims=True, where=where)
x = self - mean
x *= x.conjugate() # Conjugate just returns x if not complex.
result = x.sum(
axis=axis, dtype=dtype, out=out, keepdims=keepdims, where=where_final
)
n -= ddof
n = np.maximum(n, 0, out=n)
result /= n
result._mask |= n == 0
return result
def std(
self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True
):
result = self.var(
axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims, where=where
)
return np.sqrt(result, out=result)
def __bool__(self):
# First get result from array itself; this will error if not a scalar.
result = super().__bool__()
return result and not self.mask
def any(self, axis=None, out=None, keepdims=False, *, where=True):
return np.logical_or.reduce(
self, axis=axis, out=out, keepdims=keepdims, where=~self.mask & where
)
def all(self, axis=None, out=None, keepdims=False, *, where=True):
return np.logical_and.reduce(
self, axis=axis, out=out, keepdims=keepdims, where=~self.mask & where
)
# Following overrides needed since somehow the ndarray implementation
# does not actually call these.
def __str__(self):
return np.array_str(self)
def __repr__(self):
return np.array_repr(self)
def __format__(self, format_spec):
string = super().__format__(format_spec)
if self.shape == () and self.mask:
n = min(3, max(1, len(string)))
return " " * (len(string) - n) + "\u2014" * n
else:
return string
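# Hedged sketch of the reduction behaviour implemented above (doctest-style
# comment, not executed at import time): plain ufunc reductions propagate the
# mask, while mean/min/max/var/std skip masked elements via ``where``.
#
#     >>> ma = Masked(np.array([1.0, 2.0, 4.0]), mask=[False, True, False])
#     >>> bool(ma.sum().mask)
#     True
#     >>> float(ma.mean().unmasked), float(ma.min().unmasked)
#     (2.5, 1.0)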
class MaskedRecarray(np.recarray, MaskedNDArray, data_cls=np.recarray):
# Explicit definition since we need to override some methods.
def __array_finalize__(self, obj):
# recarray.__array_finalize__ does not do super, so we do it
# explicitly.
super().__array_finalize__(obj)
super(np.recarray, self).__array_finalize__(obj)
# __getattribute__, __setattr__, and field use these somewhat
# obscure ndarray methods. TODO: override in MaskedNDArray?
def getfield(self, dtype, offset=0):
for field, info in self.dtype.fields.items():
if offset == info[1] and dtype == info[0]:
return self[field]
raise NotImplementedError("can only get existing field from structured dtype.")
def setfield(self, val, dtype, offset=0):
for field, info in self.dtype.fields.items():
if offset == info[1] and dtype == info[0]:
self[field] = val
return
raise NotImplementedError("can only set existing field from structured dtype.")
|
13047deea28e3de481256f4298588fbc664c6ce95f39cf08e938c728288f9d8d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Checks for optional dependencies using lazy import from
`PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_.
"""
import importlib
import warnings
# First, the top-level packages:
# TODO: This list is a duplicate of the dependencies in setup.cfg "all", but
# some of the package names are different from the pip-install name (e.g.,
# beautifulsoup4 -> bs4).
_optional_deps = [
"asdf_astropy",
"bleach",
"bottleneck",
"bs4",
"bz2",
"fsspec",
"h5py",
"html5lib",
"IPython",
"jplephem",
"lxml",
"matplotlib",
"mpmath",
"pandas",
"PIL",
"pytz",
"s3fs",
"scipy",
"skyfield",
"sortedcontainers",
"lzma",
"pyarrow",
"pytest_mpl",
]
_formerly_optional_deps = ["yaml"] # for backward compatibility
_deps = {k.upper(): k for k in _optional_deps + _formerly_optional_deps}
# Any subpackages that have different import behavior:
_deps["PLT"] = "matplotlib.pyplot"
__all__ = [f"HAS_{pkg}" for pkg in _deps]
def __getattr__(name):
if name in __all__:
module_name = name[4:]
if module_name == "YAML":
from astropy.utils.exceptions import AstropyDeprecationWarning
warnings.warn(
"PyYaml is now a strict dependency. HAS_YAML is deprecated as "
"of v5.0 and will be removed in a subsequent version.",
category=AstropyDeprecationWarning,
)
try:
importlib.import_module(_deps[module_name])
except (ImportError, ModuleNotFoundError):
return False
return True
raise AttributeError(f"Module {__name__!r} has no attribute {name!r}.")
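# Hedged usage sketch (doctest-style comment, assuming this module's usual
# import path of ``astropy.utils.compat.optional_deps``): the HAS_* flags are
# computed lazily on first attribute access and used as plain booleans.
#
#     >>> from astropy.utils.compat.optional_deps import HAS_SCIPY
#     >>> if HAS_SCIPY:
#     ...     from scipy import optimize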
|
b3716b2cfbb444a58394ca83c506f888686964214693480e7ff3b6ff73ccc1f2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Simple utility functions and bug fixes for compatibility with all supported
versions of Python. This module should generally not be used directly, as
everything in `__all__` will be imported into `astropy.utils.compat` and can
be accessed from there.
"""
import functools
import sys
from astropy.utils.decorators import deprecated
__all__ = ["override__dir__", "PYTHON_LT_3_11"]
PYTHON_LT_3_11 = sys.version_info < (3, 11)
@deprecated(
since="5.2",
message="http://bugs.python.org/issue12166 is resolved, {alternative}.",
alternative="use ``sorted(super().__dir__() + ...)`` instead",
)
def override__dir__(f):
"""
When overriding a __dir__ method on an object, you often want to include the
"standard" members on the object as well. This decorator takes care of that
automatically, and all the wrapped function needs to do is return a list of
the "special" members that wouldn't be found by the normal Python means.
Example
-------
Your class could define __dir__ as follows::
@override__dir__
def __dir__(self):
return ['special_method1', 'special_method2']
Notes
-----
This function was introduced because of http://bugs.python.org/issue12166,
which has since been resolved by
http://hg.python.org/cpython/rev/8f403199f999. Now, the best way to
customize ``__dir__`` is to use ``super``.
::
def __dir__(self):
added = {'special_method1', 'special_method2'}
return sorted(set(super().__dir__()) | added)
"""
# http://bugs.python.org/issue12166
@functools.wraps(f)
def override__dir__wrapper(self):
members = set(object.__dir__(self))
members.update(f(self))
return sorted(members)
return override__dir__wrapper
|
292e0325fc4fbc94bb93f8c1a32d4cab7bf927488396ddba261a95e9c218a384 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test masked class initialization, methods, and operators.
Functions, including ufuncs, are tested in test_functions.py
"""
import operator
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.coordinates import Longitude
from astropy.units import Quantity
from astropy.utils.compat import NUMPY_LT_1_22
from astropy.utils.masked import Masked, MaskedNDArray
def assert_masked_equal(a, b):
assert_array_equal(a.unmasked, b.unmasked)
assert_array_equal(a.mask, b.mask)
VARIOUS_ITEMS = [(1, 1), slice(None, 1), (), 1]
class ArraySetup:
_data_cls = np.ndarray
@classmethod
def setup_class(self):
self.a = np.arange(6.0).reshape(2, 3)
self.mask_a = np.array([[True, False, False], [False, True, False]])
self.b = np.array([-3.0, -2.0, -1.0])
self.mask_b = np.array([False, True, False])
self.c = np.array([[0.25], [0.5]])
self.mask_c = np.array([[False], [True]])
self.sdt = np.dtype([("a", "f8"), ("b", "f8")])
self.mask_sdt = np.dtype([("a", "?"), ("b", "?")])
self.sa = np.array(
[
[(1.0, 2.0), (3.0, 4.0)],
[(11.0, 12.0), (13.0, 14.0)],
],
dtype=self.sdt,
)
self.mask_sa = np.array(
[
[(True, True), (False, False)],
[(False, True), (True, False)],
],
dtype=self.mask_sdt,
)
self.sb = np.array([(1.0, 2.0), (-3.0, 4.0)], dtype=self.sdt)
self.mask_sb = np.array([(True, False), (False, False)], dtype=self.mask_sdt)
self.scdt = np.dtype([("sa", "2f8"), ("sb", "i8", (2, 2))])
self.sc = np.array(
[
([1.0, 2.0], [[1, 2], [3, 4]]),
([-1.0, -2.0], [[-1, -2], [-3, -4]]),
],
dtype=self.scdt,
)
self.mask_scdt = np.dtype([("sa", "2?"), ("sb", "?", (2, 2))])
self.mask_sc = np.array(
[
([True, False], [[False, False], [True, True]]),
([False, True], [[True, False], [False, True]]),
],
dtype=self.mask_scdt,
)
class QuantitySetup(ArraySetup):
_data_cls = Quantity
@classmethod
def setup_class(self):
super().setup_class()
self.a = Quantity(self.a, u.m)
self.b = Quantity(self.b, u.cm)
self.c = Quantity(self.c, u.km)
self.sa = Quantity(self.sa, u.m, dtype=self.sdt)
self.sb = Quantity(self.sb, u.cm, dtype=self.sdt)
class LongitudeSetup(ArraySetup):
_data_cls = Longitude
@classmethod
def setup_class(self):
super().setup_class()
self.a = Longitude(self.a, u.deg)
self.b = Longitude(self.b, u.deg)
self.c = Longitude(self.c, u.deg)
# Note: Longitude does not work on structured arrays, so
# leaving it as regular array (which just reruns some tests).
class TestMaskedArrayInitialization(ArraySetup):
def test_simple(self):
ma = Masked(self.a, mask=self.mask_a)
assert isinstance(ma, np.ndarray)
assert isinstance(ma, type(self.a))
assert isinstance(ma, Masked)
assert_array_equal(ma.unmasked, self.a)
assert_array_equal(ma.mask, self.mask_a)
assert ma.mask is not self.mask_a
assert np.may_share_memory(ma.mask, self.mask_a)
def test_structured(self):
ma = Masked(self.sa, mask=self.mask_sa)
assert isinstance(ma, np.ndarray)
assert isinstance(ma, type(self.sa))
assert isinstance(ma, Masked)
assert_array_equal(ma.unmasked, self.sa)
assert_array_equal(ma.mask, self.mask_sa)
assert ma.mask is not self.mask_sa
assert np.may_share_memory(ma.mask, self.mask_sa)
def test_masked_ndarray_init():
# Note: as a straight ndarray subclass, MaskedNDArray passes on
# the arguments relevant for np.ndarray, not np.array.
a_in = np.arange(3, dtype=int)
m_in = np.array([True, False, False])
buff = a_in.tobytes()
# Check we're doing things correctly using regular ndarray.
a = np.ndarray(shape=(3,), dtype=int, buffer=buff)
assert_array_equal(a, a_in)
# Check with and without mask.
ma = MaskedNDArray((3,), dtype=int, mask=m_in, buffer=buff)
assert_array_equal(ma.unmasked, a_in)
assert_array_equal(ma.mask, m_in)
ma = MaskedNDArray((3,), dtype=int, buffer=buff)
assert_array_equal(ma.unmasked, a_in)
assert_array_equal(ma.mask, np.zeros(3, bool))
def test_cannot_initialize_with_masked():
with pytest.raises(ValueError, match="cannot handle np.ma.masked"):
Masked(np.ma.masked)
def test_cannot_just_use_anything_with_a_mask_attribute():
class my_array(np.ndarray):
mask = True
a = np.array([1.0, 2.0]).view(my_array)
with pytest.raises(AttributeError, match="unmasked"):
Masked(a)
class TestMaskedClassCreation:
"""Try creating a MaskedList and subclasses.
By no means meant to be realistic, just to check that the basic
machinery allows it.
"""
@classmethod
def setup_class(self):
self._base_classes_orig = Masked._base_classes.copy()
self._masked_classes_orig = Masked._masked_classes.copy()
class MaskedList(Masked, list, base_cls=list, data_cls=list):
def __new__(cls, *args, mask=None, copy=False, **kwargs):
self = super().__new__(cls)
self._unmasked = self._data_cls(*args, **kwargs)
self.mask = mask
return self
# Need to have shape for basics to work.
@property
def shape(self):
return (len(self._unmasked),)
self.MaskedList = MaskedList
def teardown_class(self):
Masked._base_classes = self._base_classes_orig
Masked._masked_classes = self._masked_classes_orig
def test_setup(self):
assert issubclass(self.MaskedList, Masked)
assert issubclass(self.MaskedList, list)
assert Masked(list) is self.MaskedList
def test_masked_list(self):
ml = self.MaskedList(range(3), mask=[True, False, False])
assert ml.unmasked == [0, 1, 2]
assert_array_equal(ml.mask, np.array([True, False, False]))
ml01 = ml[:2]
assert ml01.unmasked == [0, 1]
assert_array_equal(ml01.mask, np.array([True, False]))
def test_from_list(self):
ml = Masked([1, 2, 3], mask=[True, False, False])
assert ml.unmasked == [1, 2, 3]
assert_array_equal(ml.mask, np.array([True, False, False]))
def test_masked_list_subclass(self):
class MyList(list):
pass
ml = MyList(range(3))
mml = Masked(ml, mask=[False, True, False])
assert isinstance(mml, Masked)
assert isinstance(mml, MyList)
assert isinstance(mml.unmasked, MyList)
assert mml.unmasked == [0, 1, 2]
assert_array_equal(mml.mask, np.array([False, True, False]))
assert Masked(MyList) is type(mml)
class TestMaskedNDArraySubclassCreation:
"""Test that masked subclasses can be created directly and indirectly."""
@classmethod
def setup_class(self):
class MyArray(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.asanyarray(*args, **kwargs).view(cls)
self.MyArray = MyArray
self.a = np.array([1.0, 2.0]).view(self.MyArray)
self.m = np.array([True, False], dtype=bool)
def teardown_method(self, method):
Masked._masked_classes.pop(self.MyArray, None)
def test_direct_creation(self):
assert self.MyArray not in Masked._masked_classes
mcls = Masked(self.MyArray)
assert issubclass(mcls, Masked)
assert issubclass(mcls, self.MyArray)
assert mcls.__name__ == "MaskedMyArray"
assert mcls.__doc__.startswith("Masked version of MyArray")
mms = mcls(self.a, mask=self.m)
assert isinstance(mms, mcls)
assert_array_equal(mms.unmasked, self.a)
assert_array_equal(mms.mask, self.m)
def test_initialization_without_mask(self):
# Default for not giving a mask should be False.
mcls = Masked(self.MyArray)
mms = mcls(self.a)
assert isinstance(mms, mcls)
assert_array_equal(mms.unmasked, self.a)
assert_array_equal(mms.mask, np.zeros(mms.shape, bool))
@pytest.mark.parametrize("masked_array", [Masked, np.ma.MaskedArray])
def test_initialization_with_masked_values(self, masked_array):
mcls = Masked(self.MyArray)
ma = masked_array(np.asarray(self.a), mask=self.m)
mms = mcls(ma)
assert isinstance(mms, Masked)
assert isinstance(mms, self.MyArray)
assert_array_equal(mms.unmasked, self.a)
assert_array_equal(mms.mask, self.m)
def test_indirect_creation(self):
assert self.MyArray not in Masked._masked_classes
mms = Masked(self.a, mask=self.m)
assert isinstance(mms, Masked)
assert isinstance(mms, self.MyArray)
assert_array_equal(mms.unmasked, self.a)
assert_array_equal(mms.mask, self.m)
assert self.MyArray in Masked._masked_classes
assert Masked(self.MyArray) is type(mms)
def test_can_initialize_with_masked_values(self):
mcls = Masked(self.MyArray)
mms = mcls(Masked(np.asarray(self.a), mask=self.m))
assert isinstance(mms, Masked)
assert isinstance(mms, self.MyArray)
assert_array_equal(mms.unmasked, self.a)
assert_array_equal(mms.mask, self.m)
def test_viewing(self):
mms = Masked(self.a, mask=self.m)
mms2 = mms.view()
assert type(mms2) is mms.__class__
assert_masked_equal(mms2, mms)
ma = mms.view(np.ndarray)
assert type(ma) is MaskedNDArray
assert_array_equal(ma.unmasked, self.a.view(np.ndarray))
assert_array_equal(ma.mask, self.m)
class TestMaskedQuantityInitialization(TestMaskedArrayInitialization, QuantitySetup):
def test_masked_quantity_class_init(self):
# TODO: class definitions should be more easily accessible.
mcls = Masked._masked_classes[self.a.__class__]
# This is not a very careful test.
mq = mcls([1.0, 2.0], mask=[True, False], unit=u.s)
assert mq.unit == u.s
assert np.all(mq.value.unmasked == [1.0, 2.0])
assert np.all(mq.value.mask == [True, False])
assert np.all(mq.mask == [True, False])
def test_masked_quantity_getting(self):
mcls = Masked._masked_classes[self.a.__class__]
MQ = Masked(Quantity)
assert MQ is mcls
def test_initialization_without_mask(self):
# Default for not giving a mask should be False.
MQ = Masked(Quantity)
mq = MQ([1.0, 2.0], u.s)
assert mq.unit == u.s
assert np.all(mq.value.unmasked == [1.0, 2.0])
assert np.all(mq.mask == [False, False])
@pytest.mark.parametrize("masked_array", [Masked, np.ma.MaskedArray])
def test_initialization_with_masked_values(self, masked_array):
MQ = Masked(Quantity)
a = np.array([1.0, 2.0])
m = np.array([True, False])
ma = masked_array(a, m)
mq = MQ(ma)
assert isinstance(mq, Masked)
assert isinstance(mq, Quantity)
assert_array_equal(mq.value.unmasked, a)
assert_array_equal(mq.mask, m)
class TestMaskSetting(ArraySetup):
def test_whole_mask_setting_simple(self):
ma = Masked(self.a)
assert ma.mask.shape == ma.shape
assert not ma.mask.any()
ma.mask = True
assert ma.mask.shape == ma.shape
assert ma.mask.all()
ma.mask = [[True], [False]]
assert ma.mask.shape == ma.shape
assert_array_equal(ma.mask, np.array([[True] * 3, [False] * 3]))
ma.mask = self.mask_a
assert ma.mask.shape == ma.shape
assert_array_equal(ma.mask, self.mask_a)
assert ma.mask is not self.mask_a
assert np.may_share_memory(ma.mask, self.mask_a)
def test_whole_mask_setting_structured(self):
ma = Masked(self.sa)
assert ma.mask.shape == ma.shape
assert not ma.mask["a"].any() and not ma.mask["b"].any()
ma.mask = True
assert ma.mask.shape == ma.shape
assert ma.mask["a"].all() and ma.mask["b"].all()
ma.mask = [[True], [False]]
assert ma.mask.shape == ma.shape
assert_array_equal(
ma.mask,
np.array([[(True, True)] * 2, [(False, False)] * 2], dtype=self.mask_sdt),
)
ma.mask = self.mask_sa
assert ma.mask.shape == ma.shape
assert_array_equal(ma.mask, self.mask_sa)
assert ma.mask is not self.mask_sa
assert np.may_share_memory(ma.mask, self.mask_sa)
@pytest.mark.parametrize("item", VARIOUS_ITEMS)
def test_part_mask_setting(self, item):
ma = Masked(self.a)
ma.mask[item] = True
expected = np.zeros(ma.shape, bool)
expected[item] = True
assert_array_equal(ma.mask, expected)
ma.mask[item] = False
assert_array_equal(ma.mask, np.zeros(ma.shape, bool))
# Mask propagation
mask = np.zeros(self.a.shape, bool)
ma = Masked(self.a, mask)
ma.mask[item] = True
assert np.may_share_memory(ma.mask, mask)
assert_array_equal(ma.mask, mask)
@pytest.mark.parametrize("item", ["a"] + VARIOUS_ITEMS)
def test_part_mask_setting_structured(self, item):
ma = Masked(self.sa)
ma.mask[item] = True
expected = np.zeros(ma.shape, self.mask_sdt)
expected[item] = True
assert_array_equal(ma.mask, expected)
ma.mask[item] = False
assert_array_equal(ma.mask, np.zeros(ma.shape, self.mask_sdt))
# Mask propagation
mask = np.zeros(self.sa.shape, self.mask_sdt)
ma = Masked(self.sa, mask)
ma.mask[item] = True
assert np.may_share_memory(ma.mask, mask)
assert_array_equal(ma.mask, mask)
# Following are tests where we trust the initializer works.
class MaskedArraySetup(ArraySetup):
@classmethod
def setup_class(self):
super().setup_class()
self.ma = Masked(self.a, mask=self.mask_a)
self.mb = Masked(self.b, mask=self.mask_b)
self.mc = Masked(self.c, mask=self.mask_c)
self.msa = Masked(self.sa, mask=self.mask_sa)
self.msb = Masked(self.sb, mask=self.mask_sb)
self.msc = Masked(self.sc, mask=self.mask_sc)
class TestViewing(MaskedArraySetup):
def test_viewing_as_new_type(self):
ma2 = self.ma.view(type(self.ma))
assert_masked_equal(ma2, self.ma)
ma3 = self.ma.view()
assert_masked_equal(ma3, self.ma)
def test_viewing_as_new_dtype(self):
# Not very meaningful, but possible...
ma2 = self.ma.view("c8")
assert_array_equal(ma2.unmasked, self.a.view("c8"))
assert_array_equal(ma2.mask, self.mask_a)
@pytest.mark.parametrize("new_dtype", ["2f4", "f8,f8,f8"])
def test_viewing_as_new_dtype_not_implemented(self, new_dtype):
# But cannot (yet) view in a way that would need to create a new mask,
# even though that view is possible for a regular array.
check = self.a.view(new_dtype)
with pytest.raises(NotImplementedError, match="different.*size"):
self.ma.view(check.dtype)
def test_viewing_as_something_impossible(self):
with pytest.raises(TypeError):
# Use intp to ensure we have the same size as object,
# otherwise we get a different error message
Masked(np.array([1, 2], dtype=np.intp)).view(Masked)
class TestMaskedArrayCopyFilled(MaskedArraySetup):
def test_copy(self):
ma_copy = self.ma.copy()
assert type(ma_copy) is type(self.ma)
assert_array_equal(ma_copy.unmasked, self.ma.unmasked)
assert_array_equal(ma_copy.mask, self.ma.mask)
assert not np.may_share_memory(ma_copy.unmasked, self.ma.unmasked)
assert not np.may_share_memory(ma_copy.mask, self.ma.mask)
@pytest.mark.parametrize("fill_value", (0, 1))
def test_filled(self, fill_value):
fill_value = fill_value * getattr(self.a, "unit", 1)
expected = self.a.copy()
expected[self.ma.mask] = fill_value
result = self.ma.filled(fill_value)
assert_array_equal(expected, result)
def test_filled_no_fill_value(self):
with pytest.raises(TypeError, match="missing 1 required"):
self.ma.filled()
@pytest.mark.parametrize("fill_value", [(0, 1), (-1, -1)])
def test_filled_structured(self, fill_value):
fill_value = np.array(fill_value, dtype=self.sdt)
if hasattr(self.sa, "unit"):
fill_value = fill_value << self.sa.unit
expected = self.sa.copy()
expected["a"][self.msa.mask["a"]] = fill_value["a"]
expected["b"][self.msa.mask["b"]] = fill_value["b"]
result = self.msa.filled(fill_value)
assert_array_equal(expected, result)
def test_flat(self):
ma_copy = self.ma.copy()
ma_flat = ma_copy.flat
# Check that single item keeps class and mask
ma_flat1 = ma_flat[1]
assert ma_flat1.unmasked == self.a.flat[1]
assert ma_flat1.mask == self.mask_a.flat[1]
# As well as getting items via iteration.
assert all(
(ma.unmasked == a and ma.mask == m)
for (ma, a, m) in zip(self.ma.flat, self.a.flat, self.mask_a.flat)
)
# check that flat works like a view of the real array
ma_flat[1] = self.b[1]
assert ma_flat[1] == self.b[1]
assert ma_copy[0, 1] == self.b[1]
class TestMaskedQuantityCopyFilled(TestMaskedArrayCopyFilled, QuantitySetup):
pass
class TestMaskedLongitudeCopyFilled(TestMaskedArrayCopyFilled, LongitudeSetup):
pass
class TestMaskedArrayShaping(MaskedArraySetup):
def test_reshape(self):
ma_reshape = self.ma.reshape((6,))
expected_data = self.a.reshape((6,))
expected_mask = self.mask_a.reshape((6,))
assert ma_reshape.shape == expected_data.shape
assert_array_equal(ma_reshape.unmasked, expected_data)
assert_array_equal(ma_reshape.mask, expected_mask)
def test_shape_setting(self):
ma_reshape = self.ma.copy()
ma_reshape.shape = (6,)
expected_data = self.a.reshape((6,))
expected_mask = self.mask_a.reshape((6,))
assert ma_reshape.shape == expected_data.shape
assert_array_equal(ma_reshape.unmasked, expected_data)
assert_array_equal(ma_reshape.mask, expected_mask)
def test_shape_setting_failure(self):
ma = self.ma.copy()
with pytest.raises(ValueError, match="cannot reshape"):
ma.shape = (5,)
assert ma.shape == self.ma.shape
assert ma.mask.shape == self.ma.shape
# Here, mask can be reshaped but array cannot.
ma2 = Masked(np.broadcast_to([[1.0], [2.0]], self.a.shape), mask=self.mask_a)
with pytest.raises(AttributeError, match="ncompatible shape"):
ma2.shape = (6,)
assert ma2.shape == self.ma.shape
assert ma2.mask.shape == self.ma.shape
# Here, array can be reshaped but mask cannot.
ma3 = Masked(
self.a.copy(), mask=np.broadcast_to([[True], [False]], self.mask_a.shape)
)
with pytest.raises(AttributeError, match="ncompatible shape"):
ma3.shape = (6,)
assert ma3.shape == self.ma.shape
assert ma3.mask.shape == self.ma.shape
def test_ravel(self):
ma_ravel = self.ma.ravel()
expected_data = self.a.ravel()
expected_mask = self.mask_a.ravel()
assert ma_ravel.shape == expected_data.shape
assert_array_equal(ma_ravel.unmasked, expected_data)
assert_array_equal(ma_ravel.mask, expected_mask)
def test_transpose(self):
ma_transpose = self.ma.transpose()
expected_data = self.a.transpose()
expected_mask = self.mask_a.transpose()
assert ma_transpose.shape == expected_data.shape
assert_array_equal(ma_transpose.unmasked, expected_data)
assert_array_equal(ma_transpose.mask, expected_mask)
def test_iter(self):
for ma, d, m in zip(self.ma, self.a, self.mask_a):
assert_array_equal(ma.unmasked, d)
assert_array_equal(ma.mask, m)
class MaskedItemTests(MaskedArraySetup):
@pytest.mark.parametrize("item", VARIOUS_ITEMS)
def test_getitem(self, item):
ma_part = self.ma[item]
expected_data = self.a[item]
expected_mask = self.mask_a[item]
assert_array_equal(ma_part.unmasked, expected_data)
assert_array_equal(ma_part.mask, expected_mask)
@pytest.mark.parametrize("item", ["a"] + VARIOUS_ITEMS)
def test_getitem_structured(self, item):
ma_part = self.msa[item]
expected_data = self.sa[item]
expected_mask = self.mask_sa[item]
assert_array_equal(ma_part.unmasked, expected_data)
assert_array_equal(ma_part.mask, expected_mask)
@pytest.mark.parametrize(
"indices,axis",
[([0, 1], 1), ([0, 1], 0), ([0, 1], None), ([[0, 1], [2, 3]], None)],
)
def test_take(self, indices, axis):
ma_take = self.ma.take(indices, axis=axis)
expected_data = self.a.take(indices, axis=axis)
expected_mask = self.mask_a.take(indices, axis=axis)
assert_array_equal(ma_take.unmasked, expected_data)
assert_array_equal(ma_take.mask, expected_mask)
ma_take2 = np.take(self.ma, indices, axis=axis)
assert_masked_equal(ma_take2, ma_take)
@pytest.mark.parametrize("item", VARIOUS_ITEMS)
@pytest.mark.parametrize("mask", [None, True, False])
def test_setitem(self, item, mask):
base = self.ma.copy()
expected_data = self.a.copy()
expected_mask = self.mask_a.copy()
value = self.a[0, 0] if mask is None else Masked(self.a[0, 0], mask)
base[item] = value
expected_data[item] = value if mask is None else value.unmasked
expected_mask[item] = False if mask is None else value.mask
assert_array_equal(base.unmasked, expected_data)
assert_array_equal(base.mask, expected_mask)
@pytest.mark.parametrize("item", ["a"] + VARIOUS_ITEMS)
@pytest.mark.parametrize("mask", [None, True, False])
def test_setitem_structured(self, item, mask):
base = self.msa.copy()
expected_data = self.sa.copy()
expected_mask = self.mask_sa.copy()
value = self.sa["b"] if item == "a" else self.sa[0, 0]
if mask is not None:
value = Masked(value, mask)
base[item] = value
expected_data[item] = value if mask is None else value.unmasked
expected_mask[item] = False if mask is None else value.mask
assert_array_equal(base.unmasked, expected_data)
assert_array_equal(base.mask, expected_mask)
@pytest.mark.parametrize("item", VARIOUS_ITEMS)
def test_setitem_np_ma_masked(self, item):
base = self.ma.copy()
expected_mask = self.mask_a.copy()
base[item] = np.ma.masked
expected_mask[item] = True
assert_array_equal(base.unmasked, self.a)
assert_array_equal(base.mask, expected_mask)
class TestMaskedArrayItems(MaskedItemTests):
@classmethod
def setup_class(self):
super().setup_class()
self.d = np.array(["aa", "bb"])
self.mask_d = np.array([True, False])
self.md = Masked(self.d, self.mask_d)
# Quantity, Longitude cannot hold strings.
def test_getitem_strings(self):
md = self.md.copy()
md0 = md[0]
assert md0.unmasked == self.d[0]
assert md0.mask
md_all = md[:]
assert_masked_equal(md_all, md)
def test_setitem_strings_np_ma_masked(self):
md = self.md.copy()
md[1] = np.ma.masked
assert_array_equal(md.unmasked, self.d)
assert_array_equal(md.mask, np.ones(2, bool))
class TestMaskedQuantityItems(MaskedItemTests, QuantitySetup):
pass
class TestMaskedLongitudeItems(MaskedItemTests, LongitudeSetup):
pass
class MaskedOperatorTests(MaskedArraySetup):
@pytest.mark.parametrize("op", (operator.add, operator.sub))
def test_add_subtract(self, op):
mapmb = op(self.ma, self.mb)
expected_data = op(self.a, self.b)
expected_mask = self.ma.mask | self.mb.mask
# Note: assert_array_equal also checks type, i.e., that, e.g.,
# Longitude decays into an Angle.
assert_array_equal(mapmb.unmasked, expected_data)
assert_array_equal(mapmb.mask, expected_mask)
@pytest.mark.parametrize("op", (operator.eq, operator.ne))
def test_equality(self, op):
mapmb = op(self.ma, self.mb)
expected_data = op(self.a, self.b)
expected_mask = self.ma.mask | self.mb.mask
# Note: assert_array_equal also checks type, i.e., that boolean
# output is represented as plain Masked ndarray.
assert_array_equal(mapmb.unmasked, expected_data)
assert_array_equal(mapmb.mask, expected_mask)
def test_not_implemented(self):
with pytest.raises(TypeError):
self.ma > "abc" # noqa: B015
@pytest.mark.parametrize("different_names", [False, True])
@pytest.mark.parametrize("op", (operator.eq, operator.ne))
def test_structured_equality(self, op, different_names):
msb = self.msb
if different_names:
msb = msb.astype(
[(f"different_{name}", dt) for name, dt in msb.dtype.fields.items()]
)
mapmb = op(self.msa, self.msb)
# Expected is a bit tricky here: only unmasked fields count
expected_data = np.ones(mapmb.shape, bool)
expected_mask = np.ones(mapmb.shape, bool)
for field in self.sdt.names:
fa, mfa = self.sa[field], self.mask_sa[field]
fb, mfb = self.sb[field], self.mask_sb[field]
mfequal = mfa | mfb
fequal = (fa == fb) | mfequal
expected_data &= fequal
expected_mask &= mfequal
if op is operator.ne:
expected_data = ~expected_data
# Note: assert_array_equal also checks type, i.e., that boolean
# output is represented as plain Masked ndarray.
assert_array_equal(mapmb.unmasked, expected_data)
assert_array_equal(mapmb.mask, expected_mask)
def test_matmul(self):
result = self.ma.T @ self.ma
assert_array_equal(result.unmasked, self.a.T @ self.a)
mask1 = np.any(self.mask_a, axis=0)
expected_mask = np.logical_or.outer(mask1, mask1)
assert_array_equal(result.mask, expected_mask)
result2 = self.ma.T @ self.a
assert_array_equal(result2.unmasked, self.a.T @ self.a)
expected_mask2 = np.logical_or.outer(mask1, np.zeros(3, bool))
assert_array_equal(result2.mask, expected_mask2)
result3 = self.a.T @ self.ma
assert_array_equal(result3.unmasked, self.a.T @ self.a)
expected_mask3 = np.logical_or.outer(np.zeros(3, bool), mask1)
assert_array_equal(result3.mask, expected_mask3)
def test_matvec(self):
result = self.ma @ self.mb
assert np.all(result.mask)
assert_array_equal(result.unmasked, self.a @ self.b)
# Just using the masked vector still has all elements masked.
result2 = self.a @ self.mb
assert np.all(result2.mask)
assert_array_equal(result2.unmasked, self.a @ self.b)
new_ma = self.ma.copy()
new_ma.mask[0, 0] = False
result3 = new_ma @ self.b
assert_array_equal(result3.unmasked, self.a @ self.b)
assert_array_equal(result3.mask, new_ma.mask.any(-1))
def test_vecmat(self):
result = self.mb @ self.ma.T
assert np.all(result.mask)
assert_array_equal(result.unmasked, self.b @ self.a.T)
result2 = self.b @ self.ma.T
assert np.all(result2.mask)
assert_array_equal(result2.unmasked, self.b @ self.a.T)
new_ma = self.ma.T.copy()
new_ma.mask[0, 0] = False
result3 = self.b @ new_ma
assert_array_equal(result3.unmasked, self.b @ self.a.T)
assert_array_equal(result3.mask, new_ma.mask.any(0))
def test_vecvec(self):
result = self.mb @ self.mb
assert result.shape == ()
assert result.mask
assert result.unmasked == self.b @ self.b
mb_no_mask = Masked(self.b, False)
result2 = mb_no_mask @ mb_no_mask
assert not result2.mask
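# Illustrative sketch (not a test): the mask-propagation rule asserted by the
# matmul/matvec tests above; an output element of ``@`` is masked whenever any
# input element entering its contraction is masked. The helper name is hypothetical.
def _example_matmul_mask_propagation():
    import numpy as np
    from astropy.utils.masked import Masked
    m = Masked(np.eye(3), mask=np.zeros((3, 3), bool))
    m.mask[0, 1] = True
    result = m @ np.ones(3)
    # Only row 0 used the masked element, so only result[0] is masked.
    assert np.array_equal(result.mask, [True, False, False])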
class TestMaskedArrayOperators(MaskedOperatorTests):
# Some further tests that use strings, which are not useful for Quantity.
@pytest.mark.parametrize("op", (operator.eq, operator.ne))
def test_equality_strings(self, op):
m1 = Masked(np.array(["a", "b", "c"]), mask=[True, False, False])
m2 = Masked(np.array(["a", "b", "d"]), mask=[False, False, False])
result = op(m1, m2)
assert_array_equal(result.unmasked, op(m1.unmasked, m2.unmasked))
assert_array_equal(result.mask, m1.mask | m2.mask)
result2 = op(m1, m2.unmasked)
assert_masked_equal(result2, result)
def test_not_implemented(self):
with pytest.raises(TypeError):
Masked(["a", "b"]) > object() # noqa: B015
class TestMaskedQuantityOperators(MaskedOperatorTests, QuantitySetup):
pass
class TestMaskedLongitudeOperators(MaskedOperatorTests, LongitudeSetup):
pass
class TestMaskedArrayMethods(MaskedArraySetup):
def test_round(self):
# Goes via ufunc, hence easy.
mrc = self.mc.round()
expected = Masked(self.c.round(), self.mask_c)
assert_masked_equal(mrc, expected)
@pytest.mark.parametrize("axis", (0, 1, None))
def test_sum(self, axis):
ma_sum = self.ma.sum(axis)
expected_data = self.a.sum(axis)
expected_mask = self.ma.mask.any(axis)
assert_array_equal(ma_sum.unmasked, expected_data)
assert_array_equal(ma_sum.mask, expected_mask)
@pytest.mark.parametrize("axis", (0, 1, None))
def test_sum_where(self, axis):
where = np.array(
[
[True, False, False],
[True, True, True],
]
)
where_final = ~self.ma.mask & where
ma_sum = self.ma.sum(axis, where=where_final)
expected_data = self.ma.unmasked.sum(axis, where=where_final)
expected_mask = np.logical_or.reduce(
self.ma.mask, axis=axis, where=where_final
) | (~where_final).all(axis)
assert_array_equal(ma_sum.unmasked, expected_data)
assert_array_equal(ma_sum.mask, expected_mask)
@pytest.mark.parametrize("axis", (0, 1, None))
def test_cumsum(self, axis):
ma_sum = self.ma.cumsum(axis)
expected_data = self.a.cumsum(axis)
mask = self.mask_a
if axis is None:
mask = mask.ravel()
expected_mask = np.logical_or.accumulate(mask, axis=axis)
assert_array_equal(ma_sum.unmasked, expected_data)
assert_array_equal(ma_sum.mask, expected_mask)
@pytest.mark.parametrize("axis", (0, 1, None))
def test_mean(self, axis):
ma_mean = self.ma.mean(axis)
filled = self.a.copy()
filled[self.mask_a] = 0.0
count = 1 - self.ma.mask.astype(int)
expected_data = filled.sum(axis) / count.sum(axis)
expected_mask = self.ma.mask.all(axis)
assert_array_equal(ma_mean.unmasked, expected_data)
assert_array_equal(ma_mean.mask, expected_mask)
@pytest.mark.parametrize("axis", (0, 1, None))
def test_mean_all_masked(self, axis):
# test corner case when all values are masked
md = Masked(self.a, np.ones(self.a.shape, dtype=bool))
md_mean = md.mean(axis)
assert np.all(np.isnan(md_mean.unmasked))
assert np.all(md_mean.mask)
def test_mean_int16(self):
ma = self.ma.astype("i2")
ma_mean = ma.mean()
assert ma_mean.dtype == "f8"
expected = ma.astype("f8").mean()
assert_masked_equal(ma_mean, expected)
def test_mean_float16(self):
ma = self.ma.astype("f2")
ma_mean = ma.mean()
assert ma_mean.dtype == "f2"
expected = self.ma.mean().astype("f2")
assert_masked_equal(ma_mean, expected)
def test_mean_inplace(self):
expected = self.ma.mean(1)
out = Masked(np.zeros_like(expected.unmasked))
result = self.ma.mean(1, out=out)
assert result is out
assert_masked_equal(out, expected)
@pytest.mark.filterwarnings("ignore:.*encountered in.*divide")
@pytest.mark.filterwarnings("ignore:Mean of empty slice")
@pytest.mark.parametrize("axis", (0, 1, None))
def test_mean_where(self, axis):
where = np.array(
[
[True, False, False],
[True, True, True],
]
)
where_final = ~self.ma.mask & where
ma_mean = self.ma.mean(axis, where=where)
expected_data = self.ma.unmasked.mean(axis, where=where_final)
expected_mask = np.logical_or.reduce(
self.ma.mask, axis=axis, where=where_final
) | (~where_final).all(axis)
assert_array_equal(ma_mean.unmasked, expected_data)
assert_array_equal(ma_mean.mask, expected_mask)
@pytest.mark.filterwarnings("ignore:.*encountered in.*divide")
@pytest.mark.parametrize("axis", (0, 1, None))
def test_var(self, axis):
ma_var = self.ma.var(axis)
filled = (self.a - self.ma.mean(axis, keepdims=True)) ** 2
filled[self.mask_a] = 0.0
count = (1 - self.ma.mask.astype(int)).sum(axis)
expected_data = filled.sum(axis) / count
expected_mask = self.ma.mask.all(axis)
assert_array_equal(ma_var.unmasked, expected_data)
assert_array_equal(ma_var.mask, expected_mask)
ma_var1 = self.ma.var(axis, ddof=1)
expected_data1 = filled.sum(axis) / (count - 1)
expected_mask1 = self.ma.mask.all(axis) | (count <= 1)
assert_array_equal(ma_var1.unmasked, expected_data1)
assert_array_equal(ma_var1.mask, expected_mask1)
ma_var5 = self.ma.var(axis, ddof=5)
assert np.all(~np.isfinite(ma_var5.unmasked))
assert ma_var5.mask.all()
def test_var_int16(self):
ma = self.ma.astype("i2")
ma_var = ma.var()
assert ma_var.dtype == "f8"
expected = ma.astype("f8").var()
assert_masked_equal(ma_var, expected)
@pytest.mark.filterwarnings("ignore:.*encountered in.*divide")
@pytest.mark.filterwarnings("ignore:Degrees of freedom <= 0 for slice")
@pytest.mark.parametrize("axis", (0, 1, None))
def test_var_where(self, axis):
where = np.array(
[
[True, False, False],
[True, True, True],
]
)
where_final = ~self.ma.mask & where
ma_var = self.ma.var(axis, where=where)
expected_data = self.ma.unmasked.var(axis, where=where_final)
expected_mask = np.logical_or.reduce(
self.ma.mask, axis=axis, where=where_final
) | (~where_final).all(axis)
assert_array_equal(ma_var.unmasked, expected_data)
assert_array_equal(ma_var.mask, expected_mask)
def test_std(self):
ma_std = self.ma.std(1, ddof=1)
ma_var1 = self.ma.var(1, ddof=1)
expected = np.sqrt(ma_var1)
assert_masked_equal(ma_std, expected)
def test_std_inplace(self):
expected = self.ma.std(1, ddof=1)
out = Masked(np.zeros_like(expected.unmasked))
result = self.ma.std(1, ddof=1, out=out)
assert result is out
assert_masked_equal(result, expected)
@pytest.mark.filterwarnings("ignore:.*encountered in.*divide")
@pytest.mark.filterwarnings("ignore:Degrees of freedom <= 0 for slice")
@pytest.mark.parametrize("axis", (0, 1, None))
def test_std_where(self, axis):
where = np.array(
[
[True, False, False],
[True, True, True],
]
)
where_final = ~self.ma.mask & where
ma_std = self.ma.std(axis, where=where)
expected_data = self.ma.unmasked.std(axis, where=where_final)
expected_mask = np.logical_or.reduce(
self.ma.mask, axis=axis, where=where_final
) | (~where_final).all(axis)
assert_array_equal(ma_std.unmasked, expected_data)
assert_array_equal(ma_std.mask, expected_mask)
@pytest.mark.parametrize("axis", (0, 1, None))
def test_min(self, axis):
ma_min = self.ma.min(axis)
filled = self.a.copy()
filled[self.mask_a] = self.a.max()
expected_data = filled.min(axis)
assert_array_equal(ma_min.unmasked, expected_data)
assert not np.any(ma_min.mask)
def test_min_with_masked_nan(self):
ma = Masked([3.0, np.nan, 2.0], mask=[False, True, False])
ma_min = ma.min()
assert_array_equal(ma_min.unmasked, np.array(2.0))
assert not ma_min.mask
@pytest.mark.parametrize("axis", (0, 1, None))
def test_min_where(self, axis):
where = np.array(
[
[True, False, False],
[True, True, True],
]
)
where_final = ~self.ma.mask & where
ma_min = self.ma.min(axis, where=where_final, initial=np.inf)
expected_data = self.ma.unmasked.min(axis, where=where_final, initial=np.inf)
expected_mask = np.logical_or.reduce(
self.ma.mask, axis=axis, where=where_final
) | (~where_final).all(axis)
assert_array_equal(ma_min.unmasked, expected_data)
assert_array_equal(ma_min.mask, expected_mask)
@pytest.mark.parametrize("axis", (0, 1, None))
def test_max(self, axis):
ma_max = self.ma.max(axis)
filled = self.a.copy()
filled[self.mask_a] = self.a.min()
expected_data = filled.max(axis)
assert_array_equal(ma_max.unmasked, expected_data)
assert not np.any(ma_max.mask)
@pytest.mark.parametrize("axis", (0, 1, None))
def test_max_where(self, axis):
where = np.array(
[
[True, False, False],
[True, True, True],
]
)
where_final = ~self.ma.mask & where
ma_max = self.ma.max(axis, where=where_final, initial=-np.inf)
expected_data = self.ma.unmasked.max(axis, where=where_final, initial=-np.inf)
expected_mask = np.logical_or.reduce(
self.ma.mask, axis=axis, where=where_final
) | (~where_final).all(axis)
assert_array_equal(ma_max.unmasked, expected_data)
assert_array_equal(ma_max.mask, expected_mask)
@pytest.mark.parametrize("axis", (0, 1, None))
def test_argmin(self, axis):
ma_argmin = self.ma.argmin(axis)
filled = self.a.copy()
filled[self.mask_a] = self.a.max()
expected_data = filled.argmin(axis)
assert_array_equal(ma_argmin, expected_data)
def test_argmin_only_one_unmasked_element(self):
# Regression test for example from @taldcroft at
# https://github.com/astropy/astropy/pull/11127#discussion_r600864559
ma = Masked(data=[1, 2], mask=[True, False])
assert ma.argmin() == 1
if not NUMPY_LT_1_22:
def test_argmin_keepdims(self):
ma = Masked(data=[[1, 2], [3, 4]], mask=[[True, False], [False, False]])
assert_array_equal(ma.argmin(axis=0, keepdims=True), np.array([[1, 0]]))
@pytest.mark.parametrize("axis", (0, 1, None))
def test_argmax(self, axis):
ma_argmax = self.ma.argmax(axis)
filled = self.a.copy()
filled[self.mask_a] = self.a.min()
expected_data = filled.argmax(axis)
assert_array_equal(ma_argmax, expected_data)
if not NUMPY_LT_1_22:
def test_argmax_keepdims(self):
ma = Masked(data=[[1, 2], [3, 4]], mask=[[True, False], [False, False]])
assert_array_equal(ma.argmax(axis=1, keepdims=True), np.array([[1], [1]]))
@pytest.mark.parametrize("axis", (0, 1, None))
def test_argsort(self, axis):
ma_argsort = self.ma.argsort(axis)
filled = self.a.copy()
filled[self.mask_a] = self.a.max() * 1.1
expected_data = filled.argsort(axis)
assert_array_equal(ma_argsort, expected_data)
@pytest.mark.parametrize("order", [None, "a", ("a", "b"), ("b", "a")])
@pytest.mark.parametrize("axis", [0, 1])
def test_structured_argsort(self, axis, order):
ma_argsort = self.msa.argsort(axis, order=order)
filled = self.msa.filled(fill_value=np.array((np.inf, np.inf), dtype=self.sdt))
expected_data = filled.argsort(axis, order=order)
assert_array_equal(ma_argsort, expected_data)
def test_argsort_error(self):
with pytest.raises(ValueError, match="when the array has no fields"):
self.ma.argsort(axis=0, order="a")
@pytest.mark.parametrize("axis", (0, 1))
def test_sort(self, axis):
ma_sort = self.ma.copy()
ma_sort.sort(axis)
indices = self.ma.argsort(axis)
expected_data = np.take_along_axis(self.ma.unmasked, indices, axis)
expected_mask = np.take_along_axis(self.ma.mask, indices, axis)
assert_array_equal(ma_sort.unmasked, expected_data)
assert_array_equal(ma_sort.mask, expected_mask)
@pytest.mark.parametrize("kth", [1, 3])
def test_argpartition(self, kth):
ma = self.ma.ravel()
ma_argpartition = ma.argpartition(kth)
partitioned = ma[ma_argpartition]
assert (partitioned[:kth] < partitioned[kth]).all()
assert (partitioned[kth:] >= partitioned[kth]).all()
if partitioned[kth].mask:
assert all(partitioned.mask[kth:])
else:
assert not any(partitioned.mask[:kth])
@pytest.mark.parametrize("kth", [1, 3])
def test_partition(self, kth):
partitioned = self.ma.flatten()
partitioned.partition(kth)
assert (partitioned[:kth] < partitioned[kth]).all()
assert (partitioned[kth:] >= partitioned[kth]).all()
if partitioned[kth].mask:
assert all(partitioned.mask[kth:])
else:
assert not any(partitioned.mask[:kth])
def test_all_explicit(self):
a1 = np.array(
[
[1.0, 2.0],
[3.0, 4.0],
]
)
a2 = np.array(
[
[1.0, 0.0],
[3.0, 4.0],
]
)
if self._data_cls is not np.ndarray:
a1 = self._data_cls(a1, self.a.unit)
a2 = self._data_cls(a2, self.a.unit)
ma1 = Masked(
a1,
mask=[
[False, False],
[True, True],
],
)
ma2 = Masked(
a2,
mask=[
[False, True],
[False, True],
],
)
ma1_eq_ma2 = ma1 == ma2
assert_array_equal(
ma1_eq_ma2.unmasked,
np.array(
[
[True, False],
[True, True],
]
),
)
assert_array_equal(
ma1_eq_ma2.mask,
np.array(
[
[False, True],
[True, True],
]
),
)
assert ma1_eq_ma2.all()
assert not (ma1 != ma2).all()
ma_eq1 = ma1_eq_ma2.all(1)
assert_array_equal(ma_eq1.mask, np.array([False, True]))
assert bool(ma_eq1[0]) is True
assert bool(ma_eq1[1]) is False
ma_eq0 = ma1_eq_ma2.all(0)
assert_array_equal(ma_eq0.mask, np.array([False, True]))
assert bool(ma_eq0[0]) is True
assert bool(ma_eq0[1]) is False
@pytest.mark.parametrize("method", ["any", "all"])
@pytest.mark.parametrize(
"array,axis",
[("a", 0), ("a", 1), ("a", None), ("b", None), ("c", 0), ("c", 1), ("c", None)],
)
def test_all_and_any(self, array, axis, method):
ma = getattr(self, "m" + array)
ma_eq = ma == ma
ma_all_or_any = getattr(ma_eq, method)(axis=axis)
filled = ma_eq.unmasked.copy()
filled[ma_eq.mask] = method == "all"
a_all_or_any = getattr(filled, method)(axis=axis)
all_masked = ma.mask.all(axis)
assert_array_equal(ma_all_or_any.mask, all_masked)
assert_array_equal(ma_all_or_any.unmasked, a_all_or_any)
# interpretation as bool
as_bool = [bool(a) for a in ma_all_or_any.ravel()]
expected = [bool(a) for a in (a_all_or_any & ~all_masked).ravel()]
assert as_bool == expected
def test_any_inplace(self):
ma_eq = self.ma == self.ma
expected = ma_eq.any(1)
out = Masked(np.zeros_like(expected.unmasked))
result = ma_eq.any(1, out=out)
assert result is out
assert_masked_equal(result, expected)
@pytest.mark.parametrize("method", ("all", "any"))
@pytest.mark.parametrize("axis", (0, 1, None))
def test_all_and_any_where(self, method, axis):
where = np.array(
[
[True, False, False],
[True, True, True],
]
)
where_final = ~self.ma.mask & where
ma_eq = self.ma == self.ma
ma_any = getattr(ma_eq, method)(axis, where=where)
expected_data = getattr(ma_eq.unmasked, method)(axis, where=where_final)
expected_mask = np.logical_or.reduce(
self.ma.mask, axis=axis, where=where_final
) | (~where_final).all(axis)
assert_array_equal(ma_any.unmasked, expected_data)
assert_array_equal(ma_any.mask, expected_mask)
@pytest.mark.parametrize("offset", (0, 1))
def test_diagonal(self, offset):
mda = self.ma.diagonal(offset=offset)
expected = Masked(
self.a.diagonal(offset=offset), self.mask_a.diagonal(offset=offset)
)
assert_masked_equal(mda, expected)
@pytest.mark.parametrize("offset", (0, 1))
def test_trace(self, offset):
mta = self.ma.trace(offset=offset)
expected = Masked(
self.a.trace(offset=offset), self.mask_a.trace(offset=offset, dtype=bool)
)
assert_masked_equal(mta, expected)
def test_clip(self):
maclip = self.ma.clip(self.b, self.c)
expected = Masked(self.a.clip(self.b, self.c), self.mask_a)
assert_masked_equal(maclip, expected)
def test_clip_masked_min_max(self):
maclip = self.ma.clip(self.mb, self.mc)
# Need to be careful with min, max because of Longitude, which wraps.
dmax = np.maximum(np.maximum(self.a, self.b), self.c).max()
dmin = np.minimum(np.minimum(self.a, self.b), self.c).min()
expected = Masked(
self.a.clip(self.mb.filled(dmin), self.mc.filled(dmax)), mask=self.mask_a
)
assert_masked_equal(maclip, expected)
class TestMaskedQuantityMethods(TestMaskedArrayMethods, QuantitySetup):
pass
class TestMaskedLongitudeMethods(TestMaskedArrayMethods, LongitudeSetup):
pass
class TestMaskedArrayProductMethods(MaskedArraySetup):
# These cannot work on Quantity, so done separately
@pytest.mark.parametrize("axis", (0, 1, None))
def test_prod(self, axis):
ma_sum = self.ma.prod(axis)
expected_data = self.a.prod(axis)
expected_mask = self.ma.mask.any(axis)
assert_array_equal(ma_sum.unmasked, expected_data)
assert_array_equal(ma_sum.mask, expected_mask)
@pytest.mark.parametrize("axis", (0, 1, None))
def test_cumprod(self, axis):
ma_sum = self.ma.cumprod(axis)
expected_data = self.a.cumprod(axis)
mask = self.mask_a
if axis is None:
mask = mask.ravel()
expected_mask = np.logical_or.accumulate(mask, axis=axis)
assert_array_equal(ma_sum.unmasked, expected_data)
assert_array_equal(ma_sum.mask, expected_mask)
def test_masked_str_explicit():
sa = np.array([(1.0, 2.0), (3.0, 4.0)], dtype="f8,f8")
msa = Masked(sa, [(False, True), (False, False)])
assert str(msa) == "[(1., ——) (3., 4.)]"
assert str(msa[0]) == "(1., ——)"
assert str(msa[1]) == "(3., 4.)"
with np.printoptions(precision=3, floatmode="fixed"):
assert str(msa) == "[(1.000, ———) (3.000, 4.000)]"
def test_masked_repr_explicit():
# Use explicit endianness to ensure tests pass on all architectures
sa = np.array([(1.0, 2.0), (3.0, 4.0)], dtype=">f8,>f8")
msa = Masked(sa, [(False, True), (False, False)])
assert (
repr(msa)
== "MaskedNDArray([(1., ——), (3., 4.)], dtype=[('f0', '>f8'), ('f1', '>f8')])"
)
assert (
repr(msa[0]) == "MaskedNDArray((1., ——), dtype=[('f0', '>f8'), ('f1', '>f8')])"
)
assert (
repr(msa[1]) == "MaskedNDArray((3., 4.), dtype=[('f0', '>f8'), ('f1', '>f8')])"
)
def test_masked_repr_summary():
ma = Masked(np.arange(15.0), mask=[True] + [False] * 14)
with np.printoptions(threshold=2):
assert repr(ma) == "MaskedNDArray([———, 1., 2., ..., 12., 13., 14.])"
def test_masked_repr_nodata():
assert repr(Masked([])) == "MaskedNDArray([], dtype=float64)"
class TestMaskedArrayRepr(MaskedArraySetup):
def test_array_str(self):
# Very blunt check that they work at all.
str(self.ma)
str(self.mb)
str(self.mc)
str(self.msa)
str(self.msb)
str(self.msc)
def test_scalar_str(self):
assert self.mb[0].shape == ()
str(self.mb[0])
assert self.msb[0].shape == ()
str(self.msb[0])
assert self.msc[0].shape == ()
str(self.msc[0])
def test_array_repr(self):
repr(self.ma)
repr(self.mb)
repr(self.mc)
repr(self.msa)
repr(self.msb)
repr(self.msc)
def test_scalar_repr(self):
repr(self.mb[0])
repr(self.msb[0])
repr(self.msc[0])
class TestMaskedQuantityRepr(TestMaskedArrayRepr, QuantitySetup):
pass
class TestMaskedRecarray(MaskedArraySetup):
@classmethod
def setup_class(self):
super().setup_class()
self.ra = self.sa.view(np.recarray)
self.mra = Masked(self.ra, mask=self.mask_sa)
def test_recarray_setup(self):
assert isinstance(self.mra, Masked)
assert isinstance(self.mra, np.recarray)
assert np.all(self.mra.unmasked == self.ra)
assert np.all(self.mra.mask == self.mask_sa)
assert_array_equal(self.mra.view(np.ndarray), self.sa)
assert isinstance(self.mra.a, Masked)
assert_array_equal(self.mra.a.unmasked, self.sa["a"])
assert_array_equal(self.mra.a.mask, self.mask_sa["a"])
def test_recarray_setting(self):
mra = self.mra.copy()
mra.a = self.msa["b"]
assert_array_equal(mra.a.unmasked, self.msa["b"].unmasked)
assert_array_equal(mra.a.mask, self.msa["b"].mask)
@pytest.mark.parametrize("attr", [0, "a"])
def test_recarray_field_getting(self, attr):
mra_a = self.mra.field(attr)
assert isinstance(mra_a, Masked)
assert_array_equal(mra_a.unmasked, self.sa["a"])
assert_array_equal(mra_a.mask, self.mask_sa["a"])
@pytest.mark.parametrize("attr", [0, "a"])
def test_recarray_field_setting(self, attr):
mra = self.mra.copy()
mra.field(attr, self.msa["b"])
assert_array_equal(mra.a.unmasked, self.msa["b"].unmasked)
assert_array_equal(mra.a.mask, self.msa["b"].mask)
class TestMaskedArrayInteractionWithNumpyMA(MaskedArraySetup):
def test_masked_array_from_masked(self):
"""Check that we can initialize a MaskedArray properly."""
np_ma = np.ma.MaskedArray(self.ma)
assert type(np_ma) is np.ma.MaskedArray
assert type(np_ma.data) is self._data_cls
assert type(np_ma.mask) is np.ndarray
assert_array_equal(np_ma.data, self.a)
assert_array_equal(np_ma.mask, self.mask_a)
def test_view_as_masked_array(self):
"""Test that we can be viewed as a MaskedArray."""
np_ma = self.ma.view(np.ma.MaskedArray)
assert type(np_ma) is np.ma.MaskedArray
assert type(np_ma.data) is self._data_cls
assert type(np_ma.mask) is np.ndarray
assert_array_equal(np_ma.data, self.a)
assert_array_equal(np_ma.mask, self.mask_a)
class TestMaskedQuantityInteractionWithNumpyMA(
TestMaskedArrayInteractionWithNumpyMA, QuantitySetup
):
pass
7874713260493b702ad8263e5a9b2822c95efaa56c71acba14b4754423589b4a
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This file defines the classes used to represent a 'coordinate', which includes
axes, ticks, tick labels, and grid lines.
"""
import warnings
import numpy as np
from matplotlib import rcParams
from matplotlib.patches import PathPatch
from matplotlib.path import Path
from matplotlib.ticker import Formatter
from matplotlib.transforms import Affine2D, ScaledTranslation
from astropy import units as u
from astropy.utils.exceptions import AstropyDeprecationWarning
from .axislabels import AxisLabels
from .formatter_locator import AngleFormatterLocator, ScalarFormatterLocator
from .frame import EllipticalFrame, RectangularFrame1D
from .grid_paths import get_gridline_path, get_lon_lat_path
from .ticklabels import TickLabels
from .ticks import Ticks
__all__ = ["CoordinateHelper"]
# Matplotlib's gridlines use Line2D, but ours use PathPatch.
# Patches take a slightly different format of linestyle argument.
LINES_TO_PATCHES_LINESTYLE = {
"-": "solid",
"--": "dashed",
"-.": "dashdot",
":": "dotted",
"none": "none",
"None": "none",
" ": "none",
"": "none",
}
def wrap_angle_at(values, coord_wrap):
# On ARM processors, np.mod emits warnings if there are NaN values in the
# array, although this doesn't seem to happen on other processors.
with np.errstate(invalid="ignore"):
return np.mod(values - coord_wrap, 360.0) - (360.0 - coord_wrap)
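# Minimal usage sketch (comment only, not part of the module): values are
# wrapped into the half-open interval [coord_wrap - 360, coord_wrap), e.g.
#
#     >>> wrap_angle_at(np.array([10.0, 350.0]), 180.0)
#     array([ 10., -10.])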
class CoordinateHelper:
"""
Helper class to control one of the coordinates in the
:class:`~astropy.visualization.wcsaxes.WCSAxes`.
Parameters
----------
parent_axes : :class:`~astropy.visualization.wcsaxes.WCSAxes`
The axes the coordinate helper belongs to.
parent_map : :class:`~astropy.visualization.wcsaxes.CoordinatesMap`
The :class:`~astropy.visualization.wcsaxes.CoordinatesMap` object this
coordinate belongs to.
transform : `~matplotlib.transforms.Transform`
The transform corresponding to this coordinate system.
coord_index : int
The index of this coordinate in the
:class:`~astropy.visualization.wcsaxes.CoordinatesMap`.
coord_type : {'longitude', 'latitude', 'scalar'}
The type of this coordinate, which is used to determine the wrapping and
boundary behavior of coordinates. Longitudes wrap at ``coord_wrap``,
latitudes have to be in the range -90 to 90, and scalars are unbounded
and do not wrap.
coord_unit : `~astropy.units.Unit`
The unit that this coordinate is in given the output of transform.
format_unit : `~astropy.units.Unit`, optional
The unit to use to display the coordinates.
coord_wrap : `astropy.units.Quantity`
The angle at which the longitude wraps (defaults to 360 degrees).
frame : `~astropy.visualization.wcsaxes.frame.BaseFrame`
The frame of the :class:`~astropy.visualization.wcsaxes.WCSAxes`.
"""
def __init__(
self,
parent_axes=None,
parent_map=None,
transform=None,
coord_index=None,
coord_type="scalar",
coord_unit=None,
coord_wrap=None,
frame=None,
format_unit=None,
default_label=None,
):
# Keep a reference to the parent axes and the transform
self.parent_axes = parent_axes
self.parent_map = parent_map
self.transform = transform
self.coord_index = coord_index
self.coord_unit = coord_unit
self._format_unit = format_unit
self.frame = frame
self.default_label = default_label or ""
self._auto_axislabel = True
# Disable auto label for elliptical frames as it puts labels in
# annoying places.
if issubclass(self.parent_axes.frame_class, EllipticalFrame):
self._auto_axislabel = False
self.set_coord_type(coord_type, coord_wrap)
# Initialize ticks
self.dpi_transform = Affine2D()
self.offset_transform = ScaledTranslation(0, 0, self.dpi_transform)
self.ticks = Ticks(transform=parent_axes.transData + self.offset_transform)
# Initialize tick labels
self.ticklabels = TickLabels(
self.frame,
transform=None, # display coordinates
figure=parent_axes.get_figure(),
)
self.ticks.display_minor_ticks(rcParams["xtick.minor.visible"])
self.minor_frequency = 5
# Initialize axis labels
self.axislabels = AxisLabels(
self.frame,
transform=None, # display coordinates
figure=parent_axes.get_figure(),
)
# Initialize container for the grid lines
self.grid_lines = []
# Initialize grid style. Take defaults from matplotlib.rcParams.
# Based on matplotlib.axis.YTick._get_gridline.
self.grid_lines_kwargs = {
"visible": False,
"facecolor": "none",
"edgecolor": rcParams["grid.color"],
"linestyle": LINES_TO_PATCHES_LINESTYLE[rcParams["grid.linestyle"]],
"linewidth": rcParams["grid.linewidth"],
"alpha": rcParams["grid.alpha"],
"transform": self.parent_axes.transData,
}
def grid(self, draw_grid=True, grid_type=None, **kwargs):
"""
Plot grid lines for this coordinate.
Standard matplotlib appearance options (color, alpha, etc.) can be
passed as keyword arguments.
Parameters
----------
draw_grid : bool
Whether to show the gridlines
grid_type : {'lines', 'contours'}
Whether to plot the grid by determining the grid lines in
world coordinates and then transforming them to pixel coordinates
(``'lines'``) or by determining the world coordinates at many
positions in the image and then drawing contours
(``'contours'``). The first is recommended for 2-d images, while
for 3-d (or higher dimensional) cubes, the ``'contours'`` option
is recommended. By default, 'lines' is used if the transform has
an inverse, otherwise 'contours' is used.
"""
if grid_type == "lines" and not self.transform.has_inverse:
raise ValueError(
"The specified transform has no inverse, so the "
"grid cannot be drawn using grid_type='lines'"
)
if grid_type is None:
grid_type = "lines" if self.transform.has_inverse else "contours"
if grid_type in ("lines", "contours"):
self._grid_type = grid_type
else:
raise ValueError("grid_type should be 'lines' or 'contours'")
if "color" in kwargs:
kwargs["edgecolor"] = kwargs.pop("color")
self.grid_lines_kwargs.update(kwargs)
if self.grid_lines_kwargs["visible"]:
if not draw_grid:
self.grid_lines_kwargs["visible"] = False
else:
self.grid_lines_kwargs["visible"] = True
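    # Illustrative usage, assuming ``ax`` is a WCSAxes instance (not defined
    # in this module):
    #
    #     ax.coords[0].grid(color="white", alpha=0.5, grid_type="contours")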
def set_coord_type(self, coord_type, coord_wrap=None):
"""
Set the coordinate type for the axis.
Parameters
----------
coord_type : str
One of 'longitude', 'latitude' or 'scalar'
coord_wrap : `~astropy.units.Quantity`, optional
The value to wrap at for angular coordinates.
"""
self.coord_type = coord_type
if coord_wrap is not None and not isinstance(coord_wrap, u.Quantity):
warnings.warn(
"Passing 'coord_wrap' as a number is deprecated. Use a Quantity with units convertible to angular degrees instead.",
AstropyDeprecationWarning,
)
coord_wrap = coord_wrap * u.deg
if coord_type == "longitude" and coord_wrap is None:
self.coord_wrap = 360 * u.deg
elif coord_type != "longitude" and coord_wrap is not None:
raise NotImplementedError(
"coord_wrap is not yet supported for non-longitude coordinates"
)
else:
self.coord_wrap = coord_wrap
# Initialize tick formatter/locator
if coord_type == "scalar":
self._coord_scale_to_deg = None
self._formatter_locator = ScalarFormatterLocator(unit=self.coord_unit)
elif coord_type in ["longitude", "latitude"]:
if self.coord_unit is u.deg:
self._coord_scale_to_deg = None
else:
self._coord_scale_to_deg = self.coord_unit.to(u.deg)
self._formatter_locator = AngleFormatterLocator(
unit=self.coord_unit, format_unit=self._format_unit
)
else:
raise ValueError(
"coord_type should be one of 'scalar', 'longitude', or 'latitude'"
)
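    # Illustrative usage (``ax`` is an assumed WCSAxes instance): wrap a
    # longitude axis at 180 degrees instead of the default 360, e.g.
    #
    #     ax.coords[0].set_coord_type("longitude", coord_wrap=180 * u.deg)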
def set_major_formatter(self, formatter):
"""
Set the formatter to use for the major tick labels.
Parameters
----------
formatter : str or `~matplotlib.ticker.Formatter`
The format or formatter to use.
"""
if isinstance(formatter, Formatter):
raise NotImplementedError() # figure out how to swap out formatter
elif isinstance(formatter, str):
self._formatter_locator.format = formatter
else:
raise TypeError("formatter should be a string or a Formatter instance")
def format_coord(self, value, format="auto"):
"""
Given the value of a coordinate, will format it according to the
format of the formatter_locator.
Parameters
----------
value : float
The value to format
format : {'auto', 'ascii', 'latex'}, optional
The format to use - by default the formatting will be adjusted
depending on whether Matplotlib is using LaTeX or MathText. To
get plain ASCII strings, use format='ascii'.
"""
if not hasattr(self, "_fl_spacing"):
return "" # _update_ticks has not been called yet
fl = self._formatter_locator
if isinstance(fl, AngleFormatterLocator):
# Convert to degrees if needed
if self._coord_scale_to_deg is not None:
value *= self._coord_scale_to_deg
if self.coord_type == "longitude":
value = wrap_angle_at(value, self.coord_wrap.to_value(u.deg))
value = value * u.degree
value = value.to_value(fl._unit)
spacing = self._fl_spacing
string = fl.formatter(values=[value] * fl._unit, spacing=spacing, format=format)
return string[0]
def set_separator(self, separator):
"""
Set the separator to use for the angle major tick labels.
Parameters
----------
separator : str or tuple or None
The separator between numbers in sexagesimal representation. Can be
either a string or a tuple (or `None` for default).
"""
if not (self._formatter_locator.__class__ == AngleFormatterLocator):
raise TypeError("Separator can only be specified for angle coordinates")
if isinstance(separator, (str, tuple)) or separator is None:
self._formatter_locator.sep = separator
else:
raise TypeError("separator should be a string, a tuple, or None")
def set_format_unit(self, unit, decimal=None, show_decimal_unit=True):
"""
Set the unit for the major tick labels.
Parameters
----------
unit : :class:`~astropy.units.Unit`
The unit to which the tick labels should be converted.
decimal : bool, optional
Whether to use decimal formatting. By default this is `False`
for degrees or hours (which therefore use sexagesimal formatting)
and `True` for all other units.
show_decimal_unit : bool, optional
Whether to include units when in decimal mode.
"""
self._formatter_locator.format_unit = u.Unit(unit)
self._formatter_locator.decimal = decimal
self._formatter_locator.show_decimal_unit = show_decimal_unit
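    # Illustrative usage (``ax`` is an assumed WCSAxes instance): display a
    # longitude axis in hour angle rather than degrees, e.g.
    #
    #     ax.coords[0].set_format_unit(u.hourangle)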
def get_format_unit(self):
"""
Get the unit for the major tick labels.
"""
return self._formatter_locator.format_unit
def set_ticks(
self,
values=None,
spacing=None,
number=None,
size=None,
width=None,
color=None,
alpha=None,
direction=None,
exclude_overlapping=None,
):
"""
Set the location and properties of the ticks.
At most one of the options from ``values``, ``spacing``, or
``number`` can be specified.
Parameters
----------
values : iterable, optional
The coordinate values at which to show the ticks.
spacing : float, optional
The spacing between ticks.
number : float, optional
The approximate number of ticks shown.
size : float, optional
The length of the ticks in points
color : str or tuple, optional
A valid Matplotlib color for the ticks
alpha : float, optional
The alpha value (transparency) for the ticks.
direction : {'in','out'}, optional
Whether the ticks should point inwards or outwards.
"""
if sum([values is None, spacing is None, number is None]) < 2:
raise ValueError(
"At most one of values, spacing, or number should be specified"
)
if values is not None:
self._formatter_locator.values = values
elif spacing is not None:
self._formatter_locator.spacing = spacing
elif number is not None:
self._formatter_locator.number = number
if size is not None:
self.ticks.set_ticksize(size)
if width is not None:
self.ticks.set_linewidth(width)
if color is not None:
self.ticks.set_color(color)
if alpha is not None:
self.ticks.set_alpha(alpha)
if direction is not None:
if direction in ("in", "out"):
self.ticks.set_tick_out(direction == "out")
else:
raise ValueError("direction should be 'in' or 'out'")
if exclude_overlapping is not None:
warnings.warn(
"exclude_overlapping= should be passed to "
"set_ticklabel instead of set_ticks",
AstropyDeprecationWarning,
)
self.ticklabels.set_exclude_overlapping(exclude_overlapping)
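    # Illustrative usage (``ax`` is an assumed WCSAxes instance): for an
    # angular coordinate the spacing can be given as a Quantity, e.g.
    #
    #     ax.coords[0].set_ticks(spacing=30 * u.deg, color="white", size=6)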
def set_ticks_position(self, position):
"""
Set where ticks should appear.
Parameters
----------
position : str
The axes on which the ticks for this coordinate should appear.
Should be a string containing zero or more of ``'b'``, ``'t'``,
``'l'``, ``'r'``. For example, ``'lb'`` will lead the ticks to be
shown on the left and bottom axis.
"""
self.ticks.set_visible_axes(position)
def set_ticks_visible(self, visible):
"""
Set whether ticks are visible or not.
Parameters
----------
visible : bool
The visibility of ticks. Setting as ``False`` will hide ticks
along this coordinate.
"""
self.ticks.set_visible(visible)
def set_ticklabel(
self, color=None, size=None, pad=None, exclude_overlapping=None, **kwargs
):
"""
Set the visual properties for the tick labels.
Parameters
----------
size : float, optional
The size of the ticks labels in points
color : str or tuple, optional
A valid Matplotlib color for the tick labels
pad : float, optional
Distance in points between tick and label.
exclude_overlapping : bool, optional
Whether to exclude tick labels that overlap over each other.
**kwargs
Other keyword arguments are passed to :class:`matplotlib.text.Text`.
"""
if size is not None:
self.ticklabels.set_size(size)
if color is not None:
self.ticklabels.set_color(color)
if pad is not None:
self.ticklabels.set_pad(pad)
if exclude_overlapping is not None:
self.ticklabels.set_exclude_overlapping(exclude_overlapping)
self.ticklabels.set(**kwargs)
def set_ticklabel_position(self, position):
"""
Set where tick labels should appear.
Parameters
----------
position : str
The axes on which the tick labels for this coordinate should
appear. Should be a string containing zero or more of ``'b'``,
``'t'``, ``'l'``, ``'r'``. For example, ``'lb'`` will lead the
tick labels to be shown on the left and bottom axis.
"""
self.ticklabels.set_visible_axes(position)
def set_ticklabel_visible(self, visible):
"""
Set whether the tick labels are visible or not.
Parameters
----------
visible : bool
The visibility of ticks. Setting as ``False`` will hide this
coordinate's tick labels.
"""
self.ticklabels.set_visible(visible)
def set_axislabel(self, text, minpad=1, **kwargs):
"""
Set the text and optionally visual properties for the axis label.
Parameters
----------
text : str
The axis label text.
minpad : float, optional
The padding for the label in terms of axis label font size.
**kwargs
Keywords are passed to :class:`matplotlib.text.Text`. These
can include keywords to set the ``color``, ``size``, ``weight``, and
other text properties.
"""
fontdict = kwargs.pop("fontdict", None)
# NOTE: When using plt.xlabel/plt.ylabel, minpad can get set explicitly
# to None so we need to make sure that in that case we change to a
# default numerical value.
if minpad is None:
minpad = 1
self.axislabels.set_text(text)
self.axislabels.set_minpad(minpad)
self.axislabels.set(**kwargs)
if fontdict is not None:
self.axislabels.update(fontdict)
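    # Illustrative usage (``ax`` is an assumed WCSAxes instance):
    #
    #     ax.coords[1].set_axislabel("Declination (J2000)", minpad=0.75)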
def get_axislabel(self):
"""
Get the text for the axis label.
Returns
-------
label : str
The axis label
"""
return self.axislabels.get_text()
def set_auto_axislabel(self, auto_label):
"""
Set whether a default axis label should be rendered if no explicit label is provided.
Parameters
----------
auto_label : `bool`
`True` if default labels will be rendered.
"""
self._auto_axislabel = bool(auto_label)
def get_auto_axislabel(self):
"""
Return whether a default axis label will be rendered if no explicit label is provided.
Returns
-------
auto_axislabel : `bool`
`True` if default labels will be rendered.
"""
return self._auto_axislabel
def _get_default_axislabel(self):
unit = self.get_format_unit() or self.coord_unit
if not unit or unit is u.one or self.coord_type in ("longitude", "latitude"):
return f"{self.default_label}"
else:
return f"{self.default_label} [{unit:latex}]"
def set_axislabel_position(self, position):
"""
Set where axis labels should appear.
Parameters
----------
position : str
The axes on which the axis label for this coordinate should
appear. Should be a string containing zero or more of ``'b'``,
``'t'``, ``'l'``, ``'r'``. For example, ``'lb'`` will lead the
axis label to be shown on the left and bottom axis.
"""
self.axislabels.set_visible_axes(position)
def set_axislabel_visibility_rule(self, rule):
"""
Set the rule used to determine when the axis label is drawn.
Parameters
----------
rule : str
If the rule is 'always' axis labels will always be drawn on the
axis. If the rule is 'ticks' the label will only be drawn if ticks
were drawn on that axis. If the rule is 'labels' the axis label
will only be drawn if tick labels were drawn on that axis.
"""
self.axislabels.set_visibility_rule(rule)
def get_axislabel_visibility_rule(self):
"""
Get the rule used to determine when the axis label is drawn.
"""
return self.axislabels.get_visibility_rule()
@property
def locator(self):
return self._formatter_locator.locator
@property
def formatter(self):
return self._formatter_locator.formatter
def _draw_grid(self, renderer):
renderer.open_group("grid lines")
self._update_ticks()
if self.grid_lines_kwargs["visible"]:
if isinstance(self.frame, RectangularFrame1D):
self._update_grid_lines_1d()
else:
if self._grid_type == "lines":
self._update_grid_lines()
else:
self._update_grid_contour()
if self._grid_type == "lines":
frame_patch = self.frame.patch
for path in self.grid_lines:
p = PathPatch(path, **self.grid_lines_kwargs)
p.set_clip_path(frame_patch)
p.draw(renderer)
elif self._grid is not None:
for line in self._grid.collections:
line.set(**self.grid_lines_kwargs)
line.draw(renderer)
renderer.close_group("grid lines")
def _draw_ticks(self, renderer, bboxes, ticklabels_bbox):
"""
Draw all ticks and ticklabels.
"""
renderer.open_group("ticks")
self.ticks.draw(renderer)
self.ticklabels.draw(
renderer,
bboxes=bboxes,
ticklabels_bbox=ticklabels_bbox,
tick_out_size=self.ticks.out_size,
)
renderer.close_group("ticks")
def _draw_axislabels(self, renderer, bboxes, ticklabels_bbox, visible_ticks):
# Render the default axis label if no axis label is set.
if self._auto_axislabel and not self.get_axislabel():
self.set_axislabel(self._get_default_axislabel())
renderer.open_group("axis labels")
self.axislabels.draw(
renderer,
bboxes=bboxes,
ticklabels_bbox=ticklabels_bbox,
coord_ticklabels_bbox=ticklabels_bbox[self],
ticks_locs=self.ticks.ticks_locs,
visible_ticks=visible_ticks,
)
renderer.close_group("axis labels")
def _update_ticks(self):
if self.coord_index is None:
return
# TODO: this method should be optimized for speed
# Here we determine the location and rotation of all the ticks. For
# each axis, we can check the intersections for the specific
# coordinate and once we have the tick positions, we can use the WCS
# to determine the rotations.
# Find the range of coordinates in all directions
coord_range = self.parent_map.get_coord_range()
# First find the ticks we want to show
tick_world_coordinates, self._fl_spacing = self.locator(
*coord_range[self.coord_index]
)
if self.ticks.get_display_minor_ticks():
minor_ticks_w_coordinates = self._formatter_locator.minor_locator(
self._fl_spacing,
self.get_minor_frequency(),
*coord_range[self.coord_index],
)
# We want to allow non-standard rectangular frames, so we just rely on
# the parent axes to tell us what the bounding frame is.
from . import conf
frame = self.frame.sample(conf.frame_boundary_samples)
self.ticks.clear()
self.ticklabels.clear()
self.lblinfo = []
self.lbl_world = []
# Look up parent axes' transform from data to figure coordinates.
#
# See:
# https://matplotlib.org/stable/tutorials/advanced/transforms_tutorial.html#the-transformation-pipeline
transData = self.parent_axes.transData
invertedTransLimits = transData.inverted()
for axis, spine in frame.items():
if spine.data.size == 0:
continue
if not isinstance(self.frame, RectangularFrame1D):
# Determine tick rotation in display coordinates and compare to
# the normal angle in display coordinates.
pixel0 = spine.data
world0 = spine.world[:, self.coord_index]
if np.isnan(world0).all():
continue
axes0 = transData.transform(pixel0)
# Advance 2 pixels in figure coordinates
pixel1 = axes0.copy()
pixel1[:, 0] += 2.0
pixel1 = invertedTransLimits.transform(pixel1)
with np.errstate(invalid="ignore"):
world1 = self.transform.transform(pixel1)[:, self.coord_index]
# Advance 2 pixels in figure coordinates
pixel2 = axes0.copy()
pixel2[:, 1] += 2.0 if self.frame.origin == "lower" else -2.0
pixel2 = invertedTransLimits.transform(pixel2)
with np.errstate(invalid="ignore"):
world2 = self.transform.transform(pixel2)[:, self.coord_index]
dx = world1 - world0
dy = world2 - world0
# Rotate by 90 degrees
dx, dy = -dy, dx
if self.coord_type == "longitude":
if self._coord_scale_to_deg is not None:
dx *= self._coord_scale_to_deg
dy *= self._coord_scale_to_deg
# Here we wrap at 180 not self.coord_wrap since we want to
# always ensure abs(dx) < 180 and abs(dy) < 180
dx = wrap_angle_at(dx, 180.0)
dy = wrap_angle_at(dy, 180.0)
tick_angle = np.degrees(np.arctan2(dy, dx))
normal_angle_full = np.hstack(
[spine.normal_angle, spine.normal_angle[-1]]
)
with np.errstate(invalid="ignore"):
reset = ((normal_angle_full - tick_angle) % 360 > 90.0) & (
(tick_angle - normal_angle_full) % 360 > 90.0
)
tick_angle[reset] -= 180.0
else:
rotation = 90 if axis == "b" else -90
tick_angle = np.zeros((conf.frame_boundary_samples,)) + rotation
# We find for each interval the starting and ending coordinate,
# ensuring that we take wrapping into account correctly for
# longitudes.
w1 = spine.world[:-1, self.coord_index]
w2 = spine.world[1:, self.coord_index]
if self.coord_type == "longitude":
if self._coord_scale_to_deg is not None:
w1 = w1 * self._coord_scale_to_deg
w2 = w2 * self._coord_scale_to_deg
w1 = wrap_angle_at(w1, self.coord_wrap.to_value(u.deg))
w2 = wrap_angle_at(w2, self.coord_wrap.to_value(u.deg))
with np.errstate(invalid="ignore"):
w1[w2 - w1 > 180.0] += 360
w2[w1 - w2 > 180.0] += 360
if self._coord_scale_to_deg is not None:
w1 = w1 / self._coord_scale_to_deg
w2 = w2 / self._coord_scale_to_deg
# For longitudes, we need to check ticks as well as ticks + 360,
# since the above can produce pairs such as 359 to 361 or 0.5 to
# 1.5, both of which would match a tick at 0.75. Otherwise we just
# check the ticks determined above.
self._compute_ticks(tick_world_coordinates, spine, axis, w1, w2, tick_angle)
if self.ticks.get_display_minor_ticks():
self._compute_ticks(
minor_ticks_w_coordinates,
spine,
axis,
w1,
w2,
tick_angle,
ticks="minor",
)
# format tick labels, add to scene
text = self.formatter(
self.lbl_world * tick_world_coordinates.unit, spacing=self._fl_spacing
)
for kwargs, txt in zip(self.lblinfo, text):
self.ticklabels.add(text=txt, **kwargs)
def _compute_ticks(
self, tick_world_coordinates, spine, axis, w1, w2, tick_angle, ticks="major"
):
if self.coord_type == "longitude":
tick_world_coordinates_values = tick_world_coordinates.to_value(u.deg)
tick_world_coordinates_values = np.hstack(
[tick_world_coordinates_values, tick_world_coordinates_values + 360]
)
tick_world_coordinates_values *= u.deg.to(self.coord_unit)
else:
tick_world_coordinates_values = tick_world_coordinates.to_value(
self.coord_unit
)
for t in tick_world_coordinates_values:
# Find steps where a tick is present. We have to check
# separately for the case where the tick falls exactly on the
# frame points, otherwise we'll get two matches, one for w1 and
# one for w2.
with np.errstate(invalid="ignore"):
intersections = np.hstack(
[
np.nonzero((t - w1) == 0)[0],
np.nonzero(((t - w1) * (t - w2)) < 0)[0],
]
)
# But we also need to check for intersection with the last w2
if t - w2[-1] == 0:
intersections = np.append(intersections, len(w2) - 1)
# Loop over ticks, and find exact pixel coordinates by linear
# interpolation
for imin in intersections:
imax = imin + 1
if np.allclose(w1[imin], w2[imin], rtol=1.0e-13, atol=1.0e-13):
continue # tick is exactly aligned with frame
else:
frac = (t - w1[imin]) / (w2[imin] - w1[imin])
x_data_i = spine.data[imin, 0] + frac * (
spine.data[imax, 0] - spine.data[imin, 0]
)
y_data_i = spine.data[imin, 1] + frac * (
spine.data[imax, 1] - spine.data[imin, 1]
)
delta_angle = tick_angle[imax] - tick_angle[imin]
if delta_angle > 180.0:
delta_angle -= 360.0
elif delta_angle < -180.0:
delta_angle += 360.0
angle_i = tick_angle[imin] + frac * delta_angle
if self.coord_type == "longitude":
if self._coord_scale_to_deg is not None:
t *= self._coord_scale_to_deg
world = wrap_angle_at(t, self.coord_wrap.to_value(u.deg))
if self._coord_scale_to_deg is not None:
world /= self._coord_scale_to_deg
else:
world = t
if ticks == "major":
self.ticks.add(
axis=axis,
pixel=(x_data_i, y_data_i),
world=world,
angle=angle_i,
axis_displacement=imin + frac,
)
# store information to pass to ticklabels.add
# it's faster to format many ticklabels at once outside
# of the loop
self.lblinfo.append(
dict(
axis=axis,
data=(x_data_i, y_data_i),
world=world,
angle=spine.normal_angle[imin],
axis_displacement=imin + frac,
)
)
self.lbl_world.append(world)
else:
self.ticks.add_minor(
minor_axis=axis,
minor_pixel=(x_data_i, y_data_i),
minor_world=world,
minor_angle=angle_i,
minor_axis_displacement=imin + frac,
)
def display_minor_ticks(self, display_minor_ticks):
"""
Display minor ticks for this coordinate.
Parameters
----------
display_minor_ticks : bool
Whether or not to display minor ticks.
"""
self.ticks.display_minor_ticks(display_minor_ticks)
def get_minor_frequency(self):
return self.minor_frequency
def set_minor_frequency(self, frequency):
"""
        Set the frequency of minor ticks per major tick.
Parameters
----------
frequency : int
            The number of minor ticks per major tick.
"""
self.minor_frequency = frequency
def _update_grid_lines_1d(self):
if self.coord_index is None:
return
x_ticks_pos = [a[0] for a in self.ticks.pixel["b"]]
ymin, ymax = self.parent_axes.get_ylim()
self.grid_lines = []
for x_coord in x_ticks_pos:
pixel = [[x_coord, ymin], [x_coord, ymax]]
self.grid_lines.append(Path(pixel))
def _update_grid_lines(self):
# For 3-d WCS with a correlated third axis, the *proper* way of
# drawing a grid should be to find the world coordinates of all pixels
        # and draw contours. What we are doing here assumes that we can
# define the grid lines with just two of the coordinates (and
# therefore assumes that the other coordinates are fixed and set to
# the value in the slice). Here we basically assume that if the WCS
# had a third axis, it has been abstracted away in the transformation.
if self.coord_index is None:
return
coord_range = self.parent_map.get_coord_range()
tick_world_coordinates, spacing = self.locator(*coord_range[self.coord_index])
tick_world_coordinates_values = tick_world_coordinates.to_value(self.coord_unit)
n_coord = len(tick_world_coordinates_values)
if n_coord == 0:
return
from . import conf
n_samples = conf.grid_samples
xy_world = np.zeros((n_samples * n_coord, 2))
self.grid_lines = []
for iw, w in enumerate(tick_world_coordinates_values):
subset = slice(iw * n_samples, (iw + 1) * n_samples)
if self.coord_index == 0:
xy_world[subset, 0] = np.repeat(w, n_samples)
xy_world[subset, 1] = np.linspace(
coord_range[1][0], coord_range[1][1], n_samples
)
else:
xy_world[subset, 0] = np.linspace(
coord_range[0][0], coord_range[0][1], n_samples
)
xy_world[subset, 1] = np.repeat(w, n_samples)
# We now convert all the world coordinates to pixel coordinates in a
# single go rather than doing this in the gridline to path conversion
# to fully benefit from vectorized coordinate transformations.
# Transform line to pixel coordinates
pixel = self.transform.inverted().transform(xy_world)
# Create round-tripped values for checking
xy_world_round = self.transform.transform(pixel)
for iw in range(n_coord):
subset = slice(iw * n_samples, (iw + 1) * n_samples)
self.grid_lines.append(
self._get_gridline(
xy_world[subset], pixel[subset], xy_world_round[subset]
)
)
def add_tickable_gridline(self, name, constant):
"""
Define a gridline that can be used for ticks and labels.
This gridline is not itself drawn, but instead can be specified in calls to
methods such as
:meth:`~astropy.visualization.wcsaxes.coordinate_helpers.CoordinateHelper.set_ticklabel_position`
for drawing ticks and labels. Since the gridline has a constant value in this
coordinate, and thus would not have any ticks or labels for the same coordinate,
the call to
:meth:`~astropy.visualization.wcsaxes.coordinate_helpers.CoordinateHelper.set_ticklabel_position`
would typically be made on the complementary coordinate.
Parameters
----------
name : str
The name for the gridline, usually a single character, but can be longer
constant : `~astropy.units.Quantity`
The constant coordinate value of the gridline
Notes
-----
A limitation is that the tickable part of the gridline must be contiguous. If
the gridline consists of more than one disconnected segment within the plot
extent, only one of those segments will be made tickable.
"""
if self.coord_index is None:
return
if name in self.frame:
raise ValueError(f"The frame already has a spine with the name '{name}'")
coord_range = self.parent_map.get_coord_range()
constant = constant.to_value(self.coord_unit)
from . import conf
n_samples = conf.grid_samples
# See comment in _update_grid_lines() about a WCS with more than 2 axes
xy_world = np.zeros((n_samples, 2))
xy_world[:, self.coord_index] = np.repeat(constant, n_samples)
# If the complementary coordinate is longitude, we attempt to close the gridline
# If such closure is a discontinuity, it will be filtered out later
if self.parent_map[1 - self.coord_index].coord_type == "longitude":
xy_world[:-1, 1 - self.coord_index] = np.linspace(
coord_range[1 - self.coord_index][0],
coord_range[1 - self.coord_index][1],
n_samples - 1,
)
xy_world[-1, 1 - self.coord_index] = coord_range[1 - self.coord_index][0]
else:
xy_world[:, 1 - self.coord_index] = np.linspace(
coord_range[1 - self.coord_index][0],
coord_range[1 - self.coord_index][1],
n_samples,
)
# Transform line to pixel coordinates
pixel = self.transform.inverted().transform(xy_world)
# Create round-tripped values for checking
xy_world_round = self.transform.transform(pixel)
# Get the path of the gridline, which masks hidden parts
gridline = self._get_gridline(xy_world, pixel, xy_world_round)
def data_for_spine(spine):
vertices = gridline.vertices.copy()
codes = gridline.codes.copy()
# Retain the parts of the gridline within the rectangular plot bounds.
# We ought to use the potentially non-rectangular plot frame, but
# calculating that patch requires updating all spines first, which is a
# catch-22.
xmin, xmax = spine.parent_axes.get_xlim()
ymin, ymax = spine.parent_axes.get_ylim()
keep = (
(vertices[:, 0] >= xmin)
& (vertices[:, 0] <= xmax)
& (vertices[:, 1] >= ymin)
& (vertices[:, 1] <= ymax)
)
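            # A vertex outside the bounds becomes a MOVETO, and so does the
            # vertex immediately after it, so that no segment is drawn into
            # or out of a clipped point.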
codes[~keep] = Path.MOVETO
codes[1:][~keep[:-1]] = Path.MOVETO
# We isolate the last segment (the last run of LINETOs), which must be preceded
# by at least one MOVETO and may be succeeded by MOVETOs.
# We have to account for longitude wrapping as well.
# Bail out if there is no visible segment
lineto = np.flatnonzero(codes == Path.LINETO)
if np.size(lineto) == 0:
return np.zeros((0, 2))
# Find the start of the last segment (the last MOVETO before the LINETOs)
last_segment = np.flatnonzero(codes[: lineto[-1]] == Path.MOVETO)[-1]
# Double the gridline if it is closed (i.e., spans all longitudes)
if vertices[0, 0] == vertices[-1, 0] and vertices[0, 1] == vertices[-1, 1]:
codes = np.concatenate([codes, codes[1:]])
vertices = np.vstack([vertices, vertices[1:, :]])
# Stop the last segment before any trailing MOVETOs
moveto = np.flatnonzero(codes[last_segment + 1 :] == Path.MOVETO)
if np.size(moveto) > 0:
return vertices[last_segment : last_segment + moveto[0] + 1, :]
else:
return vertices[last_segment:n_samples, :]
self.frame[name] = self.frame.spine_class(
self.frame.parent_axes, self.frame.transform, data_func=data_for_spine
)
def _get_gridline(self, xy_world, pixel, xy_world_round):
if self.coord_type == "scalar":
return get_gridline_path(xy_world, pixel)
else:
return get_lon_lat_path(xy_world, pixel, xy_world_round)
def _clear_grid_contour(self):
if hasattr(self, "_grid") and self._grid:
for line in self._grid.collections:
line.remove()
def _update_grid_contour(self):
if self.coord_index is None:
return
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
from . import conf
res = conf.contour_grid_samples
x, y = np.meshgrid(np.linspace(xmin, xmax, res), np.linspace(ymin, ymax, res))
pixel = np.array([x.ravel(), y.ravel()]).T
world = self.transform.transform(pixel)
field = world[:, self.coord_index].reshape(res, res).T
coord_range = self.parent_map.get_coord_range()
tick_world_coordinates, spacing = self.locator(*coord_range[self.coord_index])
        # tick_world_coordinates is a Quantity array and we only need its values
tick_world_coordinates_values = tick_world_coordinates.value
if self.coord_type == "longitude":
# Find biggest gap in tick_world_coordinates and wrap in middle
# For now just assume spacing is equal, so any mid-point will do
mid = 0.5 * (
tick_world_coordinates_values[0] + tick_world_coordinates_values[1]
)
field = wrap_angle_at(field, mid)
tick_world_coordinates_values = wrap_angle_at(
tick_world_coordinates_values, mid
)
# Replace wraps by NaN
with np.errstate(invalid="ignore"):
reset = (np.abs(np.diff(field[:, :-1], axis=0)) > 180) | (
np.abs(np.diff(field[:-1, :], axis=1)) > 180
)
field[:-1, :-1][reset] = np.nan
field[1:, :-1][reset] = np.nan
field[:-1, 1:][reset] = np.nan
field[1:, 1:][reset] = np.nan
if len(tick_world_coordinates_values) > 0:
with np.errstate(invalid="ignore"):
self._grid = self.parent_axes.contour(
x,
y,
field.transpose(),
levels=np.sort(tick_world_coordinates_values),
)
else:
self._grid = None
def tick_params(self, which="both", **kwargs):
"""
Method to set the tick and tick label parameters in the same way as the
:meth:`~matplotlib.axes.Axes.tick_params` method in Matplotlib.
This is provided for convenience, but the recommended API is to use
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks_position`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel_position`,
and :meth:`~astropy.visualization.wcsaxes.CoordinateHelper.grid`.
Parameters
----------
which : {'both', 'major', 'minor'}, optional
            Which ticks to apply the settings to. By default, settings are
applied to both major and minor ticks. Note that if ``'minor'`` is
specified, only the length of the ticks can be set currently.
direction : {'in', 'out'}, optional
Puts ticks inside the axes, or outside the axes.
length : float, optional
Tick length in points.
width : float, optional
Tick width in points.
color : color, optional
Tick color (accepts any valid Matplotlib color)
pad : float, optional
Distance in points between tick and label.
labelsize : float or str, optional
Tick label font size in points or as a string (e.g., 'large').
labelcolor : color, optional
Tick label color (accepts any valid Matplotlib color)
colors : color, optional
Changes the tick color and the label color to the same value
(accepts any valid Matplotlib color).
bottom, top, left, right : bool, optional
Where to draw the ticks. Note that this will not work correctly if
the frame is not rectangular.
labelbottom, labeltop, labelleft, labelright : bool, optional
Where to draw the tick labels. Note that this will not work
correctly if the frame is not rectangular.
grid_color : color, optional
The color of the grid lines (accepts any valid Matplotlib color).
grid_alpha : float, optional
Transparency of grid lines: 0 (transparent) to 1 (opaque).
grid_linewidth : float, optional
Width of grid lines in points.
grid_linestyle : str, optional
The style of the grid lines (accepts any valid Matplotlib line
style).
"""
# First do some sanity checking on the keyword arguments
# colors= is a fallback default for color and labelcolor
if "colors" in kwargs:
if "color" not in kwargs:
kwargs["color"] = kwargs["colors"]
if "labelcolor" not in kwargs:
kwargs["labelcolor"] = kwargs["colors"]
# The only property that can be set *specifically* for minor ticks is
# the length. In future we could consider having a separate Ticks instance
# for minor ticks so that e.g. the color can be set separately.
if which == "minor":
if len(set(kwargs) - {"length"}) > 0:
raise ValueError(
"When setting which='minor', the only "
"property that can be set at the moment is "
"'length' (the minor tick length)"
)
else:
if "length" in kwargs:
self.ticks.set_minor_ticksize(kwargs["length"])
return
# At this point, we can now ignore the 'which' argument.
# Set the tick arguments
self.set_ticks(
size=kwargs.get("length"),
width=kwargs.get("width"),
color=kwargs.get("color"),
direction=kwargs.get("direction"),
)
# Set the tick position
position = None
for arg in ("bottom", "left", "top", "right"):
if arg in kwargs and position is None:
position = ""
if kwargs.get(arg):
position += arg[0]
if position is not None:
self.set_ticks_position(position)
# Set the tick label arguments.
self.set_ticklabel(
color=kwargs.get("labelcolor"),
size=kwargs.get("labelsize"),
pad=kwargs.get("pad"),
)
# Set the tick label position
position = None
for arg in ("bottom", "left", "top", "right"):
if "label" + arg in kwargs and position is None:
position = ""
if kwargs.get("label" + arg):
position += arg[0]
if position is not None:
self.set_ticklabel_position(position)
# And the grid settings
if "grid_color" in kwargs:
self.grid_lines_kwargs["edgecolor"] = kwargs["grid_color"]
if "grid_alpha" in kwargs:
self.grid_lines_kwargs["alpha"] = kwargs["grid_alpha"]
if "grid_linewidth" in kwargs:
self.grid_lines_kwargs["linewidth"] = kwargs["grid_linewidth"]
if "grid_linestyle" in kwargs:
if kwargs["grid_linestyle"] in LINES_TO_PATCHES_LINESTYLE:
self.grid_lines_kwargs["linestyle"] = LINES_TO_PATCHES_LINESTYLE[
kwargs["grid_linestyle"]
]
else:
self.grid_lines_kwargs["linestyle"] = kwargs["grid_linestyle"]
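

# Illustrative sketch (not part of the library): one way the tick and tick
# label API of the class above might be driven from user code. The WCS header
# values below are assumptions chosen purely for demonstration.
def _example_tick_configuration():  # pragma: no cover - documentation sketch
    import matplotlib.pyplot as plt

    from astropy.wcs import WCS

    wcs = WCS(naxis=2)
    wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]

    fig = plt.figure()
    ax = fig.add_subplot(projection=wcs)

    # Convenience API: set major tick appearance in one call
    ax.coords[0].tick_params(direction="in", length=6, colors="red")

    # Minor ticks: turn them on, choose how many per major tick, and note
    # that only their length can currently be set via tick_params
    ax.coords[1].display_minor_ticks(True)
    ax.coords[1].set_minor_frequency(5)
    ax.coords[1].tick_params(which="minor", length=3)

    return fig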
|
7406c8d9e4192a42c597745cbb659ea7ea30b7ecbbcb263539f716d483e83236 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file defines the AngleFormatterLocator class which is a class that
# provides both a method for a formatter and one for a locator, for a given
# label spacing. The advantage of keeping the two connected is that we need to
# make sure that the formatter can correctly represent the spacing requested and
# vice versa. For example, a format of dd:mm cannot work with a tick spacing
# that is not a multiple of one arcminute.
import re
import warnings
import numpy as np
from matplotlib import rcParams
from astropy import units as u
from astropy.coordinates import Angle
from astropy.units import UnitsError
DMS_RE = re.compile("^dd(:mm(:ss(.(s)+)?)?)?$")
HMS_RE = re.compile("^hh(:mm(:ss(.(s)+)?)?)?$")
DDEC_RE = re.compile("^d(.(d)+)?$")
DMIN_RE = re.compile("^m(.(m)+)?$")
DSEC_RE = re.compile("^s(.(s)+)?$")
SCAL_RE = re.compile("^x(.(x)+)?$")
# Units with custom representations - see the note where it is used inside
# AngleFormatterLocator.formatter for more details.
CUSTOM_UNITS = {
u.degree: u.def_unit(
"custom_degree",
represents=u.degree,
format={"generic": "\xb0", "latex": r"^\circ", "unicode": "°"},
),
u.arcmin: u.def_unit(
"custom_arcmin",
represents=u.arcmin,
format={"generic": "'", "latex": r"^\prime", "unicode": "′"},
),
u.arcsec: u.def_unit(
"custom_arcsec",
represents=u.arcsec,
format={"generic": '"', "latex": r"^{\prime\prime}", "unicode": "″"},
),
u.hourangle: u.def_unit(
"custom_hourangle",
represents=u.hourangle,
format={
"generic": "h",
"latex": r"^{\mathrm{h}}",
"unicode": r"$\mathregular{^h}$",
},
),
}
class BaseFormatterLocator:
"""
A joint formatter/locator.
"""
def __init__(
self,
values=None,
number=None,
spacing=None,
format=None,
unit=None,
format_unit=None,
):
if len([x for x in (values, number, spacing) if x is None]) < 2:
raise ValueError("At most one of values/number/spacing can be specified")
self._unit = unit
self._format_unit = format_unit or unit
if values is not None:
self.values = values
elif number is not None:
self.number = number
elif spacing is not None:
self.spacing = spacing
else:
self.number = 5
self.format = format
@property
def values(self):
return self._values
@values.setter
def values(self, values):
if not isinstance(values, u.Quantity) or (not values.ndim == 1):
raise TypeError("values should be an astropy.units.Quantity array")
if not values.unit.is_equivalent(self._unit):
raise UnitsError(
"value should be in units compatible with "
"coordinate units ({}) but found {}".format(self._unit, values.unit)
)
self._number = None
self._spacing = None
self._values = values
@property
def number(self):
return self._number
@number.setter
def number(self, number):
self._number = number
self._spacing = None
self._values = None
@property
def spacing(self):
return self._spacing
@spacing.setter
def spacing(self, spacing):
self._number = None
self._spacing = spacing
self._values = None
def minor_locator(self, spacing, frequency, value_min, value_max):
if self.values is not None:
return [] * self._unit
minor_spacing = spacing.value / frequency
values = self._locate_values(value_min, value_max, minor_spacing)
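        # ``values`` are integer multiples of the minor spacing; every
        # ``frequency``-th multiple coincides with a major tick, so those
        # entries are removed below and only the minor positions remain.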
index = np.where((values % frequency) == 0)
index = index[0][0]
values = np.delete(values, np.s_[index::frequency])
return values * minor_spacing * self._unit
@property
def format_unit(self):
return self._format_unit
@format_unit.setter
def format_unit(self, unit):
self._format_unit = u.Unit(unit)
@staticmethod
def _locate_values(value_min, value_max, spacing):
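        # Return the integer multiples of ``spacing`` that fall inside the
        # interval. For example (illustrative values), value_min=12.3,
        # value_max=35.7 and spacing=10 give imin=2 and imax=3, i.e. the
        # multiples [2, 3], corresponding to ticks at 20 and 30.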
imin = np.ceil(value_min / spacing)
imax = np.floor(value_max / spacing)
values = np.arange(imin, imax + 1, dtype=int)
return values
class AngleFormatterLocator(BaseFormatterLocator):
"""
A joint formatter/locator.
Parameters
----------
number : int, optional
Number of ticks.
"""
def __init__(
self,
values=None,
number=None,
spacing=None,
format=None,
unit=None,
decimal=None,
format_unit=None,
show_decimal_unit=True,
):
if unit is None:
unit = u.degree
if format_unit is None:
format_unit = unit
if format_unit not in (u.degree, u.hourangle, u.hour):
if decimal is False:
raise UnitsError(
"Units should be degrees or hours when using non-decimal"
" (sexagesimal) mode"
)
self._decimal = decimal
self._sep = None
self.show_decimal_unit = show_decimal_unit
super().__init__(
values=values,
number=number,
spacing=spacing,
format=format,
unit=unit,
format_unit=format_unit,
)
@property
def decimal(self):
decimal = self._decimal
if self.format_unit not in (u.degree, u.hourangle, u.hour):
if self._decimal is None:
decimal = True
elif self._decimal is False:
raise UnitsError(
"Units should be degrees or hours when using non-decimal"
" (sexagesimal) mode"
)
elif self._decimal is None:
decimal = False
return decimal
@decimal.setter
def decimal(self, value):
self._decimal = value
@property
def spacing(self):
return self._spacing
@spacing.setter
def spacing(self, spacing):
if spacing is not None and (
not isinstance(spacing, u.Quantity) or spacing.unit.physical_type != "angle"
):
raise TypeError(
"spacing should be an astropy.units.Quantity "
"instance with units of angle"
)
self._number = None
self._spacing = spacing
self._values = None
@property
def sep(self):
return self._sep
@sep.setter
def sep(self, separator):
self._sep = separator
@property
def format(self):
return self._format
@format.setter
def format(self, value):
self._format = value
if value is None:
return
if DMS_RE.match(value) is not None:
self._decimal = False
self._format_unit = u.degree
if "." in value:
self._precision = len(value) - value.index(".") - 1
self._fields = 3
else:
self._precision = 0
self._fields = value.count(":") + 1
elif HMS_RE.match(value) is not None:
self._decimal = False
self._format_unit = u.hourangle
if "." in value:
self._precision = len(value) - value.index(".") - 1
self._fields = 3
else:
self._precision = 0
self._fields = value.count(":") + 1
elif DDEC_RE.match(value) is not None:
self._decimal = True
self._format_unit = u.degree
self._fields = 1
if "." in value:
self._precision = len(value) - value.index(".") - 1
else:
self._precision = 0
elif DMIN_RE.match(value) is not None:
self._decimal = True
self._format_unit = u.arcmin
self._fields = 1
if "." in value:
self._precision = len(value) - value.index(".") - 1
else:
self._precision = 0
elif DSEC_RE.match(value) is not None:
self._decimal = True
self._format_unit = u.arcsec
self._fields = 1
if "." in value:
self._precision = len(value) - value.index(".") - 1
else:
self._precision = 0
else:
raise ValueError(f"Invalid format: {value}")
if self.spacing is not None and self.spacing < self.base_spacing:
warnings.warn("Spacing is too small - resetting spacing to match format")
self.spacing = self.base_spacing
if self.spacing is not None:
ratio = (self.spacing / self.base_spacing).decompose().value
remainder = ratio - np.round(ratio)
if abs(remainder) > 1.0e-10:
warnings.warn(
"Spacing is not a multiple of base spacing - resetting spacing to"
" match format"
)
self.spacing = self.base_spacing * max(1, round(ratio))
@property
def base_spacing(self):
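        # Smallest spacing the current format can represent exactly, e.g.
        # 'dd:mm' -> 1 arcmin, 'dd:mm:ss.s' -> 0.1 arcsec, and hour-based
        # formats such as 'hh:mm' are a factor of 15 larger (15 arcmin).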
if self.decimal:
spacing = self._format_unit / (10.0**self._precision)
else:
if self._fields == 1:
spacing = 1.0 * u.degree
elif self._fields == 2:
spacing = 1.0 * u.arcmin
elif self._fields == 3:
if self._precision == 0:
spacing = 1.0 * u.arcsec
else:
spacing = u.arcsec / (10.0**self._precision)
if self._format_unit is u.hourangle:
spacing *= 15
return spacing
def locator(self, value_min, value_max):
if self.values is not None:
# values were manually specified
return self.values, 1.1 * u.arcsec
else:
# In the special case where value_min is the same as value_max, we
# don't locate any ticks. This can occur for example when taking a
# slice for a cube (along the dimension sliced). We return a
# non-zero spacing in case the caller needs to format a single
            # coordinate, e.g. for mouseover.
if value_min == value_max:
return [] * self._unit, 1 * u.arcsec
if self.spacing is not None:
# spacing was manually specified
spacing_value = self.spacing.to_value(self._unit)
elif self.number == 0:
return [] * self._unit, np.nan * self._unit
elif self.number is not None:
# number of ticks was specified, work out optimal spacing
# first compute the exact spacing
dv = abs(float(value_max - value_min)) / self.number * self._unit
if self.format is not None and dv < self.base_spacing:
# if the spacing is less than the minimum spacing allowed by the format, simply
# use the format precision instead.
spacing_value = self.base_spacing.to_value(self._unit)
else:
# otherwise we clip to the nearest 'sensible' spacing
if self.decimal:
from .utils import select_step_scalar
spacing_value = select_step_scalar(
dv.to_value(self._format_unit)
) * self._format_unit.to(self._unit)
else:
if self._format_unit is u.degree:
from .utils import select_step_degree
spacing_value = select_step_degree(dv).to_value(self._unit)
else:
from .utils import select_step_hour
spacing_value = select_step_hour(dv).to_value(self._unit)
# We now find the interval values as multiples of the spacing and
# generate the tick positions from this.
values = self._locate_values(value_min, value_max, spacing_value)
return values * spacing_value * self._unit, spacing_value * self._unit
def formatter(self, values, spacing, format="auto"):
if not isinstance(values, u.Quantity) and values is not None:
            raise TypeError("values should be a Quantity array")
if len(values) > 0:
decimal = self.decimal
unit = self._format_unit
if unit is u.hour:
unit = u.hourangle
if self.format is None:
if decimal:
# Here we assume the spacing can be arbitrary, so for example
# 1.000223 degrees, in which case we don't want to have a
# format that rounds to degrees. So we find the number of
# decimal places we get from representing the spacing as a
# string in the desired units. The easiest way to find
# the smallest number of decimal places required is to
# format the number as a decimal float and strip any zeros
# from the end. We do this rather than just trusting e.g.
# str() because str(15.) == 15.0. We format using 10 decimal
# places by default before stripping the zeros since this
                    # corresponds to a resolution of less than a microarcsecond,
# which should be sufficient.
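                    # For example (illustrative), a spacing of 0.125 deg is
                    # formatted as '0.1250000000'; stripping the trailing
                    # zeros leaves three decimals, so the precision is 3.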
spacing = spacing.to_value(unit)
fields = 0
precision = len(
f"{spacing:.10f}".replace("0", " ").strip().split(".", 1)[1]
)
else:
spacing = spacing.to_value(unit / 3600)
if spacing >= 3600:
fields = 1
precision = 0
elif spacing >= 60:
fields = 2
precision = 0
elif spacing >= 1:
fields = 3
precision = 0
else:
fields = 3
precision = -int(np.floor(np.log10(spacing)))
else:
fields = self._fields
precision = self._precision
is_latex = format == "latex" or (
format == "auto" and rcParams["text.usetex"]
)
if decimal:
if self.show_decimal_unit:
sep = "fromunit"
if is_latex:
fmt = "latex"
else:
if unit is u.hourangle:
fmt = "unicode"
else:
fmt = "generic"
unit = CUSTOM_UNITS.get(unit, unit)
else:
sep = "fromunit"
fmt = None
elif self.sep is not None:
sep = self.sep
fmt = None
else:
sep = "fromunit"
if unit == u.degree:
if is_latex:
fmt = "latex"
else:
sep = ("\xb0", "'", '"')
fmt = None
else:
if format == "ascii":
fmt = None
elif is_latex:
fmt = "latex"
else:
# Here we still use LaTeX but this is for Matplotlib's
# LaTeX engine - we can't use fmt='latex' as this
# doesn't produce LaTeX output that respects the fonts.
sep = (
r"$\mathregular{^h}$",
r"$\mathregular{^m}$",
r"$\mathregular{^s}$",
)
fmt = None
angles = Angle(values)
string = angles.to_string(
unit=unit,
precision=precision,
decimal=decimal,
fields=fields,
sep=sep,
format=fmt,
).tolist()
return string
else:
return []
class ScalarFormatterLocator(BaseFormatterLocator):
"""
A joint formatter/locator.
"""
def __init__(
self,
values=None,
number=None,
spacing=None,
format=None,
unit=None,
format_unit=None,
):
if unit is not None:
unit = unit
format_unit = format_unit or unit
elif spacing is not None:
unit = spacing.unit
format_unit = format_unit or spacing.unit
elif values is not None:
unit = values.unit
format_unit = format_unit or values.unit
super().__init__(
values=values,
number=number,
spacing=spacing,
format=format,
unit=unit,
format_unit=format_unit,
)
@property
def spacing(self):
return self._spacing
@spacing.setter
def spacing(self, spacing):
if spacing is not None and not isinstance(spacing, u.Quantity):
raise TypeError("spacing should be an astropy.units.Quantity instance")
self._number = None
self._spacing = spacing
self._values = None
@property
def format(self):
return self._format
@format.setter
def format(self, value):
self._format = value
if value is None:
return
if SCAL_RE.match(value) is not None:
if "." in value:
self._precision = len(value) - value.index(".") - 1
else:
self._precision = 0
if self.spacing is not None and self.spacing < self.base_spacing:
warnings.warn(
"Spacing is too small - resetting spacing to match format"
)
self.spacing = self.base_spacing
if self.spacing is not None:
ratio = (self.spacing / self.base_spacing).decompose().value
remainder = ratio - np.round(ratio)
if abs(remainder) > 1.0e-10:
warnings.warn(
"Spacing is not a multiple of base spacing - resetting spacing"
" to match format"
)
self.spacing = self.base_spacing * max(1, round(ratio))
elif not value.startswith("%"):
raise ValueError(f"Invalid format: {value}")
@property
def base_spacing(self):
return self._format_unit / (10.0**self._precision)
def locator(self, value_min, value_max):
if self.values is not None:
# values were manually specified
return self.values, 1.1 * self._unit
else:
# In the special case where value_min is the same as value_max, we
# don't locate any ticks. This can occur for example when taking a
# slice for a cube (along the dimension sliced).
if value_min == value_max:
return [] * self._unit, 0 * self._unit
if self.spacing is not None:
# spacing was manually specified
spacing = self.spacing.to_value(self._unit)
elif self.number is not None:
# number of ticks was specified, work out optimal spacing
# first compute the exact spacing
dv = abs(float(value_max - value_min)) / self.number * self._unit
if (
self.format is not None
and (not self.format.startswith("%"))
and dv < self.base_spacing
):
# if the spacing is less than the minimum spacing allowed by the format, simply
# use the format precision instead.
spacing = self.base_spacing.to_value(self._unit)
else:
from .utils import select_step_scalar
spacing = select_step_scalar(
dv.to_value(self._format_unit)
) * self._format_unit.to(self._unit)
# We now find the interval values as multiples of the spacing and
# generate the tick positions from this
values = self._locate_values(value_min, value_max, spacing)
return values * spacing * self._unit, spacing * self._unit
def formatter(self, values, spacing, format="auto"):
if len(values) > 0:
if self.format is None:
if spacing.value < 1.0:
precision = -int(np.floor(np.log10(spacing.value)))
else:
precision = 0
elif self.format.startswith("%"):
return [(self.format % x.value) for x in values]
else:
precision = self._precision
return [
("{0:." + str(precision) + "f}").format(x.to_value(self._format_unit))
for x in values
]
else:
return []
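

# Illustrative sketch (not part of the module API): how the coupled
# formatter/locator classes above are typically used together. The numbers
# are assumptions chosen purely for demonstration.
def _example_angle_formatter_locator():  # pragma: no cover - documentation sketch
    # With a 'dd:mm' format the base spacing is one arcminute, so any tick
    # positions chosen by ``locator`` can be rendered exactly by ``formatter``.
    fl = AngleFormatterLocator(number=4, format="dd:mm")
    values, spacing = fl.locator(338.0, 348.0)  # value range in degrees
    labels = fl.formatter(values, spacing)      # sexagesimal strings
    return values, spacing, labels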
|
e764a5c819278f7183d2c43535ad8840919a18cf7157bca9df926a1360e2e7b4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from collections import defaultdict
from functools import partial
import numpy as np
from matplotlib import rcParams
from matplotlib.artist import Artist
from matplotlib.axes import Axes, subplot_class_factory
from matplotlib.transforms import Affine2D, Bbox, Transform
from astropy.coordinates import BaseCoordinateFrame, SkyCoord
from astropy.utils import minversion
from astropy.utils.compat.optional_deps import HAS_PIL
from astropy.wcs import WCS
from astropy.wcs.wcsapi import BaseHighLevelWCS, BaseLowLevelWCS
from .coordinates_map import CoordinatesMap
from .frame import RectangularFrame, RectangularFrame1D
from .transforms import CoordinateTransform
from .utils import get_coord_meta, transform_contour_set_inplace
from .wcsapi import IDENTITY, transform_coord_meta_from_wcs
__all__ = ["WCSAxes", "WCSAxesSubplot"]
VISUAL_PROPERTIES = ["facecolor", "edgecolor", "linewidth", "alpha", "linestyle"]
class _WCSAxesArtist(Artist):
"""This is a dummy artist to enforce the correct z-order of axis ticks,
tick labels, and gridlines.
FIXME: This is a bit of a hack. ``Axes.draw`` sorts the artists by zorder
and then renders them in sequence. For normal Matplotlib axes, the ticks,
tick labels, and gridlines are included in this list of artists and hence
are automatically drawn in the correct order. However, ``WCSAxes`` disables
the native ticks, labels, and gridlines. Instead, ``WCSAxes.draw`` renders
ersatz ticks, labels, and gridlines by explicitly calling the functions
``CoordinateHelper._draw_ticks``, ``CoordinateHelper._draw_grid``, etc.
This hack would not be necessary if ``WCSAxes`` drew ticks, tick labels,
    and gridlines in the standard way.
"""
def draw(self, renderer):
self.axes.draw_wcsaxes(renderer)
class WCSAxes(Axes):
"""
The main axes class that can be used to show world coordinates from a WCS.
Parameters
----------
fig : `~matplotlib.figure.Figure`
The figure to add the axes to
*args
``*args`` can be a single ``(left, bottom, width, height)``
rectangle or a single `matplotlib.transforms.Bbox`. This specifies
the rectangle (in figure coordinates) where the Axes is positioned.
``*args`` can also consist of three numbers or a single three-digit
number; in the latter case, the digits are considered as
independent numbers. The numbers are interpreted as ``(nrows,
ncols, index)``: ``(nrows, ncols)`` specifies the size of an array
of subplots, and ``index`` is the 1-based index of the subplot
being created. Finally, ``*args`` can also directly be a
`matplotlib.gridspec.SubplotSpec` instance.
wcs : :class:`~astropy.wcs.WCS`, optional
The WCS for the data. If this is specified, ``transform`` cannot be
specified.
transform : `~matplotlib.transforms.Transform`, optional
The transform for the data. If this is specified, ``wcs`` cannot be
specified.
coord_meta : dict, optional
A dictionary providing additional metadata when ``transform`` is
specified. This should include the keys ``type``, ``wrap``, and
``unit``. Each of these should be a list with as many items as the
dimension of the WCS. The ``type`` entries should be one of
``longitude``, ``latitude``, or ``scalar``, the ``wrap`` entries should
give, for the longitude, the angle at which the coordinate wraps (and
`None` otherwise), and the ``unit`` should give the unit of the
coordinates as :class:`~astropy.units.Unit` instances. This can
optionally also include a ``format_unit`` entry giving the units to use
for the tick labels (if not specified, this defaults to ``unit``).
transData : `~matplotlib.transforms.Transform`, optional
Can be used to override the default data -> pixel mapping.
slices : tuple, optional
For WCS transformations with more than two dimensions, we need to
choose which dimensions are being shown in the 2D image. The slice
should contain one ``x`` entry, one ``y`` entry, and the rest of the
values should be integers indicating the slice through the data. The
order of the items in the slice should be the same as the order of the
dimensions in the :class:`~astropy.wcs.WCS`, and the opposite of the
order of the dimensions in Numpy. For example, ``(50, 'x', 'y')`` means
that the first WCS dimension (last Numpy dimension) will be sliced at
an index of 50, the second WCS and Numpy dimension will be shown on the
x axis, and the final WCS dimension (first Numpy dimension) will be
shown on the y-axis (and therefore the data will be plotted using
``data[:, :, 50].transpose()``)
frame_class : type, optional
The class for the frame, which should be a subclass of
:class:`~astropy.visualization.wcsaxes.frame.BaseFrame`. The default is to use a
:class:`~astropy.visualization.wcsaxes.frame.RectangularFrame`
Attributes
----------
coords : :class:`~astropy.visualization.wcsaxes.CoordinatesMap`
Container for coordinate information.
"""
def __init__(
self,
fig,
*args,
wcs=None,
transform=None,
coord_meta=None,
transData=None,
slices=None,
frame_class=None,
**kwargs,
):
""" """
super().__init__(fig, *args, **kwargs)
self._bboxes = []
if frame_class is not None:
self.frame_class = frame_class
elif wcs is not None and (
wcs.pixel_n_dim == 1 or (slices is not None and "y" not in slices)
):
self.frame_class = RectangularFrame1D
else:
self.frame_class = RectangularFrame
if transData is not None:
# User wants to override the transform for the final
# data->pixel mapping
self.transData = transData
self.reset_wcs(
wcs=wcs, slices=slices, transform=transform, coord_meta=coord_meta
)
self._hide_parent_artists()
self.format_coord = self._display_world_coords
self._display_coords_index = 0
fig.canvas.mpl_connect("key_press_event", self._set_cursor_prefs)
self.patch = self.coords.frame.patch
self._wcsaxesartist = _WCSAxesArtist()
self.add_artist(self._wcsaxesartist)
self._drawn = False
def _display_world_coords(self, x, y):
if not self._drawn:
return ""
if self._display_coords_index == -1:
return f"{x} {y} (pixel)"
pixel = np.array([x, y])
coords = self._all_coords[self._display_coords_index]
world = coords._transform.transform(np.array([pixel]))[0]
coord_strings = []
for idx, coord in enumerate(coords):
if coord.coord_index is not None:
coord_strings.append(
coord.format_coord(world[coord.coord_index], format="ascii")
)
coord_string = " ".join(coord_strings)
if self._display_coords_index == 0:
system = "world"
else:
system = f"world, overlay {self._display_coords_index}"
coord_string = f"{coord_string} ({system})"
return coord_string
def _set_cursor_prefs(self, event, **kwargs):
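        # Pressing the 'w' key cycles the cursor readout through the available
        # world coordinate systems (the main WCS and any overlays) and finally
        # the raw pixel coordinates (index -1).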
if event.key == "w":
self._display_coords_index += 1
if self._display_coords_index + 1 > len(self._all_coords):
self._display_coords_index = -1
def _hide_parent_artists(self):
# Turn off spines and current axes
for s in self.spines.values():
s.set_visible(False)
self.xaxis.set_visible(False)
if self.frame_class is not RectangularFrame1D:
self.yaxis.set_visible(False)
# We now overload ``imshow`` because we need to make sure that origin is
# set to ``lower`` for all images, which means that we need to flip RGB
# images.
def imshow(self, X, *args, **kwargs):
"""
Wrapper to Matplotlib's :meth:`~matplotlib.axes.Axes.imshow`.
If an RGB image is passed as a PIL object, it will be flipped
vertically and ``origin`` will be set to ``lower``, since WCS
transformations - like FITS files - assume that the origin is the lower
left pixel of the image (whereas RGB images have the origin in the top
left).
All arguments are passed to :meth:`~matplotlib.axes.Axes.imshow`.
"""
origin = kwargs.pop("origin", "lower")
# plt.imshow passes origin as None, which we should default to lower.
if origin is None:
origin = "lower"
elif origin == "upper":
raise ValueError("Cannot use images with origin='upper' in WCSAxes.")
if HAS_PIL:
from PIL.Image import Image
if minversion("PIL", "9.1"):
from PIL.Image import Transpose
FLIP_TOP_BOTTOM = Transpose.FLIP_TOP_BOTTOM
else:
from PIL.Image import FLIP_TOP_BOTTOM
if isinstance(X, Image) or hasattr(X, "getpixel"):
X = X.transpose(FLIP_TOP_BOTTOM)
return super().imshow(X, *args, origin=origin, **kwargs)
def contour(self, *args, **kwargs):
"""
Plot contours.
This is a custom implementation of :meth:`~matplotlib.axes.Axes.contour`
which applies the transform (if specified) to all contours in one go for
performance rather than to each contour line individually. All
positional and keyword arguments are the same as for
:meth:`~matplotlib.axes.Axes.contour`.
"""
# In Matplotlib, when calling contour() with a transform, each
# individual path in the contour map is transformed separately. However,
# this is much too slow for us since each call to the transforms results
# in an Astropy coordinate transformation, which has a non-negligible
# overhead - therefore a better approach is to override contour(), call
# the Matplotlib one with no transform, then apply the transform in one
# go to all the segments that make up the contour map.
transform = kwargs.pop("transform", None)
cset = super().contour(*args, **kwargs)
if transform is not None:
# The transform passed to self.contour will normally include
# a transData component at the end, but we can remove that since
# we are already working in data space.
transform = transform - self.transData
transform_contour_set_inplace(cset, transform)
return cset
def contourf(self, *args, **kwargs):
"""
Plot filled contours.
This is a custom implementation of :meth:`~matplotlib.axes.Axes.contourf`
which applies the transform (if specified) to all contours in one go for
performance rather than to each contour line individually. All
positional and keyword arguments are the same as for
:meth:`~matplotlib.axes.Axes.contourf`.
"""
# See notes for contour above.
transform = kwargs.pop("transform", None)
cset = super().contourf(*args, **kwargs)
if transform is not None:
# The transform passed to self.contour will normally include
# a transData component at the end, but we can remove that since
# we are already working in data space.
transform = transform - self.transData
transform_contour_set_inplace(cset, transform)
return cset
def _transform_plot_args(self, *args, **kwargs):
"""
Apply transformations to arguments to ``plot_coord`` and
``scatter_coord``.
"""
if isinstance(args[0], (SkyCoord, BaseCoordinateFrame)):
# Extract the frame from the first argument.
frame0 = args[0]
if isinstance(frame0, SkyCoord):
frame0 = frame0.frame
native_frame = self._transform_pixel2world.frame_out
# Transform to the native frame of the plot
frame0 = frame0.transform_to(native_frame)
plot_data = []
for coord in self.coords:
if coord.coord_type == "longitude":
plot_data.append(frame0.spherical.lon.to_value(coord.coord_unit))
elif coord.coord_type == "latitude":
plot_data.append(frame0.spherical.lat.to_value(coord.coord_unit))
else:
raise NotImplementedError(
"Coordinates cannot be plotted with this "
"method because the WCS does not represent longitude/latitude."
)
if "transform" in kwargs.keys():
raise TypeError(
"The 'transform' keyword argument is not allowed,"
" as it is automatically determined by the input coordinate frame."
)
transform = self.get_transform(native_frame)
kwargs.update({"transform": transform})
args = tuple(plot_data) + args[1:]
return args, kwargs
def plot_coord(self, *args, **kwargs):
"""
Plot `~astropy.coordinates.SkyCoord` or
`~astropy.coordinates.BaseCoordinateFrame` objects onto the axes.
The first argument to
:meth:`~astropy.visualization.wcsaxes.WCSAxes.plot_coord` should be a
coordinate, which will then be converted to the first two parameters to
`matplotlib.axes.Axes.plot`. All other arguments are the same as
`matplotlib.axes.Axes.plot`. If not specified a ``transform`` keyword
argument will be created based on the coordinate.
Parameters
----------
coordinate : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate object to plot on the axes. This is converted to the
first two arguments to `matplotlib.axes.Axes.plot`.
See Also
--------
matplotlib.axes.Axes.plot :
This method is called from this function with all arguments passed to it.
"""
args, kwargs = self._transform_plot_args(*args, **kwargs)
return super().plot(*args, **kwargs)
def text_coord(self, *args, **kwargs):
"""
Print a text string using `~astropy.coordinates.SkyCoord` or
`~astropy.coordinates.BaseCoordinateFrame` objects onto the axes.
The first argument to
:meth:`~astropy.visualization.wcsaxes.WCSAxes.text_coord` should be a
coordinate, which will then be converted to the first two parameters to
`matplotlib.axes.Axes.text`. All other arguments are the same as
`matplotlib.axes.Axes.text`. If not specified a ``transform`` keyword
argument will be created based on the coordinate.
Parameters
----------
coordinate : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate object to plot on the axes. This is converted to the
first two arguments to `matplotlib.axes.Axes.text`.
See Also
--------
matplotlib.axes.Axes.text :
This method is called from this function with all arguments passed to it.
"""
args, kwargs = self._transform_plot_args(*args, **kwargs)
return super().text(*args, **kwargs)
def scatter_coord(self, *args, **kwargs):
"""
Scatter `~astropy.coordinates.SkyCoord` or
`~astropy.coordinates.BaseCoordinateFrame` objects onto the axes.
The first argument to
:meth:`~astropy.visualization.wcsaxes.WCSAxes.scatter_coord` should be a
coordinate, which will then be converted to the first two parameters to
`matplotlib.axes.Axes.scatter`. All other arguments are the same as
`matplotlib.axes.Axes.scatter`. If not specified a ``transform``
keyword argument will be created based on the coordinate.
Parameters
----------
coordinate : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate object to scatter on the axes. This is converted to
the first two arguments to `matplotlib.axes.Axes.scatter`.
See Also
--------
matplotlib.axes.Axes.scatter : This method is called from this function with all arguments passed to it.
"""
args, kwargs = self._transform_plot_args(*args, **kwargs)
return super().scatter(*args, **kwargs)
def reset_wcs(self, wcs=None, slices=None, transform=None, coord_meta=None):
"""
Reset the current Axes, to use a new WCS object.
"""
# Here determine all the coordinate axes that should be shown.
if wcs is None and transform is None:
self.wcs = IDENTITY
else:
# We now force call 'set', which ensures the WCS object is
# consistent, which will only be important if the WCS has been set
# by hand. For example if the user sets a celestial WCS by hand and
# forgets to set the units, WCS.wcs.set() will do this.
if wcs is not None:
# Check if the WCS object is an instance of `astropy.wcs.WCS`
# This check is necessary as only `astropy.wcs.WCS` supports
# wcs.set() method
if isinstance(wcs, WCS):
wcs.wcs.set()
if isinstance(wcs, BaseHighLevelWCS):
wcs = wcs.low_level_wcs
self.wcs = wcs
# If we are making a new WCS, we need to preserve the path object since
# it may already be used by objects that have been plotted, and we need
# to continue updating it. CoordinatesMap will create a new frame
# instance, but we can tell that instance to keep using the old path.
if hasattr(self, "coords"):
previous_frame = {
"path": self.coords.frame._path,
"color": self.coords.frame.get_color(),
"linewidth": self.coords.frame.get_linewidth(),
}
else:
previous_frame = {"path": None}
if self.wcs is not None:
transform, coord_meta = transform_coord_meta_from_wcs(
self.wcs, self.frame_class, slices=slices
)
self.coords = CoordinatesMap(
self,
transform=transform,
coord_meta=coord_meta,
frame_class=self.frame_class,
previous_frame_path=previous_frame["path"],
)
self._transform_pixel2world = transform
if previous_frame["path"] is not None:
self.coords.frame.set_color(previous_frame["color"])
self.coords.frame.set_linewidth(previous_frame["linewidth"])
self._all_coords = [self.coords]
# Common default settings for Rectangular Frame
for ind, pos in enumerate(
coord_meta.get("default_axislabel_position", ["b", "l"])
):
self.coords[ind].set_axislabel_position(pos)
for ind, pos in enumerate(
coord_meta.get("default_ticklabel_position", ["b", "l"])
):
self.coords[ind].set_ticklabel_position(pos)
for ind, pos in enumerate(
coord_meta.get("default_ticks_position", ["bltr", "bltr"])
):
self.coords[ind].set_ticks_position(pos)
if rcParams["axes.grid"]:
self.grid()
def draw_wcsaxes(self, renderer):
if not self.axison:
return
# Here need to find out range of all coordinates, and update range for
# each coordinate axis. For now, just assume it covers the whole sky.
self._bboxes = []
# This generates a structure like [coords][axis] = [...]
ticklabels_bbox = defaultdict(partial(defaultdict, list))
visible_ticks = []
for coords in self._all_coords:
# Draw grids
coords.frame.update()
for coord in coords:
coord._draw_grid(renderer)
for coords in self._all_coords:
# Draw tick labels
for coord in coords:
coord._draw_ticks(
renderer,
bboxes=self._bboxes,
ticklabels_bbox=ticklabels_bbox[coord],
)
visible_ticks.extend(coord.ticklabels.get_visible_axes())
for coords in self._all_coords:
# Draw axis labels
for coord in coords:
coord._draw_axislabels(
renderer,
bboxes=self._bboxes,
ticklabels_bbox=ticklabels_bbox,
visible_ticks=visible_ticks,
)
self.coords.frame.draw(renderer)
def draw(self, renderer):
"""Draw the axes."""
# Before we do any drawing, we need to remove any existing grid lines
# drawn with contours, otherwise if we try and remove the contours
# part way through drawing, we end up with the issue mentioned in
# https://github.com/astropy/astropy/issues/12446
for coords in self._all_coords:
for coord in coords:
coord._clear_grid_contour()
# In Axes.draw, the following code can result in the xlim and ylim
# values changing, so we need to force call this here to make sure that
# the limits are correct before we update the patch.
locator = self.get_axes_locator()
if locator:
pos = locator(self, renderer)
self.apply_aspect(pos)
else:
self.apply_aspect()
if self._axisbelow is True:
self._wcsaxesartist.set_zorder(0.5)
elif self._axisbelow is False:
self._wcsaxesartist.set_zorder(2.5)
else:
# 'line': above patches, below lines
self._wcsaxesartist.set_zorder(1.5)
        # We need to make sure that the frame path is up to date
self.coords.frame._update_patch_path()
super().draw(renderer)
self._drawn = True
# Matplotlib internally sometimes calls set_xlabel(label=...).
def set_xlabel(self, xlabel=None, labelpad=1, loc=None, **kwargs):
"""Set x-label."""
if xlabel is None:
xlabel = kwargs.pop("label", None)
if xlabel is None:
raise TypeError(
"set_xlabel() missing 1 required positional argument: 'xlabel'"
)
for coord in self.coords:
if (
"b" in coord.axislabels.get_visible_axes()
or "h" in coord.axislabels.get_visible_axes()
):
coord.set_axislabel(xlabel, minpad=labelpad, **kwargs)
break
def set_ylabel(self, ylabel=None, labelpad=1, loc=None, **kwargs):
"""Set y-label."""
if ylabel is None:
ylabel = kwargs.pop("label", None)
if ylabel is None:
raise TypeError(
"set_ylabel() missing 1 required positional argument: 'ylabel'"
)
if self.frame_class is RectangularFrame1D:
return super().set_ylabel(ylabel, labelpad=labelpad, **kwargs)
for coord in self.coords:
if (
"l" in coord.axislabels.get_visible_axes()
or "c" in coord.axislabels.get_visible_axes()
):
coord.set_axislabel(ylabel, minpad=labelpad, **kwargs)
break
def get_xlabel(self):
for coord in self.coords:
if (
"b" in coord.axislabels.get_visible_axes()
or "h" in coord.axislabels.get_visible_axes()
):
return coord.get_axislabel()
def get_ylabel(self):
if self.frame_class is RectangularFrame1D:
return super().get_ylabel()
for coord in self.coords:
if (
"l" in coord.axislabels.get_visible_axes()
or "c" in coord.axislabels.get_visible_axes()
):
return coord.get_axislabel()
def get_coords_overlay(self, frame, coord_meta=None):
"""Get coordinates overlay on given frame.
Parameters
----------
frame : str, `~astropy.coordinates.BaseCoordinateFrame`
Frame to get overlay for. If a string must correspond to
one of the coordinate frames registered in the astropy
frame transform graph.
coord_meta : dict
Metadata for the coordinates overlay.
Returns
-------
overlay : `~astropy.visualization.wcsaxes.CoordinatesMap`
Coordinates overlay.
"""
# Here we can't use get_transform because that deals with
# pixel-to-pixel transformations when passing a WCS object.
if isinstance(frame, WCS):
transform, coord_meta = transform_coord_meta_from_wcs(
frame, self.frame_class
)
else:
transform = self._get_transform_no_transdata(frame)
if coord_meta is None:
coord_meta = get_coord_meta(frame)
coords = CoordinatesMap(
self,
transform=transform,
coord_meta=coord_meta,
frame_class=self.frame_class,
)
self._all_coords.append(coords)
# Common settings for overlay
coords[0].set_axislabel_position("t")
coords[1].set_axislabel_position("r")
coords[0].set_ticklabel_position("t")
coords[1].set_ticklabel_position("r")
self.overlay_coords = coords
return coords
def get_transform(self, frame):
"""
Return a transform from the specified frame to display coordinates.
This does not include the transData transformation
Parameters
----------
frame : :class:`~astropy.wcs.WCS` or :class:`~matplotlib.transforms.Transform` or str
The ``frame`` parameter can have several possible types:
* :class:`~astropy.wcs.WCS` instance: assumed to be a
transformation from pixel to world coordinates, where the
world coordinates are the same as those in the WCS
transformation used for this ``WCSAxes`` instance. This is
used for example to show contours, since this involves
plotting an array in pixel coordinates that are not the
final data coordinate and have to be transformed to the
common world coordinate system first.
* :class:`~matplotlib.transforms.Transform` instance: it is
assumed to be a transform to the world coordinates that are
part of the WCS used to instantiate this ``WCSAxes``
instance.
* ``'pixel'`` or ``'world'``: return a transformation that
allows users to plot in pixel/data coordinates (essentially
an identity transform) and ``world`` (the default
world-to-pixel transformation used to instantiate the
``WCSAxes`` instance).
* ``'fk5'`` or ``'galactic'``: return a transformation from
the specified frame to the pixel/data coordinates.
* :class:`~astropy.coordinates.BaseCoordinateFrame` instance.
"""
return self._get_transform_no_transdata(frame).inverted() + self.transData
def _get_transform_no_transdata(self, frame):
"""
Return a transform from data to the specified frame.
"""
if isinstance(frame, (BaseLowLevelWCS, BaseHighLevelWCS)):
if isinstance(frame, BaseHighLevelWCS):
frame = frame.low_level_wcs
transform, coord_meta = transform_coord_meta_from_wcs(
frame, self.frame_class
)
transform_world2pixel = transform.inverted()
if self._transform_pixel2world.frame_out == transform_world2pixel.frame_in:
return self._transform_pixel2world + transform_world2pixel
else:
return (
self._transform_pixel2world
+ CoordinateTransform(
self._transform_pixel2world.frame_out,
transform_world2pixel.frame_in,
)
+ transform_world2pixel
)
elif isinstance(frame, str) and frame == "pixel":
return Affine2D()
elif isinstance(frame, Transform):
return self._transform_pixel2world + frame
else:
if isinstance(frame, str) and frame == "world":
return self._transform_pixel2world
else:
coordinate_transform = CoordinateTransform(
self._transform_pixel2world.frame_out, frame
)
if coordinate_transform.same_frames:
return self._transform_pixel2world
else:
return self._transform_pixel2world + coordinate_transform
def get_tightbbox(self, renderer, *args, **kwargs):
# FIXME: we should determine what to do with the extra arguments here.
# Note that the expected signature of this method is different in
# Matplotlib 3.x compared to 2.x, but we only support 3.x now.
if not self.get_visible():
return
# Do a draw to populate the self._bboxes list
self.draw_wcsaxes(renderer)
bb = [b for b in self._bboxes if b and (b.width != 0 or b.height != 0)]
bb.append(super().get_tightbbox(renderer, *args, **kwargs))
if bb:
_bbox = Bbox.union(bb)
return _bbox
else:
return self.get_window_extent(renderer)
def grid(self, b=None, axis="both", *, which="major", **kwargs):
"""
Plot gridlines for both coordinates.
Standard matplotlib appearance options (color, alpha, etc.) can be
passed as keyword arguments. This behaves like `matplotlib.axes.Axes`
except that if no arguments are specified, the grid is shown rather
than toggled.
Parameters
----------
b : bool
Whether to show the gridlines.
axis : 'both', 'x', 'y'
Which axis to turn the gridlines on/off for.
which : str
Currently only ``'major'`` is supported.
"""
if not hasattr(self, "coords"):
return
if which != "major":
raise NotImplementedError(
"Plotting the grid for the minor ticks is not supported."
)
if axis == "both":
self.coords.grid(draw_grid=b, **kwargs)
elif axis == "x":
self.coords[0].grid(draw_grid=b, **kwargs)
elif axis == "y":
self.coords[1].grid(draw_grid=b, **kwargs)
else:
raise ValueError("axis should be one of x/y/both")
def tick_params(self, axis="both", **kwargs):
"""
Method to set the tick and tick label parameters in the same way as the
:meth:`~matplotlib.axes.Axes.tick_params` method in Matplotlib.
This is provided for convenience, but the recommended API is to use
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks_position`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel_position`,
and :meth:`~astropy.visualization.wcsaxes.CoordinateHelper.grid`.
Parameters
----------
axis : int or str, optional
Which axis to apply the parameters to. This defaults to 'both'
but this can also be set to an `int` or `str` that refers to the
axis to apply it to, following the valid values that can index
            ``ax.coords``. Note that ``'x'`` and ``'y'`` are also accepted in
the case of rectangular axes.
which : {'both', 'major', 'minor'}, optional
            Which ticks to apply the settings to. By default, settings are
applied to both major and minor ticks. Note that if ``'minor'`` is
specified, only the length of the ticks can be set currently.
direction : {'in', 'out'}, optional
Puts ticks inside the axes, or outside the axes.
length : float, optional
Tick length in points.
width : float, optional
Tick width in points.
color : color, optional
Tick color (accepts any valid Matplotlib color)
pad : float, optional
Distance in points between tick and label.
labelsize : float or str, optional
Tick label font size in points or as a string (e.g., 'large').
labelcolor : color, optional
Tick label color (accepts any valid Matplotlib color)
colors : color, optional
Changes the tick color and the label color to the same value
(accepts any valid Matplotlib color).
bottom, top, left, right : bool, optional
Where to draw the ticks. Note that this can only be given if a
specific coordinate is specified via the ``axis`` argument, and it
will not work correctly if the frame is not rectangular.
labelbottom, labeltop, labelleft, labelright : bool, optional
Where to draw the tick labels. Note that this can only be given if a
specific coordinate is specified via the ``axis`` argument, and it
will not work correctly if the frame is not rectangular.
grid_color : color, optional
The color of the grid lines (accepts any valid Matplotlib color).
grid_alpha : float, optional
Transparency of grid lines: 0 (transparent) to 1 (opaque).
grid_linewidth : float, optional
Width of grid lines in points.
grid_linestyle : str, optional
The style of the grid lines (accepts any valid Matplotlib line
style).
"""
if not hasattr(self, "coords"):
# Axes haven't been fully initialized yet, so just ignore, as
# Axes.__init__ calls this method
return
if axis == "both":
for pos in ("bottom", "left", "top", "right"):
if pos in kwargs:
raise ValueError(f"Cannot specify {pos}= when axis='both'")
if "label" + pos in kwargs:
raise ValueError(f"Cannot specify label{pos}= when axis='both'")
for coord in self.coords:
coord.tick_params(**kwargs)
elif axis in self.coords:
self.coords[axis].tick_params(**kwargs)
elif axis in ("x", "y") and self.frame_class is RectangularFrame:
spine = "b" if axis == "x" else "l"
for coord in self.coords:
if spine in coord.axislabels.get_visible_axes():
coord.tick_params(**kwargs)
# In the following, we put the generated subplot class in a temporary class and
# we then inherit it - if we don't do this, the generated class appears to
# belong in matplotlib, not in WCSAxes, from the API's point of view.
class WCSAxesSubplot(subplot_class_factory(WCSAxes)):
"""
    A subplot class for WCSAxes.
"""
pass
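

# Illustrative sketch (not part of the module API): a minimal end-to-end use of
# WCSAxes. The WCS header values and coordinates below are assumptions chosen
# purely for demonstration.
def _example_wcsaxes_usage():  # pragma: no cover - documentation sketch
    import matplotlib.pyplot as plt

    from astropy import units as u
    from astropy.coordinates import SkyCoord
    from astropy.wcs import WCS

    wcs = WCS(naxis=2)
    wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    wcs.wcs.crval = [120.0, -19.0]
    wcs.wcs.crpix = [128.0, 128.0]
    wcs.wcs.cdelt = [-0.01, 0.01]

    fig = plt.figure()
    ax = fig.add_subplot(projection=wcs)

    # plot_coord derives the transform from the coordinate's frame
    ax.plot_coord(SkyCoord(120.2 * u.deg, -19.1 * u.deg), "o")

    # Overlay a second coordinate grid in Galactic coordinates
    overlay = ax.get_coords_overlay("galactic")
    overlay.grid(color="gray", linestyle="dotted")

    # Grid for the primary world coordinates
    ax.grid(color="gray", alpha=0.5)
    return fig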
|
6a70ef736fd3d5f554096858df864f7a6740db83749926215dc291841cf32126 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
from textwrap import dedent
import matplotlib.pyplot as plt
import numpy as np
import pytest
from matplotlib.transforms import Affine2D, IdentityTransform
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.tests.figures import figure_test
from astropy.time import Time
from astropy.units import Quantity
from astropy.utils.data import get_pkg_data_filename
from astropy.visualization.wcsaxes.frame import RectangularFrame, RectangularFrame1D
from astropy.visualization.wcsaxes.wcsapi import (
WCSWorld2PixelTransform,
apply_slices,
transform_coord_meta_from_wcs,
)
from astropy.wcs import WCS
from astropy.wcs.wcsapi import BaseLowLevelWCS, SlicedLowLevelWCS
@pytest.fixture
def plt_close():
yield
plt.close("all")
WCS2D = WCS(naxis=2)
WCS2D.wcs.ctype = ["x", "y"]
WCS2D.wcs.cunit = ["km", "km"]
WCS2D.wcs.crpix = [614.5, 856.5]
WCS2D.wcs.cdelt = [6.25, 6.25]
WCS2D.wcs.crval = [0.0, 0.0]
WCS3D = WCS(naxis=3)
WCS3D.wcs.ctype = ["x", "y", "z"]
WCS3D.wcs.cunit = ["km", "km", "km"]
WCS3D.wcs.crpix = [614.5, 856.5, 333]
WCS3D.wcs.cdelt = [6.25, 6.25, 23]
WCS3D.wcs.crval = [0.0, 0.0, 1.0]
@pytest.fixture
def wcs_4d():
header = dedent(
"""\
WCSAXES = 4 / Number of coordinate axes
CRPIX1 = 0.0 / Pixel coordinate of reference point
CRPIX2 = 0.0 / Pixel coordinate of reference point
CRPIX3 = 0.0 / Pixel coordinate of reference point
CRPIX4 = 5.0 / Pixel coordinate of reference point
CDELT1 = 0.4 / [min] Coordinate increment at reference point
CDELT2 = 2E-11 / [m] Coordinate increment at reference point
CDELT3 = 0.0027777777777778 / [deg] Coordinate increment at reference point
CDELT4 = 0.0013888888888889 / [deg] Coordinate increment at reference point
CUNIT1 = 'min' / Units of coordinate increment and value
CUNIT2 = 'm' / Units of coordinate increment and value
CUNIT3 = 'deg' / Units of coordinate increment and value
CUNIT4 = 'deg' / Units of coordinate increment and value
CTYPE1 = 'TIME' / Coordinate type code
CTYPE2 = 'WAVE' / Vacuum wavelength (linear)
CTYPE3 = 'HPLT-TAN' / Coordinate type codegnomonic projection
CTYPE4 = 'HPLN-TAN' / Coordinate type codegnomonic projection
CRVAL1 = 0.0 / [min] Coordinate value at reference point
CRVAL2 = 0.0 / [m] Coordinate value at reference point
CRVAL3 = 0.0 / [deg] Coordinate value at reference point
CRVAL4 = 0.0 / [deg] Coordinate value at reference point
LONPOLE = 180.0 / [deg] Native longitude of celestial pole
LATPOLE = 0.0 / [deg] Native latitude of celestial pole
"""
)
return WCS(header=fits.Header.fromstring(header, sep="\n"))
@pytest.fixture
def cube_wcs():
cube_header = get_pkg_data_filename("data/cube_header")
header = fits.Header.fromtextfile(cube_header)
return WCS(header=header)
def test_shorthand_inversion():
"""
Test that the Matplotlib subtraction shorthand for composing and inverting
transformations works.
"""
w1 = WCS(naxis=2)
w1.wcs.ctype = ["RA---TAN", "DEC--TAN"]
w1.wcs.crpix = [256.0, 256.0]
w1.wcs.cdelt = [-0.05, 0.05]
w1.wcs.crval = [120.0, -19.0]
w2 = WCS(naxis=2)
w2.wcs.ctype = ["RA---SIN", "DEC--SIN"]
w2.wcs.crpix = [256.0, 256.0]
w2.wcs.cdelt = [-0.05, 0.05]
w2.wcs.crval = [235.0, +23.7]
t1 = WCSWorld2PixelTransform(w1)
t2 = WCSWorld2PixelTransform(w2)
assert t1 - t2 == t1 + t2.inverted()
assert t1 - t2 != t2.inverted() + t1
assert t1 - t1 == IdentityTransform()
# We add Affine2D to catch the fact that in Matplotlib, having a Composite
# transform can result in stricter requirements on the dimensionality.
def test_2d():
world = np.ones((10, 2))
w1 = WCSWorld2PixelTransform(WCS2D) + Affine2D()
pixel = w1.transform(world)
world_2 = w1.inverted().transform(pixel)
np.testing.assert_allclose(world, world_2)
def test_3d():
world = np.ones((10, 2))
w1 = WCSWorld2PixelTransform(WCS3D[:, 0, :]) + Affine2D()
pixel = w1.transform(world)
world_2 = w1.inverted().transform(pixel)
np.testing.assert_allclose(world[:, 0], world_2[:, 0])
np.testing.assert_allclose(world[:, 1], world_2[:, 1])
def test_coord_type_from_ctype(cube_wcs):
_, coord_meta = transform_coord_meta_from_wcs(
cube_wcs, RectangularFrame, slices=(50, "y", "x")
)
axislabel_position = coord_meta["default_axislabel_position"]
ticklabel_position = coord_meta["default_ticklabel_position"]
ticks_position = coord_meta["default_ticks_position"]
# These axes are swapped due to the pixel derivatives
assert axislabel_position == ["l", "r", "b"]
assert ticklabel_position == ["l", "r", "b"]
assert ticks_position == ["l", "r", "b"]
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["GLON-TAN", "GLAT-TAN"]
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.cname = ["Longitude", ""]
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta["type"] == ["longitude", "latitude"]
assert coord_meta["format_unit"] == [u.deg, u.deg]
assert coord_meta["wrap"] == [None, None]
assert coord_meta["default_axis_label"] == ["Longitude", "pos.galactic.lat"]
assert coord_meta["name"] == [
("pos.galactic.lon", "glon-tan", "glon", "Longitude"),
("pos.galactic.lat", "glat-tan", "glat"),
]
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["HPLN-TAN", "HPLT-TAN"]
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta["type"] == ["longitude", "latitude"]
assert coord_meta["format_unit"] == [u.arcsec, u.arcsec]
assert coord_meta["wrap"] == [180.0 * u.deg, None]
_, coord_meta = transform_coord_meta_from_wcs(
wcs, RectangularFrame, slices=("y", "x")
)
axislabel_position = coord_meta["default_axislabel_position"]
ticklabel_position = coord_meta["default_ticklabel_position"]
ticks_position = coord_meta["default_ticks_position"]
# These axes should be swapped because of slices
assert axislabel_position == ["l", "b"]
assert ticklabel_position == ["l", "b"]
assert ticks_position == ["bltr", "bltr"]
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["HGLN-TAN", "HGLT-TAN"]
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta["type"] == ["longitude", "latitude"]
assert coord_meta["format_unit"] == [u.deg, u.deg]
assert coord_meta["wrap"] == [180.0 * u.deg, None]
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["CRLN-TAN", "CRLT-TAN"]
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta["type"] == ["longitude", "latitude"]
assert coord_meta["format_unit"] == [u.deg, u.deg]
assert coord_meta["wrap"] == [360.0 * u.deg, None]
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta["type"] == ["longitude", "latitude"]
assert coord_meta["format_unit"] == [u.hourangle, u.deg]
assert coord_meta["wrap"] == [None, None]
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["spam", "spam"]
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta["type"] == ["scalar", "scalar"]
assert coord_meta["format_unit"] == [u.one, u.one]
assert coord_meta["wrap"] == [None, None]
def test_coord_type_1d_1d_wcs():
wcs = WCS(naxis=1)
wcs.wcs.ctype = ["WAVE"]
wcs.wcs.crpix = [256.0]
wcs.wcs.cdelt = [-0.05]
wcs.wcs.crval = [50.0]
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame1D)
assert coord_meta["type"] == ["scalar"]
assert coord_meta["format_unit"] == [u.m]
assert coord_meta["wrap"] == [None]
def test_coord_type_1d_2d_wcs_correlated():
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["GLON-TAN", "GLAT-TAN"]
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(
wcs, RectangularFrame1D, slices=("x", 0)
)
assert coord_meta["type"] == ["longitude", "latitude"]
assert coord_meta["format_unit"] == [u.deg, u.deg]
assert coord_meta["wrap"] == [None, None]
assert coord_meta["visible"] == [True, True]
def test_coord_type_1d_2d_wcs_uncorrelated():
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["WAVE", "UTC"]
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.cunit = ["nm", "s"]
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(
wcs, RectangularFrame1D, slices=("x", 0)
)
assert coord_meta["type"] == ["scalar", "scalar"]
assert coord_meta["format_unit"] == [u.m, u.s]
assert coord_meta["wrap"] == [None, None]
assert coord_meta["visible"] == [True, False]
def test_coord_meta_4d(wcs_4d):
_, coord_meta = transform_coord_meta_from_wcs(
wcs_4d, RectangularFrame, slices=(0, 0, "x", "y")
)
axislabel_position = coord_meta["default_axislabel_position"]
ticklabel_position = coord_meta["default_ticklabel_position"]
ticks_position = coord_meta["default_ticks_position"]
assert axislabel_position == ["", "", "b", "l"]
assert ticklabel_position == ["", "", "b", "l"]
assert ticks_position == ["", "", "bltr", "bltr"]
def test_coord_meta_4d_line_plot(wcs_4d):
_, coord_meta = transform_coord_meta_from_wcs(
wcs_4d, RectangularFrame1D, slices=(0, 0, 0, "x")
)
axislabel_position = coord_meta["default_axislabel_position"]
ticklabel_position = coord_meta["default_ticklabel_position"]
ticks_position = coord_meta["default_ticks_position"]
# These axes are swapped due to the pixel derivatives
assert axislabel_position == ["", "", "t", "b"]
assert ticklabel_position == ["", "", "t", "b"]
assert ticks_position == ["", "", "t", "b"]
@pytest.fixture
def sub_wcs(wcs_4d, wcs_slice):
return SlicedLowLevelWCS(wcs_4d, wcs_slice)
@pytest.mark.parametrize(
("wcs_slice", "wcsaxes_slices", "world_map", "ndim"),
[
(np.s_[...], [0, 0, "x", "y"], (2, 3), 2),
(np.s_[...], [0, "x", 0, "y"], (1, 2, 3), 3),
(np.s_[...], ["x", 0, 0, "y"], (0, 2, 3), 3),
(np.s_[...], ["x", "y", 0, 0], (0, 1), 2),
(np.s_[:, :, 0, :], [0, "x", "y"], (1, 2), 2),
(np.s_[:, :, 0, :], ["x", 0, "y"], (0, 1, 2), 3),
(np.s_[:, :, 0, :], ["x", "y", 0], (0, 1, 2), 3),
(np.s_[:, 0, :, :], ["x", "y", 0], (0, 1), 2),
],
)
def test_apply_slices(sub_wcs, wcs_slice, wcsaxes_slices, world_map, ndim):
transform_wcs, _, out_world_map = apply_slices(sub_wcs, wcsaxes_slices)
assert transform_wcs.world_n_dim == ndim
assert out_world_map == world_map
# parametrize here to pass to the fixture
@pytest.mark.parametrize("wcs_slice", [np.s_[:, :, 0, :]])
def test_sliced_ND_input(wcs_4d, sub_wcs, wcs_slice, plt_close):
slices_wcsaxes = [0, "x", "y"]
for sub_wcs_ in (sub_wcs, SlicedLowLevelWCS(wcs_4d, wcs_slice)):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
_, coord_meta = transform_coord_meta_from_wcs(
sub_wcs_, RectangularFrame, slices=slices_wcsaxes
)
assert all(len(x) == 3 for x in coord_meta.values())
assert coord_meta["name"] == [
"time",
("custom:pos.helioprojective.lat", "hplt-tan", "hplt"),
("custom:pos.helioprojective.lon", "hpln-tan", "hpln"),
]
assert coord_meta["type"] == ["scalar", "latitude", "longitude"]
assert coord_meta["wrap"] == [None, None, 180.0 * u.deg]
assert coord_meta["unit"] == [u.Unit("min"), u.Unit("deg"), u.Unit("deg")]
assert coord_meta["visible"] == [False, True, True]
assert coord_meta["format_unit"] == [
u.Unit("min"),
u.Unit("arcsec"),
u.Unit("arcsec"),
]
assert coord_meta["default_axislabel_position"] == ["", "b", "l"]
assert coord_meta["default_ticklabel_position"] == ["", "b", "l"]
assert coord_meta["default_ticks_position"] == ["", "bltr", "bltr"]
# Validate the axes initialize correctly
plt.clf()
plt.subplot(projection=sub_wcs_, slices=slices_wcsaxes)
class LowLevelWCS5D(BaseLowLevelWCS):
pixel_dim = 2
@property
def pixel_n_dim(self):
return self.pixel_dim
@property
def world_n_dim(self):
return 5
@property
def world_axis_physical_types(self):
return [
"em.freq",
"time",
"pos.eq.ra",
"pos.eq.dec",
"phys.polarization.stokes",
]
@property
def world_axis_units(self):
return ["Hz", "day", "deg", "deg", ""]
@property
def world_axis_names(self):
return ["Frequency", "", "RA", "DEC", ""]
def pixel_to_world_values(self, *pixel_arrays):
pixel_arrays = (list(pixel_arrays) * 3)[:-1] # make list have 5 elements
return [
np.asarray(pix) * scale
for pix, scale in zip(pixel_arrays, [10, 0.2, 0.4, 0.39, 2])
]
def world_to_pixel_values(self, *world_arrays):
world_arrays = world_arrays[:2] # make list have 2 elements
return [
np.asarray(world) / scale for world, scale in zip(world_arrays, [10, 0.2])
]
@property
def world_axis_object_components(self):
return [
("freq", 0, "value"),
("time", 0, "mjd"),
("celestial", 0, "spherical.lon.degree"),
("celestial", 1, "spherical.lat.degree"),
("stokes", 0, "value"),
]
@property
def world_axis_object_classes(self):
return {
"celestial": (SkyCoord, (), {"unit": "deg"}),
"time": (Time, (), {"format": "mjd"}),
"freq": (Quantity, (), {"unit": "Hz"}),
"stokes": (Quantity, (), {"unit": "one"}),
}
def test_edge_axes():
    # Check that axes on the edge of a spherical projection are shown properly
# (see https://github.com/astropy/astropy/issues/10441)
shape = [180, 360]
data = np.random.rand(*shape)
header = {
"wcsaxes": 2,
"crpix1": 180.5,
"crpix2": 90.5,
"cdelt1": 1.0,
"cdelt2": 1.0,
"cunit1": "deg",
"cunit2": "deg",
"ctype1": "CRLN-CAR",
"ctype2": "CRLT-CAR",
"crval1": 0.0,
"crval2": 0.0,
"lonpole": 0.0,
"latpole": 90.0,
}
wcs = WCS(header)
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=wcs)
ax.imshow(data, origin="lower")
# By default the x- and y- axes should be drawn
lon = ax.coords[0]
lat = ax.coords[1]
fig.canvas.draw()
np.testing.assert_equal(
lon.ticks.world["b"], np.array([90.0, 180.0, 180.0, 270.0, 0.0])
)
np.testing.assert_equal(
lat.ticks.world["l"], np.array([-90.0, -60.0, -30.0, 0.0, 30.0, 60.0, 90.0])
)
def test_coord_meta_wcsapi():
wcs = LowLevelWCS5D()
wcs.pixel_dim = 5
_, coord_meta = transform_coord_meta_from_wcs(
wcs, RectangularFrame, slices=[0, 0, "x", "y", 0]
)
assert coord_meta["name"] == [
("em.freq", "Frequency"),
"time",
("pos.eq.ra", "RA"),
("pos.eq.dec", "DEC"),
"phys.polarization.stokes",
]
assert coord_meta["type"] == ["scalar", "scalar", "longitude", "latitude", "scalar"]
assert coord_meta["wrap"] == [None, None, None, None, None]
assert coord_meta["unit"] == [
u.Unit("Hz"),
u.Unit("d"),
u.Unit("deg"),
u.Unit("deg"),
u.one,
]
assert coord_meta["visible"] == [True, True, True, True, True]
assert coord_meta["format_unit"] == [
u.Unit("Hz"),
u.Unit("d"),
u.Unit("hourangle"),
u.Unit("deg"),
u.one,
]
assert coord_meta["default_axislabel_position"] == ["b", "l", "t", "r", ""]
assert coord_meta["default_ticklabel_position"] == ["b", "l", "t", "r", ""]
assert coord_meta["default_ticks_position"] == ["b", "l", "t", "r", ""]
assert coord_meta["default_axis_label"] == [
"Frequency",
"time",
"RA",
"DEC",
"phys.polarization.stokes",
]
@figure_test
def test_wcsapi_5d_with_names(plt_close):
# Test for plotting image and also setting values of ticks
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=LowLevelWCS5D())
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
return fig
class LowLevelWCSCelestial2D(BaseLowLevelWCS):
# APE 14 WCS that has celestial coordinates that are deliberately not in degrees
@property
def pixel_n_dim(self):
return 2
@property
def world_n_dim(self):
return 2
@property
def world_axis_physical_types(self):
return [
"pos.eq.ra",
"pos.eq.dec",
]
@property
def world_axis_units(self):
return ["arcsec", "arcsec"]
@property
def world_axis_names(self):
return ["RA", "DEC"]
# Since the units are in arcsec, we can just go for an identity transform
# where 1 pixel = 1" since this is not completely unrealistic
def pixel_to_world_values(self, *pixel_arrays):
return pixel_arrays
def world_to_pixel_values(self, *world_arrays):
return world_arrays
@property
def world_axis_object_components(self):
return [
("celestial", 0, "spherical.lon.arcsec"),
("celestial", 1, "spherical.lat.arcsec"),
]
@property
def world_axis_object_classes(self):
return {
"celestial": (SkyCoord, (), {"unit": "arcsec"}),
}
@figure_test
def test_wcsapi_2d_celestial_arcsec(plt_close):
# Regression test for plot_coord/scatter_coord/text_coord with celestial WCS that is not in degrees
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.15, 0.1, 0.8, 0.8], projection=LowLevelWCSCelestial2D())
ax.set_xlim(-0.5, 200.5)
ax.set_ylim(-0.5, 200.5)
ax.coords[0].set_format_unit("arcsec")
ax.plot_coord(SkyCoord([50, 150], [100, 100], unit="arcsec"), "ro")
ax.scatter_coord(
SkyCoord([100, 100], [50, 150], unit="arcsec"), color="green", s=50
)
ax.text_coord(
SkyCoord(50, 50, unit="arcsec"),
"Plot Label",
color="blue",
ha="right",
va="top",
)
return fig
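

# -----------------------------------------------------------------------------
# Illustrative sketch only (underscore-prefixed, so not collected by pytest):
# the call pattern exercised throughout the tests above.
# ``transform_coord_meta_from_wcs`` returns a Matplotlib transform plus a
# ``coord_meta`` dict whose per-coordinate lists (``type``, ``unit``,
# ``format_unit``, ``wrap``, ``visible``, ...) are what the assertions in this
# module check. The simple galactic WCS below is an assumption made purely for
# demonstration.
def _example_coord_meta_inspection():  # pragma: no cover - documentation sketch
    wcs = WCS(naxis=2)
    wcs.wcs.ctype = ["GLON-TAN", "GLAT-TAN"]
    wcs.wcs.crpix = [128.0, 128.0]
    wcs.wcs.cdelt = [-0.05, 0.05]
    wcs.wcs.crval = [0.0, 0.0]
    wcs.wcs.set()

    _, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)

    # One entry per world coordinate, in world-axis order.
    for coord_type, format_unit in zip(coord_meta["type"], coord_meta["format_unit"]):
        print(coord_type, format_unit)

    return coord_meta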
|
280016b051efb4323ab5689b6f3e0abf029353c7956168b22a072e3ed57b5523 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import matplotlib.lines
import matplotlib.text
import matplotlib.pyplot as plt
import numpy as np
import pytest
from matplotlib import rc_context
from matplotlib.figure import Figure
from matplotlib.patches import Circle, Rectangle
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.tests.figures import figure_test
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyUserWarning
from astropy.visualization.wcsaxes import WCSAxes, add_beam, add_scalebar
from astropy.visualization.wcsaxes.frame import EllipticalFrame
from astropy.visualization.wcsaxes.patches import Quadrangle, SphericalCircle
from astropy.wcs import WCS
class BaseImageTests:
@classmethod
def setup_class(cls):
msx_header = get_pkg_data_filename("data/msx_header")
cls.msx_header = fits.Header.fromtextfile(msx_header)
rosat_header = get_pkg_data_filename("data/rosat_header")
cls.rosat_header = fits.Header.fromtextfile(rosat_header)
twoMASS_k_header = get_pkg_data_filename("data/2MASS_k_header")
cls.twoMASS_k_header = fits.Header.fromtextfile(twoMASS_k_header)
cube_header = get_pkg_data_filename("data/cube_header")
cls.cube_header = fits.Header.fromtextfile(cube_header)
slice_header = get_pkg_data_filename("data/slice_header")
cls.slice_header = fits.Header.fromtextfile(slice_header)
def teardown_method(self, method):
plt.close("all")
class TestBasic(BaseImageTests):
@figure_test
def test_tight_layout(self):
# Check that tight_layout works on a WCSAxes.
fig = plt.figure(figsize=(8, 6))
for i in (1, 2):
fig.add_subplot(2, 1, i, projection=WCS(self.msx_header))
fig.tight_layout()
return fig
@figure_test
def test_image_plot(self):
# Test for plotting image and also setting values of ticks
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8], projection=WCS(self.msx_header), aspect="equal"
)
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
ax.coords[0].set_ticks([-0.30, 0.0, 0.20] * u.degree, size=5, width=1)
return fig
@figure_test
def test_axes_off(self):
# Test for turning the axes off
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=WCS(self.msx_header))
ax.imshow(np.arange(12).reshape((3, 4)))
ax.set_axis_off()
return fig
@figure_test
@pytest.mark.parametrize("axisbelow", [True, False, "line"])
def test_axisbelow(self, axisbelow):
# Test that tick marks, labels, and gridlines are drawn with the
# correct zorder controlled by the axisbelow property.
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8], projection=WCS(self.msx_header), aspect="equal"
)
ax.set_axisbelow(axisbelow)
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
ax.coords[0].set_ticks([-0.30, 0.0, 0.20] * u.degree, size=5, width=1)
ax.grid()
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
# Add an image (default zorder=0).
ax.imshow(np.zeros((64, 64)))
# Add a patch (default zorder=1).
r = Rectangle((30.0, 50.0), 60.0, 50.0, facecolor="green", edgecolor="red")
ax.add_patch(r)
# Add a line (default zorder=2).
ax.plot([32, 128], [32, 128], linewidth=10)
return fig
@figure_test
def test_contour_overlay(self):
# Test for overlaying contours on images
path = get_pkg_data_filename("galactic_center/gc_msx_e.fits")
with fits.open(path) as pf:
data = pf[0].data
wcs_msx = WCS(self.msx_header)
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect="equal",
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
# Overplot contour
ax.contour(
data,
transform=ax.get_transform(wcs_msx),
colors="orange",
levels=[2.5e-5, 5e-5, 1.0e-4],
)
ax.coords[0].set_ticks(size=5, width=1)
ax.coords[1].set_ticks(size=5, width=1)
ax.set_xlim(0.0, 720.0)
ax.set_ylim(0.0, 720.0)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@figure_test
def test_contourf_overlay(self):
# Test for overlaying contours on images
path = get_pkg_data_filename("galactic_center/gc_msx_e.fits")
with fits.open(path) as pf:
data = pf[0].data
wcs_msx = WCS(self.msx_header)
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect="equal",
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
# Overplot contour
ax.contourf(
data, transform=ax.get_transform(wcs_msx), levels=[2.5e-5, 5e-5, 1.0e-4]
)
ax.coords[0].set_ticks(size=5, width=1)
ax.coords[1].set_ticks(size=5, width=1)
ax.set_xlim(0.0, 720.0)
ax.set_ylim(0.0, 720.0)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@figure_test
def test_overlay_features_image(self):
# Test for overlaying grid, changing format of ticks, setting spacing
# and number of ticks
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.25, 0.25, 0.65, 0.65], projection=WCS(self.msx_header), aspect="equal"
)
# Change the format of the ticks
ax.coords[0].set_major_formatter("dd:mm:ss")
ax.coords[1].set_major_formatter("dd:mm:ss.ssss")
# Overlay grid on image
ax.grid(color="red", alpha=1.0, lw=1, linestyle="dashed")
# Set the spacing of ticks on the 'glon' axis to 4 arcsec
ax.coords["glon"].set_ticks(spacing=4 * u.arcsec, size=5, width=1)
# Set the number of ticks on the 'glat' axis to 9
ax.coords["glat"].set_ticks(number=9, size=5, width=1)
# Set labels on axes
ax.coords["glon"].set_axislabel("Galactic Longitude", minpad=1.6)
ax.coords["glat"].set_axislabel("Galactic Latitude", minpad=-0.75)
# Change the frame linewidth and color
ax.coords.frame.set_color("red")
ax.coords.frame.set_linewidth(2)
assert ax.coords.frame.get_color() == "red"
assert ax.coords.frame.get_linewidth() == 2
return fig
@figure_test
def test_curvilinear_grid_patches_image(self):
# Overlay curvilinear grid and patches on image
fig = plt.figure(figsize=(8, 8))
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8], projection=WCS(self.rosat_header), aspect="equal"
)
ax.set_xlim(-0.5, 479.5)
ax.set_ylim(-0.5, 239.5)
ax.grid(color="black", alpha=1.0, lw=1, linestyle="dashed")
p = Circle((300, 100), radius=40, ec="yellow", fc="none")
ax.add_patch(p)
p = Circle(
(30.0, 20.0),
radius=20.0,
ec="orange",
fc="none",
transform=ax.get_transform("world"),
)
ax.add_patch(p)
p = Circle(
(60.0, 50.0),
radius=20.0,
ec="red",
fc="none",
transform=ax.get_transform("fk5"),
)
ax.add_patch(p)
p = Circle(
(40.0, 60.0),
radius=20.0,
ec="green",
fc="none",
transform=ax.get_transform("galactic"),
)
ax.add_patch(p)
return fig
@figure_test
def test_cube_slice_image(self):
# Test for cube slicing
fig = plt.figure()
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=(50, "y", "x"),
aspect="equal",
)
ax.set_xlim(-0.5, 52.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[2].set_axislabel("Velocity m/s")
ax.coords[1].set_ticks(spacing=0.2 * u.deg, width=1)
ax.coords[2].set_ticks(spacing=400 * u.m / u.s, width=1)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
ax.coords[2].set_ticklabel(exclude_overlapping=True)
ax.coords[0].grid(grid_type="contours", color="purple", linestyle="solid")
ax.coords[1].grid(grid_type="contours", color="orange", linestyle="solid")
ax.coords[2].grid(grid_type="contours", color="red", linestyle="solid")
return fig
@figure_test
def test_cube_slice_image_lonlat(self):
# Test for cube slicing. Here we test with longitude and latitude since
# there is some longitude-specific code in _update_grid_contour.
fig = plt.figure()
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=("x", "y", 50),
aspect="equal",
)
ax.set_xlim(-0.5, 106.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[0].grid(grid_type="contours", color="blue", linestyle="solid")
ax.coords[1].grid(grid_type="contours", color="red", linestyle="solid")
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@figure_test
def test_plot_coord(self):
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect="equal",
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord(266 * u.deg, -29 * u.deg)
lines = ax.plot_coord(c, "o")
# Test that plot_coord returns the results from ax.plot
assert isinstance(lines, list)
assert isinstance(lines[0], matplotlib.lines.Line2D)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@figure_test
def test_scatter_coord(self):
from matplotlib.collections import PathCollection
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect="equal",
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord(266 * u.deg, -29 * u.deg)
sc = ax.scatter_coord(c, marker="o")
        # Test that scatter_coord returns the results from ax.scatter
assert isinstance(sc, PathCollection)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@figure_test
def test_text_coord(self):
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect="equal",
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord(266 * u.deg, -29 * u.deg)
text = ax.text_coord(c, "Sample Label", color="blue", ha="right", va="top")
        # Test that text_coord returns the result from ax.text
assert isinstance(text, matplotlib.text.Text)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@figure_test
def test_plot_line(self):
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect="equal",
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord([266, 266.8] * u.deg, [-29, -28.9] * u.deg)
ax.plot_coord(c)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@figure_test
def test_changed_axis_units(self):
# Test to see if changing the units of axis works
fig = plt.figure()
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=(50, "y", "x"),
aspect="equal",
)
ax.set_xlim(-0.5, 52.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[0].set_ticks_position("")
ax.coords[0].set_ticklabel_position("")
ax.coords[0].set_axislabel_position("")
ax.coords[1].set_ticks_position("lr")
ax.coords[1].set_ticklabel_position("l")
ax.coords[1].set_axislabel_position("l")
ax.coords[2].set_ticks_position("bt")
ax.coords[2].set_ticklabel_position("b")
ax.coords[2].set_axislabel_position("b")
ax.coords[2].set_major_formatter("x.xx")
ax.coords[2].set_format_unit(u.km / u.s)
ax.coords[2].set_axislabel("Velocity km/s")
ax.coords[1].set_ticks(width=1)
ax.coords[2].set_ticks(width=1)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
ax.coords[2].set_ticklabel(exclude_overlapping=True)
return fig
@figure_test
def test_minor_ticks(self):
# Test for drawing minor ticks
fig = plt.figure()
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=(50, "y", "x"),
aspect="equal",
)
ax.set_xlim(-0.5, 52.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[0].set_ticks_position("")
ax.coords[0].set_ticklabel_position("")
ax.coords[0].set_axislabel_position("")
ax.coords[1].set_ticks_position("lr")
ax.coords[1].set_ticklabel_position("l")
ax.coords[1].set_axislabel_position("l")
ax.coords[2].set_ticks_position("bt")
ax.coords[2].set_ticklabel_position("b")
ax.coords[2].set_axislabel_position("b")
ax.coords[2].set_ticklabel(exclude_overlapping=True)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
ax.coords[2].display_minor_ticks(True)
ax.coords[1].display_minor_ticks(True)
ax.coords[2].set_minor_frequency(3)
ax.coords[1].set_minor_frequency(10)
return fig
@figure_test
def test_ticks_labels(self):
fig = plt.figure(figsize=(6, 6))
ax = WCSAxes(fig, [0.1, 0.1, 0.7, 0.7], wcs=None)
fig.add_axes(ax)
ax.set_xlim(-0.5, 2)
ax.set_ylim(-0.5, 2)
ax.coords[0].set_ticks(size=10, color="blue", alpha=0.2, width=1)
ax.coords[1].set_ticks(size=20, color="red", alpha=0.9, width=1)
ax.coords[0].set_ticks_position("all")
ax.coords[1].set_ticks_position("all")
ax.coords[0].set_axislabel("X-axis", size=20)
ax.coords[1].set_axislabel(
"Y-axis",
color="green",
size=25,
weight="regular",
style="normal",
family="cmtt10",
)
ax.coords[0].set_axislabel_position("t")
ax.coords[1].set_axislabel_position("r")
ax.coords[0].set_ticklabel(
color="purple",
size=15,
alpha=1,
weight="light",
style="normal",
family="cmss10",
)
ax.coords[1].set_ticklabel(
color="black", size=18, alpha=0.9, weight="bold", family="cmr10"
)
ax.coords[0].set_ticklabel_position("all")
ax.coords[1].set_ticklabel_position("r")
return fig
@figure_test
def test_no_ticks(self):
# Check that setting no ticks works
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8], projection=WCS(self.msx_header), aspect="equal"
)
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
ax.coords[0].set_ticks(number=0)
ax.coords[0].grid(True)
return fig
@figure_test
def test_rcparams(self):
# Test custom rcParams
with rc_context(
{
"axes.labelcolor": "purple",
"axes.labelsize": 14,
"axes.labelweight": "bold",
"axes.linewidth": 3,
"axes.facecolor": "0.5",
"axes.edgecolor": "green",
"xtick.color": "red",
"xtick.labelsize": 8,
"xtick.direction": "in",
"xtick.minor.visible": True,
"xtick.minor.size": 5,
"xtick.major.size": 20,
"xtick.major.width": 3,
"xtick.major.pad": 10,
"grid.color": "blue",
"grid.linestyle": ":",
"grid.linewidth": 1,
"grid.alpha": 0.5,
}
):
fig = plt.figure(figsize=(6, 6))
ax = WCSAxes(fig, [0.15, 0.1, 0.7, 0.7], wcs=None)
fig.add_axes(ax)
ax.set_xlim(-0.5, 2)
ax.set_ylim(-0.5, 2)
ax.grid()
ax.set_xlabel("X label")
ax.set_ylabel("Y label")
ax.coords[0].set_ticklabel(exclude_overlapping=True)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
return fig
@figure_test
def test_tick_angles(self):
# Test that tick marks point in the correct direction, even when the
# axes limits extend only over a few FITS pixels. Addresses #45, #46.
w = WCS()
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
w.wcs.crval = [90, 70]
w.wcs.cdelt = [16, 16]
w.wcs.crpix = [1, 1]
w.wcs.radesys = "ICRS"
w.wcs.equinox = 2000.0
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=w)
ax.set_xlim(1, -1)
ax.set_ylim(-1, 1)
ax.grid(color="gray", alpha=0.5, linestyle="solid")
ax.coords["ra"].set_ticks(color="red", size=20)
ax.coords["dec"].set_ticks(color="red", size=20)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@figure_test
def test_tick_angles_non_square_axes(self):
# Test that tick marks point in the correct direction, even when the
# axes limits extend only over a few FITS pixels, and the axes are
# non-square.
w = WCS()
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
w.wcs.crval = [90, 70]
w.wcs.cdelt = [16, 16]
w.wcs.crpix = [1, 1]
w.wcs.radesys = "ICRS"
w.wcs.equinox = 2000.0
fig = plt.figure(figsize=(6, 3))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=w)
ax.set_xlim(1, -1)
ax.set_ylim(-1, 1)
ax.grid(color="gray", alpha=0.5, linestyle="solid")
ax.coords["ra"].set_ticks(color="red", size=20)
ax.coords["dec"].set_ticks(color="red", size=20)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@figure_test
def test_set_coord_type(self):
# Test for setting coord_type
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes(
[0.2, 0.2, 0.6, 0.6], projection=WCS(self.msx_header), aspect="equal"
)
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
ax.coords[0].set_coord_type("scalar")
ax.coords[1].set_coord_type("scalar")
ax.coords[0].set_major_formatter("x.xxx")
ax.coords[1].set_major_formatter("x.xxx")
ax.coords[0].set_ticklabel(exclude_overlapping=True)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
return fig
@figure_test
def test_ticks_regression(self):
# Regression test for a bug that caused ticks aligned exactly with a
# sampled frame point to not appear. This also checks that tick labels
# don't get added more than once, and that no error occurs when e.g.
# the top part of the frame is all at the same coordinate as one of the
# potential ticks (which causes the tick angle calculation to return
# NaN).
wcs = WCS(self.slice_header)
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5], projection=wcs, aspect="auto")
limits = wcs.wcs_world2pix([0, 0], [35e3, 80e3], 0)[1]
ax.set_ylim(*limits)
ax.coords[0].set_ticks(spacing=0.002 * u.deg)
ax.coords[1].set_ticks(spacing=5 * u.km / u.s)
ax.coords[0].set_ticklabel(alpha=0.5) # to see multiple labels
ax.coords[1].set_ticklabel(alpha=0.5)
ax.coords[0].set_ticklabel_position("all")
ax.coords[1].set_ticklabel_position("all")
return fig
@figure_test
def test_axislabels_regression(self):
# Regression test for a bug that meant that if tick labels were made
# invisible with ``set_visible(False)``, they were still added to the
# list of bounding boxes for tick labels, but with default values of 0
# to 1, which caused issues.
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5], projection=wcs, aspect="auto")
ax.coords[0].set_axislabel("Label 1")
ax.coords[1].set_axislabel("Label 2")
ax.coords[1].set_axislabel_visibility_rule("always")
ax.coords[1].ticklabels.set_visible(False)
return fig
@figure_test(savefig_kwargs={"bbox_inches": "tight"})
def test_noncelestial_angular(self, tmp_path):
        # Regression test for a bug where, for a WCS that had angular axes and
        # was not recognized as celestial, using set_coord_type to set the
        # coordinates to longitude/latitude did not convert the WCS units to
        # deg, so we can't assume that transform will always return degrees.
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["solar-x", "solar-y"]
wcs.wcs.cunit = ["arcsec", "arcsec"]
fig = plt.figure(figsize=(3, 3))
ax = fig.add_subplot(1, 1, 1, projection=wcs)
ax.imshow(np.zeros([1024, 1024]), origin="lower")
ax.coords[0].set_coord_type("longitude", coord_wrap=180 * u.deg)
ax.coords[1].set_coord_type("latitude")
ax.coords[0].set_major_formatter("s.s")
ax.coords[1].set_major_formatter("s.s")
ax.coords[0].set_format_unit(u.arcsec, show_decimal_unit=False)
ax.coords[1].set_format_unit(u.arcsec, show_decimal_unit=False)
ax.grid(color="white", ls="solid")
# Force drawing (needed for format_coord)
fig.savefig(tmp_path / "nothing")
assert ax.format_coord(512, 512) == "513.0 513.0 (world)"
return fig
@figure_test
def test_patches_distortion(self, tmp_path):
# Check how patches get distorted (and make sure that scatter markers
# and SphericalCircle don't)
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5], projection=wcs, aspect="equal")
# Pixel coordinates
r = Rectangle((30.0, 50.0), 60.0, 50.0, edgecolor="green", facecolor="none")
ax.add_patch(r)
# FK5 coordinates
r = Rectangle(
(266.4, -28.9),
0.3,
0.3,
edgecolor="cyan",
facecolor="none",
transform=ax.get_transform("fk5"),
)
ax.add_patch(r)
# FK5 coordinates
c = Circle(
(266.4, -29.1),
0.15,
edgecolor="magenta",
facecolor="none",
transform=ax.get_transform("fk5"),
)
ax.add_patch(c)
# Pixel coordinates
ax.scatter(
[40, 100, 130],
[30, 130, 60],
s=100,
edgecolor="red",
facecolor=(1, 0, 0, 0.5),
)
# World coordinates (should not be distorted)
ax.scatter(
266.78238,
-28.769255,
transform=ax.get_transform("fk5"),
s=300,
edgecolor="red",
facecolor="none",
)
# World coordinates (should not be distorted)
r1 = SphericalCircle(
(266.4 * u.deg, -29.1 * u.deg),
0.15 * u.degree,
edgecolor="purple",
facecolor="none",
transform=ax.get_transform("fk5"),
)
ax.add_patch(r1)
r2 = SphericalCircle(
SkyCoord(266.4 * u.deg, -29.1 * u.deg),
0.15 * u.degree,
edgecolor="purple",
facecolor="none",
transform=ax.get_transform("fk5"),
)
with pytest.warns(
AstropyUserWarning,
match="Received `center` of representation type "
"<class 'astropy.coordinates.*CartesianRepresentation'> "
"will be converted to SphericalRepresentation",
):
r3 = SphericalCircle(
SkyCoord(
x=-0.05486461,
y=-0.87204803,
z=-0.48633538,
representation_type="cartesian",
),
0.15 * u.degree,
edgecolor="purple",
facecolor="none",
transform=ax.get_transform("fk5"),
)
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticklabel_visible(False)
        # Test to verify that SphericalCircle works irrespective of whether
        # the input (center) is a tuple or a SkyCoord object.
assert (r1.get_xy() == r2.get_xy()).all()
assert np.allclose(r1.get_xy(), r3.get_xy())
assert np.allclose(r2.get_xy()[0], [266.4, -29.25])
return fig
@figure_test
def test_quadrangle(self, tmp_path):
# Test that Quadrangle can have curved edges while Rectangle does not
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5], projection=wcs, aspect="equal")
ax.set_xlim(0, 10000)
ax.set_ylim(-10000, 0)
# Add a quadrangle patch (100 degrees by 20 degrees)
q = Quadrangle(
(255, -70) * u.deg,
100 * u.deg,
20 * u.deg,
label="Quadrangle",
edgecolor="blue",
facecolor="none",
transform=ax.get_transform("icrs"),
)
ax.add_patch(q)
# Add a rectangle patch (100 degrees by 20 degrees)
r = Rectangle(
(255, -70),
100,
20,
label="Rectangle",
edgecolor="red",
facecolor="none",
linestyle="--",
transform=ax.get_transform("icrs"),
)
ax.add_patch(r)
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticklabel_visible(False)
return fig
@figure_test
def test_beam_shape_from_args(self, tmp_path):
# Test for adding the beam shape with the beam parameters as arguments
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(4, 3))
ax = fig.add_axes([0.2, 0.2, 0.6, 0.6], projection=wcs, aspect="equal")
ax.set_xlim(-10, 10)
ax.set_ylim(-10, 10)
add_beam(
ax,
major=2 * u.arcmin,
minor=1 * u.arcmin,
angle=-30 * u.degree,
corner="bottom right",
frame=True,
borderpad=0.0,
pad=1.0,
color="black",
)
return fig
@figure_test
def test_beam_shape_from_header(self, tmp_path):
# Test for adding the beam shape with the beam parameters from a header
hdr = self.msx_header
hdr["BMAJ"] = (2 * u.arcmin).to(u.degree).value
hdr["BMIN"] = (1 * u.arcmin).to(u.degree).value
hdr["BPA"] = 30.0
wcs = WCS(hdr)
fig = plt.figure(figsize=(4, 3))
ax = fig.add_axes([0.2, 0.2, 0.6, 0.6], projection=wcs, aspect="equal")
ax.set_xlim(-10, 10)
ax.set_ylim(-10, 10)
add_beam(ax, header=hdr)
return fig
@figure_test
def test_scalebar(self, tmp_path):
# Test for adding a scale bar
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(4, 3))
ax = fig.add_axes([0.2, 0.2, 0.6, 0.6], projection=wcs, aspect="equal")
ax.set_xlim(-10, 10)
ax.set_ylim(-10, 10)
add_scalebar(
ax,
2 * u.arcmin,
label="2'",
corner="top right",
borderpad=1.0,
label_top=True,
)
return fig
@figure_test
def test_elliptical_frame(self):
# Regression test for a bug (astropy/astropy#6063) that caused labels to
# be incorrectly simplified.
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(5, 3))
fig.add_axes([0.2, 0.2, 0.6, 0.6], projection=wcs, frame_class=EllipticalFrame)
return fig
@figure_test
def test_hms_labels(self):
        # This tests the appearance of the hms superscripts in tick labels
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes(
[0.3, 0.2, 0.65, 0.6], projection=WCS(self.twoMASS_k_header), aspect="equal"
)
ax.set_xlim(-0.5, 0.5)
ax.set_ylim(-0.5, 0.5)
ax.coords[0].set_ticks(spacing=0.2 * 15 * u.arcsec)
return fig
@figure_test(style={"text.usetex": True})
def test_latex_labels(self):
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes(
[0.3, 0.2, 0.65, 0.6], projection=WCS(self.twoMASS_k_header), aspect="equal"
)
ax.set_xlim(-0.5, 0.5)
ax.set_ylim(-0.5, 0.5)
ax.coords[0].set_ticks(spacing=0.2 * 15 * u.arcsec)
return fig
@figure_test
def test_tick_params(self):
# This is a test to make sure that tick_params works correctly. We try
# and test as much as possible with a single reference image.
wcs = WCS()
wcs.wcs.ctype = ["lon", "lat"]
fig = plt.figure(figsize=(6, 6))
# The first subplot tests:
# - that plt.tick_params works
# - that by default both axes are changed
# - changing the tick direction and appearance, the label appearance and padding
ax = fig.add_subplot(2, 2, 1, projection=wcs)
plt.tick_params(
direction="in",
length=20,
width=5,
pad=6,
labelsize=6,
color="red",
labelcolor="blue",
)
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
# The second subplot tests:
# - that specifying grid parameters doesn't actually cause the grid to
# be shown (as expected)
# - that axis= can be given integer coordinates or their string name
# - that the tick positioning works (bottom/left/top/right)
# Make sure that we can pass things that can index coords
ax = fig.add_subplot(2, 2, 2, projection=wcs)
plt.tick_params(
axis=0,
direction="in",
length=20,
width=5,
pad=4,
labelsize=6,
color="red",
labelcolor="blue",
bottom=True,
grid_color="purple",
)
plt.tick_params(
axis="lat",
direction="out",
labelsize=8,
color="blue",
labelcolor="purple",
left=True,
right=True,
grid_color="red",
)
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
# The third subplot tests:
# - that ax.tick_params works
# - that the grid has the correct settings once shown explicitly
# - that we can use axis='x' and axis='y'
ax = fig.add_subplot(2, 2, 3, projection=wcs)
ax.tick_params(
axis="x",
direction="in",
length=20,
width=5,
pad=20,
labelsize=6,
color="red",
labelcolor="blue",
bottom=True,
grid_color="purple",
)
ax.tick_params(
axis="y",
direction="out",
labelsize=8,
color="blue",
labelcolor="purple",
left=True,
right=True,
grid_color="red",
)
plt.grid()
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
# The final subplot tests:
# - that we can use tick_params on a specific coordinate
# - that the label positioning can be customized
# - that the colors argument works
# - that which='minor' works
ax = fig.add_subplot(2, 2, 4, projection=wcs)
ax.coords[0].tick_params(
length=4,
pad=2,
colors="orange",
labelbottom=True,
labeltop=True,
labelsize=10,
)
ax.coords[1].display_minor_ticks(True)
ax.coords[1].tick_params(which="minor", length=6)
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
return fig
@pytest.fixture
def wave_wcs_1d():
wcs = WCS(naxis=1)
wcs.wcs.ctype = ["WAVE"]
wcs.wcs.cunit = ["m"]
wcs.wcs.crpix = [1]
wcs.wcs.cdelt = [5]
wcs.wcs.crval = [45]
wcs.wcs.set()
return wcs
@figure_test
def test_1d_plot_1d_wcs(wave_wcs_1d):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=wave_wcs_1d)
(lines,) = ax.plot([10, 12, 14, 12, 10])
ax.set_xlabel("this is the x-axis")
ax.set_ylabel("this is the y-axis")
return fig
@figure_test
def test_1d_plot_1d_wcs_format_unit(wave_wcs_1d):
"""
This test ensures that the format unit is updated and displayed for both
the axis ticks and default axis labels.
"""
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=wave_wcs_1d)
(lines,) = ax.plot([10, 12, 14, 12, 10])
ax.coords[0].set_format_unit("nm")
return fig
@pytest.fixture
def spatial_wcs_2d():
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["GLON-TAN", "GLAT-TAN"]
wcs.wcs.crpix = [3.0] * 2
wcs.wcs.cdelt = [15] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
return wcs
@figure_test
def test_1d_plot_2d_wcs_correlated(spatial_wcs_2d):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=spatial_wcs_2d, slices=("x", 0))
(lines,) = ax.plot([10, 12, 14, 12, 10], "-o", color="orange")
ax.coords["glon"].set_ticks(color="red")
ax.coords["glon"].set_ticklabel(color="red")
ax.coords["glon"].grid(color="red")
ax.coords["glat"].set_ticks(color="blue")
ax.coords["glat"].set_ticklabel(color="blue")
ax.coords["glat"].grid(color="blue")
return fig
@pytest.fixture
def spatial_wcs_2d_small_angle():
"""
This WCS has an almost linear correlation between the pixel and world axes
close to the reference pixel.
"""
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["HPLN-TAN", "HPLT-TAN"]
wcs.wcs.crpix = [3.0] * 2
wcs.wcs.cdelt = [10 / 3600, 5 / 3600]
wcs.wcs.crval = [0] * 2
wcs.wcs.set()
return wcs
@pytest.mark.parametrize(
"slices, bottom_axis",
[
# Remember SLLWCS takes slices in array order
(np.s_[0, :], "custom:pos.helioprojective.lon"),
(np.s_[:, 0], "custom:pos.helioprojective.lat"),
],
)
@figure_test
def test_1d_plot_1d_sliced_low_level_wcs(
spatial_wcs_2d_small_angle, slices, bottom_axis
):
"""
    Test that a SLLWCS through a coupled 2D WCS plots as a line correctly.
"""
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=spatial_wcs_2d_small_angle[slices])
(lines,) = ax.plot([10, 12, 14, 12, 10], "-o", color="orange")
# Draw to trigger rendering the ticks.
plt.draw()
assert ax.coords[bottom_axis].ticks.get_visible_axes() == ["b"]
return fig
@pytest.mark.parametrize(
"slices, bottom_axis", [(("x", 0), "hpln"), ((0, "x"), "hplt")]
)
@figure_test
def test_1d_plot_put_varying_axis_on_bottom_lon(
spatial_wcs_2d_small_angle, slices, bottom_axis
):
"""
When we plot a 1D slice through spatial axes, we want to put the axis which
actually changes on the bottom.
    For example, with an aligned WCS and pixel grid, plotting a lon slice along
    a lat axis would otherwise leave no ticks on the bottom (since the lon does
    not change) and put a set of lat ticks on the top, because lat does change
    but is the correlated axis rather than the one actually plotted against.
"""
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=spatial_wcs_2d_small_angle, slices=slices)
(lines,) = ax.plot([10, 12, 14, 12, 10], "-o", color="orange")
# Draw to trigger rendering the ticks.
plt.draw()
assert ax.coords[bottom_axis].ticks.get_visible_axes() == ["b"]
return fig
@figure_test
def test_allsky_labels_wrap():
# Regression test for a bug that caused some tick labels to not be shown
# when looking at all-sky maps in the case where coord_wrap < 360
fig = plt.figure(figsize=(4, 4))
icen = 0
for ctype in [("GLON-CAR", "GLAT-CAR"), ("HGLN-CAR", "HGLT-CAR")]:
for cen in [0, 90, 180, 270]:
icen += 1
wcs = WCS(naxis=2)
wcs.wcs.ctype = ctype
wcs.wcs.crval = cen, 0
wcs.wcs.crpix = 360.5, 180.5
wcs.wcs.cdelt = -0.5, 0.5
ax = fig.add_subplot(8, 1, icen, projection=wcs)
ax.set_xlim(-0.5, 719.5)
ax.coords[0].set_ticks(spacing=50 * u.deg)
ax.coords[0].set_ticks_position("b")
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
ax.coords[1].set_ticklabel_visible(False)
ax.coords[1].set_ticks_visible(False)
fig.subplots_adjust(hspace=2, left=0.05, right=0.95, bottom=0.1, top=0.95)
return fig
@figure_test
def test_tickable_gridlines():
wcs = WCS(
{
"naxis": 2,
"naxis1": 360,
"naxis2": 180,
"crpix1": 180.5,
"crpix2": 90.5,
"cdelt1": -1,
"cdelt2": 1,
"ctype1": "RA---CAR",
"ctype2": "DEC--CAR",
}
)
fig = Figure()
ax = fig.add_subplot(projection=wcs)
ax.set_xlim(-0.5, 360 - 0.5)
ax.set_ylim(-0.5, 150 - 0.5)
lon, lat = ax.coords
lon.grid()
lat.grid()
overlay = ax.get_coords_overlay("galactic")
overlay[0].set_ticks(spacing=30 * u.deg)
overlay[1].set_ticks(spacing=30 * u.deg)
# Test both single-character and multi-character names
overlay[1].add_tickable_gridline("g", -30 * u.deg)
overlay[0].add_tickable_gridline("const-glon", 30 * u.deg)
overlay[0].grid(color="magenta")
overlay[0].set_ticklabel_position("gt")
overlay[0].set_ticklabel(color="magenta")
overlay[0].set_axislabel("Galactic longitude", color="magenta")
overlay[1].grid(color="blue")
overlay[1].set_ticklabel_position(("const-glon", "r"))
overlay[1].set_ticklabel(color="blue")
overlay[1].set_axislabel("Galactic latitude", color="blue")
return fig
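

# -----------------------------------------------------------------------------
# Illustrative sketch only (underscore-prefixed, so not collected by pytest):
# the overlay pattern used by several tests above. Artists drawn with
# ``ax.get_transform(...)`` are positioned in world coordinates, while plain
# artists stay in pixel coordinates; ``SphericalCircle`` draws a circle of
# constant angular radius on the sky, whereas a plain ``Circle`` placed in
# world coordinates is distorted by the projection. The ``header`` parameter
# is a hypothetical placeholder for any celestial FITS header.
def _example_world_overlay(header):  # pragma: no cover - documentation sketch
    wcs = WCS(header)
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1, projection=wcs)

    # Pixel-coordinate patch: interpreted directly in pixel space.
    ax.add_patch(
        Circle((50.0, 50.0), radius=10.0, edgecolor="green", facecolor="none")
    )

    # World-coordinate patch: positioned through the FK5 transform.
    ax.add_patch(
        Circle(
            (266.4, -29.1),
            radius=0.15,
            edgecolor="magenta",
            facecolor="none",
            transform=ax.get_transform("fk5"),
        )
    )

    # Angular circle that keeps its shape on the sky.
    ax.add_patch(
        SphericalCircle(
            (266.4 * u.deg, -29.1 * u.deg),
            0.15 * u.deg,
            edgecolor="purple",
            facecolor="none",
            transform=ax.get_transform("fk5"),
        )
    )

    return fig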
|
0c540934c1376767a093946ebef969e9588a488c9dcccdd2adaea27e476c972f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pickle
import textwrap
from collections import OrderedDict
from itertools import chain, permutations
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.nddata import NDDataArray
from astropy.nddata import _testing as nd_testing
from astropy.nddata.nddata import NDData
from astropy.nddata.nduncertainty import StdDevUncertainty
from astropy.utils import NumpyRNGContext
from astropy.utils.masked import Masked
from astropy.wcs import WCS
from astropy.wcs.wcsapi import BaseHighLevelWCS, HighLevelWCSWrapper, SlicedLowLevelWCS
from .test_nduncertainty import FakeUncertainty
class FakeNumpyArray:
"""
Class that has a few of the attributes of a numpy array.
These attributes are checked for by NDData.
"""
def __init__(self):
super().__init__()
def shape(self):
pass
def __getitem__(self, key):
pass
def __array__(self):
pass
@property
def dtype(self):
return "fake"
class MinimalUncertainty:
"""
Define the minimum attributes acceptable as an uncertainty object.
"""
def __init__(self, value):
self._uncertainty = value
@property
def uncertainty_type(self):
return "totally and completely fake"
class BadNDDataSubclass(NDData):
def __init__(
self,
data,
uncertainty=None,
mask=None,
wcs=None,
meta=None,
unit=None,
psf=None,
):
self._data = data
self._uncertainty = uncertainty
self._mask = mask
self._wcs = wcs
self._psf = psf
self._unit = unit
self._meta = meta
# Setter tests
def test_uncertainty_setter():
nd = NDData([1, 2, 3])
good_uncertainty = MinimalUncertainty(5)
nd.uncertainty = good_uncertainty
assert nd.uncertainty is good_uncertainty
# Check the fake uncertainty (minimal does not work since it has no
# parent_nddata attribute from NDUncertainty)
nd.uncertainty = FakeUncertainty(5)
assert nd.uncertainty.parent_nddata is nd
# Check that it works if the uncertainty was set during init
nd = NDData(nd)
assert isinstance(nd.uncertainty, FakeUncertainty)
nd.uncertainty = 10
assert not isinstance(nd.uncertainty, FakeUncertainty)
assert nd.uncertainty.array == 10
def test_mask_setter():
# Since it just changes the _mask attribute everything should work
nd = NDData([1, 2, 3])
nd.mask = True
assert nd.mask
nd.mask = False
assert not nd.mask
# Check that it replaces a mask from init
nd = NDData(nd, mask=True)
assert nd.mask
nd.mask = False
assert not nd.mask
# Init tests
def test_nddata_empty():
with pytest.raises(TypeError):
NDData() # empty initializer should fail
def test_nddata_init_data_nonarray():
inp = [1, 2, 3]
nd = NDData(inp)
assert (np.array(inp) == nd.data).all()
def test_nddata_init_data_ndarray():
# random floats
with NumpyRNGContext(123):
nd = NDData(np.random.random((10, 10)))
assert nd.data.shape == (10, 10)
assert nd.data.size == 100
assert nd.data.dtype == np.dtype(float)
# specific integers
nd = NDData(np.array([[1, 2, 3], [4, 5, 6]]))
assert nd.data.size == 6
assert nd.data.dtype == np.dtype(int)
# Tests to ensure that creating a new NDData object copies by *reference*.
a = np.ones((10, 10))
nd_ref = NDData(a)
a[0, 0] = 0
assert nd_ref.data[0, 0] == 0
# Except we choose copy=True
a = np.ones((10, 10))
nd_ref = NDData(a, copy=True)
a[0, 0] = 0
assert nd_ref.data[0, 0] != 0
def test_nddata_init_data_maskedarray():
with NumpyRNGContext(456):
NDData(np.random.random((10, 10)), mask=np.random.random((10, 10)) > 0.5)
# Another test (just copied here)
with NumpyRNGContext(12345):
a = np.random.randn(100)
marr = np.ma.masked_where(a > 0, a)
nd = NDData(marr)
# check that masks and data match
assert_array_equal(nd.mask, marr.mask)
assert_array_equal(nd.data, marr.data)
# check that they are both by reference
marr.mask[10] = ~marr.mask[10]
marr.data[11] = 123456789
assert_array_equal(nd.mask, marr.mask)
assert_array_equal(nd.data, marr.data)
# or not if we choose copy=True
nd = NDData(marr, copy=True)
marr.mask[10] = ~marr.mask[10]
marr.data[11] = 0
assert nd.mask[10] != marr.mask[10]
assert nd.data[11] != marr.data[11]
@pytest.mark.parametrize("data", [np.array([1, 2, 3]), 5])
def test_nddata_init_data_quantity(data):
# Test an array and a scalar because a scalar Quantity does not always
# behave the same way as an array.
quantity = data * u.adu
ndd = NDData(quantity)
assert ndd.unit == quantity.unit
assert_array_equal(ndd.data, np.array(quantity))
if ndd.data.size > 1:
# check that if it is an array it is not copied
quantity.value[1] = 100
assert ndd.data[1] == quantity.value[1]
# or is copied if we choose copy=True
ndd = NDData(quantity, copy=True)
quantity.value[1] = 5
assert ndd.data[1] != quantity.value[1]
# provide a quantity and override the unit
ndd_unit = NDData(data * u.erg, unit=u.J)
assert ndd_unit.unit == u.J
np.testing.assert_allclose((ndd_unit.data * ndd_unit.unit).to_value(u.erg), data)
def test_nddata_init_data_masked_quantity():
a = np.array([2, 3])
q = a * u.m
m = False
mq = Masked(q, mask=m)
nd = NDData(mq)
assert_array_equal(nd.data, a)
    # This test failed before the change in nddata init because the masked
    # array's data (which in fact was a quantity) was directly saved
assert nd.unit == u.m
assert not isinstance(nd.data, u.Quantity)
np.testing.assert_array_equal(nd.mask, np.array(m))
def test_nddata_init_data_nddata():
nd1 = NDData(np.array([1]))
nd2 = NDData(nd1)
assert nd2.wcs == nd1.wcs
assert nd2.uncertainty == nd1.uncertainty
assert nd2.mask == nd1.mask
assert nd2.unit == nd1.unit
assert nd2.meta == nd1.meta
assert nd2.psf == nd1.psf
# Check that it is copied by reference
nd1 = NDData(np.ones((5, 5)))
nd2 = NDData(nd1)
assert nd1.data is nd2.data
# Check that it is really copied if copy=True
nd2 = NDData(nd1, copy=True)
nd1.data[2, 3] = 10
assert nd1.data[2, 3] != nd2.data[2, 3]
# Now let's see what happens if we have all explicitly set
nd1 = NDData(
np.array([1]),
mask=False,
uncertainty=StdDevUncertainty(10),
unit=u.s,
meta={"dest": "mordor"},
wcs=WCS(naxis=1),
psf=np.array([10]),
)
nd2 = NDData(nd1)
assert nd2.data is nd1.data
assert nd2.wcs is nd1.wcs
assert nd2.uncertainty.array == nd1.uncertainty.array
assert nd2.mask == nd1.mask
assert nd2.unit == nd1.unit
assert nd2.meta == nd1.meta
assert nd2.psf == nd1.psf
# now what happens if we overwrite them all too
nd3 = NDData(
nd1,
mask=True,
uncertainty=StdDevUncertainty(200),
unit=u.km,
meta={"observer": "ME"},
wcs=WCS(naxis=1),
psf=np.array([20]),
)
assert nd3.data is nd1.data
assert nd3.wcs is not nd1.wcs
assert nd3.uncertainty.array != nd1.uncertainty.array
assert nd3.mask != nd1.mask
assert nd3.unit != nd1.unit
assert nd3.meta != nd1.meta
assert nd3.psf != nd1.psf
def test_nddata_init_data_nddata_subclass():
uncert = StdDevUncertainty(3)
# There might be some incompatible subclasses of NDData around.
bnd = BadNDDataSubclass(False, True, 3, 2, "gollum", 100, 12)
# Before changing the NDData init this would not have raised an error but
    # would have led to a compromised nddata instance
with pytest.raises(TypeError):
NDData(bnd)
# but if it has no actual incompatible attributes it passes
bnd_good = BadNDDataSubclass(
np.array([1, 2]),
uncert,
3,
HighLevelWCSWrapper(WCS(naxis=1)),
{"enemy": "black knight"},
u.km,
)
nd = NDData(bnd_good)
assert nd.unit == bnd_good.unit
assert nd.meta == bnd_good.meta
assert nd.uncertainty == bnd_good.uncertainty
assert nd.mask == bnd_good.mask
assert nd.wcs is bnd_good.wcs
assert nd.data is bnd_good.data
def test_nddata_init_data_fail():
# First one is sliceable but has no shape, so should fail.
with pytest.raises(TypeError):
NDData({"a": "dict"})
# This has a shape but is not sliceable
class Shape:
def __init__(self):
self.shape = 5
def __repr__(self):
return "7"
with pytest.raises(TypeError):
NDData(Shape())
def test_nddata_init_data_fakes():
ndd1 = NDData(FakeNumpyArray())
# First make sure that NDData isn't converting its data to a numpy array.
assert isinstance(ndd1.data, FakeNumpyArray)
# Make a new NDData initialized from an NDData
ndd2 = NDData(ndd1)
# Check that the data wasn't converted to numpy
assert isinstance(ndd2.data, FakeNumpyArray)
# Specific parameters
def test_param_uncertainty():
u = StdDevUncertainty(array=np.ones((5, 5)))
d = NDData(np.ones((5, 5)), uncertainty=u)
# Test that the parent_nddata is set.
assert d.uncertainty.parent_nddata is d
# Test conflicting uncertainties (other NDData)
u2 = StdDevUncertainty(array=np.ones((5, 5)) * 2)
d2 = NDData(d, uncertainty=u2)
assert d2.uncertainty is u2
assert d2.uncertainty.parent_nddata is d2
def test_param_wcs():
# Since everything is allowed we only need to test something
nd = NDData([1], wcs=WCS(naxis=1))
assert nd.wcs is not None
# Test conflicting wcs (other NDData)
nd2 = NDData(nd, wcs=WCS(naxis=1))
assert nd2.wcs is not None and nd2.wcs is not nd.wcs
def test_param_meta():
# everything dict-like is allowed
with pytest.raises(TypeError):
NDData([1], meta=3)
nd = NDData([1, 2, 3], meta={})
assert len(nd.meta) == 0
nd = NDData([1, 2, 3])
assert isinstance(nd.meta, OrderedDict)
assert len(nd.meta) == 0
# Test conflicting meta (other NDData)
nd2 = NDData(nd, meta={"image": "sun"})
assert len(nd2.meta) == 1
nd3 = NDData(nd2, meta={"image": "moon"})
assert len(nd3.meta) == 1
assert nd3.meta["image"] == "moon"
def test_param_mask():
# Since everything is allowed we only need to test something
nd = NDData([1], mask=False)
assert not nd.mask
# Test conflicting mask (other NDData)
nd2 = NDData(nd, mask=True)
assert nd2.mask
# (masked array)
nd3 = NDData(np.ma.array([1], mask=False), mask=True)
assert nd3.mask
# (masked quantity)
mq = np.ma.array(np.array([2, 3]) * u.m, mask=False)
nd4 = NDData(mq, mask=True)
assert nd4.mask
def test_param_unit():
with pytest.raises(ValueError):
NDData(np.ones((5, 5)), unit="NotAValidUnit")
NDData([1, 2, 3], unit="meter")
# Test conflicting units (quantity as data)
q = np.array([1, 2, 3]) * u.m
nd = NDData(q, unit="cm")
assert nd.unit != q.unit
assert nd.unit == u.cm
# (masked quantity)
mq = np.ma.array(np.array([2, 3]) * u.m, mask=False)
nd2 = NDData(mq, unit=u.pc)
assert nd2.unit == u.pc
# (another NDData as data)
nd3 = NDData(nd, unit="km")
assert nd3.unit == u.km
# (MaskedQuantity given to NDData)
mq_astropy = Masked.from_unmasked(q, False)
nd4 = NDData(mq_astropy, unit="km")
assert nd4.unit == u.km
def test_pickle_nddata_with_uncertainty():
ndd = NDData(
np.ones(3), uncertainty=StdDevUncertainty(np.ones(5), unit=u.m), unit=u.m
)
ndd_dumped = pickle.dumps(ndd)
ndd_restored = pickle.loads(ndd_dumped)
assert type(ndd_restored.uncertainty) is StdDevUncertainty
assert ndd_restored.uncertainty.parent_nddata is ndd_restored
assert ndd_restored.uncertainty.unit == u.m
def test_pickle_uncertainty_only():
ndd = NDData(
np.ones(3), uncertainty=StdDevUncertainty(np.ones(5), unit=u.m), unit=u.m
)
uncertainty_dumped = pickle.dumps(ndd.uncertainty)
uncertainty_restored = pickle.loads(uncertainty_dumped)
np.testing.assert_array_equal(ndd.uncertainty.array, uncertainty_restored.array)
assert ndd.uncertainty.unit == uncertainty_restored.unit
    # Even though it has a parent, nothing references that parent after
    # unpickling, so the weakref "dies" as soon as unpickling finishes.
assert uncertainty_restored.parent_nddata is None
def test_pickle_nddata_without_uncertainty():
ndd = NDData(np.ones(3), unit=u.m)
dumped = pickle.dumps(ndd)
ndd_restored = pickle.loads(dumped)
np.testing.assert_array_equal(ndd.data, ndd_restored.data)
# Check that the meta descriptor is working as expected. The MetaBaseTest class
# takes care of defining all the tests, and we simply have to define the class
# and any minimal set of args to pass.
from astropy.utils.tests.test_metadata import MetaBaseTest
class TestMetaNDData(MetaBaseTest):
test_class = NDData
args = np.array([[1.0]])
# Representation tests
def test_nddata_str():
arr1d = NDData(np.array([1, 2, 3]))
assert str(arr1d) == "[1 2 3]"
arr2d = NDData(np.array([[1, 2], [3, 4]]))
assert str(arr2d) == textwrap.dedent(
"""
[[1 2]
[3 4]]"""[
1:
]
)
arr3d = NDData(np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]))
assert str(arr3d) == textwrap.dedent(
"""
[[[1 2]
[3 4]]
[[5 6]
[7 8]]]"""[
1:
]
)
# let's add units!
arr = NDData(np.array([1, 2, 3]), unit="km")
assert str(arr) == "[1 2 3] km"
# what if it had these units?
arr = NDData(np.array([1, 2, 3]), unit="erg cm^-2 s^-1 A^-1")
assert str(arr) == "[1 2 3] erg / (A s cm2)"
def test_nddata_repr():
# The big test is eval(repr()) should be equal to the original!
arr1d = NDData(np.array([1, 2, 3]))
s = repr(arr1d)
assert s == "NDData([1, 2, 3])"
got = eval(s)
assert np.all(got.data == arr1d.data)
assert got.unit == arr1d.unit
arr2d = NDData(np.array([[1, 2], [3, 4]]))
s = repr(arr2d)
assert s == textwrap.dedent(
"""
NDData([[1, 2],
[3, 4]])"""[
1:
]
)
got = eval(s)
assert np.all(got.data == arr2d.data)
assert got.unit == arr2d.unit
arr3d = NDData(np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]))
s = repr(arr3d)
assert s == textwrap.dedent(
"""
NDData([[[1, 2],
[3, 4]],
[[5, 6],
[7, 8]]])"""[
1:
]
)
got = eval(s)
assert np.all(got.data == arr3d.data)
assert got.unit == arr3d.unit
# let's add units!
arr = NDData(np.array([1, 2, 3]), unit="km")
s = repr(arr)
assert s == "NDData([1, 2, 3], unit='km')"
got = eval(s)
assert np.all(got.data == arr.data)
assert got.unit == arr.unit
# Not supported features
def test_slicing_not_supported():
ndd = NDData(np.ones((5, 5)))
with pytest.raises(TypeError):
ndd[0]
def test_arithmetic_not_supported():
ndd = NDData(np.ones((5, 5)))
with pytest.raises(TypeError):
ndd + ndd
def test_nddata_wcs_setter_error_cases():
ndd = NDData(np.ones((5, 5)))
# Setting with a non-WCS should raise an error
with pytest.raises(TypeError):
ndd.wcs = "I am not a WCS"
naxis = 2
# This should succeed since the WCS is currently None
ndd.wcs = nd_testing._create_wcs_simple(
naxis=naxis,
ctype=["deg"] * naxis,
crpix=[0] * naxis,
crval=[10] * naxis,
cdelt=[1] * naxis,
)
with pytest.raises(ValueError):
# This should fail since the WCS is not None
ndd.wcs = nd_testing._create_wcs_simple(
naxis=naxis,
ctype=["deg"] * naxis,
crpix=[0] * naxis,
crval=[10] * naxis,
cdelt=[1] * naxis,
)
def test_nddata_wcs_setter_with_low_level_wcs():
ndd = NDData(np.ones((5, 5)))
wcs = WCS()
# If the wcs property is set with a low level WCS it should get
# wrapped to high level.
low_level = SlicedLowLevelWCS(wcs, 5)
assert not isinstance(low_level, BaseHighLevelWCS)
ndd.wcs = low_level
assert isinstance(ndd.wcs, BaseHighLevelWCS)
def test_nddata_init_with_low_level_wcs():
wcs = WCS()
low_level = SlicedLowLevelWCS(wcs, 5)
ndd = NDData(np.ones((5, 5)), wcs=low_level)
assert isinstance(ndd.wcs, BaseHighLevelWCS)
class NDDataCustomWCS(NDData):
@property
def wcs(self):
return WCS()
def test_overriden_wcs():
# Check that a sub-class that overrides `.wcs` without providing a setter
# works
NDDataCustomWCS(np.ones((5, 5)))
# set up parameters for test_collapse:
np.random.seed(42)
collapse_units = [None, u.Jy]
collapse_propagate = [True, False]
collapse_data_shapes = [
# 3D example:
(4, 3, 2),
# 5D example
(6, 5, 4, 3, 2),
]
collapse_ignore_masked = [True, False]
collapse_masks = list(
chain.from_iterable(
[
# try the operations without a mask (all False):
np.zeros(collapse_data_shape).astype(bool)
]
+ [
# assemble a bunch of random masks:
np.random.randint(0, 2, size=collapse_data_shape).astype(bool)
for _ in range(10)
]
for collapse_data_shape in collapse_data_shapes
)
)
# The following tiles each parameter list to a common length so that a single
# zip() in pytest.mark.parametrize pairs up (1) the masks built above, (2) the
# units, (3) whether uncertainties are propagated, and (4) whether the
# operation ignores the mask (see the illustrative note after the tiling).
permute = (
len(collapse_masks)
* len(collapse_propagate)
* len(collapse_units)
* len(collapse_ignore_masked)
)
collapse_units = permute // len(collapse_units) * collapse_units
collapse_propagate = permute // len(collapse_propagate) * collapse_propagate
collapse_masks = permute // len(collapse_masks) * collapse_masks
collapse_ignore_masked = permute // len(collapse_ignore_masked) * collapse_ignore_masked
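# A toy illustration of the tiling above (illustrative only, not used by the
# tests): with a = [1, 2, 3] and b = ["x"], permute == len(a) * len(b) == 3,
# so b is tiled to permute // len(b) * b == ["x", "x", "x"], and zip(a, b)
# then provides one entry per row of the parametrize table below.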
@pytest.mark.parametrize(
"mask, unit, propagate_uncertainties, operation_ignores_mask",
zip(collapse_masks, collapse_units, collapse_propagate, collapse_ignore_masked),
)
def test_collapse(mask, unit, propagate_uncertainties, operation_ignores_mask):
    # unique set of ordered axis pairs for an N-D cube:
axes_permutations = {tuple(axes[:2]) for axes in permutations(range(mask.ndim))}
# each of the single axis slices:
axes_permutations.update(set(range(mask.ndim)))
axes_permutations.update({None})
cube = np.arange(np.prod(mask.shape)).reshape(mask.shape)
numpy_cube = np.ma.masked_array(cube, mask=mask)
ma_cube = Masked(cube, mask=mask)
ndarr = NDDataArray(cube, uncertainty=StdDevUncertainty(cube), unit=unit, mask=mask)
# By construction, the minimum value along each axis is always the zeroth index and
# the maximum is always the last along that axis. We verify that here, so we can
# test that the correct uncertainties are extracted during the
# `NDDataArray.min` and `NDDataArray.max` methods later:
for axis in range(cube.ndim):
assert np.all(np.equal(cube.argmin(axis=axis), 0))
assert np.all(np.equal(cube.argmax(axis=axis), cube.shape[axis] - 1))
# confirm that supported nddata methods agree with corresponding numpy methods
# for the masked data array:
sum_methods = ["sum", "mean"]
ext_methods = ["min", "max"]
all_methods = sum_methods + ext_methods
# for all supported methods, ensure the masking is propagated:
for method in all_methods:
for axes in axes_permutations:
astropy_method = getattr(ma_cube, method)(axis=axes)
numpy_method = getattr(numpy_cube, method)(axis=axes)
nddata_method = getattr(ndarr, method)(
axis=axes,
propagate_uncertainties=propagate_uncertainties,
operation_ignores_mask=operation_ignores_mask,
)
astropy_unmasked = astropy_method.base[~astropy_method.mask]
nddata_unmasked = nddata_method.data[~nddata_method.mask]
# check if the units are passed through correctly:
assert unit == nddata_method.unit
# check if the numpy and astropy.utils.masked results agree when
# the result is not fully masked:
if len(astropy_unmasked) > 0:
if not operation_ignores_mask:
# compare with astropy
assert np.all(np.equal(astropy_unmasked, nddata_unmasked))
assert np.all(np.equal(astropy_method.mask, nddata_method.mask))
else:
# compare with numpy
assert np.ma.all(
np.ma.equal(numpy_method, np.asanyarray(nddata_method))
)
# For extremum methods, ensure the uncertainty returned corresponds to the
# min/max data value. We've created the uncertainties to have the same value
# as the data array, so we can just check for equality:
if method in ext_methods and propagate_uncertainties:
assert np.ma.all(np.ma.equal(astropy_method, nddata_method))
|
a9712d6d787f9b5bd5954a481f866554230cf11c419e8c212dc0233842254cd3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.nddata import NDData, NDSlicingMixin
from astropy.nddata import _testing as nd_testing
from astropy.nddata.nduncertainty import (
NDUncertainty,
StdDevUncertainty,
UnknownUncertainty,
)
# Just add the Mixin to NDData
# TODO: Make this use NDDataRef instead!
class NDDataSliceable(NDSlicingMixin, NDData):
pass
# Just some uncertainty (following the StdDevUncertainty implementation of
# storing the uncertainty in a property 'array') with slicing.
class SomeUncertainty(NDUncertainty):
@property
def uncertainty_type(self):
return "fake"
def _propagate_add(self, data, final_data):
pass
def _propagate_subtract(self, data, final_data):
pass
def _propagate_multiply(self, data, final_data):
pass
def _propagate_divide(self, data, final_data):
pass
def test_slicing_only_data():
data = np.arange(10)
nd = NDDataSliceable(data)
nd2 = nd[2:5]
assert_array_equal(data[2:5], nd2.data)
def test_slicing_data_scalar_fail():
data = np.array(10)
nd = NDDataSliceable(data)
with pytest.raises(TypeError): # as exc
nd[:]
# assert exc.value.args[0] == 'Scalars cannot be sliced.'
def test_slicing_1ddata_ndslice():
data = np.array([10, 20])
nd = NDDataSliceable(data)
# Standard numpy warning here:
with pytest.raises(IndexError):
nd[:, :]
@pytest.mark.parametrize("prop_name", ["mask", "uncertainty"])
def test_slicing_1dmask_ndslice(prop_name):
    # Data is 2D but mask/uncertainty only 1D, so the IndexError raised when
    # slicing the mask should propagate to the user.
data = np.ones((3, 3))
kwarg = {prop_name: np.ones(3)}
nd = NDDataSliceable(data, **kwarg)
# Standard numpy warning here:
with pytest.raises(IndexError):
nd[:, :]
def test_slicing_all_npndarray_1d():
data = np.arange(10)
mask = data > 3
uncertainty = StdDevUncertainty(np.linspace(10, 20, 10))
naxis = 1
wcs = nd_testing._create_wcs_simple(
naxis=naxis,
ctype=["deg"] * naxis,
crpix=[3] * naxis,
crval=[10] * naxis,
cdelt=[1] * naxis,
)
# Just to have them too
unit = u.s
meta = {"observer": "Brian"}
nd = NDDataSliceable(
data, mask=mask, uncertainty=uncertainty, wcs=wcs, unit=unit, meta=meta
)
nd2 = nd[2:5]
assert_array_equal(data[2:5], nd2.data)
assert_array_equal(mask[2:5], nd2.mask)
assert_array_equal(uncertainty[2:5].array, nd2.uncertainty.array)
assert nd2.wcs.pixel_to_world(1) == nd.wcs.pixel_to_world(3)
assert unit is nd2.unit
assert meta == nd.meta
def test_slicing_all_npndarray_nd():
# See what happens for multidimensional properties
data = np.arange(1000).reshape(10, 10, 10)
mask = data > 3
uncertainty = np.linspace(10, 20, 1000).reshape(10, 10, 10)
naxis = 3
wcs = nd_testing._create_wcs_simple(
naxis=naxis,
ctype=["deg"] * naxis,
crpix=[3] * naxis,
crval=[10] * naxis,
cdelt=[1] * naxis,
)
nd = NDDataSliceable(data, mask=mask, uncertainty=uncertainty, wcs=wcs)
# Slice only 1D
nd2 = nd[2:5]
assert_array_equal(data[2:5], nd2.data)
assert_array_equal(mask[2:5], nd2.mask)
assert_array_equal(uncertainty[2:5], nd2.uncertainty.array)
# Slice 3D
nd2 = nd[2:5, :, 4:7]
assert_array_equal(data[2:5, :, 4:7], nd2.data)
assert_array_equal(mask[2:5, :, 4:7], nd2.mask)
assert_array_equal(uncertainty[2:5, :, 4:7], nd2.uncertainty.array)
assert nd2.wcs.pixel_to_world(1, 5, 1) == nd.wcs.pixel_to_world(5, 5, 3)
def test_slicing_all_npndarray_shape_diff():
data = np.arange(10)
mask = (data > 3)[0:9]
uncertainty = np.linspace(10, 20, 15)
naxis = 1
wcs = nd_testing._create_wcs_simple(
naxis=naxis,
ctype=["deg"] * naxis,
crpix=[3] * naxis,
crval=[10] * naxis,
cdelt=[1] * naxis,
)
nd = NDDataSliceable(data, mask=mask, uncertainty=uncertainty, wcs=wcs)
nd2 = nd[2:5]
assert_array_equal(data[2:5], nd2.data)
    # All are sliced even if the shapes differ (no info message is logged)
assert_array_equal(mask[2:5], nd2.mask)
assert_array_equal(uncertainty[2:5], nd2.uncertainty.array)
assert nd2.wcs.pixel_to_world(1) == nd.wcs.pixel_to_world(3)
def test_slicing_all_something_wrong():
data = np.arange(10)
mask = [False] * 10
uncertainty = UnknownUncertainty({"rdnoise": 2.9, "gain": 1.4})
naxis = 1
wcs = nd_testing._create_wcs_simple(
naxis=naxis,
ctype=["deg"] * naxis,
crpix=[3] * naxis,
crval=[10] * naxis,
cdelt=[1] * naxis,
)
nd = NDDataSliceable(data, mask=mask, uncertainty=uncertainty, wcs=wcs)
nd2 = nd[2:5]
# Sliced properties:
assert_array_equal(data[2:5], nd2.data)
assert_array_equal(mask[2:5], nd2.mask)
    # Attributes that are not sliced (an info message is logged nevertheless)
assert uncertainty.array == nd2.uncertainty.array
assert uncertainty.uncertainty_type == nd2.uncertainty.uncertainty_type
assert uncertainty.unit == nd2.uncertainty.unit
assert nd2.wcs.pixel_to_world(1) == nd.wcs.pixel_to_world(3)
def test_boolean_slicing():
data = np.arange(10)
mask = data.copy()
uncertainty = StdDevUncertainty(data.copy())
naxis = 1
wcs = nd_testing._create_wcs_simple(
naxis=naxis,
ctype=["deg"] * naxis,
crpix=[3] * naxis,
crval=[10] * naxis,
cdelt=[1] * naxis,
)
nd = NDDataSliceable(data, mask=mask, uncertainty=uncertainty, wcs=wcs)
with pytest.raises(ValueError):
nd2 = nd[(nd.data >= 3) & (nd.data < 8)]
nd.wcs = None
nd2 = nd[(nd.data >= 3) & (nd.data < 8)]
assert_array_equal(data[3:8], nd2.data)
assert_array_equal(mask[3:8], nd2.mask)
|
6ca5a40ef32899aa4580dfa3dee8bb408c12a274cf75ae40c6a13a1448e07686 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import importlib
import numpy as np
__all__ = ["deserialize_class", "wcs_info_str"]
def deserialize_class(tpl, construct=True):
"""
Deserialize classes recursively.
"""
if not isinstance(tpl, tuple) or len(tpl) != 3:
raise ValueError("Expected a tuple of three values")
module, klass = tpl[0].rsplit(".", 1)
module = importlib.import_module(module)
klass = getattr(module, klass)
args = tuple(
deserialize_class(arg) if isinstance(arg, tuple) else arg for arg in tpl[1]
)
kwargs = dict(
(key, deserialize_class(val)) if isinstance(val, tuple) else (key, val)
for (key, val) in tpl[2].items()
)
if construct:
return klass(*args, **kwargs)
else:
return klass, args, kwargs
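# A minimal sketch of the expected tuple layout (the exact contents depend on
# the WCS object that serialized the class; the values below are illustrative):
#
#     tpl = ("astropy.units.Quantity", (1.0,), {"unit": "deg"})
#     q = deserialize_class(tpl)                       # -> Quantity(1.0, unit="deg")
#     cls, args, kwargs = deserialize_class(tpl, construct=False)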
def wcs_info_str(wcs):
# Overall header
s = f"{wcs.__class__.__name__} Transformation\n\n"
s += "This transformation has {} pixel and {} world dimensions\n\n".format(
wcs.pixel_n_dim, wcs.world_n_dim
)
s += f"Array shape (Numpy order): {wcs.array_shape}\n\n"
# Pixel dimensions table
array_shape = wcs.array_shape or (0,)
pixel_shape = wcs.pixel_shape or (None,) * wcs.pixel_n_dim
# Find largest between header size and value length
pixel_dim_width = max(9, len(str(wcs.pixel_n_dim)))
pixel_nam_width = max(9, *map(len, wcs.pixel_axis_names))
pixel_siz_width = max(9, len(str(max(array_shape))))
# fmt: off
s += (('{0:' + str(pixel_dim_width) + 's}').format('Pixel Dim') + ' ' +
('{0:' + str(pixel_nam_width) + 's}').format('Axis Name') + ' ' +
('{0:' + str(pixel_siz_width) + 's}').format('Data size') + ' ' +
'Bounds\n')
# fmt: on
for ipix in range(wcs.pixel_n_dim):
# fmt: off
s += (('{0:' + str(pixel_dim_width) + 'g}').format(ipix) + ' ' +
('{0:' + str(pixel_nam_width) + 's}').format(wcs.pixel_axis_names[ipix] or 'None') + ' ' +
(" " * 5 + str(None) if pixel_shape[ipix] is None else
('{0:' + str(pixel_siz_width) + 'g}').format(pixel_shape[ipix])) + ' ' +
'{:s}'.format(str(None if wcs.pixel_bounds is None else wcs.pixel_bounds[ipix]) + '\n'))
# fmt: on
s += "\n"
# World dimensions table
# Find largest between header size and value length
world_dim_width = max(9, len(str(wcs.world_n_dim)))
world_nam_width = max(9, *(len(x) for x in wcs.world_axis_names if x is not None))
world_typ_width = max(
[13] + [len(x) for x in wcs.world_axis_physical_types if x is not None]
)
# fmt: off
s += (('{0:' + str(world_dim_width) + 's}').format('World Dim') + ' ' +
('{0:' + str(world_nam_width) + 's}').format('Axis Name') + ' ' +
('{0:' + str(world_typ_width) + 's}').format('Physical Type') + ' ' +
'Units\n')
# fmt: on
for iwrl in range(wcs.world_n_dim):
name = wcs.world_axis_names[iwrl] or "None"
typ = wcs.world_axis_physical_types[iwrl] or "None"
unit = wcs.world_axis_units[iwrl] or "unknown"
# fmt: off
s += (('{0:' + str(world_dim_width) + 'd}').format(iwrl) + ' ' +
('{0:' + str(world_nam_width) + 's}').format(name) + ' ' +
('{0:' + str(world_typ_width) + 's}').format(typ) + ' ' +
'{:s}'.format(unit + '\n'))
# fmt: on
s += "\n"
# Axis correlation matrix
pixel_dim_width = max(3, len(str(wcs.world_n_dim)))
s += "Correlation between pixel and world axes:\n\n"
# fmt: off
s += (' ' * world_dim_width + ' ' +
('{0:^' + str(wcs.pixel_n_dim * 5 - 2) + 's}').format('Pixel Dim') +
'\n')
s += (('{0:' + str(world_dim_width) + 's}').format('World Dim') +
''.join([' ' + ('{0:' + str(pixel_dim_width) + 'd}').format(ipix)
for ipix in range(wcs.pixel_n_dim)]) +
'\n')
# fmt: on
matrix = wcs.axis_correlation_matrix
matrix_str = np.empty(matrix.shape, dtype="U3")
matrix_str[matrix] = "yes"
matrix_str[~matrix] = "no"
for iwrl in range(wcs.world_n_dim):
# fmt: off
s += (('{0:' + str(world_dim_width) + 'd}').format(iwrl) +
''.join([' ' + ('{0:>' + str(pixel_dim_width) + 's}').format(matrix_str[iwrl, ipix])
for ipix in range(wcs.pixel_n_dim)]) +
'\n')
# fmt: on
# Make sure we get rid of the extra whitespace at the end of some lines
return "\n".join([l.rstrip() for l in s.splitlines()])
|
e452463d901ce44cb1367a581e2829cd87d1a40b89c3287a9a65a116ef2ee725 | import abc
from collections import OrderedDict, defaultdict
import numpy as np
from .utils import deserialize_class
__all__ = ["BaseHighLevelWCS", "HighLevelWCSMixin"]
def rec_getattr(obj, att):
for a in att.split("."):
obj = getattr(obj, a)
return obj
def default_order(components):
order = []
for key, _, _ in components:
if key not in order:
order.append(key)
return order
def _toindex(value):
"""Convert value to an int or an int array.
Input coordinates converted to integers
corresponding to the center of the pixel.
The convention is that the center of the pixel is
(0, 0), while the lower left corner is (-0.5, -0.5).
The outputs are used to index the mask.
Examples
--------
>>> _toindex(np.array([-0.5, 0.49999]))
array([0, 0])
>>> _toindex(np.array([0.5, 1.49999]))
array([1, 1])
>>> _toindex(np.array([1.5, 2.49999]))
array([2, 2])
"""
indx = np.asarray(np.floor(np.asarray(value) + 0.5), dtype=int)
return indx
class BaseHighLevelWCS(metaclass=abc.ABCMeta):
"""
Abstract base class for the high-level WCS interface.
This is described in `APE 14: A shared Python interface for World Coordinate
Systems <https://doi.org/10.5281/zenodo.1188875>`_.
"""
@property
@abc.abstractmethod
def low_level_wcs(self):
"""
Returns a reference to the underlying low-level WCS object.
"""
@abc.abstractmethod
def pixel_to_world(self, *pixel_arrays):
"""
Convert pixel coordinates to world coordinates (represented by
high-level objects).
If a single high-level object is used to represent the world coordinates
(i.e., if ``len(wcs.world_axis_object_classes) == 1``), it is returned
as-is (not in a tuple/list), otherwise a tuple of high-level objects is
returned. See
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values` for pixel
indexing and ordering conventions.
"""
def array_index_to_world(self, *index_arrays):
"""
Convert array indices to world coordinates (represented by Astropy
objects).
If a single high-level object is used to represent the world coordinates
(i.e., if ``len(wcs.world_axis_object_classes) == 1``), it is returned
as-is (not in a tuple/list), otherwise a tuple of high-level objects is
returned. See
`~astropy.wcs.wcsapi.BaseLowLevelWCS.array_index_to_world_values` for
pixel indexing and ordering conventions.
"""
return self.pixel_to_world(*index_arrays[::-1])
@abc.abstractmethod
def world_to_pixel(self, *world_objects):
"""
Convert world coordinates (represented by Astropy objects) to pixel
coordinates.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned. See
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_to_pixel_values` for pixel
indexing and ordering conventions.
"""
def world_to_array_index(self, *world_objects):
"""
Convert world coordinates (represented by Astropy objects) to array
indices.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned. See
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_to_array_index_values` for
pixel indexing and ordering conventions. The indices should be returned
as rounded integers.
"""
if self.low_level_wcs.pixel_n_dim == 1:
return _toindex(self.world_to_pixel(*world_objects))
else:
return tuple(_toindex(self.world_to_pixel(*world_objects)[::-1]).tolist())
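# A sketch of the high-level API in use (the FITS keywords are illustrative;
# astropy.wcs.WCS is one class that implements this interface):
#
#     from astropy.wcs import WCS
#     w = WCS(naxis=2)
#     w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
#     w.wcs.cunit = ["deg", "deg"]
#     w.wcs.set()                        # make sure the celestial axes are identified
#     coord = w.pixel_to_world(10, 20)   # -> SkyCoord
#     w.world_to_array_index(coord)      # -> rounded (row, column) array indices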
def high_level_objects_to_values(*world_objects, low_level_wcs):
"""
Convert the input high level object to low level values.
This function uses the information in ``wcs.world_axis_object_classes`` and
``wcs.world_axis_object_components`` to convert the high level objects
(such as `~.SkyCoord`) to low level "values" `~.Quantity` objects.
This is used in `.HighLevelWCSMixin.world_to_pixel`, but provided as a
separate function for use in other places where needed.
Parameters
----------
*world_objects: object
High level coordinate objects.
low_level_wcs: `.BaseLowLevelWCS`
The WCS object to use to interpret the coordinates.
"""
# Cache the classes and components since this may be expensive
serialized_classes = low_level_wcs.world_axis_object_classes
components = low_level_wcs.world_axis_object_components
# Deserialize world_axis_object_classes using the default order
classes = OrderedDict()
for key in default_order(components):
if low_level_wcs.serialized_classes:
classes[key] = deserialize_class(serialized_classes[key], construct=False)
else:
classes[key] = serialized_classes[key]
# Check that the number of classes matches the number of inputs
if len(world_objects) != len(classes):
raise ValueError(
f"Number of world inputs ({len(world_objects)}) does not match expected"
f" ({len(classes)})"
)
# Determine whether the classes are uniquely matched, that is we check
# whether there is only one of each class.
world_by_key = {}
unique_match = True
for w in world_objects:
matches = []
for key, (klass, *_) in classes.items():
if isinstance(w, klass):
matches.append(key)
if len(matches) == 1:
world_by_key[matches[0]] = w
else:
unique_match = False
break
# If the match is not unique, the order of the classes needs to match,
# whereas if all classes are unique, we can still intelligently match
# them even if the order is wrong.
objects = {}
if unique_match:
for key, (klass, args, kwargs, *rest) in classes.items():
if len(rest) == 0:
klass_gen = klass
elif len(rest) == 1:
klass_gen = rest[0]
else:
raise ValueError(
"Tuples in world_axis_object_classes should have length 3 or 4"
)
# FIXME: For now SkyCoord won't auto-convert upon initialization
# https://github.com/astropy/astropy/issues/7689
from astropy.coordinates import SkyCoord
if isinstance(world_by_key[key], SkyCoord):
if "frame" in kwargs:
objects[key] = world_by_key[key].transform_to(kwargs["frame"])
else:
objects[key] = world_by_key[key]
else:
objects[key] = klass_gen(world_by_key[key], *args, **kwargs)
else:
for ikey, key in enumerate(classes):
klass, args, kwargs, *rest = classes[key]
if len(rest) == 0:
klass_gen = klass
elif len(rest) == 1:
klass_gen = rest[0]
else:
raise ValueError(
"Tuples in world_axis_object_classes should have length 3 or 4"
)
w = world_objects[ikey]
if not isinstance(w, klass):
raise ValueError(
"Expected the following order of world arguments:"
f" {', '.join([k.__name__ for (k, *_) in classes.values()])}"
)
# FIXME: For now SkyCoord won't auto-convert upon initialization
# https://github.com/astropy/astropy/issues/7689
from astropy.coordinates import SkyCoord
if isinstance(w, SkyCoord):
if "frame" in kwargs:
objects[key] = w.transform_to(kwargs["frame"])
else:
objects[key] = w
else:
objects[key] = klass_gen(w, *args, **kwargs)
# We now extract the attributes needed for the world values
world = []
for key, _, attr in components:
if callable(attr):
world.append(attr(objects[key]))
else:
world.append(rec_getattr(objects[key], attr))
return world
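# A minimal sketch of how this is used (the FITS keywords below are purely
# illustrative; any APE 14 compliant low-level WCS can be passed in):
#
#     from astropy.wcs import WCS
#     from astropy.coordinates import SkyCoord
#
#     w = WCS(naxis=2)
#     w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
#     w.wcs.cunit = ["deg", "deg"]
#     w.wcs.set()
#     lon, lat = high_level_objects_to_values(
#         SkyCoord(10, 20, unit="deg"), low_level_wcs=w
#     )  # -> 10.0, 20.0 (one plain value per world axis)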
def values_to_high_level_objects(*world_values, low_level_wcs):
"""
Convert low level values into high level objects.
This function uses the information in ``wcs.world_axis_object_classes`` and
``wcs.world_axis_object_components`` to convert low level "values"
    `~.Quantity` objects to high level objects (such as `~.SkyCoord`).
This is used in `.HighLevelWCSMixin.pixel_to_world`, but provided as a
separate function for use in other places where needed.
Parameters
----------
*world_values: object
Low level, "values" representations of the world coordinates.
low_level_wcs: `.BaseLowLevelWCS`
The WCS object to use to interpret the coordinates.
"""
# Cache the classes and components since this may be expensive
components = low_level_wcs.world_axis_object_components
classes = low_level_wcs.world_axis_object_classes
# Deserialize classes
if low_level_wcs.serialized_classes:
classes_new = {}
for key, value in classes.items():
classes_new[key] = deserialize_class(value, construct=False)
classes = classes_new
args = defaultdict(list)
kwargs = defaultdict(dict)
for i, (key, attr, _) in enumerate(components):
if isinstance(attr, str):
kwargs[key][attr] = world_values[i]
else:
while attr > len(args[key]) - 1:
args[key].append(None)
args[key][attr] = world_values[i]
result = []
for key in default_order(components):
klass, ar, kw, *rest = classes[key]
if len(rest) == 0:
klass_gen = klass
elif len(rest) == 1:
klass_gen = rest[0]
else:
raise ValueError(
"Tuples in world_axis_object_classes should have length 3 or 4"
)
result.append(klass_gen(*args[key], *ar, **kwargs[key], **kw))
return result
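# The inverse direction follows the same pattern (again a sketch; ``w`` is the
# illustrative FITS WCS built in the sketch after high_level_objects_to_values):
#
#     (coord,) = values_to_high_level_objects(10.0, 20.0, low_level_wcs=w)
#     # coord is a SkyCoord at (10 deg, 20 deg) in the frame described by w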
class HighLevelWCSMixin(BaseHighLevelWCS):
"""
Mix-in class that automatically provides the high-level WCS API for the
low-level WCS object given by the `~HighLevelWCSMixin.low_level_wcs`
property.
"""
@property
def low_level_wcs(self):
return self
def world_to_pixel(self, *world_objects):
world_values = high_level_objects_to_values(
*world_objects, low_level_wcs=self.low_level_wcs
)
# Finally we convert to pixel coordinates
pixel_values = self.low_level_wcs.world_to_pixel_values(*world_values)
return pixel_values
def pixel_to_world(self, *pixel_arrays):
# Compute the world coordinate values
world_values = self.low_level_wcs.pixel_to_world_values(*pixel_arrays)
if self.low_level_wcs.world_n_dim == 1:
world_values = (world_values,)
pixel_values = values_to_high_level_objects(
*world_values, low_level_wcs=self.low_level_wcs
)
if len(pixel_values) == 1:
return pixel_values[0]
else:
return pixel_values
|
ccc9051bee04e064aa2dea2c316860621538713e87d64d57eb20b9a6816050a4 | # This file includes the definition of a mix-in class that provides the low-
# and high-level WCS API to the astropy.wcs.WCS object. We keep this code
# isolated in this mix-in class to avoid making the main wcs.py file too
# long.
import warnings
import numpy as np
from astropy import units as u
from astropy.constants import c
from astropy.coordinates import ICRS, Galactic, SpectralCoord
from astropy.coordinates.spectral_coordinate import (
attach_zero_velocities,
update_differentials_to_match,
)
from astropy.utils.exceptions import AstropyUserWarning
from .high_level_api import HighLevelWCSMixin
from .low_level_api import BaseLowLevelWCS
from .wrappers import SlicedLowLevelWCS
__all__ = ["custom_ctype_to_ucd_mapping", "SlicedFITSWCS", "FITSWCSAPIMixin"]
C_SI = c.si.value
VELOCITY_FRAMES = {
"GEOCENT": "gcrs",
"BARYCENT": "icrs",
"HELIOCENT": "hcrs",
"LSRK": "lsrk",
"LSRD": "lsrd",
}
# The spectral velocity frames below are needed for FITS spectral WCS
# (see Greisen 06 table 12) but aren't yet defined as real
# astropy.coordinates frames, so we instead define them here as instances
# of existing coordinate frames with offset velocities. In future we should
# make these real frames so that users can more easily recognize these
# velocity frames when used in SpectralCoord.
# This frame is defined as a velocity of 220 km/s in the
# direction of l=90, b=0. The rotation velocity is defined
# in:
#
# Kerr and Lynden-Bell 1986, Review of galactic constants.
#
# NOTE: this may differ from the assumptions of galcen_v_sun
# in the Galactocentric frame - the value used here is
# the one adopted by the WCS standard for spectral
# transformations.
VELOCITY_FRAMES["GALACTOC"] = Galactic(
u=0 * u.km,
v=0 * u.km,
w=0 * u.km,
U=0 * u.km / u.s,
V=-220 * u.km / u.s,
W=0 * u.km / u.s,
representation_type="cartesian",
differential_type="cartesian",
)
# This frame is defined as a velocity of 300 km/s in the
# direction of l=90, b=0. This is defined in:
#
# Transactions of the IAU Vol. XVI B Proceedings of the
# 16th General Assembly, Reports of Meetings of Commissions:
# Comptes Rendus Des Séances Des Commissions, Commission 28,
# p201.
#
# Note that these values differ from those used by CASA
# (308 km/s towards l=105, b=-7) but we use the above values
# since these are the ones defined in Greisen et al (2006).
VELOCITY_FRAMES["LOCALGRP"] = Galactic(
u=0 * u.km,
v=0 * u.km,
w=0 * u.km,
U=0 * u.km / u.s,
V=-300 * u.km / u.s,
W=0 * u.km / u.s,
representation_type="cartesian",
differential_type="cartesian",
)
# This frame is defined as a velocity of 368 km/s in the
# direction of l=263.85, b=48.25. This is defined in:
#
# Bennett et al. (2003), First-Year Wilkinson Microwave
# Anisotropy Probe (WMAP) Observations: Preliminary Maps
# and Basic Results
#
# Note that in that paper, the dipole is expressed as a
# temperature (T=3.346 +/- 0.017mK)
VELOCITY_FRAMES["CMBDIPOL"] = Galactic(
l=263.85 * u.deg,
b=48.25 * u.deg,
distance=0 * u.km,
radial_velocity=-(3.346e-3 / 2.725 * c).to(u.km / u.s),
)
# Mapping from CTYPE axis name to UCD1
CTYPE_TO_UCD1 = {
# Celestial coordinates
"RA": "pos.eq.ra",
"DEC": "pos.eq.dec",
"GLON": "pos.galactic.lon",
"GLAT": "pos.galactic.lat",
"ELON": "pos.ecliptic.lon",
"ELAT": "pos.ecliptic.lat",
"TLON": "pos.bodyrc.lon",
"TLAT": "pos.bodyrc.lat",
"HPLT": "custom:pos.helioprojective.lat",
"HPLN": "custom:pos.helioprojective.lon",
"HPRZ": "custom:pos.helioprojective.z",
"HGLN": "custom:pos.heliographic.stonyhurst.lon",
"HGLT": "custom:pos.heliographic.stonyhurst.lat",
"CRLN": "custom:pos.heliographic.carrington.lon",
"CRLT": "custom:pos.heliographic.carrington.lat",
"SOLX": "custom:pos.heliocentric.x",
"SOLY": "custom:pos.heliocentric.y",
"SOLZ": "custom:pos.heliocentric.z",
# Spectral coordinates (WCS paper 3)
"FREQ": "em.freq", # Frequency
"ENER": "em.energy", # Energy
"WAVN": "em.wavenumber", # Wavenumber
"WAVE": "em.wl", # Vacuum wavelength
"VRAD": "spect.dopplerVeloc.radio", # Radio velocity
"VOPT": "spect.dopplerVeloc.opt", # Optical velocity
"ZOPT": "src.redshift", # Redshift
"AWAV": "em.wl", # Air wavelength
"VELO": "spect.dopplerVeloc", # Apparent radial velocity
"BETA": "custom:spect.doplerVeloc.beta", # Beta factor (v/c)
"STOKES": "phys.polarization.stokes", # STOKES parameters
# Time coordinates (https://www.aanda.org/articles/aa/pdf/2015/02/aa24653-14.pdf)
"TIME": "time",
"TAI": "time",
"TT": "time",
"TDT": "time",
"ET": "time",
"IAT": "time",
"UT1": "time",
"UTC": "time",
"GMT": "time",
"GPS": "time",
"TCG": "time",
"TCB": "time",
"TDB": "time",
"LOCAL": "time",
# Distance coordinates
"DIST": "pos.distance",
"DSUN": "custom:pos.distance.sunToObserver"
# UT() and TT() are handled separately in world_axis_physical_types
}
# Keep a list of additional custom mappings that have been registered. This
# is kept as a list in case nested context managers are used
CTYPE_TO_UCD1_CUSTOM = []
class custom_ctype_to_ucd_mapping:
"""
A context manager that makes it possible to temporarily add new CTYPE to
UCD1+ mapping used by :attr:`FITSWCSAPIMixin.world_axis_physical_types`.
Parameters
----------
mapping : dict
A dictionary mapping a CTYPE value to a UCD1+ value
Examples
--------
Consider a WCS with the following CTYPE::
>>> from astropy.wcs import WCS
>>> wcs = WCS(naxis=1)
>>> wcs.wcs.ctype = ['SPAM']
By default, :attr:`FITSWCSAPIMixin.world_axis_physical_types` returns `None`,
but this can be overridden::
>>> wcs.world_axis_physical_types
[None]
>>> with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):
... wcs.world_axis_physical_types
['food.spam']
"""
def __init__(self, mapping):
CTYPE_TO_UCD1_CUSTOM.insert(0, mapping)
self.mapping = mapping
def __enter__(self):
pass
def __exit__(self, type, value, tb):
CTYPE_TO_UCD1_CUSTOM.remove(self.mapping)
class SlicedFITSWCS(SlicedLowLevelWCS, HighLevelWCSMixin):
pass
class FITSWCSAPIMixin(BaseLowLevelWCS, HighLevelWCSMixin):
"""
A mix-in class that is intended to be inherited by the
:class:`~astropy.wcs.WCS` class and provides the low- and high-level WCS API.
"""
@property
def pixel_n_dim(self):
return self.naxis
@property
def world_n_dim(self):
return len(self.wcs.ctype)
@property
def array_shape(self):
if self.pixel_shape is None:
return None
else:
return self.pixel_shape[::-1]
@array_shape.setter
def array_shape(self, value):
if value is None:
self.pixel_shape = None
else:
self.pixel_shape = value[::-1]
@property
def pixel_shape(self):
if self._naxis == [0, 0]:
return None
else:
return tuple(self._naxis)
@pixel_shape.setter
def pixel_shape(self, value):
if value is None:
self._naxis = [0, 0]
else:
if len(value) != self.naxis:
raise ValueError(
f"The number of data axes, {self.naxis}, does not equal the shape"
f" {len(value)}."
)
self._naxis = list(value)
@property
def pixel_bounds(self):
return self._pixel_bounds
@pixel_bounds.setter
def pixel_bounds(self, value):
if value is None:
self._pixel_bounds = value
else:
if len(value) != self.naxis:
raise ValueError(
"The number of data axes, "
f"{self.naxis}, does not equal the number of "
f"pixel bounds {len(value)}."
)
self._pixel_bounds = list(value)
@property
def world_axis_physical_types(self):
types = []
# TODO: need to support e.g. TT(TAI)
for ctype in self.wcs.ctype:
if ctype.upper().startswith(("UT(", "TT(")):
types.append("time")
else:
ctype_name = ctype.split("-")[0]
for custom_mapping in CTYPE_TO_UCD1_CUSTOM:
if ctype_name in custom_mapping:
types.append(custom_mapping[ctype_name])
break
else:
types.append(CTYPE_TO_UCD1.get(ctype_name.upper(), None))
return types
@property
def world_axis_units(self):
units = []
for unit in self.wcs.cunit:
if unit is None:
unit = ""
elif isinstance(unit, u.Unit):
unit = unit.to_string(format="vounit")
else:
try:
unit = u.Unit(unit).to_string(format="vounit")
except u.UnitsError:
unit = ""
units.append(unit)
return units
@property
def world_axis_names(self):
return list(self.wcs.cname)
@property
def axis_correlation_matrix(self):
# If there are any distortions present, we assume that there may be
# correlations between all axes. Maybe if some distortions only apply
# to the image plane we can improve this?
if self.has_distortion:
return np.ones((self.world_n_dim, self.pixel_n_dim), dtype=bool)
# Assuming linear world coordinates along each axis, the correlation
# matrix would be given by whether or not the PC matrix is zero
matrix = self.wcs.get_pc() != 0
# We now need to check specifically for celestial coordinates since
# these can assume correlations because of spherical distortions. For
# each celestial coordinate we copy over the pixel dependencies from
# the other celestial coordinates.
celestial = (self.wcs.axis_types // 1000) % 10 == 2
celestial_indices = np.nonzero(celestial)[0]
for world1 in celestial_indices:
for world2 in celestial_indices:
if world1 != world2:
matrix[world1] |= matrix[world2]
matrix[world2] |= matrix[world1]
return matrix
def pixel_to_world_values(self, *pixel_arrays):
world = self.all_pix2world(*pixel_arrays, 0)
return world[0] if self.world_n_dim == 1 else tuple(world)
def world_to_pixel_values(self, *world_arrays):
# avoid circular import
from astropy.wcs.wcs import NoConvergence
try:
pixel = self.all_world2pix(*world_arrays, 0)
except NoConvergence as e:
warnings.warn(str(e))
# use best_solution contained in the exception and format the same
# way as all_world2pix does (using _array_converter)
pixel = self._array_converter(
lambda *args: e.best_solution, "input", *world_arrays, 0
)
return pixel[0] if self.pixel_n_dim == 1 else tuple(pixel)
@property
def world_axis_object_components(self):
return self._get_components_and_classes()[0]
@property
def world_axis_object_classes(self):
return self._get_components_and_classes()[1]
@property
def serialized_classes(self):
return False
def _get_components_and_classes(self):
# The aim of this function is to return whatever is needed for
# world_axis_object_components and world_axis_object_classes. It's easier
# to figure it out in one go and then return the values and let the
# properties return part of it.
# Since this method might get called quite a few times, we need to cache
# it. We start off by defining a hash based on the attributes of the
# WCS that matter here (we can't just use the WCS object as a hash since
# it is mutable)
wcs_hash = (
self.naxis,
list(self.wcs.ctype),
list(self.wcs.cunit),
self.wcs.radesys,
self.wcs.specsys,
self.wcs.equinox,
self.wcs.dateobs,
self.wcs.lng,
self.wcs.lat,
)
# If the cache is present, we need to check that the 'hash' matches.
if getattr(self, "_components_and_classes_cache", None) is not None:
cache = self._components_and_classes_cache
if cache[0] == wcs_hash:
return cache[1]
else:
self._components_and_classes_cache = None
# Avoid circular imports by importing here
from astropy.coordinates import EarthLocation, SkyCoord, StokesCoord
from astropy.time import Time, TimeDelta
from astropy.time.formats import FITS_DEPRECATED_SCALES
from astropy.wcs.utils import wcs_to_celestial_frame
components = [None] * self.naxis
classes = {}
# Let's start off by checking whether the WCS has a pair of celestial
# components
if self.has_celestial:
try:
celestial_frame = wcs_to_celestial_frame(self)
except ValueError:
# Some WCSes, e.g. solar, can be recognized by WCSLIB as being
# celestial but we don't necessarily have frames for them.
celestial_frame = None
else:
kwargs = {}
kwargs["frame"] = celestial_frame
# Very occasionally (i.e. with TAB) wcs does not convert the units to degrees
kwargs["unit"] = (
u.Unit(self.wcs.cunit[self.wcs.lng]),
u.Unit(self.wcs.cunit[self.wcs.lat]),
)
classes["celestial"] = (SkyCoord, (), kwargs)
components[self.wcs.lng] = ("celestial", 0, "spherical.lon.degree")
components[self.wcs.lat] = ("celestial", 1, "spherical.lat.degree")
# Next, we check for spectral components
if self.has_spectral:
# Find index of spectral coordinate
ispec = self.wcs.spec
ctype = self.wcs.ctype[ispec][:4]
ctype = ctype.upper()
kwargs = {}
# Determine observer location and velocity
# TODO: determine how WCS standard would deal with observer on a
# spacecraft far from earth. For now assume the obsgeo parameters,
# if present, give the geocentric observer location.
if np.isnan(self.wcs.obsgeo[0]):
observer = None
else:
earth_location = EarthLocation(*self.wcs.obsgeo[:3], unit=u.m)
# Get the time scale from TIMESYS or fall back to 'utc'
tscale = self.wcs.timesys.lower() or "utc"
if np.isnan(self.wcs.mjdavg):
obstime = Time(
self.wcs.mjdobs,
format="mjd",
scale=tscale,
location=earth_location,
)
else:
obstime = Time(
self.wcs.mjdavg,
format="mjd",
scale=tscale,
location=earth_location,
)
observer_location = SkyCoord(earth_location.get_itrs(obstime=obstime))
if self.wcs.specsys in VELOCITY_FRAMES:
frame = VELOCITY_FRAMES[self.wcs.specsys]
observer = observer_location.transform_to(frame)
if isinstance(frame, str):
observer = attach_zero_velocities(observer)
else:
observer = update_differentials_to_match(
observer_location,
VELOCITY_FRAMES[self.wcs.specsys],
preserve_observer_frame=True,
)
elif self.wcs.specsys == "TOPOCENT":
observer = attach_zero_velocities(observer_location)
else:
raise NotImplementedError(
f"SPECSYS={self.wcs.specsys} not yet supported"
)
# Determine target
            # This is trickier. In principle the target for each pixel is the
# celestial coordinates of the pixel, but we then need to be very
# careful about SSYSOBS which is tricky. For now, we set the
# target using the reference celestial coordinate in the WCS (if
# any).
if self.has_celestial and celestial_frame is not None:
# NOTE: celestial_frame was defined higher up
# NOTE: we set the distance explicitly to avoid warnings in SpectralCoord
target = SkyCoord(
self.wcs.crval[self.wcs.lng] * self.wcs.cunit[self.wcs.lng],
self.wcs.crval[self.wcs.lat] * self.wcs.cunit[self.wcs.lat],
frame=celestial_frame,
distance=1000 * u.kpc,
)
target = attach_zero_velocities(target)
else:
target = None
# SpectralCoord does not work properly if either observer or target
# are not convertible to ICRS, so if this is the case, we (for now)
# drop the observer and target from the SpectralCoord and warn the
# user.
if observer is not None:
try:
observer.transform_to(ICRS())
except Exception:
warnings.warn(
"observer cannot be converted to ICRS, so will "
"not be set on SpectralCoord",
AstropyUserWarning,
)
observer = None
if target is not None:
try:
target.transform_to(ICRS())
except Exception:
warnings.warn(
"target cannot be converted to ICRS, so will "
"not be set on SpectralCoord",
AstropyUserWarning,
)
target = None
# NOTE: below we include Quantity in classes['spectral'] instead
# of SpectralCoord - this is because we want to also be able to
# accept plain quantities.
if ctype == "ZOPT":
def spectralcoord_from_redshift(redshift):
if isinstance(redshift, SpectralCoord):
return redshift
return SpectralCoord(
(redshift + 1) * self.wcs.restwav,
unit=u.m,
observer=observer,
target=target,
)
def redshift_from_spectralcoord(spectralcoord):
# TODO: check target is consistent between WCS and SpectralCoord,
# if they are not the transformation doesn't make conceptual sense.
if (
observer is None
or spectralcoord.observer is None
or spectralcoord.target is None
):
if observer is None:
msg = "No observer defined on WCS"
elif spectralcoord.observer is None:
msg = "No observer defined on SpectralCoord"
else:
msg = "No target defined on SpectralCoord"
warnings.warn(
f"{msg}, SpectralCoord "
"will be converted without any velocity "
"frame change",
AstropyUserWarning,
)
return spectralcoord.to_value(u.m) / self.wcs.restwav - 1.0
else:
return (
spectralcoord.with_observer_stationary_relative_to(
observer
).to_value(u.m)
/ self.wcs.restwav
- 1.0
)
classes["spectral"] = (u.Quantity, (), {}, spectralcoord_from_redshift)
components[self.wcs.spec] = ("spectral", 0, redshift_from_spectralcoord)
elif ctype == "BETA":
def spectralcoord_from_beta(beta):
if isinstance(beta, SpectralCoord):
return beta
return SpectralCoord(
beta * C_SI,
unit=u.m / u.s,
doppler_convention="relativistic",
doppler_rest=self.wcs.restwav * u.m,
observer=observer,
target=target,
)
def beta_from_spectralcoord(spectralcoord):
# TODO: check target is consistent between WCS and SpectralCoord,
# if they are not the transformation doesn't make conceptual sense.
doppler_equiv = u.doppler_relativistic(self.wcs.restwav * u.m)
if (
observer is None
or spectralcoord.observer is None
or spectralcoord.target is None
):
if observer is None:
msg = "No observer defined on WCS"
elif spectralcoord.observer is None:
msg = "No observer defined on SpectralCoord"
else:
msg = "No target defined on SpectralCoord"
warnings.warn(
f"{msg}, SpectralCoord "
"will be converted without any velocity "
"frame change",
AstropyUserWarning,
)
return spectralcoord.to_value(u.m / u.s, doppler_equiv) / C_SI
else:
return (
spectralcoord.with_observer_stationary_relative_to(
observer
).to_value(u.m / u.s, doppler_equiv)
/ C_SI
)
classes["spectral"] = (u.Quantity, (), {}, spectralcoord_from_beta)
components[self.wcs.spec] = ("spectral", 0, beta_from_spectralcoord)
else:
kwargs["unit"] = self.wcs.cunit[ispec]
if self.wcs.restfrq > 0:
if ctype == "VELO":
kwargs["doppler_convention"] = "relativistic"
kwargs["doppler_rest"] = self.wcs.restfrq * u.Hz
elif ctype == "VRAD":
kwargs["doppler_convention"] = "radio"
kwargs["doppler_rest"] = self.wcs.restfrq * u.Hz
elif ctype == "VOPT":
kwargs["doppler_convention"] = "optical"
kwargs["doppler_rest"] = self.wcs.restwav * u.m
def spectralcoord_from_value(value):
if isinstance(value, SpectralCoord):
return value
return SpectralCoord(
value, observer=observer, target=target, **kwargs
)
def value_from_spectralcoord(spectralcoord):
# TODO: check target is consistent between WCS and SpectralCoord,
# if they are not the transformation doesn't make conceptual sense.
if (
observer is None
or spectralcoord.observer is None
or spectralcoord.target is None
):
if observer is None:
msg = "No observer defined on WCS"
elif spectralcoord.observer is None:
msg = "No observer defined on SpectralCoord"
else:
msg = "No target defined on SpectralCoord"
warnings.warn(
f"{msg}, SpectralCoord "
"will be converted without any velocity "
"frame change",
AstropyUserWarning,
)
return spectralcoord.to_value(**kwargs)
else:
return spectralcoord.with_observer_stationary_relative_to(
observer
).to_value(**kwargs)
classes["spectral"] = (u.Quantity, (), {}, spectralcoord_from_value)
components[self.wcs.spec] = ("spectral", 0, value_from_spectralcoord)
# We can then make sure we correctly return Time objects where appropriate
# (https://www.aanda.org/articles/aa/pdf/2015/02/aa24653-14.pdf)
if "time" in self.world_axis_physical_types:
multiple_time = self.world_axis_physical_types.count("time") > 1
for i in range(self.naxis):
if self.world_axis_physical_types[i] == "time":
if multiple_time:
name = f"time.{i}"
else:
name = "time"
# Initialize delta
reference_time_delta = None
# Extract time scale, and remove any algorithm code
scale = self.wcs.ctype[i].split("-")[0].lower()
if scale == "time":
if self.wcs.timesys:
scale = self.wcs.timesys.lower()
else:
scale = "utc"
# Drop sub-scales
if "(" in scale:
pos = scale.index("(")
scale, subscale = scale[:pos], scale[pos + 1 : -1]
warnings.warn(
"Dropping unsupported sub-scale "
f"{subscale.upper()} from scale {scale.upper()}",
UserWarning,
)
# TODO: consider having GPS as a scale in Time
# For now GPS is not a scale, we approximate this by TAI - 19s
if scale == "gps":
reference_time_delta = TimeDelta(19, format="sec")
scale = "tai"
elif scale.upper() in FITS_DEPRECATED_SCALES:
scale = FITS_DEPRECATED_SCALES[scale.upper()]
elif scale not in Time.SCALES:
raise ValueError(f"Unrecognized time CTYPE={self.wcs.ctype[i]}")
# Determine location
trefpos = self.wcs.trefpos.lower()
if trefpos.startswith("topocent"):
# Note that some headers use TOPOCENT instead of TOPOCENTER
if np.any(np.isnan(self.wcs.obsgeo[:3])):
warnings.warn(
"Missing or incomplete observer location "
"information, setting location in Time to None",
UserWarning,
)
location = None
else:
location = EarthLocation(*self.wcs.obsgeo[:3], unit=u.m)
elif trefpos == "geocenter":
location = EarthLocation(0, 0, 0, unit=u.m)
elif trefpos == "":
location = None
else:
# TODO: implement support for more locations when Time supports it
warnings.warn(
f"Observation location '{trefpos}' is not "
"supported, setting location in Time to None",
UserWarning,
)
location = None
reference_time = Time(
np.nan_to_num(self.wcs.mjdref[0]),
np.nan_to_num(self.wcs.mjdref[1]),
format="mjd",
scale=scale,
location=location,
)
if reference_time_delta is not None:
reference_time = reference_time + reference_time_delta
def time_from_reference_and_offset(offset):
if isinstance(offset, Time):
return offset
return reference_time + TimeDelta(offset, format="sec")
def offset_from_time_and_reference(time):
return (time - reference_time).sec
classes[name] = (Time, (), {}, time_from_reference_and_offset)
components[i] = (name, 0, offset_from_time_and_reference)
if "phys.polarization.stokes" in self.world_axis_physical_types:
for i in range(self.naxis):
if self.world_axis_physical_types[i] == "phys.polarization.stokes":
name = "stokes"
classes[name] = (StokesCoord, (), {})
components[i] = (name, 0, "value")
# Fallback: for any remaining components that haven't been identified, just
# return Quantity as the class to use
for i in range(self.naxis):
if components[i] is None:
name = self.wcs.ctype[i].split("-")[0].lower()
if name == "":
name = "world"
while name in classes:
name += "_"
classes[name] = (u.Quantity, (), {"unit": self.wcs.cunit[i]})
components[i] = (name, 0, "value")
# Keep a cached version of result
self._components_and_classes_cache = wcs_hash, (components, classes)
return components, classes
|
652e1de73f97cef70981c5d93df827fa36c843ed68f596522227da4a4e65e13f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import os
from contextlib import nullcontext
from datetime import datetime
import numpy as np
import pytest
from numpy.testing import (
assert_allclose,
assert_array_almost_equal,
assert_array_almost_equal_nulp,
assert_array_equal,
)
from packaging.version import Version
from astropy import units as u
from astropy import wcs
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.nddata import Cutout2D
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.data import (
get_pkg_data_contents,
get_pkg_data_filename,
get_pkg_data_filenames,
)
from astropy.utils.exceptions import (
AstropyDeprecationWarning,
AstropyUserWarning,
AstropyWarning,
)
from astropy.utils.misc import NumpyRNGContext
from astropy.wcs import _wcs
_WCSLIB_VER = Version(_wcs.__version__)
# NOTE: User can choose to use system wcslib instead of bundled.
def ctx_for_v71_dateref_warnings():
if _WCSLIB_VER >= Version("7.1") and _WCSLIB_VER < Version("7.3"):
ctx = pytest.warns(
wcs.FITSFixedWarning,
match=(
r"'datfix' made the change 'Set DATE-REF to '1858-11-17' from"
r" MJD-REF'\."
),
)
else:
ctx = nullcontext()
return ctx
class TestMaps:
def setup_method(self):
# get the list of the hdr files that we want to test
self._file_list = list(get_pkg_data_filenames("data/maps", pattern="*.hdr"))
def test_consistency(self):
# Check to see that we actually have the list we expect, so that we
# do not get in a situation where the list is empty or incomplete and
# the tests still seem to pass correctly.
# how many do we expect to see?
n_data_files = 28
assert len(self._file_list) == n_data_files, (
f"test_spectra has wrong number data files: found {len(self._file_list)},"
f" expected {n_data_files}"
)
def test_maps(self):
for filename in self._file_list:
# use the base name of the file, so we get more useful messages
# for failing tests.
filename = os.path.basename(filename)
# Now find the associated file in the installed wcs test directory.
header = get_pkg_data_contents(
os.path.join("data", "maps", filename), encoding="binary"
)
# finally run the test.
wcsobj = wcs.WCS(header)
world = wcsobj.wcs_pix2world([[97, 97]], 1)
assert_array_almost_equal(world, [[285.0, -66.25]], decimal=1)
pix = wcsobj.wcs_world2pix([[285.0, -66.25]], 1)
assert_array_almost_equal(pix, [[97, 97]], decimal=0)
class TestSpectra:
def setup_method(self):
self._file_list = list(get_pkg_data_filenames("data/spectra", pattern="*.hdr"))
def test_consistency(self):
# Check to see that we actually have the list we expect, so that we
# do not get in a situation where the list is empty or incomplete and
# the tests still seem to pass correctly.
# how many do we expect to see?
n_data_files = 6
assert len(self._file_list) == n_data_files, (
f"test_spectra has wrong number data files: found {len(self._file_list)},"
f" expected {n_data_files}"
)
def test_spectra(self):
for filename in self._file_list:
# use the base name of the file, so we get more useful messages
# for failing tests.
filename = os.path.basename(filename)
# Now find the associated file in the installed wcs test directory.
header = get_pkg_data_contents(
os.path.join("data", "spectra", filename), encoding="binary"
)
# finally run the test.
if _WCSLIB_VER >= Version("7.4"):
ctx = pytest.warns(
wcs.FITSFixedWarning,
match=(
r"'datfix' made the change 'Set MJD-OBS to 53925\.853472 from"
r" DATE-OBS'\."
),
)
else:
ctx = nullcontext()
with ctx:
all_wcs = wcs.find_all_wcs(header)
assert len(all_wcs) == 9
def test_fixes():
"""
From github issue #36
"""
header = get_pkg_data_contents("data/nonstandard_units.hdr", encoding="binary")
with pytest.raises(wcs.InvalidTransformError), pytest.warns(
wcs.FITSFixedWarning
) as w:
wcs.WCS(header, translate_units="dhs")
if Version("7.4") <= _WCSLIB_VER < Version("7.6"):
assert len(w) == 3
assert "'datfix' made the change 'Success'." in str(w.pop().message)
else:
assert len(w) == 2
first_wmsg = str(w[0].message)
assert "unitfix" in first_wmsg and "Hz" in first_wmsg and "M/S" in first_wmsg
assert "plane angle" in str(w[1].message) and "m/s" in str(w[1].message)
# Ignore "PV2_2 = 0.209028857410973 invalid keyvalue" warning seen on Windows.
@pytest.mark.filterwarnings(r"ignore:PV2_2")
def test_outside_sky():
"""
From github issue #107
"""
header = get_pkg_data_contents("data/outside_sky.hdr", encoding="binary")
w = wcs.WCS(header)
assert np.all(np.isnan(w.wcs_pix2world([[100.0, 500.0]], 0))) # outside sky
assert np.all(np.isnan(w.wcs_pix2world([[200.0, 200.0]], 0))) # outside sky
assert not np.any(np.isnan(w.wcs_pix2world([[1000.0, 1000.0]], 0)))
def test_pix2world():
"""
From github issue #1463
"""
    # TODO: write this to test the expected output behavior of pix2world;
    # currently this just makes sure it doesn't error out in unexpected ways
    # and checks `wcs.pc` and `result` against reference values.
filename = get_pkg_data_filename("data/sip2.fits")
with pytest.warns(wcs.FITSFixedWarning) as caught_warnings:
        # this raises a warning that is unimportant for testing pix2world here:
# FITSFixedWarning(u'The WCS transformation has more axes (2) than
# the image it is associated with (0)')
ww = wcs.WCS(filename)
# might as well monitor for changing behavior
if Version("7.4") <= _WCSLIB_VER < Version("7.6"):
assert len(caught_warnings) == 2
else:
assert len(caught_warnings) == 1
n = 3
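    # Build an (n, 2) array of pixel coordinates along the image diagonal:
    # [[0, 0], [1, 1], [2, 2]]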
pixels = (np.arange(n) * np.ones((2, n))).T
result = ww.wcs_pix2world(pixels, 0, ra_dec_order=True)
# Catch #2791
ww.wcs_pix2world(pixels[..., 0], pixels[..., 1], 0, ra_dec_order=True)
# assuming that the data of sip2.fits doesn't change
answer = np.array([[0.00024976, 0.00023018], [0.00023043, -0.00024997]])
assert np.allclose(ww.wcs.pc, answer, atol=1.0e-8)
answer = np.array(
[
[202.39265216, 47.17756518],
[202.39335826, 47.17754619],
[202.39406436, 47.1775272],
]
)
assert np.allclose(result, answer, atol=1.0e-8, rtol=1.0e-10)
def test_load_fits_path():
fits_name = get_pkg_data_filename("data/sip.fits")
with pytest.warns(wcs.FITSFixedWarning):
wcs.WCS(fits_name)
def test_dict_init():
"""
Test that WCS can be initialized with a dict-like object
"""
# Dictionary with no actual WCS, returns identity transform
with ctx_for_v71_dateref_warnings():
w = wcs.WCS({})
xp, yp = w.wcs_world2pix(41.0, 2.0, 1)
assert_array_almost_equal_nulp(xp, 41.0, 10)
assert_array_almost_equal_nulp(yp, 2.0, 10)
# Valid WCS
hdr = {
"CTYPE1": "GLON-CAR",
"CTYPE2": "GLAT-CAR",
"CUNIT1": "deg",
"CUNIT2": "deg",
"CRPIX1": 1,
"CRPIX2": 1,
"CRVAL1": 40.0,
"CRVAL2": 0.0,
"CDELT1": -0.1,
"CDELT2": 0.1,
}
if _WCSLIB_VER >= Version("7.1"):
hdr["DATEREF"] = "1858-11-17"
if _WCSLIB_VER >= Version("7.4"):
ctx = pytest.warns(
wcs.wcs.FITSFixedWarning,
match=r"'datfix' made the change 'Set MJDREF to 0\.000000 from DATEREF'\.",
)
else:
ctx = nullcontext()
with ctx:
w = wcs.WCS(hdr)
xp, yp = w.wcs_world2pix(41.0, 2.0, 0)
assert_array_almost_equal_nulp(xp, -10.0, 10)
assert_array_almost_equal_nulp(yp, 20.0, 10)
def test_extra_kwarg():
"""
Issue #444
"""
w = wcs.WCS()
with NumpyRNGContext(123456789):
data = np.random.rand(100, 2)
with pytest.raises(TypeError):
w.wcs_pix2world(data, origin=1)
def test_3d_shapes():
"""
Issue #444
"""
w = wcs.WCS(naxis=3)
with NumpyRNGContext(123456789):
data = np.random.rand(100, 3)
result = w.wcs_pix2world(data, 1)
assert result.shape == (100, 3)
result = w.wcs_pix2world(data[..., 0], data[..., 1], data[..., 2], 1)
assert len(result) == 3
def test_preserve_shape():
w = wcs.WCS(naxis=2)
x = np.random.random((2, 3, 4))
y = np.random.random((2, 3, 4))
xw, yw = w.wcs_pix2world(x, y, 1)
assert xw.shape == (2, 3, 4)
assert yw.shape == (2, 3, 4)
xp, yp = w.wcs_world2pix(x, y, 1)
assert xp.shape == (2, 3, 4)
assert yp.shape == (2, 3, 4)
def test_broadcasting():
w = wcs.WCS(naxis=2)
x = np.random.random((2, 3, 4))
y = 1
xp, yp = w.wcs_world2pix(x, y, 1)
assert xp.shape == (2, 3, 4)
assert yp.shape == (2, 3, 4)
def test_shape_mismatch():
w = wcs.WCS(naxis=2)
x = np.random.random((2, 3, 4))
y = np.random.random((3, 2, 4))
MESSAGE = r"Coordinate arrays are not broadcastable to each other"
with pytest.raises(ValueError, match=MESSAGE):
xw, yw = w.wcs_pix2world(x, y, 1)
with pytest.raises(ValueError, match=MESSAGE):
xp, yp = w.wcs_world2pix(x, y, 1)
# There are some ambiguities that need to be worked around when
# naxis == 1
w = wcs.WCS(naxis=1)
x = np.random.random((42, 1))
xw = w.wcs_pix2world(x, 1)
assert xw.shape == (42, 1)
x = np.random.random((42,))
(xw,) = w.wcs_pix2world(x, 1)
assert xw.shape == (42,)
def test_invalid_shape():
"""Issue #1395"""
MESSAGE = r"When providing two arguments, the array must be of shape [(]N, 2[)]"
w = wcs.WCS(naxis=2)
xy = np.random.random((2, 3))
with pytest.raises(ValueError, match=MESSAGE):
w.wcs_pix2world(xy, 1)
xy = np.random.random((2, 1))
with pytest.raises(ValueError, match=MESSAGE):
w.wcs_pix2world(xy, 1)
def test_warning_about_defunct_keywords():
header = get_pkg_data_contents("data/defunct_keywords.hdr", encoding="binary")
if Version("7.4") <= _WCSLIB_VER < Version("7.6"):
n_warn = 5
else:
n_warn = 4
# Make sure the warnings come out every time...
for _ in range(2):
with pytest.warns(wcs.FITSFixedWarning) as w:
wcs.WCS(header)
assert len(w) == n_warn
# 7.4 adds a fifth warning "'datfix' made the change 'Success'."
for item in w[:4]:
assert "PCi_ja" in str(item.message)
def test_warning_about_defunct_keywords_exception():
header = get_pkg_data_contents("data/defunct_keywords.hdr", encoding="binary")
with pytest.warns(wcs.FITSFixedWarning):
wcs.WCS(header)
def test_to_header_string():
# fmt: off
hdrstr = (
"WCSAXES = 2 / Number of coordinate axes ",
"CRPIX1 = 0.0 / Pixel coordinate of reference point ",
"CRPIX2 = 0.0 / Pixel coordinate of reference point ",
"CDELT1 = 1.0 / Coordinate increment at reference point ",
"CDELT2 = 1.0 / Coordinate increment at reference point ",
"CRVAL1 = 0.0 / Coordinate value at reference point ",
"CRVAL2 = 0.0 / Coordinate value at reference point ",
"LATPOLE = 90.0 / [deg] Native latitude of celestial pole ",
)
# fmt: on
if _WCSLIB_VER >= Version("7.3"):
# fmt: off
hdrstr += (
"MJDREF = 0.0 / [d] MJD of fiducial time ",
)
# fmt: on
elif _WCSLIB_VER >= Version("7.1"):
# fmt: off
hdrstr += (
"DATEREF = '1858-11-17' / ISO-8601 fiducial time ",
"MJDREFI = 0.0 / [d] MJD of fiducial time, integer part ",
"MJDREFF = 0.0 / [d] MJD of fiducial time, fractional part "
)
# fmt: on
hdrstr += ("END",)
header_string = "".join(hdrstr)
w = wcs.WCS()
h0 = fits.Header.fromstring(w.to_header_string().strip())
if "COMMENT" in h0:
del h0["COMMENT"]
if "" in h0:
del h0[""]
h1 = fits.Header.fromstring(header_string.strip())
assert dict(h0) == dict(h1)
def test_to_fits():
    if _WCSLIB_VER < Version("7.1"):
        nrec = 8
    elif _WCSLIB_VER < Version("7.3"):
        nrec = 11
    else:
        nrec = 9
w = wcs.WCS()
header_string = w.to_header()
wfits = w.to_fits()
assert isinstance(wfits, fits.HDUList)
assert isinstance(wfits[0], fits.PrimaryHDU)
assert header_string == wfits[0].header[-nrec:]
def test_to_header_warning():
fits_name = get_pkg_data_filename("data/sip.fits")
with pytest.warns(wcs.FITSFixedWarning):
x = wcs.WCS(fits_name)
with pytest.warns(AstropyWarning, match="A_ORDER") as w:
x.to_header()
assert len(w) == 1
def test_no_comments_in_header():
w = wcs.WCS()
header = w.to_header()
assert w.wcs.alt not in header
assert "COMMENT" + w.wcs.alt.strip() not in header
assert "COMMENT" not in header
wkey = "P"
header = w.to_header(key=wkey)
assert wkey not in header
assert "COMMENT" not in header
assert "COMMENT" + w.wcs.alt.strip() not in header
def test_find_all_wcs_crash():
"""
Causes a double free without a recent fix in wcslib_wrap.C
"""
with open(get_pkg_data_filename("data/too_many_pv.hdr")) as fd:
header = fd.read()
# We have to set fix=False here, because one of the fixing tasks is to
# remove redundant SCAMP distortion parameters when SIP distortion
# parameters are also present.
with pytest.raises(wcs.InvalidTransformError), pytest.warns(wcs.FITSFixedWarning):
wcs.find_all_wcs(header, fix=False)
# NOTE: Warning bubbles up from C layer during wcs.validate() and
# is hard to catch, so we just ignore it.
@pytest.mark.filterwarnings("ignore")
def test_validate():
results = wcs.validate(get_pkg_data_filename("data/validate.fits"))
results_txt = sorted({x.strip() for x in repr(results).splitlines()})
if _WCSLIB_VER >= Version("7.6"):
filename = "data/validate.7.6.txt"
elif _WCSLIB_VER >= Version("7.4"):
filename = "data/validate.7.4.txt"
elif _WCSLIB_VER >= Version("6.0"):
filename = "data/validate.6.txt"
elif _WCSLIB_VER >= Version("5.13"):
filename = "data/validate.5.13.txt"
elif _WCSLIB_VER >= Version("5.0"):
filename = "data/validate.5.0.txt"
else:
filename = "data/validate.txt"
with open(get_pkg_data_filename(filename)) as fd:
lines = fd.readlines()
assert sorted({x.strip() for x in lines}) == results_txt
@pytest.mark.filterwarnings("ignore")
def test_validate_wcs_tab():
results = wcs.validate(get_pkg_data_filename("data/tab-time-last-axis.fits"))
results_txt = sorted({x.strip() for x in repr(results).splitlines()})
assert results_txt == [
"",
"HDU 0 (PRIMARY):",
"HDU 1 (WCS-TABLE):",
"No issues.",
"WCS key ' ':",
]
def test_validate_with_2_wcses():
# From Issue #2053
with pytest.warns(AstropyUserWarning):
results = wcs.validate(get_pkg_data_filename("data/2wcses.hdr"))
assert "WCS key 'A':" in str(results)
def test_crpix_maps_to_crval():
twcs = wcs.WCS(naxis=2)
twcs.wcs.crval = [251.29, 57.58]
twcs.wcs.cdelt = [1, 1]
twcs.wcs.crpix = [507, 507]
twcs.wcs.pc = np.array([[7.7e-6, 3.3e-5], [3.7e-5, -6.8e-6]])
twcs._naxis = [1014, 1014]
twcs.wcs.ctype = ["RA---TAN-SIP", "DEC--TAN-SIP"]
a = np.array(
[
[0, 0, 5.33092692e-08, 3.73753773e-11, -2.02111473e-13],
[0, 2.44084308e-05, 2.81394789e-11, 5.17856895e-13, 0.0],
[-2.41334657e-07, 1.29289255e-10, 2.35753629e-14, 0.0, 0.0],
[-2.37162007e-10, 5.43714947e-13, 0.0, 0.0, 0.0],
[-2.81029767e-13, 0.0, 0.0, 0.0, 0.0],
]
)
b = np.array(
[
[0, 0, 2.99270374e-05, -2.38136074e-10, 7.23205168e-13],
[0, -1.71073858e-07, 6.31243431e-11, -5.16744347e-14, 0.0],
[6.95458963e-06, -3.08278961e-10, -1.75800917e-13, 0.0, 0.0],
[3.51974159e-11, 5.60993016e-14, 0.0, 0.0, 0.0],
[-5.92438525e-13, 0.0, 0.0, 0.0, 0.0],
]
)
twcs.sip = wcs.Sip(a, b, None, None, twcs.wcs.crpix)
twcs.wcs.set()
pscale = np.sqrt(wcs.utils.proj_plane_pixel_area(twcs))
    # test that CRPIX maps to CRVAL (core WCS transformation):
assert_allclose(
twcs.wcs_pix2world(*twcs.wcs.crpix, 1),
twcs.wcs.crval,
rtol=0.0,
atol=1e-6 * pscale,
)
    # test that CRPIX maps to CRVAL (full transformation, including SIP):
assert_allclose(
twcs.all_pix2world(*twcs.wcs.crpix, 1),
twcs.wcs.crval,
rtol=0.0,
atol=1e-6 * pscale,
)
def test_all_world2pix(
fname=None,
ext=0,
tolerance=1.0e-4,
origin=0,
random_npts=25000,
adaptive=False,
maxiter=20,
detect_divergence=True,
):
"""Test all_world2pix, iterative inverse of all_pix2world"""
# Open test FITS file:
if fname is None:
fname = get_pkg_data_filename("data/j94f05bgq_flt.fits")
ext = ("SCI", 1)
if not os.path.isfile(fname):
raise OSError(f"Input file '{fname:s}' to 'test_all_world2pix' not found.")
h = fits.open(fname)
w = wcs.WCS(h[ext].header, h)
h.close()
del h
crpix = w.wcs.crpix
ncoord = crpix.shape[0]
# Assume that CRPIX is at the center of the image and that the image has
# a power-of-2 number of pixels along each axis. Only use the central
# 1/64 for this testing purpose:
naxesi_l = list((7.0 / 16 * crpix).astype(int))
naxesi_u = list((9.0 / 16 * crpix).astype(int))
# Generate integer indices of pixels (image grid):
img_pix = np.dstack(
[i.flatten() for i in np.meshgrid(*map(range, naxesi_l, naxesi_u))]
)[0]
    # Generate random data (in image coordinates):
with NumpyRNGContext(123456789):
rnd_pix = np.random.rand(random_npts, ncoord)
# Scale random data to cover the central part of the image
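    # (i.e. a window roughly crpix / 4 wide, centred on crpix):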
mwidth = 2 * (crpix * 1.0 / 8)
rnd_pix = crpix - 0.5 * mwidth + (mwidth - 1) * rnd_pix
# Reference pixel coordinates in image coordinate system (CS):
test_pix = np.append(img_pix, rnd_pix, axis=0)
# Reference pixel coordinates in sky CS using forward transformation:
all_world = w.all_pix2world(test_pix, origin)
try:
runtime_begin = datetime.now()
# Apply the inverse iterative process to pixels in world coordinates
# to recover the pixel coordinates in image space.
all_pix = w.all_world2pix(
all_world,
origin,
tolerance=tolerance,
adaptive=adaptive,
maxiter=maxiter,
detect_divergence=detect_divergence,
)
runtime_end = datetime.now()
except wcs.wcs.NoConvergence as e:
runtime_end = datetime.now()
ndiv = 0
if e.divergent is not None:
ndiv = e.divergent.shape[0]
print(f"There are {ndiv} diverging solutions.")
print(f"Indices of diverging solutions:\n{e.divergent}")
print(f"Diverging solutions:\n{e.best_solution[e.divergent]}\n")
print(
"Mean radius of the diverging solutions:"
f" {np.mean(np.linalg.norm(e.best_solution[e.divergent], axis=1))}"
)
print(
"Mean accuracy of the diverging solutions:"
f" {np.mean(np.linalg.norm(e.accuracy[e.divergent], axis=1))}\n"
)
else:
print("There are no diverging solutions.")
nslow = 0
if e.slow_conv is not None:
nslow = e.slow_conv.shape[0]
print(f"There are {nslow} slowly converging solutions.")
print(f"Indices of slowly converging solutions:\n{e.slow_conv}")
print(f"Slowly converging solutions:\n{e.best_solution[e.slow_conv]}\n")
else:
print("There are no slowly converging solutions.\n")
print(
f"There are {e.best_solution.shape[0] - ndiv - nslow} converged solutions."
)
print(f"Best solutions (all points):\n{e.best_solution}")
print(f"Accuracy:\n{e.accuracy}\n")
print(
"\nFinished running 'test_all_world2pix' with errors.\n"
f"ERROR: {e.args[0]}\nRun time: {runtime_end - runtime_begin}\n"
)
raise e
# Compute differences between reference pixel coordinates and
# pixel coordinates (in image space) recovered from reference
# pixels in world coordinates:
errors = np.sqrt(np.sum(np.power(all_pix - test_pix, 2), axis=1))
meanerr = np.mean(errors)
maxerr = np.amax(errors)
print(
"\nFinished running 'test_all_world2pix'.\n"
f"Mean error = {meanerr:e} (Max error = {maxerr:e})\n"
f"Run time: {runtime_end - runtime_begin}\n"
)
assert maxerr < 2.0 * tolerance
def test_scamp_sip_distortion_parameters():
"""
Test parsing of WCS parameters with redundant SIP and SCAMP distortion
parameters.
"""
header = get_pkg_data_contents("data/validate.fits", encoding="binary")
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(header)
# Just check that this doesn't raise an exception.
w.all_pix2world(0, 0, 0)
def test_fixes2():
"""
From github issue #1854
"""
header = get_pkg_data_contents("data/nonstandard_units.hdr", encoding="binary")
with pytest.raises(wcs.InvalidTransformError):
wcs.WCS(header, fix=False)
def test_unit_normalization():
"""
From github issue #1918
"""
header = get_pkg_data_contents("data/unit.hdr", encoding="binary")
w = wcs.WCS(header)
assert w.wcs.cunit[2] == "m/s"
def test_footprint_to_file(tmp_path):
"""
From github issue #1912
"""
# Arbitrary keywords from real data
hdr = {
"CTYPE1": "RA---ZPN",
"CRUNIT1": "deg",
"CRPIX1": -3.3495999e02,
"CRVAL1": 3.185790700000e02,
"CTYPE2": "DEC--ZPN",
"CRUNIT2": "deg",
"CRPIX2": 3.0453999e03,
"CRVAL2": 4.388538000000e01,
"PV2_1": 1.0,
"PV2_3": 220.0,
"NAXIS1": 2048,
"NAXIS2": 1024,
}
w = wcs.WCS(hdr)
testfile = tmp_path / "test.txt"
w.footprint_to_file(testfile)
with open(testfile) as f:
lines = f.readlines()
assert len(lines) == 4
assert lines[2] == "ICRS\n"
assert "color=green" in lines[3]
w.footprint_to_file(testfile, coordsys="FK5", color="red")
with open(testfile) as f:
lines = f.readlines()
assert len(lines) == 4
assert lines[2] == "FK5\n"
assert "color=red" in lines[3]
with pytest.raises(ValueError):
w.footprint_to_file(testfile, coordsys="FOO")
del hdr["NAXIS1"]
del hdr["NAXIS2"]
w = wcs.WCS(hdr)
with pytest.warns(AstropyUserWarning):
w.footprint_to_file(testfile)
# Ignore the FITSFixedWarning about keyrecords following the END keyrecord being
# ignored, which comes from src/astropy_wcs.c. Only a blind catch like this
# seems to work when pytest warnings are turned into exceptions.
@pytest.mark.filterwarnings("ignore")
def test_validate_faulty_wcs():
"""
From github issue #2053
"""
h = fits.Header()
# Illegal WCS:
h["RADESYSA"] = "ICRS"
h["PV2_1"] = 1.0
hdu = fits.PrimaryHDU([[0]], header=h)
hdulist = fits.HDUList([hdu])
# Check that this doesn't raise a NameError exception
wcs.validate(hdulist)
def test_error_message():
header = get_pkg_data_contents("data/invalid_header.hdr", encoding="binary")
# make WCS transformation invalid
hdr = fits.Header.fromstring(header)
del hdr["PV?_*"]
hdr["PV1_1"] = 110
hdr["PV1_2"] = 110
hdr["PV2_1"] = -110
hdr["PV2_2"] = -110
with pytest.raises(wcs.InvalidTransformError):
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(hdr, _do_set=False)
w.all_pix2world([[536.0, 894.0]], 0)
def test_out_of_bounds():
# See #2107
header = get_pkg_data_contents("data/zpn-hole.hdr", encoding="binary")
w = wcs.WCS(header)
ra, dec = w.wcs_pix2world(110, 110, 0)
assert np.isnan(ra)
assert np.isnan(dec)
ra, dec = w.wcs_pix2world(0, 0, 0)
assert not np.isnan(ra)
assert not np.isnan(dec)
def test_calc_footprint_1():
fits = get_pkg_data_filename("data/sip.fits")
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(fits)
axes = (1000, 1051)
ref = np.array(
[
[202.39314493, 47.17753352],
[202.71885939, 46.94630488],
[202.94631893, 47.15855022],
[202.72053428, 47.37893142],
]
)
footprint = w.calc_footprint(axes=axes)
assert_allclose(footprint, ref)
def test_calc_footprint_2():
"""Test calc_footprint without distortion."""
fits = get_pkg_data_filename("data/sip.fits")
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(fits)
axes = (1000, 1051)
ref = np.array(
[
[202.39265216, 47.17756518],
[202.7469062, 46.91483312],
[203.11487481, 47.14359319],
[202.76092671, 47.40745948],
]
)
footprint = w.calc_footprint(axes=axes, undistort=False)
assert_allclose(footprint, ref)
def test_calc_footprint_3():
"""Test calc_footprint with corner of the pixel."""
w = wcs.WCS()
w.wcs.ctype = ["GLON-CAR", "GLAT-CAR"]
w.wcs.crpix = [1.5, 5.5]
w.wcs.cdelt = [-0.1, 0.1]
axes = (2, 10)
ref = np.array([[0.1, -0.5], [0.1, 0.5], [359.9, 0.5], [359.9, -0.5]])
footprint = w.calc_footprint(axes=axes, undistort=False, center=False)
assert_allclose(footprint, ref)
def test_sip():
# See #2107
header = get_pkg_data_contents("data/irac_sip.hdr", encoding="binary")
w = wcs.WCS(header)
x0, y0 = w.sip_pix2foc(200, 200, 0)
assert_allclose(72, x0, 1e-3)
assert_allclose(72, y0, 1e-3)
x1, y1 = w.sip_foc2pix(x0, y0, 0)
assert_allclose(200, x1, 1e-3)
assert_allclose(200, y1, 1e-3)
def test_sub_3d_with_sip():
# See #10527
header = get_pkg_data_contents("data/irac_sip.hdr", encoding="binary")
header = fits.Header.fromstring(header)
header["NAXIS"] = 3
header.set("NAXIS3", 64, after=header.index("NAXIS2"))
w = wcs.WCS(header, naxis=2)
assert w.naxis == 2
def test_printwcs(capsys):
"""
Just make sure that it runs
"""
h = get_pkg_data_contents("data/spectra/orion-freq-1.hdr", encoding="binary")
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(h)
w.printwcs()
captured = capsys.readouterr()
assert "WCS Keywords" in captured.out
h = get_pkg_data_contents("data/3d_cd.hdr", encoding="binary")
w = wcs.WCS(h)
w.printwcs()
captured = capsys.readouterr()
assert "WCS Keywords" in captured.out
def test_invalid_spherical():
header = """
SIMPLE = T / conforms to FITS standard
BITPIX = 8 / array data type
WCSAXES = 2 / no comment
CTYPE1 = 'RA---TAN' / TAN (gnomic) projection
CTYPE2 = 'DEC--TAN' / TAN (gnomic) projection
EQUINOX = 2000.0 / Equatorial coordinates definition (yr)
LONPOLE = 180.0 / no comment
LATPOLE = 0.0 / no comment
CRVAL1 = 16.0531567459 / RA of reference point
CRVAL2 = 23.1148929108 / DEC of reference point
CRPIX1 = 2129 / X reference pixel
CRPIX2 = 1417 / Y reference pixel
CUNIT1 = 'deg ' / X pixel scale units
CUNIT2 = 'deg ' / Y pixel scale units
CD1_1 = -0.00912247310646 / Transformation matrix
CD1_2 = -0.00250608809647 / no comment
CD2_1 = 0.00250608809647 / no comment
CD2_2 = -0.00912247310646 / no comment
IMAGEW = 4256 / Image width, in pixels.
IMAGEH = 2832 / Image height, in pixels.
"""
f = io.StringIO(header)
header = fits.Header.fromtextfile(f)
w = wcs.WCS(header)
x, y = w.wcs_world2pix(211, -26, 0)
assert np.isnan(x) and np.isnan(y)
def test_no_iteration():
"""Regression test for #3066"""
MESSAGE = "'{}' object is not iterable"
w = wcs.WCS(naxis=2)
with pytest.raises(TypeError, match=MESSAGE.format("WCS")):
iter(w)
class NewWCS(wcs.WCS):
pass
w = NewWCS(naxis=2)
with pytest.raises(TypeError, match=MESSAGE.format("NewWCS")):
iter(w)
@pytest.mark.skipif(
_wcs.__version__[0] < "5", reason="TPV only works with wcslib 5.x or later"
)
def test_sip_tpv_agreement():
sip_header = get_pkg_data_contents(
os.path.join("data", "siponly.hdr"), encoding="binary"
)
tpv_header = get_pkg_data_contents(
os.path.join("data", "tpvonly.hdr"), encoding="binary"
)
with pytest.warns(wcs.FITSFixedWarning):
w_sip = wcs.WCS(sip_header)
w_tpv = wcs.WCS(tpv_header)
assert_array_almost_equal(
w_sip.all_pix2world([w_sip.wcs.crpix], 1),
w_tpv.all_pix2world([w_tpv.wcs.crpix], 1),
)
w_sip2 = wcs.WCS(w_sip.to_header())
w_tpv2 = wcs.WCS(w_tpv.to_header())
assert_array_almost_equal(
w_sip.all_pix2world([w_sip.wcs.crpix], 1),
w_sip2.all_pix2world([w_sip.wcs.crpix], 1),
)
assert_array_almost_equal(
w_tpv.all_pix2world([w_sip.wcs.crpix], 1),
w_tpv2.all_pix2world([w_sip.wcs.crpix], 1),
)
assert_array_almost_equal(
w_sip2.all_pix2world([w_sip.wcs.crpix], 1),
w_tpv2.all_pix2world([w_tpv.wcs.crpix], 1),
)
def test_tpv_ctype_sip():
sip_header = fits.Header.fromstring(
get_pkg_data_contents(os.path.join("data", "siponly.hdr"), encoding="binary")
)
tpv_header = fits.Header.fromstring(
get_pkg_data_contents(os.path.join("data", "tpvonly.hdr"), encoding="binary")
)
sip_header.update(tpv_header)
sip_header["CTYPE1"] = "RA---TAN-SIP"
sip_header["CTYPE2"] = "DEC--TAN-SIP"
with pytest.warns(
wcs.FITSFixedWarning,
match="Removed redundant SCAMP distortion parameters "
"because SIP parameters are also present",
):
w_sip = wcs.WCS(sip_header)
assert w_sip.sip is not None
def test_tpv_ctype_tpv():
sip_header = fits.Header.fromstring(
get_pkg_data_contents(os.path.join("data", "siponly.hdr"), encoding="binary")
)
tpv_header = fits.Header.fromstring(
get_pkg_data_contents(os.path.join("data", "tpvonly.hdr"), encoding="binary")
)
sip_header.update(tpv_header)
sip_header["CTYPE1"] = "RA---TPV"
sip_header["CTYPE2"] = "DEC--TPV"
with pytest.warns(
wcs.FITSFixedWarning,
match="Removed redundant SIP distortion parameters "
"because CTYPE explicitly specifies TPV distortions",
):
w_sip = wcs.WCS(sip_header)
assert w_sip.sip is None
def test_tpv_ctype_tan():
sip_header = fits.Header.fromstring(
get_pkg_data_contents(os.path.join("data", "siponly.hdr"), encoding="binary")
)
tpv_header = fits.Header.fromstring(
get_pkg_data_contents(os.path.join("data", "tpvonly.hdr"), encoding="binary")
)
sip_header.update(tpv_header)
sip_header["CTYPE1"] = "RA---TAN"
sip_header["CTYPE2"] = "DEC--TAN"
with pytest.warns(
wcs.FITSFixedWarning,
match="Removed redundant SIP distortion parameters "
"because SCAMP' PV distortions are also present",
):
w_sip = wcs.WCS(sip_header)
assert w_sip.sip is None
def test_car_sip_with_pv():
# https://github.com/astropy/astropy/issues/14255
header_dict = {
"SIMPLE": True,
"BITPIX": -32,
"NAXIS": 2,
"NAXIS1": 1024,
"NAXIS2": 1024,
"CRPIX1": 512.0,
"CRPIX2": 512.0,
"CDELT1": 0.01,
"CDELT2": 0.01,
"CRVAL1": 120.0,
"CRVAL2": 29.0,
"CTYPE1": "RA---CAR-SIP",
"CTYPE2": "DEC--CAR-SIP",
"PV1_1": 120.0,
"PV1_2": 29.0,
"PV1_0": 1.0,
"A_ORDER": 2,
"A_2_0": 5.0e-4,
"B_ORDER": 2,
"B_2_0": 5.0e-4,
}
w = wcs.WCS(header_dict)
assert w.sip is not None
assert w.wcs.get_pv() == [(1, 1, 120.0), (1, 2, 29.0), (1, 0, 1.0)]
assert np.allclose(
w.all_pix2world(header_dict["CRPIX1"], header_dict["CRPIX2"], 1),
[header_dict["CRVAL1"], header_dict["CRVAL2"]],
)
@pytest.mark.skipif(
_wcs.__version__[0] < "5", reason="TPV only works with wcslib 5.x or later"
)
def test_tpv_copy():
# See #3904
tpv_header = get_pkg_data_contents(
os.path.join("data", "tpvonly.hdr"), encoding="binary"
)
with pytest.warns(wcs.FITSFixedWarning):
w_tpv = wcs.WCS(tpv_header)
ra, dec = w_tpv.wcs_pix2world([0, 100, 200], [0, -100, 200], 0)
assert ra[0] != ra[1] and ra[1] != ra[2]
assert dec[0] != dec[1] and dec[1] != dec[2]
def test_hst_wcs():
path = get_pkg_data_filename("data/dist_lookup.fits.gz")
with fits.open(path) as hdulist:
# wcslib will complain about the distortion parameters if they
# weren't correctly deleted from the header
w = wcs.WCS(hdulist[1].header, hdulist)
# Check pixel scale and area
assert_quantity_allclose(
w.proj_plane_pixel_scales(), [1.38484378e-05, 1.39758488e-05] * u.deg
)
assert_quantity_allclose(
w.proj_plane_pixel_area(), 1.93085492e-10 * (u.deg * u.deg)
)
# Exercise the main transformation functions, mainly just for
# coverage
w.p4_pix2foc([0, 100, 200], [0, -100, 200], 0)
w.det2im([0, 100, 200], [0, -100, 200], 0)
w.cpdis1 = w.cpdis1
w.cpdis2 = w.cpdis2
w.det2im1 = w.det2im1
w.det2im2 = w.det2im2
w.sip = w.sip
w.cpdis1.cdelt = w.cpdis1.cdelt
w.cpdis1.crpix = w.cpdis1.crpix
w.cpdis1.crval = w.cpdis1.crval
w.cpdis1.data = w.cpdis1.data
assert w.sip.a_order == 4
assert w.sip.b_order == 4
assert w.sip.ap_order == 0
assert w.sip.bp_order == 0
assert_array_equal(w.sip.crpix, [2048.0, 1024.0])
wcs.WCS(hdulist[1].header, hdulist)
def test_cpdis_comments():
path = get_pkg_data_filename("data/dist_lookup.fits.gz")
f = fits.open(path)
w = wcs.WCS(f[1].header, f)
hdr = w.to_fits()[0].header
f.close()
wcscards = list(hdr["CPDIS*"].cards) + list(hdr["DP*"].cards)
wcsdict = {k: (v, c) for k, v, c in wcscards}
refcards = [
("CPDIS1", "LOOKUP", "Prior distortion function type"),
("DP1.EXTVER", 1.0, "Version number of WCSDVARR extension"),
("DP1.NAXES", 2.0, "Number of independent variables in CPDIS function"),
("DP1.AXIS.1", 1.0, "Axis number of the 1st variable in a CPDIS function"),
("DP1.AXIS.2", 2.0, "Axis number of the 2nd variable in a CPDIS function"),
("CPDIS2", "LOOKUP", "Prior distortion function type"),
("DP2.EXTVER", 2.0, "Version number of WCSDVARR extension"),
("DP2.NAXES", 2.0, "Number of independent variables in CPDIS function"),
("DP2.AXIS.1", 1.0, "Axis number of the 1st variable in a CPDIS function"),
("DP2.AXIS.2", 2.0, "Axis number of the 2nd variable in a CPDIS function"),
]
assert len(wcsdict) == len(refcards)
for k, v, c in refcards:
assert wcsdict[k] == (v, c)
def test_d2im_comments():
path = get_pkg_data_filename("data/ie6d07ujq_wcs.fits")
f = fits.open(path)
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(f[0].header, f)
f.close()
wcscards = list(w.to_fits()[0].header["D2IM*"].cards)
wcsdict = {k: (v, c) for k, v, c in wcscards}
refcards = [
("D2IMDIS1", "LOOKUP", "Detector to image correction type"),
("D2IM1.EXTVER", 1.0, "Version number of WCSDVARR extension"),
("D2IM1.NAXES", 2.0, "Number of independent variables in D2IM function"),
("D2IM1.AXIS.1", 1.0, "Axis number of the 1st variable in a D2IM function"),
("D2IM1.AXIS.2", 2.0, "Axis number of the 2nd variable in a D2IM function"),
("D2IMDIS2", "LOOKUP", "Detector to image correction type"),
("D2IM2.EXTVER", 2.0, "Version number of WCSDVARR extension"),
("D2IM2.NAXES", 2.0, "Number of independent variables in D2IM function"),
("D2IM2.AXIS.1", 1.0, "Axis number of the 1st variable in a D2IM function"),
("D2IM2.AXIS.2", 2.0, "Axis number of the 2nd variable in a D2IM function"),
# ('D2IMERR1', 0.049, 'Maximum error of D2IM correction for axis 1'),
# ('D2IMERR2', 0.035, 'Maximum error of D2IM correction for axis 2'),
# ('D2IMEXT', 'iref$y7b1516hi_d2i.fits', ''),
]
assert len(wcsdict) == len(refcards)
for k, v, c in refcards:
assert wcsdict[k] == (v, c)
def test_sip_broken():
# This header caused wcslib to segfault because it has a SIP
# specification in a non-default keyword
hdr = get_pkg_data_contents("data/sip-broken.hdr")
wcs.WCS(hdr)
def test_no_truncate_crval():
"""
Regression test for https://github.com/astropy/astropy/issues/4612
"""
w = wcs.WCS(naxis=3)
w.wcs.crval = [50, 50, 2.12345678e11]
w.wcs.cdelt = [1e-3, 1e-3, 1e8]
w.wcs.ctype = ["RA---TAN", "DEC--TAN", "FREQ"]
w.wcs.set()
header = w.to_header()
for ii in range(3):
assert header[f"CRVAL{ii + 1}"] == w.wcs.crval[ii]
assert header[f"CDELT{ii + 1}"] == w.wcs.cdelt[ii]
def test_no_truncate_crval_try2():
"""
Regression test for https://github.com/astropy/astropy/issues/4612
"""
w = wcs.WCS(naxis=3)
w.wcs.crval = [50, 50, 2.12345678e11]
w.wcs.cdelt = [1e-5, 1e-5, 1e5]
w.wcs.ctype = ["RA---SIN", "DEC--SIN", "FREQ"]
w.wcs.cunit = ["deg", "deg", "Hz"]
w.wcs.crpix = [1, 1, 1]
w.wcs.restfrq = 2.34e11
w.wcs.set()
header = w.to_header()
for ii in range(3):
assert header[f"CRVAL{ii + 1}"] == w.wcs.crval[ii]
assert header[f"CDELT{ii + 1}"] == w.wcs.cdelt[ii]
def test_no_truncate_crval_p17():
"""
Regression test for https://github.com/astropy/astropy/issues/5162
"""
w = wcs.WCS(naxis=2)
w.wcs.crval = [50.1234567890123456, 50.1234567890123456]
w.wcs.cdelt = [1e-3, 1e-3]
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
w.wcs.set()
header = w.to_header()
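    # By default wcslib writes CRVAL with fewer significant figures, so the
    # values read back differ from the originals; WCSHDO_P17 requests
    # 17 significant figures, which preserves them exactly.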
assert header["CRVAL1"] != w.wcs.crval[0]
assert header["CRVAL2"] != w.wcs.crval[1]
header = w.to_header(relax=wcs.WCSHDO_P17)
assert header["CRVAL1"] == w.wcs.crval[0]
assert header["CRVAL2"] == w.wcs.crval[1]
def test_no_truncate_using_compare():
"""
Regression test for https://github.com/astropy/astropy/issues/4612
This one uses WCS.wcs.compare and some slightly different values
"""
w = wcs.WCS(naxis=3)
w.wcs.crval = [2.409303333333e02, 50, 2.12345678e11]
w.wcs.cdelt = [1e-3, 1e-3, 1e8]
w.wcs.ctype = ["RA---TAN", "DEC--TAN", "FREQ"]
w.wcs.set()
w2 = wcs.WCS(w.to_header())
w.wcs.compare(w2.wcs)
def test_passing_ImageHDU():
"""
Passing ImageHDU or PrimaryHDU and comparing it with
wcs initialized from header. For #4493.
"""
path = get_pkg_data_filename("data/validate.fits")
with fits.open(path) as hdulist:
with pytest.warns(wcs.FITSFixedWarning):
wcs_hdu = wcs.WCS(hdulist[0])
wcs_header = wcs.WCS(hdulist[0].header)
assert wcs_hdu.wcs.compare(wcs_header.wcs)
wcs_hdu = wcs.WCS(hdulist[1])
wcs_header = wcs.WCS(hdulist[1].header)
assert wcs_hdu.wcs.compare(wcs_header.wcs)
def test_inconsistent_sip():
"""
Test for #4814
"""
hdr = get_pkg_data_contents("data/sip-broken.hdr")
ctx = ctx_for_v71_dateref_warnings()
with ctx:
w = wcs.WCS(hdr)
with pytest.warns(AstropyWarning):
newhdr = w.to_header(relax=None)
# CTYPE should not include "-SIP" if relax is None
with ctx:
wnew = wcs.WCS(newhdr)
assert all(not ctyp.endswith("-SIP") for ctyp in wnew.wcs.ctype)
newhdr = w.to_header(relax=False)
assert "A_0_2" not in newhdr
# CTYPE should not include "-SIP" if relax is False
with ctx:
wnew = wcs.WCS(newhdr)
assert all(not ctyp.endswith("-SIP") for ctyp in wnew.wcs.ctype)
with pytest.warns(AstropyWarning):
newhdr = w.to_header(key="C")
assert "A_0_2" not in newhdr
# Test writing header with a different key
with ctx:
wnew = wcs.WCS(newhdr, key="C")
assert all(not ctyp.endswith("-SIP") for ctyp in wnew.wcs.ctype)
with pytest.warns(AstropyWarning):
newhdr = w.to_header(key=" ")
# Test writing a primary WCS to header
with ctx:
wnew = wcs.WCS(newhdr)
assert all(not ctyp.endswith("-SIP") for ctyp in wnew.wcs.ctype)
# Test that "-SIP" is kept into CTYPE if relax=True and
# "-SIP" was in the original header
newhdr = w.to_header(relax=True)
with ctx:
wnew = wcs.WCS(newhdr)
assert all(ctyp.endswith("-SIP") for ctyp in wnew.wcs.ctype)
assert "A_0_2" in newhdr
# Test that SIP coefficients are also written out.
assert wnew.sip is not None
# ######### broken header ###########
# Test that "-SIP" is added to CTYPE if relax=True and
# "-SIP" was not in the original header but SIP coefficients
# are present.
with ctx:
w = wcs.WCS(hdr)
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
newhdr = w.to_header(relax=True)
with ctx:
wnew = wcs.WCS(newhdr)
assert all(ctyp.endswith("-SIP") for ctyp in wnew.wcs.ctype)
def test_bounds_check():
"""Test for #4957"""
w = wcs.WCS(naxis=2)
w.wcs.ctype = ["RA---CAR", "DEC--CAR"]
w.wcs.cdelt = [10, 10]
w.wcs.crval = [-90, 90]
w.wcs.crpix = [1, 1]
w.wcs.bounds_check(False, False)
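    # With bounds checking disabled, pixels outside the valid projection
    # region still return numeric world coordinates instead of NaN.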
ra, dec = w.wcs_pix2world(300, 0, 0)
assert_allclose(ra, -180)
assert_allclose(dec, -30)
def test_naxis():
w = wcs.WCS(naxis=2)
w.wcs.crval = [1, 1]
w.wcs.cdelt = [0.1, 0.1]
w.wcs.crpix = [1, 1]
w._naxis = [1000, 500]
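    # pixel_shape follows the (nx, ny) FITS axis order, while array_shape is
    # the reversed (ny, nx) numpy order.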
assert w.pixel_shape == (1000, 500)
assert w.array_shape == (500, 1000)
w.pixel_shape = (99, 59)
assert w._naxis == [99, 59]
w.array_shape = (45, 23)
assert w._naxis == [23, 45]
assert w.pixel_shape == (23, 45)
w.pixel_shape = None
assert w.pixel_bounds is None
def test_sip_with_altkey():
"""
Test that when creating a WCS object using a key, CTYPE with
that key is looked at and not the primary CTYPE.
fix for #5443.
"""
with fits.open(get_pkg_data_filename("data/sip.fits")) as f:
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(f[0].header)
# create a header with two WCSs.
h1 = w.to_header(relax=True, key="A")
h2 = w.to_header(relax=False)
h1["CTYPE1A"] = "RA---SIN-SIP"
h1["CTYPE2A"] = "DEC--SIN-SIP"
h1.update(h2)
with ctx_for_v71_dateref_warnings():
w = wcs.WCS(h1, key="A")
assert (w.wcs.ctype == np.array(["RA---SIN-SIP", "DEC--SIN-SIP"])).all()
def test_to_fits_1():
"""
Test to_fits() with LookupTable distortion.
"""
fits_name = get_pkg_data_filename("data/dist.fits")
with pytest.warns(AstropyDeprecationWarning):
w = wcs.WCS(fits_name)
wfits = w.to_fits()
assert isinstance(wfits, fits.HDUList)
assert isinstance(wfits[0], fits.PrimaryHDU)
assert isinstance(wfits[1], fits.ImageHDU)
def test_keyedsip():
"""
Test sip reading with extra key.
"""
hdr_name = get_pkg_data_filename("data/sip-broken.hdr")
header = fits.Header.fromfile(hdr_name)
del header["CRPIX1"]
del header["CRPIX2"]
w = wcs.WCS(header=header, key="A")
assert isinstance(w.sip, wcs.Sip)
assert w.sip.crpix[0] == 2048
assert w.sip.crpix[1] == 1026
def test_zero_size_input():
with fits.open(get_pkg_data_filename("data/sip.fits")) as f:
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(f[0].header)
inp = np.zeros((0, 2))
assert_array_equal(inp, w.all_pix2world(inp, 0))
assert_array_equal(inp, w.all_world2pix(inp, 0))
inp = [], [1]
result = w.all_pix2world([], [1], 0)
assert_array_equal(inp[0], result[0])
assert_array_equal(inp[1], result[1])
result = w.all_world2pix([], [1], 0)
assert_array_equal(inp[0], result[0])
assert_array_equal(inp[1], result[1])
def test_scalar_inputs():
"""
Issue #7845
"""
wcsobj = wcs.WCS(naxis=1)
result = wcsobj.all_pix2world(2, 1)
assert_array_equal(result, [np.array(2.0)])
assert result[0].shape == ()
result = wcsobj.all_pix2world([2], 1)
assert_array_equal(result, [np.array([2.0])])
assert result[0].shape == (1,)
# Ignore RuntimeWarning raised on s390.
@pytest.mark.filterwarnings("ignore:.*invalid value encountered in.*")
def test_footprint_contains():
"""
Test WCS.footprint_contains(skycoord)
"""
header = """
WCSAXES = 2 / Number of coordinate axes
CRPIX1 = 1045.0 / Pixel coordinate of reference point
CRPIX2 = 1001.0 / Pixel coordinate of reference point
PC1_1 = -0.00556448550786 / Coordinate transformation matrix element
PC1_2 = -0.001042120133257 / Coordinate transformation matrix element
PC2_1 = 0.001181477028705 / Coordinate transformation matrix element
PC2_2 = -0.005590809742987 / Coordinate transformation matrix element
CDELT1 = 1.0 / [deg] Coordinate increment at reference point
CDELT2 = 1.0 / [deg] Coordinate increment at reference point
CUNIT1 = 'deg' / Units of coordinate increment and value
CUNIT2 = 'deg' / Units of coordinate increment and value
CTYPE1 = 'RA---TAN' / TAN (gnomonic) projection + SIP distortions
CTYPE2 = 'DEC--TAN' / TAN (gnomonic) projection + SIP distortions
CRVAL1 = 250.34971683647 / [deg] Coordinate value at reference point
CRVAL2 = 2.2808772582495 / [deg] Coordinate value at reference point
LONPOLE = 180.0 / [deg] Native longitude of celestial pole
LATPOLE = 2.2808772582495 / [deg] Native latitude of celestial pole
RADESYS = 'ICRS' / Equatorial coordinate system
MJD-OBS = 58612.339199259 / [d] MJD of observation matching DATE-OBS
DATE-OBS= '2019-05-09T08:08:26.816Z' / ISO-8601 observation date matching MJD-OB
NAXIS = 2 / NAXIS
NAXIS1 = 2136 / length of first array dimension
NAXIS2 = 2078 / length of second array dimension
"""
header = fits.Header.fromstring(header.strip(), "\n")
test_wcs = wcs.WCS(header)
hasCoord = test_wcs.footprint_contains(SkyCoord(254, 2, unit="deg"))
assert hasCoord
hasCoord = test_wcs.footprint_contains(SkyCoord(240, 2, unit="deg"))
assert not hasCoord
hasCoord = test_wcs.footprint_contains(SkyCoord(24, 2, unit="deg"))
assert not hasCoord
def test_cunit():
# Initializing WCS
w1 = wcs.WCS(naxis=2)
w2 = wcs.WCS(naxis=2)
w3 = wcs.WCS(naxis=2)
w4 = wcs.WCS(naxis=2)
# Initializing the values of cunit
w1.wcs.cunit = ["deg", "m/s"]
w2.wcs.cunit = ["km/h", "km/h"]
w3.wcs.cunit = ["deg", "m/s"]
w4.wcs.cunit = ["deg", "deg"]
# Equality checking a cunit with itself
assert w1.wcs.cunit == w1.wcs.cunit
assert not w1.wcs.cunit != w1.wcs.cunit
# Equality checking of two different cunit object having same values
assert w1.wcs.cunit == w3.wcs.cunit
assert not w1.wcs.cunit != w3.wcs.cunit
# Equality checking of two different cunit object having the same first unit
# but different second unit (see #9154)
assert not w1.wcs.cunit == w4.wcs.cunit
assert w1.wcs.cunit != w4.wcs.cunit
# Inequality checking of two different cunit object having different values
assert not w1.wcs.cunit == w2.wcs.cunit
assert w1.wcs.cunit != w2.wcs.cunit
# Inequality checking of cunit with a list of literals
assert not w1.wcs.cunit == [1, 2, 3]
assert w1.wcs.cunit != [1, 2, 3]
# Inequality checking with some characters
assert not w1.wcs.cunit == ["a", "b", "c"]
assert w1.wcs.cunit != ["a", "b", "c"]
# Comparison is not implemented TypeError will raise
with pytest.raises(TypeError):
w1.wcs.cunit < w2.wcs.cunit # noqa: B015
class TestWcsWithTime:
def setup_method(self):
if _WCSLIB_VER >= Version("7.1"):
fname = get_pkg_data_filename("data/header_with_time_wcslib71.fits")
else:
fname = get_pkg_data_filename("data/header_with_time.fits")
self.header = fits.Header.fromfile(fname)
with pytest.warns(wcs.FITSFixedWarning):
self.w = wcs.WCS(self.header, key="A")
    def test_keywords2wcsprm(self):
"""Make sure Wcsprm is populated correctly from the header."""
ctype = [self.header[val] for val in self.header["CTYPE*"]]
crval = [self.header[val] for val in self.header["CRVAL*"]]
crpix = [self.header[val] for val in self.header["CRPIX*"]]
cdelt = [self.header[val] for val in self.header["CDELT*"]]
cunit = [self.header[val] for val in self.header["CUNIT*"]]
assert list(self.w.wcs.ctype) == ctype
time_axis_code = 4000 if _WCSLIB_VER >= Version("7.9") else 0
assert list(self.w.wcs.axis_types) == [2200, 2201, 3300, time_axis_code]
assert_allclose(self.w.wcs.crval, crval)
assert_allclose(self.w.wcs.crpix, crpix)
assert_allclose(self.w.wcs.cdelt, cdelt)
assert list(self.w.wcs.cunit) == cunit
naxis = self.w.naxis
assert naxis == 4
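        # Reconstruct the expected PC matrix from the header; PCi_jA keywords
        # that are absent default to the identity (1 on the diagonal, 0 elsewhere).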
pc = np.zeros((naxis, naxis), dtype=np.float64)
for i in range(1, 5):
for j in range(1, 5):
if i == j:
pc[i - 1, j - 1] = self.header.get(f"PC{i}_{j}A", 1)
else:
pc[i - 1, j - 1] = self.header.get(f"PC{i}_{j}A", 0)
assert_allclose(self.w.wcs.pc, pc)
char_keys = [
"timesys",
"trefpos",
"trefdir",
"plephem",
"timeunit",
"dateref",
"dateobs",
"datebeg",
"dateavg",
"dateend",
]
for key in char_keys:
assert getattr(self.w.wcs, key) == self.header.get(key, "")
num_keys = [
"mjdref",
"mjdobs",
"mjdbeg",
"mjdend",
"jepoch",
"bepoch",
"tstart",
"tstop",
"xposure",
"timsyer",
"timrder",
"timedel",
"timepixr",
"timeoffs",
"telapse",
"czphs",
"cperi",
]
for key in num_keys:
if key.upper() == "MJDREF":
hdrv = [
self.header.get("MJDREFIA", np.nan),
self.header.get("MJDREFFA", np.nan),
]
else:
hdrv = self.header.get(key, np.nan)
assert_allclose(getattr(self.w.wcs, key), hdrv)
def test_transforms(self):
assert_allclose(self.w.all_pix2world(*self.w.wcs.crpix, 1), self.w.wcs.crval)
def test_invalid_coordinate_masking():
# Regression test for an issue which caused all coordinates to be set to NaN
# after a transformation rather than just the invalid ones as reported by
# WCSLIB. A specific example of this is that when considering an all-sky
# spectral cube with a spectral axis that is not correlated with the sky
# axes, if transforming pixel coordinates that did not fall 'in' the sky,
# the spectral world value was also masked even though that coordinate
# was valid.
w = wcs.WCS(naxis=3)
w.wcs.ctype = "VELO_LSR", "GLON-CAR", "GLAT-CAR"
w.wcs.crval = -20, 0, 0
w.wcs.crpix = 1, 1441, 241
w.wcs.cdelt = 1.3, -0.125, 0.125
px = [-10, -10, 20]
py = [-10, 10, 20]
pz = [-10, 10, 20]
wx, wy, wz = w.wcs_pix2world(px, py, pz, 0)
# Before fixing this, wx used to return np.nan for the first element
assert_allclose(wx, [-33, -33, 6])
assert_allclose(wy, [np.nan, 178.75, 177.5])
assert_allclose(wz, [np.nan, -28.75, -27.5])
def test_no_pixel_area():
w = wcs.WCS(naxis=3)
# Pixel area cannot be computed
with pytest.raises(ValueError, match="Pixel area is defined only for 2D pixels"):
w.proj_plane_pixel_area()
# Pixel scales still possible
assert_quantity_allclose(w.proj_plane_pixel_scales(), 1)
def test_distortion_header(tmp_path):
"""
Test that plate distortion model is correctly described by `wcs.to_header()`
and preserved when creating a Cutout2D from the image, writing it to FITS,
and reading it back from the file.
"""
path = get_pkg_data_filename("data/dss.14.29.56-62.41.05.fits.gz")
cen = np.array((50, 50))
siz = np.array((20, 20))
with fits.open(path) as hdulist:
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(hdulist[0].header)
cut = Cutout2D(hdulist[0].data, position=cen, size=siz, wcs=w)
# This converts the DSS plate solution model with AMD[XY]n coefficients into a
# Template Polynomial Distortion model (TPD.FWD.n coefficients);
# not testing explicitly for the header keywords here.
if _WCSLIB_VER < Version("7.4"):
with pytest.warns(
AstropyWarning, match="WCS contains a TPD distortion model in CQDIS"
):
w0 = wcs.WCS(w.to_header_string())
with pytest.warns(
AstropyWarning, match="WCS contains a TPD distortion model in CQDIS"
):
w1 = wcs.WCS(cut.wcs.to_header_string())
if _WCSLIB_VER >= Version("7.1"):
pytest.xfail("TPD coefficients incomplete with WCSLIB >= 7.1 < 7.4")
else:
w0 = wcs.WCS(w.to_header_string())
w1 = wcs.WCS(cut.wcs.to_header_string())
assert w.pixel_to_world(0, 0).separation(w0.pixel_to_world(0, 0)) < 1.0e-3 * u.mas
assert w.pixel_to_world(*cen).separation(w0.pixel_to_world(*cen)) < 1.0e-3 * u.mas
assert (
w.pixel_to_world(*cen).separation(w1.pixel_to_world(*(siz / 2)))
< 1.0e-3 * u.mas
)
cutfile = tmp_path / "cutout.fits"
fits.writeto(cutfile, cut.data, cut.wcs.to_header())
with fits.open(cutfile) as hdulist:
w2 = wcs.WCS(hdulist[0].header)
assert (
w.pixel_to_world(*cen).separation(w2.pixel_to_world(*(siz / 2)))
< 1.0e-3 * u.mas
)
def test_pixlist_wcs_colsel():
"""
Test selection of a specific pixel list WCS using ``colsel``. See #11412.
"""
hdr_file = get_pkg_data_filename("data/chandra-pixlist-wcs.hdr")
hdr = fits.Header.fromtextfile(hdr_file)
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(hdr, keysel=["image", "pixel"], colsel=[11, 12])
assert w.naxis == 2
assert list(w.wcs.ctype) == ["RA---TAN", "DEC--TAN"]
assert np.allclose(w.wcs.crval, [229.38051931869, -58.81108068885])
assert np.allclose(w.wcs.pc, [[1, 0], [0, 1]])
assert np.allclose(w.wcs.cdelt, [-0.00013666666666666, 0.00013666666666666])
assert np.allclose(w.wcs.lonpole, 180.0)
@pytest.mark.skipif(
_WCSLIB_VER < Version("7.8"),
reason="TIME axis extraction only works with wcslib 7.8 or later",
)
def test_time_axis_selection():
w = wcs.WCS(naxis=3)
w.wcs.ctype = ["RA---TAN", "DEC--TAN", "TIME"]
w.wcs.set()
assert list(w.sub([wcs.WCSSUB_TIME]).wcs.ctype) == ["TIME"]
assert (
w.wcs_pix2world([[1, 2, 3]], 0)[0, 2]
== w.sub([wcs.WCSSUB_TIME]).wcs_pix2world([[3]], 0)[0, 0]
)
@pytest.mark.skipif(
_WCSLIB_VER < Version("7.8"),
reason="TIME axis extraction only works with wcslib 7.8 or later",
)
def test_temporal():
w = wcs.WCS(naxis=3)
w.wcs.ctype = ["RA---TAN", "DEC--TAN", "TIME"]
w.wcs.set()
assert w.has_temporal
assert w.sub([wcs.WCSSUB_TIME]).is_temporal
assert (
w.wcs_pix2world([[1, 2, 3]], 0)[0, 2]
== w.temporal.wcs_pix2world([[3]], 0)[0, 0]
)
def test_swapaxes_same_val_roundtrip():
w = wcs.WCS(naxis=3)
w.wcs.ctype = ["RA---TAN", "DEC--TAN", "FREQ"]
w.wcs.crpix = [32.5, 16.5, 1.0]
w.wcs.crval = [5.63, -72.05, 1.0]
w.wcs.pc = [[5.9e-06, 1.3e-05, 0.0], [-1.2e-05, 5.0e-06, 0.0], [0.0, 0.0, 1.0]]
w.wcs.cdelt = [1.0, 1.0, 1.0]
w.wcs.set()
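    # WCS.sub() takes 1-based (FITS) axis numbers; axes_order0 is the
    # corresponding 0-based ordering used for numpy indexing below.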
axes_order = [3, 2, 1]
axes_order0 = [i - 1 for i in axes_order]
ws = w.sub(axes_order)
imcoord = np.array([3, 5, 7])
imcoords = imcoord[axes_order0]
val_ref = w.wcs_pix2world([imcoord], 0)[0]
val_swapped = ws.wcs_pix2world([imcoords], 0)[0]
# check original axis and swapped give same results
assert np.allclose(val_ref[axes_order0], val_swapped, rtol=0, atol=1e-8)
# check round-tripping:
assert np.allclose(w.wcs_world2pix([val_ref], 0)[0], imcoord, rtol=0, atol=1e-8)
|
9ee1f6cf49de8a085849fcb455b047be2b2f48a1fcf57bae0df09c2eec02ee98 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from copy import deepcopy
import numpy as np
import pytest
from packaging.version import Version
from astropy import wcs
from astropy.io import fits
from astropy.utils.data import get_pkg_data_filename
from astropy.wcs import _wcs
from .helper import SimModelTAB
_WCSLIB_VER = Version(_wcs.__version__)
def test_2d_spatial_tab_roundtrip(tab_wcs_2di):
nx, ny = tab_wcs_2di.pixel_shape
# generate "random" test coordinates:
np.random.seed(1)
xy = 0.51 + [nx + 0.99, ny + 0.99] * np.random.random((100, 2))
rd = tab_wcs_2di.wcs_pix2world(xy, 1)
xy_roundtripped = tab_wcs_2di.wcs_world2pix(rd, 1)
m = np.logical_and(*(np.isfinite(xy_roundtripped).T))
assert np.allclose(xy[m], xy_roundtripped[m], rtol=0, atol=1e-7)
def test_2d_spatial_tab_vs_model():
nx = 150
ny = 200
model = SimModelTAB(nx=nx, ny=ny)
# generate FITS HDU list:
hdulist = model.hdulist
# create WCS object:
w = wcs.WCS(hdulist[0].header, hdulist)
# generate "random" test coordinates:
np.random.seed(1)
xy = 0.51 + [nx + 0.99, ny + 0.99] * np.random.random((100, 2))
rd = w.wcs_pix2world(xy, 1)
rd_model = model.fwd_eval(xy)
assert np.allclose(rd, rd_model, rtol=0, atol=1e-7)
@pytest.mark.skipif(
_WCSLIB_VER < Version("7.6"),
reason="Only in WCSLIB 7.6 a 1D -TAB axis roundtrips unless first axis",
)
def test_mixed_celest_and_1d_tab_roundtrip():
# Tests WCS roundtripping for the case when there is one -TAB axis and
# this axis is not the first axis. This tests a bug fixed in WCSLIB 7.6.
filename = get_pkg_data_filename("data/tab-time-last-axis.fits")
with fits.open(filename) as hdul:
w = wcs.WCS(hdul[0].header, hdul)
pts = np.random.random((10, 3)) * [[2047, 2047, 127]]
assert np.allclose(pts, w.wcs_world2pix(w.wcs_pix2world(pts, 0), 0))
@pytest.mark.skipif(
_WCSLIB_VER < Version("7.8"),
reason="Requires WCSLIB >= 7.8 for swapping -TAB axes to work.",
)
def test_wcstab_swapaxes():
# Crash on deepcopy of swapped -TAB axes reported in #13036.
# Fixed in #13063.
filename = get_pkg_data_filename("data/tab-time-last-axis.fits")
with fits.open(filename) as hdul:
w = wcs.WCS(hdul[0].header, hdul)
w.wcs.ctype[-1] = "FREQ-TAB"
w.wcs.set()
wswp = w.swapaxes(2, 0)
deepcopy(wswp)
@pytest.mark.skipif(
_WCSLIB_VER < Version("7.8"),
reason="Requires WCSLIB >= 7.8 for swapping -TAB axes to work.",
)
@pytest.mark.xfail(
Version("7.8") <= _WCSLIB_VER < Version("7.10"),
reason="Requires WCSLIB >= 7.10 for swapped -TAB axes to produce same results.",
)
def test_wcstab_swapaxes_same_val_roundtrip():
filename = get_pkg_data_filename("data/tab-time-last-axis.fits")
axes_order = [3, 2, 1]
axes_order0 = [i - 1 for i in axes_order]
with fits.open(filename) as hdul:
w = wcs.WCS(hdul[0].header, hdul)
w.wcs.ctype[-1] = "FREQ-TAB"
w.wcs.set()
ws = w.sub(axes_order)
imcoord = np.array([3, 5, 7])
imcoords = imcoord[axes_order0]
val_ref = w.wcs_pix2world([imcoord], 0)[0]
val_swapped = ws.wcs_pix2world([imcoords], 0)[0]
# check original axis and swapped give same results
assert np.allclose(val_ref[axes_order0], val_swapped, rtol=0, atol=1e-8)
# check round-tripping:
assert np.allclose(w.wcs_world2pix([val_ref], 0)[0], imcoord, rtol=0, atol=1e-8)
|
3204afded05b92a2385f6961cd0c2b36879da369793e52640ebd77eb3f416b84 | # Note that we test the main astropy.wcs.WCS class directly rather than testing
# the mix-in class on its own (since it's not functional without being used as
# a mix-in)
import warnings
from itertools import product
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal, assert_equal
from packaging.version import Version
from astropy import units as u
from astropy.coordinates import (
FK5,
ICRS,
ITRS,
EarthLocation,
Galactic,
SkyCoord,
SpectralCoord,
StokesCoord,
)
from astropy.io import fits
from astropy.io.fits import Header
from astropy.io.fits.verify import VerifyWarning
from astropy.tests.helper import assert_quantity_allclose
from astropy.time import Time
from astropy.units import Quantity
from astropy.units.core import UnitsWarning
from astropy.utils import iers
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyUserWarning
from astropy.wcs._wcs import __version__ as wcsver
from astropy.wcs.wcs import WCS, FITSFixedWarning, NoConvergence, Sip
from astropy.wcs.wcsapi.fitswcs import VELOCITY_FRAMES, custom_ctype_to_ucd_mapping
###############################################################################
# The following example is the simplest WCS with default values
###############################################################################
WCS_EMPTY = WCS(naxis=1)
WCS_EMPTY.wcs.crpix = [1]
def test_empty():
wcs = WCS_EMPTY
# Low-level API
assert wcs.pixel_n_dim == 1
assert wcs.world_n_dim == 1
assert wcs.array_shape is None
assert wcs.pixel_shape is None
assert wcs.world_axis_physical_types == [None]
assert wcs.world_axis_units == [""]
assert wcs.pixel_axis_names == [""]
assert wcs.world_axis_names == [""]
assert_equal(wcs.axis_correlation_matrix, True)
assert wcs.world_axis_object_components == [("world", 0, "value")]
assert wcs.world_axis_object_classes["world"][0] is Quantity
assert wcs.world_axis_object_classes["world"][1] == ()
assert wcs.world_axis_object_classes["world"][2]["unit"] is u.one
assert_allclose(wcs.pixel_to_world_values(29), 29)
assert_allclose(wcs.array_index_to_world_values(29), 29)
assert np.ndim(wcs.pixel_to_world_values(29)) == 0
assert np.ndim(wcs.array_index_to_world_values(29)) == 0
assert_allclose(wcs.world_to_pixel_values(29), 29)
assert_equal(wcs.world_to_array_index_values(29), (29,))
assert np.ndim(wcs.world_to_pixel_values(29)) == 0
assert np.ndim(wcs.world_to_array_index_values(29)) == 0
# High-level API
coord = wcs.pixel_to_world(29)
assert_quantity_allclose(coord, 29 * u.one)
assert np.ndim(coord) == 0
coord = wcs.array_index_to_world(29)
assert_quantity_allclose(coord, 29 * u.one)
assert np.ndim(coord) == 0
coord = 15 * u.one
x = wcs.world_to_pixel(coord)
assert_allclose(x, 15.0)
assert np.ndim(x) == 0
i = wcs.world_to_array_index(coord)
assert_equal(i, 15)
assert np.ndim(i) == 0
###############################################################################
# The following example is a simple 2D image with celestial coordinates
###############################################################################
HEADER_SIMPLE_CELESTIAL = """
WCSAXES = 2
CTYPE1 = RA---TAN
CTYPE2 = DEC--TAN
CRVAL1 = 10
CRVAL2 = 20
CRPIX1 = 30
CRPIX2 = 40
CDELT1 = -0.1
CDELT2 = 0.1
CROTA2 = 0.
CUNIT1 = deg
CUNIT2 = deg
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore", VerifyWarning)
WCS_SIMPLE_CELESTIAL = WCS(Header.fromstring(HEADER_SIMPLE_CELESTIAL, sep="\n"))
def test_simple_celestial():
wcs = WCS_SIMPLE_CELESTIAL
# Low-level API
assert wcs.pixel_n_dim == 2
assert wcs.world_n_dim == 2
assert wcs.array_shape is None
assert wcs.pixel_shape is None
assert wcs.world_axis_physical_types == ["pos.eq.ra", "pos.eq.dec"]
assert wcs.world_axis_units == ["deg", "deg"]
assert wcs.pixel_axis_names == ["", ""]
assert wcs.world_axis_names == ["", ""]
assert_equal(wcs.axis_correlation_matrix, True)
assert wcs.world_axis_object_components == [
("celestial", 0, "spherical.lon.degree"),
("celestial", 1, "spherical.lat.degree"),
]
assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
assert wcs.world_axis_object_classes["celestial"][1] == ()
assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], ICRS)
assert wcs.world_axis_object_classes["celestial"][2]["unit"] == (u.deg, u.deg)
assert_allclose(wcs.pixel_to_world_values(29, 39), (10, 20))
assert_allclose(wcs.array_index_to_world_values(39, 29), (10, 20))
assert_allclose(wcs.world_to_pixel_values(10, 20), (29.0, 39.0))
assert_equal(wcs.world_to_array_index_values(10, 20), (39, 29))
# High-level API
coord = wcs.pixel_to_world(29, 39)
assert isinstance(coord, SkyCoord)
assert isinstance(coord.frame, ICRS)
assert_allclose(coord.ra.deg, 10)
assert_allclose(coord.dec.deg, 20)
coord = wcs.array_index_to_world(39, 29)
assert isinstance(coord, SkyCoord)
assert isinstance(coord.frame, ICRS)
assert_allclose(coord.ra.deg, 10)
assert_allclose(coord.dec.deg, 20)
coord = SkyCoord(10, 20, unit="deg", frame="icrs")
x, y = wcs.world_to_pixel(coord)
assert_allclose(x, 29.0)
assert_allclose(y, 39.0)
i, j = wcs.world_to_array_index(coord)
assert_equal(i, 39)
assert_equal(j, 29)
# Check that if the coordinates are passed in a different frame things still
# work properly
coord_galactic = coord.galactic
x, y = wcs.world_to_pixel(coord_galactic)
assert_allclose(x, 29.0)
assert_allclose(y, 39.0)
i, j = wcs.world_to_array_index(coord_galactic)
assert_equal(i, 39)
assert_equal(j, 29)
# Check that we can actually index the array
data = np.arange(3600).reshape((60, 60))
coord = SkyCoord(10, 20, unit="deg", frame="icrs")
index = wcs.world_to_array_index(coord)
assert_equal(data[index], 2369)
coord = SkyCoord([10, 12], [20, 22], unit="deg", frame="icrs")
index = wcs.world_to_array_index(coord)
assert_equal(data[index], [2369, 3550])
###############################################################################
# The following example is a spectral cube with axes in an unusual order
###############################################################################
HEADER_SPECTRAL_CUBE = """
WCSAXES = 3
CTYPE1 = GLAT-CAR
CTYPE2 = FREQ
CTYPE3 = GLON-CAR
CNAME1 = Latitude
CNAME2 = Frequency
CNAME3 = Longitude
CRVAL1 = 10
CRVAL2 = 20
CRVAL3 = 25
CRPIX1 = 30
CRPIX2 = 40
CRPIX3 = 45
CDELT1 = -0.1
CDELT2 = 0.5
CDELT3 = 0.1
CUNIT1 = deg
CUNIT2 = Hz
CUNIT3 = deg
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore", VerifyWarning)
WCS_SPECTRAL_CUBE = WCS(Header.fromstring(HEADER_SPECTRAL_CUBE, sep="\n"))
def test_spectral_cube():
# Spectral cube with a weird axis ordering
wcs = WCS_SPECTRAL_CUBE
# Low-level API
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape is None
assert wcs.pixel_shape is None
assert wcs.world_axis_physical_types == [
"pos.galactic.lat",
"em.freq",
"pos.galactic.lon",
]
assert wcs.world_axis_units == ["deg", "Hz", "deg"]
assert wcs.pixel_axis_names == ["", "", ""]
assert wcs.world_axis_names == ["Latitude", "Frequency", "Longitude"]
assert_equal(
wcs.axis_correlation_matrix,
[[True, False, True], [False, True, False], [True, False, True]],
)
assert len(wcs.world_axis_object_components) == 3
assert wcs.world_axis_object_components[0] == (
"celestial",
1,
"spherical.lat.degree",
)
assert wcs.world_axis_object_components[1][:2] == ("spectral", 0)
assert wcs.world_axis_object_components[2] == (
"celestial",
0,
"spherical.lon.degree",
)
assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
assert wcs.world_axis_object_classes["celestial"][1] == ()
assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], Galactic)
assert wcs.world_axis_object_classes["celestial"][2]["unit"] == (u.deg, u.deg)
assert wcs.world_axis_object_classes["spectral"][0] is Quantity
assert wcs.world_axis_object_classes["spectral"][1] == ()
assert wcs.world_axis_object_classes["spectral"][2] == {}
assert_allclose(wcs.pixel_to_world_values(29, 39, 44), (10, 20, 25))
assert_allclose(wcs.array_index_to_world_values(44, 39, 29), (10, 20, 25))
assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (29.0, 39.0, 44.0))
assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 39, 29))
# High-level API
coord, spec = wcs.pixel_to_world(29, 39, 44)
assert isinstance(coord, SkyCoord)
assert isinstance(coord.frame, Galactic)
assert_allclose(coord.l.deg, 25)
assert_allclose(coord.b.deg, 10)
assert isinstance(spec, SpectralCoord)
assert_allclose(spec.to_value(u.Hz), 20)
coord, spec = wcs.array_index_to_world(44, 39, 29)
assert isinstance(coord, SkyCoord)
assert isinstance(coord.frame, Galactic)
assert_allclose(coord.l.deg, 25)
assert_allclose(coord.b.deg, 10)
assert isinstance(spec, SpectralCoord)
assert_allclose(spec.to_value(u.Hz), 20)
coord = SkyCoord(25, 10, unit="deg", frame="galactic")
spec = 20 * u.Hz
with pytest.warns(AstropyUserWarning, match="No observer defined on WCS"):
x, y, z = wcs.world_to_pixel(coord, spec)
assert_allclose(x, 29.0)
assert_allclose(y, 39.0)
assert_allclose(z, 44.0)
# Order of world coordinates shouldn't matter
with pytest.warns(AstropyUserWarning, match="No observer defined on WCS"):
x, y, z = wcs.world_to_pixel(spec, coord)
assert_allclose(x, 29.0)
assert_allclose(y, 39.0)
assert_allclose(z, 44.0)
with pytest.warns(AstropyUserWarning, match="No observer defined on WCS"):
i, j, k = wcs.world_to_array_index(coord, spec)
assert_equal(i, 44)
assert_equal(j, 39)
assert_equal(k, 29)
# Order of world coordinates shouldn't matter
with pytest.warns(AstropyUserWarning, match="No observer defined on WCS"):
i, j, k = wcs.world_to_array_index(spec, coord)
assert_equal(i, 44)
assert_equal(j, 39)
assert_equal(k, 29)
HEADER_SPECTRAL_CUBE_NONALIGNED = (
HEADER_SPECTRAL_CUBE.strip()
+ "\n"
+ """
PC2_3 = -0.5
PC3_2 = +0.5
"""
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", VerifyWarning)
WCS_SPECTRAL_CUBE_NONALIGNED = WCS(
Header.fromstring(HEADER_SPECTRAL_CUBE_NONALIGNED, sep="\n")
)
def test_spectral_cube_nonaligned():
# Make sure that correlation matrix gets adjusted if there are non-identity
# CD matrix terms.
wcs = WCS_SPECTRAL_CUBE_NONALIGNED
assert wcs.world_axis_physical_types == [
"pos.galactic.lat",
"em.freq",
"pos.galactic.lon",
]
assert wcs.world_axis_units == ["deg", "Hz", "deg"]
assert wcs.pixel_axis_names == ["", "", ""]
assert wcs.world_axis_names == ["Latitude", "Frequency", "Longitude"]
assert_equal(
wcs.axis_correlation_matrix,
[
[True, True, True],
[False, True, True],
[True, True, True],
],
)
# NOTE: we check world_axis_object_components and world_axis_object_classes
# again here because in the past this failed when non-aligned axes were
# present, so this serves as a regression test.
assert len(wcs.world_axis_object_components) == 3
assert wcs.world_axis_object_components[0] == (
"celestial",
1,
"spherical.lat.degree",
)
assert wcs.world_axis_object_components[1][:2] == ("spectral", 0)
assert wcs.world_axis_object_components[2] == (
"celestial",
0,
"spherical.lon.degree",
)
assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
assert wcs.world_axis_object_classes["celestial"][1] == ()
assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], Galactic)
assert wcs.world_axis_object_classes["celestial"][2]["unit"] == (u.deg, u.deg)
assert wcs.world_axis_object_classes["spectral"][0] is Quantity
assert wcs.world_axis_object_classes["spectral"][1] == ()
assert wcs.world_axis_object_classes["spectral"][2] == {}
###############################################################################
# The following example is from Rots et al (2015), Table 5. It represents a
# cube with two spatial dimensions and one time dimension
###############################################################################
HEADER_TIME_CUBE = """
SIMPLE = T / Fits standard
BITPIX = -32 / Bits per pixel
NAXIS = 3 / Number of axes
NAXIS1 = 2048 / Axis length
NAXIS2 = 2048 / Axis length
NAXIS3 = 11 / Axis length
DATE = '2008-10-28T14:39:06' / Date FITS file was generated
OBJECT = '2008 TC3' / Name of the object observed
EXPTIME = 1.0011 / Integration time
MJD-OBS = 54746.02749237 / Obs start
DATE-OBS= '2008-10-07T00:39:35.3342' / Observing date
TELESCOP= 'VISTA' / ESO Telescope Name
INSTRUME= 'VIRCAM' / Instrument used.
TIMESYS = 'UTC' / From Observatory Time System
TREFPOS = 'TOPOCENT' / Topocentric
MJDREF = 54746.0 / Time reference point in MJD
RADESYS = 'ICRS' / Not equinoctal
CTYPE2 = 'RA---ZPN' / Zenithal Polynomial Projection
CRVAL2 = 2.01824372640628 / RA at ref pixel
CUNIT2 = 'deg' / Angles are degrees always
CRPIX2 = 2956.6 / Pixel coordinate at ref point
CTYPE1 = 'DEC--ZPN' / Zenithal Polynomial Projection
CRVAL1 = 14.8289418840003 / Dec at ref pixel
CUNIT1 = 'deg' / Angles are degrees always
CRPIX1 = -448.2 / Pixel coordinate at ref point
CTYPE3 = 'UTC' / linear time (UTC)
CRVAL3 = 2375.341 / Relative time of first frame
CUNIT3 = 's' / Time unit
CRPIX3 = 1.0 / Pixel coordinate at ref point
CTYPE3A = 'TT' / alternative linear time (TT)
CRVAL3A = 2440.525 / Relative time of first frame
CUNIT3A = 's' / Time unit
CRPIX3A = 1.0 / Pixel coordinate at ref point
OBSGEO-B= -24.6157 / [deg] Tel geodetic latitude (=North)+
OBSGEO-L= -70.3976 / [deg] Tel geodetic longitude (=East)+
OBSGEO-H= 2530.0000 / [m] Tel height above reference ellipsoid
CRDER3 = 0.0819 / random error in timings from fit
CSYER3 = 0.0100 / absolute time error
PC1_1 = 0.999999971570892 / WCS transform matrix element
PC1_2 = 0.000238449608932 / WCS transform matrix element
PC2_1 = -0.000621542859395 / WCS transform matrix element
PC2_2 = 0.999999806842218 / WCS transform matrix element
CDELT1 = -9.48575432499806E-5 / Axis scale at reference point
CDELT2 = 9.48683176211164E-5 / Axis scale at reference point
CDELT3 = 13.3629 / Axis scale at reference point
PV1_1 = 1. / ZPN linear term
PV1_3 = 42. / ZPN cubic term
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore", (VerifyWarning, FITSFixedWarning))
WCS_TIME_CUBE = WCS(Header.fromstring(HEADER_TIME_CUBE, sep="\n"))
def test_time_cube():
    # Time cube with two spatial dimensions and one time dimension
wcs = WCS_TIME_CUBE
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape == (11, 2048, 2048)
assert wcs.pixel_shape == (2048, 2048, 11)
assert wcs.world_axis_physical_types == ["pos.eq.dec", "pos.eq.ra", "time"]
assert wcs.world_axis_units == ["deg", "deg", "s"]
assert wcs.pixel_axis_names == ["", "", ""]
assert wcs.world_axis_names == ["", "", ""]
assert_equal(
wcs.axis_correlation_matrix,
[[True, True, False], [True, True, False], [False, False, True]],
)
components = wcs.world_axis_object_components
assert components[0] == ("celestial", 1, "spherical.lat.degree")
assert components[1] == ("celestial", 0, "spherical.lon.degree")
assert components[2][:2] == ("time", 0)
assert callable(components[2][2])
assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
assert wcs.world_axis_object_classes["celestial"][1] == ()
assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], ICRS)
assert wcs.world_axis_object_classes["celestial"][2]["unit"] == (u.deg, u.deg)
assert wcs.world_axis_object_classes["time"][0] is Time
assert wcs.world_axis_object_classes["time"][1] == ()
assert wcs.world_axis_object_classes["time"][2] == {}
assert callable(wcs.world_axis_object_classes["time"][3])
assert_allclose(
wcs.pixel_to_world_values(-449.2, 2955.6, 0),
(14.8289418840003, 2.01824372640628, 2375.341),
)
assert_allclose(
wcs.array_index_to_world_values(0, 2955.6, -449.2),
(14.8289418840003, 2.01824372640628, 2375.341),
)
assert_allclose(
wcs.world_to_pixel_values(14.8289418840003, 2.01824372640628, 2375.341),
(-449.2, 2955.6, 0),
)
assert_equal(
wcs.world_to_array_index_values(14.8289418840003, 2.01824372640628, 2375.341),
(0, 2956, -449),
)
# High-level API
coord, time = wcs.pixel_to_world(29, 39, 44)
assert isinstance(coord, SkyCoord)
assert isinstance(coord.frame, ICRS)
assert_allclose(coord.ra.deg, 1.7323356692202325)
assert_allclose(coord.dec.deg, 14.783516054817797)
assert isinstance(time, Time)
assert_allclose(time.mjd, 54746.03429755324)
coord, time = wcs.array_index_to_world(44, 39, 29)
assert isinstance(coord, SkyCoord)
assert isinstance(coord.frame, ICRS)
assert_allclose(coord.ra.deg, 1.7323356692202325)
assert_allclose(coord.dec.deg, 14.783516054817797)
assert isinstance(time, Time)
assert_allclose(time.mjd, 54746.03429755324)
x, y, z = wcs.world_to_pixel(coord, time)
assert_allclose(x, 29.0)
assert_allclose(y, 39.0)
assert_allclose(z, 44.0)
# Order of world coordinates shouldn't matter
x, y, z = wcs.world_to_pixel(time, coord)
assert_allclose(x, 29.0)
assert_allclose(y, 39.0)
assert_allclose(z, 44.0)
i, j, k = wcs.world_to_array_index(coord, time)
assert_equal(i, 44)
assert_equal(j, 39)
assert_equal(k, 29)
# Order of world coordinates shouldn't matter
i, j, k = wcs.world_to_array_index(time, coord)
assert_equal(i, 44)
assert_equal(j, 39)
assert_equal(k, 29)
###############################################################################
# The following tests are to make sure that Time objects are constructed
# correctly for a variety of combinations of WCS keywords
###############################################################################
HEADER_TIME_1D = """
SIMPLE = T
BITPIX = -32
NAXIS = 1
NAXIS1 = 2048
TIMESYS = 'UTC'
TREFPOS = 'TOPOCENT'
MJDREF = 50002.6
CTYPE1 = 'UTC'
CRVAL1 = 5
CUNIT1 = 's'
CRPIX1 = 1.0
CDELT1 = 2
OBSGEO-L= -20
OBSGEO-B= -70
OBSGEO-H= 2530
"""
if Version(wcsver) >= Version("7.1"):
HEADER_TIME_1D += "DATEREF = '1995-10-12T14:24:00'\n"
@pytest.fixture
def header_time_1d():
return Header.fromstring(HEADER_TIME_1D, sep="\n")
def assert_time_at(header, position, jd1, jd2, scale, format):
with warnings.catch_warnings():
warnings.simplefilter("ignore", FITSFixedWarning)
wcs = WCS(header)
time = wcs.pixel_to_world(position)
assert_allclose(time.jd1, jd1, rtol=1e-10)
assert_allclose(time.jd2, jd2, rtol=1e-10)
assert time.format == format
assert time.scale == scale
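# Informal derivation of the expected values used in the next few tests (added
# for clarity, not part of the original suite): MJDREF = 50002.6 corresponds to
# JD 2450003.1, which Time stores as jd1 = 2450003 and jd2 = 0.1. With
# CRVAL1 = 5 s, CDELT1 = 2 s and CRPIX1 = 1, pixel 1 lies 5 + 2 * 1 = 7 s after
# the reference time, i.e. 7 / 3600 / 24 days. For the GPS scale the expected
# value also includes the fixed 19 s offset between GPS and TAI.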
@pytest.mark.parametrize(
"scale", ("tai", "tcb", "tcg", "tdb", "tt", "ut1", "utc", "local")
)
def test_time_1d_values(header_time_1d, scale):
# Check that Time objects are instantiated with the correct values,
# scales, and formats.
header_time_1d["CTYPE1"] = scale.upper()
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, scale, "mjd")
def test_time_1d_values_gps(header_time_1d):
# Special treatment for GPS scale
header_time_1d["CTYPE1"] = "GPS"
assert_time_at(header_time_1d, 1, 2450003, 0.1 + (7 + 19) / 3600 / 24, "tai", "mjd")
def test_time_1d_values_deprecated(header_time_1d):
# Deprecated (in FITS) scales
header_time_1d["CTYPE1"] = "TDT"
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, "tt", "mjd")
header_time_1d["CTYPE1"] = "IAT"
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, "tai", "mjd")
header_time_1d["CTYPE1"] = "GMT"
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, "utc", "mjd")
header_time_1d["CTYPE1"] = "ET"
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, "tt", "mjd")
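# A bare CTYPE1 = 'TIME' defers to the TIMESYS keyword for the scale, so the
# next test first checks the value obtained with the fixture's TIMESYS = 'UTC'
# and then overrides TIMESYS to 'TAI' to confirm that the override is honoured.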
def test_time_1d_values_time(header_time_1d):
header_time_1d["CTYPE1"] = "TIME"
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, "utc", "mjd")
header_time_1d["TIMESYS"] = "TAI"
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, "tai", "mjd")
@pytest.mark.remote_data
@pytest.mark.parametrize("scale", ("tai", "tcb", "tcg", "tdb", "tt", "ut1", "utc"))
def test_time_1d_roundtrip(header_time_1d, scale):
# Check that coordinates round-trip
pixel_in = np.arange(3, 10)
header_time_1d["CTYPE1"] = scale.upper()
with warnings.catch_warnings():
warnings.simplefilter("ignore", FITSFixedWarning)
wcs = WCS(header_time_1d)
# Simple test
time = wcs.pixel_to_world(pixel_in)
pixel_out = wcs.world_to_pixel(time)
assert_allclose(pixel_in, pixel_out)
# Test with an intermediate change to a different scale/format
time = wcs.pixel_to_world(pixel_in).tdb
time.format = "isot"
pixel_out = wcs.world_to_pixel(time)
assert_allclose(pixel_in, pixel_out)
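# Rough arithmetic behind the high-precision test below (an added note, not
# from the original suite): MJDREFI = 52000 corresponds to JD 2452000.5, stored
# as jd1 = 2452001 and jd2 = -0.5; pixel 10 adds 5 + 2 * 10 = 25 s, i.e.
# 25 / 3600 / 24 days, and MJDREFF contributes the extra 1e-11 days.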
def test_time_1d_high_precision(header_time_1d):
# Case where the MJDREF is split into two for high precision
del header_time_1d["MJDREF"]
header_time_1d["MJDREFI"] = 52000.0
header_time_1d["MJDREFF"] = 1e-11
with warnings.catch_warnings():
warnings.simplefilter("ignore", FITSFixedWarning)
wcs = WCS(header_time_1d)
time = wcs.pixel_to_world(10)
# Here we have to use a very small rtol to really test that MJDREFF is
# taken into account
assert_allclose(time.jd1, 2452001.0, rtol=1e-12)
assert_allclose(time.jd2, -0.5 + 25 / 3600 / 24 + 1e-11, rtol=1e-13)
def test_time_1d_location_geodetic(header_time_1d):
# Make sure that the location is correctly returned (geodetic case)
with warnings.catch_warnings():
warnings.simplefilter("ignore", FITSFixedWarning)
wcs = WCS(header_time_1d)
time = wcs.pixel_to_world(10)
lon, lat, alt = time.location.to_geodetic()
# FIXME: alt won't work for now because ERFA doesn't implement the IAU 1976
# ellipsoid (https://github.com/astropy/astropy/issues/9420)
assert_allclose(lon.degree, -20)
assert_allclose(lat.degree, -70)
# assert_allclose(alt.to_value(u.m), 2530.)
@pytest.fixture
def header_time_1d_no_obs():
header = Header.fromstring(HEADER_TIME_1D, sep="\n")
del header["OBSGEO-L"]
del header["OBSGEO-B"]
del header["OBSGEO-H"]
return header
def test_time_1d_location_geocentric(header_time_1d_no_obs):
# Make sure that the location is correctly returned (geocentric case)
header = header_time_1d_no_obs
header["OBSGEO-X"] = 10
header["OBSGEO-Y"] = -20
header["OBSGEO-Z"] = 30
with warnings.catch_warnings():
warnings.simplefilter("ignore", FITSFixedWarning)
wcs = WCS(header)
time = wcs.pixel_to_world(10)
x, y, z = time.location.to_geocentric()
assert_allclose(x.to_value(u.m), 10)
assert_allclose(y.to_value(u.m), -20)
assert_allclose(z.to_value(u.m), 30)
def test_time_1d_location_geocenter(header_time_1d_no_obs):
header_time_1d_no_obs["TREFPOS"] = "GEOCENTER"
wcs = WCS(header_time_1d_no_obs)
time = wcs.pixel_to_world(10)
x, y, z = time.location.to_geocentric()
assert_allclose(x.to_value(u.m), 0)
assert_allclose(y.to_value(u.m), 0)
assert_allclose(z.to_value(u.m), 0)
def test_time_1d_location_missing(header_time_1d_no_obs):
# Check what happens when no location is present
wcs = WCS(header_time_1d_no_obs)
with pytest.warns(
UserWarning,
match=(
"Missing or incomplete observer location "
"information, setting location in Time to None"
),
):
time = wcs.pixel_to_world(10)
assert time.location is None
def test_time_1d_location_incomplete(header_time_1d_no_obs):
# Check what happens when location information is incomplete
header_time_1d_no_obs["OBSGEO-L"] = 10.0
with warnings.catch_warnings():
warnings.simplefilter("ignore", FITSFixedWarning)
wcs = WCS(header_time_1d_no_obs)
with pytest.warns(
UserWarning,
match=(
"Missing or incomplete observer location "
"information, setting location in Time to None"
),
):
time = wcs.pixel_to_world(10)
assert time.location is None
def test_time_1d_location_unsupported(header_time_1d_no_obs):
# Check what happens when TREFPOS is unsupported
header_time_1d_no_obs["TREFPOS"] = "BARYCENTER"
wcs = WCS(header_time_1d_no_obs)
with pytest.warns(
UserWarning,
match=(
"Observation location 'barycenter' is not "
"supported, setting location in Time to None"
),
):
time = wcs.pixel_to_world(10)
assert time.location is None
def test_time_1d_unsupported_ctype(header_time_1d_no_obs):
    # For scales we don't support yet, e.g. UT(...), fall back to Time and drop the sub-scale
header_time_1d_no_obs["CTYPE1"] = "UT(WWV)"
wcs = WCS(header_time_1d_no_obs)
with pytest.warns(
UserWarning, match="Dropping unsupported sub-scale WWV from scale UT"
):
time = wcs.pixel_to_world(10)
assert isinstance(time, Time)
###############################################################################
# Extra corner cases
###############################################################################
def test_unrecognized_unit():
# TODO: Determine whether the following behavior is desirable
wcs = WCS(naxis=1)
with pytest.warns(UnitsWarning):
wcs.wcs.cunit = ["bananas // sekonds"]
assert wcs.world_axis_units == ["bananas // sekonds"]
def test_distortion_correlations():
filename = get_pkg_data_filename("../../tests/data/sip.fits")
with pytest.warns(FITSFixedWarning):
w = WCS(filename)
assert_equal(w.axis_correlation_matrix, True)
# Changing PC to an identity matrix doesn't change anything since
# distortions are still present.
w.wcs.pc = [[1, 0], [0, 1]]
assert_equal(w.axis_correlation_matrix, True)
# Nor does changing the name of the axes to make them non-celestial
w.wcs.ctype = ["X", "Y"]
assert_equal(w.axis_correlation_matrix, True)
# However once we turn off the distortions the matrix changes
w.sip = None
assert_equal(w.axis_correlation_matrix, [[True, False], [False, True]])
# If we go back to celestial coordinates then the matrix is all True again
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
assert_equal(w.axis_correlation_matrix, True)
# Or if we change to X/Y but have a non-identity PC
w.wcs.pc = [[0.9, -0.1], [0.1, 0.9]]
w.wcs.ctype = ["X", "Y"]
assert_equal(w.axis_correlation_matrix, True)
def test_custom_ctype_to_ucd_mappings():
wcs = WCS(naxis=1)
wcs.wcs.ctype = ["SPAM"]
assert wcs.world_axis_physical_types == [None]
# Check simple behavior
with custom_ctype_to_ucd_mapping({"APPLE": "food.fruit"}):
assert wcs.world_axis_physical_types == [None]
with custom_ctype_to_ucd_mapping({"APPLE": "food.fruit", "SPAM": "food.spam"}):
assert wcs.world_axis_physical_types == ["food.spam"]
# Check nesting
with custom_ctype_to_ucd_mapping({"SPAM": "food.spam"}):
with custom_ctype_to_ucd_mapping({"APPLE": "food.fruit"}):
assert wcs.world_axis_physical_types == ["food.spam"]
with custom_ctype_to_ucd_mapping({"APPLE": "food.fruit"}):
with custom_ctype_to_ucd_mapping({"SPAM": "food.spam"}):
assert wcs.world_axis_physical_types == ["food.spam"]
# Check priority in nesting
with custom_ctype_to_ucd_mapping({"SPAM": "notfood"}):
with custom_ctype_to_ucd_mapping({"SPAM": "food.spam"}):
assert wcs.world_axis_physical_types == ["food.spam"]
with custom_ctype_to_ucd_mapping({"SPAM": "food.spam"}):
with custom_ctype_to_ucd_mapping({"SPAM": "notfood"}):
assert wcs.world_axis_physical_types == ["notfood"]
def test_caching_components_and_classes():
# Make sure that when we change the WCS object, the classes and components
# are updated (we use a cache internally, so we need to make sure the cache
# is invalidated if needed)
wcs = WCS_SIMPLE_CELESTIAL.deepcopy()
assert wcs.world_axis_object_components == [
("celestial", 0, "spherical.lon.degree"),
("celestial", 1, "spherical.lat.degree"),
]
assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
assert wcs.world_axis_object_classes["celestial"][1] == ()
assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], ICRS)
assert wcs.world_axis_object_classes["celestial"][2]["unit"] == (u.deg, u.deg)
wcs.wcs.radesys = "FK5"
frame = wcs.world_axis_object_classes["celestial"][2]["frame"]
assert isinstance(frame, FK5)
assert frame.equinox.jyear == 2000.0
wcs.wcs.equinox = 2010
frame = wcs.world_axis_object_classes["celestial"][2]["frame"]
assert isinstance(frame, FK5)
assert frame.equinox.jyear == 2010.0
def test_sub_wcsapi_attributes():
# Regression test for a bug that caused some of the WCS attributes to be
# incorrect when using WCS.sub or WCS.celestial (which is an alias for sub
# with lon/lat types).
wcs = WCS_SPECTRAL_CUBE.deepcopy()
wcs.pixel_shape = (30, 40, 50)
wcs.pixel_bounds = [(-1, 11), (-2, 18), (5, 15)]
# Use celestial shortcut
wcs_sub1 = wcs.celestial
assert wcs_sub1.pixel_n_dim == 2
assert wcs_sub1.world_n_dim == 2
assert wcs_sub1.array_shape == (50, 30)
assert wcs_sub1.pixel_shape == (30, 50)
assert wcs_sub1.pixel_bounds == [(-1, 11), (5, 15)]
assert wcs_sub1.world_axis_physical_types == [
"pos.galactic.lat",
"pos.galactic.lon",
]
assert wcs_sub1.world_axis_units == ["deg", "deg"]
assert wcs_sub1.world_axis_names == ["Latitude", "Longitude"]
# Try adding axes
wcs_sub2 = wcs.sub([0, 2, 0])
assert wcs_sub2.pixel_n_dim == 3
assert wcs_sub2.world_n_dim == 3
assert wcs_sub2.array_shape == (None, 40, None)
assert wcs_sub2.pixel_shape == (None, 40, None)
assert wcs_sub2.pixel_bounds == [None, (-2, 18), None]
assert wcs_sub2.world_axis_physical_types == [None, "em.freq", None]
assert wcs_sub2.world_axis_units == ["", "Hz", ""]
assert wcs_sub2.world_axis_names == ["", "Frequency", ""]
# Use strings
wcs_sub3 = wcs.sub(["longitude", "latitude"])
assert wcs_sub3.pixel_n_dim == 2
assert wcs_sub3.world_n_dim == 2
assert wcs_sub3.array_shape == (30, 50)
assert wcs_sub3.pixel_shape == (50, 30)
assert wcs_sub3.pixel_bounds == [(5, 15), (-1, 11)]
assert wcs_sub3.world_axis_physical_types == [
"pos.galactic.lon",
"pos.galactic.lat",
]
assert wcs_sub3.world_axis_units == ["deg", "deg"]
assert wcs_sub3.world_axis_names == ["Longitude", "Latitude"]
# Now try without CNAME set
wcs.wcs.cname = [""] * wcs.wcs.naxis
wcs_sub4 = wcs.sub(["longitude", "latitude"])
assert wcs_sub4.pixel_n_dim == 2
assert wcs_sub4.world_n_dim == 2
assert wcs_sub4.array_shape == (30, 50)
assert wcs_sub4.pixel_shape == (50, 30)
assert wcs_sub4.pixel_bounds == [(5, 15), (-1, 11)]
assert wcs_sub4.world_axis_physical_types == [
"pos.galactic.lon",
"pos.galactic.lat",
]
assert wcs_sub4.world_axis_units == ["deg", "deg"]
assert wcs_sub4.world_axis_names == ["", ""]
###############################################################################
# Spectral transformations
###############################################################################
HEADER_SPECTRAL_FRAMES = """
BUNIT = 'Jy/beam'
EQUINOX = 2.000000000E+03
CTYPE1 = 'RA---SIN'
CRVAL1 = 2.60108333333E+02
CDELT1 = -2.777777845E-04
CRPIX1 = 1.0
CUNIT1 = 'deg'
CTYPE2 = 'DEC--SIN'
CRVAL2 = -9.75000000000E-01
CDELT2 = 2.777777845E-04
CRPIX2 = 1.0
CUNIT2 = 'deg'
CTYPE3 = 'FREQ'
CRVAL3 = 1.37835117405E+09
CDELT3 = 9.765625000E+04
CRPIX3 = 32.0
CUNIT3 = 'Hz'
SPECSYS = 'TOPOCENT'
RESTFRQ = 1.420405752E+09 / [Hz]
RADESYS = 'FK5'
"""
@pytest.fixture
def header_spectral_frames():
return Header.fromstring(HEADER_SPECTRAL_FRAMES, sep="\n")
def test_spectralcoord_frame(header_spectral_frames):
# This is a test to check the numerical results of transformations between
# different velocity frames. We simply make sure that the returned
# SpectralCoords are in the right frame but don't check the transformations
# since this is already done in test_spectralcoord_accuracy
# in astropy.coordinates.
with iers.conf.set_temp("auto_download", False):
obstime = Time("2009-05-04T04:44:23", scale="utc")
header = header_spectral_frames.copy()
header["MJD-OBS"] = obstime.mjd
header["CRVAL1"] = 16.33211
header["CRVAL2"] = -34.2221
header["OBSGEO-L"] = 144.2
header["OBSGEO-B"] = -20.2
header["OBSGEO-H"] = 0.0
# We start off with a WCS defined in topocentric frequency
with pytest.warns(FITSFixedWarning):
wcs_topo = WCS(header)
# We convert a single pixel coordinate to world coordinates and keep only
# the second high level object - a SpectralCoord:
sc_topo = wcs_topo.pixel_to_world(0, 0, 31)[1]
# We check that this is in topocentric frame with zero velocities
assert isinstance(sc_topo, SpectralCoord)
assert isinstance(sc_topo.observer, ITRS)
assert sc_topo.observer.obstime.isot == obstime.isot
assert_equal(sc_topo.observer.data.differentials["s"].d_xyz.value, 0)
observatory = (
EarthLocation.from_geodetic(144.2, -20.2)
.get_itrs(obstime=obstime)
.transform_to(ICRS())
)
assert (
observatory.separation_3d(sc_topo.observer.transform_to(ICRS())) < 1 * u.km
)
for specsys, expected_frame in VELOCITY_FRAMES.items():
header["SPECSYS"] = specsys
with pytest.warns(FITSFixedWarning):
wcs = WCS(header)
sc = wcs.pixel_to_world(0, 0, 31)[1]
# Now transform to the expected velocity frame, which should leave
# the spectral coordinate unchanged
sc_check = sc.with_observer_stationary_relative_to(expected_frame)
assert_quantity_allclose(sc.quantity, sc_check.quantity)
@pytest.mark.parametrize(
("ctype3", "observer"),
product(["ZOPT", "BETA", "VELO", "VRAD", "VOPT"], [False, True]),
)
def test_different_ctypes(header_spectral_frames, ctype3, observer):
header = header_spectral_frames.copy()
header["CTYPE3"] = ctype3
header["CRVAL3"] = 0.1
header["CDELT3"] = 0.001
if ctype3[0] == "V":
header["CUNIT3"] = "m s-1"
else:
header["CUNIT3"] = ""
header["RESTWAV"] = 1.420405752e09
header["MJD-OBS"] = 55197
if observer:
header["OBSGEO-L"] = 144.2
header["OBSGEO-B"] = -20.2
header["OBSGEO-H"] = 0.0
header["SPECSYS"] = "BARYCENT"
with warnings.catch_warnings():
warnings.simplefilter("ignore", FITSFixedWarning)
wcs = WCS(header)
skycoord, spectralcoord = wcs.pixel_to_world(0, 0, 31)
assert isinstance(spectralcoord, SpectralCoord)
if observer:
pix = wcs.world_to_pixel(skycoord, spectralcoord)
else:
with pytest.warns(AstropyUserWarning, match="No observer defined on WCS"):
pix = wcs.world_to_pixel(skycoord, spectralcoord)
assert_allclose(pix, [0, 0, 31], rtol=1e-6, atol=1e-9)
def test_non_convergence_warning():
"""Test case for issue #11446
    Since we can't define a target accuracy when plotting a WCS, `all_world2pix`
    should not error but only warn when the default accuracy can't be reached.
"""
# define a minimal WCS where convergence fails for certain image positions
wcs = WCS(naxis=2)
crpix = [0, 0]
a = b = ap = bp = np.zeros((4, 4))
a[3, 0] = -1.20116753e-07
test_pos_x = [1000, 1]
test_pos_y = [0, 2]
wcs.sip = Sip(a, b, ap, bp, crpix)
# first make sure the WCS works when using a low accuracy
expected = wcs.all_world2pix(test_pos_x, test_pos_y, 0, tolerance=1e-3)
# then check that it fails when using the default accuracy
with pytest.raises(NoConvergence):
wcs.all_world2pix(test_pos_x, test_pos_y, 0)
    # finally, check that world_to_pixel_values raises a warning but returns
    # the same 'low accuracy' result
with pytest.warns(UserWarning):
assert_allclose(wcs.world_to_pixel_values(test_pos_x, test_pos_y), expected)
HEADER_SPECTRAL_1D = """
CTYPE1 = 'FREQ'
CRVAL1 = 1.37835117405E+09
CDELT1 = 9.765625000E+04
CRPIX1 = 32.0
CUNIT1 = 'Hz'
SPECSYS = 'TOPOCENT'
RESTFRQ = 1.420405752E+09 / [Hz]
RADESYS = 'FK5'
"""
@pytest.fixture
def header_spectral_1d():
return Header.fromstring(HEADER_SPECTRAL_1D, sep="\n")
@pytest.mark.parametrize(
("ctype1", "observer"),
product(["ZOPT", "BETA", "VELO", "VRAD", "VOPT"], [False, True]),
)
def test_spectral_1d(header_spectral_1d, ctype1, observer):
# This is a regression test for issues that happened with 1-d WCS
# where the target is not defined but observer is.
header = header_spectral_1d.copy()
header["CTYPE1"] = ctype1
header["CRVAL1"] = 0.1
header["CDELT1"] = 0.001
if ctype1[0] == "V":
header["CUNIT1"] = "m s-1"
else:
header["CUNIT1"] = ""
header["RESTWAV"] = 1.420405752e09
header["MJD-OBS"] = 55197
if observer:
header["OBSGEO-L"] = 144.2
header["OBSGEO-B"] = -20.2
header["OBSGEO-H"] = 0.0
header["SPECSYS"] = "BARYCENT"
with warnings.catch_warnings():
warnings.simplefilter("ignore", FITSFixedWarning)
wcs = WCS(header)
# First ensure that transformations round-trip
spectralcoord = wcs.pixel_to_world(31)
assert isinstance(spectralcoord, SpectralCoord)
assert spectralcoord.target is None
assert (spectralcoord.observer is not None) is observer
if observer:
expected_message = "No target defined on SpectralCoord"
else:
expected_message = "No observer defined on WCS"
with pytest.warns(AstropyUserWarning, match=expected_message):
pix = wcs.world_to_pixel(spectralcoord)
assert_allclose(pix, [31], rtol=1e-6)
# Also make sure that we can convert a SpectralCoord on which the observer
# is not defined but the target is.
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
spectralcoord_no_obs = SpectralCoord(
spectralcoord.quantity,
doppler_rest=spectralcoord.doppler_rest,
doppler_convention=spectralcoord.doppler_convention,
target=ICRS(10 * u.deg, 20 * u.deg, distance=1 * u.kpc),
)
if observer:
expected_message = "No observer defined on SpectralCoord"
else:
expected_message = "No observer defined on WCS"
with pytest.warns(AstropyUserWarning, match=expected_message):
pix2 = wcs.world_to_pixel(spectralcoord_no_obs)
assert_allclose(pix2, [31], rtol=1e-6)
    # And finally check the case where both observer and target are defined on
    # the SpectralCoord
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
spectralcoord_no_obs = SpectralCoord(
spectralcoord.quantity,
doppler_rest=spectralcoord.doppler_rest,
doppler_convention=spectralcoord.doppler_convention,
observer=ICRS(10 * u.deg, 20 * u.deg, distance=0 * u.kpc),
target=ICRS(10 * u.deg, 20 * u.deg, distance=1 * u.kpc),
)
if observer:
pix3 = wcs.world_to_pixel(spectralcoord_no_obs)
else:
with pytest.warns(AstropyUserWarning, match="No observer defined on WCS"):
pix3 = wcs.world_to_pixel(spectralcoord_no_obs)
assert_allclose(pix3, [31], rtol=1e-6)
HEADER_SPECTRAL_WITH_TIME = """
WCSAXES = 3
CTYPE1 = 'RA---TAN'
CTYPE2 = 'DEC--TAN'
CTYPE3 = 'WAVE'
CRVAL1 = 98.83153
CRVAL2 = -66.818
CRVAL3 = 6.4205
CRPIX1 = 21.
CRPIX2 = 22.
CRPIX3 = 1.
CDELT1 = 3.6111E-05
CDELT2 = 3.6111E-05
CDELT3 = 0.001
CUNIT1 = 'deg'
CUNIT2 = 'deg'
CUNIT3 = 'um'
MJD-AVG = 59045.41466
RADESYS = 'ICRS'
SPECSYS = 'BARYCENT'
TIMESYS = 'UTC'
"""
@pytest.fixture
def header_spectral_with_time():
return Header.fromstring(HEADER_SPECTRAL_WITH_TIME, sep="\n")
def test_spectral_with_time_kw(header_spectral_with_time):
def check_wcs(header):
assert_allclose(w.all_pix2world(*w.wcs.crpix, 1), w.wcs.crval)
sky, spec = w.pixel_to_world(*w.wcs.crpix)
assert_allclose(
(sky.spherical.lon.degree, sky.spherical.lat.degree, spec.value),
w.wcs.crval,
rtol=1e-3,
)
# Check with MJD-AVG and TIMESYS
hdr = header_spectral_with_time.copy()
with warnings.catch_warnings():
warnings.simplefilter("ignore", (VerifyWarning, FITSFixedWarning))
w = WCS(hdr)
# Make sure the correct keyword is used in a test
assert ~np.isnan(w.wcs.mjdavg)
assert np.isnan(w.wcs.mjdobs)
check_wcs(w)
# Check fall back to MJD-OBS
hdr["MJD-OBS"] = hdr["MJD-AVG"]
del hdr["MJD-AVG"]
with warnings.catch_warnings():
warnings.simplefilter("ignore", (VerifyWarning, FITSFixedWarning))
w = WCS(hdr)
# Make sure the correct keyword is used in a test
assert ~np.isnan(w.wcs.mjdobs)
assert np.isnan(w.wcs.mjdavg)
check_wcs(w)
    # Check fall back to DATE-OBS
hdr["DATE-OBS"] = "2020-07-15"
del hdr["MJD-OBS"]
with warnings.catch_warnings():
warnings.simplefilter("ignore", (VerifyWarning, FITSFixedWarning))
w = WCS(hdr)
w.wcs.mjdobs = np.nan
# Make sure the correct keyword is used in a test
assert np.isnan(w.wcs.mjdobs)
assert np.isnan(w.wcs.mjdavg)
assert w.wcs.dateobs != ""
check_wcs(hdr)
# Check fall back to scale='utc'
del hdr["TIMESYS"]
check_wcs(hdr)
def test_fits_tab_time_and_units():
"""
This test is a regression test for https://github.com/astropy/astropy/issues/12095
It checks the following:
- If a spatial WCS isn't converted to units of deg by wcslib it still works.
- If TIMESYS is upper case we parse it correctly
- If a TIME CTYPE key has an algorithm code (in this case -TAB) it still works.
The file used here was generated by gWCS and then edited to add the TIMESYS key.
"""
with fits.open(
get_pkg_data_filename("data/example_4d_tab.fits")
) as hdul, pytest.warns(FITSFixedWarning):
w = WCS(header=hdul[0].header, fobj=hdul)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message=r".*dubious year \(Note \d\)")
world = w.pixel_to_world(0, 0, 0, 0)
assert isinstance(world[0], SkyCoord)
assert world[0].data.lat.unit is u.arcsec
assert world[0].data.lon.unit is u.arcsec
assert u.allclose(world[0].l, 0.06475506 * u.deg)
assert u.allclose(world[0].b, -0.02430561 * u.deg)
assert isinstance(world[1], SpectralCoord)
assert u.allclose(world[1], 24.96 * u.Hz)
assert isinstance(world[2], Time)
assert world[2].scale == "utc"
assert u.allclose(world[2].mjd, 0.00032986111111110716)
################################################################################
# Tests with Stokes
################################################################################
HEADER_POLARIZED = """
CTYPE1 = 'HPLT-TAN'
CTYPE2 = 'HPLN-TAN'
CTYPE3 = 'STOKES'
"""
@pytest.fixture
def header_polarized():
return Header.fromstring(HEADER_POLARIZED, sep="\n")
@pytest.fixture()
def wcs_polarized(header_polarized):
return WCS(header_polarized)
def test_phys_type_polarization(wcs_polarized):
w = wcs_polarized
assert w.world_axis_physical_types[2] == "phys.polarization.stokes"
def test_pixel_to_world_stokes(wcs_polarized):
w = wcs_polarized
world = w.pixel_to_world(0, 0, 0)
assert world[2] == 1
assert isinstance(world[2], StokesCoord)
assert_equal(world[2].symbol, "I")
world = w.pixel_to_world(0, 0, [0, 1, 2, 3])
assert isinstance(world[2], StokesCoord)
assert_array_equal(world[2], [1, 2, 3, 4])
assert_array_equal(world[2].symbol, ["I", "Q", "U", "V"])
import warnings
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_equal
import astropy.units as u
from astropy.coordinates import ICRS, Galactic, SkyCoord
from astropy.io.fits import Header
from astropy.io.fits.verify import VerifyWarning
from astropy.time import Time
from astropy.units import Quantity
from astropy.wcs.wcs import WCS, FITSFixedWarning
from astropy.wcs.wcsapi.wrappers.sliced_wcs import (
SlicedLowLevelWCS,
combine_slices,
sanitize_slices,
)
# To test the slicing we start off from standard FITS WCS
# objects since those implement the low-level API. We create
# a WCS for a spectral cube with axes in non-standard order
# and with correlated celestial axes and an uncorrelated
# spectral axis.
HEADER_SPECTRAL_CUBE = """
NAXIS = 3
NAXIS1 = 10
NAXIS2 = 20
NAXIS3 = 30
CTYPE1 = GLAT-CAR
CTYPE2 = FREQ
CTYPE3 = GLON-CAR
CNAME1 = Latitude
CNAME2 = Frequency
CNAME3 = Longitude
CRVAL1 = 10
CRVAL2 = 20
CRVAL3 = 25
CRPIX1 = 30
CRPIX2 = 40
CRPIX3 = 45
CDELT1 = -0.1
CDELT2 = 0.5
CDELT3 = 0.1
CUNIT1 = deg
CUNIT2 = Hz
CUNIT3 = deg
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore", VerifyWarning)
WCS_SPECTRAL_CUBE = WCS(Header.fromstring(HEADER_SPECTRAL_CUBE, sep="\n"))
WCS_SPECTRAL_CUBE.pixel_bounds = [(-1, 11), (-2, 18), (5, 15)]
def test_invalid_slices():
with pytest.raises(IndexError):
SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, [None, None, [False, False, False]])
with pytest.raises(IndexError):
SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, [None, None, slice(None, None, 2)])
with pytest.raises(IndexError):
SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, [None, None, 1000.100])
@pytest.mark.parametrize(
"item, ndim, expected",
(
([Ellipsis, 10], 4, [slice(None)] * 3 + [10]),
([10, slice(20, 30)], 5, [10, slice(20, 30)] + [slice(None)] * 3),
([10, Ellipsis, 8], 10, [10] + [slice(None)] * 8 + [8]),
),
)
def test_sanitize_slice(item, ndim, expected):
new_item = sanitize_slices(item, ndim)
# FIXME: do we still need the first two since the third assert
# should cover it all?
assert len(new_item) == ndim
assert all(isinstance(i, (slice, int)) for i in new_item)
assert new_item == expected
EXPECTED_ELLIPSIS_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 3 pixel and 3 world dimensions
Array shape (Numpy order): (30, 20, 10)
Pixel Dim Axis Name Data size Bounds
0 None 10 (-1, 11)
1 None 20 (-2, 18)
2 None 30 (5, 15)
World Dim Axis Name Physical Type Units
0 Latitude pos.galactic.lat deg
1 Frequency em.freq Hz
2 Longitude pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1 2
0 yes no yes
1 no yes no
2 yes no yes
"""
def test_ellipsis():
wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, Ellipsis)
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape == (30, 20, 10)
assert wcs.pixel_shape == (10, 20, 30)
assert wcs.world_axis_physical_types == [
"pos.galactic.lat",
"em.freq",
"pos.galactic.lon",
]
assert wcs.world_axis_units == ["deg", "Hz", "deg"]
assert wcs.pixel_axis_names == ["", "", ""]
assert wcs.world_axis_names == ["Latitude", "Frequency", "Longitude"]
assert_equal(
wcs.axis_correlation_matrix,
[[True, False, True], [False, True, False], [True, False, True]],
)
assert len(wcs.world_axis_object_components) == 3
assert wcs.world_axis_object_components[0] == (
"celestial",
1,
"spherical.lat.degree",
)
assert wcs.world_axis_object_components[1][:2] == ("spectral", 0)
assert wcs.world_axis_object_components[2] == (
"celestial",
0,
"spherical.lon.degree",
)
assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
assert wcs.world_axis_object_classes["celestial"][1] == ()
assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], Galactic)
assert wcs.world_axis_object_classes["celestial"][2]["unit"] == (u.deg, u.deg)
assert wcs.world_axis_object_classes["spectral"][0] is Quantity
assert wcs.world_axis_object_classes["spectral"][1] == ()
assert wcs.world_axis_object_classes["spectral"][2] == {}
assert_allclose(wcs.pixel_to_world_values(29, 39, 44), (10, 20, 25))
assert_allclose(wcs.array_index_to_world_values(44, 39, 29), (10, 20, 25))
assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (29.0, 39.0, 44.0))
assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 39, 29))
assert_equal(wcs.pixel_bounds, [(-1, 11), (-2, 18), (5, 15)])
assert str(wcs) == EXPECTED_ELLIPSIS_REPR.strip()
assert EXPECTED_ELLIPSIS_REPR.strip() in repr(wcs)
def test_pixel_to_world_broadcasting():
wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, Ellipsis)
assert_allclose(
wcs.pixel_to_world_values((29, 29), 39, 44), ((10, 10), (20, 20), (25, 25))
)
def test_world_to_pixel_broadcasting():
wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, Ellipsis)
assert_allclose(
wcs.world_to_pixel_values((10, 10), 20, 25),
((29.0, 29.0), (39.0, 39.0), (44.0, 44.0)),
)
EXPECTED_SPECTRAL_SLICE_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 2 pixel and 2 world dimensions
Array shape (Numpy order): (30, 10)
Pixel Dim Axis Name Data size Bounds
0 None 10 (-1, 11)
1 None 30 (5, 15)
World Dim Axis Name Physical Type Units
0 Latitude pos.galactic.lat deg
1 Longitude pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1
0 yes yes
1 yes yes
"""
def test_spectral_slice():
wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, [slice(None), 10])
assert wcs.pixel_n_dim == 2
assert wcs.world_n_dim == 2
assert wcs.array_shape == (30, 10)
assert wcs.pixel_shape == (10, 30)
assert wcs.world_axis_physical_types == ["pos.galactic.lat", "pos.galactic.lon"]
assert wcs.world_axis_units == ["deg", "deg"]
assert wcs.pixel_axis_names == ["", ""]
assert wcs.world_axis_names == ["Latitude", "Longitude"]
assert_equal(wcs.axis_correlation_matrix, [[True, True], [True, True]])
assert wcs.world_axis_object_components == [
("celestial", 1, "spherical.lat.degree"),
("celestial", 0, "spherical.lon.degree"),
]
assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
assert wcs.world_axis_object_classes["celestial"][1] == ()
assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], Galactic)
assert wcs.world_axis_object_classes["celestial"][2]["unit"] == (u.deg, u.deg)
assert_allclose(wcs.pixel_to_world_values(29, 44), (10, 25))
assert_allclose(wcs.array_index_to_world_values(44, 29), (10, 25))
assert_allclose(wcs.world_to_pixel_values(10, 25), (29.0, 44.0))
assert_equal(wcs.world_to_array_index_values(10, 25), (44, 29))
assert_equal(wcs.pixel_bounds, [(-1, 11), (5, 15)])
assert str(wcs) == EXPECTED_SPECTRAL_SLICE_REPR.strip()
assert EXPECTED_SPECTRAL_SLICE_REPR.strip() in repr(wcs)
EXPECTED_SPECTRAL_RANGE_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 3 pixel and 3 world dimensions
Array shape (Numpy order): (30, 6, 10)
Pixel Dim Axis Name Data size Bounds
0 None 10 (-1, 11)
1 None 6 (-6, 14)
2 None 30 (5, 15)
World Dim Axis Name Physical Type Units
0 Latitude pos.galactic.lat deg
1 Frequency em.freq Hz
2 Longitude pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1 2
0 yes no yes
1 no yes no
2 yes no yes
"""
def test_spectral_range():
wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, [slice(None), slice(4, 10)])
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape == (30, 6, 10)
assert wcs.pixel_shape == (10, 6, 30)
assert wcs.world_axis_physical_types == [
"pos.galactic.lat",
"em.freq",
"pos.galactic.lon",
]
assert wcs.world_axis_units == ["deg", "Hz", "deg"]
assert wcs.pixel_axis_names == ["", "", ""]
assert wcs.world_axis_names == ["Latitude", "Frequency", "Longitude"]
assert_equal(
wcs.axis_correlation_matrix,
[[True, False, True], [False, True, False], [True, False, True]],
)
assert len(wcs.world_axis_object_components) == 3
assert wcs.world_axis_object_components[0] == (
"celestial",
1,
"spherical.lat.degree",
)
assert wcs.world_axis_object_components[1][:2] == ("spectral", 0)
assert wcs.world_axis_object_components[2] == (
"celestial",
0,
"spherical.lon.degree",
)
assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
assert wcs.world_axis_object_classes["celestial"][1] == ()
assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], Galactic)
assert wcs.world_axis_object_classes["celestial"][2]["unit"] == (u.deg, u.deg)
assert wcs.world_axis_object_classes["spectral"][0] is Quantity
assert wcs.world_axis_object_classes["spectral"][1] == ()
assert wcs.world_axis_object_classes["spectral"][2] == {}
assert_allclose(wcs.pixel_to_world_values(29, 35, 44), (10, 20, 25))
assert_allclose(wcs.array_index_to_world_values(44, 35, 29), (10, 20, 25))
assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (29.0, 35.0, 44.0))
assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 35, 29))
assert_equal(wcs.pixel_bounds, [(-1, 11), (-6, 14), (5, 15)])
assert str(wcs) == EXPECTED_SPECTRAL_RANGE_REPR.strip()
assert EXPECTED_SPECTRAL_RANGE_REPR.strip() in repr(wcs)
EXPECTED_CELESTIAL_SLICE_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 2 pixel and 3 world dimensions
Array shape (Numpy order): (30, 20)
Pixel Dim Axis Name Data size Bounds
0 None 20 (-2, 18)
1 None 30 (5, 15)
World Dim Axis Name Physical Type Units
0 Latitude pos.galactic.lat deg
1 Frequency em.freq Hz
2 Longitude pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1
0 no yes
1 yes no
2 no yes
"""
def test_celestial_slice():
wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, [Ellipsis, 5])
assert wcs.pixel_n_dim == 2
assert wcs.world_n_dim == 3
assert wcs.array_shape == (30, 20)
assert wcs.pixel_shape == (20, 30)
assert wcs.world_axis_physical_types == [
"pos.galactic.lat",
"em.freq",
"pos.galactic.lon",
]
assert wcs.world_axis_units == ["deg", "Hz", "deg"]
assert wcs.pixel_axis_names == ["", ""]
assert wcs.world_axis_names == ["Latitude", "Frequency", "Longitude"]
assert_equal(
wcs.axis_correlation_matrix, [[False, True], [True, False], [False, True]]
)
assert len(wcs.world_axis_object_components) == 3
assert wcs.world_axis_object_components[0] == (
"celestial",
1,
"spherical.lat.degree",
)
assert wcs.world_axis_object_components[1][:2] == ("spectral", 0)
assert wcs.world_axis_object_components[2] == (
"celestial",
0,
"spherical.lon.degree",
)
assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
assert wcs.world_axis_object_classes["celestial"][1] == ()
assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], Galactic)
assert wcs.world_axis_object_classes["celestial"][2]["unit"] == (u.deg, u.deg)
assert wcs.world_axis_object_classes["spectral"][0] is Quantity
assert wcs.world_axis_object_classes["spectral"][1] == ()
assert wcs.world_axis_object_classes["spectral"][2] == {}
assert_allclose(wcs.pixel_to_world_values(39, 44), (12.4, 20, 25))
assert_allclose(wcs.array_index_to_world_values(44, 39), (12.4, 20, 25))
assert_allclose(wcs.world_to_pixel_values(12.4, 20, 25), (39.0, 44.0))
assert_equal(wcs.world_to_array_index_values(12.4, 20, 25), (44, 39))
assert_equal(wcs.pixel_bounds, [(-2, 18), (5, 15)])
assert str(wcs) == EXPECTED_CELESTIAL_SLICE_REPR.strip()
assert EXPECTED_CELESTIAL_SLICE_REPR.strip() in repr(wcs)
EXPECTED_CELESTIAL_RANGE_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 3 pixel and 3 world dimensions
Array shape (Numpy order): (30, 20, 5)
Pixel Dim Axis Name Data size Bounds
0 None 5 (-6, 6)
1 None 20 (-2, 18)
2 None 30 (5, 15)
World Dim Axis Name Physical Type Units
0 Latitude pos.galactic.lat deg
1 Frequency em.freq Hz
2 Longitude pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1 2
0 yes no yes
1 no yes no
2 yes no yes
"""
def test_celestial_range():
wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, [Ellipsis, slice(5, 10)])
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape == (30, 20, 5)
assert wcs.pixel_shape == (5, 20, 30)
assert wcs.world_axis_physical_types == [
"pos.galactic.lat",
"em.freq",
"pos.galactic.lon",
]
assert wcs.world_axis_units == ["deg", "Hz", "deg"]
assert wcs.pixel_axis_names == ["", "", ""]
assert wcs.world_axis_names == ["Latitude", "Frequency", "Longitude"]
assert_equal(
wcs.axis_correlation_matrix,
[[True, False, True], [False, True, False], [True, False, True]],
)
assert len(wcs.world_axis_object_components) == 3
assert wcs.world_axis_object_components[0] == (
"celestial",
1,
"spherical.lat.degree",
)
assert wcs.world_axis_object_components[1][:2] == ("spectral", 0)
assert wcs.world_axis_object_components[2] == (
"celestial",
0,
"spherical.lon.degree",
)
assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
assert wcs.world_axis_object_classes["celestial"][1] == ()
assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], Galactic)
assert wcs.world_axis_object_classes["celestial"][2]["unit"] == (u.deg, u.deg)
assert wcs.world_axis_object_classes["spectral"][0] is Quantity
assert wcs.world_axis_object_classes["spectral"][1] == ()
assert wcs.world_axis_object_classes["spectral"][2] == {}
assert_allclose(wcs.pixel_to_world_values(24, 39, 44), (10, 20, 25))
assert_allclose(wcs.array_index_to_world_values(44, 39, 24), (10, 20, 25))
assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (24.0, 39.0, 44.0))
assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 39, 24))
assert_equal(wcs.pixel_bounds, [(-6, 6), (-2, 18), (5, 15)])
assert str(wcs) == EXPECTED_CELESTIAL_RANGE_REPR.strip()
assert EXPECTED_CELESTIAL_RANGE_REPR.strip() in repr(wcs)
# Now try with a 90 degree rotation
with warnings.catch_warnings():
warnings.simplefilter("ignore", VerifyWarning)
WCS_SPECTRAL_CUBE_ROT = WCS(Header.fromstring(HEADER_SPECTRAL_CUBE, sep="\n"))
WCS_SPECTRAL_CUBE_ROT.wcs.pc = [[0, 0, 1], [0, 1, 0], [1, 0, 0]]
WCS_SPECTRAL_CUBE_ROT.wcs.crval[0] = 0
WCS_SPECTRAL_CUBE_ROT.pixel_bounds = [(-1, 11), (-2, 18), (5, 15)]
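# The permuted PC matrix above swaps which pixel axis drives longitude and
# which drives latitude, while leaving the correlation pattern between pixel
# and world axes unchanged, since both celestial world axes still depend on the
# same pair of pixel axes.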
EXPECTED_CELESTIAL_RANGE_ROT_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 3 pixel and 3 world dimensions
Array shape (Numpy order): (30, 20, 5)
Pixel Dim Axis Name Data size Bounds
0 None 5 (-6, 6)
1 None 20 (-2, 18)
2 None 30 (5, 15)
World Dim Axis Name Physical Type Units
0 Latitude pos.galactic.lat deg
1 Frequency em.freq Hz
2 Longitude pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1 2
0 yes no yes
1 no yes no
2 yes no yes
"""
def test_celestial_range_rot():
wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE_ROT, [Ellipsis, slice(5, 10)])
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape == (30, 20, 5)
assert wcs.pixel_shape == (5, 20, 30)
assert wcs.world_axis_physical_types == [
"pos.galactic.lat",
"em.freq",
"pos.galactic.lon",
]
assert wcs.world_axis_units == ["deg", "Hz", "deg"]
assert wcs.pixel_axis_names == ["", "", ""]
assert wcs.world_axis_names == ["Latitude", "Frequency", "Longitude"]
assert_equal(
wcs.axis_correlation_matrix,
[[True, False, True], [False, True, False], [True, False, True]],
)
assert len(wcs.world_axis_object_components) == 3
assert wcs.world_axis_object_components[0] == (
"celestial",
1,
"spherical.lat.degree",
)
assert wcs.world_axis_object_components[1][:2] == ("spectral", 0)
assert wcs.world_axis_object_components[2] == (
"celestial",
0,
"spherical.lon.degree",
)
assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
assert wcs.world_axis_object_classes["celestial"][1] == ()
assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], Galactic)
assert wcs.world_axis_object_classes["celestial"][2]["unit"] == (u.deg, u.deg)
assert wcs.world_axis_object_classes["spectral"][0] is Quantity
assert wcs.world_axis_object_classes["spectral"][1] == ()
assert wcs.world_axis_object_classes["spectral"][2] == {}
assert_allclose(wcs.pixel_to_world_values(14, 29, 34), (1, 15, 24))
assert_allclose(wcs.array_index_to_world_values(34, 29, 14), (1, 15, 24))
assert_allclose(wcs.world_to_pixel_values(1, 15, 24), (14.0, 29.0, 34.0))
assert_equal(wcs.world_to_array_index_values(1, 15, 24), (34, 29, 14))
assert_equal(wcs.pixel_bounds, [(-6, 6), (-2, 18), (5, 15)])
assert str(wcs) == EXPECTED_CELESTIAL_RANGE_ROT_REPR.strip()
assert EXPECTED_CELESTIAL_RANGE_ROT_REPR.strip() in repr(wcs)
HEADER_NO_SHAPE_CUBE = """
NAXIS = 3
CTYPE1 = GLAT-CAR
CTYPE2 = FREQ
CTYPE3 = GLON-CAR
CRVAL1 = 10
CRVAL2 = 20
CRVAL3 = 25
CRPIX1 = 30
CRPIX2 = 40
CRPIX3 = 45
CDELT1 = -0.1
CDELT2 = 0.5
CDELT3 = 0.1
CUNIT1 = deg
CUNIT2 = Hz
CUNIT3 = deg
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore", VerifyWarning)
WCS_NO_SHAPE_CUBE = WCS(Header.fromstring(HEADER_NO_SHAPE_CUBE, sep="\n"))
EXPECTED_NO_SHAPE_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 3 pixel and 3 world dimensions
Array shape (Numpy order): None
Pixel Dim Axis Name Data size Bounds
0 None None None
1 None None None
2 None None None
World Dim Axis Name Physical Type Units
0 None pos.galactic.lat deg
1 None em.freq Hz
2 None pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1 2
0 yes no yes
1 no yes no
2 yes no yes
"""
def test_no_array_shape():
wcs = SlicedLowLevelWCS(WCS_NO_SHAPE_CUBE, Ellipsis)
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape is None
assert wcs.pixel_shape is None
assert wcs.world_axis_physical_types == [
"pos.galactic.lat",
"em.freq",
"pos.galactic.lon",
]
assert wcs.world_axis_units == ["deg", "Hz", "deg"]
assert_equal(
wcs.axis_correlation_matrix,
[[True, False, True], [False, True, False], [True, False, True]],
)
assert len(wcs.world_axis_object_components) == 3
assert wcs.world_axis_object_components[0] == (
"celestial",
1,
"spherical.lat.degree",
)
assert wcs.world_axis_object_components[1][:2] == ("spectral", 0)
assert wcs.world_axis_object_components[2] == (
"celestial",
0,
"spherical.lon.degree",
)
assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
assert wcs.world_axis_object_classes["celestial"][1] == ()
assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], Galactic)
assert wcs.world_axis_object_classes["celestial"][2]["unit"] == (u.deg, u.deg)
assert wcs.world_axis_object_classes["spectral"][0] is Quantity
assert wcs.world_axis_object_classes["spectral"][1] == ()
assert wcs.world_axis_object_classes["spectral"][2] == {}
assert_allclose(wcs.pixel_to_world_values(29, 39, 44), (10, 20, 25))
assert_allclose(wcs.array_index_to_world_values(44, 39, 29), (10, 20, 25))
assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (29.0, 39.0, 44.0))
assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 39, 29))
assert str(wcs) == EXPECTED_NO_SHAPE_REPR.strip()
assert EXPECTED_NO_SHAPE_REPR.strip() in repr(wcs)
# Testing the WCS object having some physical types as None/Unknown
HEADER_SPECTRAL_CUBE_NONE_TYPES = {
"CTYPE1": "GLAT-CAR",
"CUNIT1": "deg",
"CDELT1": -0.1,
"CRPIX1": 30,
"CRVAL1": 10,
"NAXIS1": 10,
"CTYPE2": "",
"CUNIT2": "Hz",
"CDELT2": 0.5,
"CRPIX2": 40,
"CRVAL2": 20,
"NAXIS2": 20,
"CTYPE3": "GLON-CAR",
"CUNIT3": "deg",
"CDELT3": 0.1,
"CRPIX3": 45,
"CRVAL3": 25,
"NAXIS3": 30,
}
WCS_SPECTRAL_CUBE_NONE_TYPES = WCS(header=HEADER_SPECTRAL_CUBE_NONE_TYPES)
WCS_SPECTRAL_CUBE_NONE_TYPES.pixel_bounds = [(-1, 11), (-2, 18), (5, 15)]
EXPECTED_ELLIPSIS_REPR_NONE_TYPES = """
SlicedLowLevelWCS Transformation
This transformation has 3 pixel and 3 world dimensions
Array shape (Numpy order): (30, 20, 10)
Pixel Dim Axis Name Data size Bounds
0 None 10 (-1, 11)
1 None 20 (-2, 18)
2 None 30 (5, 15)
World Dim Axis Name Physical Type Units
0 None pos.galactic.lat deg
1 None None Hz
2 None pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1 2
0 yes no yes
1 no yes no
2 yes no yes
"""
def test_ellipsis_none_types():
wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE_NONE_TYPES, Ellipsis)
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape == (30, 20, 10)
assert wcs.pixel_shape == (10, 20, 30)
assert wcs.world_axis_physical_types == [
"pos.galactic.lat",
None,
"pos.galactic.lon",
]
assert wcs.world_axis_units == ["deg", "Hz", "deg"]
assert_equal(
wcs.axis_correlation_matrix,
[[True, False, True], [False, True, False], [True, False, True]],
)
assert wcs.world_axis_object_components == [
("celestial", 1, "spherical.lat.degree"),
("world", 0, "value"),
("celestial", 0, "spherical.lon.degree"),
]
assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
assert wcs.world_axis_object_classes["celestial"][1] == ()
assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], Galactic)
assert wcs.world_axis_object_classes["celestial"][2]["unit"] == (u.deg, u.deg)
assert_allclose(wcs.pixel_to_world_values(29, 39, 44), (10, 20, 25))
assert_allclose(wcs.array_index_to_world_values(44, 39, 29), (10, 20, 25))
assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (29.0, 39.0, 44.0))
assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 39, 29))
assert_equal(wcs.pixel_bounds, [(-1, 11), (-2, 18), (5, 15)])
assert str(wcs) == EXPECTED_ELLIPSIS_REPR_NONE_TYPES.strip()
assert EXPECTED_ELLIPSIS_REPR_NONE_TYPES.strip() in repr(wcs)
CASES = [
(slice(None), slice(None), slice(None)),
(slice(None), slice(3, None), slice(3, None)),
(slice(None), slice(None, 16), slice(None, 16)),
(slice(None), slice(3, 16), slice(3, 16)),
(slice(2, None), slice(None), slice(2, None)),
(slice(2, None), slice(3, None), slice(5, None)),
(slice(2, None), slice(None, 16), slice(2, 18)),
(slice(2, None), slice(3, 16), slice(5, 18)),
(slice(None, 10), slice(None), slice(None, 10)),
(slice(None, 10), slice(3, None), slice(3, 10)),
(slice(None, 10), slice(None, 16), slice(None, 10)),
(slice(None, 10), slice(3, 16), slice(3, 10)),
(slice(2, 10), slice(None), slice(2, 10)),
(slice(2, 10), slice(3, None), slice(5, 10)),
(slice(2, 10), slice(None, 16), slice(2, 10)),
(slice(2, 10), slice(3, 16), slice(5, 10)),
(slice(None), 3, 3),
(slice(2, None), 3, 5),
(slice(None, 10), 3, 3),
(slice(2, 10), 3, 5),
]
@pytest.mark.parametrize(("slice1", "slice2", "expected"), CASES)
def test_combine_slices(slice1, slice2, expected):
assert combine_slices(slice1, slice2) == expected
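# A minimal cross-check, added here for illustration (not part of the original
# suite): the CASES above should agree with plain Numpy slicing of a
# sufficiently long 1-d array, since combining slices is meant to be equivalent
# to slicing twice.
def test_combine_slices_against_numpy_slicing():
    arr = np.arange(30)
    for slice1, slice2, expected in CASES:
        assert_equal(arr[slice1][slice2], arr[expected])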
def test_nested_slicing():
# Make sure that if we call slicing several times, the result is the same
# as calling the slicing once with the final slice settings.
wcs = WCS_SPECTRAL_CUBE
sub1 = SlicedLowLevelWCS(
SlicedLowLevelWCS(
SlicedLowLevelWCS(wcs, [slice(None), slice(1, 10), slice(None)]),
[3, slice(2, None)],
),
[slice(None), slice(2, 8)],
)
sub2 = wcs[3, 3:10, 2:8]
assert_allclose(sub1.pixel_to_world_values(3, 5), sub2.pixel_to_world_values(3, 5))
assert not isinstance(sub1._wcs, SlicedLowLevelWCS)
def test_too_much_slicing():
wcs = WCS_SPECTRAL_CUBE
with pytest.raises(
ValueError,
match=(
"Cannot slice WCS: the resulting WCS "
"should have at least one pixel and "
"one world dimension"
),
):
wcs[0, 1, 2]
HEADER_TIME_1D = """
SIMPLE = T
BITPIX = -32
NAXIS = 1
NAXIS1 = 2048
TIMESYS = 'UTC'
TREFPOS = 'TOPOCENT'
MJDREF = 50002.6
CTYPE1 = 'UTC'
CRVAL1 = 5
CUNIT1 = 's'
CRPIX1 = 1.0
CDELT1 = 2
OBSGEO-L= -20
OBSGEO-B= -70
OBSGEO-H= 2530
"""
@pytest.fixture
def header_time_1d():
return Header.fromstring(HEADER_TIME_1D, sep="\n")
@pytest.fixture
def time_1d_wcs(header_time_1d):
with warnings.catch_warnings():
warnings.simplefilter("ignore", FITSFixedWarning)
return WCS(header_time_1d)
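# Informal derivation of the values checked below (an added note): slicing with
# np.s_[10:20] shifts the pixel origin by 10, so sliced pixel p corresponds to
# original pixel p + 10 and to CRVAL1 + CDELT1 * (p + 10) = 5 + 2 * (p + 10)
# seconds, giving 27 and 29 for p = 1 and 2.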
def test_1d_sliced_low_level(time_1d_wcs):
sll = SlicedLowLevelWCS(time_1d_wcs, np.s_[10:20])
world = sll.pixel_to_world_values([1, 2])
assert isinstance(world, np.ndarray)
assert np.allclose(world, [27, 29])
def validate_info_dict(result, expected):
result_value = result.pop("value")
expected_value = expected.pop("value")
np.testing.assert_allclose(result_value, expected_value)
assert result == expected
def test_dropped_dimensions():
wcs = WCS_SPECTRAL_CUBE
sub = SlicedLowLevelWCS(wcs, np.s_[:, :, :])
assert sub.dropped_world_dimensions == {}
sub = SlicedLowLevelWCS(wcs, np.s_[:, 2:5, :])
assert sub.dropped_world_dimensions == {}
sub = SlicedLowLevelWCS(wcs, np.s_[:, 0])
waocomp = sub.dropped_world_dimensions.pop("world_axis_object_components")
assert len(waocomp) == 1 and waocomp[0][0] == "spectral" and waocomp[0][1] == 0
waocls = sub.dropped_world_dimensions.pop("world_axis_object_classes")
assert (
len(waocls) == 1
and "spectral" in waocls
and waocls["spectral"][0] == u.Quantity
)
validate_info_dict(
sub.dropped_world_dimensions,
{
"value": [0.5],
"world_axis_physical_types": ["em.freq"],
"world_axis_names": ["Frequency"],
"world_axis_units": ["Hz"],
"serialized_classes": False,
},
)
sub = SlicedLowLevelWCS(wcs, np.s_[:, 0, 0])
waocomp = sub.dropped_world_dimensions.pop("world_axis_object_components")
assert len(waocomp) == 1 and waocomp[0][0] == "spectral" and waocomp[0][1] == 0
waocls = sub.dropped_world_dimensions.pop("world_axis_object_classes")
assert (
len(waocls) == 1
and "spectral" in waocls
and waocls["spectral"][0] == u.Quantity
)
validate_info_dict(
sub.dropped_world_dimensions,
{
"value": [0.5],
"world_axis_physical_types": ["em.freq"],
"world_axis_names": ["Frequency"],
"world_axis_units": ["Hz"],
"serialized_classes": False,
},
)
sub = SlicedLowLevelWCS(wcs, np.s_[0, :, 0])
dwd = sub.dropped_world_dimensions
wao_classes = dwd.pop("world_axis_object_classes")
validate_info_dict(
dwd,
{
"value": [12.86995801, 20.49217541],
"world_axis_physical_types": ["pos.galactic.lat", "pos.galactic.lon"],
"world_axis_names": ["Latitude", "Longitude"],
"world_axis_units": ["deg", "deg"],
"serialized_classes": False,
"world_axis_object_components": [
("celestial", 1, "spherical.lat.degree"),
("celestial", 0, "spherical.lon.degree"),
],
},
)
assert wao_classes["celestial"][0] is SkyCoord
assert wao_classes["celestial"][1] == ()
assert isinstance(wao_classes["celestial"][2]["frame"], Galactic)
assert wao_classes["celestial"][2]["unit"] == (u.deg, u.deg)
sub = SlicedLowLevelWCS(wcs, np.s_[5, :5, 12])
dwd = sub.dropped_world_dimensions
wao_classes = dwd.pop("world_axis_object_classes")
validate_info_dict(
dwd,
{
"value": [11.67648267, 21.01921192],
"world_axis_physical_types": ["pos.galactic.lat", "pos.galactic.lon"],
"world_axis_names": ["Latitude", "Longitude"],
"world_axis_units": ["deg", "deg"],
"serialized_classes": False,
"world_axis_object_components": [
("celestial", 1, "spherical.lat.degree"),
("celestial", 0, "spherical.lon.degree"),
],
},
)
assert wao_classes["celestial"][0] is SkyCoord
assert wao_classes["celestial"][1] == ()
assert isinstance(wao_classes["celestial"][2]["frame"], Galactic)
assert wao_classes["celestial"][2]["unit"] == (u.deg, u.deg)
def test_dropped_dimensions_4d(cube_4d_fitswcs):
sub = SlicedLowLevelWCS(cube_4d_fitswcs, np.s_[:, 12, 5, 5])
dwd = sub.dropped_world_dimensions
wao_classes = dwd.pop("world_axis_object_classes")
wao_components = dwd.pop("world_axis_object_components")
validate_info_dict(
dwd,
{
"value": [4.0e00, -2.0e00, 1.0e10],
"world_axis_physical_types": ["pos.eq.ra", "pos.eq.dec", "em.freq"],
"world_axis_names": ["Right Ascension", "Declination", "Frequency"],
"world_axis_units": ["deg", "deg", "Hz"],
"serialized_classes": False,
},
)
assert wao_classes["celestial"][0] is SkyCoord
assert wao_classes["celestial"][1] == ()
assert isinstance(wao_classes["celestial"][2]["frame"], ICRS)
assert wao_classes["celestial"][2]["unit"] == (u.deg, u.deg)
assert wao_classes["spectral"][0:3] == (u.Quantity, (), {})
assert wao_components[0] == ("celestial", 0, "spherical.lon.degree")
assert wao_components[1] == ("celestial", 1, "spherical.lat.degree")
assert wao_components[2][0:2] == ("spectral", 0)
sub = SlicedLowLevelWCS(cube_4d_fitswcs, np.s_[12, 12])
dwd = sub.dropped_world_dimensions
wao_classes = dwd.pop("world_axis_object_classes")
wao_components = dwd.pop("world_axis_object_components")
validate_info_dict(
dwd,
{
"value": [1.0e10, 5.0e00],
"world_axis_physical_types": ["em.freq", "time"],
"world_axis_names": ["Frequency", "Time"],
"world_axis_units": ["Hz", "s"],
"serialized_classes": False,
},
)
assert wao_components[0][0:2] == ("spectral", 0)
assert wao_components[1][0] == "time"
assert wao_components[1][1] == 0
assert wao_classes["spectral"][0:3] == (u.Quantity, (), {})
assert wao_classes["time"][0:3] == (Time, (), {})
def test_pixel_to_world_values_different_int_types():
int_sliced = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, np.s_[:, 0, :])
np64_sliced = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, np.s_[:, np.int64(0), :])
pixel_arrays = ([0, 1], [0, 1])
for int_coord, np64_coord in zip(
int_sliced.pixel_to_world_values(*pixel_arrays),
np64_sliced.pixel_to_world_values(*pixel_arrays),
):
assert all(int_coord == np64_coord)
COUPLED_WCS_HEADER = {
"WCSAXES": 3,
"CRPIX1": (100 + 1) / 2,
"CRPIX2": (25 + 1) / 2,
"CRPIX3": 1.0,
"PC1_1": 0.0,
"PC1_2": -1.0,
"PC1_3": 0.0,
"PC2_1": 1.0,
"PC2_2": 0.0,
"PC2_3": -1.0,
"CDELT1": 5,
"CDELT2": 5,
"CDELT3": 0.055,
"CUNIT1": "arcsec",
"CUNIT2": "arcsec",
"CUNIT3": "Angstrom",
"CTYPE1": "HPLN-TAN",
"CTYPE2": "HPLT-TAN",
"CTYPE3": "WAVE",
"CRVAL1": 0.0,
"CRVAL2": 0.0,
"CRVAL3": 1.05,
}
def test_coupled_world_slicing():
fits_wcs = WCS(header=COUPLED_WCS_HEADER)
sl = SlicedLowLevelWCS(fits_wcs, 0)
world = fits_wcs.pixel_to_world_values(0, 0, 0)
out_pix = sl.world_to_pixel_values(world[0], world[1])
assert np.allclose(out_pix[0], 0)
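# Note (illustrative, not from the original file): in COUPLED_WCS_HEADER the PC
# matrix mixes the wavelength pixel axis into the latitude world axis (PC2_3 = -1),
# so the remaining world coordinates depend on the pixel value that was sliced away.
# SlicedLowLevelWCS has to substitute that dropped pixel value (0 here) internally,
# which is what the round-trip assertion above verifies.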
01acb7d967eb038a305776a5028e0b1dec556e1bbee36cb2baec0de27336951f
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import datetime
import functools
import os
from copy import deepcopy
from decimal import Decimal, localcontext
from io import StringIO
import erfa
import numpy as np
import pytest
from erfa import ErfaWarning
from numpy.testing import assert_allclose
from astropy import units as u
from astropy.coordinates import EarthLocation
from astropy.table import Column, Table
from astropy.time import (
STANDARD_TIME_SCALES,
TIME_FORMATS,
AstropyDatetimeLeapSecondWarning,
ScaleValueError,
Time,
TimeDelta,
TimeString,
TimezoneInfo,
conf,
)
from astropy.utils import iers, isiterable
from astropy.utils.compat.optional_deps import HAS_H5PY, HAS_PYTZ
from astropy.utils.exceptions import AstropyDeprecationWarning
allclose_jd = functools.partial(np.allclose, rtol=np.finfo(float).eps, atol=0)
allclose_jd2 = functools.partial(
np.allclose, rtol=np.finfo(float).eps, atol=np.finfo(float).eps
) # 20 ps atol
allclose_sec = functools.partial(
np.allclose, rtol=np.finfo(float).eps, atol=np.finfo(float).eps * 24 * 3600
)
allclose_year = functools.partial(
np.allclose, rtol=np.finfo(float).eps, atol=0.0
) # 14 microsec at current epoch
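# Worked tolerances (illustrative): np.finfo(float).eps is ~2.2e-16.  For
# allclose_jd2 an atol of one eps in days is ~2.2e-16 * 86400 s ~ 1.9e-11 s,
# i.e. the "20 ps" noted above.  For allclose_year a relative tolerance of eps
# at a year value near 2000 is ~2000 * 2.2e-16 yr ~ 4.4e-13 yr, or roughly
# 14 microseconds, the "14 microsec at current epoch" noted above.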
def setup_function(func):
func.FORMATS_ORIG = deepcopy(Time.FORMATS)
def teardown_function(func):
Time.FORMATS.clear()
Time.FORMATS.update(func.FORMATS_ORIG)
class TestBasic:
"""Basic tests stemming from initial example and API reference"""
def test_simple(self):
times = ["1999-01-01 00:00:00.123456789", "2010-01-01 00:00:00"]
t = Time(times, format="iso", scale="utc")
assert (
repr(t) == "<Time object: scale='utc' format='iso' "
"value=['1999-01-01 00:00:00.123' '2010-01-01 00:00:00.000']>"
)
assert allclose_jd(t.jd1, np.array([2451180.0, 2455198.0]))
assert allclose_jd2(
t.jd2, np.array([-0.5 + 1.4288980208333335e-06, -0.50000000e00])
)
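        # Worked value (illustrative): the 1.4288980208333335e-06 above is simply
        # the fractional seconds of the first input, 0.123456789 s / 86400 s per day,
        # added to the -0.5 day offset of midnight from the integral jd1 (JD is
        # referenced to noon).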
# Set scale to TAI
t = t.tai
assert (
repr(t) == "<Time object: scale='tai' format='iso' "
"value=['1999-01-01 00:00:32.123' '2010-01-01 00:00:34.000']>"
)
assert allclose_jd(t.jd1, np.array([2451180.0, 2455198.0]))
assert allclose_jd2(
t.jd2,
np.array([-0.5 + 0.00037179926839122024, -0.5 + 0.00039351851851851852]),
)
# Get a new ``Time`` object which is referenced to the TT scale
        # (internal jd1 and jd2 are now with respect to the TT scale)
assert (
repr(t.tt) == "<Time object: scale='tt' format='iso' "
"value=['1999-01-01 00:01:04.307' '2010-01-01 00:01:06.184']>"
)
# Get the representation of the ``Time`` object in a particular format
# (in this case seconds since 1998.0). This returns either a scalar or
        # array, depending on whether the input was a scalar or array.
assert allclose_sec(
t.cxcsec, np.array([31536064.307456788, 378691266.18400002])
)
def test_different_dimensions(self):
"""Test scalars, vector, and higher-dimensions"""
# scalar
val, val1 = 2450000.0, 0.125
t1 = Time(val, val1, format="jd")
assert t1.isscalar is True and t1.shape == ()
# vector
val = np.arange(2450000.0, 2450010.0)
t2 = Time(val, format="jd")
assert t2.isscalar is False and t2.shape == val.shape
# explicitly check broadcasting for mixed vector, scalar.
val2 = 0.0
t3 = Time(val, val2, format="jd")
assert t3.isscalar is False and t3.shape == val.shape
val2 = (np.arange(5.0) / 10.0).reshape(5, 1)
# now see if broadcasting to two-dimensional works
t4 = Time(val, val2, format="jd")
assert t4.isscalar is False
assert t4.shape == np.broadcast(val, val2).shape
@pytest.mark.parametrize("format_", Time.FORMATS)
def test_empty_value(self, format_):
t = Time([], format=format_)
assert t.size == 0
assert t.shape == (0,)
assert t.format == format_
t_value = t.value
assert t_value.size == 0
assert t_value.shape == (0,)
t2 = Time(t_value, format=format_)
assert t2.size == 0
assert t2.shape == (0,)
assert t2.format == format_
t3 = t2.tai
assert t3.size == 0
assert t3.shape == (0,)
assert t3.format == format_
assert t3.scale == "tai"
@pytest.mark.parametrize("value", [2455197.5, [2455197.5]])
def test_copy_time(self, value):
"""Test copying the values of a Time object by passing it into the
Time initializer.
"""
t = Time(value, format="jd", scale="utc")
t2 = Time(t, copy=False)
assert np.all(t.jd - t2.jd == 0)
assert np.all((t - t2).jd == 0)
assert t._time.jd1 is t2._time.jd1
assert t._time.jd2 is t2._time.jd2
t2 = Time(t, copy=True)
assert np.all(t.jd - t2.jd == 0)
assert np.all((t - t2).jd == 0)
assert t._time.jd1 is not t2._time.jd1
assert t._time.jd2 is not t2._time.jd2
# Include initializers
t2 = Time(t, format="iso", scale="tai", precision=1)
assert t2.value == "2010-01-01 00:00:34.0"
t2 = Time(t, format="iso", scale="tai", out_subfmt="date")
assert t2.value == "2010-01-01"
def test_getitem(self):
"""Test that Time objects holding arrays are properly subscriptable,
set isscalar as appropriate, and also subscript delta_ut1_utc, etc."""
mjd = np.arange(50000, 50010)
t = Time(mjd, format="mjd", scale="utc", location=("45d", "50d"))
t1 = t[3]
assert t1.isscalar is True
assert t1._time.jd1 == t._time.jd1[3]
assert t1.location is t.location
t1a = Time(mjd[3], format="mjd", scale="utc")
assert t1a.isscalar is True
assert np.all(t1._time.jd1 == t1a._time.jd1)
t1b = Time(t[3])
assert t1b.isscalar is True
assert np.all(t1._time.jd1 == t1b._time.jd1)
t2 = t[4:6]
assert t2.isscalar is False
assert np.all(t2._time.jd1 == t._time.jd1[4:6])
assert t2.location is t.location
t2a = Time(t[4:6])
assert t2a.isscalar is False
assert np.all(t2a._time.jd1 == t._time.jd1[4:6])
t2b = Time([t[4], t[5]])
assert t2b.isscalar is False
assert np.all(t2b._time.jd1 == t._time.jd1[4:6])
t2c = Time((t[4], t[5]))
assert t2c.isscalar is False
assert np.all(t2c._time.jd1 == t._time.jd1[4:6])
t.delta_tdb_tt = np.arange(len(t)) # Explicitly set (not testing .tdb)
t3 = t[4:6]
assert np.all(t3._delta_tdb_tt == t._delta_tdb_tt[4:6])
t4 = Time(
mjd,
format="mjd",
scale="utc",
location=(np.arange(len(mjd)), np.arange(len(mjd))),
)
t5a = t4[3]
assert t5a.location == t4.location[3]
assert t5a.location.shape == ()
t5b = t4[3:4]
assert t5b.location.shape == (1,)
# Check that indexing a size-1 array returns a scalar location as well;
# see gh-10113.
t5c = t5b[0]
assert t5c.location.shape == ()
t6 = t4[4:6]
assert np.all(t6.location == t4.location[4:6])
# check it is a view
# (via ndarray, since quantity setter problematic for structured array)
allzeros = np.array((0.0, 0.0, 0.0), dtype=t4.location.dtype)
assert t6.location.view(np.ndarray)[-1] != allzeros
assert t4.location.view(np.ndarray)[5] != allzeros
t6.location.view(np.ndarray)[-1] = allzeros
assert t4.location.view(np.ndarray)[5] == allzeros
# Test subscription also works for two-dimensional arrays.
frac = np.arange(0.0, 0.999, 0.2)
t7 = Time(
mjd[:, np.newaxis] + frac,
format="mjd",
scale="utc",
location=("45d", "50d"),
)
assert t7[0, 0]._time.jd1 == t7._time.jd1[0, 0]
assert t7[0, 0].isscalar is True
assert np.all(t7[5]._time.jd1 == t7._time.jd1[5])
assert np.all(t7[5]._time.jd2 == t7._time.jd2[5])
assert np.all(t7[:, 2]._time.jd1 == t7._time.jd1[:, 2])
assert np.all(t7[:, 2]._time.jd2 == t7._time.jd2[:, 2])
assert np.all(t7[:, 0]._time.jd1 == t._time.jd1)
assert np.all(t7[:, 0]._time.jd2 == t._time.jd2)
# Get tdb to check that delta_tdb_tt attribute is sliced properly.
t7_tdb = t7.tdb
assert t7_tdb[0, 0].delta_tdb_tt == t7_tdb.delta_tdb_tt[0, 0]
assert np.all(t7_tdb[5].delta_tdb_tt == t7_tdb.delta_tdb_tt[5])
assert np.all(t7_tdb[:, 2].delta_tdb_tt == t7_tdb.delta_tdb_tt[:, 2])
# Explicitly set delta_tdb_tt attribute. Now it should not be sliced.
t7.delta_tdb_tt = 0.1
t7_tdb2 = t7.tdb
assert t7_tdb2[0, 0].delta_tdb_tt == 0.1
assert t7_tdb2[5].delta_tdb_tt == 0.1
assert t7_tdb2[:, 2].delta_tdb_tt == 0.1
# Check broadcasting of location.
t8 = Time(
mjd[:, np.newaxis] + frac,
format="mjd",
scale="utc",
location=(np.arange(len(frac)), np.arange(len(frac))),
)
assert t8[0, 0].location == t8.location[0, 0]
assert np.all(t8[5].location == t8.location[5])
assert np.all(t8[:, 2].location == t8.location[:, 2])
# Finally check empty array.
t9 = t[:0]
assert t9.isscalar is False
assert t9.shape == (0,)
assert t9.size == 0
def test_properties(self):
"""Use properties to convert scales and formats. Note that the UT1 to
UTC transformation requires a supplementary value (``delta_ut1_utc``)
that can be obtained by interpolating from a table supplied by IERS.
This is tested separately."""
t = Time("2010-01-01 00:00:00", format="iso", scale="utc")
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert allclose_jd(t.jd, 2455197.5)
assert t.iso == "2010-01-01 00:00:00.000"
assert t.tt.iso == "2010-01-01 00:01:06.184"
assert t.tai.fits == "2010-01-01T00:00:34.000"
assert allclose_jd(t.utc.jd, 2455197.5)
assert allclose_jd(t.ut1.jd, 2455197.500003867)
assert t.tcg.isot == "2010-01-01T00:01:06.910"
assert allclose_sec(t.unix, 1262304000.0)
assert allclose_sec(t.cxcsec, 378691266.184)
assert allclose_sec(t.gps, 946339215.0)
assert t.datetime == datetime.datetime(2010, 1, 1)
def test_precision(self):
"""Set the output precision which is used for some formats. This is
also a test of the code that provides a dict for global and instance
options."""
t = Time("2010-01-01 00:00:00", format="iso", scale="utc")
# Uses initial class-defined precision=3
assert t.iso == "2010-01-01 00:00:00.000"
# Set instance precision to 9
t.precision = 9
assert t.iso == "2010-01-01 00:00:00.000000000"
assert t.tai.utc.iso == "2010-01-01 00:00:00.000000000"
def test_precision_input(self):
"""Verifies that precision can only be 0-9 (inclusive). Any other
value should raise a ValueError exception."""
err_message = "precision attribute must be an int"
with pytest.raises(ValueError, match=err_message):
t = Time("2010-01-01 00:00:00", format="iso", scale="utc", precision=10)
with pytest.raises(ValueError, match=err_message):
t = Time("2010-01-01 00:00:00", format="iso", scale="utc")
t.precision = -1
def test_transforms(self):
"""Transform from UTC to all supported time scales (TAI, TCB, TCG,
TDB, TT, UT1, UTC). This requires auxiliary information (latitude and
longitude)."""
lat = 19.48125
lon = -155.933222
t = Time(
"2006-01-15 21:24:37.5",
format="iso",
scale="utc",
precision=7,
location=(lon, lat),
)
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert t.utc.iso == "2006-01-15 21:24:37.5000000"
assert t.ut1.iso == "2006-01-15 21:24:37.8341000"
assert t.tai.iso == "2006-01-15 21:25:10.5000000"
assert t.tt.iso == "2006-01-15 21:25:42.6840000"
assert t.tcg.iso == "2006-01-15 21:25:43.3226905"
assert t.tdb.iso == "2006-01-15 21:25:42.6843728"
assert t.tcb.iso == "2006-01-15 21:25:56.8939523"
def test_transforms_no_location(self):
"""Location should default to geocenter (relevant for TDB, TCB)."""
t = Time("2006-01-15 21:24:37.5", format="iso", scale="utc", precision=7)
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert t.utc.iso == "2006-01-15 21:24:37.5000000"
assert t.ut1.iso == "2006-01-15 21:24:37.8341000"
assert t.tai.iso == "2006-01-15 21:25:10.5000000"
assert t.tt.iso == "2006-01-15 21:25:42.6840000"
assert t.tcg.iso == "2006-01-15 21:25:43.3226905"
assert t.tdb.iso == "2006-01-15 21:25:42.6843725"
assert t.tcb.iso == "2006-01-15 21:25:56.8939519"
# Check we get the same result
t2 = Time(
"2006-01-15 21:24:37.5",
format="iso",
scale="utc",
location=(0 * u.m, 0 * u.m, 0 * u.m),
)
assert t == t2
assert t.tdb == t2.tdb
def test_location(self):
"""Check that location creates an EarthLocation object, and that
such objects can be used as arguments.
"""
lat = 19.48125
lon = -155.933222
t = Time(
["2006-01-15 21:24:37.5"],
format="iso",
scale="utc",
precision=6,
location=(lon, lat),
)
assert isinstance(t.location, EarthLocation)
location = EarthLocation(lon, lat)
t2 = Time(
["2006-01-15 21:24:37.5"],
format="iso",
scale="utc",
precision=6,
location=location,
)
assert isinstance(t2.location, EarthLocation)
assert t2.location == t.location
t3 = Time(
["2006-01-15 21:24:37.5"],
format="iso",
scale="utc",
precision=6,
location=(location.x, location.y, location.z),
)
assert isinstance(t3.location, EarthLocation)
assert t3.location == t.location
def test_location_array(self):
"""Check that location arrays are checked for size and used
for the corresponding times. Also checks that erfa
can handle array-valued locations, and can broadcast these if needed.
"""
lat = 19.48125
lon = -155.933222
t = Time(
["2006-01-15 21:24:37.5"] * 2,
format="iso",
scale="utc",
precision=6,
location=(lon, lat),
)
assert np.all(t.utc.iso == "2006-01-15 21:24:37.500000")
assert np.all(t.tdb.iso[0] == "2006-01-15 21:25:42.684373")
t2 = Time(
["2006-01-15 21:24:37.5"] * 2,
format="iso",
scale="utc",
precision=6,
location=(np.array([lon, 0]), np.array([lat, 0])),
)
assert np.all(t2.utc.iso == "2006-01-15 21:24:37.500000")
assert t2.tdb.iso[0] == "2006-01-15 21:25:42.684373"
assert t2.tdb.iso[1] != "2006-01-15 21:25:42.684373"
with pytest.raises(ValueError): # 1 time, but two locations
Time(
"2006-01-15 21:24:37.5",
format="iso",
scale="utc",
precision=6,
location=(np.array([lon, 0]), np.array([lat, 0])),
)
with pytest.raises(ValueError): # 3 times, but two locations
Time(
["2006-01-15 21:24:37.5"] * 3,
format="iso",
scale="utc",
precision=6,
location=(np.array([lon, 0]), np.array([lat, 0])),
)
# multidimensional
mjd = np.arange(50000.0, 50008.0).reshape(4, 2)
t3 = Time(mjd, format="mjd", scale="utc", location=(lon, lat))
assert t3.shape == (4, 2)
assert t3.location.shape == ()
assert t3.tdb.shape == t3.shape
t4 = Time(
mjd,
format="mjd",
scale="utc",
location=(np.array([lon, 0]), np.array([lat, 0])),
)
assert t4.shape == (4, 2)
assert t4.location.shape == t4.shape
assert t4.tdb.shape == t4.shape
t5 = Time(
mjd,
format="mjd",
scale="utc",
location=(
np.array([[lon], [0], [0], [0]]),
np.array([[lat], [0], [0], [0]]),
),
)
assert t5.shape == (4, 2)
assert t5.location.shape == t5.shape
assert t5.tdb.shape == t5.shape
def test_all_scale_transforms(self):
"""Test that standard scale transforms work. Does not test correctness,
except reversibility [#2074]. Also tests that standard scales can't be
        converted to local scales."""
lat = 19.48125
lon = -155.933222
with iers.conf.set_temp("auto_download", False):
for scale1 in STANDARD_TIME_SCALES:
t1 = Time(
"2006-01-15 21:24:37.5",
format="iso",
scale=scale1,
location=(lon, lat),
)
for scale2 in STANDARD_TIME_SCALES:
t2 = getattr(t1, scale2)
t21 = getattr(t2, scale1)
assert allclose_jd(t21.jd, t1.jd)
# test for conversion to local scale
scale3 = "local"
with pytest.raises(ScaleValueError):
t2 = getattr(t1, scale3)
def test_creating_all_formats(self):
"""Create a time object using each defined format"""
Time(2000.5, format="decimalyear")
Time(100.0, format="cxcsec")
Time(100.0, format="unix")
Time(100.0, format="gps")
Time(1950.0, format="byear", scale="tai")
Time(2000.0, format="jyear", scale="tai")
Time("B1950.0", format="byear_str", scale="tai")
Time("J2000.0", format="jyear_str", scale="tai")
Time("2000-01-01 12:23:34.0", format="iso", scale="tai")
Time("2000-01-01 12:23:34.0Z", format="iso", scale="utc")
Time("2000-01-01T12:23:34.0", format="isot", scale="tai")
Time("2000-01-01T12:23:34.0Z", format="isot", scale="utc")
Time("2000-01-01T12:23:34.0", format="fits")
Time("2000-01-01T12:23:34.0", format="fits", scale="tdb")
Time(2400000.5, 51544.0333981, format="jd", scale="tai")
Time(0.0, 51544.0333981, format="mjd", scale="tai")
Time("2000:001:12:23:34.0", format="yday", scale="tai")
Time("2000:001:12:23:34.0Z", format="yday", scale="utc")
dt = datetime.datetime(2000, 1, 2, 3, 4, 5, 123456)
Time(dt, format="datetime", scale="tai")
Time([dt, dt], format="datetime", scale="tai")
dt64 = np.datetime64("2012-06-18T02:00:05.453000000")
Time(dt64, format="datetime64", scale="tai")
Time([dt64, dt64], format="datetime64", scale="tai")
def test_local_format_transforms(self):
"""
        Test transformation of local time to different formats.
        Transformation to formats with a reference time should give
        ScaleValueError.
"""
t = Time("2006-01-15 21:24:37.5", scale="local")
assert_allclose(t.jd, 2453751.3921006946, atol=0.001 / 3600.0 / 24.0, rtol=0.0)
assert_allclose(t.mjd, 53750.892100694444, atol=0.001 / 3600.0 / 24.0, rtol=0.0)
assert_allclose(
t.decimalyear,
2006.0408002758752,
atol=0.001 / 3600.0 / 24.0 / 365.0,
rtol=0.0,
)
assert t.datetime == datetime.datetime(2006, 1, 15, 21, 24, 37, 500000)
assert t.isot == "2006-01-15T21:24:37.500"
assert t.yday == "2006:015:21:24:37.500"
assert t.fits == "2006-01-15T21:24:37.500"
assert_allclose(
t.byear, 2006.04217888831, atol=0.001 / 3600.0 / 24.0 / 365.0, rtol=0.0
)
assert_allclose(
t.jyear, 2006.0407723496082, atol=0.001 / 3600.0 / 24.0 / 365.0, rtol=0.0
)
assert t.byear_str == "B2006.042"
assert t.jyear_str == "J2006.041"
# epochTimeFormats
with pytest.raises(ScaleValueError):
t.gps
with pytest.raises(ScaleValueError):
t.unix
with pytest.raises(ScaleValueError):
t.cxcsec
with pytest.raises(ScaleValueError):
t.plot_date
def test_datetime(self):
"""
Test datetime format, including guessing the format from the input type
by not providing the format keyword to Time.
"""
dt = datetime.datetime(2000, 1, 2, 3, 4, 5, 123456)
dt2 = datetime.datetime(2001, 1, 1)
t = Time(dt, scale="utc", precision=9)
assert t.iso == "2000-01-02 03:04:05.123456000"
assert t.datetime == dt
assert t.value == dt
t2 = Time(t.iso, scale="utc")
assert t2.datetime == dt
t = Time([dt, dt2], scale="utc")
assert np.all(t.value == [dt, dt2])
t = Time("2000-01-01 01:01:01.123456789", scale="tai")
assert t.datetime == datetime.datetime(2000, 1, 1, 1, 1, 1, 123457)
# broadcasting
dt3 = (dt + (dt2 - dt) * np.arange(12)).reshape(4, 3)
t3 = Time(dt3, scale="utc")
assert t3.shape == (4, 3)
assert t3[2, 1].value == dt3[2, 1]
assert t3[2, 1] == Time(dt3[2, 1])
assert np.all(t3.value == dt3)
assert np.all(t3[1].value == dt3[1])
assert np.all(t3[:, 2] == Time(dt3[:, 2]))
assert Time(t3[2, 0]) == t3[2, 0]
def test_datetime64(self):
dt64 = np.datetime64("2000-01-02T03:04:05.123456789")
dt64_2 = np.datetime64("2000-01-02")
t = Time(dt64, scale="utc", precision=9, format="datetime64")
assert t.iso == "2000-01-02 03:04:05.123456789"
assert t.datetime64 == dt64
assert t.value == dt64
t2 = Time(t.iso, scale="utc")
assert t2.datetime64 == dt64
t = Time(dt64_2, scale="utc", precision=3, format="datetime64")
assert t.iso == "2000-01-02 00:00:00.000"
assert t.datetime64 == dt64_2
assert t.value == dt64_2
t2 = Time(t.iso, scale="utc")
assert t2.datetime64 == dt64_2
t = Time([dt64, dt64_2], scale="utc", format="datetime64")
assert np.all(t.value == [dt64, dt64_2])
t = Time("2000-01-01 01:01:01.123456789", scale="tai")
assert t.datetime64 == np.datetime64("2000-01-01T01:01:01.123456789")
# broadcasting
dt3 = (dt64 + (dt64_2 - dt64) * np.arange(12)).reshape(4, 3)
t3 = Time(dt3, scale="utc", format="datetime64")
assert t3.shape == (4, 3)
assert t3[2, 1].value == dt3[2, 1]
assert t3[2, 1] == Time(dt3[2, 1], format="datetime64")
assert np.all(t3.value == dt3)
assert np.all(t3[1].value == dt3[1])
assert np.all(t3[:, 2] == Time(dt3[:, 2], format="datetime64"))
assert Time(t3[2, 0], format="datetime64") == t3[2, 0]
def test_epoch_transform(self):
"""Besselian and julian epoch transforms"""
jd = 2457073.05631
t = Time(jd, format="jd", scale="tai", precision=6)
assert allclose_year(t.byear, 2015.1365941020817)
assert allclose_year(t.jyear, 2015.1349933196439)
assert t.byear_str == "B2015.136594"
assert t.jyear_str == "J2015.134993"
t2 = Time(t.byear, format="byear", scale="tai")
assert allclose_jd(t2.jd, jd)
t2 = Time(t.jyear, format="jyear", scale="tai")
assert allclose_jd(t2.jd, jd)
t = Time("J2015.134993", scale="tai", precision=6)
assert np.allclose(
t.jd, jd, rtol=1e-10, atol=0
) # J2015.134993 has 10 digit precision
assert t.byear_str == "B2015.136594"
def test_input_validation(self):
"""Wrong input type raises error"""
times = [10, 20]
with pytest.raises(ValueError):
Time(times, format="iso", scale="utc")
with pytest.raises(ValueError):
Time("2000:001", format="jd", scale="utc")
with pytest.raises(ValueError): # unguessable
Time([])
with pytest.raises(ValueError):
Time([50000.0], ["bad"], format="mjd", scale="tai")
with pytest.raises(ValueError):
Time(50000.0, "bad", format="mjd", scale="tai")
with pytest.raises(ValueError):
Time("2005-08-04T00:01:02.000Z", scale="tai")
# regression test against #3396
with pytest.raises(ValueError):
Time(np.nan, format="jd", scale="utc")
with pytest.raises(ValueError):
with pytest.warns(AstropyDeprecationWarning):
Time("2000-01-02T03:04:05(TAI)", scale="utc")
with pytest.raises(ValueError):
Time("2000-01-02T03:04:05(TAI")
with pytest.raises(ValueError):
Time("2000-01-02T03:04:05(UT(NIST)")
def test_utc_leap_sec(self):
"""Time behaves properly near or in UTC leap second. This
uses the 2012-06-30 leap second for testing."""
for year, month, day in ((2012, 6, 30), (2016, 12, 31)):
# Start with a day without a leap second and note rollover
yyyy_mm = f"{year:04d}-{month:02d}"
yyyy_mm_dd = f"{year:04d}-{month:02d}-{day:02d}"
with pytest.warns(ErfaWarning):
t1 = Time(yyyy_mm + "-01 23:59:60.0", scale="utc")
assert t1.iso == yyyy_mm + "-02 00:00:00.000"
# Leap second is different
t1 = Time(yyyy_mm_dd + " 23:59:59.900", scale="utc")
assert t1.iso == yyyy_mm_dd + " 23:59:59.900"
t1 = Time(yyyy_mm_dd + " 23:59:60.000", scale="utc")
assert t1.iso == yyyy_mm_dd + " 23:59:60.000"
t1 = Time(yyyy_mm_dd + " 23:59:60.999", scale="utc")
assert t1.iso == yyyy_mm_dd + " 23:59:60.999"
if month == 6:
yyyy_mm_dd_plus1 = f"{year:04d}-07-01"
else:
yyyy_mm_dd_plus1 = f"{year + 1:04d}-01-01"
with pytest.warns(ErfaWarning):
t1 = Time(yyyy_mm_dd + " 23:59:61.0", scale="utc")
assert t1.iso == yyyy_mm_dd_plus1 + " 00:00:00.000"
# Delta time gives 2 seconds here as expected
t0 = Time(yyyy_mm_dd + " 23:59:59", scale="utc")
t1 = Time(yyyy_mm_dd_plus1 + " 00:00:00", scale="utc")
assert allclose_sec((t1 - t0).sec, 2.0)
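            # Why 2 s (illustrative note): 23:59:59 to the next day's 00:00:00 is
            # nominally 1 s, but the leap second makes that UTC day 86401 s long,
            # so the true elapsed interval is 2 s.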
def test_init_from_time_objects(self):
"""Initialize from one or more Time objects"""
t1 = Time("2007:001", scale="tai")
t2 = Time(["2007-01-02", "2007-01-03"], scale="utc")
# Init from a list of Time objects without an explicit scale
t3 = Time([t1, t2])
# Test that init appropriately combines a scalar (t1) and list (t2)
# and that scale and format are same as first element.
assert len(t3) == 3
assert t3.scale == t1.scale
assert t3.format == t1.format # t1 format is yday
assert np.all(t3.value == np.concatenate([[t1.yday], t2.tai.yday]))
# Init from a single Time object without a scale
t3 = Time(t1)
assert t3.isscalar
assert t3.scale == t1.scale
assert t3.format == t1.format
assert np.all(t3.value == t1.value)
# Init from a single Time object with scale specified
t3 = Time(t1, scale="utc")
assert t3.scale == "utc"
assert np.all(t3.value == t1.utc.value)
# Init from a list of Time object with scale specified
t3 = Time([t1, t2], scale="tt")
assert t3.scale == "tt"
assert t3.format == t1.format # yday
assert np.all(t3.value == np.concatenate([[t1.tt.yday], t2.tt.yday]))
# OK, how likely is this... but might as well test.
mjd = np.arange(50000.0, 50006.0)
frac = np.arange(0.0, 0.999, 0.2)
t4 = Time(mjd[:, np.newaxis] + frac, format="mjd", scale="utc")
t5 = Time([t4[:2], t4[4:5]])
assert t5.shape == (3, 5)
# throw error when deriving local scale time
# from non local time scale
with pytest.raises(ValueError):
Time(t1, scale="local")
class TestVal2:
"""Tests related to val2"""
@pytest.mark.parametrize(
"d",
[
dict(val="2001:001", val2="ignored", scale="utc"),
dict(
val={
"year": 2015,
"month": 2,
"day": 3,
"hour": 12,
"minute": 13,
"second": 14.567,
},
val2="ignored",
scale="utc",
),
dict(val=np.datetime64("2005-02-25"), val2="ignored", scale="utc"),
dict(
val=datetime.datetime(2000, 1, 2, 12, 0, 0), val2="ignored", scale="utc"
),
],
)
def test_unused_val2_raises(self, d):
"""Test that providing val2 is for string input lets user know we won't use it"""
with pytest.raises(ValueError):
Time(**d)
def test_val2(self):
"""Various tests of the val2 input"""
t = Time([0.0, 50000.0], [50000.0, 0.0], format="mjd", scale="tai")
assert t.mjd[0] == t.mjd[1]
assert t.jd[0] == t.jd[1]
def test_val_broadcasts_against_val2(self):
mjd = np.arange(50000.0, 50007.0)
frac = np.arange(0.0, 0.999, 0.2)
t = Time(mjd[:, np.newaxis], frac, format="mjd", scale="utc")
assert t.shape == (7, 5)
with pytest.raises(ValueError):
Time([0.0, 50000.0], [0.0, 1.0, 2.0], format="mjd", scale="tai")
def test_broadcast_not_writable(self):
val = (2458000 + np.arange(3))[:, None]
val2 = np.linspace(0, 1, 4, endpoint=False)
t = Time(val=val, val2=val2, format="jd", scale="tai")
t_b = Time(val=val + 0 * val2, val2=0 * val + val2, format="jd", scale="tai")
t_i = Time(val=57990, val2=0.3, format="jd", scale="tai")
t_b[1, 2] = t_i
t[1, 2] = t_i
assert t_b[1, 2] == t[1, 2], "writing worked"
assert t_b[0, 2] == t[0, 2], "broadcasting didn't cause problems"
assert t_b[1, 1] == t[1, 1], "broadcasting didn't cause problems"
assert np.all(t_b == t), "behaved as expected"
def test_broadcast_one_not_writable(self):
val = 2458000 + np.arange(3)
val2 = np.arange(1)
t = Time(val=val, val2=val2, format="jd", scale="tai")
t_b = Time(val=val + 0 * val2, val2=0 * val + val2, format="jd", scale="tai")
t_i = Time(val=57990, val2=0.3, format="jd", scale="tai")
t_b[1] = t_i
t[1] = t_i
assert t_b[1] == t[1], "writing worked"
assert t_b[0] == t[0], "broadcasting didn't cause problems"
assert np.all(t_b == t), "behaved as expected"
class TestSubFormat:
"""Test input and output subformat functionality"""
def test_input_subformat(self):
"""Input subformat selection"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = [
"2000-01-01",
"2000-01-01 01:01",
"2000-01-01 01:01:01",
"2000-01-01 01:01:01.123",
]
t = Time(times, format="iso", scale="tai")
assert np.all(
t.iso
== np.array(
[
"2000-01-01 00:00:00.000",
"2000-01-01 01:01:00.000",
"2000-01-01 01:01:01.000",
"2000-01-01 01:01:01.123",
]
)
)
# Heterogeneous input formats with in_subfmt='date_*'
times = ["2000-01-01 01:01", "2000-01-01 01:01:01", "2000-01-01 01:01:01.123"]
t = Time(times, format="iso", scale="tai", in_subfmt="date_*")
assert np.all(
t.iso
== np.array(
[
"2000-01-01 01:01:00.000",
"2000-01-01 01:01:01.000",
"2000-01-01 01:01:01.123",
]
)
)
def test_input_subformat_fail(self):
"""Failed format matching"""
with pytest.raises(ValueError):
Time("2000-01-01 01:01", format="iso", scale="tai", in_subfmt="date")
def test_bad_input_subformat(self):
"""Non-existent input subformat"""
with pytest.raises(ValueError):
Time(
"2000-01-01 01:01", format="iso", scale="tai", in_subfmt="doesnt exist"
)
def test_output_subformat(self):
"""Input subformat selection"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = [
"2000-01-01",
"2000-01-01 01:01",
"2000-01-01 01:01:01",
"2000-01-01 01:01:01.123",
]
t = Time(times, format="iso", scale="tai", out_subfmt="date_hm")
assert np.all(
t.iso
== np.array(
[
"2000-01-01 00:00",
"2000-01-01 01:01",
"2000-01-01 01:01",
"2000-01-01 01:01",
]
)
)
def test_fits_format(self):
"""FITS format includes bigger years."""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ["2000-01-01", "2000-01-01T01:01:01", "2000-01-01T01:01:01.123"]
t = Time(times, format="fits", scale="tai")
assert np.all(
t.fits
== np.array(
[
"2000-01-01T00:00:00.000",
"2000-01-01T01:01:01.000",
"2000-01-01T01:01:01.123",
]
)
)
# Explicit long format for output, default scale is UTC.
t2 = Time(times, format="fits", out_subfmt="long*")
assert np.all(
t2.fits
== np.array(
[
"+02000-01-01T00:00:00.000",
"+02000-01-01T01:01:01.000",
"+02000-01-01T01:01:01.123",
]
)
)
# Implicit long format for output, because of negative year.
times[2] = "-00594-01-01"
t3 = Time(times, format="fits", scale="tai")
assert np.all(
t3.fits
== np.array(
[
"+02000-01-01T00:00:00.000",
"+02000-01-01T01:01:01.000",
"-00594-01-01T00:00:00.000",
]
)
)
# Implicit long format for output, because of large positive year.
times[2] = "+10594-01-01"
t4 = Time(times, format="fits", scale="tai")
assert np.all(
t4.fits
== np.array(
[
"+02000-01-01T00:00:00.000",
"+02000-01-01T01:01:01.000",
"+10594-01-01T00:00:00.000",
]
)
)
def test_yday_format(self):
"""Year:Day_of_year format"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ["2000-12-01", "2001-12-01 01:01:01.123"]
t = Time(times, format="iso", scale="tai")
t.out_subfmt = "date_hm"
assert np.all(t.yday == np.array(["2000:336:00:00", "2001:335:01:01"]))
t.out_subfmt = "*"
assert np.all(
t.yday == np.array(["2000:336:00:00:00.000", "2001:335:01:01:01.123"])
)
def test_scale_input(self):
"""Test for issues related to scale input"""
# Check case where required scale is defined by the TimeFormat.
# All three should work.
t = Time(100.0, format="cxcsec", scale="utc")
assert t.scale == "utc"
t = Time(100.0, format="unix", scale="tai")
assert t.scale == "tai"
t = Time(100.0, format="gps", scale="utc")
assert t.scale == "utc"
# Check that bad scale is caught when format is specified
with pytest.raises(ScaleValueError):
Time(1950.0, format="byear", scale="bad scale")
# Check that bad scale is caught when format is auto-determined
with pytest.raises(ScaleValueError):
Time("2000:001:00:00:00", scale="bad scale")
def test_fits_scale(self):
"""Test that the previous FITS-string formatting can still be handled
but with a DeprecationWarning."""
for inputs in (
("2000-01-02(TAI)", "tai"),
("1999-01-01T00:00:00.123(ET(NIST))", "tt"),
("2014-12-12T01:00:44.1(UTC)", "utc"),
):
with pytest.warns(AstropyDeprecationWarning):
t = Time(inputs[0])
assert t.scale == inputs[1]
# Create Time using normal ISOT syntax and compare with FITS
t2 = Time(inputs[0][: inputs[0].index("(")], format="isot", scale=inputs[1])
assert t == t2
# Explicit check that conversions still work despite warning
with pytest.warns(AstropyDeprecationWarning):
t = Time("1999-01-01T00:00:00.123456789(UTC)")
t = t.tai
assert t.isot == "1999-01-01T00:00:32.123"
with pytest.warns(AstropyDeprecationWarning):
t = Time("1999-01-01T00:00:32.123456789(TAI)")
t = t.utc
assert t.isot == "1999-01-01T00:00:00.123"
# Check scale consistency
with pytest.warns(AstropyDeprecationWarning):
t = Time("1999-01-01T00:00:32.123456789(TAI)", scale="tai")
assert t.scale == "tai"
with pytest.warns(AstropyDeprecationWarning):
t = Time("1999-01-01T00:00:32.123456789(ET)", scale="tt")
assert t.scale == "tt"
with pytest.raises(ValueError), pytest.warns(AstropyDeprecationWarning):
t = Time("1999-01-01T00:00:32.123456789(TAI)", scale="utc")
def test_scale_default(self):
"""Test behavior when no scale is provided"""
# These first three are TimeFromEpoch and have an intrinsic time scale
t = Time(100.0, format="cxcsec")
assert t.scale == "tt"
t = Time(100.0, format="unix")
assert t.scale == "utc"
t = Time(100.0, format="gps")
assert t.scale == "tai"
for date in ("2000:001", "2000-01-01T00:00:00"):
t = Time(date)
assert t.scale == "utc"
t = Time(2000.1, format="byear")
assert t.scale == "tt"
t = Time("J2000")
assert t.scale == "tt"
def test_epoch_times(self):
"""Test time formats derived from EpochFromTime"""
t = Time(0.0, format="cxcsec", scale="tai")
assert t.tt.iso == "1998-01-01 00:00:00.000"
# Create new time object from this one and change scale, format
t2 = Time(t, scale="tt", format="iso")
assert t2.value == "1998-01-01 00:00:00.000"
        # Value taken from Chandra.Time.DateTime('2010:001:00:00:00').secs
t_cxcsec = 378691266.184
t = Time(t_cxcsec, format="cxcsec", scale="utc")
assert allclose_sec(t.value, t_cxcsec)
assert allclose_sec(t.cxcsec, t_cxcsec)
assert allclose_sec(t.tt.value, t_cxcsec)
assert allclose_sec(t.tt.cxcsec, t_cxcsec)
assert t.yday == "2010:001:00:00:00.000"
t = Time("2010:001:00:00:00.000", scale="utc")
assert allclose_sec(t.cxcsec, t_cxcsec)
assert allclose_sec(t.tt.cxcsec, t_cxcsec)
# Round trip through epoch time
for scale in ("utc", "tt"):
t = Time("2000:001", scale=scale)
t2 = Time(t.unix, scale=scale, format="unix")
assert getattr(t2, scale).iso == "2000-01-01 00:00:00.000"
# Test unix time. Values taken from http://en.wikipedia.org/wiki/Unix_time
t = Time("2013-05-20 21:18:46", scale="utc")
assert allclose_sec(t.unix, 1369084726.0)
assert allclose_sec(t.tt.unix, 1369084726.0)
# Values from issue #1118
t = Time("2004-09-16T23:59:59", scale="utc")
assert allclose_sec(t.unix, 1095379199.0)
def test_plot_date(self):
"""Test the plot_date format.
        Depending on the installed matplotlib version, this can give different
        results because the plot_date epoch changed in matplotlib 3.3. This
test tries to use the matplotlib date2num function to make the test
independent of version, but if matplotlib isn't available then the code
(and test) use the pre-3.3 epoch.
"""
try:
from matplotlib.dates import date2num
except ImportError:
# No matplotlib, in which case this uses the epoch 0000-12-31
# as per matplotlib < 3.3.
# Value from:
# matplotlib.dates.set_epoch('0000-12-31')
# val = matplotlib.dates.date2num('2000-01-01')
val = 730120.0
else:
val = date2num(datetime.datetime(2000, 1, 1))
t = Time("2000-01-01 00:00:00", scale="utc")
assert np.allclose(t.plot_date, val, atol=1e-5, rtol=0)
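        # Note (illustrative): plot_date counts days since matplotlib's date epoch.
        # With the pre-3.3 epoch of 0000-12-31, 2000-01-01 is day 730120.0; with the
        # matplotlib >= 3.3 default epoch of 1970-01-01 it would be 10957.0, which is
        # why `val` is taken from date2num when matplotlib is importable.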
class TestNumericalSubFormat:
def test_explicit_example(self):
t = Time("54321.000000000001", format="mjd")
assert t == Time(54321, 1e-12, format="mjd")
assert t.mjd == 54321.0 # Lost precision!
assert t.value == 54321.0 # Lost precision!
assert t.to_value("mjd") == 54321.0 # Lost precision!
assert t.to_value("mjd", subfmt="str") == "54321.000000000001"
assert t.to_value("mjd", "bytes") == b"54321.000000000001"
expected_long = np.longdouble(54321.0) + np.longdouble(1e-12)
# Check we're the same to within the double holding jd2
# (which is less precise than longdouble on arm64).
assert np.allclose(
t.to_value("mjd", subfmt="long"),
expected_long,
rtol=0,
atol=np.finfo(float).eps,
)
t.out_subfmt = "str"
assert t.value == "54321.000000000001"
assert t.to_value("mjd") == 54321.0 # Lost precision!
assert t.mjd == "54321.000000000001"
assert t.to_value("mjd", subfmt="bytes") == b"54321.000000000001"
assert t.to_value("mjd", subfmt="float") == 54321.0 # Lost precision!
t.out_subfmt = "long"
assert np.allclose(t.value, expected_long, rtol=0.0, atol=np.finfo(float).eps)
assert np.allclose(
t.to_value("mjd", subfmt=None),
expected_long,
rtol=0.0,
atol=np.finfo(float).eps,
)
assert np.allclose(t.mjd, expected_long, rtol=0.0, atol=np.finfo(float).eps)
assert t.to_value("mjd", subfmt="str") == "54321.000000000001"
assert t.to_value("mjd", subfmt="float") == 54321.0 # Lost precision!
@pytest.mark.skipif(
np.finfo(np.longdouble).eps >= np.finfo(float).eps,
reason="long double is the same as float",
)
def test_explicit_longdouble(self):
i = 54321
# Create a different long double (which will give a different jd2
# even when long doubles are more precise than Time, as on arm64).
f = max(2.0 ** (-np.finfo(np.longdouble).nmant) * 65536, np.finfo(float).eps)
mjd_long = np.longdouble(i) + np.longdouble(f)
assert mjd_long != i, "longdouble failure!"
t = Time(mjd_long, format="mjd")
expected = Time(i, f, format="mjd")
assert abs(t - expected) <= 20.0 * u.ps
t_float = Time(i + f, format="mjd")
assert t_float == Time(i, format="mjd")
assert t_float != t
assert t.value == 54321.0 # Lost precision!
assert np.allclose(
t.to_value("mjd", subfmt="long"),
mjd_long,
rtol=0.0,
atol=np.finfo(float).eps,
)
t2 = Time(mjd_long, format="mjd", out_subfmt="long")
assert np.allclose(t2.value, mjd_long, rtol=0.0, atol=np.finfo(float).eps)
@pytest.mark.skipif(
np.finfo(np.longdouble).eps >= np.finfo(float).eps,
reason="long double is the same as float",
)
def test_explicit_longdouble_one_val(self):
"""Ensure either val1 or val2 being longdouble is possible.
Regression test for issue gh-10033.
"""
i = 54321
f = max(2.0 ** (-np.finfo(np.longdouble).nmant) * 65536, np.finfo(float).eps)
t1 = Time(i, f, format="mjd")
t2 = Time(np.longdouble(i), f, format="mjd")
t3 = Time(i, np.longdouble(f), format="mjd")
t4 = Time(np.longdouble(i), np.longdouble(f), format="mjd")
assert t1 == t2 == t3 == t4
@pytest.mark.skipif(
np.finfo(np.longdouble).eps >= np.finfo(float).eps,
reason="long double is the same as float",
)
@pytest.mark.parametrize("fmt", ["mjd", "unix", "cxcsec"])
def test_longdouble_for_other_types(self, fmt):
t_fmt = getattr(Time(58000, format="mjd"), fmt) # Get regular float
t_fmt_long = np.longdouble(t_fmt)
# Create a different long double (ensuring it will give a different jd2
# even when long doubles are more precise than Time, as on arm64).
atol = np.finfo(float).eps * (1.0 if fmt == "mjd" else 24.0 * 3600.0)
t_fmt_long2 = t_fmt_long + max(
t_fmt_long * np.finfo(np.longdouble).eps * 2, atol
)
assert t_fmt_long != t_fmt_long2, "longdouble weird!"
tm = Time(t_fmt_long, format=fmt)
tm2 = Time(t_fmt_long2, format=fmt)
assert tm != tm2
tm_long2 = tm2.to_value(fmt, subfmt="long")
assert np.allclose(tm_long2, t_fmt_long2, rtol=0.0, atol=atol)
def test_subformat_input(self):
s = "54321.01234567890123456789"
i, f = s.split(".") # Note, OK only for fraction < 0.5
t = Time(float(i), float("." + f), format="mjd")
t_str = Time(s, format="mjd")
t_bytes = Time(s.encode("ascii"), format="mjd")
t_decimal = Time(Decimal(s), format="mjd")
assert t_str == t
assert t_bytes == t
assert t_decimal == t
@pytest.mark.parametrize("out_subfmt", ("str", "bytes"))
def test_subformat_output(self, out_subfmt):
i = 54321
f = np.array([0.0, 1e-9, 1e-12])
t = Time(i, f, format="mjd", out_subfmt=out_subfmt)
t_value = t.value
expected = np.array(
["54321.0", "54321.000000001", "54321.000000000001"], dtype=out_subfmt
)
assert np.all(t_value == expected)
assert np.all(Time(expected, format="mjd") == t)
# Explicit sub-format.
t = Time(i, f, format="mjd")
t_mjd_subfmt = t.to_value("mjd", subfmt=out_subfmt)
assert np.all(t_mjd_subfmt == expected)
@pytest.mark.parametrize(
"fmt,string,val1,val2",
[
("jd", "2451544.5333981", 2451544.5, 0.0333981),
("decimalyear", "2000.54321", 2000.0, 0.54321),
("cxcsec", "100.0123456", 100.0123456, None),
("unix", "100.0123456", 100.0123456, None),
("gps", "100.0123456", 100.0123456, None),
("byear", "1950.1", 1950.1, None),
("jyear", "2000.1", 2000.1, None),
],
)
def test_explicit_string_other_formats(self, fmt, string, val1, val2):
t = Time(string, format=fmt)
assert t == Time(val1, val2, format=fmt)
assert t.to_value(fmt, subfmt="str") == string
def test_basic_subformat_setting(self):
t = Time("2001", format="jyear", scale="tai")
t.format = "mjd"
t.out_subfmt = "str"
assert t.value.startswith("5")
def test_basic_subformat_cache_does_not_crash(self):
t = Time("2001", format="jyear", scale="tai")
t.to_value("mjd", subfmt="str")
assert ("mjd", "str") in t.cache["format"]
t.to_value("mjd", "str")
@pytest.mark.parametrize("fmt", ["jd", "mjd", "cxcsec", "unix", "gps", "jyear"])
def test_decimal_context_does_not_affect_string(self, fmt):
t = Time("2001", format="jyear", scale="tai")
t.format = fmt
with localcontext() as ctx:
ctx.prec = 2
t_s_2 = t.to_value(fmt, "str")
t2 = Time("2001", format="jyear", scale="tai")
t2.format = fmt
with localcontext() as ctx:
ctx.prec = 40
t2_s_40 = t.to_value(fmt, "str")
assert (
t_s_2 == t2_s_40
), "String representation should not depend on Decimal context"
def test_decimal_context_caching(self):
t = Time(val=58000, val2=1e-14, format="mjd", scale="tai")
with localcontext() as ctx:
ctx.prec = 2
t_s_2 = t.to_value("mjd", subfmt="decimal")
t2 = Time(val=58000, val2=1e-14, format="mjd", scale="tai")
with localcontext() as ctx:
ctx.prec = 40
t_s_40 = t.to_value("mjd", subfmt="decimal")
t2_s_40 = t2.to_value("mjd", subfmt="decimal")
assert t_s_2 == t_s_40, "Should be the same but cache might make this automatic"
assert t_s_2 == t2_s_40, "Different precision should produce the same results"
@pytest.mark.parametrize(
"f, s, t",
[
("sec", "long", np.longdouble),
("sec", "decimal", Decimal),
("sec", "str", str),
],
)
def test_timedelta_basic(self, f, s, t):
dt = Time("58000", format="mjd", scale="tai") - Time(
"58001", format="mjd", scale="tai"
)
value = dt.to_value(f, s)
assert isinstance(value, t)
dt.format = f
dt.out_subfmt = s
assert isinstance(dt.value, t)
assert isinstance(dt.to_value(f, None), t)
def test_need_format_argument(self):
t = Time("J2000")
with pytest.raises(TypeError, match="missing.*required.*'format'"):
t.to_value()
with pytest.raises(ValueError, match="format must be one of"):
t.to_value("julian")
def test_wrong_in_subfmt(self):
with pytest.raises(ValueError, match="not among selected"):
Time("58000", format="mjd", in_subfmt="float")
with pytest.raises(ValueError, match="not among selected"):
Time(np.longdouble(58000), format="mjd", in_subfmt="float")
with pytest.raises(ValueError, match="not among selected"):
Time(58000.0, format="mjd", in_subfmt="str")
with pytest.raises(ValueError, match="not among selected"):
Time(58000.0, format="mjd", in_subfmt="long")
def test_wrong_subfmt(self):
t = Time(58000.0, format="mjd")
with pytest.raises(ValueError, match="must match one"):
t.to_value("mjd", subfmt="parrot")
with pytest.raises(ValueError, match="must match one"):
t.out_subfmt = "parrot"
with pytest.raises(ValueError, match="must match one"):
t.in_subfmt = "parrot"
def test_not_allowed_subfmt(self):
"""Test case where format has no defined subfmts"""
t = Time("J2000")
match = "subformat not allowed for format jyear_str"
with pytest.raises(ValueError, match=match):
t.to_value("jyear_str", subfmt="parrot")
with pytest.raises(ValueError, match=match):
t.out_subfmt = "parrot"
with pytest.raises(ValueError, match=match):
Time("J2000", out_subfmt="parrot")
with pytest.raises(ValueError, match=match):
t.in_subfmt = "parrot"
with pytest.raises(ValueError, match=match):
Time("J2000", format="jyear_str", in_subfmt="parrot")
def test_switch_to_format_with_no_out_subfmt(self):
t = Time("2001-01-01", out_subfmt="date_hm")
assert t.out_subfmt == "date_hm"
# Now do an in-place switch to format 'jyear_str' that has no subfmts
# where out_subfmt is changed to '*'.
t.format = "jyear_str"
assert t.out_subfmt == "*"
assert t.value == "J2001.001"
class TestSofaErrors:
"""Test that erfa status return values are handled correctly"""
def test_bad_time(self):
iy = np.array([2000], dtype=np.intc)
im = np.array([2000], dtype=np.intc) # bad month
id = np.array([2000], dtype=np.intc) # bad day
with pytest.raises(ValueError): # bad month, fatal error
djm0, djm = erfa.cal2jd(iy, im, id)
iy[0] = -5000
im[0] = 2
with pytest.raises(ValueError): # bad year, fatal error
djm0, djm = erfa.cal2jd(iy, im, id)
iy[0] = 2000
with pytest.warns(ErfaWarning, match=r"bad day \(JD computed\)") as w:
djm0, djm = erfa.cal2jd(iy, im, id)
assert len(w) == 1
assert allclose_jd(djm0, [2400000.5])
assert allclose_jd(djm, [53574.0])
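        # Worked check (illustrative): erfa.cal2jd returns the MJD zero point
        # (djm0 = 2400000.5) and the MJD itself.  For the "bad day" case the JD is
        # still computed by plain day arithmetic: MJD(2000-01-31) = 51574, plus
        # day-of-month 2000, gives the 53574.0 asserted above.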
class TestCopyReplicate:
"""Test issues related to copying and replicating data"""
def test_immutable_input(self):
"""Internals are never mutable."""
jds = np.array([2450000.5], dtype=np.double)
t = Time(jds, format="jd", scale="tai")
assert allclose_jd(t.jd, jds)
jds[0] = 2458654
assert not allclose_jd(t.jd, jds)
mjds = np.array([50000.0], dtype=np.double)
t = Time(mjds, format="mjd", scale="tai")
assert allclose_jd(t.jd, [2450000.5])
mjds[0] = 0.0
assert allclose_jd(t.jd, [2450000.5])
def test_replicate(self):
"""Test replicate method"""
t = Time(["2000:001"], format="yday", scale="tai", location=("45d", "45d"))
t_yday = t.yday
t_loc_x = t.location.x.copy()
t2 = t.replicate()
assert t.yday == t2.yday
assert t.format == t2.format
assert t.scale == t2.scale
assert t.location == t2.location
# This is not allowed publicly, but here we hack the internal time
# and location values to show that t and t2 are sharing references.
t2._time.jd1 += 100.0
# Need to delete the cached yday attributes (only an issue because
# of the internal _time hack).
del t.cache
del t2.cache
assert t.yday == t2.yday
assert t.yday != t_yday # prove that it changed
t2_loc_x_view = t2.location.x
t2_loc_x_view[()] = 0 # use 0 to avoid having to give units
assert t2.location.x == t2_loc_x_view
assert t.location.x == t2.location.x
assert t.location.x != t_loc_x # prove that it changed
def test_copy(self):
"""Test copy method"""
t = Time("2000:001", format="yday", scale="tai", location=("45d", "45d"))
t_yday = t.yday
t_loc_x = t.location.x.copy()
t2 = t.copy()
assert t.yday == t2.yday
# This is not allowed publicly, but here we hack the internal time
# and location values to show that t and t2 are not sharing references.
t2._time.jd1 += 100.0
# Need to delete the cached yday attributes (only an issue because
# of the internal _time hack).
del t.cache
del t2.cache
assert t.yday != t2.yday
assert t.yday == t_yday # prove that it did not change
t2_loc_x_view = t2.location.x
t2_loc_x_view[()] = 0 # use 0 to avoid having to give units
assert t2.location.x == t2_loc_x_view
assert t.location.x != t2.location.x
        assert t.location.x == t_loc_x # prove that it did not change
class TestStardate:
"""Sync chronometers with Starfleet Command"""
def test_iso_to_stardate(self):
assert str(Time("2320-01-01", scale="tai").stardate)[:7] == "1368.99"
assert str(Time("2330-01-01", scale="tai").stardate)[:8] == "10552.76"
assert str(Time("2340-01-01", scale="tai").stardate)[:8] == "19734.02"
@pytest.mark.parametrize(
"dates",
[
(10000, "2329-05-26 03:02"),
(20000, "2340-04-15 19:05"),
(30000, "2351-03-07 11:08"),
],
)
def test_stardate_to_iso(self, dates):
stardate, iso = dates
t_star = Time(stardate, format="stardate")
t_iso = Time(t_star, format="iso", out_subfmt="date_hm")
assert t_iso.value == iso
def test_python_builtin_copy():
t = Time("2000:001", format="yday", scale="tai")
t2 = copy.copy(t)
t3 = copy.deepcopy(t)
assert t.jd == t2.jd
assert t.jd == t3.jd
def test_now():
"""
Tests creating a Time object with the `now` class method.
"""
now = datetime.datetime.utcnow()
t = Time.now()
assert t.format == "datetime"
assert t.scale == "utc"
dt = t.datetime - now # a datetime.timedelta object
# this gives a .1 second margin between the `utcnow` call and the `Time`
# initializer, which is really way more generous than necessary - typical
# times are more like microseconds. But it seems safer in case some
# platforms have slow clock calls or something.
assert dt.total_seconds() < 0.1
def test_decimalyear():
t = Time("2001:001", format="yday")
assert t.decimalyear == 2001.0
t = Time(2000.0, [0.5, 0.75], format="decimalyear")
assert np.all(t.value == [2000.5, 2000.75])
jd0 = Time("2000:001").jd
jd1 = Time("2001:001").jd
d_jd = jd1 - jd0
assert np.all(t.jd == [jd0 + 0.5 * d_jd, jd0 + 0.75 * d_jd])
def test_decimalyear_no_quantity():
with pytest.raises(ValueError, match="cannot use Quantities"):
Time(2005.5 * u.yr, format="decimalyear")
def test_fits_year0():
t = Time(1721425.5, format="jd", scale="tai")
assert t.fits == "0001-01-01T00:00:00.000"
t = Time(1721425.5 - 366.0, format="jd", scale="tai")
assert t.fits == "+00000-01-01T00:00:00.000"
t = Time(1721425.5 - 366.0 - 365.0, format="jd", scale="tai")
assert t.fits == "-00001-01-01T00:00:00.000"
def test_fits_year10000():
t = Time(5373484.5, format="jd", scale="tai")
assert t.fits == "+10000-01-01T00:00:00.000"
t = Time(5373484.5 - 365.0, format="jd", scale="tai")
assert t.fits == "9999-01-01T00:00:00.000"
t = Time(5373484.5, -1.0 / 24.0 / 3600.0, format="jd", scale="tai")
assert t.fits == "9999-12-31T23:59:59.000"
def test_dir():
t = Time("2000:001", format="yday", scale="tai")
assert "utc" in dir(t)
def test_time_from_epoch_jds():
"""Test that jd1/jd2 in a TimeFromEpoch format is always well-formed:
jd1 is an integral value and abs(jd2) <= 0.5.
"""
# From 1999:001 00:00 to 1999:002 12:00 by a non-round step. This will
# catch jd2 == 0 and a case of abs(jd2) == 0.5.
cxcsecs = np.linspace(0, 86400 * 1.5, 49)
for cxcsec in cxcsecs:
t = Time(cxcsec, format="cxcsec")
assert np.round(t.jd1) == t.jd1
assert np.abs(t.jd2) <= 0.5
t = Time(cxcsecs, format="cxcsec")
assert np.all(np.round(t.jd1) == t.jd1)
assert np.all(np.abs(t.jd2) <= 0.5)
assert np.any(np.abs(t.jd2) == 0.5) # At least one exactly 0.5
def test_bool():
"""Any Time object should evaluate to True unless it is empty [#3520]."""
t = Time(np.arange(50000, 50010), format="mjd", scale="utc")
assert bool(t) is True
assert bool(t[0]) is True
assert bool(t[:0]) is False
def test_len_size():
"""Check length of Time objects and that scalar ones do not have one."""
t = Time(np.arange(50000, 50010), format="mjd", scale="utc")
assert len(t) == 10 and t.size == 10
t1 = Time(np.arange(50000, 50010).reshape(2, 5), format="mjd", scale="utc")
assert len(t1) == 2 and t1.size == 10
# Can have length 1 or length 0 arrays.
t2 = t[:1]
assert len(t2) == 1 and t2.size == 1
t3 = t[:0]
assert len(t3) == 0 and t3.size == 0
# But cannot get length from scalar.
t4 = t[0]
with pytest.raises(TypeError) as err:
len(t4)
# Ensure we're not just getting the old error of
# "object of type 'float' has no len()".
assert "Time" in str(err.value)
def test_TimeFormat_scale():
"""guard against recurrence of #1122, where TimeFormat class looses uses
attributes (delta_ut1_utc here), preventing conversion to unix, cxc"""
t = Time("1900-01-01", scale="ut1")
t.delta_ut1_utc = 0.0
with pytest.warns(ErfaWarning):
t.unix
assert t.unix == t.utc.unix
@pytest.mark.remote_data
def test_scale_conversion(monkeypatch):
# Check that if we have internet, and downloading is allowed, we
# can get conversion to UT1 for the present, since we will download
# IERS_A in IERS_Auto.
monkeypatch.setattr("astropy.utils.iers.conf.auto_download", True)
Time(Time.now().cxcsec, format="cxcsec", scale="ut1")
def test_byteorder():
"""Ensure that bigendian and little-endian both work (closes #2942)"""
mjd = np.array([53000.00, 54000.00])
big_endian = mjd.astype(">f8")
little_endian = mjd.astype("<f8")
time_mjd = Time(mjd, format="mjd")
time_big = Time(big_endian, format="mjd")
time_little = Time(little_endian, format="mjd")
assert np.all(time_big == time_mjd)
assert np.all(time_little == time_mjd)
def test_datetime_tzinfo():
"""
Test #3160 that time zone info in datetime objects is respected.
"""
class TZm6(datetime.tzinfo):
def utcoffset(self, dt):
return datetime.timedelta(hours=-6)
d = datetime.datetime(2002, 1, 2, 10, 3, 4, tzinfo=TZm6())
t = Time(d)
assert t.value == datetime.datetime(2002, 1, 2, 16, 3, 4)
def test_subfmts_regex():
"""
Test having a custom subfmts with a regular expression
"""
class TimeLongYear(TimeString):
name = "longyear"
subfmts = (
(
"date",
r"(?P<year>[+-]\d{5})-%m-%d", # hybrid
"{year:+06d}-{mon:02d}-{day:02d}",
),
)
t = Time("+02000-02-03", format="longyear")
assert t.value == "+02000-02-03"
assert t.jd == Time("2000-02-03").jd
def test_set_format_basic():
"""
Test basics of setting format attribute.
"""
for format, value in (
("jd", 2451577.5),
("mjd", 51577.0),
("cxcsec", 65923264.184), # confirmed with Chandra.Time
("datetime", datetime.datetime(2000, 2, 3, 0, 0)),
("iso", "2000-02-03 00:00:00.000"),
):
t = Time("+02000-02-03", format="fits")
t0 = t.replicate()
t.format = format
assert t.value == value
# Internal jd1 and jd2 are preserved
assert t._time.jd1 is t0._time.jd1
assert t._time.jd2 is t0._time.jd2
def test_unix_tai_format():
t = Time("2020-01-01", scale="utc")
assert allclose_sec(t.unix_tai - t.unix, 37.0)
t = Time("1970-01-01", scale="utc")
assert allclose_sec(t.unix_tai - t.unix, 8 + 8.2e-05)
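    # Worked values (illustrative): unix_tai differs from unix by TAI-UTC.  On
    # 2020-01-01 TAI-UTC = 37 s, hence the 37.0 above.  At the 1970-01-01 Unix
    # epoch the pre-1972 relation TAI-UTC = 4.2131700 + (MJD - 39126) * 0.002592
    # gives 4.21317 + 1461 * 0.002592 = 8.000082 s, i.e. the 8 + 8.2e-05 above.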
def test_set_format_shares_subfmt():
"""
Set format and round trip through a format that shares out_subfmt
"""
t = Time("+02000-02-03", format="fits", out_subfmt="date_hms", precision=5)
tc = t.copy()
t.format = "isot"
assert t.precision == 5
assert t.out_subfmt == "date_hms"
assert t.value == "2000-02-03T00:00:00.00000"
t.format = "fits"
assert t.value == tc.value
assert t.precision == 5
def test_set_format_does_not_share_subfmt():
"""
Set format and round trip through a format that does not share out_subfmt
"""
t = Time("+02000-02-03", format="fits", out_subfmt="longdate")
t.format = "isot"
assert t.out_subfmt == "*" # longdate_hms not there, goes to default
assert t.value == "2000-02-03T00:00:00.000"
t.format = "fits"
assert t.out_subfmt == "*"
assert t.value == "2000-02-03T00:00:00.000" # date_hms
def test_replicate_value_error():
"""
Passing a bad format to replicate should raise ValueError, not KeyError.
PR #3857.
"""
t1 = Time("2007:001", scale="tai")
with pytest.raises(ValueError) as err:
t1.replicate(format="definitely_not_a_valid_format")
assert "format must be one of" in str(err.value)
def test_remove_astropy_time():
"""
Make sure that 'astropy_time' format is really gone after #3857. Kind of
silly test but just to be sure.
"""
t1 = Time("2007:001", scale="tai")
assert "astropy_time" not in t1.FORMATS
with pytest.raises(ValueError) as err:
Time(t1, format="astropy_time")
assert "format must be one of" in str(err.value)
def test_isiterable():
"""
Ensure that scalar `Time` instances are not reported as iterable by the
`isiterable` utility.
Regression test for https://github.com/astropy/astropy/issues/4048
"""
t1 = Time.now()
assert not isiterable(t1)
t2 = Time(
["1999-01-01 00:00:00.123456789", "2010-01-01 00:00:00"],
format="iso",
scale="utc",
)
assert isiterable(t2)
def test_to_datetime():
tz = TimezoneInfo(utc_offset=-10 * u.hour, tzname="US/Hawaii")
    # The above line produces a `datetime.tzinfo` object similar to:
# tzinfo = pytz.timezone('US/Hawaii')
time = Time("2010-09-03 00:00:00")
tz_aware_datetime = time.to_datetime(tz)
assert tz_aware_datetime.time() == datetime.time(14, 0)
forced_to_astropy_time = Time(tz_aware_datetime)
assert tz.tzname(time.datetime) == tz_aware_datetime.tzname()
assert time == forced_to_astropy_time
# Test non-scalar time inputs:
time = Time(["2010-09-03 00:00:00", "2005-09-03 06:00:00", "1990-09-03 06:00:00"])
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
for dt, tz_dt in zip(time.datetime, tz_aware_datetime):
assert tz.tzname(dt) == tz_dt.tzname()
assert np.all(time == forced_to_astropy_time)
def test_to_datetime_leap_second_strict():
t = Time("2015-06-30 23:59:60.000")
dt_exp = datetime.datetime(2015, 7, 1, 0, 0, 0)
with pytest.raises(ValueError, match=r"does not support leap seconds"):
t.to_datetime()
with pytest.warns(
AstropyDatetimeLeapSecondWarning, match=r"does not support leap seconds"
):
dt = t.to_datetime(leap_second_strict="warn")
assert dt == dt_exp
dt = t.to_datetime(leap_second_strict="silent")
assert dt == dt_exp
with pytest.raises(ValueError, match=r"leap_second_strict must be 'raise'"):
t.to_datetime(leap_second_strict="invalid")
@pytest.mark.skipif(not HAS_PYTZ, reason="requires pytz")
def test_to_datetime_pytz():
import pytz
tz = pytz.timezone("US/Hawaii")
time = Time("2010-09-03 00:00:00")
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
assert tz_aware_datetime.time() == datetime.time(14, 0)
assert tz.tzname(time.datetime) == tz_aware_datetime.tzname()
assert time == forced_to_astropy_time
# Test non-scalar time inputs:
time = Time(["2010-09-03 00:00:00", "2005-09-03 06:00:00", "1990-09-03 06:00:00"])
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
for dt, tz_dt in zip(time.datetime, tz_aware_datetime):
assert tz.tzname(dt) == tz_dt.tzname()
assert np.all(time == forced_to_astropy_time)
def test_cache():
t = Time("2010-09-03 00:00:00")
t2 = Time("2010-09-03 00:00:00")
# Time starts out without a cache
assert "cache" not in t._time.__dict__
# Access the iso format and confirm that the cached version is as expected
t.iso
assert t.cache["format"]["iso"] == t2.iso
# Access the TAI scale and confirm that the cached version is as expected
t.tai
assert t.cache["scale"]["tai"] == t2.tai
# New Time object after scale transform does not have a cache yet
assert "cache" not in t.tt._time.__dict__
# Clear the cache
del t.cache
assert "cache" not in t._time.__dict__
# Check accessing the cache creates an empty dictionary
assert not t.cache
assert "cache" in t._time.__dict__
def test_epoch_date_jd_is_day_fraction():
"""
    Ensure that jd1 and jd2 of an epoch Time respect the (day, fraction) convention
(see #6638)
"""
t0 = Time("J2000", scale="tdb")
assert t0.jd1 == 2451545.0
assert t0.jd2 == 0.0
t1 = Time(datetime.datetime(2000, 1, 1, 12, 0, 0), scale="tdb")
assert t1.jd1 == 2451545.0
assert t1.jd2 == 0.0
def test_sum_is_equivalent():
"""
Ensure that two equal dates defined in different ways behave equally (#6638)
"""
t0 = Time("J2000", scale="tdb")
t1 = Time("2000-01-01 12:00:00", scale="tdb")
assert t0 == t1
assert (t0 + 1 * u.second) == (t1 + 1 * u.second)
def test_string_valued_columns():
# Columns have a nice shim that translates bytes to string as needed.
# Ensure Time can handle these. Use multi-d array just to be sure.
times = [
[[f"{y:04d}-{m:02d}-{d:02d}" for d in range(1, 3)] for m in range(5, 7)]
for y in range(2012, 2014)
]
cutf32 = Column(times)
cbytes = cutf32.astype("S")
tutf32 = Time(cutf32)
tbytes = Time(cbytes)
assert np.all(tutf32 == tbytes)
tutf32 = Time(Column(["B1950"]))
tbytes = Time(Column([b"B1950"]))
assert tutf32 == tbytes
# Regression tests for arrays with entries with unequal length. gh-6903.
times = Column([b"2012-01-01", b"2012-01-01T00:00:00"])
assert np.all(Time(times) == Time(["2012-01-01", "2012-01-01T00:00:00"]))
def test_bytes_input():
tstring = "2011-01-02T03:04:05"
tbytes = b"2011-01-02T03:04:05"
assert tbytes.decode("ascii") == tstring
t0 = Time(tstring)
t1 = Time(tbytes)
assert t1 == t0
tarray = np.array(tbytes)
assert tarray.dtype.kind == "S"
t2 = Time(tarray)
assert t2 == t0
def test_writeable_flag():
t = Time([1, 2, 3], format="cxcsec")
t[1] = 5.0
assert allclose_sec(t[1].value, 5.0)
t.writeable = False
with pytest.raises(ValueError) as err:
t[1] = 5.0
assert "Time object is read-only. Make a copy()" in str(err.value)
with pytest.raises(ValueError) as err:
t[:] = 5.0
assert "Time object is read-only. Make a copy()" in str(err.value)
t.writeable = True
t[1] = 10.0
assert allclose_sec(t[1].value, 10.0)
# Scalar is writeable because it gets boxed into a zero-d array
t = Time("2000:001", scale="utc")
t[()] = "2000:002"
assert t.value.startswith("2000:002")
# Transformed attribute is not writeable
t = Time(["2000:001", "2000:002"], scale="utc")
t2 = t.tt # t2 is read-only now because t.tt is cached
with pytest.raises(ValueError) as err:
t2[0] = "2005:001"
assert "Time object is read-only. Make a copy()" in str(err.value)
def test_setitem_location():
loc = EarthLocation(x=[1, 2] * u.m, y=[3, 4] * u.m, z=[5, 6] * u.m)
t = Time([[1, 2], [3, 4]], format="cxcsec", location=loc)
# Succeeds because the right hand side makes no implication about
# location and just inherits t.location
t[0, 0] = 0
assert allclose_sec(t.value, [[0, 2], [3, 4]])
# Fails because the right hand side has location=None
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-1, format="cxcsec")
assert (
"cannot set to Time with different location: "
"expected location={} and "
"got location=None".format(loc[0]) in str(err.value)
)
# Succeeds because the right hand side correctly sets location
t[0, 0] = Time(-2, format="cxcsec", location=loc[0])
assert allclose_sec(t.value, [[-2, 2], [3, 4]])
# Fails because the right hand side has different location
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-2, format="cxcsec", location=loc[1])
assert (
"cannot set to Time with different location: "
"expected location={} and "
"got location={}".format(loc[0], loc[1]) in str(err.value)
)
# Fails because the Time has None location and RHS has defined location
t = Time([[1, 2], [3, 4]], format="cxcsec")
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-2, format="cxcsec", location=loc[1])
assert (
"cannot set to Time with different location: "
"expected location=None and "
"got location={}".format(loc[1]) in str(err.value)
)
# Broadcasting works
t = Time([[1, 2], [3, 4]], format="cxcsec", location=loc)
t[0, :] = Time([-3, -4], format="cxcsec", location=loc)
assert allclose_sec(t.value, [[-3, -4], [3, 4]])
def test_setitem_from_python_objects():
t = Time([[1, 2], [3, 4]], format="cxcsec")
assert t.cache == {}
t.iso
assert "iso" in t.cache["format"]
assert np.all(
t.iso
== [
["1998-01-01 00:00:01.000", "1998-01-01 00:00:02.000"],
["1998-01-01 00:00:03.000", "1998-01-01 00:00:04.000"],
]
)
# Setting item clears cache
t[0, 1] = 100
assert t.cache == {}
assert allclose_sec(t.value, [[1, 100], [3, 4]])
assert np.all(
t.iso
== [
["1998-01-01 00:00:01.000", "1998-01-01 00:01:40.000"],
["1998-01-01 00:00:03.000", "1998-01-01 00:00:04.000"],
]
)
# Set with a float value
t.iso
t[1, :] = 200
assert t.cache == {}
assert allclose_sec(t.value, [[1, 100], [200, 200]])
# Array of strings in yday format
t[:, 1] = ["1998:002", "1998:003"]
assert allclose_sec(t.value, [[1, 86400 * 1], [200, 86400 * 2]])
# Incompatible numeric value
t = Time(["2000:001", "2000:002"])
t[0] = "2001:001"
with pytest.raises(ValueError) as err:
t[0] = 100
assert "cannot convert value to a compatible Time object" in str(err.value)
def test_setitem_from_time_objects():
"""Set from existing Time object."""
# Set from time object with different scale
t = Time(["2000:001", "2000:002"], scale="utc")
t2 = Time(["2000:010"], scale="tai")
t[1] = t2[0]
assert t.value[1] == t2.utc.value[0]
# Time object with different scale and format
t = Time(["2000:001", "2000:002"], scale="utc")
t2.format = "jyear"
t[1] = t2[0]
assert t.yday[1] == t2.utc.yday[0]
def test_setitem_bad_item():
t = Time([1, 2], format="cxcsec")
with pytest.raises(IndexError):
t["asdf"] = 3
def test_setitem_deltas():
"""Setting invalidates any transform deltas"""
t = Time([1, 2], format="cxcsec")
t.delta_tdb_tt = [1, 2]
t.delta_ut1_utc = [3, 4]
t[1] = 3
assert not hasattr(t, "_delta_tdb_tt")
assert not hasattr(t, "_delta_ut1_utc")
def test_subclass():
"""Check that we can initialize subclasses with a Time instance."""
# Ref: Issue gh-#7449 and PR gh-#7453.
class _Time(Time):
pass
t1 = Time("1999-01-01T01:01:01")
t2 = _Time(t1)
assert t2.__class__ == _Time
assert t1 == t2
def test_strftime_scalar():
"""Test of Time.strftime"""
time_string = "2010-09-03 06:00:00"
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime("%Y-%m-%d %H:%M:%S") == time_string
def test_strftime_array():
tstrings = ["2010-09-03 00:00:00", "2005-09-03 06:00:00", "1995-12-31 23:59:60"]
t = Time(tstrings)
for format in t.FORMATS:
t.format = format
assert t.strftime("%Y-%m-%d %H:%M:%S").tolist() == tstrings
def test_strftime_array_2():
tstrings = [
["1998-01-01 00:00:01", "1998-01-01 00:00:02"],
["1998-01-01 00:00:03", "1995-12-31 23:59:60"],
]
tstrings = np.array(tstrings)
t = Time(tstrings)
for format in t.FORMATS:
t.format = format
assert np.all(t.strftime("%Y-%m-%d %H:%M:%S") == tstrings)
assert t.strftime("%Y-%m-%d %H:%M:%S").shape == tstrings.shape
def test_strftime_leapsecond():
time_string = "1995-12-31 23:59:60"
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime("%Y-%m-%d %H:%M:%S") == time_string
def test_strptime_scalar():
"""Test of Time.strptime"""
time_string = "2007-May-04 21:08:12"
time_object = Time("2007-05-04 21:08:12")
t = Time.strptime(time_string, "%Y-%b-%d %H:%M:%S")
assert t == time_object
def test_strptime_array():
"""Test of Time.strptime"""
tstrings = [
["1998-Jan-01 00:00:01", "1998-Jan-01 00:00:02"],
["1998-Jan-01 00:00:03", "1998-Jan-01 00:00:04"],
]
tstrings = np.array(tstrings)
time_object = Time(
[
["1998-01-01 00:00:01", "1998-01-01 00:00:02"],
["1998-01-01 00:00:03", "1998-01-01 00:00:04"],
]
)
t = Time.strptime(tstrings, "%Y-%b-%d %H:%M:%S")
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strptime_badinput():
tstrings = [1, 2, 3]
with pytest.raises(TypeError):
Time.strptime(tstrings, "%S")
def test_strptime_input_bytes_scalar():
time_string = b"2007-May-04 21:08:12"
time_object = Time("2007-05-04 21:08:12")
t = Time.strptime(time_string, "%Y-%b-%d %H:%M:%S")
assert t == time_object
def test_strptime_input_bytes_array():
tstrings = [
[b"1998-Jan-01 00:00:01", b"1998-Jan-01 00:00:02"],
[b"1998-Jan-01 00:00:03", b"1998-Jan-01 00:00:04"],
]
tstrings = np.array(tstrings)
time_object = Time(
[
["1998-01-01 00:00:01", "1998-01-01 00:00:02"],
["1998-01-01 00:00:03", "1998-01-01 00:00:04"],
]
)
t = Time.strptime(tstrings, "%Y-%b-%d %H:%M:%S")
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strptime_leapsecond():
time_obj1 = Time("1995-12-31T23:59:60", format="isot")
time_obj2 = Time.strptime("1995-Dec-31 23:59:60", "%Y-%b-%d %H:%M:%S")
assert time_obj1 == time_obj2
def test_strptime_3_digit_year():
time_obj1 = Time("0995-12-31T00:00:00", format="isot", scale="tai")
time_obj2 = Time.strptime("0995-Dec-31 00:00:00", "%Y-%b-%d %H:%M:%S", scale="tai")
assert time_obj1 == time_obj2
def test_strptime_fracsec_scalar():
time_string = "2007-May-04 21:08:12.123"
time_object = Time("2007-05-04 21:08:12.123")
t = Time.strptime(time_string, "%Y-%b-%d %H:%M:%S.%f")
assert t == time_object
def test_strptime_fracsec_array():
"""Test of Time.strptime"""
tstrings = [
["1998-Jan-01 00:00:01.123", "1998-Jan-01 00:00:02.000001"],
["1998-Jan-01 00:00:03.000900", "1998-Jan-01 00:00:04.123456"],
]
tstrings = np.array(tstrings)
time_object = Time(
[
["1998-01-01 00:00:01.123", "1998-01-01 00:00:02.000001"],
["1998-01-01 00:00:03.000900", "1998-01-01 00:00:04.123456"],
]
)
t = Time.strptime(tstrings, "%Y-%b-%d %H:%M:%S.%f")
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strftime_scalar_fracsec():
"""Test of Time.strftime"""
time_string = "2010-09-03 06:00:00.123"
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime("%Y-%m-%d %H:%M:%S.%f") == time_string
def test_strftime_scalar_fracsec_precision():
time_string = "2010-09-03 06:00:00.123123123"
t = Time(time_string)
assert t.strftime("%Y-%m-%d %H:%M:%S.%f") == "2010-09-03 06:00:00.123"
t.precision = 9
assert t.strftime("%Y-%m-%d %H:%M:%S.%f") == "2010-09-03 06:00:00.123123123"
def test_strftime_array_fracsec():
tstrings = [
"2010-09-03 00:00:00.123000",
"2005-09-03 06:00:00.000001",
"1995-12-31 23:59:60.000900",
]
t = Time(tstrings)
t.precision = 6
for format in t.FORMATS:
t.format = format
assert t.strftime("%Y-%m-%d %H:%M:%S.%f").tolist() == tstrings
def test_insert_time():
tm = Time([1, 2], format="unix")
# Insert a scalar using an auto-parsed string
tm2 = tm.insert(1, "1970-01-01 00:01:00")
assert np.all(tm2 == Time([1, 60, 2], format="unix"))
# Insert scalar using a Time value
tm2 = tm.insert(1, Time("1970-01-01 00:01:00"))
assert np.all(tm2 == Time([1, 60, 2], format="unix"))
# Insert length=1 array with a Time value
tm2 = tm.insert(1, [Time("1970-01-01 00:01:00")])
assert np.all(tm2 == Time([1, 60, 2], format="unix"))
# Insert length=2 list with float values matching unix format.
# Also actually provide axis=0 unlike all other tests.
tm2 = tm.insert(1, [10, 20], axis=0)
assert np.all(tm2 == Time([1, 10, 20, 2], format="unix"))
# Insert length=2 np.array with float values matching unix format
tm2 = tm.insert(1, np.array([10, 20]))
assert np.all(tm2 == Time([1, 10, 20, 2], format="unix"))
# Insert length=2 np.array with float values at the end
tm2 = tm.insert(2, np.array([10, 20]))
assert np.all(tm2 == Time([1, 2, 10, 20], format="unix"))
# Insert length=2 np.array with float values at the beginning
# with a negative index
tm2 = tm.insert(-2, np.array([10, 20]))
assert np.all(tm2 == Time([10, 20, 1, 2], format="unix"))
def test_insert_time_out_subfmt():
# Check insert() with out_subfmt set
T = Time(["1999-01-01", "1999-01-02"], out_subfmt="date")
T = T.insert(0, T[0])
assert T.out_subfmt == "date"
assert T[0] == T[1]
T = T.insert(1, "1999-01-03")
assert T.out_subfmt == "date"
assert str(T[1]) == "1999-01-03"
def test_insert_exceptions():
tm = Time(1, format="unix")
with pytest.raises(TypeError) as err:
tm.insert(0, 50)
assert "cannot insert into scalar" in str(err.value)
tm = Time([1, 2], format="unix")
with pytest.raises(ValueError) as err:
tm.insert(0, 50, axis=1)
assert "axis must be 0" in str(err.value)
with pytest.raises(TypeError) as err:
tm.insert(slice(None), 50)
assert "obj arg must be an integer" in str(err.value)
with pytest.raises(IndexError) as err:
tm.insert(-100, 50)
assert "index -100 is out of bounds for axis 0 with size 2" in str(err.value)
def test_datetime64_no_format():
dt64 = np.datetime64("2000-01-02T03:04:05.123456789")
t = Time(dt64, scale="utc", precision=9)
assert t.iso == "2000-01-02 03:04:05.123456789"
assert t.datetime64 == dt64
assert t.value == dt64
def test_hash_time():
loc1 = EarthLocation(1 * u.m, 2 * u.m, 3 * u.m)
for loc in None, loc1:
t = Time([1, 1, 2, 3], format="cxcsec", location=loc)
t[3] = np.ma.masked
h1 = hash(t[0])
h2 = hash(t[1])
h3 = hash(t[2])
assert h1 == h2
assert h1 != h3
with pytest.raises(TypeError) as exc:
hash(t)
assert exc.value.args[0] == "unhashable type: 'Time' (must be scalar)"
with pytest.raises(TypeError) as exc:
hash(t[3])
assert exc.value.args[0] == "unhashable type: 'Time' (value is masked)"
t = Time(1, format="cxcsec", location=loc)
t2 = Time(1, format="cxcsec")
assert hash(t) != hash(t2)
t = Time("2000:180", scale="utc")
t2 = Time(t, scale="tai")
assert t == t2
assert hash(t) != hash(t2)
def test_hash_time_delta():
t = TimeDelta([1, 1, 2, 3], format="sec")
t[3] = np.ma.masked
h1 = hash(t[0])
h2 = hash(t[1])
h3 = hash(t[2])
assert h1 == h2
assert h1 != h3
with pytest.raises(TypeError) as exc:
hash(t)
assert exc.value.args[0] == "unhashable type: 'TimeDelta' (must be scalar)"
with pytest.raises(TypeError) as exc:
hash(t[3])
assert exc.value.args[0] == "unhashable type: 'TimeDelta' (value is masked)"
def test_get_time_fmt_exception_messages():
with pytest.raises(ValueError) as err:
Time(10)
assert "No time format was given, and the input is" in str(err.value)
with pytest.raises(ValueError) as err:
Time("2000:001", format="not-a-format")
assert "Format 'not-a-format' is not one of the allowed" in str(err.value)
with pytest.raises(ValueError) as err:
Time("200")
assert "Input values did not match any of the formats where" in str(err.value)
with pytest.raises(ValueError) as err:
Time("200", format="iso")
assert (
"Input values did not match the format class iso:"
+ os.linesep
+ "ValueError: Time 200 does not match iso format"
) == str(err.value)
with pytest.raises(ValueError) as err:
Time(200, format="iso")
assert (
"Input values did not match the format class iso:"
+ os.linesep
+ "TypeError: Input values for iso class must be strings"
) == str(err.value)
def test_ymdhms_defaults():
t1 = Time({"year": 2001}, format="ymdhms")
assert t1 == Time("2001-01-01")
times_dict_ns = {
"year": [2001, 2002],
"month": [2, 3],
"day": [4, 5],
"hour": [6, 7],
"minute": [8, 9],
"second": [10, 11],
}
table_ns = Table(times_dict_ns)
struct_array_ns = table_ns.as_array()
rec_array_ns = struct_array_ns.view(np.recarray)
ymdhms_names = ("year", "month", "day", "hour", "minute", "second")
@pytest.mark.parametrize("tm_input", [table_ns, struct_array_ns, rec_array_ns])
@pytest.mark.parametrize("kwargs", [{}, {"format": "ymdhms"}])
@pytest.mark.parametrize("as_row", [False, True])
def test_ymdhms_init_from_table_like(tm_input, kwargs, as_row):
time_ns = Time(["2001-02-04 06:08:10", "2002-03-05 07:09:11"])
if as_row:
tm_input = tm_input[0]
time_ns = time_ns[0]
tm = Time(tm_input, **kwargs)
assert np.all(tm == time_ns)
assert tm.value.dtype.names == ymdhms_names
def test_ymdhms_init_from_dict_array():
times_dict_shape = {"year": [[2001, 2002], [2003, 2004]], "month": [2, 3], "day": 4}
time_shape = Time([["2001-02-04", "2002-03-04"], ["2003-02-04", "2004-03-04"]])
time = Time(times_dict_shape, format="ymdhms")
assert np.all(time == time_shape)
assert time.ymdhms.shape == time_shape.shape
@pytest.mark.parametrize("kwargs", [{}, {"format": "ymdhms"}])
def test_ymdhms_init_from_dict_scalar(kwargs):
"""
Test YMDHMS functionality for a dict input. This includes ensuring that
key and attribute access work. For extra fun use a time within a leap
second.
"""
time_dict = {
"year": 2016,
"month": 12,
"day": 31,
"hour": 23,
"minute": 59,
"second": 60.123456789,
}
tm = Time(time_dict, **kwargs)
assert tm == Time("2016-12-31T23:59:60.123456789")
for attr in time_dict:
for value in (tm.value[attr], getattr(tm.value, attr)):
if attr == "second":
assert allclose_sec(time_dict[attr], value)
else:
assert time_dict[attr] == value
# Now test initializing from a YMDHMS format time using the object
tm_rt = Time(tm)
assert tm_rt == tm
assert tm_rt.format == "ymdhms"
# Test initializing from a YMDHMS value (np.void, i.e. recarray row)
# without specified format.
tm_rt = Time(tm.ymdhms)
assert tm_rt == tm
assert tm_rt.format == "ymdhms"
def test_ymdhms_exceptions():
with pytest.raises(ValueError, match="input must be dict or table-like"):
Time(10, format="ymdhms")
match = "'wrong' not allowed as YMDHMS key name(s)"
# NB: for reasons unknown, using match=match in pytest.raises() fails, so we
# fall back to old school ``match in str(err.value)``.
with pytest.raises(ValueError) as err:
Time({"year": 2019, "wrong": 1}, format="ymdhms")
assert match in str(err.value)
match = "for 2 input key names you must supply 'year', 'month'"
with pytest.raises(ValueError, match=match):
Time({"year": 2019, "minute": 1}, format="ymdhms")
def test_ymdhms_masked():
tm = Time({"year": [2000, 2001]}, format="ymdhms")
tm[0] = np.ma.masked
assert isinstance(tm.value[0], np.ma.core.mvoid)
for name in ymdhms_names:
assert tm.value[0][name] is np.ma.masked
# Converted from doctest in astropy/test/formats.py for debugging
def test_ymdhms_output():
t = Time(
{
"year": 2015,
"month": 2,
"day": 3,
"hour": 12,
"minute": 13,
"second": 14.567,
},
scale="utc",
)
# NOTE: actually comes back as np.void for some reason
# NOTE: not necessarily a python int; might be an int32
assert t.ymdhms.year == 2015
@pytest.mark.parametrize("fmt", TIME_FORMATS)
def test_write_every_format_to_ecsv(fmt):
"""Test special-case serialization of certain Time formats"""
t = Table()
# Use a time that tests the default serialization of the time format
tm = Time("2020-01-01") + [[1, 1 / 7], [3, 4.5]] * u.s
tm.format = fmt
t["a"] = tm
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = Table.read(out.getvalue(), format="ascii.ecsv")
assert t["a"].format == t2["a"].format
# Some loss of precision in the serialization
assert not np.all(t["a"] == t2["a"])
# But no loss in the format representation
assert np.all(t["a"].value == t2["a"].value)
@pytest.mark.parametrize("fmt", TIME_FORMATS)
def test_write_every_format_to_fits(fmt, tmp_path):
"""Test special-case serialization of certain Time formats"""
t = Table()
# Use a time that tests the default serialization of the time format
tm = Time("2020-01-01") + [[1, 1 / 7], [3, 4.5]] * u.s
tm.format = fmt
t["a"] = tm
out = tmp_path / "out.fits"
t.write(out, format="fits")
t2 = Table.read(out, format="fits", astropy_native=True)
# Currently the format is lost in FITS so set it back
t2["a"].format = fmt
# No loss of precision in the serialization or representation
assert np.all(t["a"] == t2["a"])
assert np.all(t["a"].value == t2["a"].value)
@pytest.mark.skipif(not HAS_H5PY, reason="Needs h5py")
@pytest.mark.parametrize("fmt", TIME_FORMATS)
def test_write_every_format_to_hdf5(fmt, tmp_path):
"""Test special-case serialization of certain Time formats"""
t = Table()
# Use a time that tests the default serialization of the time format
tm = Time("2020-01-01") + [[1, 1 / 7], [3, 4.5]] * u.s
tm.format = fmt
t["a"] = tm
out = tmp_path / "out.h5"
t.write(str(out), format="hdf5", path="root", serialize_meta=True)
t2 = Table.read(str(out), format="hdf5", path="root")
assert t["a"].format == t2["a"].format
# No loss of precision in the serialization or representation
assert np.all(t["a"] == t2["a"])
assert np.all(t["a"].value == t2["a"].value)
# There are two stages of validation now - one on input into a format, so that
# the format conversion code has tidy matched arrays to work with, and the
# other when object construction does not go through a format object. Or at
# least, the format object is constructed with "from_jd=True". In this case the
# normal input validation does not happen but the new input validation does,
# and can ensure that strange broadcasting anomalies can't happen.
# This form of construction uses from_jd=True.
def test_broadcasting_writeable():
t = Time("J2015") + np.linspace(-1, 1, 10) * u.day
t[2] = Time(58000, format="mjd")
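# Illustrative sketch (added for exposition, not part of the original test suite): the same
# jd1/jd2 broadcasting machinery also covers assigning a scalar Time into a slice of an
# array-valued Time, which is the kind of broadcasting the validation described above guards.
def _broadcasting_assignment_sketch():
    t = Time("J2015") + np.linspace(-1, 1, 10) * u.day
    t[2:4] = Time(58000, format="mjd")  # scalar value broadcast into a length-2 slice
    return t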
def test_format_subformat_compatibility():
"""Test that changing format with out_subfmt defined is not a problem.
See #9812, #9810."""
t = Time("2019-12-20", out_subfmt="date_??")
assert t.mjd == 58837.0
assert t.yday == "2019:354:00:00" # Preserves out_subfmt
t2 = t.replicate(format="mjd")
assert t2.out_subfmt == "*" # Changes to default
t2 = t.copy(format="mjd")
assert t2.out_subfmt == "*"
t2 = Time(t, format="mjd")
assert t2.out_subfmt == "*"
t2 = t.copy(format="yday")
assert t2.out_subfmt == "date_??"
assert t2.value == "2019:354:00:00"
t.format = "yday"
assert t.value == "2019:354:00:00"
assert t.out_subfmt == "date_??"
t = Time("2019-12-20", out_subfmt="date")
assert t.mjd == 58837.0
assert t.yday == "2019:354"
@pytest.mark.parametrize("use_fast_parser", ["force", "False"])
def test_format_fractional_string_parsing(use_fast_parser):
"""Test that string like "2022-08-01.123" does not parse as ISO.
See #6476 and the fix."""
with pytest.raises(
ValueError, match=r"Input values did not match the format class iso"
):
with conf.set_temp("use_fast_parser", use_fast_parser):
Time("2022-08-01.123", format="iso")
@pytest.mark.parametrize("fmt_name,fmt_class", TIME_FORMATS.items())
def test_to_value_with_subfmt_for_every_format(fmt_name, fmt_class):
"""From a starting Time value, test that every valid combination of
to_value(format, subfmt) works. See #9812, #9361.
"""
t = Time("2000-01-01")
subfmts = [subfmt[0] for subfmt in fmt_class.subfmts] + [None, "*"]
for subfmt in subfmts:
t.to_value(fmt_name, subfmt)
@pytest.mark.parametrize("location", [None, (45, 45)])
def test_location_init(location):
"""Test fix in #9969 for issue #9962 where the location attribute is
    lost when initializing Time from an existing Time instance or list of
Time instances.
"""
tm = Time("J2010", location=location)
# Init from a scalar Time
tm2 = Time(tm)
assert np.all(tm.location == tm2.location)
assert type(tm.location) is type(tm2.location)
# From a list of Times
tm2 = Time([tm, tm])
if location is None:
assert tm2.location is None
else:
for loc in tm2.location:
assert loc == tm.location
assert type(tm.location) is type(tm2.location)
# Effectively the same as a list of Times, but just to be sure that
# Table mixin initialization is working as expected.
tm2 = Table([[tm, tm]])["col0"]
if location is None:
assert tm2.location is None
else:
for loc in tm2.location:
assert loc == tm.location
assert type(tm.location) is type(tm2.location)
def test_location_init_fail():
"""Test fix in #9969 for issue #9962 where the location attribute is
    lost when initializing Time from an existing Time instance or list of
Time instances. Make sure exception is correct.
"""
tm = Time("J2010", location=(45, 45))
tm2 = Time("J2010")
with pytest.raises(
ValueError, match="cannot concatenate times unless all locations"
):
Time([tm, tm2])
def test_linspace():
"""Test `np.linspace` `__array_func__` implementation for scalar and arrays."""
t1 = Time(["2021-01-01 00:00:00", "2021-01-02 00:00:00"])
t2 = Time(["2021-01-01 01:00:00", "2021-12-28 00:00:00"])
atol = 2 * np.finfo(float).eps * abs(t1 - t2).max()
ts = np.linspace(t1[0], t2[0], 3)
assert ts[0].isclose(Time("2021-01-01 00:00:00"), atol=atol)
assert ts[1].isclose(Time("2021-01-01 00:30:00"), atol=atol)
assert ts[2].isclose(Time("2021-01-01 01:00:00"), atol=atol)
ts = np.linspace(t1, t2[0], 2, endpoint=False)
assert ts.shape == (2, 2)
assert all(
ts[0].isclose(Time(["2021-01-01 00:00:00", "2021-01-02 00:00:00"]), atol=atol)
)
assert all(
ts[1].isclose(Time(["2021-01-01 00:30:00", "2021-01-01 12:30:00"]), atol=atol)
)
ts = np.linspace(t1, t2, 7)
assert ts.shape == (7, 2)
assert all(
ts[0].isclose(Time(["2021-01-01 00:00:00", "2021-01-02 00:00:00"]), atol=atol)
)
assert all(
ts[1].isclose(Time(["2021-01-01 00:10:00", "2021-03-03 00:00:00"]), atol=atol)
)
assert all(
ts[5].isclose(Time(["2021-01-01 00:50:00", "2021-10-29 00:00:00"]), atol=atol)
)
assert all(
ts[6].isclose(Time(["2021-01-01 01:00:00", "2021-12-28 00:00:00"]), atol=atol)
)
def test_linspace_steps():
"""Test `np.linspace` `retstep` option."""
t1 = Time(["2021-01-01 00:00:00", "2021-01-01 12:00:00"])
t2 = Time("2021-01-02 00:00:00")
atol = 2 * np.finfo(float).eps * abs(t1 - t2).max()
ts, st = np.linspace(t1, t2, 7, retstep=True)
assert ts.shape == (7, 2)
assert st.shape == (2,)
assert all(ts[1].isclose(ts[0] + st, atol=atol))
assert all(ts[6].isclose(ts[0] + 6 * st, atol=atol))
assert all(st.isclose(TimeDelta([14400, 7200], format="sec"), atol=atol))
def test_linspace_fmts():
"""Test `np.linspace` `__array_func__` implementation for start/endpoints
from different formats/systems.
"""
t1 = Time(["2020-01-01 00:00:00", "2020-01-02 00:00:00"])
t2 = Time(2458850, format="jd")
t3 = Time(1578009600, format="unix")
atol = 2 * np.finfo(float).eps * abs(t1 - Time([t2, t3])).max()
ts = np.linspace(t1, t2, 3)
assert ts.shape == (3, 2)
assert all(
ts[0].isclose(Time(["2020-01-01 00:00:00", "2020-01-02 00:00:00"]), atol=atol)
)
assert all(
ts[1].isclose(Time(["2020-01-01 06:00:00", "2020-01-01 18:00:00"]), atol=atol)
)
assert all(
ts[2].isclose(Time(["2020-01-01 12:00:00", "2020-01-01 12:00:00"]), atol=atol)
)
ts = np.linspace(t1, Time([t2, t3]), 3)
assert ts.shape == (3, 2)
assert all(
ts[0].isclose(Time(["2020-01-01 00:00:00", "2020-01-02 00:00:00"]), atol=atol)
)
assert all(
ts[1].isclose(Time(["2020-01-01 06:00:00", "2020-01-02 12:00:00"]), atol=atol)
)
assert all(
ts[2].isclose(Time(["2020-01-01 12:00:00", "2020-01-03 00:00:00"]), atol=atol)
)
def test_to_string():
dims = [8, 2, 8]
dx = np.arange(np.prod(dims)).reshape(dims)
tm = Time("2020-01-01", out_subfmt="date") + dx * u.day
exp_lines = [
"[[['2020-01-01' '2020-01-02' ... '2020-01-07' '2020-01-08']",
" ['2020-01-09' '2020-01-10' ... '2020-01-15' '2020-01-16']]",
"",
" [['2020-01-17' '2020-01-18' ... '2020-01-23' '2020-01-24']",
" ['2020-01-25' '2020-01-26' ... '2020-01-31' '2020-02-01']]",
"",
" ...",
"",
" [['2020-04-06' '2020-04-07' ... '2020-04-12' '2020-04-13']",
" ['2020-04-14' '2020-04-15' ... '2020-04-20' '2020-04-21']]",
"",
" [['2020-04-22' '2020-04-23' ... '2020-04-28' '2020-04-29']",
" ['2020-04-30' '2020-05-01' ... '2020-05-06' '2020-05-07']]]",
]
exp_str = "\n".join(exp_lines)
with np.printoptions(threshold=100, edgeitems=2, linewidth=75):
out_str = str(tm)
out_repr = repr(tm)
assert out_str == exp_str
exp_repr = f"<Time object: scale='utc' format='iso' value={exp_str}>"
assert out_repr == exp_repr
|
39f56d654c240c86489799445ab1e906f121d1dd0658015fae49aa1476fe1e13 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import numpy as np
import pytest
from astropy import units as u
from astropy.table import Column
from astropy.time import Time, TimeDelta
allclose_sec = functools.partial(
np.allclose, rtol=2.0**-52, atol=2.0**-52 * 24 * 3600
) # 20 ps atol
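# (2.0**-52 is double-precision machine epsilon, so atol = eps * 86400 s ~ 1.9e-11 s, i.e. ~20 ps.)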
class TestTimeQuantity:
"""Test Interaction of Time with Quantities"""
def test_valid_quantity_input(self):
"""Test Time formats that are allowed to take quantity input."""
q = 2450000.125 * u.day
t1 = Time(q, format="jd", scale="utc")
assert t1.value == q.value
q2 = q.to(u.second)
t2 = Time(q2, format="jd", scale="utc")
assert t2.value == q.value == q2.to_value(u.day)
q3 = q - 2400000.5 * u.day
t3 = Time(q3, format="mjd", scale="utc")
assert t3.value == q3.value
# test we can deal with two quantity arguments, with different units
qs = 24.0 * 36.0 * u.second
t4 = Time(q3, qs, format="mjd", scale="utc")
assert t4.value == (q3 + qs).to_value(u.day)
qy = 1990.0 * u.yr
ty1 = Time(qy, format="jyear", scale="utc")
assert ty1.value == qy.value
ty2 = Time(qy.to(u.day), format="jyear", scale="utc")
assert ty2.value == qy.value
qy2 = 10.0 * u.yr
tcxc = Time(qy2, format="cxcsec")
assert tcxc.value == qy2.to_value(u.second)
tgps = Time(qy2, format="gps")
assert tgps.value == qy2.to_value(u.second)
tunix = Time(qy2, format="unix")
assert tunix.value == qy2.to_value(u.second)
qd = 2000.0 * 365.0 * u.day
tplt = Time(qd, format="plot_date", scale="utc")
assert tplt.value == qd.value
def test_invalid_quantity_input(self):
with pytest.raises(u.UnitsError):
Time(2450000.0 * u.m, format="jd", scale="utc")
with pytest.raises(u.UnitsError):
Time(2450000.0 * u.dimensionless_unscaled, format="jd", scale="utc")
def test_column_with_and_without_units(self):
"""Ensure a Column without a unit is treated as an array [#3648]"""
a = np.arange(50000.0, 50010.0)
ta = Time(a, format="mjd")
c1 = Column(np.arange(50000.0, 50010.0), name="mjd")
tc1 = Time(c1, format="mjd")
assert np.all(ta == tc1)
c2 = Column(np.arange(50000.0, 50010.0), name="mjd", unit="day")
tc2 = Time(c2, format="mjd")
assert np.all(ta == tc2)
c3 = Column(np.arange(50000.0, 50010.0), name="mjd", unit="m")
with pytest.raises(u.UnitsError):
Time(c3, format="mjd")
def test_no_quantity_input_allowed(self):
"""Time formats that are not allowed to take Quantity input."""
qy = 1990.0 * u.yr
for fmt in ("iso", "yday", "datetime", "byear", "byear_str", "jyear_str"):
with pytest.raises(ValueError):
Time(qy, format=fmt, scale="utc")
def test_valid_quantity_operations(self):
"""Check that adding a time-valued quantity to a Time gives a Time"""
t0 = Time(100000.0, format="cxcsec")
q1 = 10.0 * u.second
t1 = t0 + q1
assert isinstance(t1, Time)
assert t1.value == t0.value + q1.to_value(u.second)
q2 = 1.0 * u.day
t2 = t0 - q2
assert allclose_sec(t2.value, t0.value - q2.to_value(u.second))
# check broadcasting
q3 = np.arange(15.0).reshape(3, 5) * u.hour
t3 = t0 - q3
assert t3.shape == q3.shape
assert allclose_sec(t3.value, t0.value - q3.to_value(u.second))
def test_invalid_quantity_operations(self):
"""Check that comparisons of Time with quantities does not work
(even for time-like, since we cannot compare Time to TimeDelta)"""
with pytest.raises(TypeError):
Time(100000.0, format="cxcsec") > 10.0 * u.m # noqa: B015
with pytest.raises(TypeError):
Time(100000.0, format="cxcsec") > 10.0 * u.second # noqa: B015
class TestTimeDeltaQuantity:
"""Test interaction of TimeDelta with Quantities"""
def test_valid_quantity_input(self):
"""Test that TimeDelta can take quantity input."""
q = 500.25 * u.day
dt1 = TimeDelta(q, format="jd")
assert dt1.value == q.value
dt2 = TimeDelta(q, format="sec")
assert dt2.value == q.to_value(u.second)
dt3 = TimeDelta(q)
assert dt3.value == q.value
def test_invalid_quantity_input(self):
with pytest.raises(u.UnitsError):
TimeDelta(2450000.0 * u.m, format="jd")
with pytest.raises(u.UnitsError):
Time(2450000.0 * u.dimensionless_unscaled, format="jd", scale="utc")
with pytest.raises(TypeError):
TimeDelta(100, format="sec") > 10.0 * u.m # noqa: B015
def test_quantity_output(self):
q = 500.25 * u.day
dt = TimeDelta(q)
assert dt.to(u.day) == q
assert dt.to_value(u.day) == q.value
assert dt.to_value("day") == q.value
assert dt.to(u.second).value == q.to_value(u.second)
assert dt.to_value(u.second) == q.to_value(u.second)
assert dt.to_value("s") == q.to_value(u.second)
# Following goes through "format", but should be the same.
assert dt.to_value("sec") == q.to_value(u.second)
def test_quantity_output_errors(self):
dt = TimeDelta(250.0, format="sec")
with pytest.raises(u.UnitsError):
dt.to(u.m)
with pytest.raises(u.UnitsError):
dt.to_value(u.m)
with pytest.raises(u.UnitsError):
dt.to_value(unit=u.m)
with pytest.raises(
ValueError,
match="not one of the known formats.*failed to parse as a unit",
):
dt.to_value("parrot")
with pytest.raises(TypeError):
dt.to_value("sec", unit=u.s)
with pytest.raises(TypeError):
# TODO: would be nice to make this work!
dt.to_value(u.s, subfmt="str")
def test_valid_quantity_operations1(self):
"""Check adding/subtracting/comparing a time-valued quantity works
with a TimeDelta. Addition/subtraction should give TimeDelta"""
t0 = TimeDelta(106400.0, format="sec")
q1 = 10.0 * u.second
t1 = t0 + q1
assert isinstance(t1, TimeDelta)
assert t1.value == t0.value + q1.to_value(u.second)
q2 = 1.0 * u.day
t2 = t0 - q2
assert isinstance(t2, TimeDelta)
assert allclose_sec(t2.value, t0.value - q2.to_value(u.second))
# now comparisons
assert t0 > q1
assert t0 < 1.0 * u.yr
# and broadcasting
q3 = np.arange(12.0).reshape(4, 3) * u.hour
t3 = t0 + q3
assert isinstance(t3, TimeDelta)
assert t3.shape == q3.shape
assert allclose_sec(t3.value, t0.value + q3.to_value(u.second))
def test_valid_quantity_operations2(self):
"""Check that TimeDelta is treated as a quantity where possible."""
t0 = TimeDelta(100000.0, format="sec")
f = 1.0 / t0
assert isinstance(f, u.Quantity)
assert f.unit == 1.0 / u.day
g = 10.0 * u.m / u.second**2
v = t0 * g
assert isinstance(v, u.Quantity)
assert u.allclose(v, t0.sec * g.value * u.m / u.second)
q = np.log10(t0 / u.second)
assert isinstance(q, u.Quantity)
assert q.value == np.log10(t0.sec)
s = 1.0 * u.m
v = s / t0
assert isinstance(v, u.Quantity)
assert u.allclose(v, 1.0 / t0.sec * u.m / u.s)
t = 1.0 * u.s
t2 = t0 * t
assert isinstance(t2, u.Quantity)
assert u.allclose(t2, t0.sec * u.s**2)
t3 = [1] / t0
assert isinstance(t3, u.Quantity)
assert u.allclose(t3, 1 / (t0.sec * u.s))
# broadcasting
t1 = TimeDelta(np.arange(100000.0, 100012.0).reshape(6, 2), format="sec")
f = np.array([1.0, 2.0]) * u.cycle * u.Hz
phase = f * t1
assert isinstance(phase, u.Quantity)
assert phase.shape == t1.shape
assert u.allclose(phase, t1.sec * f.value * u.cycle)
q = t0 * t1
assert isinstance(q, u.Quantity)
assert np.all(q == t0.to(u.day) * t1.to(u.day))
q = t1 / t0
assert isinstance(q, u.Quantity)
assert np.all(q == t1.to(u.day) / t0.to(u.day))
def test_valid_quantity_operations3(self):
"""Test a TimeDelta remains one if possible."""
t0 = TimeDelta(10.0, format="jd")
q = 10.0 * u.one
t1 = q * t0
assert isinstance(t1, TimeDelta)
assert t1 == TimeDelta(100.0, format="jd")
t2 = t0 * q
assert isinstance(t2, TimeDelta)
assert t2 == TimeDelta(100.0, format="jd")
t3 = t0 / q
assert isinstance(t3, TimeDelta)
assert t3 == TimeDelta(1.0, format="jd")
q2 = 1.0 * u.percent
t4 = t0 * q2
assert isinstance(t4, TimeDelta)
assert abs(t4 - TimeDelta(0.1, format="jd")) < 1.0 * u.ns
q3 = 1.0 * u.hr / (36.0 * u.s)
t5 = q3 * t0
        assert isinstance(t5, TimeDelta)
assert abs(t5 - TimeDelta(1000.0, format="jd")) < 1.0 * u.ns
# Test multiplication with a unit.
t6 = t0 * u.one
assert isinstance(t6, TimeDelta)
assert t6 == TimeDelta(10.0, format="jd")
t7 = u.one * t0
assert isinstance(t7, TimeDelta)
assert t7 == TimeDelta(10.0, format="jd")
t8 = t0 * ""
assert isinstance(t8, TimeDelta)
assert t8 == TimeDelta(10.0, format="jd")
t9 = "" * t0
assert isinstance(t9, TimeDelta)
assert t9 == TimeDelta(10.0, format="jd")
t10 = t0 / u.one
assert isinstance(t10, TimeDelta)
        assert t10 == TimeDelta(10.0, format="jd")
t11 = t0 / ""
assert isinstance(t11, TimeDelta)
assert t11 == TimeDelta(10.0, format="jd")
t12 = t0 / [1]
assert isinstance(t12, TimeDelta)
assert t12 == TimeDelta(10.0, format="jd")
t13 = [1] * t0
assert isinstance(t13, TimeDelta)
assert t13 == TimeDelta(10.0, format="jd")
def test_invalid_quantity_operations(self):
"""Check comparisons of TimeDelta with non-time quantities fails."""
with pytest.raises(TypeError):
TimeDelta(100000.0, format="sec") > 10.0 * u.m # noqa: B015
def test_invalid_quantity_operations2(self):
"""Check that operations with non-time/quantity fail."""
td = TimeDelta(100000.0, format="sec")
with pytest.raises(TypeError):
td * object()
with pytest.raises(TypeError):
td / object()
def test_invalid_quantity_broadcast(self):
"""Check broadcasting rules in interactions with Quantity."""
t0 = TimeDelta(np.arange(12.0).reshape(4, 3), format="sec")
with pytest.raises(ValueError):
t0 + np.arange(4.0) * u.s
class TestDeltaAttributes:
def test_delta_ut1_utc(self):
t = Time("2010-01-01 00:00:00", format="iso", scale="utc", precision=6)
t.delta_ut1_utc = 0.3 * u.s
assert t.ut1.iso == "2010-01-01 00:00:00.300000"
t.delta_ut1_utc = 0.4 / 60.0 * u.minute
assert t.ut1.iso == "2010-01-01 00:00:00.400000"
with pytest.raises(u.UnitsError):
t.delta_ut1_utc = 0.4 * u.m
# Also check that a TimeDelta works.
t.delta_ut1_utc = TimeDelta(0.3, format="sec")
assert t.ut1.iso == "2010-01-01 00:00:00.300000"
t.delta_ut1_utc = TimeDelta(0.5 / 24.0 / 3600.0, format="jd")
assert t.ut1.iso == "2010-01-01 00:00:00.500000"
def test_delta_tdb_tt(self):
t = Time("2010-01-01 00:00:00", format="iso", scale="tt", precision=6)
t.delta_tdb_tt = 20.0 * u.second
assert t.tdb.iso == "2010-01-01 00:00:20.000000"
t.delta_tdb_tt = 30.0 / 60.0 * u.minute
assert t.tdb.iso == "2010-01-01 00:00:30.000000"
with pytest.raises(u.UnitsError):
t.delta_tdb_tt = 0.4 * u.m
# Also check that a TimeDelta works.
t.delta_tdb_tt = TimeDelta(40.0, format="sec")
assert t.tdb.iso == "2010-01-01 00:00:40.000000"
t.delta_tdb_tt = TimeDelta(50.0 / 24.0 / 3600.0, format="jd")
assert t.tdb.iso == "2010-01-01 00:00:50.000000"
@pytest.mark.parametrize(
"q1, q2",
(
(5e8 * u.s, None),
(5e17 * u.ns, None),
(4e8 * u.s, 1e17 * u.ns),
(4e14 * u.us, 1e17 * u.ns),
),
)
def test_quantity_conversion_rounding(q1, q2):
"""Check that no rounding errors are incurred by unit conversion.
This occurred before as quantities in seconds were converted to days
before trying to split them into two-part doubles. See gh-7622.
"""
t = Time("2001-01-01T00:00:00.", scale="tai")
expected = Time("2016-11-05T00:53:20.", scale="tai")
if q2 is None:
t0 = t + q1
else:
t0 = t + q1 + q2
assert abs(t0 - expected) < 20 * u.ps
dt1 = TimeDelta(q1, q2)
t1 = t + dt1
assert abs(t1 - expected) < 20 * u.ps
dt2 = TimeDelta(q1, q2, format="sec")
t2 = t + dt2
assert abs(t2 - expected) < 20 * u.ps
|
a7e5844dc77f7d792254aa1706039bcb072460fd01ba697060efcaa4b83f397b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import operator
import numpy as np
import pytest
import astropy.units as u
from astropy.time import Time, TimeDelta
class TestTimeComparisons:
"""Test Comparisons of Time and TimeDelta classes"""
def setup_method(self):
self.t1 = Time(np.arange(49995, 50005), format="mjd", scale="utc")
self.t2 = Time(np.arange(49000, 51000, 200), format="mjd", scale="utc")
def test_miscompares(self):
"""
If an incompatible object is compared to a Time object, == should
return False and != should return True. All other comparison
operators should raise a TypeError.
"""
t1 = Time("J2000", scale="utc")
for op, op_str in (
(operator.ge, ">="),
(operator.gt, ">"),
(operator.le, "<="),
(operator.lt, "<"),
):
with pytest.raises(TypeError):
op(t1, None)
# Keep == and != as they are specifically meant to test Time.__eq__
# and Time.__ne__
assert (t1 == None) is False
assert (t1 != None) is True
def test_time(self):
t1_lt_t2 = self.t1 < self.t2
assert np.all(
t1_lt_t2
== np.array(
[False, False, False, False, False, False, True, True, True, True]
)
)
t1_ge_t2 = self.t1 >= self.t2
assert np.all(t1_ge_t2 != t1_lt_t2)
t1_le_t2 = self.t1 <= self.t2
assert np.all(
t1_le_t2
== np.array(
[False, False, False, False, False, True, True, True, True, True]
)
)
t1_gt_t2 = self.t1 > self.t2
assert np.all(t1_gt_t2 != t1_le_t2)
t1_eq_t2 = self.t1 == self.t2
assert np.all(
t1_eq_t2
== np.array(
[False, False, False, False, False, True, False, False, False, False]
)
)
t1_ne_t2 = self.t1 != self.t2
assert np.all(t1_ne_t2 != t1_eq_t2)
t1_0_gt_t2_0 = self.t1[0] > self.t2[0]
assert t1_0_gt_t2_0 is True
t1_0_gt_t2 = self.t1[0] > self.t2
assert np.all(
t1_0_gt_t2
== np.array(
[True, True, True, True, True, False, False, False, False, False]
)
)
t1_gt_t2_0 = self.t1 > self.t2[0]
assert np.all(
t1_gt_t2_0
== np.array([True, True, True, True, True, True, True, True, True, True])
)
def test_time_boolean(self):
t1_0_gt_t2_0 = self.t1[0] > self.t2[0]
assert t1_0_gt_t2_0 is True
def test_timedelta(self):
dt = self.t2 - self.t1
with pytest.raises(TypeError):
self.t1 > dt # noqa: B015
dt_gt_td0 = dt > TimeDelta(0.0, format="sec")
assert np.all(
dt_gt_td0
== np.array(
[False, False, False, False, False, False, True, True, True, True]
)
)
@pytest.mark.parametrize("swap", [True, False])
@pytest.mark.parametrize("time_delta", [True, False])
def test_isclose_time(swap, time_delta):
"""Test functionality of Time.isclose() method.
Run every test with 2 args in original order and swapped, and using
Quantity or TimeDelta for atol (when provided)."""
def isclose_swap(t1, t2, **kwargs):
if swap:
t1, t2 = t2, t1
if "atol" in kwargs and time_delta:
kwargs["atol"] = TimeDelta(kwargs["atol"])
return t1.isclose(t2, **kwargs)
# Start with original demonstration from #8742. In this issue both t2 == t1
# and t3 == t1 give False, but this may change with a newer ERFA.
t1 = Time("2018-07-24T10:41:56.807015240")
t2 = t1 + 0.0 * u.s
t3 = t1 + TimeDelta(0.0 * u.s)
assert isclose_swap(t1, t2)
assert isclose_swap(t1, t3)
t2 = t1 + 1 * u.s
assert isclose_swap(t1, t2, atol=1.5 / 86400 * u.day) # Test different unit
assert not isclose_swap(t1, t2, atol=0.5 / 86400 * u.day)
t2 = t1 + [-1, 0, 2] * u.s
assert np.all(isclose_swap(t1, t2, atol=1.5 * u.s) == [True, True, False])
t2 = t1 + 3 * np.finfo(float).eps * u.day
assert not isclose_swap(t1, t2)
def test_isclose_time_exceptions():
t1 = Time("2020:001")
t2 = t1 + 1 * u.s
match = "'other' argument must support subtraction with Time"
with pytest.raises(TypeError, match=match):
t1.isclose(1.5)
match = (
"'atol' argument must be a Quantity or TimeDelta instance, got float instead"
)
with pytest.raises(TypeError, match=match):
t1.isclose(t2, 1.5)
@pytest.mark.parametrize("swap", [True, False])
@pytest.mark.parametrize("time_delta", [True, False])
@pytest.mark.parametrize("other_quantity", [True, False])
def test_isclose_timedelta(swap, time_delta, other_quantity):
"""Test functionality of TimeDelta.isclose() method.
Run every test with 2 args in original order and swapped, and using
Quantity or TimeDelta for atol (when provided), and using Quantity or
TimeDelta for the other argument."""
def isclose_swap(t1, t2, **kwargs):
if swap:
t1, t2 = t2, t1
if "atol" in kwargs and time_delta:
kwargs["atol"] = TimeDelta(kwargs["atol"])
return t1.isclose(t2, **kwargs)
def isclose_other_quantity(t1, t2, **kwargs):
if other_quantity:
t2 = t2.to(u.day)
if "atol" in kwargs and time_delta:
kwargs["atol"] = TimeDelta(kwargs["atol"])
return t1.isclose(t2, **kwargs)
t1 = TimeDelta(1.0 * u.s)
t2 = t1 + 0.0 * u.s
t3 = t1 + TimeDelta(0.0 * u.s)
assert isclose_swap(t1, t2)
assert isclose_swap(t1, t3)
assert isclose_other_quantity(t1, t2)
assert isclose_other_quantity(t1, t3)
t2 = t1 + 1 * u.s
assert isclose_swap(t1, t2, atol=1.5 / 86400 * u.day)
assert not isclose_swap(t1, t2, atol=0.5 / 86400 * u.day)
assert isclose_other_quantity(t1, t2, atol=1.5 / 86400 * u.day)
assert not isclose_other_quantity(t1, t2, atol=0.5 / 86400 * u.day)
t1 = TimeDelta(0 * u.s)
t2 = t1 + [-1, 0, 2] * u.s
assert np.all(isclose_swap(t1, t2, atol=1.5 * u.s) == [True, True, False])
assert np.all(isclose_other_quantity(t1, t2, atol=1.5 * u.s) == [True, True, False])
# Check with rtol
# 1 * 0.6 + 0.5 = 1.1 --> 1 <= 1.1 --> True
# 0 * 0.6 + 0.5 = 0.5 --> 0 <= 0.5 --> True
# 2 * 0.6 + 0.5 = 1.7 --> 2 <= 1.7 --> False
assert np.all(t1.isclose(t2, atol=0.5 * u.s, rtol=0.6) == [True, True, False])
t2 = t1 + 2 * np.finfo(float).eps * u.day
assert not isclose_swap(t1, t2)
assert not isclose_other_quantity(t1, t2)
def test_isclose_timedelta_exceptions():
t1 = TimeDelta(1 * u.s)
t2 = t1 + 1 * u.s
match = "other' argument must support conversion to days"
with pytest.raises(TypeError, match=match):
t1.isclose(1.5)
match = (
"'atol' argument must be a Quantity or TimeDelta instance, got float instead"
)
with pytest.raises(TypeError, match=match):
t1.isclose(t2, 1.5)
|
d6f5ab1c0d5353890b7e5c21af58fb659dec000a5be9c395ec4f0f9b00c00b0b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import numpy as np
import pytest
from astropy import units as u
from astropy.table import Table
from astropy.time import Time
from astropy.utils import iers
from astropy.utils.compat import PYTHON_LT_3_11
from astropy.utils.compat.optional_deps import HAS_H5PY
allclose_sec = functools.partial(
np.allclose, rtol=2.0**-52, atol=2.0**-52 * 24 * 3600
) # 20 ps atol
is_masked = np.ma.is_masked
# The first form is expanded to r"can't set attribute '{0}'" in Python 3.10, and replaced
# with the more informative second form as of 3.11 (python/cpython#31311).
no_setter_err = (
r"can't set attribute"
if PYTHON_LT_3_11
else r"property '{0}' of '{1}' object has no setter"
)
def test_simple():
t = Time([1, 2, 3], format="cxcsec")
assert t.masked is False
assert np.all(t.mask == [False, False, False])
# Before masking, format output is not a masked array (it is an ndarray
# like always)
assert not isinstance(t.value, np.ma.MaskedArray)
assert not isinstance(t.unix, np.ma.MaskedArray)
t[2] = np.ma.masked
assert t.masked is True
assert np.all(t.mask == [False, False, True])
assert allclose_sec(t.value[:2], [1, 2])
assert is_masked(t.value[2])
assert is_masked(t[2].value)
# After masking format output is a masked array
assert isinstance(t.value, np.ma.MaskedArray)
assert isinstance(t.unix, np.ma.MaskedArray)
# TODO : test all formats
def test_scalar_init():
t = Time("2000:001")
assert t.masked is False
assert t.mask == np.array(False)
def test_mask_not_writeable():
t = Time("2000:001")
with pytest.raises(
AttributeError, match=no_setter_err.format("mask", t.__class__.__name__)
):
t.mask = True
t = Time(["2000:001"])
with pytest.raises(ValueError) as err:
t.mask[0] = True
assert "assignment destination is read-only" in str(err.value)
def test_str():
t = Time(["2000:001", "2000:002"])
t[1] = np.ma.masked
assert str(t) == "['2000:001:00:00:00.000' --]"
assert (
repr(t)
== "<Time object: scale='utc' format='yday' value=['2000:001:00:00:00.000' --]>"
)
expected = [
"masked_array(data=['2000-01-01 00:00:00.000', --],",
" mask=[False, True],",
" fill_value='N/A',",
" dtype='<U23')",
]
# Note that we need to take care to allow for big-endian platforms,
# for which the dtype will be >U23 instead of <U23, which we do with
# the call to replace().
assert repr(t.iso).replace(">U23", "<U23").splitlines() == expected
# Assign value to unmask
t[1] = "2000:111"
assert str(t) == "['2000:001:00:00:00.000' '2000:111:00:00:00.000']"
assert t.masked is False
def test_transform():
with iers.conf.set_temp("auto_download", False):
t = Time(["2000:001", "2000:002"])
t[1] = np.ma.masked
# Change scale (this tests the ERFA machinery with masking as well)
t_ut1 = t.ut1
assert is_masked(t_ut1.value[1])
assert not is_masked(t_ut1.value[0])
assert np.all(t_ut1.mask == [False, True])
# Change format
t_unix = t.unix
assert is_masked(t_unix[1])
assert not is_masked(t_unix[0])
assert np.all(t_unix.mask == [False, True])
def test_masked_input():
v0 = np.ma.MaskedArray([[1, 2], [3, 4]]) # No masked elements
v1 = np.ma.MaskedArray([[1, 2], [3, 4]], mask=[[True, False], [False, False]])
v2 = np.ma.MaskedArray([[10, 20], [30, 40]], mask=[[False, False], [False, True]])
# Init from various combinations of masked arrays
t = Time(v0, format="cxcsec")
assert np.ma.allclose(t.value, v0)
assert np.all(t.mask == [[False, False], [False, False]])
assert t.masked is False
t = Time(v1, format="cxcsec")
assert np.ma.allclose(t.value, v1)
assert np.all(t.mask == v1.mask)
assert np.all(t.value.mask == v1.mask)
assert t.masked is True
t = Time(v1, v2, format="cxcsec")
assert np.ma.allclose(t.value, v1 + v2)
assert np.all(t.mask == (v1 + v2).mask)
assert t.masked is True
t = Time(v0, v1, format="cxcsec")
assert np.ma.allclose(t.value, v0 + v1)
assert np.all(t.mask == (v0 + v1).mask)
assert t.masked is True
t = Time(0, v2, format="cxcsec")
assert np.ma.allclose(t.value, v2)
assert np.all(t.mask == v2.mask)
assert t.masked is True
# Init from a string masked array
t_iso = t.iso
t2 = Time(t_iso)
assert np.all(t2.value == t_iso)
assert np.all(t2.mask == v2.mask)
assert t2.masked is True
def test_all_masked_input():
"""Fix for #9612"""
# Test with jd=0 and jd=np.nan. Both triggered an exception prior to #9624
# due to astropy.utils.exceptions.ErfaError.
for val in (0, np.nan):
t = Time(np.ma.masked_array([val], mask=[True]), format="jd")
assert str(t.iso) == "[--]"
def test_serialize_fits_masked(tmp_path):
tm = Time([1, 2, 3], format="cxcsec")
tm[1] = np.ma.masked
fn = tmp_path / "tempfile.fits"
t = Table([tm])
t.write(fn)
t2 = Table.read(fn, astropy_native=True)
    # Time FITS handling does not currently round-trip format in FITS
t2["col0"].format = tm.format
assert t2["col0"].masked
assert np.all(t2["col0"].mask == [False, True, False])
assert np.all(t2["col0"].value == t["col0"].value)
@pytest.mark.skipif(not HAS_H5PY, reason="Needs h5py")
def test_serialize_hdf5_masked(tmp_path):
tm = Time([1, 2, 3], format="cxcsec")
tm[1] = np.ma.masked
fn = tmp_path / "tempfile.hdf5"
t = Table([tm])
t.write(fn, path="root", serialize_meta=True)
t2 = Table.read(fn)
assert t2["col0"].masked
assert np.all(t2["col0"].mask == [False, True, False])
assert np.all(t2["col0"].value == t["col0"].value)
# Ignore warning in MIPS https://github.com/astropy/astropy/issues/9750
@pytest.mark.filterwarnings("ignore:invalid value encountered")
@pytest.mark.parametrize("serialize_method", ["jd1_jd2", "formatted_value"])
def test_serialize_ecsv_masked(serialize_method, tmp_path):
tm = Time([1, 2, 3], format="cxcsec")
tm[1] = np.ma.masked
tm.info.serialize_method["ecsv"] = serialize_method
fn = tmp_path / "tempfile.ecsv"
t = Table([tm])
t.write(fn)
t2 = Table.read(fn)
assert t2["col0"].masked
assert np.all(t2["col0"].mask == [False, True, False])
# Serializing formatted_value loses some precision.
atol = 0.1 * u.us if serialize_method == "formatted_value" else 1 * u.ps
assert np.all(abs(t2["col0"] - t["col0"]) <= atol)
|
d8c6057fdf371d2314345a29cf1e431c5a741227fcbea35839b608b61f1cdfb9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module includes files automatically generated from ply (these end in
# _lextab.py and _parsetab.py). To generate these files, remove them from this
# folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to the re-generated _lextab.py and
# _parsetab.py files.
"""Handles the CDS string format for units."""
import re
from astropy.units.utils import is_effectively_unity
from astropy.utils import classproperty, parsing
from astropy.utils.misc import did_you_mean
from . import core, utils
from .base import Base
class CDS(Base):
"""
Support the `Centre de Données astronomiques de Strasbourg
<https://cds.unistra.fr/>`_ `Standards for Astronomical
Catalogues 2.0 <https://vizier.unistra.fr/vizier/doc/catstd-3.2.htx>`_
format, and the `complete set of supported units
<https://vizier.unistra.fr/viz-bin/Unit>`_. This format is used
by VOTable up to version 1.2.
"""
_space = "."
_times = "x"
_scale_unit_separator = ""
_tokens = (
"PRODUCT",
"DIVISION",
"OPEN_PAREN",
"CLOSE_PAREN",
"OPEN_BRACKET",
"CLOSE_BRACKET",
"X",
"SIGN",
"UINT",
"UFLOAT",
"UNIT",
"DIMENSIONLESS",
)
@classproperty(lazy=True)
def _units(cls):
return cls._generate_unit_names()
@classproperty(lazy=True)
def _parser(cls):
return cls._make_parser()
@classproperty(lazy=True)
def _lexer(cls):
return cls._make_lexer()
@staticmethod
def _generate_unit_names():
from astropy import units as u
from astropy.units import cds
names = {}
for key, val in cds.__dict__.items():
if isinstance(val, u.UnitBase):
names[key] = val
return names
@classmethod
def _make_lexer(cls):
tokens = cls._tokens
t_PRODUCT = r"\."
t_DIVISION = r"/"
t_OPEN_PAREN = r"\("
t_CLOSE_PAREN = r"\)"
t_OPEN_BRACKET = r"\["
t_CLOSE_BRACKET = r"\]"
# NOTE THE ORDERING OF THESE RULES IS IMPORTANT!!
# Regular expression rules for simple tokens
def t_UFLOAT(t):
r"((\d+\.?\d+)|(\.\d+))([eE][+-]?\d+)?"
if not re.search(r"[eE\.]", t.value):
t.type = "UINT"
t.value = int(t.value)
else:
t.value = float(t.value)
return t
def t_UINT(t):
r"\d+"
t.value = int(t.value)
return t
def t_SIGN(t):
r"[+-](?=\d)"
t.value = float(t.value + "1")
return t
def t_X(t): # multiplication for factor in front of unit
r"[x×]"
return t
def t_UNIT(t):
r"\%|°|\\h|((?!\d)\w)+"
t.value = cls._get_unit(t)
return t
def t_DIMENSIONLESS(t):
r"---|-"
# These are separate from t_UNIT since they cannot have a prefactor.
t.value = cls._get_unit(t)
return t
t_ignore = ""
# Error handling rule
def t_error(t):
raise ValueError(f"Invalid character at col {t.lexpos}")
return parsing.lex(
lextab="cds_lextab", package="astropy/units", reflags=int(re.UNICODE)
)
@classmethod
def _make_parser(cls):
"""
The grammar here is based on the description in the `Standards
for Astronomical Catalogues 2.0
<https://vizier.unistra.fr/vizier/doc/catstd-3.2.htx>`_, which is not
        terribly precise. The exact grammar here is based on the
YACC grammar in the `unity library <https://purl.org/nxg/dist/unity/>`_.
"""
tokens = cls._tokens
def p_main(p):
"""
main : factor combined_units
| combined_units
| DIMENSIONLESS
| OPEN_BRACKET combined_units CLOSE_BRACKET
| OPEN_BRACKET DIMENSIONLESS CLOSE_BRACKET
| factor
"""
from astropy.units import dex
from astropy.units.core import Unit
if len(p) == 3:
p[0] = Unit(p[1] * p[2])
elif len(p) == 4:
p[0] = dex(p[2])
else:
p[0] = Unit(p[1])
def p_combined_units(p):
"""
combined_units : product_of_units
| division_of_units
"""
p[0] = p[1]
def p_product_of_units(p):
"""
product_of_units : unit_expression PRODUCT combined_units
| unit_expression
"""
if len(p) == 4:
p[0] = p[1] * p[3]
else:
p[0] = p[1]
def p_division_of_units(p):
"""
division_of_units : DIVISION unit_expression
| combined_units DIVISION unit_expression
"""
if len(p) == 3:
p[0] = p[2] ** -1
else:
p[0] = p[1] / p[3]
def p_unit_expression(p):
"""
unit_expression : unit_with_power
| OPEN_PAREN combined_units CLOSE_PAREN
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[2]
def p_factor(p):
"""
factor : signed_float X UINT signed_int
| UINT X UINT signed_int
| UINT signed_int
| UINT
| signed_float
"""
if len(p) == 5:
if p[3] != 10:
raise ValueError("Only base ten exponents are allowed in CDS")
p[0] = p[1] * 10.0 ** p[4]
elif len(p) == 3:
if p[1] != 10:
raise ValueError("Only base ten exponents are allowed in CDS")
p[0] = 10.0 ** p[2]
elif len(p) == 2:
p[0] = p[1]
def p_unit_with_power(p):
"""
unit_with_power : UNIT numeric_power
| UNIT
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[1] ** p[2]
def p_numeric_power(p):
"""
numeric_power : sign UINT
"""
p[0] = p[1] * p[2]
def p_sign(p):
"""
sign : SIGN
|
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1.0
def p_signed_int(p):
"""
signed_int : SIGN UINT
"""
p[0] = p[1] * p[2]
def p_signed_float(p):
"""
signed_float : sign UINT
| sign UFLOAT
"""
p[0] = p[1] * p[2]
def p_error(p):
raise ValueError()
return parsing.yacc(tabmodule="cds_parsetab", package="astropy/units")
@classmethod
def _get_unit(cls, t):
try:
return cls._parse_unit(t.value)
except ValueError as e:
registry = core.get_current_unit_registry()
if t.value in registry.aliases:
return registry.aliases[t.value]
raise ValueError(f"At col {t.lexpos}, {str(e)}")
@classmethod
def _parse_unit(cls, unit, detailed_exception=True):
if unit not in cls._units:
if detailed_exception:
raise ValueError(
"Unit '{}' not supported by the CDS SAC standard. {}".format(
unit, did_you_mean(unit, cls._units)
)
)
else:
raise ValueError()
return cls._units[unit]
@classmethod
def parse(cls, s, debug=False):
if " " in s:
raise ValueError("CDS unit must not contain whitespace")
if not isinstance(s, str):
s = s.decode("ascii")
# This is a short circuit for the case where the string
# is just a single unit name
try:
return cls._parse_unit(s, detailed_exception=False)
except ValueError:
try:
return cls._parser.parse(s, lexer=cls._lexer, debug=debug)
except ValueError as e:
if str(e):
raise ValueError(str(e))
else:
raise ValueError("Syntax error")
@classmethod
def format_exponential_notation(cls, val, format_spec=".8g"):
m, ex = utils.split_mantissa_exponent(val)
parts = []
if m not in ("", "1"):
parts.append(m)
if ex:
if not ex.startswith("-"):
ex = "+" + ex
parts.append(f"10{cls._format_superscript(ex)}")
return cls._times.join(parts)
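    # Illustrative example: with the defaults above, 1.5e11 splits into mantissa
    # "1.5" and exponent "+11", which are joined with the CDS multiplication sign
    # to give "1.5x10+11" -- the same spelling the CDS parser accepts (see the
    # "1.5x10+11m" case in the format tests further down).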
@classmethod
def _format_superscript(cls, number):
return number
@classmethod
def to_string(cls, unit, fraction=False):
# Remove units that aren't known to the format
unit = utils.decompose_to_known_units(unit, cls._get_unit_name)
if not unit.bases:
if unit.scale == 1:
return "---"
elif is_effectively_unity(unit.scale * 100.0):
return "%"
return super().to_string(unit, fraction=fraction)
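# Usage sketch (illustrative; the class is normally reached through
# astropy.units.Unit with format="cds"):
#
#     >>> from astropy import units as u
#     >>> u.Unit("10+3J/m/s/kpc2", format="cds")    # 1000 J / (m s kpc2)
#     >>> u.dimensionless_unscaled.to_string(format="cds")
#     '---'
#
# Both behaviours follow directly from CDS.parse and CDS.to_string above.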
|
8ec757d89766477a1db5f42b3a5a77c9449b27af3b909f3bf21f6d4f35e98693 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Handles the "VOUnit" unit format.
"""
import copy
import keyword
import re
import warnings
from . import core, generic, utils
class VOUnit(generic.Generic):
"""
The IVOA standard for units used by the VO.
This is an implementation of `Units in the VO 1.0
<http://www.ivoa.net/documents/VOUnits/>`_.
"""
_explicit_custom_unit_regex = re.compile(r"^[YZEPTGMkhdcmunpfazy]?'((?!\d)\w)+'$")
_custom_unit_regex = re.compile(r"^((?!\d)\w)+$")
_custom_units = {}
_space = "."
_scale_unit_separator = ""
@staticmethod
def _generate_unit_names():
from astropy import units as u
from astropy.units import required_by_vounit as uvo
names = {}
deprecated_names = set()
# The tropical year is missing here compared to the standard
bases = [
"A", "a", "adu", "arcmin", "arcsec", "barn", "beam", "bin",
"C", "cd", "chan", "count", "ct", "d", "D", "deg", "erg", "eV",
"F", "g", "G", "H", "h", "Hz", "J", "Jy", "K", "lm", "lx", "lyr",
"m", "mag", "min", "mol", "N", "Ohm", "Pa", "pc", "ph", "photon",
"pix", "pixel", "R", "rad", "Ry", "s", "S", "solLum", "solMass",
"solRad", "sr", "T", "u", "V", "voxel", "W", "Wb", "yr",
] # fmt: skip
binary_bases = ["bit", "byte", "B"]
simple_units = ["Angstrom", "angstrom", "AU", "au", "Ba", "dB", "mas"]
si_prefixes = [
"y", "z", "a", "f", "p", "n", "u", "m", "c", "d",
"", "da", "h", "k", "M", "G", "T", "P", "E", "Z", "Y"
] # fmt: skip
# While zebi and yobi are part of the standard for binary prefixes,
# they are not implemented here due to computation limitations
binary_prefixes = ["Ki", "Mi", "Gi", "Ti", "Pi", "Ei"]
deprecated_units = {
"angstrom", "Angstrom", "Ba", "barn", "erg", "G", "ta",
} # fmt: skip
def do_defines(bases, prefixes, skips=[]):
for base in bases:
for prefix in prefixes:
key = prefix + base
if key in skips:
continue
if keyword.iskeyword(key):
continue
names[key] = getattr(u if hasattr(u, key) else uvo, key)
if base in deprecated_units:
deprecated_names.add(key)
do_defines(bases, si_prefixes, ["pct", "pcount", "yd"])
do_defines(binary_bases, si_prefixes + binary_prefixes, ["dB", "dbyte"])
do_defines(simple_units, [""])
return names, deprecated_names, []
@classmethod
def parse(cls, s, debug=False):
if s in ("unknown", "UNKNOWN"):
return None
if s == "":
return core.dimensionless_unscaled
# Check for excess solidi, but exclude fractional exponents (allowed)
if s.count("/") > 1 and s.count("/") - len(re.findall(r"\(\d+/\d+\)", s)) > 1:
raise core.UnitsError(
f"'{s}' contains multiple slashes, which is "
"disallowed by the VOUnit standard."
)
result = cls._do_parse(s, debug=debug)
if hasattr(result, "function_unit"):
raise ValueError("Function units are not yet supported in VOUnit.")
return result
@classmethod
def _get_unit(cls, t):
try:
return super()._get_unit(t)
except ValueError:
if cls._explicit_custom_unit_regex.match(t.value):
return cls._def_custom_unit(t.value)
if cls._custom_unit_regex.match(t.value):
warnings.warn(
f"Unit {t.value!r} not supported by the VOUnit standard. "
+ utils.did_you_mean_units(
t.value,
cls._units,
cls._deprecated_units,
cls._to_decomposed_alternative,
),
core.UnitsWarning,
)
return cls._def_custom_unit(t.value)
raise
@classmethod
def _parse_unit(cls, unit, detailed_exception=True):
if unit not in cls._units:
raise ValueError()
if unit in cls._deprecated_units:
utils.unit_deprecation_warning(
unit, cls._units[unit], "VOUnit", cls._to_decomposed_alternative
)
return cls._units[unit]
@classmethod
def _get_unit_name(cls, unit):
# The da- and d- prefixes are discouraged. This has the
# effect of adding a scale to value in the result.
if isinstance(unit, core.PrefixUnit):
if unit._represents.scale == 10.0:
raise ValueError(
f"In '{unit}': VOUnit can not represent units with the 'da' "
"(deka) prefix"
)
elif unit._represents.scale == 0.1:
raise ValueError(
f"In '{unit}': VOUnit can not represent units with the 'd' "
"(deci) prefix"
)
name = super()._get_unit_name(unit)
if unit in cls._custom_units.values():
return name
if name not in cls._units:
raise ValueError(f"Unit {name!r} is not part of the VOUnit standard")
if name in cls._deprecated_units:
utils.unit_deprecation_warning(
name, unit, "VOUnit", cls._to_decomposed_alternative
)
return name
@classmethod
def _def_custom_unit(cls, unit):
def def_base(name):
if name in cls._custom_units:
return cls._custom_units[name]
if name.startswith("'"):
return core.def_unit(
[name[1:-1], name],
format={"vounit": name},
namespace=cls._custom_units,
)
else:
return core.def_unit(name, namespace=cls._custom_units)
if unit in cls._custom_units:
return cls._custom_units[unit]
for short, full, factor in core.si_prefixes:
for prefix in short:
if unit.startswith(prefix):
base_name = unit[len(prefix) :]
base_unit = def_base(base_name)
return core.PrefixUnit(
[prefix + x for x in base_unit.names],
core.CompositeUnit(
factor, [base_unit], [1], _error_check=False
),
format={"vounit": prefix + base_unit.names[-1]},
namespace=cls._custom_units,
)
return def_base(unit)
@classmethod
def _format_superscript(cls, number):
return f"({number})" if "/" in number or "." in number else f"**{number}"
@classmethod
def format_exponential_notation(cls, val, format_spec=".8g"):
return super().format_exponential_notation(val, format_spec)
@classmethod
def to_string(cls, unit, fraction=False):
from astropy.units import core
# Remove units that aren't known to the format
unit = utils.decompose_to_known_units(unit, cls._get_unit_name)
if unit.physical_type == "dimensionless" and unit.scale != 1:
raise core.UnitScaleError(
"The VOUnit format is not able to "
"represent scale for dimensionless units. "
f"Multiply your data by {unit.scale:e}."
)
return super().to_string(unit, fraction=fraction)
@classmethod
def _to_decomposed_alternative(cls, unit):
from astropy.units import core
try:
s = cls.to_string(unit)
except core.UnitScaleError:
scale = unit.scale
unit = copy.copy(unit)
unit._scale = 1.0
return f"{cls.to_string(unit)} (with data multiplied by {scale})"
return s
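# Usage sketch (illustrative, following the parse/_get_unit logic above):
#
#     >>> from astropy.units import format as u_format
#     >>> u_format.VOUnit.parse("unknown")     # returns None
#     >>> u_format.VOUnit.parse("")            # dimensionless_unscaled
#     >>> u_format.VOUnit.parse("km/s/Mpc")    # UnitsError: multiple slashes
#
# A bare token that is not in _units but matches _custom_unit_regex (for
# example a hypothetical "jansky") would trigger a UnitsWarning and be
# returned as a locally defined custom unit via _def_custom_unit.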
|
10d62bb96deec05f8b46a1bcc0f524e04a62f6f233d0732a8aa36c016581558f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Quantity helpers for the ERFA ufuncs."""
# Tests for these are in coordinates, not in units.
from erfa import dt_eraASTROM, dt_eraLDBODY, dt_pv
from erfa import ufunc as erfa_ufunc
from astropy.units.core import UnitsError, UnitTypeError, dimensionless_unscaled
from astropy.units.structured import StructuredUnit
from . import UFUNC_HELPERS
from .helpers import (
_d,
get_converter,
helper_invariant,
helper_multiplication,
helper_twoarg_invariant,
)
erfa_ufuncs = (
"s2c", "s2p", "c2s", "p2s", "pm", "pdp", "pxp", "rxp", "cpv", "p2pv", "pv2p",
"pv2s", "pvdpv", "pvm", "pvmpv", "pvppv", "pvstar", "pvtob", "pvu", "pvup",
"pvxpv", "rxpv", "s2pv", "s2xpv", "starpv", "sxpv", "trxpv", "gd2gc", "gd2gce",
"gc2gd", "gc2gde", "ldn", "aper", "apio", "atciq", "atciqn", "atciqz", "aticq",
"atioq", "atoiq",
) # fmt: skip
def has_matching_structure(unit, dtype):
dtype_fields = dtype.fields
if dtype_fields:
return (
isinstance(unit, StructuredUnit)
and len(unit) == len(dtype_fields)
and all(
has_matching_structure(u, df_v[0])
for (u, df_v) in zip(unit.values(), dtype_fields.values())
)
)
else:
return not isinstance(unit, StructuredUnit)
def check_structured_unit(unit, dtype):
if not has_matching_structure(unit, dtype):
msg = {dt_pv: "pv", dt_eraLDBODY: "ldbody", dt_eraASTROM: "astrom"}.get(
dtype, "function"
)
raise UnitTypeError(f"{msg} input needs unit matching dtype={dtype}.")
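# Example (illustrative): a position/velocity Quantity typically carries a unit
# like StructuredUnit((u.AU, u.AU / u.day)), which has_matching_structure accepts
# for erfa's dt_pv; a plain unit such as u.m does not match, so
# check_structured_unit raises UnitTypeError("pv input needs unit matching dtype=...").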
def helper_s2c(f, unit1, unit2):
from astropy.units.si import radian
try:
return [
get_converter(unit1, radian),
get_converter(unit2, radian),
], dimensionless_unscaled
except UnitsError:
raise UnitTypeError(
f"Can only apply '{f.__name__}' function to quantities with angle units"
)
def helper_s2p(f, unit1, unit2, unit3):
from astropy.units.si import radian
try:
return [get_converter(unit1, radian), get_converter(unit2, radian), None], unit3
except UnitsError:
raise UnitTypeError(
f"Can only apply '{f.__name__}' function to quantities with angle units"
)
def helper_c2s(f, unit1):
from astropy.units.si import radian
return [None], (radian, radian)
def helper_p2s(f, unit1):
from astropy.units.si import radian
return [None], (radian, radian, unit1)
def helper_gc2gd(f, nounit, unit1):
from astropy.units.si import m, radian
if nounit is not None:
raise UnitTypeError("ellipsoid cannot be a quantity.")
try:
return [None, get_converter(unit1, m)], (radian, radian, m, None)
except UnitsError:
raise UnitTypeError(
f"Can only apply '{f.__name__}' function to quantities with length units"
)
def helper_gc2gde(f, unit_r, unit_flat, unit_xyz):
from astropy.units.si import m, radian
return [
get_converter(unit_r, m),
get_converter(_d(unit_flat), dimensionless_unscaled),
get_converter(unit_xyz, m),
], (
radian,
radian,
m,
None,
)
def helper_gd2gc(f, nounit, unit1, unit2, unit3):
from astropy.units.si import m, radian
if nounit is not None:
raise UnitTypeError("ellipsoid cannot be a quantity.")
try:
return [
None,
get_converter(unit1, radian),
get_converter(unit2, radian),
get_converter(unit3, m),
], (m, None)
except UnitsError:
raise UnitTypeError(
f"Can only apply '{f.__name__}' function to lon, lat "
"with angle and height with length units"
)
def helper_gd2gce(f, unit_r, unit_flat, unit_long, unit_lat, unit_h):
from astropy.units.si import m, radian
return [
get_converter(unit_r, m),
get_converter(_d(unit_flat), dimensionless_unscaled),
get_converter(unit_long, radian),
get_converter(unit_lat, radian),
get_converter(unit_h, m),
], (m, None)
def helper_p2pv(f, unit1):
from astropy.units.si import s
if isinstance(unit1, StructuredUnit):
raise UnitTypeError("p vector unit cannot be a structured unit.")
return [None], StructuredUnit((unit1, unit1 / s))
def helper_pv2p(f, unit1):
check_structured_unit(unit1, dt_pv)
return [None], unit1[0]
def helper_pv2s(f, unit_pv):
from astropy.units.si import radian
check_structured_unit(unit_pv, dt_pv)
ang_unit = radian * unit_pv[1] / unit_pv[0]
return [None], (radian, radian, unit_pv[0], ang_unit, ang_unit, unit_pv[1])
def helper_s2pv(f, unit_theta, unit_phi, unit_r, unit_td, unit_pd, unit_rd):
from astropy.units.si import radian
time_unit = unit_r / unit_rd
return [
get_converter(unit_theta, radian),
get_converter(unit_phi, radian),
None,
get_converter(unit_td, radian / time_unit),
get_converter(unit_pd, radian / time_unit),
None,
], StructuredUnit((unit_r, unit_rd))
def helper_pv_multiplication(f, unit1, unit2):
check_structured_unit(unit1, dt_pv)
check_structured_unit(unit2, dt_pv)
result_unit = StructuredUnit((unit1[0] * unit2[0], unit1[1] * unit2[0]))
converter = get_converter(
unit2, StructuredUnit((unit2[0], unit1[1] * unit2[0] / unit1[0]))
)
return [None, converter], result_unit
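# For instance, pvdpv with both operands in (AU, AU / day) leaves the second
# operand unchanged by the converter and produces a structured result unit of
# (AU**2, AU**2 / day), i.e. the dot product and its time derivative.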
def helper_pvm(f, unit1):
check_structured_unit(unit1, dt_pv)
return [None], (unit1[0], unit1[1])
def helper_pvstar(f, unit1):
from astropy.units.astrophys import AU
from astropy.units.si import arcsec, day, km, radian, s, year
return [get_converter(unit1, StructuredUnit((AU, AU / day)))], (
radian,
radian,
radian / year,
radian / year,
arcsec,
km / s,
None,
)
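# The outputs correspond to eraPvstar's catalog quantities (ra, dec, pmr, pmd,
# px, rv) plus a unitless status flag, once the pv input is in (AU, AU / day).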
def helper_starpv(f, unit_ra, unit_dec, unit_pmr, unit_pmd, unit_px, unit_rv):
from astropy.units.astrophys import AU
from astropy.units.si import arcsec, day, km, radian, s, year
return [
get_converter(unit_ra, radian),
get_converter(unit_dec, radian),
get_converter(unit_pmr, radian / year),
get_converter(unit_pmd, radian / year),
get_converter(unit_px, arcsec),
get_converter(unit_rv, km / s),
], (StructuredUnit((AU, AU / day)), None)
def helper_pvtob(
f, unit_elong, unit_phi, unit_hm, unit_xp, unit_yp, unit_sp, unit_theta
):
from astropy.units.si import m, radian, s
return [
get_converter(unit_elong, radian),
get_converter(unit_phi, radian),
get_converter(unit_hm, m),
get_converter(unit_xp, radian),
get_converter(unit_yp, radian),
get_converter(unit_sp, radian),
get_converter(unit_theta, radian),
], StructuredUnit((m, m / s))
def helper_pvu(f, unit_t, unit_pv):
check_structured_unit(unit_pv, dt_pv)
return [get_converter(unit_t, unit_pv[0] / unit_pv[1]), None], unit_pv
def helper_pvup(f, unit_t, unit_pv):
check_structured_unit(unit_pv, dt_pv)
return [get_converter(unit_t, unit_pv[0] / unit_pv[1]), None], unit_pv[0]
def helper_s2xpv(f, unit1, unit2, unit_pv):
check_structured_unit(unit_pv, dt_pv)
return [None, None, None], StructuredUnit(
(_d(unit1) * unit_pv[0], _d(unit2) * unit_pv[1])
)
def ldbody_unit():
from astropy.units.astrophys import AU, Msun
from astropy.units.si import day, radian
return StructuredUnit((Msun, radian, (AU, AU / day)), erfa_ufunc.dt_eraLDBODY)
def astrom_unit():
from astropy.units.astrophys import AU
from astropy.units.si import rad, year
one = rel2c = dimensionless_unscaled
return StructuredUnit(
(
year,
AU,
one,
AU,
rel2c,
one,
one,
rad,
rad,
rad,
rad,
one,
one,
rel2c,
rad,
rad,
rad,
),
erfa_ufunc.dt_eraASTROM,
)
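# The tuple above mirrors the field order of the eraASTROM struct (e.g. pmt in
# Julian years, eb in AU, the velocity in units of c); helper_aper below relies
# on "along" being field 7 and "eral" field 14 of this StructuredUnit.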
def helper_ldn(f, unit_b, unit_ob, unit_sc):
from astropy.units.astrophys import AU
return [
get_converter(unit_b, ldbody_unit()),
get_converter(unit_ob, AU),
get_converter(_d(unit_sc), dimensionless_unscaled),
], dimensionless_unscaled
def helper_aper(f, unit_theta, unit_astrom):
check_structured_unit(unit_astrom, dt_eraASTROM)
unit_along = unit_astrom[7] # along
if unit_astrom[14] is unit_along: # eral
result_unit = unit_astrom
else:
result_units = tuple(
(unit_along if i == 14 else v) for i, v in enumerate(unit_astrom.values())
)
result_unit = unit_astrom.__class__(result_units, names=unit_astrom)
return [get_converter(unit_theta, unit_along), None], result_unit
def helper_apio(
f,
unit_sp,
unit_theta,
unit_elong,
unit_phi,
unit_hm,
unit_xp,
unit_yp,
unit_refa,
unit_refb,
):
from astropy.units.si import m, radian
return [
get_converter(unit_sp, radian),
get_converter(unit_theta, radian),
get_converter(unit_elong, radian),
get_converter(unit_phi, radian),
get_converter(unit_hm, m),
get_converter(unit_xp, radian),
        get_converter(unit_yp, radian),
        get_converter(unit_refa, radian),
        get_converter(unit_refb, radian),
], astrom_unit()
def helper_atciq(f, unit_rc, unit_dc, unit_pr, unit_pd, unit_px, unit_rv, unit_astrom):
from astropy.units.si import arcsec, km, radian, s, year
return [
get_converter(unit_rc, radian),
get_converter(unit_dc, radian),
get_converter(unit_pr, radian / year),
get_converter(unit_pd, radian / year),
get_converter(unit_px, arcsec),
get_converter(unit_rv, km / s),
get_converter(unit_astrom, astrom_unit()),
], (radian, radian)
def helper_atciqn(
f, unit_rc, unit_dc, unit_pr, unit_pd, unit_px, unit_rv, unit_astrom, unit_b
):
from astropy.units.si import arcsec, km, radian, s, year
return [
get_converter(unit_rc, radian),
get_converter(unit_dc, radian),
get_converter(unit_pr, radian / year),
get_converter(unit_pd, radian / year),
get_converter(unit_px, arcsec),
get_converter(unit_rv, km / s),
get_converter(unit_astrom, astrom_unit()),
get_converter(unit_b, ldbody_unit()),
], (radian, radian)
def helper_atciqz_aticq(f, unit_rc, unit_dc, unit_astrom):
from astropy.units.si import radian
return [
get_converter(unit_rc, radian),
get_converter(unit_dc, radian),
get_converter(unit_astrom, astrom_unit()),
], (radian, radian)
def helper_aticqn(f, unit_rc, unit_dc, unit_astrom, unit_b):
from astropy.units.si import radian
return [
get_converter(unit_rc, radian),
get_converter(unit_dc, radian),
get_converter(unit_astrom, astrom_unit()),
get_converter(unit_b, ldbody_unit()),
], (radian, radian)
def helper_atioq(f, unit_rc, unit_dc, unit_astrom):
from astropy.units.si import radian
return [
get_converter(unit_rc, radian),
get_converter(unit_dc, radian),
get_converter(unit_astrom, astrom_unit()),
], (radian,) * 5
def helper_atoiq(f, unit_type, unit_ri, unit_di, unit_astrom):
from astropy.units.si import radian
if unit_type is not None:
raise UnitTypeError("argument 'type' should not have a unit")
return [
None,
get_converter(unit_ri, radian),
get_converter(unit_di, radian),
get_converter(unit_astrom, astrom_unit()),
], (radian, radian)
def get_erfa_helpers():
ERFA_HELPERS = {}
ERFA_HELPERS[erfa_ufunc.s2c] = helper_s2c
ERFA_HELPERS[erfa_ufunc.s2p] = helper_s2p
ERFA_HELPERS[erfa_ufunc.c2s] = helper_c2s
ERFA_HELPERS[erfa_ufunc.p2s] = helper_p2s
ERFA_HELPERS[erfa_ufunc.pm] = helper_invariant
ERFA_HELPERS[erfa_ufunc.cpv] = helper_invariant
ERFA_HELPERS[erfa_ufunc.p2pv] = helper_p2pv
ERFA_HELPERS[erfa_ufunc.pv2p] = helper_pv2p
ERFA_HELPERS[erfa_ufunc.pv2s] = helper_pv2s
ERFA_HELPERS[erfa_ufunc.pvdpv] = helper_pv_multiplication
ERFA_HELPERS[erfa_ufunc.pvxpv] = helper_pv_multiplication
ERFA_HELPERS[erfa_ufunc.pvm] = helper_pvm
ERFA_HELPERS[erfa_ufunc.pvmpv] = helper_twoarg_invariant
ERFA_HELPERS[erfa_ufunc.pvppv] = helper_twoarg_invariant
ERFA_HELPERS[erfa_ufunc.pvstar] = helper_pvstar
ERFA_HELPERS[erfa_ufunc.pvtob] = helper_pvtob
ERFA_HELPERS[erfa_ufunc.pvu] = helper_pvu
ERFA_HELPERS[erfa_ufunc.pvup] = helper_pvup
ERFA_HELPERS[erfa_ufunc.pdp] = helper_multiplication
ERFA_HELPERS[erfa_ufunc.pxp] = helper_multiplication
ERFA_HELPERS[erfa_ufunc.rxp] = helper_multiplication
ERFA_HELPERS[erfa_ufunc.rxpv] = helper_multiplication
ERFA_HELPERS[erfa_ufunc.s2pv] = helper_s2pv
ERFA_HELPERS[erfa_ufunc.s2xpv] = helper_s2xpv
ERFA_HELPERS[erfa_ufunc.starpv] = helper_starpv
ERFA_HELPERS[erfa_ufunc.sxpv] = helper_multiplication
ERFA_HELPERS[erfa_ufunc.trxpv] = helper_multiplication
ERFA_HELPERS[erfa_ufunc.gc2gd] = helper_gc2gd
ERFA_HELPERS[erfa_ufunc.gc2gde] = helper_gc2gde
ERFA_HELPERS[erfa_ufunc.gd2gc] = helper_gd2gc
ERFA_HELPERS[erfa_ufunc.gd2gce] = helper_gd2gce
ERFA_HELPERS[erfa_ufunc.ldn] = helper_ldn
ERFA_HELPERS[erfa_ufunc.aper] = helper_aper
ERFA_HELPERS[erfa_ufunc.apio] = helper_apio
ERFA_HELPERS[erfa_ufunc.atciq] = helper_atciq
ERFA_HELPERS[erfa_ufunc.atciqn] = helper_atciqn
ERFA_HELPERS[erfa_ufunc.atciqz] = helper_atciqz_aticq
ERFA_HELPERS[erfa_ufunc.aticq] = helper_atciqz_aticq
ERFA_HELPERS[erfa_ufunc.aticqn] = helper_aticqn
ERFA_HELPERS[erfa_ufunc.atioq] = helper_atioq
ERFA_HELPERS[erfa_ufunc.atoiq] = helper_atoiq
return ERFA_HELPERS
UFUNC_HELPERS.register_module("erfa.ufunc", erfa_ufuncs, get_erfa_helpers)
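# Once registered, Quantity arguments to the listed erfa ufuncs are converted
# automatically; e.g. erfa_ufunc.s2c(30 * u.deg, 60 * u.deg) has both angles
# converted to radian by helper_s2c and returns a dimensionless Cartesian unit
# vector (an illustrative sketch of the mechanism, not an exhaustive list).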
|
e56c54fce00c50d9b7953599344c2bde8820a95fba7a7689d61d7ba390c4ebdc | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Separate tests specifically for equivalencies."""
import numpy as np
# THIRD-PARTY
import pytest
from numpy.testing import assert_allclose
# LOCAL
from astropy import constants
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.units.equivalencies import Equivalency
from astropy.utils.exceptions import AstropyDeprecationWarning
def test_dimensionless_angles():
# test that the angles_dimensionless option allows one to change
# by any order in radian in the unit (#1161)
rad1 = u.dimensionless_angles()
assert u.radian.to(1, equivalencies=rad1) == 1.0
assert u.deg.to(1, equivalencies=rad1) == u.deg.to(u.rad)
assert u.steradian.to(1, equivalencies=rad1) == 1.0
assert u.dimensionless_unscaled.to(u.steradian, equivalencies=rad1) == 1.0
# now quantities
assert (1.0 * u.radian).to_value(1, equivalencies=rad1) == 1.0
assert (1.0 * u.deg).to_value(1, equivalencies=rad1) == u.deg.to(u.rad)
assert (1.0 * u.steradian).to_value(1, equivalencies=rad1) == 1.0
# more complicated example
I = 1.0e45 * u.g * u.cm**2
Omega = u.cycle / (1.0 * u.s)
Erot = 0.5 * I * Omega**2
# check that equivalency makes this work
Erot_in_erg1 = Erot.to(u.erg, equivalencies=rad1)
# and check that value is correct
assert_allclose(Erot_in_erg1.value, (Erot / u.radian**2).to_value(u.erg))
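    # numerically: 0.5 * 1e45 g cm**2 * (2*pi rad / s)**2 ~= 2.0e46 erg once the
    # cycle is interpreted as 2*pi radians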
# test built-in equivalency in subclass
class MyRad1(u.Quantity):
_equivalencies = rad1
phase = MyRad1(1.0, u.cycle)
assert phase.to_value(1) == u.cycle.to(u.radian)
@pytest.mark.parametrize("log_unit", (u.mag, u.dex, u.dB))
def test_logarithmic(log_unit):
# check conversion of mag, dB, and dex to dimensionless and vice versa
with pytest.raises(u.UnitsError):
log_unit.to(1, 0.0)
with pytest.raises(u.UnitsError):
u.dimensionless_unscaled.to(log_unit)
assert log_unit.to(1, 0.0, equivalencies=u.logarithmic()) == 1.0
assert u.dimensionless_unscaled.to(log_unit, equivalencies=u.logarithmic()) == 0.0
# also try with quantities
q_dex = np.array([0.0, -1.0, 1.0, 2.0]) * u.dex
q_expected = 10.0**q_dex.value * u.dimensionless_unscaled
q_log_unit = q_dex.to(log_unit)
assert np.all(q_log_unit.to(1, equivalencies=u.logarithmic()) == q_expected)
assert np.all(q_expected.to(log_unit, equivalencies=u.logarithmic()) == q_log_unit)
with u.set_enabled_equivalencies(u.logarithmic()):
assert np.all(np.abs(q_log_unit - q_expected.to(log_unit)) < 1.0e-10 * log_unit)
doppler_functions = [u.doppler_optical, u.doppler_radio, u.doppler_relativistic]
@pytest.mark.parametrize("function", doppler_functions)
def test_doppler_frequency_0(function):
rest = 105.01 * u.GHz
velo0 = rest.to(u.km / u.s, equivalencies=function(rest))
assert velo0.value == 0
@pytest.mark.parametrize("function", doppler_functions)
def test_doppler_wavelength_0(function):
rest = 105.01 * u.GHz
q1 = 0.00285489437196 * u.m
velo0 = q1.to(u.km / u.s, equivalencies=function(rest))
np.testing.assert_almost_equal(velo0.value, 0, decimal=6)
@pytest.mark.parametrize("function", doppler_functions)
def test_doppler_energy_0(function):
rest = 105.01 * u.GHz
q1 = 0.0004342864648539744 * u.eV
velo0 = q1.to(u.km / u.s, equivalencies=function(rest))
np.testing.assert_almost_equal(velo0.value, 0, decimal=6)
@pytest.mark.parametrize("function", doppler_functions)
def test_doppler_frequency_circle(function):
rest = 105.01 * u.GHz
shifted = 105.03 * u.GHz
velo = shifted.to(u.km / u.s, equivalencies=function(rest))
freq = velo.to(u.GHz, equivalencies=function(rest))
np.testing.assert_almost_equal(freq.value, shifted.value, decimal=7)
@pytest.mark.parametrize("function", doppler_functions)
def test_doppler_wavelength_circle(function):
rest = 105.01 * u.nm
shifted = 105.03 * u.nm
velo = shifted.to(u.km / u.s, equivalencies=function(rest))
wav = velo.to(u.nm, equivalencies=function(rest))
np.testing.assert_almost_equal(wav.value, shifted.value, decimal=7)
@pytest.mark.parametrize("function", doppler_functions)
def test_doppler_energy_circle(function):
rest = 1.0501 * u.eV
shifted = 1.0503 * u.eV
velo = shifted.to(u.km / u.s, equivalencies=function(rest))
en = velo.to(u.eV, equivalencies=function(rest))
np.testing.assert_almost_equal(en.value, shifted.value, decimal=7)
values_ghz = (999.899940784289, 999.8999307714406, 999.8999357778647)
@pytest.mark.parametrize(
("function", "value"), list(zip(doppler_functions, values_ghz))
)
def test_30kms(function, value):
rest = 1000 * u.GHz
velo = 30 * u.km / u.s
shifted = velo.to(u.GHz, equivalencies=function(rest))
np.testing.assert_almost_equal(shifted.value, value, decimal=7)
bad_values = (5, 5 * u.Jy, None)
@pytest.mark.parametrize(
("function", "value"), list(zip(doppler_functions, bad_values))
)
def test_bad_restfreqs(function, value):
with pytest.raises(u.UnitsError):
function(value)
@pytest.mark.parametrize(
("z", "rv_ans"),
[
(0, 0 * (u.km / u.s)),
(0.001, 299642.56184583 * (u.m / u.s)),
(-1, -2.99792458e8 * (u.m / u.s)),
],
)
def test_doppler_redshift(z, rv_ans):
z_in = z * u.dimensionless_unscaled
rv_out = z_in.to(u.km / u.s, u.doppler_redshift())
z_out = rv_out.to(u.dimensionless_unscaled, u.doppler_redshift())
assert_quantity_allclose(rv_out, rv_ans)
assert_quantity_allclose(z_out, z_in) # Check roundtrip
def test_doppler_redshift_no_cosmology():
from astropy.cosmology.units import redshift
with pytest.raises(u.UnitConversionError, match="not convertible"):
(0 * (u.km / u.s)).to(redshift, u.doppler_redshift())
def test_massenergy():
# The relative tolerance of these tests is set by the uncertainties
# in the charge of the electron, which is known to about
# 3e-9 (relative tolerance). Therefore, we limit the
# precision of the tests to 1e-7 to be safe. The masses are
# (loosely) known to ~ 5e-8 rel tolerance, so we couldn't test to
# 1e-7 if we used the values from astropy.constants; that is,
# they might change by more than 1e-7 in some future update, so instead
# they are hardwired here.
# Electron, proton, neutron, muon, 1g
mass_eV = u.Quantity(
[510.998928e3, 938.272046e6, 939.565378e6, 105.6583715e6, 5.60958884539e32],
u.eV,
)
mass_g = u.Quantity(
[9.10938291e-28, 1.672621777e-24, 1.674927351e-24, 1.88353147e-25, 1], u.g
)
# Test both ways
assert np.allclose(
mass_eV.to_value(u.g, equivalencies=u.mass_energy()), mass_g.value, rtol=1e-7
)
assert np.allclose(
mass_g.to_value(u.eV, equivalencies=u.mass_energy()), mass_eV.value, rtol=1e-7
)
# Basic tests of 'derived' equivalencies
# Surface density
sdens_eV = u.Quantity(5.60958884539e32, u.eV / u.m**2)
sdens_g = u.Quantity(1e-4, u.g / u.cm**2)
assert np.allclose(
sdens_eV.to_value(u.g / u.cm**2, equivalencies=u.mass_energy()),
sdens_g.value,
rtol=1e-7,
)
assert np.allclose(
sdens_g.to_value(u.eV / u.m**2, equivalencies=u.mass_energy()),
sdens_eV.value,
rtol=1e-7,
)
# Density
dens_eV = u.Quantity(5.60958884539e32, u.eV / u.m**3)
dens_g = u.Quantity(1e-6, u.g / u.cm**3)
assert np.allclose(
dens_eV.to_value(u.g / u.cm**3, equivalencies=u.mass_energy()),
dens_g.value,
rtol=1e-7,
)
assert np.allclose(
dens_g.to_value(u.eV / u.m**3, equivalencies=u.mass_energy()),
dens_eV.value,
rtol=1e-7,
)
# Power
pow_eV = u.Quantity(5.60958884539e32, u.eV / u.s)
pow_g = u.Quantity(1, u.g / u.s)
assert np.allclose(
pow_eV.to_value(u.g / u.s, equivalencies=u.mass_energy()),
pow_g.value,
rtol=1e-7,
)
assert np.allclose(
pow_g.to_value(u.eV / u.s, equivalencies=u.mass_energy()),
pow_eV.value,
rtol=1e-7,
)
def test_is_equivalent():
assert u.m.is_equivalent(u.pc)
assert u.cycle.is_equivalent(u.mas)
assert not u.cycle.is_equivalent(u.dimensionless_unscaled)
assert u.cycle.is_equivalent(u.dimensionless_unscaled, u.dimensionless_angles())
assert not (u.Hz.is_equivalent(u.J))
assert u.Hz.is_equivalent(u.J, u.spectral())
assert u.J.is_equivalent(u.Hz, u.spectral())
assert u.pc.is_equivalent(u.arcsecond, u.parallax())
assert u.arcminute.is_equivalent(u.au, u.parallax())
# Pass a tuple for multiple possibilities
assert u.cm.is_equivalent((u.m, u.s, u.kg))
assert u.ms.is_equivalent((u.m, u.s, u.kg))
assert u.g.is_equivalent((u.m, u.s, u.kg))
assert not u.L.is_equivalent((u.m, u.s, u.kg))
assert not (u.km / u.s).is_equivalent((u.m, u.s, u.kg))
def test_parallax():
a = u.arcsecond.to(u.pc, 10, u.parallax())
assert_allclose(a, 0.10, rtol=1.0e-12)
b = u.pc.to(u.arcsecond, a, u.parallax())
assert_allclose(b, 10, rtol=1.0e-12)
a = u.arcminute.to(u.au, 1, u.parallax())
assert_allclose(a, 3437.746770785, rtol=1.0e-12)
b = u.au.to(u.arcminute, a, u.parallax())
assert_allclose(b, 1, rtol=1.0e-12)
val = (-1 * u.mas).to(u.pc, u.parallax())
assert np.isnan(val.value)
val = (-1 * u.mas).to_value(u.pc, u.parallax())
assert np.isnan(val)
def test_parallax2():
a = u.arcsecond.to(u.pc, [0.1, 2.5], u.parallax())
assert_allclose(a, [10, 0.4], rtol=1.0e-12)
def test_spectral():
a = u.AA.to(u.Hz, 1, u.spectral())
assert_allclose(a, 2.9979245799999995e18)
b = u.Hz.to(u.AA, a, u.spectral())
assert_allclose(b, 1)
a = u.AA.to(u.MHz, 1, u.spectral())
assert_allclose(a, 2.9979245799999995e12)
b = u.MHz.to(u.AA, a, u.spectral())
assert_allclose(b, 1)
a = u.m.to(u.Hz, 1, u.spectral())
assert_allclose(a, 2.9979245799999995e8)
b = u.Hz.to(u.m, a, u.spectral())
assert_allclose(b, 1)
def test_spectral2():
a = u.nm.to(u.J, 500, u.spectral())
assert_allclose(a, 3.972891366538605e-19)
b = u.J.to(u.nm, a, u.spectral())
assert_allclose(b, 500)
a = u.AA.to(u.Hz, 1, u.spectral())
b = u.Hz.to(u.J, a, u.spectral())
c = u.AA.to(u.J, 1, u.spectral())
assert_allclose(b, c)
c = u.J.to(u.Hz, b, u.spectral())
assert_allclose(a, c)
def test_spectral3():
a = u.nm.to(u.Hz, [1000, 2000], u.spectral())
assert_allclose(a, [2.99792458e14, 1.49896229e14])
@pytest.mark.parametrize(
("in_val", "in_unit"),
[
([0.1, 5000.0, 10000.0], u.AA),
([1e5, 2.0, 1.0], u.micron**-1),
([2.99792458e19, 5.99584916e14, 2.99792458e14], u.Hz),
([1.98644568e-14, 3.97289137e-19, 1.98644568e-19], u.J),
],
)
def test_spectral4(in_val, in_unit):
"""Wave number conversion w.r.t. wavelength, freq, and energy."""
# Spectroscopic and angular
out_units = [u.micron**-1, u.radian / u.micron]
answers = [[1e5, 2.0, 1.0], [6.28318531e05, 12.5663706, 6.28318531]]
for out_unit, ans in zip(out_units, answers):
# Forward
a = in_unit.to(out_unit, in_val, u.spectral())
assert_allclose(a, ans)
# Backward
b = out_unit.to(in_unit, ans, u.spectral())
assert_allclose(b, in_val)
@pytest.mark.parametrize(
"wav", (3500 * u.AA, 8.5654988e14 * u.Hz, 1 / (3500 * u.AA), 5.67555959e-19 * u.J)
)
def test_spectraldensity2(wav):
# flux density
flambda = u.erg / u.angstrom / u.cm**2 / u.s
fnu = u.erg / u.Hz / u.cm**2 / u.s
a = flambda.to(fnu, 1, u.spectral_density(wav))
assert_allclose(a, 4.086160166177361e-12)
# integrated flux
f_int = u.erg / u.cm**2 / u.s
phot_int = u.ph / u.cm**2 / u.s
a = f_int.to(phot_int, 1, u.spectral_density(wav))
assert_allclose(a, 1.7619408e11)
a = phot_int.to(f_int, 1, u.spectral_density(wav))
assert_allclose(a, 5.67555959e-12)
# luminosity density
llambda = u.erg / u.angstrom / u.s
lnu = u.erg / u.Hz / u.s
a = llambda.to(lnu, 1, u.spectral_density(wav))
assert_allclose(a, 4.086160166177361e-12)
a = lnu.to(llambda, 1, u.spectral_density(wav))
assert_allclose(a, 2.44728537142857e11)
def test_spectraldensity3():
# Define F_nu in Jy
f_nu = u.Jy
# Define F_lambda in ergs / cm^2 / s / micron
f_lambda = u.erg / u.cm**2 / u.s / u.micron
# 1 GHz
one_ghz = u.Quantity(1, u.GHz)
# Convert to ergs / cm^2 / s / Hz
assert_allclose(f_nu.to(u.erg / u.cm**2 / u.s / u.Hz, 1.0), 1.0e-23, 10)
# Convert to ergs / cm^2 / s at 10 Ghz
assert_allclose(
f_nu.to(
u.erg / u.cm**2 / u.s, 1.0, equivalencies=u.spectral_density(one_ghz * 10)
),
1.0e-13,
)
# Convert to F_lambda at 1 Ghz
assert_allclose(
f_nu.to(f_lambda, 1.0, equivalencies=u.spectral_density(one_ghz)),
3.335640951981521e-20,
)
# Convert to Jy at 1 Ghz
assert_allclose(
f_lambda.to(u.Jy, 1.0, equivalencies=u.spectral_density(one_ghz)),
1.0 / 3.335640951981521e-20,
)
# Convert to ergs / cm^2 / s at 10 microns
assert_allclose(
f_lambda.to(
u.erg / u.cm**2 / u.s,
1.0,
equivalencies=u.spectral_density(u.Quantity(10, u.micron)),
),
10.0,
)
def test_spectraldensity4():
"""PHOTLAM and PHOTNU conversions."""
flam = u.erg / (u.cm**2 * u.s * u.AA)
fnu = u.erg / (u.cm**2 * u.s * u.Hz)
photlam = u.photon / (u.cm**2 * u.s * u.AA)
photnu = u.photon / (u.cm**2 * u.s * u.Hz)
wave = u.Quantity([4956.8, 4959.55, 4962.3], u.AA)
flux_photlam = [9.7654e-3, 1.003896e-2, 9.78473e-3]
flux_photnu = [8.00335589e-14, 8.23668949e-14, 8.03700310e-14]
flux_flam = [3.9135e-14, 4.0209e-14, 3.9169e-14]
flux_fnu = [3.20735792e-25, 3.29903646e-25, 3.21727226e-25]
flux_jy = [3.20735792e-2, 3.29903646e-2, 3.21727226e-2]
flux_stmag = [12.41858665, 12.38919182, 12.41764379]
flux_abmag = [12.63463143, 12.60403221, 12.63128047]
# PHOTLAM <--> FLAM
assert_allclose(
photlam.to(flam, flux_photlam, u.spectral_density(wave)), flux_flam, rtol=1e-6
)
assert_allclose(
flam.to(photlam, flux_flam, u.spectral_density(wave)), flux_photlam, rtol=1e-6
)
# PHOTLAM <--> FNU
assert_allclose(
photlam.to(fnu, flux_photlam, u.spectral_density(wave)), flux_fnu, rtol=1e-6
)
assert_allclose(
fnu.to(photlam, flux_fnu, u.spectral_density(wave)), flux_photlam, rtol=1e-6
)
# PHOTLAM <--> Jy
assert_allclose(
photlam.to(u.Jy, flux_photlam, u.spectral_density(wave)), flux_jy, rtol=1e-6
)
assert_allclose(
u.Jy.to(photlam, flux_jy, u.spectral_density(wave)), flux_photlam, rtol=1e-6
)
# PHOTLAM <--> PHOTNU
assert_allclose(
photlam.to(photnu, flux_photlam, u.spectral_density(wave)),
flux_photnu,
rtol=1e-6,
)
assert_allclose(
photnu.to(photlam, flux_photnu, u.spectral_density(wave)),
flux_photlam,
rtol=1e-6,
)
# PHOTNU <--> FNU
assert_allclose(
photnu.to(fnu, flux_photnu, u.spectral_density(wave)), flux_fnu, rtol=1e-6
)
assert_allclose(
fnu.to(photnu, flux_fnu, u.spectral_density(wave)), flux_photnu, rtol=1e-6
)
# PHOTNU <--> FLAM
assert_allclose(
photnu.to(flam, flux_photnu, u.spectral_density(wave)), flux_flam, rtol=1e-6
)
assert_allclose(
flam.to(photnu, flux_flam, u.spectral_density(wave)), flux_photnu, rtol=1e-6
)
# PHOTLAM <--> STMAG
assert_allclose(
photlam.to(u.STmag, flux_photlam, u.spectral_density(wave)),
flux_stmag,
rtol=1e-6,
)
assert_allclose(
u.STmag.to(photlam, flux_stmag, u.spectral_density(wave)),
flux_photlam,
rtol=1e-6,
)
# PHOTLAM <--> ABMAG
assert_allclose(
photlam.to(u.ABmag, flux_photlam, u.spectral_density(wave)),
flux_abmag,
rtol=1e-6,
)
assert_allclose(
u.ABmag.to(photlam, flux_abmag, u.spectral_density(wave)),
flux_photlam,
rtol=1e-6,
)
def test_spectraldensity5():
"""Test photon luminosity density conversions."""
L_la = u.erg / (u.s * u.AA)
L_nu = u.erg / (u.s * u.Hz)
phot_L_la = u.photon / (u.s * u.AA)
phot_L_nu = u.photon / (u.s * u.Hz)
wave = u.Quantity([4956.8, 4959.55, 4962.3], u.AA)
flux_phot_L_la = [9.7654e-3, 1.003896e-2, 9.78473e-3]
flux_phot_L_nu = [8.00335589e-14, 8.23668949e-14, 8.03700310e-14]
flux_L_la = [3.9135e-14, 4.0209e-14, 3.9169e-14]
flux_L_nu = [3.20735792e-25, 3.29903646e-25, 3.21727226e-25]
    # phot_L_la <--> L_la
assert_allclose(
phot_L_la.to(L_la, flux_phot_L_la, u.spectral_density(wave)),
flux_L_la,
rtol=1e-6,
)
assert_allclose(
L_la.to(phot_L_la, flux_L_la, u.spectral_density(wave)),
flux_phot_L_la,
rtol=1e-6,
)
    # phot_L_la <--> L_nu
assert_allclose(
phot_L_la.to(L_nu, flux_phot_L_la, u.spectral_density(wave)),
flux_L_nu,
rtol=1e-6,
)
assert_allclose(
L_nu.to(phot_L_la, flux_L_nu, u.spectral_density(wave)),
flux_phot_L_la,
rtol=1e-6,
)
    # phot_L_la <--> phot_L_nu
assert_allclose(
phot_L_la.to(phot_L_nu, flux_phot_L_la, u.spectral_density(wave)),
flux_phot_L_nu,
rtol=1e-6,
)
assert_allclose(
phot_L_nu.to(phot_L_la, flux_phot_L_nu, u.spectral_density(wave)),
flux_phot_L_la,
rtol=1e-6,
)
    # phot_L_nu <--> L_nu
assert_allclose(
phot_L_nu.to(L_nu, flux_phot_L_nu, u.spectral_density(wave)),
flux_L_nu,
rtol=1e-6,
)
assert_allclose(
L_nu.to(phot_L_nu, flux_L_nu, u.spectral_density(wave)),
flux_phot_L_nu,
rtol=1e-6,
)
    # phot_L_nu <--> L_la
assert_allclose(
phot_L_nu.to(L_la, flux_phot_L_nu, u.spectral_density(wave)),
flux_L_la,
rtol=1e-6,
)
assert_allclose(
L_la.to(phot_L_nu, flux_L_la, u.spectral_density(wave)),
flux_phot_L_nu,
rtol=1e-6,
)
def test_spectraldensity6():
"""Test surface brightness conversions."""
slam = u.erg / (u.cm**2 * u.s * u.AA * u.sr)
snu = u.erg / (u.cm**2 * u.s * u.Hz * u.sr)
wave = u.Quantity([4956.8, 4959.55, 4962.3], u.AA)
sb_flam = [3.9135e-14, 4.0209e-14, 3.9169e-14]
sb_fnu = [3.20735792e-25, 3.29903646e-25, 3.21727226e-25]
# S(nu) <--> S(lambda)
assert_allclose(snu.to(slam, sb_fnu, u.spectral_density(wave)), sb_flam, rtol=1e-6)
assert_allclose(slam.to(snu, sb_flam, u.spectral_density(wave)), sb_fnu, rtol=1e-6)
@pytest.mark.parametrize(
("from_unit", "to_unit"),
[
(u.ph / u.cm**2 / u.s, (u.cm * u.cm * u.s) ** -1),
(u.ph / u.cm**2 / u.s, u.erg / (u.cm * u.cm * u.s * u.keV)),
(u.erg / u.cm**2 / u.s, (u.cm * u.cm * u.s) ** -1),
(u.erg / u.cm**2 / u.s, u.erg / (u.cm * u.cm * u.s * u.keV)),
],
)
def test_spectraldensity_not_allowed(from_unit, to_unit):
"""Not allowed to succeed as
per https://github.com/astropy/astropy/pull/10015
"""
with pytest.raises(u.UnitConversionError, match="not convertible"):
from_unit.to(to_unit, 1, u.spectral_density(1 * u.AA))
# The other way
with pytest.raises(u.UnitConversionError, match="not convertible"):
to_unit.to(from_unit, 1, u.spectral_density(1 * u.AA))
def test_equivalent_units():
from astropy.units import imperial
with u.add_enabled_units(imperial):
units = u.g.find_equivalent_units()
units_set = set(units)
match = {
u.M_e, u.M_p, u.g, u.kg, u.solMass, u.t, u.u, u.M_earth,
u.M_jup, imperial.oz, imperial.lb, imperial.st, imperial.ton,
imperial.slug,
} # fmt: skip
assert units_set == match
r = repr(units)
assert r.count("\n") == len(units) + 2
def test_equivalent_units2():
units = set(u.Hz.find_equivalent_units(u.spectral()))
match = {
u.AU, u.Angstrom, u.Hz, u.J, u.Ry, u.cm, u.eV, u.erg, u.lyr, u.lsec,
u.m, u.micron, u.pc, u.solRad, u.Bq, u.Ci, u.k, u.earthRad,
u.jupiterRad,
} # fmt: skip
assert units == match
from astropy.units import imperial
with u.add_enabled_units(imperial):
units = set(u.Hz.find_equivalent_units(u.spectral()))
match = {
u.AU, u.Angstrom, imperial.BTU, u.Hz, u.J, u.Ry,
imperial.cal, u.cm, u.eV, u.erg, imperial.ft, imperial.fur,
imperial.inch, imperial.kcal, u.lyr, u.m, imperial.mi, u.lsec,
imperial.mil, u.micron, u.pc, u.solRad, imperial.yd, u.Bq, u.Ci,
imperial.nmi, u.k, u.earthRad, u.jupiterRad,
} # fmt: skip
assert units == match
units = set(u.Hz.find_equivalent_units(u.spectral()))
match = {
u.AU, u.Angstrom, u.Hz, u.J, u.Ry, u.cm, u.eV, u.erg, u.lyr, u.lsec,
u.m, u.micron, u.pc, u.solRad, u.Bq, u.Ci, u.k, u.earthRad,
u.jupiterRad,
} # fmt: skip
assert units == match
def test_trivial_equivalency():
assert u.m.to(u.kg, equivalencies=[(u.m, u.kg)]) == 1.0
def test_invalid_equivalency():
with pytest.raises(ValueError):
u.m.to(u.kg, equivalencies=[(u.m,)])
with pytest.raises(ValueError):
u.m.to(u.kg, equivalencies=[(u.m, 5.0)])
def test_irrelevant_equivalency():
with pytest.raises(u.UnitsError):
u.m.to(u.kg, equivalencies=[(u.m, u.l)])
def test_brightness_temperature():
omega_B = np.pi * (50 * u.arcsec) ** 2
nu = u.GHz * 5
tb = 7.052587837212582 * u.K
np.testing.assert_almost_equal(
tb.value,
(1 * u.Jy).to_value(
u.K, equivalencies=u.brightness_temperature(nu, beam_area=omega_B)
),
)
np.testing.assert_almost_equal(
1.0,
tb.to_value(
u.Jy, equivalencies=u.brightness_temperature(nu, beam_area=omega_B)
),
)
def test_swapped_args_brightness_temperature():
"""
#5173 changes the order of arguments but accepts the old (deprecated) args
"""
omega_B = np.pi * (50 * u.arcsec) ** 2
nu = u.GHz * 5
tb = 7.052587837212582 * u.K
with pytest.warns(AstropyDeprecationWarning) as w:
result = (1 * u.Jy).to(u.K, equivalencies=u.brightness_temperature(omega_B, nu))
roundtrip = result.to(u.Jy, equivalencies=u.brightness_temperature(omega_B, nu))
assert len(w) == 2
np.testing.assert_almost_equal(tb.value, result.value)
np.testing.assert_almost_equal(roundtrip.value, 1)
def test_surfacebrightness():
sb = 50 * u.MJy / u.sr
k = sb.to(u.K, u.brightness_temperature(50 * u.GHz))
np.testing.assert_almost_equal(k.value, 0.650965, 5)
assert k.unit.is_equivalent(u.K)
def test_beam():
    # pick a beam area: 2 pi r^2 = area of a Gaussian with sigma=50 arcsec
omega_B = 2 * np.pi * (50 * u.arcsec) ** 2
new_beam = (5 * u.beam).to(u.sr, u.equivalencies.beam_angular_area(omega_B))
np.testing.assert_almost_equal(omega_B.to(u.sr).value * 5, new_beam.value)
assert new_beam.unit.is_equivalent(u.sr)
# make sure that it's still consistent with 5 beams
nbeams = new_beam.to(u.beam, u.equivalencies.beam_angular_area(omega_B))
np.testing.assert_almost_equal(nbeams.value, 5)
# test inverse beam equivalency
# (this is just a sanity check that the equivalency is defined;
# it's not for testing numerical consistency)
(5 / u.beam).to(1 / u.sr, u.equivalencies.beam_angular_area(omega_B))
# test practical case
# (this is by far the most important one)
flux_density = (5 * u.Jy / u.beam).to(
u.MJy / u.sr, u.equivalencies.beam_angular_area(omega_B)
)
np.testing.assert_almost_equal(flux_density.value, 13.5425483146382)
def test_thermodynamic_temperature():
nu = 143 * u.GHz
tb = 0.0026320501262630277 * u.K
eq = u.thermodynamic_temperature(nu, T_cmb=2.7255 * u.K)
np.testing.assert_almost_equal(
tb.value, (1 * (u.MJy / u.sr)).to_value(u.K, equivalencies=eq)
)
np.testing.assert_almost_equal(1.0, tb.to_value(u.MJy / u.sr, equivalencies=eq))
def test_equivalency_context():
with u.set_enabled_equivalencies(u.dimensionless_angles()):
phase = u.Quantity(1.0, u.cycle)
assert_allclose(np.exp(1j * phase), 1.0)
Omega = u.cycle / (1.0 * u.minute)
assert_allclose(np.exp(1j * Omega * 60.0 * u.second), 1.0)
# ensure we can turn off equivalencies even within the scope
with pytest.raises(u.UnitsError):
phase.to(1, equivalencies=None)
# test the manager also works in the Quantity constructor.
q1 = u.Quantity(phase, u.dimensionless_unscaled)
assert_allclose(q1.value, u.cycle.to(u.radian))
# and also if we use a class that happens to have a unit attribute.
class MyQuantityLookalike(np.ndarray):
pass
mylookalike = np.array(1.0).view(MyQuantityLookalike)
mylookalike.unit = "cycle"
# test the manager also works in the Quantity constructor.
q2 = u.Quantity(mylookalike, u.dimensionless_unscaled)
assert_allclose(q2.value, u.cycle.to(u.radian))
with u.set_enabled_equivalencies(u.spectral()):
u.GHz.to(u.cm)
eq_on = u.GHz.find_equivalent_units()
with pytest.raises(u.UnitsError):
u.GHz.to(u.cm, equivalencies=None)
# without equivalencies, we should find a smaller (sub)set
eq_off = u.GHz.find_equivalent_units()
assert all(eq in set(eq_on) for eq in eq_off)
assert set(eq_off) < set(eq_on)
# Check the equivalency manager also works in ufunc evaluations,
# not just using (wrong) scaling. [#2496]
l2v = u.doppler_optical(6000 * u.angstrom)
l1 = 6010 * u.angstrom
assert l1.to(u.km / u.s, equivalencies=l2v) > 100.0 * u.km / u.s
with u.set_enabled_equivalencies(l2v):
assert l1 > 100.0 * u.km / u.s
assert abs((l1 - 500.0 * u.km / u.s).to(u.angstrom)) < 1.0 * u.km / u.s
def test_equivalency_context_manager():
base_registry = u.get_current_unit_registry()
def just_to_from_units(equivalencies):
return [(equiv[0], equiv[1]) for equiv in equivalencies]
tf_dimensionless_angles = just_to_from_units(u.dimensionless_angles())
tf_spectral = just_to_from_units(u.spectral())
# <=1 b/c might have the dimensionless_redshift equivalency enabled.
assert len(base_registry.equivalencies) <= 1
with u.set_enabled_equivalencies(u.dimensionless_angles()):
new_registry = u.get_current_unit_registry()
assert set(just_to_from_units(new_registry.equivalencies)) == set(
tf_dimensionless_angles
)
assert set(new_registry.all_units) == set(base_registry.all_units)
with u.set_enabled_equivalencies(u.spectral()):
newer_registry = u.get_current_unit_registry()
assert set(just_to_from_units(newer_registry.equivalencies)) == set(
tf_spectral
)
assert set(newer_registry.all_units) == set(base_registry.all_units)
assert set(just_to_from_units(new_registry.equivalencies)) == set(
tf_dimensionless_angles
)
assert set(new_registry.all_units) == set(base_registry.all_units)
with u.add_enabled_equivalencies(u.spectral()):
newer_registry = u.get_current_unit_registry()
assert set(just_to_from_units(newer_registry.equivalencies)) == set(
tf_dimensionless_angles
) | set(tf_spectral)
assert set(newer_registry.all_units) == set(base_registry.all_units)
assert base_registry is u.get_current_unit_registry()
def test_temperature():
from astropy.units.imperial import deg_F, deg_R
t_k = 0 * u.K
assert_allclose(t_k.to_value(u.deg_C, u.temperature()), -273.15)
assert_allclose(t_k.to_value(deg_F, u.temperature()), -459.67)
t_k = 20 * u.K
assert_allclose(t_k.to_value(deg_R, u.temperature()), 36.0)
t_k = 20 * deg_R
assert_allclose(t_k.to_value(u.K, u.temperature()), 11.11, atol=0.01)
t_k = 20 * deg_F
assert_allclose(t_k.to_value(deg_R, u.temperature()), 479.67)
t_k = 20 * deg_R
assert_allclose(t_k.to_value(deg_F, u.temperature()), -439.67)
t_k = 20 * u.deg_C
assert_allclose(t_k.to_value(deg_R, u.temperature()), 527.67)
t_k = 20 * deg_R
assert_allclose(t_k.to_value(u.deg_C, u.temperature()), -262.039, atol=0.01)
def test_temperature_energy():
x = 1000 * u.K
y = (x * constants.k_B).to(u.keV)
assert_allclose(x.to_value(u.keV, u.temperature_energy()), y.value)
assert_allclose(y.to_value(u.K, u.temperature_energy()), x.value)
def test_molar_mass_amu():
x = 1 * (u.g / u.mol)
y = 1 * u.u
assert_allclose(x.to_value(u.u, u.molar_mass_amu()), y.value)
assert_allclose(y.to_value(u.g / u.mol, u.molar_mass_amu()), x.value)
with pytest.raises(u.UnitsError):
x.to(u.u)
def test_compose_equivalencies():
x = u.Unit("arcsec").compose(units=(u.pc,), equivalencies=u.parallax())
assert x[0] == u.pc
x = u.Unit("2 arcsec").compose(units=(u.pc,), equivalencies=u.parallax())
assert x[0] == u.Unit(0.5 * u.pc)
x = u.degree.compose(equivalencies=u.dimensionless_angles())
assert u.Unit(u.degree.to(u.radian)) in x
x = (u.nm).compose(
units=(u.m, u.s), equivalencies=u.doppler_optical(0.55 * u.micron)
)
for y in x:
if y.bases == [u.m, u.s]:
assert y.powers == [1, -1]
assert_allclose(
y.scale,
u.nm.to(u.m / u.s, equivalencies=u.doppler_optical(0.55 * u.micron)),
)
break
else:
assert False, "Didn't find speed in compose results"
def test_pixel_scale():
pix = 75 * u.pix
asec = 30 * u.arcsec
pixscale = 0.4 * u.arcsec / u.pix
pixscale2 = 2.5 * u.pix / u.arcsec
assert_quantity_allclose(pix.to(u.arcsec, u.pixel_scale(pixscale)), asec)
assert_quantity_allclose(pix.to(u.arcmin, u.pixel_scale(pixscale)), asec)
assert_quantity_allclose(pix.to(u.arcsec, u.pixel_scale(pixscale2)), asec)
assert_quantity_allclose(pix.to(u.arcmin, u.pixel_scale(pixscale2)), asec)
assert_quantity_allclose(asec.to(u.pix, u.pixel_scale(pixscale)), pix)
assert_quantity_allclose(asec.to(u.pix, u.pixel_scale(pixscale2)), pix)
def test_pixel_scale_invalid_scale_unit():
pixscale = 0.4 * u.arcsec
pixscale2 = 0.4 * u.arcsec / u.pix**2
with pytest.raises(u.UnitsError, match="pixel dimension"):
u.pixel_scale(pixscale)
with pytest.raises(u.UnitsError, match="pixel dimension"):
u.pixel_scale(pixscale2)
def test_pixel_scale_acceptable_scale_unit():
pix = 75 * u.pix
v = 3000 * (u.cm / u.s)
pixscale = 0.4 * (u.m / u.s / u.pix)
pixscale2 = 2.5 * (u.pix / (u.m / u.s))
assert_quantity_allclose(pix.to(u.m / u.s, u.pixel_scale(pixscale)), v)
assert_quantity_allclose(pix.to(u.km / u.s, u.pixel_scale(pixscale)), v)
assert_quantity_allclose(pix.to(u.m / u.s, u.pixel_scale(pixscale2)), v)
assert_quantity_allclose(pix.to(u.km / u.s, u.pixel_scale(pixscale2)), v)
assert_quantity_allclose(v.to(u.pix, u.pixel_scale(pixscale)), pix)
assert_quantity_allclose(v.to(u.pix, u.pixel_scale(pixscale2)), pix)
def test_plate_scale():
mm = 1.5 * u.mm
asec = 30 * u.arcsec
platescale = 20 * u.arcsec / u.mm
platescale2 = 0.05 * u.mm / u.arcsec
assert_quantity_allclose(mm.to(u.arcsec, u.plate_scale(platescale)), asec)
assert_quantity_allclose(mm.to(u.arcmin, u.plate_scale(platescale)), asec)
assert_quantity_allclose(mm.to(u.arcsec, u.plate_scale(platescale2)), asec)
assert_quantity_allclose(mm.to(u.arcmin, u.plate_scale(platescale2)), asec)
assert_quantity_allclose(asec.to(u.mm, u.plate_scale(platescale)), mm)
assert_quantity_allclose(asec.to(u.mm, u.plate_scale(platescale2)), mm)
def test_equivelency():
ps = u.pixel_scale(10 * u.arcsec / u.pix)
assert isinstance(ps, Equivalency)
assert isinstance(ps.name, list)
assert len(ps.name) == 1
assert ps.name[0] == "pixel_scale"
assert isinstance(ps.kwargs, list)
assert len(ps.kwargs) == 1
assert ps.kwargs[0] == {"pixscale": 10 * u.arcsec / u.pix}
def test_add_equivelencies():
e1 = u.pixel_scale(10 * u.arcsec / u.pixel) + u.temperature_energy()
assert isinstance(e1, Equivalency)
assert e1.name == ["pixel_scale", "temperature_energy"]
assert isinstance(e1.kwargs, list)
assert e1.kwargs == [{"pixscale": 10 * u.arcsec / u.pix}, dict()]
e2 = u.pixel_scale(10 * u.arcsec / u.pixel) + [1, 2, 3]
assert isinstance(e2, list)
def test_pprint():
pprint_class = u.UnitBase.EquivalentUnitsList
equiv_units_to_Hz = u.Hz.find_equivalent_units()
assert pprint_class.__repr__(equiv_units_to_Hz).splitlines() == [
" Primary name | Unit definition | Aliases ",
"[",
" Bq | 1 / s | becquerel ,",
" Ci | 3.7e+10 / s | curie ,",
" Hz | 1 / s | Hertz, hertz ,",
"]",
]
assert (
pprint_class._repr_html_(equiv_units_to_Hz) == '<table style="width:50%">'
"<tr><th>Primary name</th><th>Unit definition</th>"
"<th>Aliases</th></tr>"
"<tr><td>Bq</td><td>1 / s</td><td>becquerel</td></tr>"
"<tr><td>Ci</td><td>3.7e+10 / s</td><td>curie</td></tr>"
"<tr><td>Hz</td><td>1 / s</td><td>Hertz, hertz</td></tr></table>"
)
|
e13f86f5d1ade8c40af890a70c2c1613a186202b9e8f9e70c17d2369ec26b768 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Regression tests for the units.format package
"""
import warnings
from contextlib import nullcontext
from fractions import Fraction
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy import units as u
from astropy.constants import si
from astropy.units import PrefixUnit, Unit, UnitBase, UnitsWarning, dex
from astropy.units import format as u_format
from astropy.units.utils import is_effectively_unity
@pytest.mark.parametrize(
"strings, unit",
[
(["m s", "m*s", "m.s"], u.m * u.s),
(["m/s", "m*s**-1", "m /s", "m / s", "m/ s"], u.m / u.s),
(["m**2", "m2", "m**(2)", "m**+2", "m+2", "m^(+2)"], u.m**2),
(["m**-3", "m-3", "m^(-3)", "/m3"], u.m**-3),
(["m**(1.5)", "m(3/2)", "m**(3/2)", "m^(3/2)"], u.m**1.5),
(["2.54 cm"], u.Unit(u.cm * 2.54)),
(["10+8m"], u.Unit(u.m * 1e8)),
# This is the VOUnits documentation, but doesn't seem to follow the
# unity grammar (["3.45 10**(-4)Jy"], 3.45 * 1e-4 * u.Jy)
(["sqrt(m)"], u.m**0.5),
(["dB(mW)", "dB (mW)"], u.DecibelUnit(u.mW)),
(["mag"], u.mag),
(["mag(ct/s)"], u.MagUnit(u.ct / u.s)),
(["dex"], u.dex),
(["dex(cm s**-2)", "dex(cm/s2)"], u.DexUnit(u.cm / u.s**2)),
],
)
def test_unit_grammar(strings, unit):
for s in strings:
print(s)
unit2 = u_format.Generic.parse(s)
assert unit2 == unit
@pytest.mark.parametrize(
"string", ["sin( /pixel /s)", "mag(mag)", "dB(dB(mW))", "dex()"]
)
def test_unit_grammar_fail(string):
with pytest.raises(ValueError):
print(string)
u_format.Generic.parse(string)
@pytest.mark.parametrize(
"strings, unit",
[
(["0.1nm"], u.AA),
(["mW/m2"], u.Unit(u.erg / u.cm**2 / u.s)),
(["mW/(m2)"], u.Unit(u.erg / u.cm**2 / u.s)),
(["km/s", "km.s-1"], u.km / u.s),
(["km/s/Mpc"], u.km / u.s / u.Mpc),
(["km/(s.Mpc)"], u.km / u.s / u.Mpc),
(["10+3J/m/s/kpc2"], u.Unit(1e3 * u.W / (u.m * u.kpc**2))),
(["10pix/nm"], u.Unit(10 * u.pix / u.nm)),
(["1.5x10+11m"], u.Unit(1.5e11 * u.m)),
(["1.5×10+11/m"], u.Unit(1.5e11 / u.m)),
(["/s"], u.s**-1),
(["m2"], u.m**2),
(["10+21m"], u.Unit(u.m * 1e21)),
(["2.54cm"], u.Unit(u.cm * 2.54)),
(["20%"], 0.20 * u.dimensionless_unscaled),
(["10+9"], 1.0e9 * u.dimensionless_unscaled),
(["2x10-9"], 2.0e-9 * u.dimensionless_unscaled),
(["---"], u.dimensionless_unscaled),
(["ma"], u.ma),
(["mAU"], u.mAU),
(["uarcmin"], u.uarcmin),
(["uarcsec"], u.uarcsec),
(["kbarn"], u.kbarn),
(["Gbit"], u.Gbit),
(["Gibit"], 2**30 * u.bit),
(["kbyte"], u.kbyte),
(["mRy"], 0.001 * u.Ry),
(["mmag"], u.mmag),
(["Mpc"], u.Mpc),
(["Gyr"], u.Gyr),
(["°"], u.degree),
(["°/s"], u.degree / u.s),
(["Å"], u.AA),
(["Å/s"], u.AA / u.s),
(["\\h"], si.h),
(["[cm/s2]"], dex(u.cm / u.s**2)),
(["[K]"], dex(u.K)),
(["[-]"], dex(u.dimensionless_unscaled)),
],
)
def test_cds_grammar(strings, unit):
for s in strings:
print(s)
unit2 = u_format.CDS.parse(s)
assert unit2 == unit
@pytest.mark.parametrize(
"string",
[
"0.1 nm",
"solMass(3/2)",
"km / s",
"km s-1",
"km/s.Mpc-1",
"/s.Mpc",
"pix0.1nm",
"pix/(0.1nm)",
"km*s",
"km**2",
"5x8+3m",
"0.1---",
"---m",
"m---",
"--",
"0.1-",
"-m",
"m-",
"mag(s-1)",
"dB(mW)",
"dex(cm s-2)",
"[--]",
],
)
def test_cds_grammar_fail(string):
with pytest.raises(ValueError):
print(string)
u_format.CDS.parse(string)
def test_cds_dimensionless():
assert u.Unit("---", format="cds") == u.dimensionless_unscaled
assert u.dimensionless_unscaled.to_string(format="cds") == "---"
def test_cds_log10_dimensionless():
assert u.Unit("[-]", format="cds") == u.dex(u.dimensionless_unscaled)
assert u.dex(u.dimensionless_unscaled).to_string(format="cds") == "[-]"
# These examples are taken from the EXAMPLES section of
# https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/general/ogip_93_001/
@pytest.mark.parametrize(
"strings, unit",
[
(
["count /s", "count/s", "count s**(-1)", "count / s", "count /s "],
u.count / u.s,
),
(
["/pixel /s", "/(pixel * s)"],
(u.pixel * u.s) ** -1,
),
(
[
"count /m**2 /s /eV",
"count m**(-2) * s**(-1) * eV**(-1)",
"count /(m**2 * s * eV)",
],
u.count * u.m**-2 * u.s**-1 * u.eV**-1,
),
(
["erg /pixel /s /GHz", "erg /s /GHz /pixel", "erg /pixel /(s * GHz)"],
u.erg / (u.s * u.GHz * u.pixel),
),
(
["keV**2 /yr /angstrom", "10**(10) keV**2 /yr /m"],
# Though this is given as an example, it seems to violate the rules
# of not raising scales to powers, so I'm just excluding it
# "(10**2 MeV)**2 /yr /m"
u.keV**2 / (u.yr * u.angstrom),
),
(
[
"10**(46) erg /s",
"10**46 erg /s",
"10**(39) J /s",
"10**(39) W",
"10**(15) YW",
"YJ /fs",
],
10**46 * u.erg / u.s,
),
(
[
"10**(-7) J /cm**2 /MeV",
"10**(-9) J m**(-2) eV**(-1)",
"nJ m**(-2) eV**(-1)",
"nJ /m**2 /eV",
],
10**-7 * u.J * u.cm**-2 * u.MeV**-1,
),
(
[
"sqrt(erg /pixel /s /GHz)",
"(erg /pixel /s /GHz)**(0.5)",
"(erg /pixel /s /GHz)**(1/2)",
"erg**(0.5) pixel**(-0.5) s**(-0.5) GHz**(-0.5)",
],
(u.erg * u.pixel**-1 * u.s**-1 * u.GHz**-1) ** 0.5,
),
(
[
"(count /s) (/pixel /s)",
"(count /s) * (/pixel /s)",
"count /pixel /s**2",
],
(u.count / u.s) * (1.0 / (u.pixel * u.s)),
),
],
)
def test_ogip_grammar(strings, unit):
for s in strings:
print(s)
unit2 = u_format.OGIP.parse(s)
assert unit2 == unit
@pytest.mark.parametrize(
"string",
[
"log(photon /m**2 /s /Hz)",
"sin( /pixel /s)",
"log(photon /cm**2 /s /Hz) /(sin( /pixel /s))",
"log(photon /cm**2 /s /Hz) (sin( /pixel /s))**(-1)",
"dB(mW)",
"dex(cm/s**2)",
],
)
def test_ogip_grammar_fail(string):
with pytest.raises(ValueError):
print(string)
u_format.OGIP.parse(string)
class RoundtripBase:
deprecated_units = set()
def check_roundtrip(self, unit, output_format=None):
if output_format is None:
output_format = self.format_
with warnings.catch_warnings():
warnings.simplefilter("ignore") # Same warning shows up multiple times
s = unit.to_string(output_format)
if s in self.deprecated_units:
with pytest.warns(UnitsWarning, match="deprecated") as w:
a = Unit(s, format=self.format_)
assert len(w) == 1
else:
a = Unit(s, format=self.format_) # No warning
assert_allclose(a.decompose().scale, unit.decompose().scale, rtol=1e-9)
def check_roundtrip_decompose(self, unit):
ud = unit.decompose()
s = ud.to_string(self.format_)
assert " " not in s
a = Unit(s, format=self.format_)
assert_allclose(a.decompose().scale, ud.scale, rtol=1e-5)
class TestRoundtripGeneric(RoundtripBase):
format_ = "generic"
@pytest.mark.parametrize(
"unit",
[
unit
for unit in u.__dict__.values()
if (isinstance(unit, UnitBase) and not isinstance(unit, PrefixUnit))
],
)
def test_roundtrip(self, unit):
self.check_roundtrip(unit)
self.check_roundtrip(unit, output_format="unicode")
self.check_roundtrip_decompose(unit)
class TestRoundtripVOUnit(RoundtripBase):
format_ = "vounit"
deprecated_units = u_format.VOUnit._deprecated_units
@pytest.mark.parametrize(
"unit",
[
unit
for unit in u_format.VOUnit._units.values()
if (isinstance(unit, UnitBase) and not isinstance(unit, PrefixUnit))
],
)
def test_roundtrip(self, unit):
self.check_roundtrip(unit)
if unit not in (u.mag, u.dB):
self.check_roundtrip_decompose(unit)
class TestRoundtripFITS(RoundtripBase):
format_ = "fits"
deprecated_units = u_format.Fits._deprecated_units
@pytest.mark.parametrize(
"unit",
[
unit
for unit in u_format.Fits._units.values()
if (isinstance(unit, UnitBase) and not isinstance(unit, PrefixUnit))
],
)
def test_roundtrip(self, unit):
self.check_roundtrip(unit)
class TestRoundtripCDS(RoundtripBase):
format_ = "cds"
@pytest.mark.parametrize(
"unit",
[
unit
for unit in u_format.CDS._units.values()
if (isinstance(unit, UnitBase) and not isinstance(unit, PrefixUnit))
],
)
def test_roundtrip(self, unit):
self.check_roundtrip(unit)
if unit == u.mag:
# Skip mag: decomposes into dex, which is unknown to CDS.
return
self.check_roundtrip_decompose(unit)
@pytest.mark.parametrize(
"unit", [u.dex(unit) for unit in (u.cm / u.s**2, u.K, u.Lsun)]
)
def test_roundtrip_dex(self, unit):
string = unit.to_string(format="cds")
recovered = u.Unit(string, format="cds")
assert recovered == unit
class TestRoundtripOGIP(RoundtripBase):
format_ = "ogip"
deprecated_units = u_format.OGIP._deprecated_units | {"d"}
@pytest.mark.parametrize(
"unit",
[
unit
for unit in u_format.OGIP._units.values()
if (isinstance(unit, UnitBase) and not isinstance(unit, PrefixUnit))
],
)
def test_roundtrip(self, unit):
if str(unit) in ("d", "0.001 Crab"):
# Special-case day, which gets auto-converted to hours, and mCrab,
# which the default check does not recognize as a deprecated unit.
with pytest.warns(UnitsWarning):
s = unit.to_string(self.format_)
a = Unit(s, format=self.format_)
assert_allclose(a.decompose().scale, unit.decompose().scale, rtol=1e-9)
else:
self.check_roundtrip(unit)
if str(unit) in ("mag", "byte", "Crab"):
# Skip mag and byte, which decompose into dex and bit, resp.,
# both of which are unknown to OGIP, as well as Crab, which does
# not decompose, and thus gives a deprecated unit warning.
return
power_of_ten = np.log10(unit.decompose().scale)
if abs(power_of_ten - round(power_of_ten)) > 1e-3:
ctx = pytest.warns(UnitsWarning, match="power of 10")
elif str(unit) == "0.001 Crab":
ctx = pytest.warns(UnitsWarning, match="deprecated")
else:
ctx = nullcontext()
with ctx:
self.check_roundtrip_decompose(unit)
def test_fits_units_available():
u_format.Fits._units
def test_vo_units_available():
u_format.VOUnit._units
def test_cds_units_available():
u_format.CDS._units
def test_cds_non_ascii_unit():
"""Regression test for #5350. This failed with a decoding error as
μas could not be represented in ascii."""
from astropy.units import cds
with cds.enable():
u.radian.find_equivalent_units(include_prefix_units=True)
def test_latex():
fluxunit = u.erg / (u.cm**2 * u.s)
assert fluxunit.to_string("latex") == r"$\mathrm{\frac{erg}{s\,cm^{2}}}$"
def test_new_style_latex():
fluxunit = u.erg / (u.cm**2 * u.s)
assert f"{fluxunit:latex}" == r"$\mathrm{\frac{erg}{s\,cm^{2}}}$"
def test_latex_scale():
fluxunit = u.Unit(1.0e-24 * u.erg / (u.cm**2 * u.s * u.Hz))
latex = r"$\mathrm{1 \times 10^{-24}\,\frac{erg}{Hz\,s\,cm^{2}}}$"
assert fluxunit.to_string("latex") == latex
def test_latex_inline_scale():
fluxunit = u.Unit(1.0e-24 * u.erg / (u.cm**2 * u.s * u.Hz))
latex_inline = r"$\mathrm{1 \times 10^{-24}\,erg\,Hz^{-1}\,s^{-1}\,cm^{-2}}$"
assert fluxunit.to_string("latex_inline") == latex_inline
@pytest.mark.parametrize(
"format_spec, string, decomposed",
[
("generic", "erg / (Angstrom s cm2)", "1e+07 kg / (m s3)"),
("s", "erg / (Angstrom s cm2)", "1e+07 kg / (m s3)"),
("console", "erg Angstrom^-1 s^-1 cm^-2", "10000000 kg m^-1 s^-3"),
(
"latex",
r"$\mathrm{\frac{erg}{\mathring{A}\,s\,cm^{2}}}$",
r"$\mathrm{10000000\,\frac{kg}{m\,s^{3}}}$",
),
(
"latex_inline",
r"$\mathrm{erg\,\mathring{A}^{-1}\,s^{-1}\,cm^{-2}}$",
r"$\mathrm{10000000\,kg\,m^{-1}\,s^{-3}}$",
),
("unicode", "erg Å⁻¹ s⁻¹ cm⁻²", "10000000 kg m⁻¹ s⁻³"),
(">25s", " erg / (Angstrom s cm2)", " 1e+07 kg / (m s3)"),
("cds", "erg.Angstrom-1.s-1.cm-2", "10000000kg.m-1.s-3"),
("ogip", "10 erg / (nm s cm**2)", "1e+07 kg / (m s**3)"),
("fits", "erg Angstrom-1 s-1 cm-2", "10**7 kg m-1 s-3"),
("vounit", "erg.Angstrom**-1.s**-1.cm**-2", "10000000kg.m**-1.s**-3"),
# TODO: make fits and vounit less awful!
],
)
def test_format_styles(format_spec, string, decomposed):
fluxunit = u.erg / (u.cm**2 * u.s * u.Angstrom)
if format_spec == "vounit":
# erg and Angstrom are deprecated in vounit.
with pytest.warns(UnitsWarning, match="deprecated"):
formatted = format(fluxunit, format_spec)
else:
formatted = format(fluxunit, format_spec)
assert formatted == string
# Decomposed mostly to test that scale factors are dealt with properly
# in the various formats.
assert format(fluxunit.decompose(), format_spec) == decomposed
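# Orientation sketch (assumption about the call path): format(unit, spec) and
# f"{unit:spec}" go through UnitBase.__format__, which defers to to_string, so
#
#     format(u.erg / u.s, "latex") == (u.erg / u.s).to_string("latex")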
@pytest.mark.parametrize(
"format_spec, fraction, string, decomposed",
[
("generic", False, "erg s-1 cm-2", "0.001 kg s-3"),
(
"console",
"multiline",
" erg \n------\ns cm^2",
" kg \n0.001 ---\n s^3",
),
("console", "inline", "erg / (s cm^2)", "0.001 kg / s^3"),
("unicode", "multiline", " erg \n─────\ns cm²", " kg\n0.001 ──\n s³"),
("unicode", "inline", "erg / (s cm²)", "0.001 kg / s³"),
(
"latex",
False,
r"$\mathrm{erg\,s^{-1}\,cm^{-2}}$",
r"$\mathrm{0.001\,kg\,s^{-3}}$",
),
(
"latex",
"inline",
r"$\mathrm{erg / (s\,cm^{2})}$",
r"$\mathrm{0.001\,kg / s^{3}}$",
),
# TODO: make generic with fraction=False less awful!
],
)
def test_format_styles_non_default_fraction(format_spec, fraction, string, decomposed):
fluxunit = u.erg / (u.cm**2 * u.s)
assert fluxunit.to_string(format_spec, fraction=fraction) == string
assert fluxunit.decompose().to_string(format_spec, fraction=fraction) == decomposed
@pytest.mark.parametrize("format_spec", ["generic", "cds", "fits", "ogip", "vounit"])
def test_no_multiline_fraction(format_spec):
fluxunit = u.W / u.m**2
with pytest.raises(ValueError, match="only supports.*not fraction='multiline'"):
fluxunit.to_string(format_spec, fraction="multiline")
@pytest.mark.parametrize(
"format_spec",
["generic", "cds", "fits", "ogip", "vounit", "latex", "console", "unicode"],
)
def test_unknown_fraction_style(format_spec):
fluxunit = u.W / u.m**2
with pytest.raises(ValueError, match="only supports.*parrot"):
fluxunit.to_string(format_spec, fraction="parrot")
def test_flatten_to_known():
myunit = u.def_unit("FOOBAR_One", u.erg / u.Hz)
assert myunit.to_string("fits") == "erg Hz-1"
myunit2 = myunit * u.bit**3
assert myunit2.to_string("fits") == "bit3 erg Hz-1"
def test_flatten_impossible():
myunit = u.def_unit("FOOBAR_Two")
with u.add_enabled_units(myunit), pytest.raises(ValueError):
myunit.to_string("fits")
def test_console_out():
"""
Issue #436.
"""
u.Jy.decompose().to_string("console")
@pytest.mark.parametrize(
"format,string",
[
("generic", "10"),
("console", "10"),
("unicode", "10"),
("cds", "10"),
("latex", r"$\mathrm{10}$"),
],
)
def test_scale_only(format, string):
unit = u.Unit(10)
assert unit.to_string(format) == string
def test_flexible_float():
assert u.min._represents.to_string("latex") == r"$\mathrm{60\,s}$"
def test_fits_to_string_function_error():
"""Test function raises TypeError on bad input.
This instead of returning None, see gh-11825.
"""
with pytest.raises(TypeError, match="unit argument must be"):
u_format.Fits.to_string(None)
def test_fraction_repr():
area = u.cm**2.0
assert "." not in area.to_string("latex")
fractional = u.cm**2.5
assert "5/2" in fractional.to_string("latex")
assert fractional.to_string("unicode") == "cm⁵⸍²"
def test_scale_effectively_unity():
"""Scale just off unity at machine precision level is OK.
Ensures #748 does not recur
"""
a = (3.0 * u.N).cgs
assert is_effectively_unity(a.unit.scale)
assert len(a.__repr__().split()) == 3
def test_percent():
"""Test that the % unit is properly recognized. Since % is a special
symbol, this goes slightly beyond the round-tripping tested above."""
assert u.Unit("%") == u.percent == u.Unit(0.01)
assert u.Unit("%", format="cds") == u.Unit(0.01)
assert u.Unit(0.01).to_string("cds") == "%"
with pytest.raises(ValueError):
u.Unit("%", format="fits")
with pytest.raises(ValueError):
u.Unit("%", format="vounit")
def test_scaled_dimensionless():
"""Test that scaled dimensionless units are properly recognized in generic
and CDS, but not in fits and vounit."""
assert u.Unit("0.1") == u.Unit(0.1) == 0.1 * u.dimensionless_unscaled
assert u.Unit("1.e-4") == u.Unit(1.0e-4)
assert u.Unit("10-4", format="cds") == u.Unit(1.0e-4)
assert u.Unit("10+8").to_string("cds") == "10+8"
with pytest.raises(ValueError):
u.Unit(0.15).to_string("fits")
assert u.Unit(0.1).to_string("fits") == "10**-1"
with pytest.raises(ValueError):
u.Unit(0.1).to_string("vounit")
def test_deprecated_did_you_mean_units():
with pytest.raises(ValueError) as exc_info:
u.Unit("ANGSTROM", format="fits")
assert "Did you mean Angstrom or angstrom?" in str(exc_info.value)
with pytest.raises(ValueError) as exc_info:
u.Unit("crab", format="ogip")
assert "Crab (deprecated)" in str(exc_info.value)
assert "mCrab (deprecated)" in str(exc_info.value)
with pytest.warns(
UnitsWarning,
match=r".* Did you mean 0\.1nm, Angstrom "
r"\(deprecated\) or angstrom \(deprecated\)\?",
) as w:
u.Unit("ANGSTROM", format="vounit")
assert len(w) == 1
assert str(w[0].message).count("0.1nm") == 1
with pytest.warns(UnitsWarning, match=r".* 0\.1nm\.") as w:
u.Unit("angstrom", format="vounit")
assert len(w) == 1
@pytest.mark.parametrize("string", ["mag(ct/s)", "dB(mW)", "dex(cm s**-2)"])
def test_fits_function(string):
# Function units cannot be written, so ensure they're not parsed either.
with pytest.raises(ValueError):
print(string)
u_format.Fits().parse(string)
@pytest.mark.parametrize("string", ["mag(ct/s)", "dB(mW)", "dex(cm s**-2)"])
def test_vounit_function(string):
# Function units cannot be written, so ensure they're not parsed either.
with pytest.raises(ValueError), warnings.catch_warnings():
# ct, dex also raise warnings - irrelevant here.
warnings.simplefilter("ignore")
u_format.VOUnit().parse(string)
def test_vounit_binary_prefix():
assert u.Unit("KiB", format="vounit") == u.Unit("1024 B")
assert u.Unit("Kibyte", format="vounit") == u.Unit("1024 B")
assert u.Unit("Kibit", format="vounit") == u.Unit("128 B")
with pytest.warns(UnitsWarning) as w:
u.Unit("kibibyte", format="vounit")
assert len(w) == 1
def test_vounit_unknown():
assert u.Unit("unknown", format="vounit") is None
assert u.Unit("UNKNOWN", format="vounit") is None
assert u.Unit("", format="vounit") is u.dimensionless_unscaled
def test_vounit_details():
assert u.Unit("Pa", format="vounit") is u.Pascal
assert u.Unit("ka", format="vounit") == u.Unit("1000 yr")
assert u.Unit("pix", format="vounit") == u.Unit("pixel", format="vounit")
# The da- prefix is not allowed, and the d- prefix is discouraged
assert u.dam.to_string("vounit") == "10m"
assert u.Unit("dam dag").to_string("vounit") == "100g.m"
# Parse round-trip
with pytest.warns(UnitsWarning, match="deprecated"):
flam = u.erg / u.cm / u.cm / u.s / u.AA
x = u.format.VOUnit.to_string(flam)
assert x == "erg.Angstrom**-1.s**-1.cm**-2"
new_flam = u.format.VOUnit.parse(x)
assert new_flam == flam
@pytest.mark.parametrize(
"unit, vounit, number, scale, voscale",
[
("nm", "nm", 0.1, "10^-1", "0.1"),
("fm", "fm", 100.0, "10+2", "100"),
("m^2", "m**2", 100.0, "100.0", "100"),
("cm", "cm", 2.54, "2.54", "2.54"),
("kg", "kg", 1.898124597e27, "1.898124597E27", "1.8981246e+27"),
("m/s", "m.s**-1", 299792458.0, "299792458", "2.9979246e+08"),
("cm2", "cm**2", 1.0e-20, "10^(-20)", "1e-20"),
],
)
def test_vounit_scale_factor(unit, vounit, number, scale, voscale):
x = u.Unit(f"{scale} {unit}")
assert x == number * u.Unit(unit)
assert x.to_string(format="vounit") == voscale + vounit
def test_vounit_custom():
x = u.Unit("'foo' m", format="vounit")
x_vounit = x.to_string("vounit")
assert x_vounit == "'foo'.m"
x_string = x.to_string()
assert x_string == "foo m"
x = u.Unit("m'foo' m", format="vounit")
assert x.bases[1]._represents.scale == 0.001
x_vounit = x.to_string("vounit")
assert x_vounit == "m.m'foo'"
x_string = x.to_string()
assert x_string == "m mfoo"
def test_vounit_implicit_custom():
# Yikes, this becomes "femto-urlong"... But at least there's a warning.
with pytest.warns(UnitsWarning) as w:
x = u.Unit("furlong/week", format="vounit")
assert x.bases[0]._represents.scale == 1e-15
assert x.bases[0]._represents.bases[0].name == "urlong"
assert len(w) == 2
assert "furlong" in str(w[0].message)
assert "week" in str(w[1].message)
@pytest.mark.parametrize(
"scale, number, string",
[
("10+2", 100, "10**2"),
("10(+2)", 100, "10**2"),
("10**+2", 100, "10**2"),
("10**(+2)", 100, "10**2"),
("10^+2", 100, "10**2"),
("10^(+2)", 100, "10**2"),
("10**2", 100, "10**2"),
("10**(2)", 100, "10**2"),
("10^2", 100, "10**2"),
("10^(2)", 100, "10**2"),
("10-20", 10 ** (-20), "10**-20"),
("10(-20)", 10 ** (-20), "10**-20"),
("10**-20", 10 ** (-20), "10**-20"),
("10**(-20)", 10 ** (-20), "10**-20"),
("10^-20", 10 ** (-20), "10**-20"),
("10^(-20)", 10 ** (-20), "10**-20"),
],
)
def test_fits_scale_factor(scale, number, string):
x = u.Unit(scale + " erg/(s cm**2 Angstrom)", format="fits")
assert x == number * (u.erg / u.s / u.cm**2 / u.Angstrom)
assert x.to_string(format="fits") == string + " erg Angstrom-1 s-1 cm-2"
x = u.Unit(scale + "*erg/(s cm**2 Angstrom)", format="fits")
assert x == number * (u.erg / u.s / u.cm**2 / u.Angstrom)
assert x.to_string(format="fits") == string + " erg Angstrom-1 s-1 cm-2"
def test_fits_scale_factor_errors():
with pytest.raises(ValueError):
x = u.Unit("1000 erg/(s cm**2 Angstrom)", format="fits")
with pytest.raises(ValueError):
x = u.Unit("12 erg/(s cm**2 Angstrom)", format="fits")
x = u.Unit(1.2 * u.erg)
with pytest.raises(ValueError):
x.to_string(format="fits")
x = u.Unit(100.0 * u.erg)
assert x.to_string(format="fits") == "10**2 erg"
def test_double_superscript():
"""Regression test for #5870, #8699, #9218; avoid double superscripts."""
assert (u.deg).to_string("latex") == r"$\mathrm{{}^{\circ}}$"
assert (u.deg**2).to_string("latex") == r"$\mathrm{deg^{2}}$"
assert (u.arcmin).to_string("latex") == r"$\mathrm{{}^{\prime}}$"
assert (u.arcmin**2).to_string("latex") == r"$\mathrm{arcmin^{2}}$"
assert (u.arcsec).to_string("latex") == r"$\mathrm{{}^{\prime\prime}}$"
assert (u.arcsec**2).to_string("latex") == r"$\mathrm{arcsec^{2}}$"
assert (u.hourangle).to_string("latex") == r"$\mathrm{{}^{h}}$"
assert (u.hourangle**2).to_string("latex") == r"$\mathrm{hourangle^{2}}$"
assert (u.electron).to_string("latex") == r"$\mathrm{e^{-}}$"
assert (u.electron**2).to_string("latex") == r"$\mathrm{electron^{2}}$"
def test_no_prefix_superscript():
"""Regression test for gh-911 and #14419."""
assert u.mdeg.to_string("latex") == r"$\mathrm{mdeg}$"
assert u.narcmin.to_string("latex") == r"$\mathrm{narcmin}$"
assert u.parcsec.to_string("latex") == r"$\mathrm{parcsec}$"
assert u.mdeg.to_string("unicode") == "mdeg"
assert u.narcmin.to_string("unicode") == "narcmin"
assert u.parcsec.to_string("unicode") == "parcsec"
@pytest.mark.parametrize(
"power,expected",
(
(1.0, "m"),
(2.0, "m2"),
(-10, "1 / m10"),
(1.5, "m(3/2)"),
(2 / 3, "m(2/3)"),
(7 / 11, "m(7/11)"),
(-1 / 64, "1 / m(1/64)"),
(1 / 100, "m(1/100)"),
(2 / 101, "m(0.019801980198019802)"),
(Fraction(2, 101), "m(2/101)"),
),
)
def test_powers(power, expected):
"""Regression test for #9279 - powers should not be oversimplified."""
unit = u.m**power
s = unit.to_string()
assert s == expected
assert unit == s
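# Note: a float power such as 2 / 101 cannot be recovered as an exact
# fraction, hence the long decimal above, while fractions.Fraction(2, 101)
# preserves the exact "m(2/101)" representation.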
@pytest.mark.parametrize(
"string,unit",
[
("\N{MICRO SIGN}g", u.microgram),
("\N{GREEK SMALL LETTER MU}g", u.microgram),
("g\N{MINUS SIGN}1", u.g ** (-1)),
("m\N{SUPERSCRIPT MINUS}\N{SUPERSCRIPT ONE}", 1 / u.m),
("m s\N{SUPERSCRIPT MINUS}\N{SUPERSCRIPT ONE}", u.m / u.s),
("m\N{SUPERSCRIPT TWO}", u.m**2),
("m\N{SUPERSCRIPT PLUS SIGN}\N{SUPERSCRIPT TWO}", u.m**2),
("m\N{SUPERSCRIPT THREE}", u.m**3),
("m\N{SUPERSCRIPT ONE}\N{SUPERSCRIPT ZERO}", u.m**10),
("\N{GREEK CAPITAL LETTER OMEGA}", u.ohm),
("\N{OHM SIGN}", u.ohm), # deprecated but for compatibility
("\N{MICRO SIGN}\N{GREEK CAPITAL LETTER OMEGA}", u.microOhm),
("\N{ANGSTROM SIGN}", u.Angstrom),
("\N{ANGSTROM SIGN} \N{OHM SIGN}", u.Angstrom * u.Ohm),
("\N{LATIN CAPITAL LETTER A WITH RING ABOVE}", u.Angstrom),
("\N{LATIN CAPITAL LETTER A}\N{COMBINING RING ABOVE}", u.Angstrom),
("m\N{ANGSTROM SIGN}", u.milliAngstrom),
("°C", u.deg_C),
("°", u.deg),
("M⊙", u.Msun), # \N{CIRCLED DOT OPERATOR}
("L☉", u.Lsun), # \N{SUN}
("M⊕", u.Mearth), # normal earth symbol = \N{CIRCLED PLUS}
("M♁", u.Mearth), # be generous with \N{EARTH}
("R♃", u.Rjup), # \N{JUPITER}
("′", u.arcmin), # \N{PRIME}
("R∞", u.Ry),
("Mₚ", u.M_p),
],
)
def test_unicode(string, unit):
assert u_format.Generic.parse(string) == unit
assert u.Unit(string) == unit
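# As exercised above, the generic parser accepts several equivalent Unicode
# spellings for the same unit (micro sign vs. Greek mu, the Angstrom sign vs.
# A with a combining ring, superscript digits for powers), all normalizing to
# the same Unit instance.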
@pytest.mark.parametrize(
"string",
[
"g\N{MICRO SIGN}",
"g\N{MINUS SIGN}",
"m\N{SUPERSCRIPT MINUS}1",
"m+\N{SUPERSCRIPT ONE}",
"m\N{MINUS SIGN}\N{SUPERSCRIPT ONE}",
"k\N{ANGSTROM SIGN}",
],
)
def test_unicode_failures(string):
with pytest.raises(ValueError):
u.Unit(string)
@pytest.mark.parametrize("format_", ("unicode", "latex", "latex_inline"))
def test_parse_error_message_for_output_only_format(format_):
with pytest.raises(NotImplementedError, match="not parse"):
u.Unit("m", format=format_)
def test_unknown_parser():
with pytest.raises(ValueError, match=r"Unknown.*unicode'\] for output only"):
u.Unit("m", format="foo")
def test_celsius_fits():
assert u.Unit("Celsius", format="fits") == u.deg_C
assert u.Unit("deg C", format="fits") == u.deg_C
    # Check that compounds parse as expected: "deg C kg-1" splits into
    # degree * Coulomb / kg, whereas "Celsius kg-1" keeps deg_C.
assert u.Unit("deg C kg-1", format="fits") == u.C * u.deg / u.kg
assert u.Unit("Celsius kg-1", format="fits") == u.deg_C / u.kg
assert u.deg_C.to_string("fits") == "Celsius"
@pytest.mark.parametrize(
"format_spec, string",
[
("generic", "dB(1 / m)"),
("latex", r"$\mathrm{dB}$$\mathrm{\left( \mathrm{\frac{1}{m}} \right)}$"),
("latex_inline", r"$\mathrm{dB}$$\mathrm{\left( \mathrm{m^{-1}} \right)}$"),
("console", "dB(m^-1)"),
("unicode", "dB(m⁻¹)"),
],
)
def test_function_format_styles(format_spec, string):
dbunit = u.decibel(u.m**-1)
assert dbunit.to_string(format_spec) == string
assert f"{dbunit:{format_spec}}" == string
@pytest.mark.parametrize(
"format_spec, fraction, string",
[
("console", "multiline", " 1\ndB(-)\n m"),
("console", "inline", "dB(1 / m)"),
("unicode", "multiline", " 1\ndB(─)\n m"),
("unicode", "inline", "dB(1 / m)"),
("latex", False, r"$\mathrm{dB}$$\mathrm{\left( \mathrm{m^{-1}} \right)}$"),
("latex", "inline", r"$\mathrm{dB}$$\mathrm{\left( \mathrm{1 / m} \right)}$"),
],
)
def test_function_format_styles_non_default_fraction(format_spec, fraction, string):
dbunit = u.decibel(u.m**-1)
assert dbunit.to_string(format_spec, fraction=fraction) == string